[
  {
    "path": ".gitignore",
    "content": "*.iml\n.gradle\n/local.properties\n/.idea/workspace.xml\n/.idea/libraries\n.idea/modules.xml\n.DS_Store\n/build\n/captures\n.externalNativeBuild\n.ipynb_checkpoints/\n"
  },
  {
    "path": ".idea/compiler.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"CompilerConfiguration\">\n    <resourceExtensions />\n    <wildcardResourcePatterns>\n      <entry name=\"!?*.java\" />\n      <entry name=\"!?*.form\" />\n      <entry name=\"!?*.class\" />\n      <entry name=\"!?*.groovy\" />\n      <entry name=\"!?*.scala\" />\n      <entry name=\"!?*.flex\" />\n      <entry name=\"!?*.kt\" />\n      <entry name=\"!?*.clj\" />\n      <entry name=\"!?*.aj\" />\n    </wildcardResourcePatterns>\n    <annotationProcessing>\n      <profile default=\"true\" name=\"Default\" enabled=\"false\">\n        <processorPath useClasspath=\"true\" />\n      </profile>\n    </annotationProcessing>\n  </component>\n</project>"
  },
  {
    "path": ".idea/copyright/profiles_settings.xml",
    "content": "<component name=\"CopyrightManager\">\n  <settings default=\"\" />\n</component>"
  },
  {
    "path": ".idea/gradle.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"GradleSettings\">\n    <option name=\"linkedExternalProjectsSettings\">\n      <GradleProjectSettings>\n        <option name=\"distributionType\" value=\"DEFAULT_WRAPPED\" />\n        <option name=\"externalProjectPath\" value=\"$PROJECT_DIR$\" />\n        <option name=\"modules\">\n          <set>\n            <option value=\"$PROJECT_DIR$\" />\n            <option value=\"$PROJECT_DIR$/app\" />\n          </set>\n        </option>\n        <option name=\"resolveModulePerSourceSet\" value=\"false\" />\n      </GradleProjectSettings>\n    </option>\n  </component>\n</project>"
  },
  {
    "path": ".idea/misc.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"EntryPointsManager\">\n    <entry_points version=\"2.0\" />\n  </component>\n  <component name=\"NullableNotNullManager\">\n    <option name=\"myDefaultNullable\" value=\"android.support.annotation.Nullable\" />\n    <option name=\"myDefaultNotNull\" value=\"android.support.annotation.NonNull\" />\n    <option name=\"myNullables\">\n      <value>\n        <list size=\"4\">\n          <item index=\"0\" class=\"java.lang.String\" itemvalue=\"org.jetbrains.annotations.Nullable\" />\n          <item index=\"1\" class=\"java.lang.String\" itemvalue=\"javax.annotation.Nullable\" />\n          <item index=\"2\" class=\"java.lang.String\" itemvalue=\"edu.umd.cs.findbugs.annotations.Nullable\" />\n          <item index=\"3\" class=\"java.lang.String\" itemvalue=\"android.support.annotation.Nullable\" />\n        </list>\n      </value>\n    </option>\n    <option name=\"myNotNulls\">\n      <value>\n        <list size=\"4\">\n          <item index=\"0\" class=\"java.lang.String\" itemvalue=\"org.jetbrains.annotations.NotNull\" />\n          <item index=\"1\" class=\"java.lang.String\" itemvalue=\"javax.annotation.Nonnull\" />\n          <item index=\"2\" class=\"java.lang.String\" itemvalue=\"edu.umd.cs.findbugs.annotations.NonNull\" />\n          <item index=\"3\" class=\"java.lang.String\" itemvalue=\"android.support.annotation.NonNull\" />\n        </list>\n      </value>\n    </option>\n  </component>\n  <component name=\"ProjectRootManager\" version=\"2\" languageLevel=\"JDK_1_7\" default=\"true\" project-jdk-name=\"1.8\" project-jdk-type=\"JavaSDK\">\n    <output url=\"file://$PROJECT_DIR$/build/classes\" />\n  </component>\n  <component name=\"ProjectType\">\n    <option name=\"id\" value=\"Android\" />\n  </component>\n</project>"
  },
  {
    "path": ".idea/runConfigurations.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"RunConfigurationProducerService\">\n    <option name=\"ignoredProducers\">\n      <set>\n        <option value=\"org.jetbrains.plugins.gradle.execution.test.runner.AllInPackageGradleConfigurationProducer\" />\n        <option value=\"org.jetbrains.plugins.gradle.execution.test.runner.TestClassGradleConfigurationProducer\" />\n        <option value=\"org.jetbrains.plugins.gradle.execution.test.runner.TestMethodGradleConfigurationProducer\" />\n      </set>\n    </option>\n  </component>\n</project>"
  },
  {
    "path": ".idea/vcs.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"VcsDirectoryMappings\">\n    <mapping directory=\"$PROJECT_DIR$\" vcs=\"Git\" />\n  </component>\n</project>"
  },
  {
    "path": "Exporting Squeezenet to mobile.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"In this tutorial we'll show how to export squeezenet which is implemented and trained in pytorch to run on mobile devices.\\n\",\n    \"Before we start, you should have [pytorch](https://github.com/pytorch/pytorch), [caffe2](https://github.com/caffe2/caffe2), [onnx](https://github.com/onnx/onnx) and [onnx-caffe2](https://github.com/onnx/onnx-caffe2) installed in your environment and cloned [AICamera](https://github.com/bwasti/AICamera).\\n\",\n    \"Please checkout their github page for installation instructions.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 1,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# Some standard imports\\n\",\n    \"import io\\n\",\n    \"import numpy as np\\n\",\n    \"import torch.onnx\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"The following implementation of squeezenet is from [torchvision](https://github.com/pytorch/vision/blob/master/torchvision/models/squeezenet.py).\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 2,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"import math\\n\",\n    \"import torch\\n\",\n    \"import torch.nn as nn\\n\",\n    \"import torch.nn.init as init\\n\",\n    \"import torch.utils.model_zoo as model_zoo\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"__all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1']\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"model_urls = {\\n\",\n    \"    'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',\\n\",\n    \"    'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',\\n\",\n    \"}\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"class Fire(nn.Module):\\n\",\n    \"\\n\",\n    \"    def __init__(self, 
inplanes, squeeze_planes,\\n\",\n    \"                 expand1x1_planes, expand3x3_planes):\\n\",\n    \"        super(Fire, self).__init__()\\n\",\n    \"        self.inplanes = inplanes\\n\",\n    \"        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)\\n\",\n    \"        self.squeeze_activation = nn.ReLU(inplace=True)\\n\",\n    \"        self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,\\n\",\n    \"                                   kernel_size=1)\\n\",\n    \"        self.expand1x1_activation = nn.ReLU(inplace=True)\\n\",\n    \"        self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,\\n\",\n    \"                                   kernel_size=3, padding=1)\\n\",\n    \"        self.expand3x3_activation = nn.ReLU(inplace=True)\\n\",\n    \"\\n\",\n    \"    def forward(self, x):\\n\",\n    \"        x = self.squeeze_activation(self.squeeze(x))\\n\",\n    \"        return torch.cat([\\n\",\n    \"            self.expand1x1_activation(self.expand1x1(x)),\\n\",\n    \"            self.expand3x3_activation(self.expand3x3(x))\\n\",\n    \"        ], 1)\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"class SqueezeNet(nn.Module):\\n\",\n    \"\\n\",\n    \"    def __init__(self, version=1.0, num_classes=1000):\\n\",\n    \"        super(SqueezeNet, self).__init__()\\n\",\n    \"        if version not in [1.0, 1.1]:\\n\",\n    \"            raise ValueError(\\\"Unsupported SqueezeNet version {version}:\\\"\\n\",\n    \"                             \\\"1.0 or 1.1 expected\\\".format(version=version))\\n\",\n    \"        self.num_classes = num_classes\\n\",\n    \"        if version == 1.0:\\n\",\n    \"            self.features = nn.Sequential(\\n\",\n    \"                nn.Conv2d(3, 96, kernel_size=7, stride=2),\\n\",\n    \"                nn.ReLU(inplace=True),\\n\",\n    \"                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),\\n\",\n    \"                Fire(96, 16, 64, 64),\\n\",\n    \"                
Fire(128, 16, 64, 64),\n\",\n    \"                Fire(128, 32, 128, 128),\\n\",\n    \"                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),\\n\",\n    \"                Fire(256, 32, 128, 128),\\n\",\n    \"                Fire(256, 48, 192, 192),\\n\",\n    \"                Fire(384, 48, 192, 192),\\n\",\n    \"                Fire(384, 64, 256, 256),\\n\",\n    \"                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),\\n\",\n    \"                Fire(512, 64, 256, 256),\\n\",\n    \"            )\\n\",\n    \"        else:\\n\",\n    \"            self.features = nn.Sequential(\\n\",\n    \"                nn.Conv2d(3, 64, kernel_size=3, stride=2),\\n\",\n    \"                nn.ReLU(inplace=True),\\n\",\n    \"                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),\\n\",\n    \"                Fire(64, 16, 64, 64),\\n\",\n    \"                Fire(128, 16, 64, 64),\\n\",\n    \"                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),\\n\",\n    \"                Fire(128, 32, 128, 128),\\n\",\n    \"                Fire(256, 32, 128, 128),\\n\",\n    \"                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=False),\\n\",\n    \"                Fire(256, 48, 192, 192),\\n\",\n    \"                Fire(384, 48, 192, 192),\\n\",\n    \"                Fire(384, 64, 256, 256),\\n\",\n    \"                Fire(512, 64, 256, 256),\\n\",\n    \"            )\\n\",\n    \"        # Final convolution is initialized differently from the rest\\n\",\n    \"        final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)\\n\",\n    \"        self.classifier = nn.Sequential(\\n\",\n    \"            nn.Dropout(p=0.5),\\n\",\n    \"            final_conv,\\n\",\n    \"            nn.ReLU(inplace=True),\\n\",\n    \"            nn.AvgPool2d(13)\\n\",\n    \"        )\\n\",\n    \"\\n\",\n    \"        for m in self.modules():\\n\",\n    \"            if isinstance(m, nn.Conv2d):\\n\",\n    \" 
               if m is final_conv:\\n\",\n    \"                    init.normal(m.weight.data, mean=0.0, std=0.01)\\n\",\n    \"                else:\\n\",\n    \"                    init.kaiming_uniform(m.weight.data)\\n\",\n    \"                if m.bias is not None:\\n\",\n    \"                    m.bias.data.zero_()\\n\",\n    \"\\n\",\n    \"    def forward(self, x):\\n\",\n    \"        x = self.features(x)\\n\",\n    \"        x = self.classifier(x)\\n\",\n    \"        return x.view(x.size(0), self.num_classes)\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"def squeezenet1_0(pretrained=False, **kwargs):\\n\",\n    \"    r\\\"\\\"\\\"SqueezeNet model architecture from the `\\\"SqueezeNet: AlexNet-level\\n\",\n    \"    accuracy with 50x fewer parameters and <0.5MB model size\\\"\\n\",\n    \"    <https://arxiv.org/abs/1602.07360>`_ paper.\\n\",\n    \"    Args:\\n\",\n    \"        pretrained (bool): If True, returns a model pre-trained on ImageNet\\n\",\n    \"    \\\"\\\"\\\"\\n\",\n    \"    model = SqueezeNet(version=1.0, **kwargs)\\n\",\n    \"    if pretrained:\\n\",\n    \"        model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_0']))\\n\",\n    \"    return model\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"def squeezenet1_1(pretrained=False, **kwargs):\\n\",\n    \"    r\\\"\\\"\\\"SqueezeNet 1.1 model from the `official SqueezeNet repo\\n\",\n    \"    <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.\\n\",\n    \"    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters\\n\",\n    \"    than SqueezeNet 1.0, without sacrificing accuracy.\\n\",\n    \"    Args:\\n\",\n    \"        pretrained (bool): If True, returns a model pre-trained on ImageNet\\n\",\n    \"    \\\"\\\"\\\"\\n\",\n    \"    model = SqueezeNet(version=1.1, **kwargs)\\n\",\n    \"    if pretrained:\\n\",\n    \"        model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_1']))\\n\",\n    \"    return model\"\n   ]\n  },\n 
 {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"We can get the torch model by calling the following function:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 3,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# Get pretrained squeezenet model\\n\",\n    \"torch_model = squeezenet1_1(True)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"and export the pytorch model as onnx model:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 4,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"from torch.autograd import Variable\\n\",\n    \"batch_size = 1    # just a random number\\n\",\n    \"\\n\",\n    \"# Input to the model\\n\",\n    \"x = Variable(torch.randn(batch_size, 3, 224, 224), requires_grad=True)\\n\",\n    \"\\n\",\n    \"# Export the model\\n\",\n    \"torch_out = torch.onnx._export(torch_model,             # model being run\\n\",\n    \"                               x,                       # model input (or a tuple for multiple inputs)\\n\",\n    \"                               \\\"squeezenet.onnx\\\",       # where to save the model (can be a file or file-like object)\\n\",\n    \"                               export_params=True)      # store the trained parameter weights inside the model file\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"After that, we can prepare and run the model and verify that the result of the model running on pytorch matches the result running on onnx-caffe2 backend.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 5,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"WARNING:root:This caffe2 python run does not have GPU support. 
Will run in CPU only mode.\\n\",\n      \"WARNING:root:Debug message: No module named caffe2_pybind11_state_gpu\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"import onnx\\n\",\n    \"import onnx_caffe2.backend\\n\",\n    \"from onnx import helper\\n\",\n    \"\\n\",\n    \"# Load the ONNX GraphProto object. Graph is a standard Python protobuf object\\n\",\n    \"model = onnx.load(\\\"squeezenet.onnx\\\")\\n\",\n    \"\\n\",\n    \"# prepare the caffe2 backend for executing the model. This converts the ONNX graph into a\\n\",\n    \"# Caffe2 NetDef that can execute it. Other ONNX backends, like one for CNTK will be\\n\",\n    \"# available soon.\\n\",\n    \"prepared_backend = onnx_caffe2.backend.prepare(model)\\n\",\n    \"\\n\",\n    \"# run the model in Caffe2\\n\",\n    \"\\n\",\n    \"# Construct a map from input names to Tensor data.\\n\",\n    \"# The graph itself contains inputs for all weight parameters, followed by the input image.\\n\",\n    \"# Since the weights are already embedded, we just need to pass the input image.\\n\",\n    \"# last input is the graph\\n\",\n    \"W = {model.graph.input[0].name: x.data.numpy()}\\n\",\n    \"\\n\",\n    \"# Run the Caffe2 net:\\n\",\n    \"c2_out = prepared_backend.run(W)[0]\\n\",\n    \"\\n\",\n    \"# Verify the numerical correctness up to 3 decimal places\\n\",\n    \"np.testing.assert_almost_equal(torch_out.data.cpu().numpy(), c2_out, decimal=3)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Then we can export the model to run on mobile devices, leveraging the cross-platform capability of caffe2.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 6,\n   \"metadata\": {\n    \"collapsed\": true\n   },\n   \"outputs\": [],\n   \"source\": [\n    \"# Export to mobile\\n\",\n    \"from onnx_caffe2.backend import Caffe2Backend as c2\\n\",\n    \"\\n\",\n    \"init_net, predict_net = c2.onnx_graph_to_caffe2_net(model.graph, True)\\n\",\n    
\"with open(\\\"squeeze_init_net.pb\\\", \\\"wb\\\") as f:\\n\",\n    \"    f.write(init_net.SerializeToString())\\n\",\n    \"with open(\\\"squeeze_predict_net.pb\\\", \\\"wb\\\") as f:\\n\",\n    \"    f.write(predict_net.SerializeToString())\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"You'll see squeeze_init_net.pb and squeeze_predict_net.pb in the same directory of this notebook. Let's make sure it can run with predictor since that's what we'll use in the Mobile App.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 7,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"# Verify it runs with predictor\\n\",\n    \"with open(\\\"squeeze_init_net.pb\\\") as f:\\n\",\n    \"    init_net = f.read()\\n\",\n    \"with open(\\\"squeeze_predict_net.pb\\\") as f:\\n\",\n    \"    predict_net = f.read()\\n\",\n    \"from caffe2.python import workspace\\n\",\n    \"p = workspace.Predictor(init_net, predict_net)\\n\",\n    \"# The following code should run:\\n\",\n    \"# img = np.random.rand(1, 3, 224, 224).astype(np.float32)\\n\",\n    \"# p.run([img])\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"After we are sure that it runs with predictor, we can copy squeeze_init_net.pb and squeeze_predict_net.pb to \\n\",\n    \"AICamera/app/src/main/assets.\\n\",\n    \"Now we can open Android Studio and import the AICamera project, run the app by clicking the green play button.\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 2\",\n   \"language\": \"python\",\n   \"name\": \"python2\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 2\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython2\",\n   \"version\": 
\"2.7.13\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright (c) 2017-present, Facebook, Inc. All rights reserved.\n\nThe examples provided by Facebook are for non-commercial testing and evaluation\npurposes only. Facebook reserves all rights not expressly granted.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\nFACEBOOK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\nACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "## AICamera\n\nAICamera is a demo app that was displayed at Facebook's F8 event.  The previous version (also on this repo) was getting quite old and attempted to demonstrate a build system that happened inside Android Studio.  This led to some hacky techniques and I decided to rewrite the demo with a prebuilt Caffe2 library (which can be built using `build_android.sh` in the Caffe2 source).\n\n![example](https://thumbs.gfycat.com/FlimsyInbornIndianabat-size_restricted.gif)\n\n### Download\n\n    git clone https://github.com/caffe2/AICamera.git\n\n### Build\n\nClick the green play button in Android Studio 3.0.1 and everything should build :)\n\n### Tests\n\n| Device             | Network       |  FPS  |\n| ------------------ | ------------- | ----- |\n| Samsung Galaxy S7  | SqueezeNet    |  5.8  |\n| Google Pixel       | SqueezeNet    |  5.7  |\n\n### License\n\nPlease see the LICENSE file in the root directory of the source tree.\n"
  },
  {
    "path": "app/.gitignore",
    "content": "/build\n"
  },
  {
    "path": "app/CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.4.1)\n\nadd_library(\n             native-lib\n             SHARED\n             src/main/cpp/native-lib.cpp\n             )\nfind_library(\n          android-lib\n          android\n          )\n\ninclude(AndroidNdkModules)\nandroid_ndk_import_module_cpufeatures()\n\nadd_library(\n    caffe2\n    STATIC\n    IMPORTED\n    )\nset_target_properties(\n    caffe2\n    PROPERTIES IMPORTED_LOCATION\n    ${CMAKE_CURRENT_LIST_DIR}/src/main/jniLibs/${ANDROID_ABI}/libCaffe2_CPU.a\n    )\nadd_library(\n    thread_pool\n    STATIC\n    IMPORTED\n    )\nset_target_properties(\n    thread_pool\n    PROPERTIES IMPORTED_LOCATION\n    ${CMAKE_CURRENT_LIST_DIR}/src/main/jniLibs/${ANDROID_ABI}/libCAFFE2_PTHREADPOOL.a\n    )\nadd_library(\n    glog\n    SHARED\n    IMPORTED\n    )\nset_target_properties(\n    glog\n    PROPERTIES IMPORTED_LOCATION\n    ${CMAKE_CURRENT_LIST_DIR}/src/main/jniLibs/${ANDROID_ABI}/libglog.so\n    )\n\nadd_library(\n    protobuf\n    SHARED\n    IMPORTED\n    )\nset_target_properties(\n    protobuf\n    PROPERTIES IMPORTED_LOCATION\n    ${CMAKE_CURRENT_LIST_DIR}/src/main/jniLibs/${ANDROID_ABI}/libprotobuf.a\n    )\n\nadd_library(\n    NNPACK\n    STATIC\n    IMPORTED\n    )\nset_target_properties(\n    NNPACK\n    PROPERTIES IMPORTED_LOCATION\n    ${CMAKE_CURRENT_LIST_DIR}/src/main/jniLibs/${ANDROID_ABI}/libCAFFE2_NNPACK.a\n    )\n\ninclude_directories( src/main/cpp )\n\nfind_library(\n     log-lib\n     log\n     )\n\ntarget_link_libraries(\n                       native-lib\n                       -Wl,--whole-archive\n                       caffe2\n                       -Wl,--no-whole-archive\n                       NNPACK\n                       thread_pool\n                       glog\n                       protobuf\n                       cpufeatures\n                       ${log-lib}\n                       ${android-lib})"
  },
  {
    "path": "app/build.gradle",
    "content": "apply plugin: 'com.android.application'\n\nandroid {\n    compileSdkVersion 25\n    defaultConfig {\n        applicationId \"facebook.f8demo\"\n        minSdkVersion 22\n        targetSdkVersion 25\n        versionCode 1\n        versionName \"1.0\"\n        testInstrumentationRunner \"android.support.test.runner.AndroidJUnitRunner\"\n        externalNativeBuild {\n            cmake {\n                cppFlags \"-frtti -fexceptions -std=c++11\"\n                arguments \"-DANDROID_STL=gnustl_shared\"\n            }\n        }\n        ndk {\n            // Specifies the ABI configurations of your native\n            // libraries Gradle should build and package with your APK.\n            abiFilters  'armeabi-v7a'\n        }\n    }\n    buildTypes {\n        release {\n            minifyEnabled false\n            proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'\n        }\n    }\n    externalNativeBuild {\n        cmake {\n            path \"CMakeLists.txt\"\n        }\n    }\n    packagingOptions {\n        pickFirst 'lib/armeabi-v7a/libgnustl_shared.so'\n    }\n\n}\n\ndependencies {\n    compile fileTree(dir: 'libs', include: ['*.jar', '*.so'])\n    androidTestCompile('com.android.support.test.espresso:espresso-core:2.2.2', {\n        exclude group: 'com.android.support', module: 'support-annotations'\n    })\n    compile 'com.android.support:appcompat-v7:25.1.1'\n    compile 'com.android.support.constraint:constraint-layout:1.0.2'\n    testCompile 'junit:junit:4.12'\n}\n"
  },
  {
    "path": "app/proguard-rules.pro",
    "content": "# Add project specific ProGuard rules here.\n# By default, the flags in this file are appended to flags specified\n# in /Users/bwasti/Library/Android/sdk/tools/proguard/proguard-android.txt\n# You can edit the include path and order by changing the proguardFiles\n# directive in build.gradle.\n#\n# For more details, see\n#   http://developer.android.com/guide/developing/tools/proguard.html\n\n# Add any project specific keep options here:\n\n# If your project uses WebView with JS, uncomment the following\n# and specify the fully qualified class name to the JavaScript interface\n# class:\n#-keepclassmembers class fqcn.of.javascript.interface.for.webview {\n#   public *;\n#}\n\n# Uncomment this to preserve the line number information for\n# debugging stack traces.\n#-keepattributes SourceFile,LineNumberTable\n\n# If you keep the line number information, uncomment this to\n# hide the original source file name.\n#-renamesourcefileattribute SourceFile\n"
  },
  {
    "path": "app/src/androidTest/java/facebook/f8demo/ExampleInstrumentedTest.java",
    "content": "package facebook.f8demo;\n\nimport android.content.Context;\nimport android.support.test.InstrumentationRegistry;\nimport android.support.test.runner.AndroidJUnit4;\n\nimport org.junit.Test;\nimport org.junit.runner.RunWith;\n\nimport static org.junit.Assert.*;\n\n/**\n * Instrumentation test, which will execute on an Android device.\n *\n * @see <a href=\"http://d.android.com/tools/testing\">Testing documentation</a>\n */\n@RunWith(AndroidJUnit4.class)\npublic class ExampleInstrumentedTest {\n    @Test\n    public void useAppContext() throws Exception {\n        // Context of the app under test.\n        Context appContext = InstrumentationRegistry.getTargetContext();\n\n        assertEquals(\"facebook.f8demo\", appContext.getPackageName());\n    }\n}\n"
  },
  {
    "path": "app/src/main/AndroidManifest.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\"\n    package=\"facebook.f8demo\">\n    <uses-permission android:name=\"android.permission.CAMERA\" />\n    <uses-feature android:name=\"android.hardware.camera2.full\" />\n    <application\n        android:allowBackup=\"true\"\n        android:icon=\"@mipmap/ic_launcher\"\n        android:label=\"@string/app_name\"\n        android:roundIcon=\"@mipmap/ic_launcher_round\"\n        android:supportsRtl=\"true\"\n        android:theme=\"@style/AppTheme\">\n        <activity android:name=\".ClassifyCamera\" android:screenOrientation=\"portrait\">\n            <intent-filter>\n                <action android:name=\"android.intent.action.MAIN\" />\n\n                <category android:name=\"android.intent.category.LAUNCHER\" />\n            </intent-filter>\n        </activity>\n    </application>\n\n</manifest>"
  },
  {
    "path": "app/src/main/cpp/Eigen/CMakeLists.txt",
    "content": "include(RegexUtils)\ntest_escape_string_as_regex()\n\nfile(GLOB Eigen_directory_files \"*\")\n\nescape_string_as_regex(ESCAPED_CMAKE_CURRENT_SOURCE_DIR \"${CMAKE_CURRENT_SOURCE_DIR}\")\n\nforeach(f ${Eigen_directory_files})\n  if(NOT f MATCHES \"\\\\.txt\" AND NOT f MATCHES \"${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/[.].+\" AND NOT f MATCHES \"${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/src\")\n    list(APPEND Eigen_directory_files_to_install ${f})\n  endif()\nendforeach(f ${Eigen_directory_files})\n\ninstall(FILES\n  ${Eigen_directory_files_to_install}\n  DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen COMPONENT Devel\n  )\n\ninstall(DIRECTORY src DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen COMPONENT Devel FILES_MATCHING PATTERN \"*.h\")\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/Cholesky",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_CHOLESKY_MODULE_H\n#define EIGEN_CHOLESKY_MODULE_H\n\n#include \"Core\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n/** \\defgroup Cholesky_Module Cholesky module\n  *\n  *\n  *\n  * This module provides two variants of the Cholesky decomposition for selfadjoint (hermitian) matrices.\n  * Those decompositions are also accessible via the following methods:\n  *  - MatrixBase::llt()\n  *  - MatrixBase::ldlt()\n  *  - SelfAdjointView::llt()\n  *  - SelfAdjointView::ldlt()\n  *\n  * \\code\n  * #include <Eigen/Cholesky>\n  * \\endcode\n  */\n\n#include \"src/Cholesky/LLT.h\"\n#include \"src/Cholesky/LDLT.h\"\n#ifdef EIGEN_USE_LAPACKE\n#include \"src/misc/lapacke.h\"\n#include \"src/Cholesky/LLT_LAPACKE.h\"\n#endif\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_CHOLESKY_MODULE_H\n/* vim: set filetype=cpp et sw=2 ts=2 ai: */\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/CholmodSupport",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_CHOLMODSUPPORT_MODULE_H\n#define EIGEN_CHOLMODSUPPORT_MODULE_H\n\n#include \"SparseCore\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\nextern \"C\" {\n  #include <cholmod.h>\n}\n\n/** \\ingroup Support_modules\n  * \\defgroup CholmodSupport_Module CholmodSupport module\n  *\n  * This module provides an interface to the Cholmod library which is part of the <a href=\"http://www.suitesparse.com\">suitesparse</a> package.\n  * It provides the two following main factorization classes:\n  * - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization.\n  * - class CholmodDecomposition: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of the underlying factorization method (supernodal or simplicial).\n  *\n  * For the sake of completeness, this module also proposes the two following classes:\n  * - class CholmodSimplicialLLT\n  * - class CholmodSimplicialLDLT\n  * Note that these classes do not bring any particular advantage compared to the built-in\n  * SimplicialLLT and SimplicialLDLT factorization classes.\n  *\n  * \\code\n  * #include <Eigen/CholmodSupport>\n  * \\endcode\n  *\n  * In order to use this module, the cholmod headers must be accessible from the include paths, and your binary must be linked to the cholmod library and its dependencies.\n  * The dependencies depend on how cholmod has been compiled.\n  * For a cmake based project, you can use our FindCholmod.cmake module to help you in this task.\n  *\n  */\n\n#include \"src/CholmodSupport/CholmodSupport.h\"\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_CHOLMODSUPPORT_MODULE_H\n\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/Core",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2007-2011 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_CORE_H\n#define EIGEN_CORE_H\n\n// first thing Eigen does: stop the compiler from committing suicide\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n// Handle NVCC/CUDA/SYCL\n#if defined(__CUDACC__) || defined(__SYCL_DEVICE_ONLY__)\n  // Do not try asserts on CUDA and SYCL!\n  #ifndef EIGEN_NO_DEBUG\n  #define EIGEN_NO_DEBUG\n  #endif\n\n  #ifdef EIGEN_INTERNAL_DEBUGGING\n  #undef EIGEN_INTERNAL_DEBUGGING\n  #endif\n\n  #ifdef EIGEN_EXCEPTIONS\n  #undef EIGEN_EXCEPTIONS\n  #endif\n\n  // All functions callable from CUDA code must be qualified with __device__\n  #ifdef __CUDACC__\n    // Do not try to vectorize on CUDA and SYCL!\n    #ifndef EIGEN_DONT_VECTORIZE\n    #define EIGEN_DONT_VECTORIZE\n    #endif\n\n    #define EIGEN_DEVICE_FUNC __host__ __device__\n    // We need math_functions.hpp to ensure that that EIGEN_USING_STD_MATH macro\n    // works properly on the device side\n    #include <math_functions.hpp>\n  #else\n    #define EIGEN_DEVICE_FUNC\n  #endif\n#else\n  #define EIGEN_DEVICE_FUNC\n#endif\n\n// When compiling CUDA device code with NVCC, pull in math functions from the\n// global namespace.  
In host mode, and when device doee with clang, use the\n// std versions.\n#if defined(__CUDA_ARCH__) && defined(__NVCC__)\n  #define EIGEN_USING_STD_MATH(FUNC) using ::FUNC;\n#else\n  #define EIGEN_USING_STD_MATH(FUNC) using std::FUNC;\n#endif\n\n#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(__CUDA_ARCH__) && !defined(EIGEN_EXCEPTIONS) && !defined(EIGEN_USE_SYCL)\n  #define EIGEN_EXCEPTIONS\n#endif\n\n#ifdef EIGEN_EXCEPTIONS\n  #include <new>\n#endif\n\n// then include this file where all our macros are defined. It's really important to do it first because\n// it's where we do all the alignment settings (platform detection and honoring the user's will if he\n// defined e.g. EIGEN_DONT_ALIGN) so it needs to be done before we do anything with vectorization.\n#include \"src/Core/util/Macros.h\"\n\n// Disable the ipa-cp-clone optimization flag with MinGW 6.x or newer (enabled by default with -O3)\n// See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=556 for details.\n#if EIGEN_COMP_MINGW && EIGEN_GNUC_AT_LEAST(4,6)\n  #pragma GCC optimize (\"-fno-ipa-cp-clone\")\n#endif\n\n#include <complex>\n\n// this include file manages BLAS and MKL related macros\n// and inclusion of their respective header files\n#include \"src/Core/util/MKL_support.h\"\n\n// if alignment is disabled, then disable vectorization. 
Note: EIGEN_MAX_ALIGN_BYTES is the proper check, it takes into\n// account both the user's will (EIGEN_MAX_ALIGN_BYTES,EIGEN_DONT_ALIGN) and our own platform checks\n#if EIGEN_MAX_ALIGN_BYTES==0\n  #ifndef EIGEN_DONT_VECTORIZE\n    #define EIGEN_DONT_VECTORIZE\n  #endif\n#endif\n\n#if EIGEN_COMP_MSVC\n  #include <malloc.h> // for _aligned_malloc -- need it regardless of whether vectorization is enabled\n  #if (EIGEN_COMP_MSVC >= 1500) // 2008 or later\n    // Remember that usage of defined() in a #define is undefined by the standard.\n    // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP.\n    #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || EIGEN_ARCH_x86_64\n      #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER\n    #endif\n  #endif\n#else\n  // Remember that usage of defined() in a #define is undefined by the standard\n  #if (defined __SSE2__) && ( (!EIGEN_COMP_GNUC) || EIGEN_COMP_ICC || EIGEN_GNUC_AT_LEAST(4,2) )\n    #define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC\n  #endif\n#endif\n\n#ifndef EIGEN_DONT_VECTORIZE\n\n  #if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER)\n\n    // Defines symbols for compile-time detection of which instructions are\n    // used.\n    // EIGEN_VECTORIZE_YY is defined if and only if the instruction set YY is used\n    #define EIGEN_VECTORIZE\n    #define EIGEN_VECTORIZE_SSE\n    #define EIGEN_VECTORIZE_SSE2\n\n    // Detect sse3/ssse3/sse4:\n    // gcc and icc defines __SSE3__, ...\n    // there is no way to know about this on msvc. 
You can define EIGEN_VECTORIZE_SSE* if you\n    // want to force the use of those instructions with msvc.\n    #ifdef __SSE3__\n      #define EIGEN_VECTORIZE_SSE3\n    #endif\n    #ifdef __SSSE3__\n      #define EIGEN_VECTORIZE_SSSE3\n    #endif\n    #ifdef __SSE4_1__\n      #define EIGEN_VECTORIZE_SSE4_1\n    #endif\n    #ifdef __SSE4_2__\n      #define EIGEN_VECTORIZE_SSE4_2\n    #endif\n    #ifdef __AVX__\n      #define EIGEN_VECTORIZE_AVX\n      #define EIGEN_VECTORIZE_SSE3\n      #define EIGEN_VECTORIZE_SSSE3\n      #define EIGEN_VECTORIZE_SSE4_1\n      #define EIGEN_VECTORIZE_SSE4_2\n    #endif\n    #ifdef __AVX2__\n      #define EIGEN_VECTORIZE_AVX2\n      #define EIGEN_VECTORIZE_AVX\n      #define EIGEN_VECTORIZE_SSE3\n      #define EIGEN_VECTORIZE_SSSE3\n      #define EIGEN_VECTORIZE_SSE4_1\n      #define EIGEN_VECTORIZE_SSE4_2\n    #endif\n    #ifdef __FMA__\n      #define EIGEN_VECTORIZE_FMA\n    #endif\n    #if defined(__AVX512F__)\n      #define EIGEN_VECTORIZE_AVX512\n      #define EIGEN_VECTORIZE_AVX2\n      #define EIGEN_VECTORIZE_AVX\n      #define EIGEN_VECTORIZE_FMA\n      #define EIGEN_VECTORIZE_SSE3\n      #define EIGEN_VECTORIZE_SSSE3\n      #define EIGEN_VECTORIZE_SSE4_1\n      #define EIGEN_VECTORIZE_SSE4_2\n      #ifdef __AVX512DQ__\n        #define EIGEN_VECTORIZE_AVX512DQ\n      #endif\n    #endif\n\n    // include files\n\n    // This extern \"C\" works around a MINGW-w64 compilation issue\n    // https://sourceforge.net/tracker/index.php?func=detail&aid=3018394&group_id=202880&atid=983354\n    // In essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do).\n    // However, intrin.h uses an extern \"C\" declaration, and g++ thus complains of duplicate declarations\n    // with conflicting linkage.  
The linkage for intrinsics doesn't matter, but at that stage the compiler doesn't know;\n    // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern \"C\" here too.\n    // notice that since these are C headers, the extern \"C\" is theoretically needed anyways.\n    extern \"C\" {\n      // In theory we should only include immintrin.h and not the other *mmintrin.h header files directly.\n      // Doing so triggers some issues with ICC. However old gcc versions seems to not have this file, thus:\n      #if EIGEN_COMP_ICC >= 1110\n        #include <immintrin.h>\n      #else\n        #include <mmintrin.h>\n        #include <emmintrin.h>\n        #include <xmmintrin.h>\n        #ifdef  EIGEN_VECTORIZE_SSE3\n        #include <pmmintrin.h>\n        #endif\n        #ifdef EIGEN_VECTORIZE_SSSE3\n        #include <tmmintrin.h>\n        #endif\n        #ifdef EIGEN_VECTORIZE_SSE4_1\n        #include <smmintrin.h>\n        #endif\n        #ifdef EIGEN_VECTORIZE_SSE4_2\n        #include <nmmintrin.h>\n        #endif\n        #if defined(EIGEN_VECTORIZE_AVX) || defined(EIGEN_VECTORIZE_AVX512)\n        #include <immintrin.h>\n        #endif\n      #endif\n    } // end extern \"C\"\n  #elif defined __VSX__\n    #define EIGEN_VECTORIZE\n    #define EIGEN_VECTORIZE_VSX\n    #include <altivec.h>\n    // We need to #undef all these ugly tokens defined in <altivec.h>\n    // => use __vector instead of vector\n    #undef bool\n    #undef vector\n    #undef pixel\n  #elif defined __ALTIVEC__\n    #define EIGEN_VECTORIZE\n    #define EIGEN_VECTORIZE_ALTIVEC\n    #include <altivec.h>\n    // We need to #undef all these ugly tokens defined in <altivec.h>\n    // => use __vector instead of vector\n    #undef bool\n    #undef vector\n    #undef pixel\n  #elif (defined  __ARM_NEON) || (defined __ARM_NEON__)\n    #define EIGEN_VECTORIZE\n    #define EIGEN_VECTORIZE_NEON\n    #include <arm_neon.h>\n  #elif (defined __s390x__ && defined __VEC__)\n 
   #define EIGEN_VECTORIZE\n    #define EIGEN_VECTORIZE_ZVECTOR\n    #include <vecintrin.h>\n  #endif\n#endif\n\n#if defined(__F16C__) && !defined(EIGEN_COMP_CLANG)\n  // We can use the optimized fp16 to float and float to fp16 conversion routines\n  #define EIGEN_HAS_FP16_C\n#endif\n\n#if defined __CUDACC__\n  #define EIGEN_VECTORIZE_CUDA\n  #include <vector_types.h>\n  #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500\n    #define EIGEN_HAS_CUDA_FP16\n  #endif\n#endif\n\n#if defined EIGEN_HAS_CUDA_FP16\n  #include <host_defines.h>\n  #include <cuda_fp16.h>\n#endif\n\n#if (defined _OPENMP) && (!defined EIGEN_DONT_PARALLELIZE)\n  #define EIGEN_HAS_OPENMP\n#endif\n\n#ifdef EIGEN_HAS_OPENMP\n#include <omp.h>\n#endif\n\n// MSVC for windows mobile does not have the errno.h file\n#if !(EIGEN_COMP_MSVC && EIGEN_OS_WINCE) && !EIGEN_COMP_ARM\n#define EIGEN_HAS_ERRNO\n#endif\n\n#ifdef EIGEN_HAS_ERRNO\n#include <cerrno>\n#endif\n#include <cstddef>\n#include <cstdlib>\n#include <cmath>\n#include <cassert>\n#include <functional>\n#include <iosfwd>\n#include <cstring>\n#include <string>\n#include <limits>\n#include <climits> // for CHAR_BIT\n// for min/max:\n#include <algorithm>\n\n// for std::is_nothrow_move_assignable\n#ifdef EIGEN_INCLUDE_TYPE_TRAITS\n#include <type_traits>\n#endif\n\n// for outputting debug info\n#ifdef EIGEN_DEBUG_ASSIGN\n#include <iostream>\n#endif\n\n// required for __cpuid, needs to be included after cmath\n#if EIGEN_COMP_MSVC && EIGEN_ARCH_i386_OR_x86_64 && !EIGEN_OS_WINCE\n  #include <intrin.h>\n#endif\n\n#if defined(__SYCL_DEVICE_ONLY__)\n  #undef min\n  #undef max\n  #undef isnan\n  #undef isinf\n  #undef isfinite\n  #include <SYCL/sycl.hpp>\n#endif\n\n/** \\brief Namespace containing all symbols from the %Eigen library. 
*/\nnamespace Eigen {\n\ninline static const char *SimdInstructionSetsInUse(void) {\n#if defined(EIGEN_VECTORIZE_AVX512)\n  return \"AVX512, FMA, AVX2, AVX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2\";\n#elif defined(EIGEN_VECTORIZE_AVX)\n  return \"AVX SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2\";\n#elif defined(EIGEN_VECTORIZE_SSE4_2)\n  return \"SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2\";\n#elif defined(EIGEN_VECTORIZE_SSE4_1)\n  return \"SSE, SSE2, SSE3, SSSE3, SSE4.1\";\n#elif defined(EIGEN_VECTORIZE_SSSE3)\n  return \"SSE, SSE2, SSE3, SSSE3\";\n#elif defined(EIGEN_VECTORIZE_SSE3)\n  return \"SSE, SSE2, SSE3\";\n#elif defined(EIGEN_VECTORIZE_SSE2)\n  return \"SSE, SSE2\";\n#elif defined(EIGEN_VECTORIZE_ALTIVEC)\n  return \"AltiVec\";\n#elif defined(EIGEN_VECTORIZE_VSX)\n  return \"VSX\";\n#elif defined(EIGEN_VECTORIZE_NEON)\n  return \"ARM NEON\";\n#elif defined(EIGEN_VECTORIZE_ZVECTOR)\n  return \"S390X ZVECTOR\";\n#else\n  return \"None\";\n#endif\n}\n\n} // end namespace Eigen\n\n#if defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS || defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API || defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS || defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API || defined EIGEN2_SUPPORT\n// This will generate an error message:\n#error Eigen2-support is only available up to version 3.2. 
Please go to \"http://eigen.tuxfamily.org/index.php?title=Eigen2\" for further information\n#endif\n\nnamespace Eigen {\n\n// we use size_t frequently and we'll never remember to prepend it with std:: everytime just to\n// ensure QNX/QCC support\nusing std::size_t;\n// gcc 4.6.0 wants std:: for ptrdiff_t\nusing std::ptrdiff_t;\n\n}\n\n/** \\defgroup Core_Module Core module\n  * This is the main module of Eigen providing dense matrix and vector support\n  * (both fixed and dynamic size) with all the features corresponding to a BLAS library\n  * and much more...\n  *\n  * \\code\n  * #include <Eigen/Core>\n  * \\endcode\n  */\n\n#include \"src/Core/util/Constants.h\"\n#include \"src/Core/util/Meta.h\"\n#include \"src/Core/util/ForwardDeclarations.h\"\n#include \"src/Core/util/StaticAssert.h\"\n#include \"src/Core/util/XprHelper.h\"\n#include \"src/Core/util/Memory.h\"\n#include \"src/Core/util/IntegralConstant.h\"\n#include \"src/Core/util/SymbolicIndex.h\"\n\n\n#include \"src/Core/NumTraits.h\"\n#include \"src/Core/MathFunctions.h\"\n#include \"src/Core/GenericPacketMath.h\"\n#include \"src/Core/MathFunctionsImpl.h\"\n\n#if defined EIGEN_VECTORIZE_AVX512\n  #include \"src/Core/arch/SSE/PacketMath.h\"\n  #include \"src/Core/arch/AVX/PacketMath.h\"\n  #include \"src/Core/arch/AVX512/PacketMath.h\"\n  #include \"src/Core/arch/SSE/MathFunctions.h\"\n  #include \"src/Core/arch/AVX/MathFunctions.h\"\n  #include \"src/Core/arch/AVX512/MathFunctions.h\"\n#elif defined EIGEN_VECTORIZE_AVX\n  // Use AVX for floats and doubles, SSE for integers\n  #include \"src/Core/arch/SSE/PacketMath.h\"\n  #include \"src/Core/arch/SSE/Complex.h\"\n  #include \"src/Core/arch/SSE/MathFunctions.h\"\n  #include \"src/Core/arch/AVX/PacketMath.h\"\n  #include \"src/Core/arch/AVX/MathFunctions.h\"\n  #include \"src/Core/arch/AVX/Complex.h\"\n  #include \"src/Core/arch/AVX/TypeCasting.h\"\n#elif defined EIGEN_VECTORIZE_SSE\n  #include \"src/Core/arch/SSE/PacketMath.h\"\n  #include 
\"src/Core/arch/SSE/MathFunctions.h\"\n  #include \"src/Core/arch/SSE/Complex.h\"\n  #include \"src/Core/arch/SSE/TypeCasting.h\"\n#elif defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX)\n  #include \"src/Core/arch/AltiVec/PacketMath.h\"\n  #include \"src/Core/arch/AltiVec/MathFunctions.h\"\n  #include \"src/Core/arch/AltiVec/Complex.h\"\n#elif defined EIGEN_VECTORIZE_NEON\n  #include \"src/Core/arch/NEON/PacketMath.h\"\n  #include \"src/Core/arch/NEON/MathFunctions.h\"\n  #include \"src/Core/arch/NEON/Complex.h\"\n#elif defined EIGEN_VECTORIZE_ZVECTOR\n  #include \"src/Core/arch/ZVector/PacketMath.h\"\n  #include \"src/Core/arch/ZVector/MathFunctions.h\"\n  #include \"src/Core/arch/ZVector/Complex.h\"\n#endif\n\n// Half float support\n#include \"src/Core/arch/CUDA/Half.h\"\n#include \"src/Core/arch/CUDA/PacketMathHalf.h\"\n#include \"src/Core/arch/CUDA/TypeCasting.h\"\n\n#if defined EIGEN_VECTORIZE_CUDA\n  #include \"src/Core/arch/CUDA/PacketMath.h\"\n  #include \"src/Core/arch/CUDA/MathFunctions.h\"\n#endif\n\n#include \"src/Core/arch/Default/Settings.h\"\n\n#include \"src/Core/functors/TernaryFunctors.h\"\n#include \"src/Core/functors/BinaryFunctors.h\"\n#include \"src/Core/functors/UnaryFunctors.h\"\n#include \"src/Core/functors/NullaryFunctors.h\"\n#include \"src/Core/functors/StlFunctors.h\"\n#include \"src/Core/functors/AssignmentFunctors.h\"\n\n// Specialized functors to enable the processing of complex numbers\n// on CUDA devices\n#include \"src/Core/arch/CUDA/Complex.h\"\n\n#include \"src/Core/util/IndexedViewHelper.h\"\n#include \"src/Core/ArithmeticSequence.h\"\n#include \"src/Core/IO.h\"\n#include \"src/Core/DenseCoeffsBase.h\"\n#include \"src/Core/DenseBase.h\"\n#include \"src/Core/MatrixBase.h\"\n#include \"src/Core/EigenBase.h\"\n\n#include \"src/Core/Product.h\"\n#include \"src/Core/CoreEvaluators.h\"\n#include \"src/Core/AssignEvaluator.h\"\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874\n 
                               // at least confirmed with Doxygen 1.5.5 and 1.5.6\n  #include \"src/Core/Assign.h\"\n#endif\n\n#include \"src/Core/ArrayBase.h\"\n#include \"src/Core/util/BlasUtil.h\"\n#include \"src/Core/DenseStorage.h\"\n#include \"src/Core/NestByValue.h\"\n\n// #include \"src/Core/ForceAlignedAccess.h\"\n\n#include \"src/Core/ReturnByValue.h\"\n#include \"src/Core/NoAlias.h\"\n#include \"src/Core/PlainObjectBase.h\"\n#include \"src/Core/Matrix.h\"\n#include \"src/Core/Array.h\"\n#include \"src/Core/CwiseTernaryOp.h\"\n#include \"src/Core/CwiseBinaryOp.h\"\n#include \"src/Core/CwiseUnaryOp.h\"\n#include \"src/Core/CwiseNullaryOp.h\"\n#include \"src/Core/CwiseUnaryView.h\"\n#include \"src/Core/SelfCwiseBinaryOp.h\"\n#include \"src/Core/Dot.h\"\n#include \"src/Core/StableNorm.h\"\n#include \"src/Core/Stride.h\"\n#include \"src/Core/MapBase.h\"\n#include \"src/Core/Map.h\"\n#include \"src/Core/Ref.h\"\n#include \"src/Core/Block.h\"\n#include \"src/Core/VectorBlock.h\"\n#include \"src/Core/IndexedView.h\"\n#include \"src/Core/Transpose.h\"\n#include \"src/Core/DiagonalMatrix.h\"\n#include \"src/Core/Diagonal.h\"\n#include \"src/Core/DiagonalProduct.h\"\n#include \"src/Core/Redux.h\"\n#include \"src/Core/Visitor.h\"\n#include \"src/Core/Fuzzy.h\"\n#include \"src/Core/Swap.h\"\n#include \"src/Core/CommaInitializer.h\"\n#include \"src/Core/GeneralProduct.h\"\n#include \"src/Core/Solve.h\"\n#include \"src/Core/Inverse.h\"\n#include \"src/Core/SolverBase.h\"\n#include \"src/Core/PermutationMatrix.h\"\n#include \"src/Core/Transpositions.h\"\n#include \"src/Core/TriangularMatrix.h\"\n#include \"src/Core/SelfAdjointView.h\"\n#include \"src/Core/products/GeneralBlockPanelKernel.h\"\n#include \"src/Core/products/Parallelizer.h\"\n#include \"src/Core/ProductEvaluators.h\"\n#include \"src/Core/products/GeneralMatrixVector.h\"\n#include \"src/Core/products/GeneralMatrixMatrix.h\"\n#include \"src/Core/SolveTriangular.h\"\n#include 
\"src/Core/products/GeneralMatrixMatrixTriangular.h\"\n#include \"src/Core/products/SelfadjointMatrixVector.h\"\n#include \"src/Core/products/SelfadjointMatrixMatrix.h\"\n#include \"src/Core/products/SelfadjointProduct.h\"\n#include \"src/Core/products/SelfadjointRank2Update.h\"\n#include \"src/Core/products/TriangularMatrixVector.h\"\n#include \"src/Core/products/TriangularMatrixMatrix.h\"\n#include \"src/Core/products/TriangularSolverMatrix.h\"\n#include \"src/Core/products/TriangularSolverVector.h\"\n#include \"src/Core/BandMatrix.h\"\n#include \"src/Core/CoreIterators.h\"\n#include \"src/Core/ConditionEstimator.h\"\n\n#include \"src/Core/BooleanRedux.h\"\n#include \"src/Core/Select.h\"\n#include \"src/Core/VectorwiseOp.h\"\n#include \"src/Core/Random.h\"\n#include \"src/Core/Replicate.h\"\n#include \"src/Core/Reverse.h\"\n#include \"src/Core/ArrayWrapper.h\"\n\n#ifdef EIGEN_USE_BLAS\n#include \"src/Core/products/GeneralMatrixMatrix_BLAS.h\"\n#include \"src/Core/products/GeneralMatrixVector_BLAS.h\"\n#include \"src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h\"\n#include \"src/Core/products/SelfadjointMatrixMatrix_BLAS.h\"\n#include \"src/Core/products/SelfadjointMatrixVector_BLAS.h\"\n#include \"src/Core/products/TriangularMatrixMatrix_BLAS.h\"\n#include \"src/Core/products/TriangularMatrixVector_BLAS.h\"\n#include \"src/Core/products/TriangularSolverMatrix_BLAS.h\"\n#endif // EIGEN_USE_BLAS\n\n#ifdef EIGEN_USE_MKL_VML\n#include \"src/Core/Assign_MKL.h\"\n#endif\n\n#include \"src/Core/GlobalFunctions.h\"\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_CORE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/Dense",
    "content": "#include \"Core\"\n#include \"LU\"\n#include \"Cholesky\"\n#include \"QR\"\n#include \"SVD\"\n#include \"Geometry\"\n#include \"Eigenvalues\"\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/Eigen",
    "content": "#include \"Dense\"\n#include \"Sparse\"\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/Eigenvalues",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_EIGENVALUES_MODULE_H\n#define EIGEN_EIGENVALUES_MODULE_H\n\n#include \"Core\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n#include \"Cholesky\"\n#include \"Jacobi\"\n#include \"Householder\"\n#include \"LU\"\n#include \"Geometry\"\n\n/** \\defgroup Eigenvalues_Module Eigenvalues module\n  *\n  *\n  *\n  * This module mainly provides various eigenvalue solvers.\n  * This module also provides some MatrixBase methods, including:\n  *  - MatrixBase::eigenvalues(),\n  *  - MatrixBase::operatorNorm()\n  *\n  * \\code\n  * #include <Eigen/Eigenvalues>\n  * \\endcode\n  */\n\n#include \"src/misc/RealSvd2x2.h\"\n#include \"src/Eigenvalues/Tridiagonalization.h\"\n#include \"src/Eigenvalues/RealSchur.h\"\n#include \"src/Eigenvalues/EigenSolver.h\"\n#include \"src/Eigenvalues/SelfAdjointEigenSolver.h\"\n#include \"src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h\"\n#include \"src/Eigenvalues/HessenbergDecomposition.h\"\n#include \"src/Eigenvalues/ComplexSchur.h\"\n#include \"src/Eigenvalues/ComplexEigenSolver.h\"\n#include \"src/Eigenvalues/RealQZ.h\"\n#include \"src/Eigenvalues/GeneralizedEigenSolver.h\"\n#include \"src/Eigenvalues/MatrixBaseEigenvalues.h\"\n#ifdef EIGEN_USE_LAPACKE\n#include \"src/misc/lapacke.h\"\n#include \"src/Eigenvalues/RealSchur_LAPACKE.h\"\n#include \"src/Eigenvalues/ComplexSchur_LAPACKE.h\"\n#include \"src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h\"\n#endif\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_EIGENVALUES_MODULE_H\n/* vim: set filetype=cpp et sw=2 ts=2 ai: */\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/Geometry",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_GEOMETRY_MODULE_H\n#define EIGEN_GEOMETRY_MODULE_H\n\n#include \"Core\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n#include \"SVD\"\n#include \"LU\"\n#include <limits>\n\n/** \\defgroup Geometry_Module Geometry module\n  *\n  * This module provides support for:\n  *  - fixed-size homogeneous transformations\n  *  - translation, scaling, 2D and 3D rotations\n  *  - \\link Quaternion quaternions \\endlink\n  *  - cross products (\\ref MatrixBase::cross, \\ref MatrixBase::cross3)\n  *  - orthognal vector generation (\\ref MatrixBase::unitOrthogonal)\n  *  - some linear components: \\link ParametrizedLine parametrized-lines \\endlink and \\link Hyperplane hyperplanes \\endlink\n  *  - \\link AlignedBox axis aligned bounding boxes \\endlink\n  *  - \\link umeyama least-square transformation fitting \\endlink\n  *\n  * \\code\n  * #include <Eigen/Geometry>\n  * \\endcode\n  */\n\n#include \"src/Geometry/OrthoMethods.h\"\n#include \"src/Geometry/EulerAngles.h\"\n\n#include \"src/Geometry/Homogeneous.h\"\n#include \"src/Geometry/RotationBase.h\"\n#include \"src/Geometry/Rotation2D.h\"\n#include \"src/Geometry/Quaternion.h\"\n#include \"src/Geometry/AngleAxis.h\"\n#include \"src/Geometry/Transform.h\"\n#include \"src/Geometry/Translation.h\"\n#include \"src/Geometry/Scaling.h\"\n#include \"src/Geometry/Hyperplane.h\"\n#include \"src/Geometry/ParametrizedLine.h\"\n#include \"src/Geometry/AlignedBox.h\"\n#include \"src/Geometry/Umeyama.h\"\n\n// Use the SSE optimized version whenever possible. 
At the moment the\n// SSE version doesn't compile when AVX is enabled\n#if defined EIGEN_VECTORIZE_SSE && !defined EIGEN_VECTORIZE_AVX\n#include \"src/Geometry/arch/Geometry_SSE.h\"\n#endif\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_GEOMETRY_MODULE_H\n/* vim: set filetype=cpp et sw=2 ts=2 ai: */\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/Householder",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_HOUSEHOLDER_MODULE_H\n#define EIGEN_HOUSEHOLDER_MODULE_H\n\n#include \"Core\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n/** \\defgroup Householder_Module Householder module\n  * This module provides Householder transformations.\n  *\n  * \\code\n  * #include <Eigen/Householder>\n  * \\endcode\n  */\n\n#include \"src/Householder/Householder.h\"\n#include \"src/Householder/HouseholderSequence.h\"\n#include \"src/Householder/BlockHouseholder.h\"\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_HOUSEHOLDER_MODULE_H\n/* vim: set filetype=cpp et sw=2 ts=2 ai: */\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/IterativeLinearSolvers",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ITERATIVELINEARSOLVERS_MODULE_H\n#define EIGEN_ITERATIVELINEARSOLVERS_MODULE_H\n\n#include \"SparseCore\"\n#include \"OrderingMethods\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n/** \n  * \\defgroup IterativeLinearSolvers_Module IterativeLinearSolvers module\n  *\n  * This module currently provides iterative methods to solve problems of the form \\c A \\c x = \\c b, where \\c A is a squared matrix, usually very large and sparse.\n  * Those solvers are accessible via the following classes:\n  *  - ConjugateGradient for selfadjoint (hermitian) matrices,\n  *  - LeastSquaresConjugateGradient for rectangular least-square problems,\n  *  - BiCGSTAB for general square matrices.\n  *\n  * These iterative solvers are associated with some preconditioners:\n  *  - IdentityPreconditioner - not really useful\n  *  - DiagonalPreconditioner - also called Jacobi preconditioner, work very well on diagonal dominant matrices.\n  *  - IncompleteLUT - incomplete LU factorization with dual thresholding\n  *\n  * Such problems can also be solved using the direct sparse decomposition modules: SparseCholesky, CholmodSupport, UmfPackSupport, SuperLUSupport.\n  *\n    \\code\n    #include <Eigen/IterativeLinearSolvers>\n    \\endcode\n  */\n\n#include \"src/IterativeLinearSolvers/SolveWithGuess.h\"\n#include \"src/IterativeLinearSolvers/IterativeSolverBase.h\"\n#include \"src/IterativeLinearSolvers/BasicPreconditioners.h\"\n#include \"src/IterativeLinearSolvers/ConjugateGradient.h\"\n#include \"src/IterativeLinearSolvers/LeastSquareConjugateGradient.h\"\n#include \"src/IterativeLinearSolvers/BiCGSTAB.h\"\n#include 
\"src/IterativeLinearSolvers/IncompleteLUT.h\"\n#include \"src/IterativeLinearSolvers/IncompleteCholesky.h\"\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_ITERATIVELINEARSOLVERS_MODULE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/Jacobi",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_JACOBI_MODULE_H\n#define EIGEN_JACOBI_MODULE_H\n\n#include \"Core\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n/** \\defgroup Jacobi_Module Jacobi module\n  * This module provides Jacobi and Givens rotations.\n  *\n  * \\code\n  * #include <Eigen/Jacobi>\n  * \\endcode\n  *\n  * In addition to listed classes, it defines the two following MatrixBase methods to apply a Jacobi or Givens rotation:\n  *  - MatrixBase::applyOnTheLeft()\n  *  - MatrixBase::applyOnTheRight().\n  */\n\n#include \"src/Jacobi/Jacobi.h\"\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_JACOBI_MODULE_H\n/* vim: set filetype=cpp et sw=2 ts=2 ai: */\n\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/LU",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_LU_MODULE_H\n#define EIGEN_LU_MODULE_H\n\n#include \"Core\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n/** \\defgroup LU_Module LU module\n  * This module includes %LU decomposition and related notions such as matrix inversion and determinant.\n  * This module defines the following MatrixBase methods:\n  *  - MatrixBase::inverse()\n  *  - MatrixBase::determinant()\n  *\n  * \\code\n  * #include <Eigen/LU>\n  * \\endcode\n  */\n\n#include \"src/misc/Kernel.h\"\n#include \"src/misc/Image.h\"\n#include \"src/LU/FullPivLU.h\"\n#include \"src/LU/PartialPivLU.h\"\n#ifdef EIGEN_USE_LAPACKE\n#include \"src/misc/lapacke.h\"\n#include \"src/LU/PartialPivLU_LAPACKE.h\"\n#endif\n#include \"src/LU/Determinant.h\"\n#include \"src/LU/InverseImpl.h\"\n\n// Use the SSE optimized version whenever possible. At the moment the\n// SSE version doesn't compile when AVX is enabled\n#if defined EIGEN_VECTORIZE_SSE && !defined EIGEN_VECTORIZE_AVX\n  #include \"src/LU/arch/Inverse_SSE.h\"\n#endif\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_LU_MODULE_H\n/* vim: set filetype=cpp et sw=2 ts=2 ai: */\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/MetisSupport",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_METISSUPPORT_MODULE_H\n#define EIGEN_METISSUPPORT_MODULE_H\n\n#include \"SparseCore\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\nextern \"C\" {\n#include <metis.h>\n}\n\n\n/** \\ingroup Support_modules\n  * \\defgroup MetisSupport_Module MetisSupport module\n  *\n  * \\code\n  * #include <Eigen/MetisSupport>\n  * \\endcode\n  * This module defines an interface to the METIS reordering package (http://glaros.dtc.umn.edu/gkhome/views/metis). \n  * It can be used just as any other built-in method as explained in \\link OrderingMethods_Module here. \\endlink\n  */\n\n\n#include \"src/MetisSupport/MetisSupport.h\"\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_METISSUPPORT_MODULE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/OrderingMethods",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ORDERINGMETHODS_MODULE_H\n#define EIGEN_ORDERINGMETHODS_MODULE_H\n\n#include \"SparseCore\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n/** \n  * \\defgroup OrderingMethods_Module OrderingMethods module\n  *\n  * This module is currently for internal use only\n  * \n  * It defines various built-in and external ordering methods for sparse matrices. \n  * They are typically used to reduce the number of elements during \n  * the sparse matrix decomposition (LLT, LU, QR).\n  * Precisely, in a preprocessing step, a permutation matrix P is computed using \n  * those ordering methods and applied to the columns of the matrix. \n  * Using for instance the sparse Cholesky decomposition, it is expected that \n  * the nonzeros elements in LLT(A*P) will be much smaller than that in LLT(A).\n  * \n  * \n  * Usage : \n  * \\code\n  * #include <Eigen/OrderingMethods>\n  * \\endcode\n  * \n  * A simple usage is as a template parameter in the sparse decomposition classes : \n  * \n  * \\code \n  * SparseLU<MatrixType, COLAMDOrdering<int> > solver;\n  * \\endcode \n  * \n  * \\code \n  * SparseQR<MatrixType, COLAMDOrdering<int> > solver;\n  * \\endcode\n  * \n  * It is possible as well to call directly a particular ordering method for your own purpose, \n  * \\code \n  * AMDOrdering<int> ordering;\n  * PermutationMatrix<Dynamic, Dynamic, int> perm;\n  * SparseMatrix<double> A; \n  * //Fill the matrix ...\n  * \n  * ordering(A, perm); // Call AMD\n  * \\endcode\n  * \n  * \\note Some of these methods (like AMD or METIS), need the sparsity pattern \n  * of the input matrix to be symmetric. 
When the matrix is structurally unsymmetric, \n  * Eigen computes internally the pattern of \\f$A^T*A\\f$ before calling the method.\n  * If your matrix is already symmetric (at leat in structure), you can avoid that\n  * by calling the method with a SelfAdjointView type.\n  * \n  * \\code\n  *  // Call the ordering on the pattern of the lower triangular matrix A\n  * ordering(A.selfadjointView<Lower>(), perm);\n  * \\endcode\n  */\n\n#ifndef EIGEN_MPL2_ONLY\n#include \"src/OrderingMethods/Amd.h\"\n#endif\n\n#include \"src/OrderingMethods/Ordering.h\"\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_ORDERINGMETHODS_MODULE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/PaStiXSupport",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PASTIXSUPPORT_MODULE_H\n#define EIGEN_PASTIXSUPPORT_MODULE_H\n\n#include \"SparseCore\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\nextern \"C\" {\n#include <pastix_nompi.h>\n#include <pastix.h>\n}\n\n#ifdef complex\n#undef complex\n#endif\n\n/** \\ingroup Support_modules\n  * \\defgroup PaStiXSupport_Module PaStiXSupport module\n  * \n  * This module provides an interface to the <a href=\"http://pastix.gforge.inria.fr/\">PaSTiX</a> library.\n  * PaSTiX is a general \\b supernodal, \\b parallel and \\b opensource sparse solver.\n  * It provides the two following main factorization classes:\n  * - class PastixLLT : a supernodal, parallel LLt Cholesky factorization.\n  * - class PastixLDLT: a supernodal, parallel LDLt Cholesky factorization.\n  * - class PastixLU : a supernodal, parallel LU factorization (optimized for a symmetric pattern).\n  * \n  * \\code\n  * #include <Eigen/PaStiXSupport>\n  * \\endcode\n  *\n  * In order to use this module, the PaSTiX headers must be accessible from the include paths, and your binary must be linked to the PaSTiX library and its dependencies.\n  * The dependencies depend on how PaSTiX has been compiled.\n  * For a cmake based project, you can use our FindPaSTiX.cmake module to help you in this task.\n  *\n  */\n\n#include \"src/PaStiXSupport/PaStiXSupport.h\"\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_PASTIXSUPPORT_MODULE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/PardisoSupport",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PARDISOSUPPORT_MODULE_H\n#define EIGEN_PARDISOSUPPORT_MODULE_H\n\n#include \"SparseCore\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n#include <mkl_pardiso.h>\n\n/** \\ingroup Support_modules\n  * \\defgroup PardisoSupport_Module PardisoSupport module\n  *\n  * This module brings support for the Intel(R) MKL PARDISO direct sparse solvers.\n  *\n  * \\code\n  * #include <Eigen/PardisoSupport>\n  * \\endcode\n  *\n  * In order to use this module, the MKL headers must be accessible from the include paths, and your binary must be linked to the MKL library and its dependencies.\n  * See this \\ref TopicUsingIntelMKL \"page\" for more information on MKL-Eigen integration.\n  * \n  */\n\n#include \"src/PardisoSupport/PardisoSupport.h\"\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_PARDISOSUPPORT_MODULE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/QR",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_QR_MODULE_H\n#define EIGEN_QR_MODULE_H\n\n#include \"Core\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n#include \"Cholesky\"\n#include \"Jacobi\"\n#include \"Householder\"\n\n/** \\defgroup QR_Module QR module\n  *\n  *\n  *\n  * This module provides various QR decompositions\n  * This module also provides some MatrixBase methods, including:\n  *  - MatrixBase::householderQr()\n  *  - MatrixBase::colPivHouseholderQr()\n  *  - MatrixBase::fullPivHouseholderQr()\n  *\n  * \\code\n  * #include <Eigen/QR>\n  * \\endcode\n  */\n\n#include \"src/QR/HouseholderQR.h\"\n#include \"src/QR/FullPivHouseholderQR.h\"\n#include \"src/QR/ColPivHouseholderQR.h\"\n#include \"src/QR/CompleteOrthogonalDecomposition.h\"\n#ifdef EIGEN_USE_LAPACKE\n#include \"src/misc/lapacke.h\"\n#include \"src/QR/HouseholderQR_LAPACKE.h\"\n#include \"src/QR/ColPivHouseholderQR_LAPACKE.h\"\n#endif\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_QR_MODULE_H\n/* vim: set filetype=cpp et sw=2 ts=2 ai: */\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/QtAlignedMalloc",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_QTMALLOC_MODULE_H\n#define EIGEN_QTMALLOC_MODULE_H\n\n#include \"Core\"\n\n#if (!EIGEN_MALLOC_ALREADY_ALIGNED)\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\nvoid *qMalloc(std::size_t size)\n{\n  return Eigen::internal::aligned_malloc(size);\n}\n\nvoid qFree(void *ptr)\n{\n  Eigen::internal::aligned_free(ptr);\n}\n\nvoid *qRealloc(void *ptr, std::size_t size)\n{\n  void* newPtr = Eigen::internal::aligned_malloc(size);\n  memcpy(newPtr, ptr, size);\n  Eigen::internal::aligned_free(ptr);\n  return newPtr;\n}\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif\n\n#endif // EIGEN_QTMALLOC_MODULE_H\n/* vim: set filetype=cpp et sw=2 ts=2 ai: */\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/SPQRSupport",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPQRSUPPORT_MODULE_H\n#define EIGEN_SPQRSUPPORT_MODULE_H\n\n#include \"SparseCore\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n#include \"SuiteSparseQR.hpp\"\n\n/** \\ingroup Support_modules\n  * \\defgroup SPQRSupport_Module SuiteSparseQR module\n  * \n  * This module provides an interface to the SPQR library, which is part of the <a href=\"http://www.suitesparse.com\">suitesparse</a> package.\n  *\n  * \\code\n  * #include <Eigen/SPQRSupport>\n  * \\endcode\n  *\n  * In order to use this module, the SPQR headers must be accessible from the include paths, and your binary must be linked to the SPQR library and its dependencies (Cholmod, AMD, COLAMD,...).\n  * For a cmake based project, you can use our FindSPQR.cmake and FindCholmod.Cmake modules\n  *\n  */\n\n#include \"src/CholmodSupport/CholmodSupport.h\"\n#include \"src/SPQRSupport/SuiteSparseQRSupport.h\"\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/SVD",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SVD_MODULE_H\n#define EIGEN_SVD_MODULE_H\n\n#include \"QR\"\n#include \"Householder\"\n#include \"Jacobi\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n/** \\defgroup SVD_Module SVD module\n  *\n  *\n  *\n  * This module provides SVD decomposition for matrices (both real and complex).\n  * Two decomposition algorithms are provided:\n  *  - JacobiSVD implementing two-sided Jacobi iterations is numerically very accurate, fast for small matrices, but very slow for larger ones.\n  *  - BDCSVD implementing a recursive divide & conquer strategy on top of an upper-bidiagonalization which remains fast for large problems.\n  * These decompositions are accessible via the respective classes and following MatrixBase methods:\n  *  - MatrixBase::jacobiSvd()\n  *  - MatrixBase::bdcSvd()\n  *\n  * \\code\n  * #include <Eigen/SVD>\n  * \\endcode\n  */\n\n#include \"src/misc/RealSvd2x2.h\"\n#include \"src/SVD/UpperBidiagonalization.h\"\n#include \"src/SVD/SVDBase.h\"\n#include \"src/SVD/JacobiSVD.h\"\n#include \"src/SVD/BDCSVD.h\"\n#if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT)\n#include \"src/misc/lapacke.h\"\n#include \"src/SVD/JacobiSVD_LAPACKE.h\"\n#endif\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_SVD_MODULE_H\n/* vim: set filetype=cpp et sw=2 ts=2 ai: */\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/Sparse",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSE_MODULE_H\n#define EIGEN_SPARSE_MODULE_H\n\n/** \\defgroup Sparse_Module Sparse meta-module\n  *\n  * Meta-module including all related modules:\n  * - \\ref SparseCore_Module\n  * - \\ref OrderingMethods_Module\n  * - \\ref SparseCholesky_Module\n  * - \\ref SparseLU_Module\n  * - \\ref SparseQR_Module\n  * - \\ref IterativeLinearSolvers_Module\n  *\n    \\code\n    #include <Eigen/Sparse>\n    \\endcode\n  */\n\n#include \"SparseCore\"\n#include \"OrderingMethods\"\n#ifndef EIGEN_MPL2_ONLY\n#include \"SparseCholesky\"\n#endif\n#include \"SparseLU\"\n#include \"SparseQR\"\n#include \"IterativeLinearSolvers\"\n\n#endif // EIGEN_SPARSE_MODULE_H\n\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/SparseCholesky",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2013 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSECHOLESKY_MODULE_H\n#define EIGEN_SPARSECHOLESKY_MODULE_H\n\n#include \"SparseCore\"\n#include \"OrderingMethods\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n/** \n  * \\defgroup SparseCholesky_Module SparseCholesky module\n  *\n  * This module currently provides two variants of the direct sparse Cholesky decomposition for selfadjoint (hermitian) matrices.\n  * Those decompositions are accessible via the following classes:\n  *  - SimplicialLLt,\n  *  - SimplicialLDLt\n  *\n  * Such problems can also be solved using the ConjugateGradient solver from the IterativeLinearSolvers module.\n  *\n  * \\code\n  * #include <Eigen/SparseCholesky>\n  * \\endcode\n  */\n\n#ifdef EIGEN_MPL2_ONLY\n#error The SparseCholesky module has nothing to offer in MPL2 only mode\n#endif\n\n#include \"src/SparseCholesky/SimplicialCholesky.h\"\n\n#ifndef EIGEN_MPL2_ONLY\n#include \"src/SparseCholesky/SimplicialCholesky_impl.h\"\n#endif\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_SPARSECHOLESKY_MODULE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/SparseCore",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSECORE_MODULE_H\n#define EIGEN_SPARSECORE_MODULE_H\n\n#include \"Core\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n#include <vector>\n#include <map>\n#include <cstdlib>\n#include <cstring>\n#include <algorithm>\n\n/** \n  * \\defgroup SparseCore_Module SparseCore module\n  *\n  * This module provides a sparse matrix representation, and basic associated matrix manipulations\n  * and operations.\n  *\n  * See the \\ref TutorialSparse \"Sparse tutorial\"\n  *\n  * \\code\n  * #include <Eigen/SparseCore>\n  * \\endcode\n  *\n  * This module depends on: Core.\n  */\n\n#include \"src/SparseCore/SparseUtil.h\"\n#include \"src/SparseCore/SparseMatrixBase.h\"\n#include \"src/SparseCore/SparseAssign.h\"\n#include \"src/SparseCore/CompressedStorage.h\"\n#include \"src/SparseCore/AmbiVector.h\"\n#include \"src/SparseCore/SparseCompressedBase.h\"\n#include \"src/SparseCore/SparseMatrix.h\"\n#include \"src/SparseCore/SparseMap.h\"\n#include \"src/SparseCore/MappedSparseMatrix.h\"\n#include \"src/SparseCore/SparseVector.h\"\n#include \"src/SparseCore/SparseRef.h\"\n#include \"src/SparseCore/SparseCwiseUnaryOp.h\"\n#include \"src/SparseCore/SparseCwiseBinaryOp.h\"\n#include \"src/SparseCore/SparseTranspose.h\"\n#include \"src/SparseCore/SparseBlock.h\"\n#include \"src/SparseCore/SparseDot.h\"\n#include \"src/SparseCore/SparseRedux.h\"\n#include \"src/SparseCore/SparseView.h\"\n#include \"src/SparseCore/SparseDiagonalProduct.h\"\n#include \"src/SparseCore/ConservativeSparseSparseProduct.h\"\n#include \"src/SparseCore/SparseSparseProductWithPruning.h\"\n#include \"src/SparseCore/SparseProduct.h\"\n#include 
\"src/SparseCore/SparseDenseProduct.h\"\n#include \"src/SparseCore/SparseSelfAdjointView.h\"\n#include \"src/SparseCore/SparseTriangularView.h\"\n#include \"src/SparseCore/TriangularSolver.h\"\n#include \"src/SparseCore/SparsePermutation.h\"\n#include \"src/SparseCore/SparseFuzzy.h\"\n#include \"src/SparseCore/SparseSolverBase.h\"\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_SPARSECORE_MODULE_H\n\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/SparseLU",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSELU_MODULE_H\n#define EIGEN_SPARSELU_MODULE_H\n\n#include \"SparseCore\"\n\n/** \n  * \\defgroup SparseLU_Module SparseLU module\n  * This module defines a supernodal factorization of general sparse matrices.\n  * The code is fully optimized for supernode-panel updates with specialized kernels.\n  * Please, see the documentation of the SparseLU class for more details.\n  */\n\n// Ordering interface\n#include \"OrderingMethods\"\n\n#include \"src/SparseLU/SparseLU_gemm_kernel.h\"\n\n#include \"src/SparseLU/SparseLU_Structs.h\"\n#include \"src/SparseLU/SparseLU_SupernodalMatrix.h\"\n#include \"src/SparseLU/SparseLUImpl.h\"\n#include \"src/SparseCore/SparseColEtree.h\"\n#include \"src/SparseLU/SparseLU_Memory.h\"\n#include \"src/SparseLU/SparseLU_heap_relax_snode.h\"\n#include \"src/SparseLU/SparseLU_relax_snode.h\"\n#include \"src/SparseLU/SparseLU_pivotL.h\"\n#include \"src/SparseLU/SparseLU_panel_dfs.h\"\n#include \"src/SparseLU/SparseLU_kernel_bmod.h\"\n#include \"src/SparseLU/SparseLU_panel_bmod.h\"\n#include \"src/SparseLU/SparseLU_column_dfs.h\"\n#include \"src/SparseLU/SparseLU_column_bmod.h\"\n#include \"src/SparseLU/SparseLU_copy_to_ucol.h\"\n#include \"src/SparseLU/SparseLU_pruneL.h\"\n#include \"src/SparseLU/SparseLU_Utils.h\"\n#include \"src/SparseLU/SparseLU.h\"\n\n#endif // EIGEN_SPARSELU_MODULE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/SparseQR",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSEQR_MODULE_H\n#define EIGEN_SPARSEQR_MODULE_H\n\n#include \"SparseCore\"\n#include \"OrderingMethods\"\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n/** \\defgroup SparseQR_Module SparseQR module\n  * \\brief Provides QR decomposition for sparse matrices\n  * \n  * This module provides a simplicial version of the left-looking Sparse QR decomposition. \n  * The columns of the input matrix should be reordered to limit the fill-in during the \n  * decomposition. Built-in methods (COLAMD, AMD) or external  methods (METIS) can be used to this end.\n  * See the \\link OrderingMethods_Module OrderingMethods\\endlink module for the list \n  * of built-in and external ordering methods.\n  * \n  * \\code\n  * #include <Eigen/SparseQR>\n  * \\endcode\n  * \n  * \n  */\n\n#include \"OrderingMethods\"\n#include \"src/SparseCore/SparseColEtree.h\"\n#include \"src/SparseQR/SparseQR.h\"\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/StdDeque",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_STDDEQUE_MODULE_H\n#define EIGEN_STDDEQUE_MODULE_H\n\n#include \"Core\"\n#include <deque>\n\n#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && (EIGEN_MAX_STATIC_ALIGN_BYTES<=16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */\n\n#define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...)\n\n#else\n\n#include \"src/StlSupport/StdDeque.h\"\n\n#endif\n\n#endif // EIGEN_STDDEQUE_MODULE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/StdList",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_STDLIST_MODULE_H\n#define EIGEN_STDLIST_MODULE_H\n\n#include \"Core\"\n#include <list>\n\n#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && (EIGEN_MAX_STATIC_ALIGN_BYTES<=16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */\n\n#define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...)\n\n#else\n\n#include \"src/StlSupport/StdList.h\"\n\n#endif\n\n#endif // EIGEN_STDLIST_MODULE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/StdVector",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_STDVECTOR_MODULE_H\n#define EIGEN_STDVECTOR_MODULE_H\n\n#include \"Core\"\n#include <vector>\n\n#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && (EIGEN_MAX_STATIC_ALIGN_BYTES<=16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */\n\n#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...)\n\n#else\n\n#include \"src/StlSupport/StdVector.h\"\n\n#endif\n\n#endif // EIGEN_STDVECTOR_MODULE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/SuperLUSupport",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SUPERLUSUPPORT_MODULE_H\n#define EIGEN_SUPERLUSUPPORT_MODULE_H\n\n#include \"SparseCore\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\n#ifdef EMPTY\n#define EIGEN_EMPTY_WAS_ALREADY_DEFINED\n#endif\n\ntypedef int int_t;\n#include <slu_Cnames.h>\n#include <supermatrix.h>\n#include <slu_util.h>\n\n// slu_util.h defines a preprocessor token named EMPTY which is really polluting,\n// so we remove it in favor of a SUPERLU_EMPTY token.\n// If EMPTY was already defined then we don't undef it.\n\n#if defined(EIGEN_EMPTY_WAS_ALREADY_DEFINED)\n# undef EIGEN_EMPTY_WAS_ALREADY_DEFINED\n#elif defined(EMPTY)\n# undef EMPTY\n#endif\n\n#define SUPERLU_EMPTY (-1)\n\nnamespace Eigen { struct SluMatrix; }\n\n/** \\ingroup Support_modules\n  * \\defgroup SuperLUSupport_Module SuperLUSupport module\n  *\n  * This module provides an interface to the <a href=\"http://crd-legacy.lbl.gov/~xiaoye/SuperLU/\">SuperLU</a> library.\n  * It provides the following factorization class:\n  * - class SuperLU: a supernodal sequential LU factorization.\n  * - class SuperILU: a supernodal sequential incomplete LU factorization (to be used as a preconditioner for iterative methods).\n  *\n  * \\warning This wrapper requires at least versions 4.0 of SuperLU. 
The 3.x versions are not supported.\n  *\n  * \\warning When including this module, you have to use SUPERLU_EMPTY instead of EMPTY which is no longer defined because it is too polluting.\n  *\n  * \\code\n  * #include <Eigen/SuperLUSupport>\n  * \\endcode\n  *\n  * In order to use this module, the superlu headers must be accessible from the include paths, and your binary must be linked to the superlu library and its dependencies.\n  * The dependencies depend on how superlu has been compiled.\n  * For a cmake based project, you can use our FindSuperLU.cmake module to help you in this task.\n  *\n  */\n\n#include \"src/SuperLUSupport/SuperLUSupport.h\"\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_SUPERLUSUPPORT_MODULE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/UmfPackSupport",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_UMFPACKSUPPORT_MODULE_H\n#define EIGEN_UMFPACKSUPPORT_MODULE_H\n\n#include \"SparseCore\"\n\n#include \"src/Core/util/DisableStupidWarnings.h\"\n\nextern \"C\" {\n#include <umfpack.h>\n}\n\n/** \\ingroup Support_modules\n  * \\defgroup UmfPackSupport_Module UmfPackSupport module\n  *\n  * This module provides an interface to the UmfPack library which is part of the <a href=\"http://www.suitesparse.com\">suitesparse</a> package.\n  * It provides the following factorization class:\n  * - class UmfPackLU: a multifrontal sequential LU factorization.\n  *\n  * \\code\n  * #include <Eigen/UmfPackSupport>\n  * \\endcode\n  *\n  * In order to use this module, the umfpack headers must be accessible from the include paths, and your binary must be linked to the umfpack library and its dependencies.\n  * The dependencies depend on how umfpack has been compiled.\n  * For a cmake based project, you can use our FindUmfPack.cmake module to help you in this task.\n  *\n  */\n\n#include \"src/UmfPackSupport/UmfPackSupport.h\"\n\n#include \"src/Core/util/ReenableStupidWarnings.h\"\n\n#endif // EIGEN_UMFPACKSUPPORT_MODULE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Cholesky/LDLT.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2009 Keir Mierle <mierle@gmail.com>\n// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2011 Timothy E. Holy <tim.holy@gmail.com >\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_LDLT_H\n#define EIGEN_LDLT_H\n\nnamespace Eigen {\n\nnamespace internal {\n  template<typename MatrixType, int UpLo> struct LDLT_Traits;\n\n  // PositiveSemiDef means positive semi-definite and non-zero; same for NegativeSemiDef\n  enum SignMatrix { PositiveSemiDef, NegativeSemiDef, ZeroSign, Indefinite };\n}\n\n/** \\ingroup Cholesky_Module\n  *\n  * \\class LDLT\n  *\n  * \\brief Robust Cholesky decomposition of a matrix with pivoting\n  *\n  * \\tparam _MatrixType the type of the matrix of which to compute the LDL^T Cholesky decomposition\n  * \\tparam _UpLo the triangular part that will be used for the decomposition: Lower (default) or Upper.\n  *             The other triangular part won't be read.\n  *\n  * Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite\n  * matrix \\f$ A \\f$ such that \\f$ A =  P^TLDL^*P \\f$, where P is a permutation matrix, L\n  * is lower triangular with a unit diagonal and D is a diagonal matrix.\n  *\n  * The decomposition uses pivoting to ensure stability, so that L will have\n  * zeros in the bottom right rank(A) - n submatrix. Avoiding the square root\n  * on D also stabilizes the computation.\n  *\n  * Remember that Cholesky decompositions are not rank-revealing. 
Also, do not use a Cholesky\n  * decomposition to determine whether a system of equations has a solution.\n  *\n  * This class supports the \\link InplaceDecomposition inplace decomposition \\endlink mechanism.\n  * \n  * \\sa MatrixBase::ldlt(), SelfAdjointView::ldlt(), class LLT\n  */\ntemplate<typename _MatrixType, int _UpLo> class LDLT\n{\n  public:\n    typedef _MatrixType MatrixType;\n    enum {\n      RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,\n      UpLo = _UpLo\n    };\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;\n    typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef Matrix<Scalar, RowsAtCompileTime, 1, 0, MaxRowsAtCompileTime, 1> TmpMatrixType;\n\n    typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;\n    typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;\n\n    typedef internal::LDLT_Traits<MatrixType,UpLo> Traits;\n\n    /** \\brief Default Constructor.\n      *\n      * The default constructor is useful in cases in which the user intends to\n      * perform decompositions via LDLT::compute(const MatrixType&).\n      */\n    LDLT()\n      : m_matrix(),\n        m_transpositions(),\n        m_sign(internal::ZeroSign),\n        m_isInitialized(false)\n    {}\n\n    /** \\brief Default Constructor with memory preallocation\n      *\n      * Like the default constructor but with preallocation of the internal data\n      * according to the specified problem \\a size.\n      * \\sa LDLT()\n      */\n    explicit LDLT(Index size)\n      : m_matrix(size, size),\n        m_transpositions(size),\n        m_temporary(size),\n        
m_sign(internal::ZeroSign),\n        m_isInitialized(false)\n    {}\n\n    /** \\brief Constructor with decomposition\n      *\n      * This calculates the decomposition for the input \\a matrix.\n      *\n      * \\sa LDLT(Index size)\n      */\n    template<typename InputType>\n    explicit LDLT(const EigenBase<InputType>& matrix)\n      : m_matrix(matrix.rows(), matrix.cols()),\n        m_transpositions(matrix.rows()),\n        m_temporary(matrix.rows()),\n        m_sign(internal::ZeroSign),\n        m_isInitialized(false)\n    {\n      compute(matrix.derived());\n    }\n\n    /** \\brief Constructs a LDLT factorization from a given matrix\n      *\n      * This overloaded constructor is provided for \\link InplaceDecomposition inplace decomposition \\endlink when \\c MatrixType is a Eigen::Ref.\n      *\n      * \\sa LDLT(const EigenBase&)\n      */\n    template<typename InputType>\n    explicit LDLT(EigenBase<InputType>& matrix)\n      : m_matrix(matrix.derived()),\n        m_transpositions(matrix.rows()),\n        m_temporary(matrix.rows()),\n        m_sign(internal::ZeroSign),\n        m_isInitialized(false)\n    {\n      compute(matrix.derived());\n    }\n\n    /** Clear any existing decomposition\n     * \\sa rankUpdate(w,sigma)\n     */\n    void setZero()\n    {\n      m_isInitialized = false;\n    }\n\n    /** \\returns a view of the upper triangular matrix U */\n    inline typename Traits::MatrixU matrixU() const\n    {\n      eigen_assert(m_isInitialized && \"LDLT is not initialized.\");\n      return Traits::getU(m_matrix);\n    }\n\n    /** \\returns a view of the lower triangular matrix L */\n    inline typename Traits::MatrixL matrixL() const\n    {\n      eigen_assert(m_isInitialized && \"LDLT is not initialized.\");\n      return Traits::getL(m_matrix);\n    }\n\n    /** \\returns the permutation matrix P as a transposition sequence.\n      */\n    inline const TranspositionType& transpositionsP() const\n    {\n      
eigen_assert(m_isInitialized && \"LDLT is not initialized.\");\n      return m_transpositions;\n    }\n\n    /** \\returns the coefficients of the diagonal matrix D */\n    inline Diagonal<const MatrixType> vectorD() const\n    {\n      eigen_assert(m_isInitialized && \"LDLT is not initialized.\");\n      return m_matrix.diagonal();\n    }\n\n    /** \\returns true if the matrix is positive (semidefinite) */\n    inline bool isPositive() const\n    {\n      eigen_assert(m_isInitialized && \"LDLT is not initialized.\");\n      return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign;\n    }\n\n    /** \\returns true if the matrix is negative (semidefinite) */\n    inline bool isNegative(void) const\n    {\n      eigen_assert(m_isInitialized && \"LDLT is not initialized.\");\n      return m_sign == internal::NegativeSemiDef || m_sign == internal::ZeroSign;\n    }\n\n    /** \\returns a solution x of \\f$ A x = b \\f$ using the current decomposition of A.\n      *\n      * This function also supports in-place solves using the syntax <tt>x = decompositionObject.solve(x)</tt> .\n      *\n      * \\note_about_checking_solutions\n      *\n      * More precisely, this method solves \\f$ A x = b \\f$ using the decomposition \\f$ A = P^T L D L^* P \\f$\n      * by solving the systems \\f$ P^T y_1 = b \\f$, \\f$ L y_2 = y_1 \\f$, \\f$ D y_3 = y_2 \\f$,\n      * \\f$ L^* y_4 = y_3 \\f$ and \\f$ P x = y_4 \\f$ in succession. If the matrix \\f$ A \\f$ is singular, then\n      * \\f$ D \\f$ will also be singular (all the other matrices are invertible). In that case, the\n      * least-square solution of \\f$ D y_3 = y_2 \\f$ is computed. 
This does not mean that this function\n      * computes the least-square solution of \\f$ A x = b \\f$ if \\f$ A \\f$ is singular.\n      *\n      * \\sa MatrixBase::ldlt(), SelfAdjointView::ldlt()\n      */\n    template<typename Rhs>\n    inline const Solve<LDLT, Rhs>\n    solve(const MatrixBase<Rhs>& b) const\n    {\n      eigen_assert(m_isInitialized && \"LDLT is not initialized.\");\n      eigen_assert(m_matrix.rows()==b.rows()\n                && \"LDLT::solve(): invalid number of rows of the right hand side matrix b\");\n      return Solve<LDLT, Rhs>(*this, b.derived());\n    }\n\n    template<typename Derived>\n    bool solveInPlace(MatrixBase<Derived> &bAndX) const;\n\n    template<typename InputType>\n    LDLT& compute(const EigenBase<InputType>& matrix);\n\n    /** \\returns an estimate of the reciprocal condition number of the matrix of\n     *  which \\c *this is the LDLT decomposition.\n     */\n    RealScalar rcond() const\n    {\n      eigen_assert(m_isInitialized && \"LDLT is not initialized.\");\n      return internal::rcond_estimate_helper(m_l1_norm, *this);\n    }\n\n    template <typename Derived>\n    LDLT& rankUpdate(const MatrixBase<Derived>& w, const RealScalar& alpha=1);\n\n    /** \\returns the internal LDLT decomposition matrix\n      *\n      * TODO: document the storage layout\n      */\n    inline const MatrixType& matrixLDLT() const\n    {\n      eigen_assert(m_isInitialized && \"LDLT is not initialized.\");\n      return m_matrix;\n    }\n\n    MatrixType reconstructedMatrix() const;\n\n    /** \\returns the adjoint of \\c *this, that is, a const reference to the decomposition itself as the underlying matrix is self-adjoint.\n      *\n      * This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as:\n      * \\code x = decomposition.adjoint().solve(b) \\endcode\n      */\n    const LDLT& adjoint() const { return *this; };\n\n    inline Index rows() const { return m_matrix.rows(); 
}\n    inline Index cols() const { return m_matrix.cols(); }\n\n    /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was successful,\n      *          \\c NumericalIssue if the matrix appears to be negative.\n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"LDLT is not initialized.\");\n      return m_info;\n    }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    template<typename RhsType, typename DstType>\n    void _solve_impl(const RhsType &rhs, DstType &dst) const;\n    #endif\n\n  protected:\n\n    static void check_template_parameters()\n    {\n      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);\n    }\n\n    /** \\internal\n      * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.\n      * The strict upper part is used during the decomposition, the strict lower\n      * part correspond to the coefficients of L (its diagonal is equal to 1 and\n      * is not stored), and the diagonal entries correspond to D.\n      */\n    MatrixType m_matrix;\n    RealScalar m_l1_norm;\n    TranspositionType m_transpositions;\n    TmpMatrixType m_temporary;\n    internal::SignMatrix m_sign;\n    bool m_isInitialized;\n    ComputationInfo m_info;\n};\n\nnamespace internal {\n\ntemplate<int UpLo> struct ldlt_inplace;\n\ntemplate<> struct ldlt_inplace<Lower>\n{\n  template<typename MatrixType, typename TranspositionType, typename Workspace>\n  static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)\n  {\n    using std::abs;\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n    typedef typename TranspositionType::StorageIndex IndexType;\n    eigen_assert(mat.rows()==mat.cols());\n    const Index size = mat.rows();\n    bool found_zero_pivot = false;\n    bool ret = true;\n\n    if (size <= 1)\n    {\n      transpositions.setIdentity();\n      if 
(numext::real(mat.coeff(0,0)) > static_cast<RealScalar>(0) ) sign = PositiveSemiDef;\n      else if (numext::real(mat.coeff(0,0)) < static_cast<RealScalar>(0)) sign = NegativeSemiDef;\n      else sign = ZeroSign;\n      return true;\n    }\n\n    for (Index k = 0; k < size; ++k)\n    {\n      // Find largest diagonal element\n      Index index_of_biggest_in_corner;\n      mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);\n      index_of_biggest_in_corner += k;\n\n      transpositions.coeffRef(k) = IndexType(index_of_biggest_in_corner);\n      if(k != index_of_biggest_in_corner)\n      {\n        // apply the transposition while taking care to consider only\n        // the lower triangular part\n        Index s = size-index_of_biggest_in_corner-1; // trailing size after the biggest element\n        mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k));\n        mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s));\n        std::swap(mat.coeffRef(k,k),mat.coeffRef(index_of_biggest_in_corner,index_of_biggest_in_corner));\n        for(Index i=k+1;i<index_of_biggest_in_corner;++i)\n        {\n          Scalar tmp = mat.coeffRef(i,k);\n          mat.coeffRef(i,k) = numext::conj(mat.coeffRef(index_of_biggest_in_corner,i));\n          mat.coeffRef(index_of_biggest_in_corner,i) = numext::conj(tmp);\n        }\n        if(NumTraits<Scalar>::IsComplex)\n          mat.coeffRef(index_of_biggest_in_corner,k) = numext::conj(mat.coeff(index_of_biggest_in_corner,k));\n      }\n\n      // partition the matrix:\n      //       A00 |  -  |  -\n      // lu  = A10 | A11 |  -\n      //       A20 | A21 | A22\n      Index rs = size - k - 1;\n      Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1);\n      Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k);\n      Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k);\n\n      if(k>0)\n      {\n        temp.head(k) = mat.diagonal().real().head(k).asDiagonal() * A10.adjoint();\n        
mat.coeffRef(k,k) -= (A10 * temp.head(k)).value();\n        if(rs>0)\n          A21.noalias() -= A20 * temp.head(k);\n      }\n\n      // In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot\n      // was smaller than the cutoff value. However, since LDLT is not rank-revealing\n      // we should only make sure that we do not introduce INF or NaN values.\n      // Remark that LAPACK also uses 0 as the cutoff value.\n      RealScalar realAkk = numext::real(mat.coeffRef(k,k));\n      bool pivot_is_valid = (abs(realAkk) > RealScalar(0));\n\n      if(k==0 && !pivot_is_valid)\n      {\n        // The entire diagonal is zero, there is nothing more to do\n        // except filling the transpositions, and checking whether the matrix is zero.\n        sign = ZeroSign;\n        for(Index j = 0; j<size; ++j)\n        {\n          transpositions.coeffRef(j) = IndexType(j);\n          ret = ret && (mat.col(j).tail(size-j-1).array()==Scalar(0)).all();\n        }\n        return ret;\n      }\n\n      if((rs>0) && pivot_is_valid)\n        A21 /= realAkk;\n\n      if(found_zero_pivot && pivot_is_valid) ret = false; // factorization failed\n      else if(!pivot_is_valid) found_zero_pivot = true;\n\n      if (sign == PositiveSemiDef) {\n        if (realAkk < static_cast<RealScalar>(0)) sign = Indefinite;\n      } else if (sign == NegativeSemiDef) {\n        if (realAkk > static_cast<RealScalar>(0)) sign = Indefinite;\n      } else if (sign == ZeroSign) {\n        if (realAkk > static_cast<RealScalar>(0)) sign = PositiveSemiDef;\n        else if (realAkk < static_cast<RealScalar>(0)) sign = NegativeSemiDef;\n      }\n    }\n\n    return ret;\n  }\n\n  // Reference for the algorithm: Davis and Hager, \"Multiple Rank\n  // Modifications of a Sparse Cholesky Factorization\" (Algorithm 1)\n  // Trivial rearrangements of their computations (Timothy E. 
Holy)\n  // allow their algorithm to work for rank-1 updates even if the\n  // original matrix is not of full rank.\n  // Here only rank-1 updates are implemented, to reduce the\n  // requirement for intermediate storage and improve accuracy\n  template<typename MatrixType, typename WDerived>\n  static bool updateInPlace(MatrixType& mat, MatrixBase<WDerived>& w, const typename MatrixType::RealScalar& sigma=1)\n  {\n    using numext::isfinite;\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n\n    const Index size = mat.rows();\n    eigen_assert(mat.cols() == size && w.size()==size);\n\n    RealScalar alpha = 1;\n\n    // Apply the update\n    for (Index j = 0; j < size; j++)\n    {\n      // Check for termination due to an original decomposition of low-rank\n      if (!(isfinite)(alpha))\n        break;\n\n      // Update the diagonal terms\n      RealScalar dj = numext::real(mat.coeff(j,j));\n      Scalar wj = w.coeff(j);\n      RealScalar swj2 = sigma*numext::abs2(wj);\n      RealScalar gamma = dj*alpha + swj2;\n\n      mat.coeffRef(j,j) += swj2/alpha;\n      alpha += swj2/dj;\n\n\n      // Update the terms of L\n      Index rs = size-j-1;\n      w.tail(rs) -= wj * mat.col(j).tail(rs);\n      if(gamma != 0)\n        mat.col(j).tail(rs) += (sigma*numext::conj(wj)/gamma)*w.tail(rs);\n    }\n    return true;\n  }\n\n  template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType>\n  static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, const typename MatrixType::RealScalar& sigma=1)\n  {\n    // Apply the permutation to the input w\n    tmp = transpositions * w;\n\n    return ldlt_inplace<Lower>::updateInPlace(mat,tmp,sigma);\n  }\n};\n\ntemplate<> struct ldlt_inplace<Upper>\n{\n  template<typename MatrixType, typename TranspositionType, typename Workspace>\n  static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, 
TranspositionType& transpositions, Workspace& temp, SignMatrix& sign)\n  {\n    Transpose<MatrixType> matt(mat);\n    return ldlt_inplace<Lower>::unblocked(matt, transpositions, temp, sign);\n  }\n\n  template<typename MatrixType, typename TranspositionType, typename Workspace, typename WType>\n  static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, const typename MatrixType::RealScalar& sigma=1)\n  {\n    Transpose<MatrixType> matt(mat);\n    return ldlt_inplace<Lower>::update(matt, transpositions, tmp, w.conjugate(), sigma);\n  }\n};\n\ntemplate<typename MatrixType> struct LDLT_Traits<MatrixType,Lower>\n{\n  typedef const TriangularView<const MatrixType, UnitLower> MatrixL;\n  typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitUpper> MatrixU;\n  static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }\n  static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }\n};\n\ntemplate<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>\n{\n  typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitLower> MatrixL;\n  typedef const TriangularView<const MatrixType, UnitUpper> MatrixU;\n  static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); }\n  static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); }\n};\n\n} // end namespace internal\n\n/** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \\a matrix\n  */\ntemplate<typename MatrixType, int _UpLo>\ntemplate<typename InputType>\nLDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>& a)\n{\n  check_template_parameters();\n\n  eigen_assert(a.rows()==a.cols());\n  const Index size = a.rows();\n\n  m_matrix = a.derived();\n\n  // Compute matrix L1 norm = max abs column sum.\n  m_l1_norm = RealScalar(0);\n  // TODO move this code to SelfAdjointView\n  for (Index col = 0; col < size; ++col) 
{\n    RealScalar abs_col_sum;\n    if (_UpLo == Lower)\n      abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>();\n    else\n      abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>();\n    if (abs_col_sum > m_l1_norm)\n      m_l1_norm = abs_col_sum;\n  }\n\n  m_transpositions.resize(size);\n  m_isInitialized = false;\n  m_temporary.resize(size);\n  m_sign = internal::ZeroSign;\n\n  m_info = internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign) ? Success : NumericalIssue;\n\n  m_isInitialized = true;\n  return *this;\n}\n\n/** Update the LDLT decomposition:  given A = L D L^T, efficiently compute the decomposition of A + sigma w w^T.\n * \\param w a vector to be incorporated into the decomposition.\n * \\param sigma a scalar, +1 for updates and -1 for \"downdates,\" which correspond to removing previously-added column vectors. Optional; default value is +1.\n * \\sa setZero()\n  */\ntemplate<typename MatrixType, int _UpLo>\ntemplate<typename Derived>\nLDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename LDLT<MatrixType,_UpLo>::RealScalar& sigma)\n{\n  typedef typename TranspositionType::StorageIndex IndexType;\n  const Index size = w.rows();\n  if (m_isInitialized)\n  {\n    eigen_assert(m_matrix.rows()==size);\n  }\n  else\n  {\n    m_matrix.resize(size,size);\n    m_matrix.setZero();\n    m_transpositions.resize(size);\n    for (Index i = 0; i < size; i++)\n      m_transpositions.coeffRef(i) = IndexType(i);\n    m_temporary.resize(size);\n    m_sign = sigma>=0 ? 
internal::PositiveSemiDef : internal::NegativeSemiDef;\n    m_isInitialized = true;\n  }\n\n  internal::ldlt_inplace<UpLo>::update(m_matrix, m_transpositions, m_temporary, w, sigma);\n\n  return *this;\n}\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate<typename _MatrixType, int _UpLo>\ntemplate<typename RhsType, typename DstType>\nvoid LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const\n{\n  eigen_assert(rhs.rows() == rows());\n  // dst = P b\n  dst = m_transpositions * rhs;\n\n  // dst = L^-1 (P b)\n  matrixL().solveInPlace(dst);\n\n  // dst = D^-1 (L^-1 P b)\n  // more precisely, use pseudo-inverse of D (see bug 241)\n  using std::abs;\n  const typename Diagonal<const MatrixType>::RealReturnType vecD(vectorD());\n  // In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon\n  // as motivated by LAPACK's xGELSS:\n  // RealScalar tolerance = numext::maxi(vecD.array().abs().maxCoeff() * NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest());\n  // However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest\n  // diagonal element is not well justified and leads to numerical issues in some cases.\n  // Moreover, Lapack's xSYTRS routines use 0 for the tolerance.\n  RealScalar tolerance = RealScalar(1) / NumTraits<RealScalar>::highest();\n\n  for (Index i = 0; i < vecD.size(); ++i)\n  {\n    if(abs(vecD(i)) > tolerance)\n      dst.row(i) /= vecD(i);\n    else\n      dst.row(i).setZero();\n  }\n\n  // dst = L^-T (D^-1 L^-1 P b)\n  matrixU().solveInPlace(dst);\n\n  // dst = P^-1 (L^-T D^-1 L^-1 P b) = A^-1 b\n  dst = m_transpositions.transpose() * dst;\n}\n#endif\n\n/** \\internal use x = ldlt_object.solve(x);\n  *\n  * This is the \\em in-place version of solve().\n  *\n  * \\param bAndX represents both the right-hand side matrix b and result x.\n  *\n  * \\returns true always! 
If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD.\n  *\n  * This version avoids a copy when the right hand side matrix b is not\n  * needed anymore.\n  *\n  * \\sa LDLT::solve(), MatrixBase::ldlt()\n  */\ntemplate<typename MatrixType,int _UpLo>\ntemplate<typename Derived>\nbool LDLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const\n{\n  eigen_assert(m_isInitialized && \"LDLT is not initialized.\");\n  eigen_assert(m_matrix.rows() == bAndX.rows());\n\n  bAndX = this->solve(bAndX);\n\n  return true;\n}\n\n/** \\returns the matrix represented by the decomposition,\n * i.e., it returns the product: P^T L D L^* P.\n * This function is provided for debug purpose. */\ntemplate<typename MatrixType, int _UpLo>\nMatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const\n{\n  eigen_assert(m_isInitialized && \"LDLT is not initialized.\");\n  const Index size = m_matrix.rows();\n  MatrixType res(size,size);\n\n  // P\n  res.setIdentity();\n  res = transpositionsP() * res;\n  // L^* P\n  res = matrixU() * res;\n  // D(L^*P)\n  res = vectorD().real().asDiagonal() * res;\n  // L(DL^*P)\n  res = matrixL() * res;\n  // P^T (LDL^*P)\n  res = transpositionsP().transpose() * res;\n\n  return res;\n}\n\n/** \\cholesky_module\n  * \\returns the Cholesky decomposition with full pivoting without square root of \\c *this\n  * \\sa MatrixBase::ldlt()\n  */\ntemplate<typename MatrixType, unsigned int UpLo>\ninline const LDLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>\nSelfAdjointView<MatrixType, UpLo>::ldlt() const\n{\n  return LDLT<PlainObject,UpLo>(m_matrix);\n}\n\n/** \\cholesky_module\n  * \\returns the Cholesky decomposition with full pivoting without square root of \\c *this\n  * \\sa SelfAdjointView::ldlt()\n  */\ntemplate<typename Derived>\ninline const LDLT<typename MatrixBase<Derived>::PlainObject>\nMatrixBase<Derived>::ldlt() const\n{\n  return LDLT<PlainObject>(derived());\n}\n\n} // end 
namespace Eigen\n\n#endif // EIGEN_LDLT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Cholesky/LLT.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_LLT_H\n#define EIGEN_LLT_H\n\nnamespace Eigen {\n\nnamespace internal{\ntemplate<typename MatrixType, int UpLo> struct LLT_Traits;\n}\n\n/** \\ingroup Cholesky_Module\n  *\n  * \\class LLT\n  *\n  * \\brief Standard Cholesky decomposition (LL^T) of a matrix and associated features\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are computing the LL^T Cholesky decomposition\n  * \\tparam _UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper.\n  *             The other triangular part won't be read.\n  *\n  * This class performs a LL^T Cholesky decomposition of a symmetric, positive definite\n  * matrix A such that A = LL^* = U^*U, where L is lower triangular.\n  *\n  * While the Cholesky decomposition is particularly useful to solve selfadjoint problems like  D^*D x = b,\n  * for that purpose, we recommend the Cholesky decomposition without square root which is more stable\n  * and even faster. Nevertheless, this standard Cholesky decomposition remains useful in many other\n  * situations like generalised eigen problems with hermitian matrices.\n  *\n  * Remember that Cholesky decompositions are not rank-revealing. This LLT decomposition is only stable on positive definite matrices,\n  * use LDLT instead for the semidefinite case. 
Also, do not use a Cholesky decomposition to determine whether a system of equations\n  * has a solution.\n  *\n  * Example: \\include LLT_example.cpp\n  * Output: \\verbinclude LLT_example.out\n  *\n  * This class supports the \\link InplaceDecomposition inplace decomposition \\endlink mechanism.\n  *\n  * \\sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT\n  */\n /* HEY THIS DOX IS DISABLED BECAUSE THERE's A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH)\n  * Note that during the decomposition, only the upper triangular part of A is considered. Therefore,\n  * the strict lower part does not have to store correct values.\n  */\ntemplate<typename _MatrixType, int _UpLo> class LLT\n{\n  public:\n    typedef _MatrixType MatrixType;\n    enum {\n      RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;\n    typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n    typedef typename MatrixType::StorageIndex StorageIndex;\n\n    enum {\n      PacketSize = internal::packet_traits<Scalar>::size,\n      AlignmentMask = int(PacketSize)-1,\n      UpLo = _UpLo\n    };\n\n    typedef internal::LLT_Traits<MatrixType,UpLo> Traits;\n\n    /**\n      * \\brief Default Constructor.\n      *\n      * The default constructor is useful in cases in which the user intends to\n      * perform decompositions via LLT::compute(const MatrixType&).\n      */\n    LLT() : m_matrix(), m_isInitialized(false) {}\n\n    /** \\brief Default Constructor with memory preallocation\n      *\n      * Like the default constructor but with preallocation of the internal data\n      * according to the specified problem \\a size.\n      * \\sa LLT()\n      */\n    explicit LLT(Index size) : m_matrix(size, size),\n                    
m_isInitialized(false) {}\n\n    template<typename InputType>\n    explicit LLT(const EigenBase<InputType>& matrix)\n      : m_matrix(matrix.rows(), matrix.cols()),\n        m_isInitialized(false)\n    {\n      compute(matrix.derived());\n    }\n\n    /** \\brief Constructs a LDLT factorization from a given matrix\n      *\n      * This overloaded constructor is provided for \\link InplaceDecomposition inplace decomposition \\endlink when\n      * \\c MatrixType is a Eigen::Ref.\n      *\n      * \\sa LLT(const EigenBase&)\n      */\n    template<typename InputType>\n    explicit LLT(EigenBase<InputType>& matrix)\n      : m_matrix(matrix.derived()),\n        m_isInitialized(false)\n    {\n      compute(matrix.derived());\n    }\n\n    /** \\returns a view of the upper triangular matrix U */\n    inline typename Traits::MatrixU matrixU() const\n    {\n      eigen_assert(m_isInitialized && \"LLT is not initialized.\");\n      return Traits::getU(m_matrix);\n    }\n\n    /** \\returns a view of the lower triangular matrix L */\n    inline typename Traits::MatrixL matrixL() const\n    {\n      eigen_assert(m_isInitialized && \"LLT is not initialized.\");\n      return Traits::getL(m_matrix);\n    }\n\n    /** \\returns the solution x of \\f$ A x = b \\f$ using the current decomposition of A.\n      *\n      * Since this LLT class assumes anyway that the matrix A is invertible, the solution\n      * theoretically exists and is unique regardless of b.\n      *\n      * Example: \\include LLT_solve.cpp\n      * Output: \\verbinclude LLT_solve.out\n      *\n      * \\sa solveInPlace(), MatrixBase::llt(), SelfAdjointView::llt()\n      */\n    template<typename Rhs>\n    inline const Solve<LLT, Rhs>\n    solve(const MatrixBase<Rhs>& b) const\n    {\n      eigen_assert(m_isInitialized && \"LLT is not initialized.\");\n      eigen_assert(m_matrix.rows()==b.rows()\n                && \"LLT::solve(): invalid number of rows of the right hand side matrix b\");\n      return 
Solve<LLT, Rhs>(*this, b.derived());\n    }\n\n    template<typename Derived>\n    void solveInPlace(MatrixBase<Derived> &bAndX) const;\n\n    template<typename InputType>\n    LLT& compute(const EigenBase<InputType>& matrix);\n\n    /** \\returns an estimate of the reciprocal condition number of the matrix of\n      *  which \\c *this is the Cholesky decomposition.\n      */\n    RealScalar rcond() const\n    {\n      eigen_assert(m_isInitialized && \"LLT is not initialized.\");\n      eigen_assert(m_info == Success && \"LLT failed because matrix appears to be negative\");\n      return internal::rcond_estimate_helper(m_l1_norm, *this);\n    }\n\n    /** \\returns the LLT decomposition matrix\n      *\n      * TODO: document the storage layout\n      */\n    inline const MatrixType& matrixLLT() const\n    {\n      eigen_assert(m_isInitialized && \"LLT is not initialized.\");\n      return m_matrix;\n    }\n\n    MatrixType reconstructedMatrix() const;\n\n\n    /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was succesful,\n      *          \\c NumericalIssue if the matrix.appears to be negative.\n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"LLT is not initialized.\");\n      return m_info;\n    }\n\n    /** \\returns the adjoint of \\c *this, that is, a const reference to the decomposition itself as the underlying matrix is self-adjoint.\n      *\n      * This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as:\n      * \\code x = decomposition.adjoint().solve(b) \\endcode\n      */\n    const LLT& adjoint() const { return *this; };\n\n    inline Index rows() const { return m_matrix.rows(); }\n    inline Index cols() const { return m_matrix.cols(); }\n\n    template<typename VectorType>\n    LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);\n\n    #ifndef 
EIGEN_PARSED_BY_DOXYGEN\n    template<typename RhsType, typename DstType>\n    void _solve_impl(const RhsType &rhs, DstType &dst) const;\n    #endif\n\n  protected:\n\n    static void check_template_parameters()\n    {\n      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);\n    }\n\n    /** \\internal\n      * Used to compute and store L\n      * The strict upper part is not used and even not initialized.\n      */\n    MatrixType m_matrix;\n    RealScalar m_l1_norm;\n    bool m_isInitialized;\n    ComputationInfo m_info;\n};\n\nnamespace internal {\n\ntemplate<typename Scalar, int UpLo> struct llt_inplace;\n\ntemplate<typename MatrixType, typename VectorType>\nstatic Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma)\n{\n  using std::sqrt;\n  typedef typename MatrixType::Scalar Scalar;\n  typedef typename MatrixType::RealScalar RealScalar;\n  typedef typename MatrixType::ColXpr ColXpr;\n  typedef typename internal::remove_all<ColXpr>::type ColXprCleaned;\n  typedef typename ColXprCleaned::SegmentReturnType ColXprSegment;\n  typedef Matrix<Scalar,Dynamic,1> TempVectorType;\n  typedef typename TempVectorType::SegmentReturnType TempVecSegment;\n\n  Index n = mat.cols();\n  eigen_assert(mat.rows()==n && vec.size()==n);\n\n  TempVectorType temp;\n\n  if(sigma>0)\n  {\n    // This version is based on Givens rotations.\n    // It is faster than the other one below, but only works for updates,\n    // i.e., for sigma > 0\n    temp = sqrt(sigma) * vec;\n\n    for(Index i=0; i<n; ++i)\n    {\n      JacobiRotation<Scalar> g;\n      g.makeGivens(mat(i,i), -temp(i), &mat(i,i));\n\n      Index rs = n-i-1;\n      if(rs>0)\n      {\n        ColXprSegment x(mat.col(i).tail(rs));\n        TempVecSegment y(temp.tail(rs));\n        apply_rotation_in_the_plane(x, y, g);\n      }\n    }\n  }\n  else\n  {\n    temp = vec;\n    RealScalar beta = 1;\n    for(Index j=0; j<n; ++j)\n    {\n      RealScalar Ljj = 
numext::real(mat.coeff(j,j));\n      RealScalar dj = numext::abs2(Ljj);\n      Scalar wj = temp.coeff(j);\n      RealScalar swj2 = sigma*numext::abs2(wj);\n      RealScalar gamma = dj*beta + swj2;\n\n      RealScalar x = dj + swj2/beta;\n      if (x<=RealScalar(0))\n        return j;\n      RealScalar nLjj = sqrt(x);\n      mat.coeffRef(j,j) = nLjj;\n      beta += swj2/dj;\n\n      // Update the terms of L\n      Index rs = n-j-1;\n      if(rs)\n      {\n        temp.tail(rs) -= (wj/Ljj) * mat.col(j).tail(rs);\n        if(gamma != 0)\n          mat.col(j).tail(rs) = (nLjj/Ljj) * mat.col(j).tail(rs) + (nLjj * sigma*numext::conj(wj)/gamma)*temp.tail(rs);\n      }\n    }\n  }\n  return -1;\n}\n\ntemplate<typename Scalar> struct llt_inplace<Scalar, Lower>\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  template<typename MatrixType>\n  static Index unblocked(MatrixType& mat)\n  {\n    using std::sqrt;\n\n    eigen_assert(mat.rows()==mat.cols());\n    const Index size = mat.rows();\n    for(Index k = 0; k < size; ++k)\n    {\n      Index rs = size-k-1; // remaining size\n\n      Block<MatrixType,Dynamic,1> A21(mat,k+1,k,rs,1);\n      Block<MatrixType,1,Dynamic> A10(mat,k,0,1,k);\n      Block<MatrixType,Dynamic,Dynamic> A20(mat,k+1,0,rs,k);\n\n      RealScalar x = numext::real(mat.coeff(k,k));\n      if (k>0) x -= A10.squaredNorm();\n      if (x<=RealScalar(0))\n        return k;\n      mat.coeffRef(k,k) = x = sqrt(x);\n      if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint();\n      if (rs>0) A21 /= x;\n    }\n    return -1;\n  }\n\n  template<typename MatrixType>\n  static Index blocked(MatrixType& m)\n  {\n    eigen_assert(m.rows()==m.cols());\n    Index size = m.rows();\n    if(size<32)\n      return unblocked(m);\n\n    Index blockSize = size/8;\n    blockSize = (blockSize/16)*16;\n    blockSize = (std::min)((std::max)(blockSize,Index(8)), Index(128));\n\n    for (Index k=0; k<size; k+=blockSize)\n    {\n      // partition the matrix:\n      //       
A00 |  -  |  -\n      // lu  = A10 | A11 |  -\n      //       A20 | A21 | A22\n      Index bs = (std::min)(blockSize, size-k);\n      Index rs = size - k - bs;\n      Block<MatrixType,Dynamic,Dynamic> A11(m,k,   k,   bs,bs);\n      Block<MatrixType,Dynamic,Dynamic> A21(m,k+bs,k,   rs,bs);\n      Block<MatrixType,Dynamic,Dynamic> A22(m,k+bs,k+bs,rs,rs);\n\n      Index ret;\n      if((ret=unblocked(A11))>=0) return k+ret;\n      if(rs>0) A11.adjoint().template triangularView<Upper>().template solveInPlace<OnTheRight>(A21);\n      if(rs>0) A22.template selfadjointView<Lower>().rankUpdate(A21,typename NumTraits<RealScalar>::Literal(-1)); // bottleneck\n    }\n    return -1;\n  }\n\n  template<typename MatrixType, typename VectorType>\n  static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma)\n  {\n    return Eigen::internal::llt_rank_update_lower(mat, vec, sigma);\n  }\n};\n\ntemplate<typename Scalar> struct llt_inplace<Scalar, Upper>\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n\n  template<typename MatrixType>\n  static EIGEN_STRONG_INLINE Index unblocked(MatrixType& mat)\n  {\n    Transpose<MatrixType> matt(mat);\n    return llt_inplace<Scalar, Lower>::unblocked(matt);\n  }\n  template<typename MatrixType>\n  static EIGEN_STRONG_INLINE Index blocked(MatrixType& mat)\n  {\n    Transpose<MatrixType> matt(mat);\n    return llt_inplace<Scalar, Lower>::blocked(matt);\n  }\n  template<typename MatrixType, typename VectorType>\n  static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma)\n  {\n    Transpose<MatrixType> matt(mat);\n    return llt_inplace<Scalar, Lower>::rankUpdate(matt, vec.conjugate(), sigma);\n  }\n};\n\ntemplate<typename MatrixType> struct LLT_Traits<MatrixType,Lower>\n{\n  typedef const TriangularView<const MatrixType, Lower> MatrixL;\n  typedef const TriangularView<const typename MatrixType::AdjointReturnType, Upper> MatrixU;\n  static inline MatrixL getL(const MatrixType& m) 
{ return MatrixL(m); }\n  static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }\n  static bool inplace_decomposition(MatrixType& m)\n  { return llt_inplace<typename MatrixType::Scalar, Lower>::blocked(m)==-1; }\n};\n\ntemplate<typename MatrixType> struct LLT_Traits<MatrixType,Upper>\n{\n  typedef const TriangularView<const typename MatrixType::AdjointReturnType, Lower> MatrixL;\n  typedef const TriangularView<const MatrixType, Upper> MatrixU;\n  static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); }\n  static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); }\n  static bool inplace_decomposition(MatrixType& m)\n  { return llt_inplace<typename MatrixType::Scalar, Upper>::blocked(m)==-1; }\n};\n\n} // end namespace internal\n\n/** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \\a matrix\n  *\n  * \\returns a reference to *this\n  *\n  * Example: \\include TutorialLinAlgComputeTwice.cpp\n  * Output: \\verbinclude TutorialLinAlgComputeTwice.out\n  */\ntemplate<typename MatrixType, int _UpLo>\ntemplate<typename InputType>\nLLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>& a)\n{\n  check_template_parameters();\n\n  eigen_assert(a.rows()==a.cols());\n  const Index size = a.rows();\n  m_matrix.resize(size, size);\n  m_matrix = a.derived();\n\n  // Compute matrix L1 norm = max abs column sum.\n  m_l1_norm = RealScalar(0);\n  // TODO move this code to SelfAdjointView\n  for (Index col = 0; col < size; ++col) {\n    RealScalar abs_col_sum;\n    if (_UpLo == Lower)\n      abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>();\n    else\n      abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>();\n    if (abs_col_sum > m_l1_norm)\n      m_l1_norm = abs_col_sum;\n  }\n\n  m_isInitialized = true;\n  bool ok = 
Traits::inplace_decomposition(m_matrix);\n  m_info = ok ? Success : NumericalIssue;\n\n  return *this;\n}\n\n/** Performs a rank one update (or dowdate) of the current decomposition.\n  * If A = LL^* before the rank one update,\n  * then after it we have LL^* = A + sigma * v v^* where \\a v must be a vector\n  * of same dimension.\n  */\ntemplate<typename _MatrixType, int _UpLo>\ntemplate<typename VectorType>\nLLT<_MatrixType,_UpLo> LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, const RealScalar& sigma)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType);\n  eigen_assert(v.size()==m_matrix.cols());\n  eigen_assert(m_isInitialized);\n  if(internal::llt_inplace<typename MatrixType::Scalar, UpLo>::rankUpdate(m_matrix,v,sigma)>=0)\n    m_info = NumericalIssue;\n  else\n    m_info = Success;\n\n  return *this;\n}\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate<typename _MatrixType,int _UpLo>\ntemplate<typename RhsType, typename DstType>\nvoid LLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const\n{\n  dst = rhs;\n  solveInPlace(dst);\n}\n#endif\n\n/** \\internal use x = llt_object.solve(x);\n  *\n  * This is the \\em in-place version of solve().\n  *\n  * \\param bAndX represents both the right-hand side matrix b and result x.\n  *\n  * This version avoids a copy when the right hand side matrix b is not needed anymore.\n  *\n  * \\sa LLT::solve(), MatrixBase::llt()\n  */\ntemplate<typename MatrixType, int _UpLo>\ntemplate<typename Derived>\nvoid LLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const\n{\n  eigen_assert(m_isInitialized && \"LLT is not initialized.\");\n  eigen_assert(m_matrix.rows()==bAndX.rows());\n  matrixL().solveInPlace(bAndX);\n  matrixU().solveInPlace(bAndX);\n}\n\n/** \\returns the matrix represented by the decomposition,\n * i.e., it returns the product: L L^*.\n * This function is provided for debug purpose. 
*/\ntemplate<typename MatrixType, int _UpLo>\nMatrixType LLT<MatrixType,_UpLo>::reconstructedMatrix() const\n{\n  eigen_assert(m_isInitialized && \"LLT is not initialized.\");\n  return matrixL() * matrixL().adjoint().toDenseMatrix();\n}\n\n/** \\cholesky_module\n  * \\returns the LLT decomposition of \\c *this\n  * \\sa SelfAdjointView::llt()\n  */\ntemplate<typename Derived>\ninline const LLT<typename MatrixBase<Derived>::PlainObject>\nMatrixBase<Derived>::llt() const\n{\n  return LLT<PlainObject>(derived());\n}\n\n/** \\cholesky_module\n  * \\returns the LLT decomposition of \\c *this\n  * \\sa SelfAdjointView::llt()\n  */\ntemplate<typename MatrixType, unsigned int UpLo>\ninline const LLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>\nSelfAdjointView<MatrixType, UpLo>::llt() const\n{\n  return LLT<PlainObject,UpLo>(m_matrix);\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_LLT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Cholesky/LLT_LAPACKE.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to LAPACKe\n *     LLt decomposition based on LAPACKE_?potrf function.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_LLT_LAPACKE_H\n#define EIGEN_LLT_LAPACKE_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename Scalar> struct lapacke_llt;\n\n#define EIGEN_LAPACKE_LLT(EIGTYPE, BLASTYPE, LAPACKE_PREFIX) \\\ntemplate<> struct lapacke_llt<EIGTYPE> \\\n{ \\\n  template<typename MatrixType> \\\n  static inline Index potrf(MatrixType& m, char uplo) \\\n  { \\\n    lapack_int matrix_order; \\\n    lapack_int size, lda, info, StorageOrder; \\\n    EIGTYPE* a; \\\n    eigen_assert(m.rows()==m.cols()); \\\n    /* Set up parameters for ?potrf */ \\\n    size = convert_index<lapack_int>(m.rows()); \\\n    StorageOrder = MatrixType::Flags&RowMajorBit?RowMajor:ColMajor; \\\n    matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \\\n    a = &(m.coeffRef(0,0)); \\\n    lda = convert_index<lapack_int>(m.outerStride()); \\\n\\\n    info = LAPACKE_##LAPACKE_PREFIX##potrf( matrix_order, uplo, size, (BLASTYPE*)a, lda ); \\\n    info = (info==0) ? -1 : info>0 ? 
info-1 : size; \\\n    return info; \\\n  } \\\n}; \\\ntemplate<> struct llt_inplace<EIGTYPE, Lower> \\\n{ \\\n  template<typename MatrixType> \\\n  static Index blocked(MatrixType& m) \\\n  { \\\n    return lapacke_llt<EIGTYPE>::potrf(m, 'L'); \\\n  } \\\n  template<typename MatrixType, typename VectorType> \\\n  static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \\\n  { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \\\n}; \\\ntemplate<> struct llt_inplace<EIGTYPE, Upper> \\\n{ \\\n  template<typename MatrixType> \\\n  static Index blocked(MatrixType& m) \\\n  { \\\n    return lapacke_llt<EIGTYPE>::potrf(m, 'U'); \\\n  } \\\n  template<typename MatrixType, typename VectorType> \\\n  static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \\\n  { \\\n    Transpose<MatrixType> matt(mat); \\\n    return llt_inplace<EIGTYPE, Lower>::rankUpdate(matt, vec.conjugate(), sigma); \\\n  } \\\n};\n\nEIGEN_LAPACKE_LLT(double, double, d)\nEIGEN_LAPACKE_LLT(float, float, s)\nEIGEN_LAPACKE_LLT(dcomplex, lapack_complex_double, z)\nEIGEN_LAPACKE_LLT(scomplex, lapack_complex_float, c)\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_LLT_LAPACKE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/CholmodSupport/CholmodSupport.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_CHOLMODSUPPORT_H\n#define EIGEN_CHOLMODSUPPORT_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename Scalar> struct cholmod_configure_matrix;\n\ntemplate<> struct cholmod_configure_matrix<double> {\n  template<typename CholmodType>\n  static void run(CholmodType& mat) {\n    mat.xtype = CHOLMOD_REAL;\n    mat.dtype = CHOLMOD_DOUBLE;\n  }\n};\n\ntemplate<> struct cholmod_configure_matrix<std::complex<double> > {\n  template<typename CholmodType>\n  static void run(CholmodType& mat) {\n    mat.xtype = CHOLMOD_COMPLEX;\n    mat.dtype = CHOLMOD_DOUBLE;\n  }\n};\n\n// Other scalar types are not yet supported by Cholmod\n// template<> struct cholmod_configure_matrix<float> {\n//   template<typename CholmodType>\n//   static void run(CholmodType& mat) {\n//     mat.xtype = CHOLMOD_REAL;\n//     mat.dtype = CHOLMOD_SINGLE;\n//   }\n// };\n//\n// template<> struct cholmod_configure_matrix<std::complex<float> > {\n//   template<typename CholmodType>\n//   static void run(CholmodType& mat) {\n//     mat.xtype = CHOLMOD_COMPLEX;\n//     mat.dtype = CHOLMOD_SINGLE;\n//   }\n// };\n\n} // namespace internal\n\n/** Wraps the Eigen sparse matrix \\a mat into a Cholmod sparse matrix object.\n  * Note that the data are shared.\n  */\ntemplate<typename _Scalar, int _Options, typename _StorageIndex>\ncholmod_sparse viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_StorageIndex> > mat)\n{\n  cholmod_sparse res;\n  res.nzmax   = mat.nonZeros();\n  res.nrow    = mat.rows();\n  res.ncol    = mat.cols();\n  res.p       = mat.outerIndexPtr();\n  res.i       = 
mat.innerIndexPtr();\n  res.x       = mat.valuePtr();\n  res.z       = 0;\n  res.sorted  = 1;\n  if(mat.isCompressed())\n  {\n    res.packed  = 1;\n    res.nz = 0;\n  }\n  else\n  {\n    res.packed  = 0;\n    res.nz = mat.innerNonZeroPtr();\n  }\n\n  res.dtype   = 0;\n  res.stype   = -1;\n  \n  if (internal::is_same<_StorageIndex,int>::value)\n  {\n    res.itype = CHOLMOD_INT;\n  }\n  else if (internal::is_same<_StorageIndex,long>::value)\n  {\n    res.itype = CHOLMOD_LONG;\n  }\n  else\n  {\n    eigen_assert(false && \"Index type not supported yet\");\n  }\n\n  // setup res.xtype\n  internal::cholmod_configure_matrix<_Scalar>::run(res);\n  \n  res.stype = 0;\n  \n  return res;\n}\n\ntemplate<typename _Scalar, int _Options, typename _Index>\nconst cholmod_sparse viewAsCholmod(const SparseMatrix<_Scalar,_Options,_Index>& mat)\n{\n  cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.const_cast_derived()));\n  return res;\n}\n\ntemplate<typename _Scalar, int _Options, typename _Index>\nconst cholmod_sparse viewAsCholmod(const SparseVector<_Scalar,_Options,_Index>& mat)\n{\n  cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.const_cast_derived()));\n  return res;\n}\n\n/** Returns a view of the Eigen sparse matrix \\a mat as Cholmod sparse matrix.\n  * The data are not copied but shared. 
*/\ntemplate<typename _Scalar, int _Options, typename _Index, unsigned int UpLo>\ncholmod_sparse viewAsCholmod(const SparseSelfAdjointView<const SparseMatrix<_Scalar,_Options,_Index>, UpLo>& mat)\n{\n  cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.matrix().const_cast_derived()));\n  \n  if(UpLo==Upper) res.stype =  1;\n  if(UpLo==Lower) res.stype = -1;\n  // swap stype for rowmajor matrices (only works for real matrices)\n  EIGEN_STATIC_ASSERT((_Options & RowMajorBit) == 0 || NumTraits<_Scalar>::IsComplex == 0, THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);\n  if(_Options & RowMajorBit) res.stype *=-1;\n\n  return res;\n}\n\n/** Returns a view of the Eigen \\b dense matrix \\a mat as Cholmod dense matrix.\n  * The data are not copied but shared. */\ntemplate<typename Derived>\ncholmod_dense viewAsCholmod(MatrixBase<Derived>& mat)\n{\n  EIGEN_STATIC_ASSERT((internal::traits<Derived>::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);\n  typedef typename Derived::Scalar Scalar;\n\n  cholmod_dense res;\n  res.nrow   = mat.rows();\n  res.ncol   = mat.cols();\n  res.nzmax  = res.nrow * res.ncol;\n  res.d      = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride();\n  res.x      = (void*)(mat.derived().data());\n  res.z      = 0;\n\n  internal::cholmod_configure_matrix<Scalar>::run(res);\n\n  return res;\n}\n\n/** Returns a view of the Cholmod sparse matrix \\a cm as an Eigen sparse matrix.\n  * The data are not copied but shared. 
*/\ntemplate<typename Scalar, int Flags, typename StorageIndex>\nMappedSparseMatrix<Scalar,Flags,StorageIndex> viewAsEigen(cholmod_sparse& cm)\n{\n  return MappedSparseMatrix<Scalar,Flags,StorageIndex>\n         (cm.nrow, cm.ncol, static_cast<StorageIndex*>(cm.p)[cm.ncol],\n          static_cast<StorageIndex*>(cm.p), static_cast<StorageIndex*>(cm.i),static_cast<Scalar*>(cm.x) );\n}\n\nnamespace internal {\n\n// template specializations for int and long that call the correct cholmod method\n\n#define EIGEN_CHOLMOD_SPECIALIZE0(ret, name) \\\n    template<typename _StorageIndex> ret cm_ ## name       (cholmod_common &Common) { return cholmod_ ## name   (&Common); } \\\n    template<>                       ret cm_ ## name<long> (cholmod_common &Common) { return cholmod_l_ ## name (&Common); }\n\n#define EIGEN_CHOLMOD_SPECIALIZE1(ret, name, t1, a1) \\\n    template<typename _StorageIndex> ret cm_ ## name       (t1& a1, cholmod_common &Common) { return cholmod_ ## name   (&a1, &Common); } \\\n    template<>                       ret cm_ ## name<long> (t1& a1, cholmod_common &Common) { return cholmod_l_ ## name (&a1, &Common); }\n\nEIGEN_CHOLMOD_SPECIALIZE0(int, start)\nEIGEN_CHOLMOD_SPECIALIZE0(int, finish)\n\nEIGEN_CHOLMOD_SPECIALIZE1(int, free_factor, cholmod_factor*, L)\nEIGEN_CHOLMOD_SPECIALIZE1(int, free_dense,  cholmod_dense*,  X)\nEIGEN_CHOLMOD_SPECIALIZE1(int, free_sparse, cholmod_sparse*, A)\n\nEIGEN_CHOLMOD_SPECIALIZE1(cholmod_factor*, analyze, cholmod_sparse, A)\n\ntemplate<typename _StorageIndex> cholmod_dense*  cm_solve         (int sys, cholmod_factor& L, cholmod_dense&  B, cholmod_common &Common) { return cholmod_solve     (sys, &L, &B, &Common); }\ntemplate<>                       cholmod_dense*  cm_solve<long>   (int sys, cholmod_factor& L, cholmod_dense&  B, cholmod_common &Common) { return cholmod_l_solve   (sys, &L, &B, &Common); }\n\ntemplate<typename _StorageIndex> cholmod_sparse* cm_spsolve       (int sys, cholmod_factor& L, cholmod_sparse& B, 
cholmod_common &Common) { return cholmod_spsolve   (sys, &L, &B, &Common); }\ntemplate<>                       cholmod_sparse* cm_spsolve<long> (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_l_spsolve (sys, &L, &B, &Common); }\n\ntemplate<typename _StorageIndex>\nint  cm_factorize_p       (cholmod_sparse*  A, double beta[2], _StorageIndex* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_factorize_p   (A, beta, fset, fsize, L, &Common); }\ntemplate<>\nint  cm_factorize_p<long> (cholmod_sparse*  A, double beta[2], long* fset,          std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_l_factorize_p (A, beta, fset, fsize, L, &Common); }\n\n#undef EIGEN_CHOLMOD_SPECIALIZE0\n#undef EIGEN_CHOLMOD_SPECIALIZE1\n\n}  // namespace internal\n\n\nenum CholmodMode {\n  CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt\n};\n\n\n/** \\ingroup CholmodSupport_Module\n  * \\class CholmodBase\n  * \\brief The base class for the direct Cholesky factorization of Cholmod\n  * \\sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT\n  */\ntemplate<typename _MatrixType, int _UpLo, typename Derived>\nclass CholmodBase : public SparseSolverBase<Derived>\n{\n  protected:\n    typedef SparseSolverBase<Derived> Base;\n    using Base::derived;\n    using Base::m_isInitialized;\n  public:\n    typedef _MatrixType MatrixType;\n    enum { UpLo = _UpLo };\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n    typedef MatrixType CholMatrixType;\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    enum {\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n\n  public:\n\n    CholmodBase()\n      : m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false)\n    {\n      
EIGEN_STATIC_ASSERT((internal::is_same<double,RealScalar>::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY);\n      m_shiftOffset[0] = m_shiftOffset[1] = 0.0;\n      internal::cm_start<StorageIndex>(m_cholmod);\n    }\n\n    explicit CholmodBase(const MatrixType& matrix)\n      : m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false)\n    {\n      EIGEN_STATIC_ASSERT((internal::is_same<double,RealScalar>::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY);\n      m_shiftOffset[0] = m_shiftOffset[1] = 0.0;\n      internal::cm_start<StorageIndex>(m_cholmod);\n      compute(matrix);\n    }\n\n    ~CholmodBase()\n    {\n      if(m_cholmodFactor)\n        internal::cm_free_factor<StorageIndex>(m_cholmodFactor, m_cholmod);\n      internal::cm_finish<StorageIndex>(m_cholmod);\n    }\n    \n    inline StorageIndex cols() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }\n    inline StorageIndex rows() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }\n    \n    /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was successful,\n      *          \\c NumericalIssue if the matrix.appears to be negative.\n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"Decomposition is not initialized.\");\n      return m_info;\n    }\n\n    /** Computes the sparse Cholesky decomposition of \\a matrix */\n    Derived& compute(const MatrixType& matrix)\n    {\n      analyzePattern(matrix);\n      factorize(matrix);\n      return derived();\n    }\n    \n    /** Performs a symbolic decomposition on the sparsity pattern of \\a matrix.\n      *\n      * This function is particularly useful when solving for several problems having the same structure.\n      * \n      * \\sa factorize()\n      */\n    void analyzePattern(const MatrixType& matrix)\n    {\n      if(m_cholmodFactor)\n      {\n        
internal::cm_free_factor<StorageIndex>(m_cholmodFactor, m_cholmod);\n        m_cholmodFactor = 0;\n      }\n      cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());\n      m_cholmodFactor = internal::cm_analyze<StorageIndex>(A, m_cholmod);\n      \n      this->m_isInitialized = true;\n      this->m_info = Success;\n      m_analysisIsOk = true;\n      m_factorizationIsOk = false;\n    }\n    \n    /** Performs a numeric decomposition of \\a matrix\n      *\n      * The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.\n      *\n      * \\sa analyzePattern()\n      */\n    void factorize(const MatrixType& matrix)\n    {\n      eigen_assert(m_analysisIsOk && \"You must first call analyzePattern()\");\n      cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());\n      internal::cm_factorize_p<StorageIndex>(&A, m_shiftOffset, 0, 0, m_cholmodFactor, m_cholmod);\n\n      // If the factorization failed, minor is the column at which it did. On success minor == n.\n      this->m_info = (m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue);\n      m_factorizationIsOk = true;\n    }\n    \n    /** Returns a reference to the Cholmod's configuration structure to get a full control over the performed operations.\n     *  See the Cholmod user guide for details. 
*/\n    cholmod_common& cholmod() { return m_cholmod; }\n    \n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** \\internal */\n    template<typename Rhs,typename Dest>\n    void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const\n    {\n      eigen_assert(m_factorizationIsOk && \"The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()\");\n      const Index size = m_cholmodFactor->n;\n      EIGEN_UNUSED_VARIABLE(size);\n      eigen_assert(size==b.rows());\n      \n      // Cholmod needs column-major storage without inner-stride, which corresponds to the default behavior of Ref.\n      Ref<const Matrix<typename Rhs::Scalar,Dynamic,Dynamic,ColMajor> > b_ref(b.derived());\n\n      cholmod_dense b_cd = viewAsCholmod(b_ref);\n      cholmod_dense* x_cd = internal::cm_solve<StorageIndex>(CHOLMOD_A, *m_cholmodFactor, b_cd, m_cholmod);\n      if(!x_cd)\n      {\n        this->m_info = NumericalIssue;\n        return;\n      }\n      // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)\n      // NOTE Actually, the copy can be avoided by calling cholmod_solve2 instead of cholmod_solve\n      dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols());\n      internal::cm_free_dense<StorageIndex>(x_cd, m_cholmod);\n    }\n    \n    /** \\internal */\n    template<typename RhsDerived, typename DestDerived>\n    void _solve_impl(const SparseMatrixBase<RhsDerived> &b, SparseMatrixBase<DestDerived> &dest) const\n    {\n      eigen_assert(m_factorizationIsOk && \"The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()\");\n      const Index size = m_cholmodFactor->n;\n      EIGEN_UNUSED_VARIABLE(size);\n      eigen_assert(size==b.rows());\n\n      // note: cs stands for Cholmod Sparse\n      Ref<SparseMatrix<typename 
RhsDerived::Scalar,ColMajor,typename RhsDerived::StorageIndex> > b_ref(b.const_cast_derived());\n      cholmod_sparse b_cs = viewAsCholmod(b_ref);\n      cholmod_sparse* x_cs = internal::cm_spsolve<StorageIndex>(CHOLMOD_A, *m_cholmodFactor, b_cs, m_cholmod);\n      if(!x_cs)\n      {\n        this->m_info = NumericalIssue;\n        return;\n      }\n      // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)\n      // NOTE cholmod_spsolve in fact just calls the dense solver for blocks of 4 columns at a time (similar to Eigen's sparse solver)\n      dest.derived() = viewAsEigen<typename DestDerived::Scalar,ColMajor,typename DestDerived::StorageIndex>(*x_cs);\n      internal::cm_free_sparse<StorageIndex>(x_cs, m_cholmod);\n    }\n    #endif // EIGEN_PARSED_BY_DOXYGEN\n    \n    \n    /** Sets the shift parameter that will be used to adjust the diagonal coefficients during the numerical factorization.\n      *\n      * During the numerical factorization, an offset term is added to the diagonal coefficients:\\n\n      * \\c d_ii = \\a offset + \\c d_ii\n      *\n      * The default is \\a offset=0.\n      *\n      * \\returns a reference to \\c *this.\n      */\n    Derived& setShift(const RealScalar& offset)\n    {\n      m_shiftOffset[0] = double(offset);\n      return derived();\n    }\n    \n    /** \\returns the determinant of the underlying matrix from the current factorization */\n    Scalar determinant() const\n    {\n      using std::exp;\n      return exp(logDeterminant());\n    }\n\n    /** \\returns the log determinant of the underlying matrix from the current factorization */\n    Scalar logDeterminant() const\n    {\n      using std::log;\n      using numext::real;\n      eigen_assert(m_factorizationIsOk && \"The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()\");\n\n      RealScalar logDet = 0;\n      Scalar *x = static_cast<Scalar*>(m_cholmodFactor->x);\n    
  if (m_cholmodFactor->is_super)\n      {\n        // Supernodal factorization stored as a packed list of dense column-major blocs,\n        // as described by the following structure:\n\n        // super[k] == index of the first column of the j-th super node\n        StorageIndex *super = static_cast<StorageIndex*>(m_cholmodFactor->super);\n        // pi[k] == offset to the description of row indices\n        StorageIndex *pi = static_cast<StorageIndex*>(m_cholmodFactor->pi);\n        // px[k] == offset to the respective dense block\n        StorageIndex *px = static_cast<StorageIndex*>(m_cholmodFactor->px);\n\n        Index nb_super_nodes = m_cholmodFactor->nsuper;\n        for (Index k=0; k < nb_super_nodes; ++k)\n        {\n          StorageIndex ncols = super[k + 1] - super[k];\n          StorageIndex nrows = pi[k + 1] - pi[k];\n\n          Map<const Array<Scalar,1,Dynamic>, 0, InnerStride<> > sk(x + px[k], ncols, InnerStride<>(nrows+1));\n          logDet += sk.real().log().sum();\n        }\n      }\n      else\n      {\n        // Simplicial factorization stored as standard CSC matrix.\n        StorageIndex *p = static_cast<StorageIndex*>(m_cholmodFactor->p);\n        Index size = m_cholmodFactor->n;\n        for (Index k=0; k<size; ++k)\n          logDet += log(real( x[p[k]] ));\n      }\n      if (m_cholmodFactor->is_ll)\n        logDet *= 2.0;\n      return logDet;\n    };\n\n    template<typename Stream>\n    void dumpMemory(Stream& /*s*/)\n    {}\n    \n  protected:\n    mutable cholmod_common m_cholmod;\n    cholmod_factor* m_cholmodFactor;\n    double m_shiftOffset[2];\n    mutable ComputationInfo m_info;\n    int m_factorizationIsOk;\n    int m_analysisIsOk;\n};\n\n/** \\ingroup CholmodSupport_Module\n  * \\class CholmodSimplicialLLT\n  * \\brief A simplicial direct Cholesky (LLT) factorization and solver based on Cholmod\n  *\n  * This class allows to solve for A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization\n  * using 
the Cholmod library.\n  * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Therefore, it has little practical interest.\n  * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices\n  * X and B can be either dense or sparse.\n  *\n  * \\tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  * \\tparam _UpLo the triangular part that will be used for the computations. It can be Lower\n  *               or Upper. Default is Lower.\n  *\n  * \\implsparsesolverconcept\n  *\n  * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.\n  *\n  * \\warning Only double precision real and complex scalar types are supported by Cholmod.\n  *\n  * \\sa \\ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLLT\n  */\ntemplate<typename _MatrixType, int _UpLo = Lower>\nclass CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT<_MatrixType, _UpLo> >\n{\n    typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT> Base;\n    using Base::m_cholmod;\n    \n  public:\n    \n    typedef _MatrixType MatrixType;\n    \n    CholmodSimplicialLLT() : Base() { init(); }\n\n    CholmodSimplicialLLT(const MatrixType& matrix) : Base()\n    {\n      init();\n      this->compute(matrix);\n    }\n\n    ~CholmodSimplicialLLT() {}\n  protected:\n    void init()\n    {\n      m_cholmod.final_asis = 0;\n      m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;\n      m_cholmod.final_ll = 1;\n    }\n};\n\n\n/** \\ingroup CholmodSupport_Module\n  * \\class CholmodSimplicialLDLT\n  * \\brief A simplicial direct Cholesky (LDLT) factorization and solver based on Cholmod\n  *\n  * This class allows to solve for A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization\n  * using the Cholmod library.\n  * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. 
Therefore, it has little practical interest.\n  * The sparse matrix A must be selfadjoint and positive definite. The vectors or matrices\n  * X and B can be either dense or sparse.\n  *\n  * \\tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  * \\tparam _UpLo the triangular part that will be used for the computations. It can be Lower\n  *               or Upper. Default is Lower.\n  *\n  * \\implsparsesolverconcept\n  *\n  * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.\n  *\n  * \\warning Only double precision real and complex scalar types are supported by Cholmod.\n  *\n  * \\sa \\ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLDLT\n  */\ntemplate<typename _MatrixType, int _UpLo = Lower>\nclass CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT<_MatrixType, _UpLo> >\n{\n    typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT> Base;\n    using Base::m_cholmod;\n    \n  public:\n    \n    typedef _MatrixType MatrixType;\n    \n    CholmodSimplicialLDLT() : Base() { init(); }\n\n    CholmodSimplicialLDLT(const MatrixType& matrix) : Base()\n    {\n      init();\n      this->compute(matrix);\n    }\n\n    ~CholmodSimplicialLDLT() {}\n  protected:\n    void init()\n    {\n      m_cholmod.final_asis = 1;\n      m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;\n    }\n};\n\n/** \\ingroup CholmodSupport_Module\n  * \\class CholmodSupernodalLLT\n  * \\brief A supernodal Cholesky (LLT) factorization and solver based on Cholmod\n  *\n  * This class allows to solve for A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization\n  * using the Cholmod library.\n  * This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM.\n  * The sparse matrix A must be selfadjoint and positive definite. 
The vectors or matrices\n  * X and B can be either dense or sparse.\n  *\n  * \\tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  * \\tparam _UpLo the triangular part that will be used for the computations. It can be Lower\n  *               or Upper. Default is Lower.\n  *\n  * \\implsparsesolverconcept\n  *\n  * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.\n  *\n  * \\warning Only double precision real and complex scalar types are supported by Cholmod.\n  *\n  * \\sa \\ref TutorialSparseSolverConcept\n  */\ntemplate<typename _MatrixType, int _UpLo = Lower>\nclass CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT<_MatrixType, _UpLo> >\n{\n    typedef CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT> Base;\n    using Base::m_cholmod;\n    \n  public:\n    \n    typedef _MatrixType MatrixType;\n    \n    CholmodSupernodalLLT() : Base() { init(); }\n\n    CholmodSupernodalLLT(const MatrixType& matrix) : Base()\n    {\n      init();\n      this->compute(matrix);\n    }\n\n    ~CholmodSupernodalLLT() {}\n  protected:\n    void init()\n    {\n      m_cholmod.final_asis = 1;\n      m_cholmod.supernodal = CHOLMOD_SUPERNODAL;\n    }\n};\n\n/** \\ingroup CholmodSupport_Module\n  * \\class CholmodDecomposition\n  * \\brief A general Cholesky factorization and solver based on Cholmod\n  *\n  * This class allows to solve for A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization\n  * using the Cholmod library. The sparse matrix A must be selfadjoint and positive definite. 
The vectors or matrices\n  * X and B can be either dense or sparse.\n  *\n  * This variant permits to change the underlying Cholesky method at runtime.\n  * On the other hand, it does not provide access to the result of the factorization.\n  * The default is to let Cholmod automatically choose between a simplicial and supernodal factorization.\n  *\n  * \\tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  * \\tparam _UpLo the triangular part that will be used for the computations. It can be Lower\n  *               or Upper. Default is Lower.\n  *\n  * \\implsparsesolverconcept\n  *\n  * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.\n  *\n  * \\warning Only double precision real and complex scalar types are supported by Cholmod.\n  *\n  * \\sa \\ref TutorialSparseSolverConcept\n  */\ntemplate<typename _MatrixType, int _UpLo = Lower>\nclass CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecomposition<_MatrixType, _UpLo> >\n{\n    typedef CholmodBase<_MatrixType, _UpLo, CholmodDecomposition> Base;\n    using Base::m_cholmod;\n    \n  public:\n    \n    typedef _MatrixType MatrixType;\n    \n    CholmodDecomposition() : Base() { init(); }\n\n    CholmodDecomposition(const MatrixType& matrix) : Base()\n    {\n      init();\n      this->compute(matrix);\n    }\n\n    ~CholmodDecomposition() {}\n    \n    void setMode(CholmodMode mode)\n    {\n      switch(mode)\n      {\n        case CholmodAuto:\n          m_cholmod.final_asis = 1;\n          m_cholmod.supernodal = CHOLMOD_AUTO;\n          break;\n        case CholmodSimplicialLLt:\n          m_cholmod.final_asis = 0;\n          m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;\n          m_cholmod.final_ll = 1;\n          break;\n        case CholmodSupernodalLLt:\n          m_cholmod.final_asis = 1;\n          m_cholmod.supernodal = CHOLMOD_SUPERNODAL;\n          break;\n        case 
CholmodLDLt:\n          m_cholmod.final_asis = 1;\n          m_cholmod.supernodal = CHOLMOD_SIMPLICIAL;\n          break;\n        default:\n          break;\n      }\n    }\n  protected:\n    void init()\n    {\n      m_cholmod.final_asis = 1;\n      m_cholmod.supernodal = CHOLMOD_AUTO;\n    }\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_CHOLMODSUPPORT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/ArithmeticSequence.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ARITHMETIC_SEQUENCE_H\n#define EIGEN_ARITHMETIC_SEQUENCE_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n#if (!EIGEN_HAS_CXX11) || !((!EIGEN_COMP_GNUC) || EIGEN_COMP_GNUC>=48)\ntemplate<typename T> struct aseq_negate {};\n\ntemplate<> struct aseq_negate<Index> {\n  typedef Index type;\n};\n\ntemplate<int N> struct aseq_negate<FixedInt<N> > {\n  typedef FixedInt<-N> type;\n};\n\n// Compilation error in the following case:\ntemplate<> struct aseq_negate<FixedInt<DynamicIndex> > {};\n\ntemplate<typename FirstType,typename SizeType,typename IncrType,\n         bool FirstIsSymbolic=Symbolic::is_symbolic<FirstType>::value,\n         bool SizeIsSymbolic =Symbolic::is_symbolic<SizeType>::value>\nstruct aseq_reverse_first_type {\n  typedef Index type;\n};\n\ntemplate<typename FirstType,typename SizeType,typename IncrType>\nstruct aseq_reverse_first_type<FirstType,SizeType,IncrType,true,true> {\n  typedef Symbolic::AddExpr<FirstType,\n                            Symbolic::ProductExpr<Symbolic::AddExpr<SizeType,Symbolic::ValueExpr<FixedInt<-1> > >,\n                                                  Symbolic::ValueExpr<IncrType> >\n                           > type;\n};\n\ntemplate<typename SizeType,typename IncrType,typename EnableIf = void>\nstruct aseq_reverse_first_type_aux {\n  typedef Index type;\n};\n\ntemplate<typename SizeType,typename IncrType>\nstruct aseq_reverse_first_type_aux<SizeType,IncrType,typename internal::enable_if<bool((SizeType::value+IncrType::value)|0x1)>::type> {\n  typedef FixedInt<(SizeType::value-1)*IncrType::value> type;\n};\n\ntemplate<typename 
FirstType,typename SizeType,typename IncrType>\nstruct aseq_reverse_first_type<FirstType,SizeType,IncrType,true,false> {\n  typedef typename aseq_reverse_first_type_aux<SizeType,IncrType>::type Aux;\n  typedef Symbolic::AddExpr<FirstType,Symbolic::ValueExpr<Aux> > type;\n};\n\ntemplate<typename FirstType,typename SizeType,typename IncrType>\nstruct aseq_reverse_first_type<FirstType,SizeType,IncrType,false,true> {\n  typedef Symbolic::AddExpr<Symbolic::ProductExpr<Symbolic::AddExpr<SizeType,Symbolic::ValueExpr<FixedInt<-1> > >,\n                                                  Symbolic::ValueExpr<IncrType> >,\n                            Symbolic::ValueExpr<> > type;\n};\n#endif\n\n// Helper to cleanup the type of the increment:\ntemplate<typename T> struct cleanup_seq_incr {\n  typedef typename cleanup_index_type<T,DynamicIndex>::type type;\n};\n\n}\n\n//--------------------------------------------------------------------------------\n// seq(first,last,incr) and seqN(first,size,incr)\n//--------------------------------------------------------------------------------\n\ntemplate<typename FirstType=Index,typename SizeType=Index,typename IncrType=internal::FixedInt<1> >\nclass ArithmeticSequence;\n\ntemplate<typename FirstType,typename SizeType,typename IncrType>\nArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,\n                   typename internal::cleanup_index_type<SizeType>::type,\n                   typename internal::cleanup_seq_incr<IncrType>::type >\nseqN(FirstType first, SizeType size, IncrType incr);\n\n/** \\class ArithmeticSequence\n  * \\ingroup Core_Module\n  *\n  * This class represents an arithmetic progression \\f$ a_0, a_1, a_2, ..., a_{n-1}\\f$ defined by\n  * its \\em first value \\f$ a_0 \\f$, its \\em size (aka length) \\em n, and the \\em increment (aka stride)\n  * that is equal to \\f$ a_{i+1}-a_{i}\\f$ for any \\em i.\n  *\n  * It is internally used as the return type of the Eigen::seq and Eigen::seqN functions, 
and as the input arguments\n  * of DenseBase::operator()(const RowIndices&, const ColIndices&), and most of the time this is the\n  * only way it is used.\n  *\n  * \\tparam FirstType type of the first element, usually an Index,\n  *                   but internally it can be a symbolic expression\n  * \\tparam SizeType type representing the size of the sequence, usually an Index\n  *                  or a compile time integral constant. Internally, it can also be a symbolic expression\n  * \\tparam IncrType type of the increment, can be a runtime Index, or a compile time integral constant (default is compile-time 1)\n  *\n  * \\sa Eigen::seq, Eigen::seqN, DenseBase::operator()(const RowIndices&, const ColIndices&), class IndexedView\n  */\ntemplate<typename FirstType,typename SizeType,typename IncrType>\nclass ArithmeticSequence\n{\npublic:\n  ArithmeticSequence(FirstType first, SizeType size) : m_first(first), m_size(size) {}\n  ArithmeticSequence(FirstType first, SizeType size, IncrType incr) : m_first(first), m_size(size), m_incr(incr) {}\n\n  enum {\n    SizeAtCompileTime = internal::get_fixed_value<SizeType>::value,\n    IncrAtCompileTime = internal::get_fixed_value<IncrType,DynamicIndex>::value\n  };\n\n  /** \\returns the size, i.e., number of elements, of the sequence */\n  Index size()  const { return m_size; }\n\n  /** \\returns the first element \\f$ a_0 \\f$ in the sequence */\n  Index first()  const { return m_first; }\n\n  /** \\returns the value \\f$ a_i \\f$ at index \\a i in the sequence. 
*/\n  Index operator[](Index i) const { return m_first + i * m_incr; }\n\n  const FirstType& firstObject() const { return m_first; }\n  const SizeType&  sizeObject()  const { return m_size; }\n  const IncrType&  incrObject()  const { return m_incr; }\n\nprotected:\n  FirstType m_first;\n  SizeType  m_size;\n  IncrType  m_incr;\n\npublic:\n\n#if EIGEN_HAS_CXX11 && ((!EIGEN_COMP_GNUC) || EIGEN_COMP_GNUC>=48)\n  auto reverse() const -> decltype(Eigen::seqN(m_first+(m_size+fix<-1>())*m_incr,m_size,-m_incr)) {\n    return seqN(m_first+(m_size+fix<-1>())*m_incr,m_size,-m_incr);\n  }\n#else\nprotected:\n  typedef typename internal::aseq_negate<IncrType>::type ReverseIncrType;\n  typedef typename internal::aseq_reverse_first_type<FirstType,SizeType,IncrType>::type ReverseFirstType;\npublic:\n  ArithmeticSequence<ReverseFirstType,SizeType,ReverseIncrType>\n  reverse() const {\n    return seqN(m_first+(m_size+fix<-1>())*m_incr,m_size,-m_incr);\n  }\n#endif\n};\n\n/** \\returns an ArithmeticSequence starting at \\a first, of length \\a size, and increment \\a incr\n  *\n  * \\sa seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) */\ntemplate<typename FirstType,typename SizeType,typename IncrType>\nArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,typename internal::cleanup_index_type<SizeType>::type,typename internal::cleanup_seq_incr<IncrType>::type >\nseqN(FirstType first, SizeType size, IncrType incr)  {\n  return ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,typename internal::cleanup_index_type<SizeType>::type,typename internal::cleanup_seq_incr<IncrType>::type>(first,size,incr);\n}\n\n/** \\returns an ArithmeticSequence starting at \\a first, of length \\a size, and unit increment\n  *\n  * \\sa seqN(FirstType,SizeType,IncrType), seq(FirstType,LastType) */\ntemplate<typename FirstType,typename SizeType>\nArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,typename 
internal::cleanup_index_type<SizeType>::type >\nseqN(FirstType first, SizeType size)  {\n  return ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,typename internal::cleanup_index_type<SizeType>::type>(first,size);\n}\n\n#ifdef EIGEN_PARSED_BY_DOXYGEN\n\n/** \\returns an ArithmeticSequence starting at \\a f, up (or down) to \\a l, and with positive (or negative) increment \\a incr\n  *\n  * It is essentially an alias to:\n  * \\code\n  * seqN(f, (l-f+incr)/incr, incr);\n  * \\endcode\n  *\n  * \\sa seqN(FirstType,SizeType,IncrType), seq(FirstType,LastType)\n  */\ntemplate<typename FirstType,typename LastType, typename IncrType>\nauto seq(FirstType f, LastType l, IncrType incr);\n\n/** \\returns an ArithmeticSequence starting at \\a f, up (or down) to \\a l, and unit increment\n  *\n  * It is essentially an alias to:\n  * \\code\n  * seqN(f,l-f+1);\n  * \\endcode\n  *\n  * \\sa seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType)\n  */\ntemplate<typename FirstType,typename LastType>\nauto seq(FirstType f, LastType l);\n\n#else // EIGEN_PARSED_BY_DOXYGEN\n\n#if EIGEN_HAS_CXX11\ntemplate<typename FirstType,typename LastType>\nauto seq(FirstType f, LastType l) -> decltype(seqN(typename internal::cleanup_index_type<FirstType>::type(f),\n                                                   (  typename internal::cleanup_index_type<LastType>::type(l)\n                                                    - typename internal::cleanup_index_type<FirstType>::type(f)+fix<1>())))\n{\n  return seqN(typename internal::cleanup_index_type<FirstType>::type(f),\n              (typename internal::cleanup_index_type<LastType>::type(l)\n               -typename internal::cleanup_index_type<FirstType>::type(f)+fix<1>()));\n}\n\ntemplate<typename FirstType,typename LastType, typename IncrType>\nauto seq(FirstType f, LastType l, IncrType incr)\n  -> decltype(seqN(typename internal::cleanup_index_type<FirstType>::type(f),\n                   (   typename 
internal::cleanup_index_type<LastType>::type(l)\n                     - typename internal::cleanup_index_type<FirstType>::type(f)+typename internal::cleanup_seq_incr<IncrType>::type(incr)\n                   ) / typename internal::cleanup_seq_incr<IncrType>::type(incr),\n                   typename internal::cleanup_seq_incr<IncrType>::type(incr)))\n{\n  typedef typename internal::cleanup_seq_incr<IncrType>::type CleanedIncrType;\n  return seqN(typename internal::cleanup_index_type<FirstType>::type(f),\n              ( typename internal::cleanup_index_type<LastType>::type(l)\n               -typename internal::cleanup_index_type<FirstType>::type(f)+CleanedIncrType(incr)) / CleanedIncrType(incr),\n              CleanedIncrType(incr));\n}\n#else\n\ntemplate<typename FirstType,typename LastType>\ntypename internal::enable_if<!(Symbolic::is_symbolic<FirstType>::value || Symbolic::is_symbolic<LastType>::value),\n                             ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,Index> >::type\nseq(FirstType f, LastType l)\n{\n  return seqN(typename internal::cleanup_index_type<FirstType>::type(f),\n              Index((typename internal::cleanup_index_type<LastType>::type(l)-typename internal::cleanup_index_type<FirstType>::type(f)+fix<1>())));\n}\n\ntemplate<typename FirstTypeDerived,typename LastType>\ntypename internal::enable_if<!Symbolic::is_symbolic<LastType>::value,\n    ArithmeticSequence<FirstTypeDerived, Symbolic::AddExpr<Symbolic::AddExpr<Symbolic::NegateExpr<FirstTypeDerived>,Symbolic::ValueExpr<> >,\n                                                            Symbolic::ValueExpr<internal::FixedInt<1> > > > >::type\nseq(const Symbolic::BaseExpr<FirstTypeDerived> &f, LastType l)\n{\n  return seqN(f.derived(),(typename internal::cleanup_index_type<LastType>::type(l)-f.derived()+fix<1>()));\n}\n\ntemplate<typename FirstType,typename LastTypeDerived>\ntypename internal::enable_if<!Symbolic::is_symbolic<FirstType>::value,\n    
ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,\n                        Symbolic::AddExpr<Symbolic::AddExpr<LastTypeDerived,Symbolic::ValueExpr<> >,\n                                          Symbolic::ValueExpr<internal::FixedInt<1> > > > >::type\nseq(FirstType f, const Symbolic::BaseExpr<LastTypeDerived> &l)\n{\n  return seqN(typename internal::cleanup_index_type<FirstType>::type(f),(l.derived()-typename internal::cleanup_index_type<FirstType>::type(f)+fix<1>()));\n}\n\ntemplate<typename FirstTypeDerived,typename LastTypeDerived>\nArithmeticSequence<FirstTypeDerived,\n                    Symbolic::AddExpr<Symbolic::AddExpr<LastTypeDerived,Symbolic::NegateExpr<FirstTypeDerived> >,Symbolic::ValueExpr<internal::FixedInt<1> > > >\nseq(const Symbolic::BaseExpr<FirstTypeDerived> &f, const Symbolic::BaseExpr<LastTypeDerived> &l)\n{\n  return seqN(f.derived(),(l.derived()-f.derived()+fix<1>()));\n}\n\n\ntemplate<typename FirstType,typename LastType, typename IncrType>\ntypename internal::enable_if<!(Symbolic::is_symbolic<FirstType>::value || Symbolic::is_symbolic<LastType>::value),\n    ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,Index,typename internal::cleanup_seq_incr<IncrType>::type> >::type\nseq(FirstType f, LastType l, IncrType incr)\n{\n  typedef typename internal::cleanup_seq_incr<IncrType>::type CleanedIncrType;\n  return seqN(typename internal::cleanup_index_type<FirstType>::type(f),\n              Index((typename internal::cleanup_index_type<LastType>::type(l)-typename internal::cleanup_index_type<FirstType>::type(f)+CleanedIncrType(incr))/CleanedIncrType(incr)), incr);\n}\n\ntemplate<typename FirstTypeDerived,typename LastType, typename IncrType>\ntypename internal::enable_if<!Symbolic::is_symbolic<LastType>::value,\n    ArithmeticSequence<FirstTypeDerived,\n                        Symbolic::QuotientExpr<Symbolic::AddExpr<Symbolic::AddExpr<Symbolic::NegateExpr<FirstTypeDerived>,\n                       
                                                            Symbolic::ValueExpr<> >,\n                                                                 Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,\n                                              Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,\n                        typename internal::cleanup_seq_incr<IncrType>::type> >::type\nseq(const Symbolic::BaseExpr<FirstTypeDerived> &f, LastType l, IncrType incr)\n{\n  typedef typename internal::cleanup_seq_incr<IncrType>::type CleanedIncrType;\n  return seqN(f.derived(),(typename internal::cleanup_index_type<LastType>::type(l)-f.derived()+CleanedIncrType(incr))/CleanedIncrType(incr), incr);\n}\n\ntemplate<typename FirstType,typename LastTypeDerived, typename IncrType>\ntypename internal::enable_if<!Symbolic::is_symbolic<FirstType>::value,\n    ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,\n                        Symbolic::QuotientExpr<Symbolic::AddExpr<Symbolic::AddExpr<LastTypeDerived,Symbolic::ValueExpr<> >,\n                                                                 Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,\n                                               Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,\n                        typename internal::cleanup_seq_incr<IncrType>::type> >::type\nseq(FirstType f, const Symbolic::BaseExpr<LastTypeDerived> &l, IncrType incr)\n{\n  typedef typename internal::cleanup_seq_incr<IncrType>::type CleanedIncrType;\n  return seqN(typename internal::cleanup_index_type<FirstType>::type(f),\n              (l.derived()-typename internal::cleanup_index_type<FirstType>::type(f)+CleanedIncrType(incr))/CleanedIncrType(incr), incr);\n}\n\ntemplate<typename FirstTypeDerived,typename LastTypeDerived, typename IncrType>\nArithmeticSequence<FirstTypeDerived,\n                    
Symbolic::QuotientExpr<Symbolic::AddExpr<Symbolic::AddExpr<LastTypeDerived,\n                                                                               Symbolic::NegateExpr<FirstTypeDerived> >,\n                                                             Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,\n                                          Symbolic::ValueExpr<typename internal::cleanup_seq_incr<IncrType>::type> >,\n                    typename internal::cleanup_seq_incr<IncrType>::type>\nseq(const Symbolic::BaseExpr<FirstTypeDerived> &f, const Symbolic::BaseExpr<LastTypeDerived> &l, IncrType incr)\n{\n  typedef typename internal::cleanup_seq_incr<IncrType>::type CleanedIncrType;\n  return seqN(f.derived(),(l.derived()-f.derived()+CleanedIncrType(incr))/CleanedIncrType(incr), incr);\n}\n#endif\n\n#endif // EIGEN_PARSED_BY_DOXYGEN\n\nnamespace internal {\n\n// Convert a symbolic span into a usable one (i.e., remove last/end \"keywords\")\ntemplate<typename T>\nstruct make_size_type {\n  typedef typename internal::conditional<Symbolic::is_symbolic<T>::value, Index, T>::type type;\n};\n\ntemplate<typename FirstType,typename SizeType,typename IncrType,int XprSize>\nstruct IndexedViewCompatibleType<ArithmeticSequence<FirstType,SizeType,IncrType>, XprSize> {\n  typedef ArithmeticSequence<Index,typename make_size_type<SizeType>::type,IncrType> type;\n};\n\ntemplate<typename FirstType,typename SizeType,typename IncrType>\nArithmeticSequence<Index,typename make_size_type<SizeType>::type,IncrType>\nmakeIndexedViewCompatible(const ArithmeticSequence<FirstType,SizeType,IncrType>& ids, Index size,SpecializedType) {\n  return ArithmeticSequence<Index,typename make_size_type<SizeType>::type,IncrType>(\n            eval_expr_given_size(ids.firstObject(),size),eval_expr_given_size(ids.sizeObject(),size),ids.incrObject());\n}\n\ntemplate<typename FirstType,typename SizeType,typename IncrType>\nstruct 
get_compile_time_incr<ArithmeticSequence<FirstType,SizeType,IncrType> > {\n  enum { value = get_fixed_value<IncrType,DynamicIndex>::value };\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_ARITHMETIC_SEQUENCE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Array.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ARRAY_H\n#define EIGEN_ARRAY_H\n\nnamespace Eigen {\n\nnamespace internal {\ntemplate<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>\nstruct traits<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > : traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >\n{\n  typedef ArrayXpr XprKind;\n  typedef ArrayBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> > XprBase;\n};\n}\n\n/** \\class Array\n  * \\ingroup Core_Module\n  *\n  * \\brief General-purpose arrays with easy API for coefficient-wise operations\n  *\n  * The %Array class is very similar to the Matrix class. It provides\n  * general-purpose one- and two-dimensional arrays. 
The difference between the\n  * %Array and the %Matrix class is primarily in the API: the API for the\n  * %Array class provides easy access to coefficient-wise operations, while the\n  * API for the %Matrix class provides easy access to linear-algebra\n  * operations.\n  *\n  * See documentation of class Matrix for detailed information on the template parameters\n  * storage layout.\n  *\n  * This class can be extended with the help of the plugin mechanism described on the page\n  * \\ref TopicCustomizing_Plugins by defining the preprocessor symbol \\c EIGEN_ARRAY_PLUGIN.\n  *\n  * \\sa \\blank \\ref TutorialArrayClass, \\ref TopicClassHierarchy\n  */\ntemplate<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>\nclass Array\n  : public PlainObjectBase<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >\n{\n  public:\n\n    typedef PlainObjectBase<Array> Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(Array)\n\n    enum { Options = _Options };\n    typedef typename Base::PlainObject PlainObject;\n\n  protected:\n    template <typename Derived, typename OtherDerived, bool IsVector>\n    friend struct internal::conservative_resize_like_impl;\n\n    using Base::m_storage;\n\n  public:\n\n    using Base::base;\n    using Base::coeff;\n    using Base::coeffRef;\n\n    /**\n      * The usage of\n      *   using Base::operator=;\n      * fails on MSVC. Since the code below is working with GCC and MSVC, we skipped\n      * the usage of 'using'. This should be done only for operator=.\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Array& operator=(const EigenBase<OtherDerived> &other)\n    {\n      return Base::operator=(other);\n    }\n\n    /** Set all the entries to \\a value.\n      * \\sa DenseBase::setConstant(), DenseBase::fill()\n      */\n    /* This overload is needed because the usage of\n      *   using Base::operator=;\n      * fails on MSVC. 
Since the code below is working with GCC and MSVC, we skipped\n      * the usage of 'using'. This should be done only for operator=.\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Array& operator=(const Scalar &value)\n    {\n      Base::setConstant(value);\n      return *this;\n    }\n\n    /** Copies the value of the expression \\a other into \\c *this with automatic resizing.\n      *\n      * *this might be resized to match the dimensions of \\a other. If *this was a null matrix (not already initialized),\n      * it will be initialized.\n      *\n      * Note that copying a row-vector into a vector (and conversely) is allowed.\n      * The resizing, if any, is then done in the appropriate way so that row-vectors\n      * remain row-vectors and vectors remain vectors.\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Array& operator=(const DenseBase<OtherDerived>& other)\n    {\n      return Base::_set(other);\n    }\n\n    /** This is a special case of the templated operator=. Its purpose is to\n      * prevent a default operator= from hiding the templated operator=.\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Array& operator=(const Array& other)\n    {\n      return Base::_set(other);\n    }\n    \n    /** Default constructor.\n      *\n      * For fixed-size matrices, does nothing.\n      *\n      * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix\n      * is called a null matrix. 
This constructor is the unique way to create null matrices: resizing\n      * a matrix to 0 is not supported.\n      *\n      * \\sa resize(Index,Index)\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Array() : Base()\n    {\n      Base::_check_template_params();\n      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED\n    }\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    // FIXME is it still needed ??\n    /** \\internal */\n    EIGEN_DEVICE_FUNC\n    Array(internal::constructor_without_unaligned_array_assert)\n      : Base(internal::constructor_without_unaligned_array_assert())\n    {\n      Base::_check_template_params();\n      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED\n    }\n#endif\n\n#if EIGEN_HAS_RVALUE_REFERENCES\n    EIGEN_DEVICE_FUNC\n    Array(Array&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_constructible<Scalar>::value)\n      : Base(std::move(other))\n    {\n      Base::_check_template_params();\n      if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)\n        Base::_set_noalias(other);\n    }\n    EIGEN_DEVICE_FUNC\n    Array& operator=(Array&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable<Scalar>::value)\n    {\n      other.swap(*this);\n      return *this;\n    }\n#endif\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    template<typename T>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE explicit Array(const T& x)\n    {\n      Base::_check_template_params();\n      Base::template _init1<T>(x);\n    }\n\n    template<typename T0, typename T1>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Array(const T0& val0, const T1& val1)\n    {\n      Base::_check_template_params();\n      this->template _init2<T0,T1>(val0, val1);\n    }\n    #else\n    /** \\brief Constructs a fixed-sized array initialized with coefficients starting at \\a data */\n    EIGEN_DEVICE_FUNC explicit Array(const Scalar *data);\n    /** Constructs a vector or row-vector with given dimension. 
\\only_for_vectors\n      *\n      * Note that this is only useful for dynamic-size vectors. For fixed-size vectors,\n      * it is redundant to pass the dimension here, so it makes more sense to use the default\n      * constructor Array() instead.\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE explicit Array(Index dim);\n    /** constructs an initialized 1x1 Array with the given coefficient */\n    Array(const Scalar& value);\n    /** constructs an uninitialized array with \\a rows rows and \\a cols columns.\n      *\n      * This is useful for dynamic-size arrays. For fixed-size arrays,\n      * it is redundant to pass these parameters, so one should use the default constructor\n      * Array() instead. */\n    Array(Index rows, Index cols);\n    /** constructs an initialized 2D vector with given coefficients */\n    Array(const Scalar& val0, const Scalar& val1);\n    #endif\n\n    /** constructs an initialized 3D vector with given coefficients */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2)\n    {\n      Base::_check_template_params();\n      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3)\n      m_storage.data()[0] = val0;\n      m_storage.data()[1] = val1;\n      m_storage.data()[2] = val2;\n    }\n    /** constructs an initialized 4D vector with given coefficients */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Array(const Scalar& val0, const Scalar& val1, const Scalar& val2, const Scalar& val3)\n    {\n      Base::_check_template_params();\n      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4)\n      m_storage.data()[0] = val0;\n      m_storage.data()[1] = val1;\n      m_storage.data()[2] = val2;\n      m_storage.data()[3] = val3;\n    }\n\n    /** Copy constructor */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Array(const Array& other)\n            : Base(other)\n    { }\n\n    /** \\sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */\n    template<typename 
OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Array(const EigenBase<OtherDerived> &other)\n      : Base(other.derived())\n    { }\n\n    EIGEN_DEVICE_FUNC inline Index innerStride() const { return 1; }\n    EIGEN_DEVICE_FUNC inline Index outerStride() const { return this->innerSize(); }\n\n    #ifdef EIGEN_ARRAY_PLUGIN\n    #include EIGEN_ARRAY_PLUGIN\n    #endif\n\n  private:\n\n    template<typename MatrixType, typename OtherDerived, bool SwapPointers>\n    friend struct internal::matrix_swap_impl;\n};\n\n/** \\defgroup arraytypedefs Global array typedefs\n  * \\ingroup Core_Module\n  *\n  * Eigen defines several typedef shortcuts for most common 1D and 2D array types.\n  *\n  * The general patterns are the following:\n  *\n  * \\c ArrayRowsColsType where \\c Rows and \\c Cols can be \\c 2,\\c 3,\\c 4 for fixed size square matrices or \\c X for dynamic size,\n  * and where \\c Type can be \\c i for integer, \\c f for float, \\c d for double, \\c cf for complex float, \\c cd\n  * for complex double.\n  *\n  * For example, \\c Array33d is a fixed-size 3x3 array type of doubles, and \\c ArrayXXf is a dynamic-size matrix of floats.\n  *\n  * There are also \\c ArraySizeType which are self-explanatory. 
For example, \\c Array4cf is\n  * a fixed-size 1D array of 4 complex floats.\n  *\n  * \\sa class Array\n  */\n\n#define EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix)   \\\n/** \\ingroup arraytypedefs */                                    \\\ntypedef Array<Type, Size, Size> Array##SizeSuffix##SizeSuffix##TypeSuffix;  \\\n/** \\ingroup arraytypedefs */                                    \\\ntypedef Array<Type, Size, 1>    Array##SizeSuffix##TypeSuffix;\n\n#define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, Size)         \\\n/** \\ingroup arraytypedefs */                                    \\\ntypedef Array<Type, Size, Dynamic> Array##Size##X##TypeSuffix;  \\\n/** \\ingroup arraytypedefs */                                    \\\ntypedef Array<Type, Dynamic, Size> Array##X##Size##TypeSuffix;\n\n#define EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \\\nEIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 2, 2) \\\nEIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 3, 3) \\\nEIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 4, 4) \\\nEIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \\\nEIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \\\nEIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \\\nEIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 4)\n\nEIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(int,                  i)\nEIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(float,                f)\nEIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(double,               d)\nEIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<float>,  cf)\nEIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)\n\n#undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES\n#undef EIGEN_MAKE_ARRAY_TYPEDEFS\n\n#undef EIGEN_MAKE_ARRAY_TYPEDEFS_LARGE\n\n#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \\\nusing Eigen::Matrix##SizeSuffix##TypeSuffix; \\\nusing Eigen::Vector##SizeSuffix##TypeSuffix; \\\nusing Eigen::RowVector##SizeSuffix##TypeSuffix;\n\n#define 
EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(TypeSuffix) \\\nEIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \\\nEIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \\\nEIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \\\nEIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \\\n\n#define EIGEN_USING_ARRAY_TYPEDEFS \\\nEIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(i) \\\nEIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(f) \\\nEIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(d) \\\nEIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cf) \\\nEIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cd)\n\n} // end namespace Eigen\n\n#endif // EIGEN_ARRAY_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/ArrayBase.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ARRAYBASE_H\n#define EIGEN_ARRAYBASE_H\n\nnamespace Eigen { \n\ntemplate<typename ExpressionType> class MatrixWrapper;\n\n/** \\class ArrayBase\n  * \\ingroup Core_Module\n  *\n  * \\brief Base class for all 1D and 2D array, and related expressions\n  *\n  * An array is similar to a dense vector or matrix. While matrices are mathematical\n  * objects with well defined linear algebra operators, an array is just a collection\n  * of scalar values arranged in a one or two dimensionnal fashion. As the main consequence,\n  * all operations applied to an array are performed coefficient wise. Furthermore,\n  * arrays support scalar math functions of the c++ standard library (e.g., std::sin(x)), and convenient\n  * constructors allowing to easily write generic code working for both scalar values\n  * and arrays.\n  *\n  * This class is the base that is inherited by all array expression types.\n  *\n  * \\tparam Derived is the derived type, e.g., an array or an expression type.\n  *\n  * This class can be extended with the help of the plugin mechanism described on the page\n  * \\ref TopicCustomizing_Plugins by defining the preprocessor symbol \\c EIGEN_ARRAYBASE_PLUGIN.\n  *\n  * \\sa class MatrixBase, \\ref TopicClassHierarchy\n  */\ntemplate<typename Derived> class ArrayBase\n  : public DenseBase<Derived>\n{\n  public:\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** The base class for a given storage type. 
*/\n    typedef ArrayBase StorageBaseType;\n\n    typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl;\n\n    typedef typename internal::traits<Derived>::StorageKind StorageKind;\n    typedef typename internal::traits<Derived>::Scalar Scalar;\n    typedef typename internal::packet_traits<Scalar>::type PacketScalar;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n\n    typedef DenseBase<Derived> Base;\n    using Base::RowsAtCompileTime;\n    using Base::ColsAtCompileTime;\n    using Base::SizeAtCompileTime;\n    using Base::MaxRowsAtCompileTime;\n    using Base::MaxColsAtCompileTime;\n    using Base::MaxSizeAtCompileTime;\n    using Base::IsVectorAtCompileTime;\n    using Base::Flags;\n    \n    using Base::derived;\n    using Base::const_cast_derived;\n    using Base::rows;\n    using Base::cols;\n    using Base::size;\n    using Base::coeff;\n    using Base::coeffRef;\n    using Base::lazyAssign;\n    using Base::operator-;\n    using Base::operator=;\n    using Base::operator+=;\n    using Base::operator-=;\n    using Base::operator*=;\n    using Base::operator/=;\n\n    typedef typename Base::CoeffReturnType CoeffReturnType;\n\n#endif // not EIGEN_PARSED_BY_DOXYGEN\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    typedef typename Base::PlainObject PlainObject;\n\n    /** \\internal Represents a matrix with all coefficients equal to one another*/\n    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,PlainObject> ConstantReturnType;\n#endif // not EIGEN_PARSED_BY_DOXYGEN\n\n#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::ArrayBase\n#define EIGEN_DOC_UNARY_ADDONS(X,Y)\n#   include \"../plugins/MatrixCwiseUnaryOps.h\"\n#   include \"../plugins/ArrayCwiseUnaryOps.h\"\n#   include \"../plugins/CommonCwiseBinaryOps.h\"\n#   include \"../plugins/MatrixCwiseBinaryOps.h\"\n#   include \"../plugins/ArrayCwiseBinaryOps.h\"\n#   ifdef EIGEN_ARRAYBASE_PLUGIN\n#     include EIGEN_ARRAYBASE_PLUGIN\n#   endif\n#undef 
EIGEN_CURRENT_STORAGE_BASE_CLASS\n#undef EIGEN_DOC_UNARY_ADDONS\n\n    /** Special case of the template operator=, in order to prevent the compiler\n      * from generating a default operator= (issue hit with g++ 4.1)\n      */\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator=(const ArrayBase& other)\n    {\n      internal::call_assignment(derived(), other.derived());\n      return derived();\n    }\n    \n    /** Set all the entries to \\a value.\n      * \\sa DenseBase::setConstant(), DenseBase::fill() */\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator=(const Scalar &value)\n    { Base::setConstant(value); return derived(); }\n\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator+=(const Scalar& scalar);\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator-=(const Scalar& scalar);\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator+=(const ArrayBase<OtherDerived>& other);\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator-=(const ArrayBase<OtherDerived>& other);\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator*=(const ArrayBase<OtherDerived>& other);\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator/=(const ArrayBase<OtherDerived>& other);\n\n  public:\n    EIGEN_DEVICE_FUNC\n    ArrayBase<Derived>& array() { return *this; }\n    EIGEN_DEVICE_FUNC\n    const ArrayBase<Derived>& array() const { return *this; }\n\n    /** \\returns an \\link Eigen::MatrixBase Matrix \\endlink expression of this array\n      * \\sa MatrixBase::array() */\n    EIGEN_DEVICE_FUNC\n    MatrixWrapper<Derived> matrix() { return MatrixWrapper<Derived>(derived()); }\n    EIGEN_DEVICE_FUNC\n    const MatrixWrapper<const Derived> matrix() const { return MatrixWrapper<const Derived>(derived()); }\n\n//     
template<typename Dest>\n//     inline void evalTo(Dest& dst) const { dst = matrix(); }\n\n  protected:\n    EIGEN_DEVICE_FUNC\n    ArrayBase() : Base() {}\n\n  private:\n    explicit ArrayBase(Index);\n    ArrayBase(Index,Index);\n    template<typename OtherDerived> explicit ArrayBase(const ArrayBase<OtherDerived>&);\n  protected:\n    // mixing arrays and matrices is not legal\n    template<typename OtherDerived> Derived& operator+=(const MatrixBase<OtherDerived>& )\n    {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}\n    // mixing arrays and matrices is not legal\n    template<typename OtherDerived> Derived& operator-=(const MatrixBase<OtherDerived>& )\n    {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}\n};\n\n/** replaces \\c *this by \\c *this - \\a other.\n  *\n  * \\returns a reference to \\c *this\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &\nArrayBase<Derived>::operator-=(const ArrayBase<OtherDerived> &other)\n{\n  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());\n  return derived();\n}\n\n/** replaces \\c *this by \\c *this + \\a other.\n  *\n  * \\returns a reference to \\c *this\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &\nArrayBase<Derived>::operator+=(const ArrayBase<OtherDerived>& other)\n{\n  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());\n  return derived();\n}\n\n/** replaces \\c *this by \\c *this * \\a other coefficient wise.\n  *\n  * \\returns a reference to \\c *this\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived 
&\nArrayBase<Derived>::operator*=(const ArrayBase<OtherDerived>& other)\n{\n  call_assignment(derived(), other.derived(), internal::mul_assign_op<Scalar,typename OtherDerived::Scalar>());\n  return derived();\n}\n\n/** replaces \\c *this by \\c *this / \\a other coefficient wise.\n  *\n  * \\returns a reference to \\c *this\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &\nArrayBase<Derived>::operator/=(const ArrayBase<OtherDerived>& other)\n{\n  call_assignment(derived(), other.derived(), internal::div_assign_op<Scalar,typename OtherDerived::Scalar>());\n  return derived();\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_ARRAYBASE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/ArrayWrapper.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ARRAYWRAPPER_H\n#define EIGEN_ARRAYWRAPPER_H\n\nnamespace Eigen { \n\n/** \\class ArrayWrapper\n  * \\ingroup Core_Module\n  *\n  * \\brief Expression of a mathematical vector or matrix as an array object\n  *\n  * This class is the return type of MatrixBase::array(), and most of the time\n  * this is the only way it is use.\n  *\n  * \\sa MatrixBase::array(), class MatrixWrapper\n  */\n\nnamespace internal {\ntemplate<typename ExpressionType>\nstruct traits<ArrayWrapper<ExpressionType> >\n  : public traits<typename remove_all<typename ExpressionType::Nested>::type >\n{\n  typedef ArrayXpr XprKind;\n  // Let's remove NestByRefBit\n  enum {\n    Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,\n    Flags = Flags0 & ~NestByRefBit\n  };\n};\n}\n\ntemplate<typename ExpressionType>\nclass ArrayWrapper : public ArrayBase<ArrayWrapper<ExpressionType> >\n{\n  public:\n    typedef ArrayBase<ArrayWrapper> Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper)\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper)\n    typedef typename internal::remove_all<ExpressionType>::type NestedExpression;\n\n    typedef typename internal::conditional<\n                       internal::is_lvalue<ExpressionType>::value,\n                       Scalar,\n                       const Scalar\n                     >::type ScalarWithConstIfNotLvalue;\n\n    typedef typename internal::ref_selector<ExpressionType>::non_const_type NestedExpressionType;\n\n    using Base::coeffRef;\n\n    EIGEN_DEVICE_FUNC\n    explicit EIGEN_STRONG_INLINE 
ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {}\n\n    EIGEN_DEVICE_FUNC\n    inline Index rows() const { return m_expression.rows(); }\n    EIGEN_DEVICE_FUNC\n    inline Index cols() const { return m_expression.cols(); }\n    EIGEN_DEVICE_FUNC\n    inline Index outerStride() const { return m_expression.outerStride(); }\n    EIGEN_DEVICE_FUNC\n    inline Index innerStride() const { return m_expression.innerStride(); }\n\n    EIGEN_DEVICE_FUNC\n    inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }\n    EIGEN_DEVICE_FUNC\n    inline const Scalar* data() const { return m_expression.data(); }\n\n    EIGEN_DEVICE_FUNC\n    inline const Scalar& coeffRef(Index rowId, Index colId) const\n    {\n      return m_expression.coeffRef(rowId, colId);\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline const Scalar& coeffRef(Index index) const\n    {\n      return m_expression.coeffRef(index);\n    }\n\n    template<typename Dest>\n    EIGEN_DEVICE_FUNC\n    inline void evalTo(Dest& dst) const { dst = m_expression; }\n\n    const typename internal::remove_all<NestedExpressionType>::type& \n    EIGEN_DEVICE_FUNC\n    nestedExpression() const \n    {\n      return m_expression;\n    }\n\n    /** Forwards the resizing request to the nested expression\n      * \\sa DenseBase::resize(Index)  */\n    EIGEN_DEVICE_FUNC\n    void resize(Index newSize) { m_expression.resize(newSize); }\n    /** Forwards the resizing request to the nested expression\n      * \\sa DenseBase::resize(Index,Index)*/\n    EIGEN_DEVICE_FUNC\n    void resize(Index rows, Index cols) { m_expression.resize(rows,cols); }\n\n  protected:\n    NestedExpressionType m_expression;\n};\n\n/** \\class MatrixWrapper\n  * \\ingroup Core_Module\n  *\n  * \\brief Expression of an array as a mathematical vector or matrix\n  *\n  * This class is the return type of ArrayBase::matrix(), and most of the time\n  * this is the only way it is use.\n  *\n  * \\sa MatrixBase::matrix(), class ArrayWrapper\n  
*/\n\nnamespace internal {\ntemplate<typename ExpressionType>\nstruct traits<MatrixWrapper<ExpressionType> >\n : public traits<typename remove_all<typename ExpressionType::Nested>::type >\n{\n  typedef MatrixXpr XprKind;\n  // Let's remove NestByRefBit\n  enum {\n    Flags0 = traits<typename remove_all<typename ExpressionType::Nested>::type >::Flags,\n    Flags = Flags0 & ~NestByRefBit\n  };\n};\n}\n\ntemplate<typename ExpressionType>\nclass MatrixWrapper : public MatrixBase<MatrixWrapper<ExpressionType> >\n{\n  public:\n    typedef MatrixBase<MatrixWrapper<ExpressionType> > Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper)\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper)\n    typedef typename internal::remove_all<ExpressionType>::type NestedExpression;\n\n    typedef typename internal::conditional<\n                       internal::is_lvalue<ExpressionType>::value,\n                       Scalar,\n                       const Scalar\n                     >::type ScalarWithConstIfNotLvalue;\n\n    typedef typename internal::ref_selector<ExpressionType>::non_const_type NestedExpressionType;\n\n    using Base::coeffRef;\n\n    EIGEN_DEVICE_FUNC\n    explicit inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {}\n\n    EIGEN_DEVICE_FUNC\n    inline Index rows() const { return m_expression.rows(); }\n    EIGEN_DEVICE_FUNC\n    inline Index cols() const { return m_expression.cols(); }\n    EIGEN_DEVICE_FUNC\n    inline Index outerStride() const { return m_expression.outerStride(); }\n    EIGEN_DEVICE_FUNC\n    inline Index innerStride() const { return m_expression.innerStride(); }\n\n    EIGEN_DEVICE_FUNC\n    inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); }\n    EIGEN_DEVICE_FUNC\n    inline const Scalar* data() const { return m_expression.data(); }\n\n    EIGEN_DEVICE_FUNC\n    inline const Scalar& coeffRef(Index rowId, Index colId) const\n    {\n      return m_expression.derived().coeffRef(rowId, colId);\n    }\n\n 
   EIGEN_DEVICE_FUNC\n    inline const Scalar& coeffRef(Index index) const\n    {\n      return m_expression.coeffRef(index);\n    }\n\n    EIGEN_DEVICE_FUNC\n    const typename internal::remove_all<NestedExpressionType>::type& \n    nestedExpression() const \n    {\n      return m_expression;\n    }\n\n    /** Forwards the resizing request to the nested expression\n      * \\sa DenseBase::resize(Index)  */\n    EIGEN_DEVICE_FUNC\n    void resize(Index newSize) { m_expression.resize(newSize); }\n    /** Forwards the resizing request to the nested expression\n      * \\sa DenseBase::resize(Index,Index)*/\n    EIGEN_DEVICE_FUNC\n    void resize(Index rows, Index cols) { m_expression.resize(rows,cols); }\n\n  protected:\n    NestedExpressionType m_expression;\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_ARRAYWRAPPER_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Assign.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2007 Michael Olbrich <michael.olbrich@gmx.net>\n// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ASSIGN_H\n#define EIGEN_ASSIGN_H\n\nnamespace Eigen {\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>\n  ::lazyAssign(const DenseBase<OtherDerived>& other)\n{\n  enum{\n    SameType = internal::is_same<typename Derived::Scalar,typename OtherDerived::Scalar>::value\n  };\n\n  EIGEN_STATIC_ASSERT_LVALUE(Derived)\n  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived)\n  EIGEN_STATIC_ASSERT(SameType,YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)\n\n  eigen_assert(rows() == other.rows() && cols() == other.cols());\n  internal::call_assignment_no_alias(derived(),other.derived());\n  \n  return derived();\n}\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase<OtherDerived>& other)\n{\n  internal::call_assignment(derived(), other.derived());\n  return derived();\n}\n\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator=(const DenseBase& other)\n{\n  internal::call_assignment(derived(), other.derived());\n  return derived();\n}\n\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const MatrixBase& other)\n{\n  internal::call_assignment(derived(), other.derived());\n  return 
derived();\n}\n\ntemplate<typename Derived>\ntemplate <typename OtherDerived>\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const DenseBase<OtherDerived>& other)\n{\n  internal::call_assignment(derived(), other.derived());\n  return derived();\n}\n\ntemplate<typename Derived>\ntemplate <typename OtherDerived>\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const EigenBase<OtherDerived>& other)\n{\n  internal::call_assignment(derived(), other.derived());\n  return derived();\n}\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)\n{\n  other.derived().evalTo(derived());\n  return derived();\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_ASSIGN_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/AssignEvaluator.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ASSIGN_EVALUATOR_H\n#define EIGEN_ASSIGN_EVALUATOR_H\n\nnamespace Eigen {\n\n// This implementation is based on Assign.h\n\nnamespace internal {\n  \n/***************************************************************************\n* Part 1 : the logic deciding a strategy for traversal and unrolling       *\n***************************************************************************/\n\n// copy_using_evaluator_traits is based on assign_traits\n\ntemplate <typename DstEvaluator, typename SrcEvaluator, typename AssignFunc>\nstruct copy_using_evaluator_traits\n{\n  typedef typename DstEvaluator::XprType Dst;\n  typedef typename Dst::Scalar DstScalar;\n  \n  enum {\n    DstFlags = DstEvaluator::Flags,\n    SrcFlags = SrcEvaluator::Flags\n  };\n  \npublic:\n  enum {\n    DstAlignment = DstEvaluator::Alignment,\n    SrcAlignment = SrcEvaluator::Alignment,\n    DstHasDirectAccess = DstFlags & DirectAccessBit,\n    JointAlignment = EIGEN_PLAIN_ENUM_MIN(DstAlignment,SrcAlignment)\n  };\n\nprivate:\n  enum {\n    InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime)\n              : int(DstFlags)&RowMajorBit ? int(Dst::ColsAtCompileTime)\n              : int(Dst::RowsAtCompileTime),\n    InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime)\n              : int(DstFlags)&RowMajorBit ? 
int(Dst::MaxColsAtCompileTime)\n              : int(Dst::MaxRowsAtCompileTime),\n    OuterStride = int(outer_stride_at_compile_time<Dst>::ret),\n    MaxSizeAtCompileTime = Dst::SizeAtCompileTime\n  };\n\n  // TODO distinguish between linear traversal and inner-traversals\n  typedef typename find_best_packet<DstScalar,Dst::SizeAtCompileTime>::type LinearPacketType;\n  typedef typename find_best_packet<DstScalar,InnerSize>::type InnerPacketType;\n\n  enum {\n    LinearPacketSize = unpacket_traits<LinearPacketType>::size,\n    InnerPacketSize = unpacket_traits<InnerPacketType>::size\n  };\n\npublic:\n  enum {\n    LinearRequiredAlignment = unpacket_traits<LinearPacketType>::alignment,\n    InnerRequiredAlignment = unpacket_traits<InnerPacketType>::alignment\n  };\n\nprivate:\n  enum {\n    DstIsRowMajor = DstFlags&RowMajorBit,\n    SrcIsRowMajor = SrcFlags&RowMajorBit,\n    StorageOrdersAgree = (int(DstIsRowMajor) == int(SrcIsRowMajor)),\n    MightVectorize = bool(StorageOrdersAgree)\n                  && (int(DstFlags) & int(SrcFlags) & ActualPacketAccessBit)\n                  && bool(functor_traits<AssignFunc>::PacketAccess),\n    MayInnerVectorize  = MightVectorize\n                       && int(InnerSize)!=Dynamic && int(InnerSize)%int(InnerPacketSize)==0\n                       && int(OuterStride)!=Dynamic && int(OuterStride)%int(InnerPacketSize)==0\n                       && (EIGEN_UNALIGNED_VECTORIZE  || int(JointAlignment)>=int(InnerRequiredAlignment)),\n    MayLinearize = bool(StorageOrdersAgree) && (int(DstFlags) & int(SrcFlags) & LinearAccessBit),\n    MayLinearVectorize = bool(MightVectorize) && MayLinearize && DstHasDirectAccess\n                       && (EIGEN_UNALIGNED_VECTORIZE || (int(DstAlignment)>=int(LinearRequiredAlignment)) || MaxSizeAtCompileTime == Dynamic),\n      /* If the destination isn't aligned, we have to do runtime checks and we don't unroll,\n         so it's only good for large enough sizes. 
*/\n    MaySliceVectorize  = bool(MightVectorize) && bool(DstHasDirectAccess)\n                       && (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=(EIGEN_UNALIGNED_VECTORIZE?InnerPacketSize:(3*InnerPacketSize)))\n      /* slice vectorization can be slow, so we only want it if the slices are big, which is\n         indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block\n         in a fixed-size matrix\n         However, with EIGEN_UNALIGNED_VECTORIZE and unrolling, slice vectorization is still worth it */\n  };\n\npublic:\n  enum {\n    Traversal = int(MayLinearVectorize) && (LinearPacketSize>InnerPacketSize) ? int(LinearVectorizedTraversal)\n              : int(MayInnerVectorize)   ? int(InnerVectorizedTraversal)\n              : int(MayLinearVectorize)  ? int(LinearVectorizedTraversal)\n              : int(MaySliceVectorize)   ? int(SliceVectorizedTraversal)\n              : int(MayLinearize)        ? int(LinearTraversal)\n                                         : int(DefaultTraversal),\n    Vectorized = int(Traversal) == InnerVectorizedTraversal\n              || int(Traversal) == LinearVectorizedTraversal\n              || int(Traversal) == SliceVectorizedTraversal\n  };\n\n  typedef typename conditional<int(Traversal)==LinearVectorizedTraversal, LinearPacketType, InnerPacketType>::type PacketType;\n\nprivate:\n  enum {\n    ActualPacketSize    = int(Traversal)==LinearVectorizedTraversal ? LinearPacketSize\n                        : Vectorized ? 
InnerPacketSize\n                        : 1,\n    UnrollingLimit      = EIGEN_UNROLLING_LIMIT * ActualPacketSize,\n    MayUnrollCompletely = int(Dst::SizeAtCompileTime) != Dynamic\n                       && int(Dst::SizeAtCompileTime) * (int(DstEvaluator::CoeffReadCost)+int(SrcEvaluator::CoeffReadCost)) <= int(UnrollingLimit),\n    MayUnrollInner      = int(InnerSize) != Dynamic\n                       && int(InnerSize) * (int(DstEvaluator::CoeffReadCost)+int(SrcEvaluator::CoeffReadCost)) <= int(UnrollingLimit)\n  };\n\npublic:\n  enum {\n    Unrolling = (int(Traversal) == int(InnerVectorizedTraversal) || int(Traversal) == int(DefaultTraversal))\n                ? (\n                    int(MayUnrollCompletely) ? int(CompleteUnrolling)\n                  : int(MayUnrollInner)      ? int(InnerUnrolling)\n                                             : int(NoUnrolling)\n                  )\n              : int(Traversal) == int(LinearVectorizedTraversal)\n                ? ( bool(MayUnrollCompletely) && ( EIGEN_UNALIGNED_VECTORIZE || (int(DstAlignment)>=int(LinearRequiredAlignment)))\n                          ? int(CompleteUnrolling)\n                          : int(NoUnrolling) )\n              : int(Traversal) == int(LinearTraversal)\n                ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling) \n                                              : int(NoUnrolling) )\n#if EIGEN_UNALIGNED_VECTORIZE\n              : int(Traversal) == int(SliceVectorizedTraversal)\n                ? ( bool(MayUnrollInner) ? 
int(InnerUnrolling)\n                                         : int(NoUnrolling) )\n#endif\n              : int(NoUnrolling)\n  };\n\n#ifdef EIGEN_DEBUG_ASSIGN\n  static void debug()\n  {\n    std::cerr << \"DstXpr: \" << typeid(typename DstEvaluator::XprType).name() << std::endl;\n    std::cerr << \"SrcXpr: \" << typeid(typename SrcEvaluator::XprType).name() << std::endl;\n    std::cerr.setf(std::ios::hex, std::ios::basefield);\n    std::cerr << \"DstFlags\" << \" = \" << DstFlags << \" (\" << demangle_flags(DstFlags) << \" )\" << std::endl;\n    std::cerr << \"SrcFlags\" << \" = \" << SrcFlags << \" (\" << demangle_flags(SrcFlags) << \" )\" << std::endl;\n    std::cerr.unsetf(std::ios::hex);\n    EIGEN_DEBUG_VAR(DstAlignment)\n    EIGEN_DEBUG_VAR(SrcAlignment)\n    EIGEN_DEBUG_VAR(LinearRequiredAlignment)\n    EIGEN_DEBUG_VAR(InnerRequiredAlignment)\n    EIGEN_DEBUG_VAR(JointAlignment)\n    EIGEN_DEBUG_VAR(InnerSize)\n    EIGEN_DEBUG_VAR(InnerMaxSize)\n    EIGEN_DEBUG_VAR(LinearPacketSize)\n    EIGEN_DEBUG_VAR(InnerPacketSize)\n    EIGEN_DEBUG_VAR(ActualPacketSize)\n    EIGEN_DEBUG_VAR(StorageOrdersAgree)\n    EIGEN_DEBUG_VAR(MightVectorize)\n    EIGEN_DEBUG_VAR(MayLinearize)\n    EIGEN_DEBUG_VAR(MayInnerVectorize)\n    EIGEN_DEBUG_VAR(MayLinearVectorize)\n    EIGEN_DEBUG_VAR(MaySliceVectorize)\n    std::cerr << \"Traversal\" << \" = \" << Traversal << \" (\" << demangle_traversal(Traversal) << \")\" << std::endl;\n    EIGEN_DEBUG_VAR(SrcEvaluator::CoeffReadCost)\n    EIGEN_DEBUG_VAR(UnrollingLimit)\n    EIGEN_DEBUG_VAR(MayUnrollCompletely)\n    EIGEN_DEBUG_VAR(MayUnrollInner)\n    std::cerr << \"Unrolling\" << \" = \" << Unrolling << \" (\" << demangle_unrolling(Unrolling) << \")\" << std::endl;\n    std::cerr << std::endl;\n  }\n#endif\n};\n\n/***************************************************************************\n* Part 2 : meta-unrollers\n***************************************************************************/\n\n/************************\n*** 
Default traversal ***\n************************/\n\ntemplate<typename Kernel, int Index, int Stop>\nstruct copy_using_evaluator_DefaultTraversal_CompleteUnrolling\n{\n  // FIXME: this is not very clean, perhaps this information should be provided by the kernel?\n  typedef typename Kernel::DstEvaluatorType DstEvaluatorType;\n  typedef typename DstEvaluatorType::XprType DstXprType;\n  \n  enum {\n    outer = Index / DstXprType::InnerSizeAtCompileTime,\n    inner = Index % DstXprType::InnerSizeAtCompileTime\n  };\n\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)\n  {\n    kernel.assignCoeffByOuterInner(outer, inner);\n    copy_using_evaluator_DefaultTraversal_CompleteUnrolling<Kernel, Index+1, Stop>::run(kernel);\n  }\n};\n\ntemplate<typename Kernel, int Stop>\nstruct copy_using_evaluator_DefaultTraversal_CompleteUnrolling<Kernel, Stop, Stop>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { }\n};\n\ntemplate<typename Kernel, int Index_, int Stop>\nstruct copy_using_evaluator_DefaultTraversal_InnerUnrolling\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer)\n  {\n    kernel.assignCoeffByOuterInner(outer, Index_);\n    copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, Index_+1, Stop>::run(kernel, outer);\n  }\n};\n\ntemplate<typename Kernel, int Stop>\nstruct copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, Stop, Stop>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index) { }\n};\n\n/***********************\n*** Linear traversal ***\n***********************/\n\ntemplate<typename Kernel, int Index, int Stop>\nstruct copy_using_evaluator_LinearTraversal_CompleteUnrolling\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel& kernel)\n  {\n    kernel.assignCoeff(Index);\n    copy_using_evaluator_LinearTraversal_CompleteUnrolling<Kernel, Index+1, Stop>::run(kernel);\n  }\n};\n\ntemplate<typename Kernel, int Stop>\nstruct 
copy_using_evaluator_LinearTraversal_CompleteUnrolling<Kernel, Stop, Stop>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { }\n};\n\n/**************************\n*** Inner vectorization ***\n**************************/\n\ntemplate<typename Kernel, int Index, int Stop>\nstruct copy_using_evaluator_innervec_CompleteUnrolling\n{\n  // FIXME: this is not very clean, perhaps this information should be provided by the kernel?\n  typedef typename Kernel::DstEvaluatorType DstEvaluatorType;\n  typedef typename DstEvaluatorType::XprType DstXprType;\n  typedef typename Kernel::PacketType PacketType;\n  \n  enum {\n    outer = Index / DstXprType::InnerSizeAtCompileTime,\n    inner = Index % DstXprType::InnerSizeAtCompileTime,\n    SrcAlignment = Kernel::AssignmentTraits::SrcAlignment,\n    DstAlignment = Kernel::AssignmentTraits::DstAlignment\n  };\n\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)\n  {\n    kernel.template assignPacketByOuterInner<DstAlignment, SrcAlignment, PacketType>(outer, inner);\n    enum { NextIndex = Index + unpacket_traits<PacketType>::size };\n    copy_using_evaluator_innervec_CompleteUnrolling<Kernel, NextIndex, Stop>::run(kernel);\n  }\n};\n\ntemplate<typename Kernel, int Stop>\nstruct copy_using_evaluator_innervec_CompleteUnrolling<Kernel, Stop, Stop>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&) { }\n};\n\ntemplate<typename Kernel, int Index_, int Stop, int SrcAlignment, int DstAlignment>\nstruct copy_using_evaluator_innervec_InnerUnrolling\n{\n  typedef typename Kernel::PacketType PacketType;\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer)\n  {\n    kernel.template assignPacketByOuterInner<DstAlignment, SrcAlignment, PacketType>(outer, Index_);\n    enum { NextIndex = Index_ + unpacket_traits<PacketType>::size };\n    copy_using_evaluator_innervec_InnerUnrolling<Kernel, NextIndex, Stop, SrcAlignment, DstAlignment>::run(kernel, outer);\n  
}\n};\n\ntemplate<typename Kernel, int Stop, int SrcAlignment, int DstAlignment>\nstruct copy_using_evaluator_innervec_InnerUnrolling<Kernel, Stop, Stop, SrcAlignment, DstAlignment>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &, Index) { }\n};\n\n/***************************************************************************\n* Part 3 : implementation of all cases\n***************************************************************************/\n\n// dense_assignment_loop is based on assign_impl\n\ntemplate<typename Kernel,\n         int Traversal = Kernel::AssignmentTraits::Traversal,\n         int Unrolling = Kernel::AssignmentTraits::Unrolling>\nstruct dense_assignment_loop;\n\n/************************\n*** Default traversal ***\n************************/\n\ntemplate<typename Kernel>\nstruct dense_assignment_loop<Kernel, DefaultTraversal, NoUnrolling>\n{\n  EIGEN_DEVICE_FUNC static void EIGEN_STRONG_INLINE run(Kernel &kernel)\n  {\n    for(Index outer = 0; outer < kernel.outerSize(); ++outer) {\n      for(Index inner = 0; inner < kernel.innerSize(); ++inner) {\n        kernel.assignCoeffByOuterInner(outer, inner);\n      }\n    }\n  }\n};\n\ntemplate<typename Kernel>\nstruct dense_assignment_loop<Kernel, DefaultTraversal, CompleteUnrolling>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)\n  {\n    typedef typename Kernel::DstEvaluatorType::XprType DstXprType;\n    copy_using_evaluator_DefaultTraversal_CompleteUnrolling<Kernel, 0, DstXprType::SizeAtCompileTime>::run(kernel);\n  }\n};\n\ntemplate<typename Kernel>\nstruct dense_assignment_loop<Kernel, DefaultTraversal, InnerUnrolling>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)\n  {\n    typedef typename Kernel::DstEvaluatorType::XprType DstXprType;\n\n    const Index outerSize = kernel.outerSize();\n    for(Index outer = 0; outer < outerSize; ++outer)\n      copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, 0, 
DstXprType::InnerSizeAtCompileTime>::run(kernel, outer);\n  }\n};\n\n/***************************\n*** Linear vectorization ***\n***************************/\n\n\n// The goal of unaligned_dense_assignment_loop is simply to factorize the handling\n// of the non vectorizable beginning and ending parts\n\ntemplate <bool IsAligned = false>\nstruct unaligned_dense_assignment_loop\n{\n  // if IsAligned = true, then do nothing\n  template <typename Kernel>\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel&, Index, Index) {}\n};\n\ntemplate <>\nstruct unaligned_dense_assignment_loop<false>\n{\n  // MSVC must not inline this functions. If it does, it fails to optimize the\n  // packet access path.\n  // FIXME check which version exhibits this issue\n#if EIGEN_COMP_MSVC\n  template <typename Kernel>\n  static EIGEN_DONT_INLINE void run(Kernel &kernel,\n                                    Index start,\n                                    Index end)\n#else\n  template <typename Kernel>\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel,\n                                      Index start,\n                                      Index end)\n#endif\n  {\n    for (Index index = start; index < end; ++index)\n      kernel.assignCoeff(index);\n  }\n};\n\ntemplate<typename Kernel>\nstruct dense_assignment_loop<Kernel, LinearVectorizedTraversal, NoUnrolling>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)\n  {\n    const Index size = kernel.size();\n    typedef typename Kernel::Scalar Scalar;\n    typedef typename Kernel::PacketType PacketType;\n    enum {\n      requestedAlignment = Kernel::AssignmentTraits::LinearRequiredAlignment,\n      packetSize = unpacket_traits<PacketType>::size,\n      dstIsAligned = int(Kernel::AssignmentTraits::DstAlignment)>=int(requestedAlignment),\n      dstAlignment = packet_traits<Scalar>::AlignedOnScalar ? 
int(requestedAlignment)\n                                                            : int(Kernel::AssignmentTraits::DstAlignment),\n      srcAlignment = Kernel::AssignmentTraits::JointAlignment\n    };\n    const Index alignedStart = dstIsAligned ? 0 : internal::first_aligned<requestedAlignment>(kernel.dstDataPtr(), size);\n    const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;\n\n    unaligned_dense_assignment_loop<dstIsAligned!=0>::run(kernel, 0, alignedStart);\n\n    for(Index index = alignedStart; index < alignedEnd; index += packetSize)\n      kernel.template assignPacket<dstAlignment, srcAlignment, PacketType>(index);\n\n    unaligned_dense_assignment_loop<>::run(kernel, alignedEnd, size);\n  }\n};\n\ntemplate<typename Kernel>\nstruct dense_assignment_loop<Kernel, LinearVectorizedTraversal, CompleteUnrolling>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)\n  {\n    typedef typename Kernel::DstEvaluatorType::XprType DstXprType;\n    typedef typename Kernel::PacketType PacketType;\n    \n    enum { size = DstXprType::SizeAtCompileTime,\n           packetSize =unpacket_traits<PacketType>::size,\n           alignedSize = (size/packetSize)*packetSize };\n\n    copy_using_evaluator_innervec_CompleteUnrolling<Kernel, 0, alignedSize>::run(kernel);\n    copy_using_evaluator_DefaultTraversal_CompleteUnrolling<Kernel, alignedSize, size>::run(kernel);\n  }\n};\n\n/**************************\n*** Inner vectorization ***\n**************************/\n\ntemplate<typename Kernel>\nstruct dense_assignment_loop<Kernel, InnerVectorizedTraversal, NoUnrolling>\n{\n  typedef typename Kernel::PacketType PacketType;\n  enum {\n    SrcAlignment = Kernel::AssignmentTraits::SrcAlignment,\n    DstAlignment = Kernel::AssignmentTraits::DstAlignment\n  };\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)\n  {\n    const Index innerSize = kernel.innerSize();\n    const Index outerSize = 
kernel.outerSize();\n    const Index packetSize = unpacket_traits<PacketType>::size;\n    for(Index outer = 0; outer < outerSize; ++outer)\n      for(Index inner = 0; inner < innerSize; inner+=packetSize)\n        kernel.template assignPacketByOuterInner<DstAlignment, SrcAlignment, PacketType>(outer, inner);\n  }\n};\n\ntemplate<typename Kernel>\nstruct dense_assignment_loop<Kernel, InnerVectorizedTraversal, CompleteUnrolling>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)\n  {\n    typedef typename Kernel::DstEvaluatorType::XprType DstXprType;\n    copy_using_evaluator_innervec_CompleteUnrolling<Kernel, 0, DstXprType::SizeAtCompileTime>::run(kernel);\n  }\n};\n\ntemplate<typename Kernel>\nstruct dense_assignment_loop<Kernel, InnerVectorizedTraversal, InnerUnrolling>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)\n  {\n    typedef typename Kernel::DstEvaluatorType::XprType DstXprType;\n    typedef typename Kernel::AssignmentTraits Traits;\n    const Index outerSize = kernel.outerSize();\n    for(Index outer = 0; outer < outerSize; ++outer)\n      copy_using_evaluator_innervec_InnerUnrolling<Kernel, 0, DstXprType::InnerSizeAtCompileTime,\n                                                   Traits::SrcAlignment, Traits::DstAlignment>::run(kernel, outer);\n  }\n};\n\n/***********************\n*** Linear traversal ***\n***********************/\n\ntemplate<typename Kernel>\nstruct dense_assignment_loop<Kernel, LinearTraversal, NoUnrolling>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)\n  {\n    const Index size = kernel.size();\n    for(Index i = 0; i < size; ++i)\n      kernel.assignCoeff(i);\n  }\n};\n\ntemplate<typename Kernel>\nstruct dense_assignment_loop<Kernel, LinearTraversal, CompleteUnrolling>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)\n  {\n    typedef typename Kernel::DstEvaluatorType::XprType DstXprType;\n    
copy_using_evaluator_LinearTraversal_CompleteUnrolling<Kernel, 0, DstXprType::SizeAtCompileTime>::run(kernel);\n  }\n};\n\n/**************************\n*** Slice vectorization ***\n***************************/\n\ntemplate<typename Kernel>\nstruct dense_assignment_loop<Kernel, SliceVectorizedTraversal, NoUnrolling>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)\n  {\n    typedef typename Kernel::Scalar Scalar;\n    typedef typename Kernel::PacketType PacketType;\n    enum {\n      packetSize = unpacket_traits<PacketType>::size,\n      requestedAlignment = int(Kernel::AssignmentTraits::InnerRequiredAlignment),\n      alignable = packet_traits<Scalar>::AlignedOnScalar || int(Kernel::AssignmentTraits::DstAlignment)>=sizeof(Scalar),\n      dstIsAligned = int(Kernel::AssignmentTraits::DstAlignment)>=int(requestedAlignment),\n      dstAlignment = alignable ? int(requestedAlignment)\n                               : int(Kernel::AssignmentTraits::DstAlignment)\n    };\n    const Scalar *dst_ptr = kernel.dstDataPtr();\n    if((!bool(dstIsAligned)) && (UIntPtr(dst_ptr) % sizeof(Scalar))>0)\n    {\n      // the pointer is not aligend-on scalar, so alignment is not possible\n      return dense_assignment_loop<Kernel,DefaultTraversal,NoUnrolling>::run(kernel);\n    }\n    const Index packetAlignedMask = packetSize - 1;\n    const Index innerSize = kernel.innerSize();\n    const Index outerSize = kernel.outerSize();\n    const Index alignedStep = alignable ? (packetSize - kernel.outerStride() % packetSize) & packetAlignedMask : 0;\n    Index alignedStart = ((!alignable) || bool(dstIsAligned)) ? 
0 : internal::first_aligned<requestedAlignment>(dst_ptr, innerSize);\n\n    for(Index outer = 0; outer < outerSize; ++outer)\n    {\n      const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);\n      // do the non-vectorizable part of the assignment\n      for(Index inner = 0; inner<alignedStart ; ++inner)\n        kernel.assignCoeffByOuterInner(outer, inner);\n\n      // do the vectorizable part of the assignment\n      for(Index inner = alignedStart; inner<alignedEnd; inner+=packetSize)\n        kernel.template assignPacketByOuterInner<dstAlignment, Unaligned, PacketType>(outer, inner);\n\n      // do the non-vectorizable part of the assignment\n      for(Index inner = alignedEnd; inner<innerSize ; ++inner)\n        kernel.assignCoeffByOuterInner(outer, inner);\n\n      alignedStart = numext::mini((alignedStart+alignedStep)%packetSize, innerSize);\n    }\n  }\n};\n\n#if EIGEN_UNALIGNED_VECTORIZE\ntemplate<typename Kernel>\nstruct dense_assignment_loop<Kernel, SliceVectorizedTraversal, InnerUnrolling>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)\n  {\n    typedef typename Kernel::DstEvaluatorType::XprType DstXprType;\n    typedef typename Kernel::PacketType PacketType;\n\n    enum { size = DstXprType::InnerSizeAtCompileTime,\n           packetSize =unpacket_traits<PacketType>::size,\n           vectorizableSize = (size/packetSize)*packetSize };\n\n    for(Index outer = 0; outer < kernel.outerSize(); ++outer)\n    {\n      copy_using_evaluator_innervec_InnerUnrolling<Kernel, 0, vectorizableSize, 0, 0>::run(kernel, outer);\n      copy_using_evaluator_DefaultTraversal_InnerUnrolling<Kernel, vectorizableSize, size>::run(kernel, outer);\n    }\n  }\n};\n#endif\n\n\n/***************************************************************************\n* Part 4 : Generic dense assignment kernel\n***************************************************************************/\n\n// This class generalize the assignment 
of a coefficient (or packet) from one dense evaluator\n// to another dense writable evaluator.\n// It is parametrized by the two evaluators, and the actual assignment functor.\n// This abstraction level permits to keep the evaluation loops as simple and as generic as possible.\n// One can customize the assignment using this generic dense_assignment_kernel with different\n// functors, or by completely overloading it, by-passing a functor.\ntemplate<typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT, typename Functor, int Version = Specialized>\nclass generic_dense_assignment_kernel\n{\nprotected:\n  typedef typename DstEvaluatorTypeT::XprType DstXprType;\n  typedef typename SrcEvaluatorTypeT::XprType SrcXprType;\npublic:\n  \n  typedef DstEvaluatorTypeT DstEvaluatorType;\n  typedef SrcEvaluatorTypeT SrcEvaluatorType;\n  typedef typename DstEvaluatorType::Scalar Scalar;\n  typedef copy_using_evaluator_traits<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor> AssignmentTraits;\n  typedef typename AssignmentTraits::PacketType PacketType;\n  \n  \n  EIGEN_DEVICE_FUNC generic_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)\n    : m_dst(dst), m_src(src), m_functor(func), m_dstExpr(dstExpr)\n  {\n    #ifdef EIGEN_DEBUG_ASSIGN\n    AssignmentTraits::debug();\n    #endif\n  }\n  \n  EIGEN_DEVICE_FUNC Index size() const        { return m_dstExpr.size(); }\n  EIGEN_DEVICE_FUNC Index innerSize() const   { return m_dstExpr.innerSize(); }\n  EIGEN_DEVICE_FUNC Index outerSize() const   { return m_dstExpr.outerSize(); }\n  EIGEN_DEVICE_FUNC Index rows() const        { return m_dstExpr.rows(); }\n  EIGEN_DEVICE_FUNC Index cols() const        { return m_dstExpr.cols(); }\n  EIGEN_DEVICE_FUNC Index outerStride() const { return m_dstExpr.outerStride(); }\n  \n  EIGEN_DEVICE_FUNC DstEvaluatorType& dstEvaluator() { return m_dst; }\n  EIGEN_DEVICE_FUNC const SrcEvaluatorType& srcEvaluator() const { return m_src; }\n  \n 
 /// Assign src(row,col) to dst(row,col) through the assignment functor.\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index row, Index col)\n  {\n    m_functor.assignCoeff(m_dst.coeffRef(row,col), m_src.coeff(row,col));\n  }\n  \n  /// \\sa assignCoeff(Index,Index)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Index index)\n  {\n    m_functor.assignCoeff(m_dst.coeffRef(index), m_src.coeff(index));\n  }\n  \n  /// \\sa assignCoeff(Index,Index)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeffByOuterInner(Index outer, Index inner)\n  {\n    Index row = rowIndexByOuterInner(outer, inner); \n    Index col = colIndexByOuterInner(outer, inner); \n    assignCoeff(row, col);\n  }\n  \n  \n  template<int StoreMode, int LoadMode, typename PacketType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index row, Index col)\n  {\n    m_functor.template assignPacket<StoreMode>(&m_dst.coeffRef(row,col), m_src.template packet<LoadMode,PacketType>(row,col));\n  }\n  \n  template<int StoreMode, int LoadMode, typename PacketType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacket(Index index)\n  {\n    m_functor.template assignPacket<StoreMode>(&m_dst.coeffRef(index), m_src.template packet<LoadMode,PacketType>(index));\n  }\n  \n  template<int StoreMode, int LoadMode, typename PacketType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignPacketByOuterInner(Index outer, Index inner)\n  {\n    Index row = rowIndexByOuterInner(outer, inner); \n    Index col = colIndexByOuterInner(outer, inner);\n    assignPacket<StoreMode,LoadMode,PacketType>(row, col);\n  }\n  \n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner)\n  {\n    typedef typename DstEvaluatorType::ExpressionTraits Traits;\n    return int(Traits::RowsAtCompileTime) == 1 ? 0\n      : int(Traits::ColsAtCompileTime) == 1 ? inner\n      : int(DstEvaluatorType::Flags)&RowMajorBit ? 
outer\n      : inner;\n  }\n\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner)\n  {\n    typedef typename DstEvaluatorType::ExpressionTraits Traits;\n    return int(Traits::ColsAtCompileTime) == 1 ? 0\n      : int(Traits::RowsAtCompileTime) == 1 ? inner\n      : int(DstEvaluatorType::Flags)&RowMajorBit ? inner\n      : outer;\n  }\n\n  EIGEN_DEVICE_FUNC const Scalar* dstDataPtr() const\n  {\n    return m_dstExpr.data();\n  }\n  \nprotected:\n  DstEvaluatorType& m_dst;\n  const SrcEvaluatorType& m_src;\n  const Functor &m_functor;\n  // TODO find a way to avoid the needs of the original expression\n  DstXprType& m_dstExpr;\n};\n\n/***************************************************************************\n* Part 5 : Entry point for dense rectangular assignment\n***************************************************************************/\n\ntemplate<typename DstXprType,typename SrcXprType, typename Functor>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nvoid resize_if_allowed(DstXprType &dst, const SrcXprType& src, const Functor &/*func*/)\n{\n  EIGEN_ONLY_USED_FOR_DEBUG(dst);\n  EIGEN_ONLY_USED_FOR_DEBUG(src);\n  eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());\n}\n\ntemplate<typename DstXprType,typename SrcXprType, typename T1, typename T2>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nvoid resize_if_allowed(DstXprType &dst, const SrcXprType& src, const internal::assign_op<T1,T2> &/*func*/)\n{\n  Index dstRows = src.rows();\n  Index dstCols = src.cols();\n  if(((dst.rows()!=dstRows) || (dst.cols()!=dstCols)))\n    dst.resize(dstRows, dstCols);\n  eigen_assert(dst.rows() == dstRows && dst.cols() == dstCols);\n}\n\ntemplate<typename DstXprType, typename SrcXprType, typename Functor>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType& dst, const SrcXprType& src, const Functor &func)\n{\n  typedef evaluator<DstXprType> DstEvaluatorType;\n  typedef evaluator<SrcXprType> 
SrcEvaluatorType;\n\n  SrcEvaluatorType srcEvaluator(src);\n\n  // NOTE To properly handle A = (A*A.transpose())/s with A rectangular,\n  // we need to resize the destination after the source evaluator has been created.\n  resize_if_allowed(dst, src, func);\n\n  DstEvaluatorType dstEvaluator(dst);\n    \n  typedef generic_dense_assignment_kernel<DstEvaluatorType,SrcEvaluatorType,Functor> Kernel;\n  Kernel kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());\n\n  dense_assignment_loop<Kernel>::run(kernel);\n}\n\ntemplate<typename DstXprType, typename SrcXprType>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType& dst, const SrcXprType& src)\n{\n  call_dense_assignment_loop(dst, src, internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>());\n}\n\n/***************************************************************************\n* Part 6 : Generic assignment\n***************************************************************************/\n\n// Based on the respective shapes of the destination and source,\n// the class AssignmentKind determine the kind of assignment mechanism.\n// AssignmentKind must define a Kind typedef.\ntemplate<typename DstShape, typename SrcShape> struct AssignmentKind;\n\n// Assignement kind defined in this file:\nstruct Dense2Dense {};\nstruct EigenBase2EigenBase {};\n\ntemplate<typename,typename> struct AssignmentKind { typedef EigenBase2EigenBase Kind; };\ntemplate<> struct AssignmentKind<DenseShape,DenseShape> { typedef Dense2Dense Kind; };\n    \n// This is the main assignment class\ntemplate< typename DstXprType, typename SrcXprType, typename Functor,\n          typename Kind = typename AssignmentKind< typename evaluator_traits<DstXprType>::Shape , typename evaluator_traits<SrcXprType>::Shape >::Kind,\n          typename EnableIf = void>\nstruct Assignment;\n\n\n// The only purpose of this call_assignment() function is to deal with noalias() / \"assume-aliasing\" and automatic 
transposition.\n// Indeed, I (Gael) think that this concept of \"assume-aliasing\" was a mistake, and it makes thing quite complicated.\n// So this intermediate function removes everything related to \"assume-aliasing\" such that Assignment\n// does not has to bother about these annoying details.\n\ntemplate<typename Dst, typename Src>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nvoid call_assignment(Dst& dst, const Src& src)\n{\n  call_assignment(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>());\n}\ntemplate<typename Dst, typename Src>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nvoid call_assignment(const Dst& dst, const Src& src)\n{\n  call_assignment(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>());\n}\n                     \n// Deal with \"assume-aliasing\"\ntemplate<typename Dst, typename Src, typename Func>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nvoid call_assignment(Dst& dst, const Src& src, const Func& func, typename enable_if< evaluator_assume_aliasing<Src>::value, void*>::type = 0)\n{\n  typename plain_matrix_type<Src>::type tmp(src);\n  call_assignment_no_alias(dst, tmp, func);\n}\n\ntemplate<typename Dst, typename Src, typename Func>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nvoid call_assignment(Dst& dst, const Src& src, const Func& func, typename enable_if<!evaluator_assume_aliasing<Src>::value, void*>::type = 0)\n{\n  call_assignment_no_alias(dst, src, func);\n}\n\n// by-pass \"assume-aliasing\"\n// When there is no aliasing, we require that 'dst' has been properly resized\ntemplate<typename Dst, template <typename> class StorageBase, typename Src, typename Func>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nvoid call_assignment(NoAlias<Dst,StorageBase>& dst, const Src& src, const Func& func)\n{\n  call_assignment_no_alias(dst.expression(), src, func);\n}\n\n\ntemplate<typename Dst, typename Src, typename Func>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nvoid call_assignment_no_alias(Dst& dst, const Src& src, const 
Func& func)\n{\n  enum {\n    NeedToTranspose = (    (int(Dst::RowsAtCompileTime) == 1 && int(Src::ColsAtCompileTime) == 1)\n                        || (int(Dst::ColsAtCompileTime) == 1 && int(Src::RowsAtCompileTime) == 1)\n                      ) && int(Dst::SizeAtCompileTime) != 1\n  };\n\n  typedef typename internal::conditional<NeedToTranspose, Transpose<Dst>, Dst>::type ActualDstTypeCleaned;\n  typedef typename internal::conditional<NeedToTranspose, Transpose<Dst>, Dst&>::type ActualDstType;\n  ActualDstType actualDst(dst);\n  \n  // TODO check whether this is the right place to perform these checks:\n  EIGEN_STATIC_ASSERT_LVALUE(Dst)\n  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(ActualDstTypeCleaned,Src)\n  EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename ActualDstTypeCleaned::Scalar,typename Src::Scalar);\n  \n  Assignment<ActualDstTypeCleaned,Src,Func>::run(actualDst, src, func);\n}\ntemplate<typename Dst, typename Src>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nvoid call_assignment_no_alias(Dst& dst, const Src& src)\n{\n  call_assignment_no_alias(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>());\n}\n\ntemplate<typename Dst, typename Src, typename Func>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nvoid call_assignment_no_alias_no_transpose(Dst& dst, const Src& src, const Func& func)\n{\n  // TODO check whether this is the right place to perform these checks:\n  EIGEN_STATIC_ASSERT_LVALUE(Dst)\n  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Dst,Src)\n  EIGEN_CHECK_BINARY_COMPATIBILIY(Func,typename Dst::Scalar,typename Src::Scalar);\n\n  Assignment<Dst,Src,Func>::run(dst, src, func);\n}\ntemplate<typename Dst, typename Src>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nvoid call_assignment_no_alias_no_transpose(Dst& dst, const Src& src)\n{\n  call_assignment_no_alias_no_transpose(dst, src, internal::assign_op<typename Dst::Scalar,typename Src::Scalar>());\n}\n\n// forward declaration\ntemplate<typename Dst, typename Src> void check_for_aliasing(const Dst &dst, 
const Src &src);\n\n// Generic Dense to Dense assignment\n// Note that the last template argument \"Weak\" is needed to make it possible to perform\n// both partial specialization+SFINAE without ambiguous specialization\ntemplate< typename DstXprType, typename SrcXprType, typename Functor, typename Weak>\nstruct Assignment<DstXprType, SrcXprType, Functor, Dense2Dense, Weak>\n{\n  EIGEN_DEVICE_FUNC\n  static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const Functor &func)\n  {\n#ifndef EIGEN_NO_DEBUG\n    internal::check_for_aliasing(dst, src);\n#endif\n    \n    call_dense_assignment_loop(dst, src, func);\n  }\n};\n\n// Generic assignment through evalTo.\n// TODO: not sure we have to keep that one, but it helps porting current code to new evaluator mechanism.\n// Note that the last template argument \"Weak\" is needed to make it possible to perform\n// both partial specialization+SFINAE without ambiguous specialization\ntemplate< typename DstXprType, typename SrcXprType, typename Functor, typename Weak>\nstruct Assignment<DstXprType, SrcXprType, Functor, EigenBase2EigenBase, Weak>\n{\n  EIGEN_DEVICE_FUNC\n  static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n\n    eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());\n    src.evalTo(dst);\n  }\n\n  // NOTE The following two functions are templated to avoid their instanciation if not needed\n  //      This is needed because some expressions supports evalTo only and/or have 'void' as scalar type.\n  template<typename SrcScalarType>\n  EIGEN_DEVICE_FUNC\n  static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,SrcScalarType> 
&/*func*/)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n\n    eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());\n    src.addTo(dst);\n  }\n\n  template<typename SrcScalarType>\n  EIGEN_DEVICE_FUNC\n  static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,SrcScalarType> &/*func*/)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n\n    eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());\n    src.subTo(dst);\n  }\n};\n\n} // namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_ASSIGN_EVALUATOR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Assign_MKL.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n \n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to Intel(R) MKL\n *   MKL VML support for coefficient-wise unary Eigen expressions like a=b.sin()\n ********************************************************************************\n*/\n\n#ifndef EIGEN_ASSIGN_VML_H\n#define EIGEN_ASSIGN_VML_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename Dst, typename Src>\nclass vml_assign_traits\n{\n  private:\n    enum {\n      DstHasDirectAccess = Dst::Flags & DirectAccessBit,\n      SrcHasDirectAccess = Src::Flags & DirectAccessBit,\n      StorageOrdersAgree = (int(Dst::IsRowMajor) == int(Src::IsRowMajor)),\n      InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime)\n                : int(Dst::Flags)&RowMajorBit ? int(Dst::ColsAtCompileTime)\n                : int(Dst::RowsAtCompileTime),\n      InnerMaxSize  = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime)\n                    : int(Dst::Flags)&RowMajorBit ? int(Dst::MaxColsAtCompileTime)\n                    : int(Dst::MaxRowsAtCompileTime),\n      MaxSizeAtCompileTime = Dst::SizeAtCompileTime,\n\n      MightEnableVml = StorageOrdersAgree && DstHasDirectAccess && SrcHasDirectAccess && Src::InnerStrideAtCompileTime==1 && Dst::InnerStrideAtCompileTime==1,\n      MightLinearize = MightEnableVml && (int(Dst::Flags) & int(Src::Flags) & LinearAccessBit),\n      VmlSize = MightLinearize ? 
MaxSizeAtCompileTime : InnerMaxSize,\n      LargeEnough = VmlSize==Dynamic || VmlSize>=EIGEN_MKL_VML_THRESHOLD\n    };\n  public:\n    enum {\n      EnableVml = MightEnableVml && LargeEnough,\n      Traversal = MightLinearize ? LinearTraversal : DefaultTraversal\n    };\n};\n\n#define EIGEN_PP_EXPAND(ARG) ARG\n#if !defined (EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1)\n#define EIGEN_VMLMODE_EXPAND_LA , VML_HA\n#else\n#define EIGEN_VMLMODE_EXPAND_LA , VML_LA\n#endif\n\n#define EIGEN_VMLMODE_EXPAND__ \n\n#define EIGEN_VMLMODE_PREFIX_LA vm\n#define EIGEN_VMLMODE_PREFIX__  v\n#define EIGEN_VMLMODE_PREFIX(VMLMODE) EIGEN_CAT(EIGEN_VMLMODE_PREFIX_,VMLMODE)\n\n#define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE)                                           \\\n  template< typename DstXprType, typename SrcXprNested>                                                                         \\\n  struct Assignment<DstXprType, CwiseUnaryOp<scalar_##EIGENOP##_op<EIGENTYPE>, SrcXprNested>, assign_op<EIGENTYPE,EIGENTYPE>,   \\\n                   Dense2Dense, typename enable_if<vml_assign_traits<DstXprType,SrcXprNested>::EnableVml>::type> {              \\\n    typedef CwiseUnaryOp<scalar_##EIGENOP##_op<EIGENTYPE>, SrcXprNested> SrcXprType;                                            \\\n    static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE,EIGENTYPE> &/*func*/) {                   \\\n      eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());                                                       \\\n      if(vml_assign_traits<DstXprType,SrcXprNested>::Traversal==LinearTraversal) {                                              \\\n        VMLOP(dst.size(), (const VMLTYPE*)src.nestedExpression().data(),                                                        \\\n              (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE) );                                           \\\n      } else {       
                                                                                                           \\\n        const Index outerSize = dst.outerSize();                                                                                \\\n        for(Index outer = 0; outer < outerSize; ++outer) {                                                                      \\\n          const EIGENTYPE *src_ptr = src.IsRowMajor ? &(src.nestedExpression().coeffRef(outer,0)) :                             \\\n                                                      &(src.nestedExpression().coeffRef(0, outer));                             \\\n          EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer));                           \\\n          VMLOP( dst.innerSize(), (const VMLTYPE*)src_ptr,                                                                      \\\n                (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE));                                             \\\n        }                                                                                                                       \\\n      }                                                                                                                         \\\n    }                                                                                                                           \\\n  };                                                                                                                            \\\n\n\n#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP, VMLMODE)                                                         \\\n  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),s##VMLOP), float, float, VMLMODE)           \\\n  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),d##VMLOP), double, double, VMLMODE)\n\n#define 
EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(EIGENOP, VMLOP, VMLMODE)                                                         \\\n  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),c##VMLOP), scomplex, MKL_Complex8, VMLMODE) \\\n  EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, EIGEN_CAT(EIGEN_VMLMODE_PREFIX(VMLMODE),z##VMLOP), dcomplex, MKL_Complex16, VMLMODE)\n  \n#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS(EIGENOP, VMLOP, VMLMODE)                                                              \\\n  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP, VMLMODE)                                                               \\\n  EIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(EIGENOP, VMLOP, VMLMODE)\n\n  \nEIGEN_MKL_VML_DECLARE_UNARY_CALLS(sin,   Sin,   LA)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS(asin,  Asin,  LA)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS(sinh,  Sinh,  LA)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS(cos,   Cos,   LA)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS(acos,  Acos,  LA)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS(cosh,  Cosh,  LA)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS(tan,   Tan,   LA)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS(atan,  Atan,  LA)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS(tanh,  Tanh,  LA)\n// EIGEN_MKL_VML_DECLARE_UNARY_CALLS(abs,   Abs,    _)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS(exp,   Exp,   LA)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS(log,   Ln,    LA)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS(log10, Log10, LA)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS(sqrt,  Sqrt,  _)\n\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr,   _)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS_CPLX(arg, Arg,      _)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(round, Round,  _)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(floor, Floor,  _)\nEIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(ceil,  Ceil,   _)\n\n#define EIGEN_MKL_VML_DECLARE_POW_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE, VMLMODE)                                           \\\n  template< typename DstXprType, typename SrcXprNested, typename Plain>                                  
                     \\\n  struct Assignment<DstXprType, CwiseBinaryOp<scalar_##EIGENOP##_op<EIGENTYPE,EIGENTYPE>, SrcXprNested,                       \\\n                    const CwiseNullaryOp<internal::scalar_constant_op<EIGENTYPE>,Plain> >, assign_op<EIGENTYPE,EIGENTYPE>,    \\\n                   Dense2Dense, typename enable_if<vml_assign_traits<DstXprType,SrcXprNested>::EnableVml>::type> {            \\\n    typedef CwiseBinaryOp<scalar_##EIGENOP##_op<EIGENTYPE,EIGENTYPE>, SrcXprNested,                                           \\\n                    const CwiseNullaryOp<internal::scalar_constant_op<EIGENTYPE>,Plain> > SrcXprType;                         \\\n    static void run(DstXprType &dst, const SrcXprType &src, const assign_op<EIGENTYPE,EIGENTYPE> &/*func*/) {                 \\\n      eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());                                                     \\\n      VMLTYPE exponent = reinterpret_cast<const VMLTYPE&>(src.rhs().functor().m_other);                                       \\\n      if(vml_assign_traits<DstXprType,SrcXprNested>::Traversal==LinearTraversal)                                              \\\n      {                                                                                                                       \\\n        VMLOP( dst.size(), (const VMLTYPE*)src.lhs().data(), exponent,                                                        \\\n              (VMLTYPE*)dst.data() EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE) );                                         \\\n      } else {                                                                                                                \\\n        const Index outerSize = dst.outerSize();                                                                              \\\n        for(Index outer = 0; outer < outerSize; ++outer) {                                                                    \\\n          const 
EIGENTYPE *src_ptr = src.IsRowMajor ? &(src.lhs().coeffRef(outer,0)) :                                        \\\n                                                      &(src.lhs().coeffRef(0, outer));                                        \\\n          EIGENTYPE *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer));                         \\\n          VMLOP( dst.innerSize(), (const VMLTYPE*)src_ptr, exponent,                                                          \\\n                 (VMLTYPE*)dst_ptr EIGEN_PP_EXPAND(EIGEN_VMLMODE_EXPAND_##VMLMODE));                                          \\\n        }                                                                                                                     \\\n      }                                                                                                                       \\\n    }                                                                                                                         \\\n  };\n  \nEIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmsPowx, float,    float,         LA)\nEIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdPowx, double,   double,        LA)\nEIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcPowx, scomplex, MKL_Complex8,  LA)\nEIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmzPowx, dcomplex, MKL_Complex16, LA)\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_ASSIGN_VML_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/BandMatrix.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_BANDMATRIX_H\n#define EIGEN_BANDMATRIX_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename Derived>\nclass BandMatrixBase : public EigenBase<Derived>\n{\n  public:\n\n    enum {\n      Flags = internal::traits<Derived>::Flags,\n      CoeffReadCost = internal::traits<Derived>::CoeffReadCost,\n      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,\n      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,\n      MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,\n      Supers = internal::traits<Derived>::Supers,\n      Subs   = internal::traits<Derived>::Subs,\n      Options = internal::traits<Derived>::Options\n    };\n    typedef typename internal::traits<Derived>::Scalar Scalar;\n    typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType;\n    typedef typename DenseMatrixType::StorageIndex StorageIndex;\n    typedef typename internal::traits<Derived>::CoefficientsType CoefficientsType;\n    typedef EigenBase<Derived> Base;\n\n  protected:\n    enum {\n      DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic))\n                            ? 
1 + Supers + Subs\n                            : Dynamic,\n      SizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime)\n    };\n\n  public:\n    \n    using Base::derived;\n    using Base::rows;\n    using Base::cols;\n\n    /** \\returns the number of super diagonals */\n    inline Index supers() const { return derived().supers(); }\n\n    /** \\returns the number of sub diagonals */\n    inline Index subs() const { return derived().subs(); }\n    \n    /** \\returns an expression of the underlying coefficient matrix */\n    inline const CoefficientsType& coeffs() const { return derived().coeffs(); }\n    \n    /** \\returns an expression of the underlying coefficient matrix */\n    inline CoefficientsType& coeffs() { return derived().coeffs(); }\n\n    /** \\returns a vector expression of the \\a i -th column,\n      * only the meaningful part is returned.\n      * \\warning the internal storage must be column major. */\n    inline Block<CoefficientsType,Dynamic,1> col(Index i)\n    {\n      EIGEN_STATIC_ASSERT((Options&RowMajor)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);\n      Index start = 0;\n      Index len = coeffs().rows();\n      if (i<=supers())\n      {\n        start = supers()-i;\n        len = (std::min)(rows(),std::max<Index>(0,coeffs().rows() - (supers()-i)));\n      }\n      else if (i>=rows()-subs())\n        len = std::max<Index>(0,coeffs().rows() - (i + 1 - rows() + subs()));\n      return Block<CoefficientsType,Dynamic,1>(coeffs(), start, i, len, 1);\n    }\n\n    /** \\returns a vector expression of the main diagonal */\n    inline Block<CoefficientsType,1,SizeAtCompileTime> diagonal()\n    { return Block<CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); }\n\n    /** \\returns a vector expression of the main diagonal (const version) */\n    inline const Block<const CoefficientsType,1,SizeAtCompileTime> diagonal() const\n    { return Block<const 
CoefficientsType,1,SizeAtCompileTime>(coeffs(),supers(),0,1,(std::min)(rows(),cols())); }\n\n    template<int Index> struct DiagonalIntReturnType {\n      enum {\n        ReturnOpposite = (Options&SelfAdjoint) && (((Index)>0 && Supers==0) || ((Index)<0 && Subs==0)),\n        Conjugate = ReturnOpposite && NumTraits<Scalar>::IsComplex,\n        ActualIndex = ReturnOpposite ? -Index : Index,\n        DiagonalSize = (RowsAtCompileTime==Dynamic || ColsAtCompileTime==Dynamic)\n                     ? Dynamic\n                     : (ActualIndex<0\n                     ? EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime, RowsAtCompileTime + ActualIndex)\n                     : EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime - ActualIndex))\n      };\n      typedef Block<CoefficientsType,1, DiagonalSize> BuildType;\n      typedef typename internal::conditional<Conjugate,\n                 CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>,BuildType >,\n                 BuildType>::type Type;\n    };\n\n    /** \\returns a vector expression of the \\a N -th sub or super diagonal */\n    template<int N> inline typename DiagonalIntReturnType<N>::Type diagonal()\n    {\n      return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N));\n    }\n\n    /** \\returns a vector expression of the \\a N -th sub or super diagonal */\n    template<int N> inline const typename DiagonalIntReturnType<N>::Type diagonal() const\n    {\n      return typename DiagonalIntReturnType<N>::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N));\n    }\n\n    /** \\returns a vector expression of the \\a i -th sub or super diagonal */\n    inline Block<CoefficientsType,1,Dynamic> diagonal(Index i)\n    {\n      eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers()));\n      return Block<CoefficientsType,1,Dynamic>(coeffs(), supers()-i, std::max<Index>(0,i), 1, diagonalLength(i));\n    }\n\n    /** \\returns a 
vector expression of the \\a i -th sub or super diagonal */\n    inline const Block<const CoefficientsType,1,Dynamic> diagonal(Index i) const\n    {\n      eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers()));\n      return Block<const CoefficientsType,1,Dynamic>(coeffs(), supers()-i, std::max<Index>(0,i), 1, diagonalLength(i));\n    }\n    \n    template<typename Dest> inline void evalTo(Dest& dst) const\n    {\n      dst.resize(rows(),cols());\n      dst.setZero();\n      dst.diagonal() = diagonal();\n      for (Index i=1; i<=supers();++i)\n        dst.diagonal(i) = diagonal(i);\n      for (Index i=1; i<=subs();++i)\n        dst.diagonal(-i) = diagonal(-i);\n    }\n\n    DenseMatrixType toDenseMatrix() const\n    {\n      DenseMatrixType res(rows(),cols());\n      evalTo(res);\n      return res;\n    }\n\n  protected:\n\n    inline Index diagonalLength(Index i) const\n    { return i<0 ? (std::min)(cols(),rows()+i) : (std::min)(rows(),cols()-i); }\n};\n\n/**\n  * \\class BandMatrix\n  * \\ingroup Core_Module\n  *\n  * \\brief Represents a rectangular matrix with a banded storage\n  *\n  * \\tparam _Scalar Numeric type, i.e. float, double, int\n  * \\tparam _Rows Number of rows, or \\b Dynamic\n  * \\tparam _Cols Number of columns, or \\b Dynamic\n  * \\tparam _Supers Number of super diagonal\n  * \\tparam _Subs Number of sub diagonal\n  * \\tparam _Options A combination of either \\b #RowMajor or \\b #ColMajor, and of \\b #SelfAdjoint\n  *                  The former controls \\ref TopicStorageOrders \"storage order\", and defaults to\n  *                  column-major. 
The latter controls whether the matrix represents a selfadjoint\n  *                  matrix in which case either Supers or Subs have to be null.\n  *\n  * \\sa class TridiagonalMatrix\n  */\n\ntemplate<typename _Scalar, int _Rows, int _Cols, int _Supers, int _Subs, int _Options>\nstruct traits<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> >\n{\n  typedef _Scalar Scalar;\n  typedef Dense StorageKind;\n  typedef Eigen::Index StorageIndex;\n  enum {\n    CoeffReadCost = NumTraits<Scalar>::ReadCost,\n    RowsAtCompileTime = _Rows,\n    ColsAtCompileTime = _Cols,\n    MaxRowsAtCompileTime = _Rows,\n    MaxColsAtCompileTime = _Cols,\n    Flags = LvalueBit,\n    Supers = _Supers,\n    Subs = _Subs,\n    Options = _Options,\n    DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic\n  };\n  typedef Matrix<Scalar,DataRowsAtCompileTime,ColsAtCompileTime,Options&RowMajor?RowMajor:ColMajor> CoefficientsType;\n};\n\ntemplate<typename _Scalar, int Rows, int Cols, int Supers, int Subs, int Options>\nclass BandMatrix : public BandMatrixBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Options> >\n{\n  public:\n\n    typedef typename internal::traits<BandMatrix>::Scalar Scalar;\n    typedef typename internal::traits<BandMatrix>::StorageIndex StorageIndex;\n    typedef typename internal::traits<BandMatrix>::CoefficientsType CoefficientsType;\n\n    explicit inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs)\n      : m_coeffs(1+supers+subs,cols),\n        m_rows(rows), m_supers(supers), m_subs(subs)\n    {\n    }\n\n    /** \\returns the number of rows */\n    inline Index rows() const { return m_rows.value(); }\n\n    /** \\returns the number of columns */\n    inline Index cols() const { return m_coeffs.cols(); }\n\n    /** \\returns the number of super diagonals */\n    inline Index supers() const { return m_supers.value(); }\n\n    /** \\returns the number of sub diagonals */\n    inline Index 
subs() const { return m_subs.value(); }\n\n    inline const CoefficientsType& coeffs() const { return m_coeffs; }\n    inline CoefficientsType& coeffs() { return m_coeffs; }\n\n  protected:\n\n    CoefficientsType m_coeffs;\n    internal::variable_if_dynamic<Index, Rows>   m_rows;\n    internal::variable_if_dynamic<Index, Supers> m_supers;\n    internal::variable_if_dynamic<Index, Subs>   m_subs;\n};\n\ntemplate<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options>\nclass BandMatrixWrapper;\n\ntemplate<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options>\nstruct traits<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> >\n{\n  typedef typename _CoefficientsType::Scalar Scalar;\n  typedef typename _CoefficientsType::StorageKind StorageKind;\n  typedef typename _CoefficientsType::StorageIndex StorageIndex;\n  enum {\n    CoeffReadCost = internal::traits<_CoefficientsType>::CoeffReadCost,\n    RowsAtCompileTime = _Rows,\n    ColsAtCompileTime = _Cols,\n    MaxRowsAtCompileTime = _Rows,\n    MaxColsAtCompileTime = _Cols,\n    Flags = LvalueBit,\n    Supers = _Supers,\n    Subs = _Subs,\n    Options = _Options,\n    DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 
1 + Supers + Subs : Dynamic\n  };\n  typedef _CoefficientsType CoefficientsType;\n};\n\ntemplate<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options>\nclass BandMatrixWrapper : public BandMatrixBase<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> >\n{\n  public:\n\n    typedef typename internal::traits<BandMatrixWrapper>::Scalar Scalar;\n    typedef typename internal::traits<BandMatrixWrapper>::CoefficientsType CoefficientsType;\n    typedef typename internal::traits<BandMatrixWrapper>::StorageIndex StorageIndex;\n\n    explicit inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows=_Rows, Index cols=_Cols, Index supers=_Supers, Index subs=_Subs)\n      : m_coeffs(coeffs),\n        m_rows(rows), m_supers(supers), m_subs(subs)\n    {\n      EIGEN_UNUSED_VARIABLE(cols);\n      //internal::assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows());\n    }\n\n    /** \\returns the number of rows */\n    inline Index rows() const { return m_rows.value(); }\n\n    /** \\returns the number of columns */\n    inline Index cols() const { return m_coeffs.cols(); }\n\n    /** \\returns the number of super diagonals */\n    inline Index supers() const { return m_supers.value(); }\n\n    /** \\returns the number of sub diagonals */\n    inline Index subs() const { return m_subs.value(); }\n\n    inline const CoefficientsType& coeffs() const { return m_coeffs; }\n\n  protected:\n\n    const CoefficientsType& m_coeffs;\n    internal::variable_if_dynamic<Index, _Rows>   m_rows;\n    internal::variable_if_dynamic<Index, _Supers> m_supers;\n    internal::variable_if_dynamic<Index, _Subs>   m_subs;\n};\n\n/**\n  * \\class TridiagonalMatrix\n  * \\ingroup Core_Module\n  *\n  * \\brief Represents a tridiagonal matrix with a compact banded storage\n  *\n  * \\tparam Scalar Numeric type, i.e. 
float, double, int\n  * \\tparam Size Number of rows and cols, or \\b Dynamic\n  * \\tparam Options Can be 0 or \\b SelfAdjoint\n  *\n  * \\sa class BandMatrix\n  */\ntemplate<typename Scalar, int Size, int Options>\nclass TridiagonalMatrix : public BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor>\n{\n    typedef BandMatrix<Scalar,Size,Size,Options&SelfAdjoint?0:1,1,Options|RowMajor> Base;\n    typedef typename Base::StorageIndex StorageIndex;\n  public:\n    explicit TridiagonalMatrix(Index size = Size) : Base(size,size,Options&SelfAdjoint?0:1,1) {}\n\n    inline typename Base::template DiagonalIntReturnType<1>::Type super()\n    { return Base::template diagonal<1>(); }\n    inline const typename Base::template DiagonalIntReturnType<1>::Type super() const\n    { return Base::template diagonal<1>(); }\n    inline typename Base::template DiagonalIntReturnType<-1>::Type sub()\n    { return Base::template diagonal<-1>(); }\n    inline const typename Base::template DiagonalIntReturnType<-1>::Type sub() const\n    { return Base::template diagonal<-1>(); }\n  protected:\n};\n\n\nstruct BandShape {};\n\ntemplate<typename _Scalar, int _Rows, int _Cols, int _Supers, int _Subs, int _Options>\nstruct evaluator_traits<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> >\n  : public evaluator_traits_base<BandMatrix<_Scalar,_Rows,_Cols,_Supers,_Subs,_Options> >\n{\n  typedef BandShape Shape;\n};\n\ntemplate<typename _CoefficientsType,int _Rows, int _Cols, int _Supers, int _Subs,int _Options>\nstruct evaluator_traits<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> >\n  : public evaluator_traits_base<BandMatrixWrapper<_CoefficientsType,_Rows,_Cols,_Supers,_Subs,_Options> >\n{\n  typedef BandShape Shape;\n};\n\ntemplate<> struct AssignmentKind<DenseShape,BandShape> { typedef EigenBase2EigenBase Kind; };\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_BANDMATRIX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Block.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_BLOCK_H\n#define EIGEN_BLOCK_H\n\nnamespace Eigen { \n\nnamespace internal {\ntemplate<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>\nstruct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprType>\n{\n  typedef typename traits<XprType>::Scalar Scalar;\n  typedef typename traits<XprType>::StorageKind StorageKind;\n  typedef typename traits<XprType>::XprKind XprKind;\n  typedef typename ref_selector<XprType>::type XprTypeNested;\n  typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;\n  enum{\n    MatrixRows = traits<XprType>::RowsAtCompileTime,\n    MatrixCols = traits<XprType>::ColsAtCompileTime,\n    RowsAtCompileTime = MatrixRows == 0 ? 0 : BlockRows,\n    ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols,\n    MaxRowsAtCompileTime = BlockRows==0 ? 0\n                         : RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime)\n                         : int(traits<XprType>::MaxRowsAtCompileTime),\n    MaxColsAtCompileTime = BlockCols==0 ? 0\n                         : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)\n                         : int(traits<XprType>::MaxColsAtCompileTime),\n\n    XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,\n    IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1\n               : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 
0\n               : XprTypeIsRowMajor,\n    HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),\n    InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),\n    InnerStrideAtCompileTime = HasSameStorageOrderAsXprType\n                             ? int(inner_stride_at_compile_time<XprType>::ret)\n                             : int(outer_stride_at_compile_time<XprType>::ret),\n    OuterStrideAtCompileTime = HasSameStorageOrderAsXprType\n                             ? int(outer_stride_at_compile_time<XprType>::ret)\n                             : int(inner_stride_at_compile_time<XprType>::ret),\n\n    // FIXME, this traits is rather specialized for dense object and it needs to be cleaned further\n    FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,\n    FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,\n    Flags = (traits<XprType>::Flags & (DirectAccessBit | (InnerPanel?CompressedAccessBit:0))) | FlagsLvalueBit | FlagsRowMajorBit,\n    // FIXME DirectAccessBit should not be handled by expressions\n    // \n    // Alignment is needed by MapBase's assertions\n    // We can safely set it to false here. 
Internal alignment errors will be detected by an eigen_internal_assert in the respective evaluator\n    Alignment = 0\n  };\n};\n\ntemplate<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false,\n         bool HasDirectAccess = internal::has_direct_access<XprType>::ret> class BlockImpl_dense;\n         \n} // end namespace internal\n\ntemplate<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, typename StorageKind> class BlockImpl;\n\n/** \\class Block\n  * \\ingroup Core_Module\n  *\n  * \\brief Expression of a fixed-size or dynamic-size block\n  *\n  * \\tparam XprType the type of the expression in which we are taking a block\n  * \\tparam BlockRows the number of rows of the block we are taking at compile time (optional)\n  * \\tparam BlockCols the number of columns of the block we are taking at compile time (optional)\n  * \\tparam InnerPanel is true, if the block maps to a set of rows of a row major matrix or\n  *         to set of columns of a column major matrix (optional). The parameter allows to determine\n  *         at compile time whether aligned access is possible on the block expression.\n  *\n  * This class represents an expression of either a fixed-size or dynamic-size block. 
It is the return\n  * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block<int,int>(Index,Index) and\n  * most of the time this is the only way it is used.\n  *\n  * However, if you want to directly manipulate block expressions,\n  * for instance if you want to write a function returning such an expression, you\n  * will need to use this class.\n  *\n  * Here is an example illustrating the dynamic case:\n  * \\include class_Block.cpp\n  * Output: \\verbinclude class_Block.out\n  *\n  * \\note Even though this expression has dynamic size, in the case where \\a XprType\n  * has fixed size, this expression inherits a fixed maximal size which means that evaluating\n  * it does not cause a dynamic memory allocation.\n  *\n  * Here is an example illustrating the fixed-size case:\n  * \\include class_FixedBlock.cpp\n  * Output: \\verbinclude class_FixedBlock.out\n  *\n  * \\sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock\n  */\ntemplate<typename XprType, int BlockRows, int BlockCols, bool InnerPanel> class Block\n  : public BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind>\n{\n    typedef BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, typename internal::traits<XprType>::StorageKind> Impl;\n  public:\n    //typedef typename Impl::Base Base;\n    typedef Impl Base;\n    EIGEN_GENERIC_PUBLIC_INTERFACE(Block)\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)\n    \n    typedef typename internal::remove_all<XprType>::type NestedExpression;\n  \n    /** Column or Row constructor\n      */\n    EIGEN_DEVICE_FUNC\n    inline Block(XprType& xpr, Index i) : Impl(xpr,i)\n    {\n      eigen_assert( (i>=0) && (\n          ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i<xpr.rows())\n        ||((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && i<xpr.cols())));\n    }\n\n    /** Fixed-size constructor\n      */\n    EIGEN_DEVICE_FUNC\n    
inline Block(XprType& xpr, Index startRow, Index startCol)\n      : Impl(xpr, startRow, startCol)\n    {\n      EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE)\n      eigen_assert(startRow >= 0 && BlockRows >= 0 && startRow + BlockRows <= xpr.rows()\n             && startCol >= 0 && BlockCols >= 0 && startCol + BlockCols <= xpr.cols());\n    }\n\n    /** Dynamic-size constructor\n      */\n    EIGEN_DEVICE_FUNC\n    inline Block(XprType& xpr,\n          Index startRow, Index startCol,\n          Index blockRows, Index blockCols)\n      : Impl(xpr, startRow, startCol, blockRows, blockCols)\n    {\n      eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)\n          && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));\n      eigen_assert(startRow >= 0 && blockRows >= 0 && startRow  <= xpr.rows() - blockRows\n          && startCol >= 0 && blockCols >= 0 && startCol <= xpr.cols() - blockCols);\n    }\n};\n         \n// The generic default implementation for dense block simply forwards to the internal::BlockImpl_dense\n// that must be specialized for direct and non-direct access...\ntemplate<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>\nclass BlockImpl<XprType, BlockRows, BlockCols, InnerPanel, Dense>\n  : public internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel>\n{\n    typedef internal::BlockImpl_dense<XprType, BlockRows, BlockCols, InnerPanel> Impl;\n    typedef typename XprType::StorageIndex StorageIndex;\n  public:\n    typedef Impl Base;\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)\n    EIGEN_DEVICE_FUNC inline BlockImpl(XprType& xpr, Index i) : Impl(xpr,i) {}\n    EIGEN_DEVICE_FUNC inline BlockImpl(XprType& xpr, Index startRow, Index startCol) : Impl(xpr, startRow, startCol) {}\n    EIGEN_DEVICE_FUNC\n    inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)\n      : Impl(xpr, 
startRow, startCol, blockRows, blockCols) {}\n};\n\nnamespace internal {\n\n/** \\internal Internal implementation of dense Blocks in the general case. */\ntemplate<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool HasDirectAccess> class BlockImpl_dense\n  : public internal::dense_xpr_base<Block<XprType, BlockRows, BlockCols, InnerPanel> >::type\n{\n    typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;\n    typedef typename internal::ref_selector<XprType>::non_const_type XprTypeNested;\n  public:\n\n    typedef typename internal::dense_xpr_base<BlockType>::type Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(BlockType)\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense)\n\n    // class InnerIterator; // FIXME apparently never used\n\n    /** Column or Row constructor\n      */\n    EIGEN_DEVICE_FUNC\n    inline BlockImpl_dense(XprType& xpr, Index i)\n      : m_xpr(xpr),\n        // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime,\n        // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1,\n        // all other cases are invalid.\n        // The case a 1x1 matrix seems ambiguous, but the result is the same anyway.\n        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),\n        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),\n        m_blockRows(BlockRows==1 ? 1 : xpr.rows()),\n        m_blockCols(BlockCols==1 ? 
1 : xpr.cols())\n    {}\n\n    /** Fixed-size constructor\n      */\n    EIGEN_DEVICE_FUNC\n    inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)\n      : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),\n                    m_blockRows(BlockRows), m_blockCols(BlockCols)\n    {}\n\n    /** Dynamic-size constructor\n      */\n    EIGEN_DEVICE_FUNC\n    inline BlockImpl_dense(XprType& xpr,\n          Index startRow, Index startCol,\n          Index blockRows, Index blockCols)\n      : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol),\n                    m_blockRows(blockRows), m_blockCols(blockCols)\n    {}\n\n    EIGEN_DEVICE_FUNC inline Index rows() const { return m_blockRows.value(); }\n    EIGEN_DEVICE_FUNC inline Index cols() const { return m_blockCols.value(); }\n\n    EIGEN_DEVICE_FUNC\n    inline Scalar& coeffRef(Index rowId, Index colId)\n    {\n      EIGEN_STATIC_ASSERT_LVALUE(XprType)\n      return m_xpr.coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline const Scalar& coeffRef(Index rowId, Index colId) const\n    {\n      return m_xpr.derived().coeffRef(rowId + m_startRow.value(), colId + m_startCol.value());\n    }\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index rowId, Index colId) const\n    {\n      return m_xpr.coeff(rowId + m_startRow.value(), colId + m_startCol.value());\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline Scalar& coeffRef(Index index)\n    {\n      EIGEN_STATIC_ASSERT_LVALUE(XprType)\n      return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),\n                            m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline const Scalar& coeffRef(Index index) const\n    {\n      return m_xpr.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),\n                            m_startCol.value() + (RowsAtCompileTime == 1 ? 
index : 0));\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline const CoeffReturnType coeff(Index index) const\n    {\n      return m_xpr.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),\n                         m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));\n    }\n\n    template<int LoadMode>\n    inline PacketScalar packet(Index rowId, Index colId) const\n    {\n      return m_xpr.template packet<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value());\n    }\n\n    template<int LoadMode>\n    inline void writePacket(Index rowId, Index colId, const PacketScalar& val)\n    {\n      m_xpr.template writePacket<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value(), val);\n    }\n\n    template<int LoadMode>\n    inline PacketScalar packet(Index index) const\n    {\n      return m_xpr.template packet<Unaligned>\n              (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),\n               m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));\n    }\n\n    template<int LoadMode>\n    inline void writePacket(Index index, const PacketScalar& val)\n    {\n      m_xpr.template writePacket<Unaligned>\n         (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),\n          m_startCol.value() + (RowsAtCompileTime == 1 ? 
index : 0), val);\n    }\n\n    #ifdef EIGEN_PARSED_BY_DOXYGEN\n    /** \\sa MapBase::data() */\n    EIGEN_DEVICE_FUNC inline const Scalar* data() const;\n    EIGEN_DEVICE_FUNC inline Index innerStride() const;\n    EIGEN_DEVICE_FUNC inline Index outerStride() const;\n    #endif\n\n    EIGEN_DEVICE_FUNC\n    const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const\n    { \n      return m_xpr; \n    }\n\n    EIGEN_DEVICE_FUNC\n    XprType& nestedExpression() { return m_xpr; }\n      \n    EIGEN_DEVICE_FUNC\n    StorageIndex startRow() const\n    { \n      return m_startRow.value(); \n    }\n      \n    EIGEN_DEVICE_FUNC\n    StorageIndex startCol() const\n    { \n      return m_startCol.value(); \n    }\n\n  protected:\n\n    XprTypeNested m_xpr;\n    const internal::variable_if_dynamic<StorageIndex, (XprType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;\n    const internal::variable_if_dynamic<StorageIndex, (XprType::ColsAtCompileTime == 1 && BlockCols==1) ? 
0 : Dynamic> m_startCol;\n    const internal::variable_if_dynamic<StorageIndex, RowsAtCompileTime> m_blockRows;\n    const internal::variable_if_dynamic<StorageIndex, ColsAtCompileTime> m_blockCols;\n};\n\n/** \\internal Internal implementation of dense Blocks in the direct access case.*/\ntemplate<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>\nclass BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>\n  : public MapBase<Block<XprType, BlockRows, BlockCols, InnerPanel> >\n{\n    typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;\n    typedef typename internal::ref_selector<XprType>::non_const_type XprTypeNested;\n    enum {\n      XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0\n    };\n  public:\n\n    typedef MapBase<BlockType> Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(BlockType)\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl_dense)\n\n    /** Column or Row constructor\n      */\n    EIGEN_DEVICE_FUNC\n    inline BlockImpl_dense(XprType& xpr, Index i)\n      : Base(xpr.data() + i * (    ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && (!XprTypeIsRowMajor)) \n                                || ((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && ( XprTypeIsRowMajor)) ? xpr.innerStride() : xpr.outerStride()),\n             BlockRows==1 ? 1 : xpr.rows(),\n             BlockCols==1 ? 1 : xpr.cols()),\n        m_xpr(xpr),\n        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),\n        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? 
i : 0)\n    {\n      init();\n    }\n\n    /** Fixed-size constructor\n      */\n    EIGEN_DEVICE_FUNC\n    inline BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)\n      : Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol)),\n        m_xpr(xpr), m_startRow(startRow), m_startCol(startCol)\n    {\n      init();\n    }\n\n    /** Dynamic-size constructor\n      */\n    EIGEN_DEVICE_FUNC\n    inline BlockImpl_dense(XprType& xpr,\n          Index startRow, Index startCol,\n          Index blockRows, Index blockCols)\n      : Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol), blockRows, blockCols),\n        m_xpr(xpr), m_startRow(startRow), m_startCol(startCol)\n    {\n      init();\n    }\n\n    EIGEN_DEVICE_FUNC\n    const typename internal::remove_all<XprTypeNested>::type& nestedExpression() const\n    { \n      return m_xpr; \n    }\n\n    EIGEN_DEVICE_FUNC\n    XprType& nestedExpression() { return m_xpr; }\n      \n    /** \\sa MapBase::innerStride() */\n    EIGEN_DEVICE_FUNC\n    inline Index innerStride() const\n    {\n      return internal::traits<BlockType>::HasSameStorageOrderAsXprType\n             ? m_xpr.innerStride()\n             : m_xpr.outerStride();\n    }\n\n    /** \\sa MapBase::outerStride() */\n    EIGEN_DEVICE_FUNC\n    inline Index outerStride() const\n    {\n      return m_outerStride;\n    }\n\n    EIGEN_DEVICE_FUNC\n    StorageIndex startRow() const\n    {\n      return m_startRow.value();\n    }\n\n    EIGEN_DEVICE_FUNC\n    StorageIndex startCol() const\n    {\n      return m_startCol.value();\n    }\n\n  #ifndef __SUNPRO_CC\n  // FIXME sunstudio is not friendly with the above friend...\n  // META-FIXME there is no 'friend' keyword around here. 
Is this obsolete?\n  protected:\n  #endif\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** \\internal used by allowAligned() */\n    EIGEN_DEVICE_FUNC\n    inline BlockImpl_dense(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols)\n      : Base(data, blockRows, blockCols), m_xpr(xpr)\n    {\n      init();\n    }\n    #endif\n\n  protected:\n    EIGEN_DEVICE_FUNC\n    void init()\n    {\n      m_outerStride = internal::traits<BlockType>::HasSameStorageOrderAsXprType\n                    ? m_xpr.outerStride()\n                    : m_xpr.innerStride();\n    }\n\n    XprTypeNested m_xpr;\n    const internal::variable_if_dynamic<StorageIndex, (XprType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;\n    const internal::variable_if_dynamic<StorageIndex, (XprType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol;\n    Index m_outerStride;\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_BLOCK_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/BooleanRedux.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ALLANDANY_H\n#define EIGEN_ALLANDANY_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename Derived, int UnrollCount, int Rows>\nstruct all_unroller\n{\n  enum {\n    col = (UnrollCount-1) / Rows,\n    row = (UnrollCount-1) % Rows\n  };\n\n  static inline bool run(const Derived &mat)\n  {\n    return all_unroller<Derived, UnrollCount-1, Rows>::run(mat) && mat.coeff(row, col);\n  }\n};\n\ntemplate<typename Derived, int Rows>\nstruct all_unroller<Derived, 0, Rows>\n{\n  static inline bool run(const Derived &/*mat*/) { return true; }\n};\n\ntemplate<typename Derived, int Rows>\nstruct all_unroller<Derived, Dynamic, Rows>\n{\n  static inline bool run(const Derived &) { return false; }\n};\n\ntemplate<typename Derived, int UnrollCount, int Rows>\nstruct any_unroller\n{\n  enum {\n    col = (UnrollCount-1) / Rows,\n    row = (UnrollCount-1) % Rows\n  };\n  \n  static inline bool run(const Derived &mat)\n  {\n    return any_unroller<Derived, UnrollCount-1, Rows>::run(mat) || mat.coeff(row, col);\n  }\n};\n\ntemplate<typename Derived, int Rows>\nstruct any_unroller<Derived, 0, Rows>\n{\n  static inline bool run(const Derived & /*mat*/) { return false; }\n};\n\ntemplate<typename Derived, int Rows>\nstruct any_unroller<Derived, Dynamic, Rows>\n{\n  static inline bool run(const Derived &) { return false; }\n};\n\n} // end namespace internal\n\n/** \\returns true if all coefficients are true\n  *\n  * Example: \\include MatrixBase_all.cpp\n  * Output: \\verbinclude MatrixBase_all.out\n  *\n  * \\sa any(), Cwise::operator<()\n  */\ntemplate<typename 
Derived>\nEIGEN_DEVICE_FUNC inline bool DenseBase<Derived>::all() const\n{\n  typedef internal::evaluator<Derived> Evaluator;\n  enum {\n    unroll = SizeAtCompileTime != Dynamic\n          && SizeAtCompileTime * (Evaluator::CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT\n  };\n  Evaluator evaluator(derived());\n  if(unroll)\n    return internal::all_unroller<Evaluator, unroll ? int(SizeAtCompileTime) : Dynamic, internal::traits<Derived>::RowsAtCompileTime>::run(evaluator);\n  else\n  {\n    for(Index j = 0; j < cols(); ++j)\n      for(Index i = 0; i < rows(); ++i)\n        if (!evaluator.coeff(i, j)) return false;\n    return true;\n  }\n}\n\n/** \\returns true if at least one coefficient is true\n  *\n  * \\sa all()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline bool DenseBase<Derived>::any() const\n{\n  typedef internal::evaluator<Derived> Evaluator;\n  enum {\n    unroll = SizeAtCompileTime != Dynamic\n          && SizeAtCompileTime * (Evaluator::CoeffReadCost + NumTraits<Scalar>::AddCost) <= EIGEN_UNROLLING_LIMIT\n  };\n  Evaluator evaluator(derived());\n  if(unroll)\n    return internal::any_unroller<Evaluator, unroll ? 
int(SizeAtCompileTime) : Dynamic, internal::traits<Derived>::RowsAtCompileTime>::run(evaluator);\n  else\n  {\n    for(Index j = 0; j < cols(); ++j)\n      for(Index i = 0; i < rows(); ++i)\n        if (evaluator.coeff(i, j)) return true;\n    return false;\n  }\n}\n\n/** \\returns the number of coefficients which evaluate to true\n  *\n  * \\sa all(), any()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline Eigen::Index DenseBase<Derived>::count() const\n{\n  return derived().template cast<bool>().template cast<Index>().sum();\n}\n\n/** \\returns true is \\c *this contains at least one Not A Number (NaN).\n  *\n  * \\sa allFinite()\n  */\ntemplate<typename Derived>\ninline bool DenseBase<Derived>::hasNaN() const\n{\n#if EIGEN_COMP_MSVC || (defined __FAST_MATH__)\n  return derived().array().isNaN().any();\n#else\n  return !((derived().array()==derived().array()).all());\n#endif\n}\n\n/** \\returns true if \\c *this contains only finite numbers, i.e., no NaN and no +/-INF values.\n  *\n  * \\sa hasNaN()\n  */\ntemplate<typename Derived>\ninline bool DenseBase<Derived>::allFinite() const\n{\n#if EIGEN_COMP_MSVC || (defined __FAST_MATH__)\n  return derived().array().isFinite().all();\n#else\n  return !((derived()-derived()).hasNaN());\n#endif\n}\n    \n} // end namespace Eigen\n\n#endif // EIGEN_ALLANDANY_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/CommaInitializer.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_COMMAINITIALIZER_H\n#define EIGEN_COMMAINITIALIZER_H\n\nnamespace Eigen { \n\n/** \\class CommaInitializer\n  * \\ingroup Core_Module\n  *\n  * \\brief Helper class used by the comma initializer operator\n  *\n  * This class is internally used to implement the comma initializer feature. It is\n  * the return type of MatrixBase::operator<<, and most of the time this is the only\n  * way it is used.\n  *\n  * \\sa \\blank \\ref MatrixBaseCommaInitRef \"MatrixBase::operator<<\", CommaInitializer::finished()\n  */\ntemplate<typename XprType>\nstruct CommaInitializer\n{\n  typedef typename XprType::Scalar Scalar;\n\n  EIGEN_DEVICE_FUNC\n  inline CommaInitializer(XprType& xpr, const Scalar& s)\n    : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1)\n  {\n    m_xpr.coeffRef(0,0) = s;\n  }\n\n  template<typename OtherDerived>\n  EIGEN_DEVICE_FUNC\n  inline CommaInitializer(XprType& xpr, const DenseBase<OtherDerived>& other)\n    : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())\n  {\n    m_xpr.block(0, 0, other.rows(), other.cols()) = other;\n  }\n\n  /* Copy/Move constructor which transfers ownership. This is crucial in \n   * absence of return value optimization to avoid assertions during destruction. 
*/\n  // FIXME in C++11 mode this could be replaced by a proper RValue constructor\n  EIGEN_DEVICE_FUNC\n  inline CommaInitializer(const CommaInitializer& o)\n  : m_xpr(o.m_xpr), m_row(o.m_row), m_col(o.m_col), m_currentBlockRows(o.m_currentBlockRows) {\n    // Mark original object as finished. In absence of R-value references we need to const_cast:\n    const_cast<CommaInitializer&>(o).m_row = m_xpr.rows();\n    const_cast<CommaInitializer&>(o).m_col = m_xpr.cols();\n    const_cast<CommaInitializer&>(o).m_currentBlockRows = 0;\n  }\n\n  /* inserts a scalar value in the target matrix */\n  EIGEN_DEVICE_FUNC\n  CommaInitializer& operator,(const Scalar& s)\n  {\n    if (m_col==m_xpr.cols())\n    {\n      m_row+=m_currentBlockRows;\n      m_col = 0;\n      m_currentBlockRows = 1;\n      eigen_assert(m_row<m_xpr.rows()\n        && \"Too many rows passed to comma initializer (operator<<)\");\n    }\n    eigen_assert(m_col<m_xpr.cols()\n      && \"Too many coefficients passed to comma initializer (operator<<)\");\n    eigen_assert(m_currentBlockRows==1);\n    m_xpr.coeffRef(m_row, m_col++) = s;\n    return *this;\n  }\n\n  /* inserts a matrix expression in the target matrix */\n  template<typename OtherDerived>\n  EIGEN_DEVICE_FUNC\n  CommaInitializer& operator,(const DenseBase<OtherDerived>& other)\n  {\n    if (m_col==m_xpr.cols() && (other.cols()!=0 || other.rows()!=m_currentBlockRows))\n    {\n      m_row+=m_currentBlockRows;\n      m_col = 0;\n      m_currentBlockRows = other.rows();\n      eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows()\n        && \"Too many rows passed to comma initializer (operator<<)\");\n    }\n    eigen_assert((m_col + other.cols() <= m_xpr.cols())\n      && \"Too many coefficients passed to comma initializer (operator<<)\");\n    eigen_assert(m_currentBlockRows==other.rows());\n    m_xpr.template block<OtherDerived::RowsAtCompileTime, OtherDerived::ColsAtCompileTime>\n                    (m_row, m_col, other.rows(), other.cols()) = 
other;\n    m_col += other.cols();\n    return *this;\n  }\n\n  EIGEN_DEVICE_FUNC\n  inline ~CommaInitializer()\n#if defined VERIFY_RAISES_ASSERT && (!defined EIGEN_NO_ASSERTION_CHECKING) && defined EIGEN_EXCEPTIONS\n  EIGEN_EXCEPTION_SPEC(Eigen::eigen_assert_exception)\n#endif\n  {\n      finished();\n  }\n\n  /** \\returns the built matrix once all its coefficients have been set.\n    * Calling finished is 100% optional. Its purpose is to write expressions\n    * like this:\n    * \\code\n    * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());\n    * \\endcode\n    */\n  EIGEN_DEVICE_FUNC\n  inline XprType& finished() {\n      eigen_assert(((m_row+m_currentBlockRows) == m_xpr.rows() || m_xpr.cols() == 0)\n           && m_col == m_xpr.cols()\n           && \"Too few coefficients passed to comma initializer (operator<<)\");\n      return m_xpr;\n  }\n\n  XprType& m_xpr;           // target expression\n  Index m_row;              // current row id\n  Index m_col;              // current col id\n  Index m_currentBlockRows; // current block height\n};\n\n/** \\anchor MatrixBaseCommaInitRef\n  * Convenient operator to set the coefficients of a matrix.\n  *\n  * The coefficients must be provided in a row major order and exactly match\n  * the size of the matrix. 
Otherwise an assertion is raised.\n  *\n  * Example: \\include MatrixBase_set.cpp\n  * Output: \\verbinclude MatrixBase_set.out\n  * \n  * \\note According the c++ standard, the argument expressions of this comma initializer are evaluated in arbitrary order.\n  *\n  * \\sa CommaInitializer::finished(), class CommaInitializer\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline CommaInitializer<Derived> DenseBase<Derived>::operator<< (const Scalar& s)\n{\n  return CommaInitializer<Derived>(*static_cast<Derived*>(this), s);\n}\n\n/** \\sa operator<<(const Scalar&) */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC inline CommaInitializer<Derived>\nDenseBase<Derived>::operator<<(const DenseBase<OtherDerived>& other)\n{\n  return CommaInitializer<Derived>(*static_cast<Derived *>(this), other);\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_COMMAINITIALIZER_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/ConditionEstimator.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2016 Rasmus Munk Larsen (rmlarsen@google.com)\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_CONDITIONESTIMATOR_H\n#define EIGEN_CONDITIONESTIMATOR_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate <typename Vector, typename RealVector, bool IsComplex>\nstruct rcond_compute_sign {\n  static inline Vector run(const Vector& v) {\n    const RealVector v_abs = v.cwiseAbs();\n    return (v_abs.array() == static_cast<typename Vector::RealScalar>(0))\n            .select(Vector::Ones(v.size()), v.cwiseQuotient(v_abs));\n  }\n};\n\n// Partial specialization to avoid elementwise division for real vectors.\ntemplate <typename Vector>\nstruct rcond_compute_sign<Vector, Vector, false> {\n  static inline Vector run(const Vector& v) {\n    return (v.array() < static_cast<typename Vector::RealScalar>(0))\n           .select(-Vector::Ones(v.size()), Vector::Ones(v.size()));\n  }\n};\n\n/**\n  * \\returns an estimate of ||inv(matrix)||_1 given a decomposition of\n  * \\a matrix that implements .solve() and .adjoint().solve() methods.\n  *\n  * This function implements Algorithms 4.1 and 5.1 from\n  *   http://www.maths.manchester.ac.uk/~higham/narep/narep135.pdf\n  * which also forms the basis for the condition number estimators in\n  * LAPACK. Since at most 10 calls to the solve method of dec are\n  * performed, the total cost is O(dims^2), as opposed to O(dims^3)\n  * needed to compute the inverse matrix explicitly.\n  *\n  * The most common usage is in estimating the condition number\n  * ||matrix||_1 * ||inv(matrix)||_1. 
The first term ||matrix||_1 can be\n  * computed directly in O(n^2) operations.\n  *\n  * Supports the following decompositions: FullPivLU, PartialPivLU, LDLT, and\n  * LLT.\n  *\n  * \\sa FullPivLU, PartialPivLU, LDLT, LLT.\n  */\ntemplate <typename Decomposition>\ntypename Decomposition::RealScalar rcond_invmatrix_L1_norm_estimate(const Decomposition& dec)\n{\n  typedef typename Decomposition::MatrixType MatrixType;\n  typedef typename Decomposition::Scalar Scalar;\n  typedef typename Decomposition::RealScalar RealScalar;\n  typedef typename internal::plain_col_type<MatrixType>::type Vector;\n  typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVector;\n  const bool is_complex = (NumTraits<Scalar>::IsComplex != 0);\n\n  eigen_assert(dec.rows() == dec.cols());\n  const Index n = dec.rows();\n  if (n == 0)\n    return 0;\n\n  // Disable Index to float conversion warning\n#ifdef __INTEL_COMPILER\n  #pragma warning push\n  #pragma warning ( disable : 2259 )\n#endif\n  Vector v = dec.solve(Vector::Ones(n) / Scalar(n));\n#ifdef __INTEL_COMPILER\n  #pragma warning pop\n#endif\n\n  // lower_bound is a lower bound on\n  //   ||inv(matrix)||_1  = sup_v ||inv(matrix) v||_1 / ||v||_1\n  // and is the objective maximized by the (\"super-\") gradient ascent\n  // algorithm below.\n  RealScalar lower_bound = v.template lpNorm<1>();\n  if (n == 1)\n    return lower_bound;\n\n  // Gradient ascent algorithm follows: We know that the optimum is achieved at\n  // one of the simplices v = e_i, so in each iteration we follow a\n  // super-gradient to move towards the optimal one.\n  RealScalar old_lower_bound = lower_bound;\n  Vector sign_vector(n);\n  Vector old_sign_vector;\n  Index v_max_abs_index = -1;\n  Index old_v_max_abs_index = v_max_abs_index;\n  for (int k = 0; k < 4; ++k)\n  {\n    sign_vector = internal::rcond_compute_sign<Vector, RealVector, is_complex>::run(v);\n    if (k > 0 && !is_complex && sign_vector == old_sign_vector) {\n      // Break if 
the solution stagnated.\n      break;\n    }\n    // v_max_abs_index = argmax |real( inv(matrix)^T * sign_vector )|\n    v = dec.adjoint().solve(sign_vector);\n    v.real().cwiseAbs().maxCoeff(&v_max_abs_index);\n    if (v_max_abs_index == old_v_max_abs_index) {\n      // Break if the solution stagnated.\n      break;\n    }\n    // Move to the new simplex e_j, where j = v_max_abs_index.\n    v = dec.solve(Vector::Unit(n, v_max_abs_index));  // v = inv(matrix) * e_j.\n    lower_bound = v.template lpNorm<1>();\n    if (lower_bound <= old_lower_bound) {\n      // Break if the gradient step did not increase the lower_bound.\n      break;\n    }\n    if (!is_complex) {\n      old_sign_vector = sign_vector;\n    }\n    old_v_max_abs_index = v_max_abs_index;\n    old_lower_bound = lower_bound;\n  }\n  // The following calculates an independent estimate of ||matrix||_1 by\n  // multiplying matrix by a vector with entries of slowly increasing\n  // magnitude and alternating sign:\n  //   v_i = (-1)^{i} (1 + (i / (dim-1))), i = 0,...,dim-1.\n  // This improvement to Hager's algorithm above is due to Higham. 
It was\n  // added to make the algorithm more robust in certain corner cases where\n  // large elements in the matrix might otherwise escape detection due to\n  // exact cancellation (especially when op and op_adjoint correspond to a\n  // sequence of backsubstitutions and permutations), which could cause\n  // Hager's algorithm to vastly underestimate ||matrix||_1.\n  Scalar alternating_sign(RealScalar(1));\n  for (Index i = 0; i < n; ++i) {\n    // The static_cast is needed when Scalar is a complex and RealScalar implements expression templates\n    v[i] = alternating_sign * static_cast<RealScalar>(RealScalar(1) + (RealScalar(i) / (RealScalar(n - 1))));\n    alternating_sign = -alternating_sign;\n  }\n  v = dec.solve(v);\n  const RealScalar alternate_lower_bound = (2 * v.template lpNorm<1>()) / (3 * RealScalar(n));\n  return numext::maxi(lower_bound, alternate_lower_bound);\n}\n\n/** \\brief Reciprocal condition number estimator.\n  *\n  * Computing a decomposition of a dense matrix takes O(n^3) operations, while\n  * this method estimates the condition number quickly and reliably in O(n^2)\n  * operations.\n  *\n  * \\returns an estimate of the reciprocal condition number\n  * (1 / (||matrix||_1 * ||inv(matrix)||_1)) of matrix, given ||matrix||_1 and\n  * its decomposition. 
Supports the following decompositions: FullPivLU,\n  * PartialPivLU, LDLT, and LLT.\n  *\n  * \\sa FullPivLU, PartialPivLU, LDLT, LLT.\n  */\ntemplate <typename Decomposition>\ntypename Decomposition::RealScalar\nrcond_estimate_helper(typename Decomposition::RealScalar matrix_norm, const Decomposition& dec)\n{\n  typedef typename Decomposition::RealScalar RealScalar;\n  eigen_assert(dec.rows() == dec.cols());\n  if (dec.rows() == 0)              return RealScalar(1);\n  if (matrix_norm == RealScalar(0)) return RealScalar(0);\n  if (dec.rows() == 1)              return RealScalar(1);\n  const RealScalar inverse_matrix_norm = rcond_invmatrix_L1_norm_estimate(dec);\n  return (inverse_matrix_norm == RealScalar(0) ? RealScalar(0)\n                                               : (RealScalar(1) / inverse_matrix_norm) / matrix_norm);\n}\n\n}  // namespace internal\n\n}  // namespace Eigen\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/CoreEvaluators.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\n#ifndef EIGEN_COREEVALUATORS_H\n#define EIGEN_COREEVALUATORS_H\n\nnamespace Eigen {\n  \nnamespace internal {\n\n// This class returns the evaluator kind from the expression storage kind.\n// Default assumes index based accessors\ntemplate<typename StorageKind>\nstruct storage_kind_to_evaluator_kind {\n  typedef IndexBased Kind;\n};\n\n// This class returns the evaluator shape from the expression storage kind.\n// It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc.\ntemplate<typename StorageKind> struct storage_kind_to_shape;\n\ntemplate<> struct storage_kind_to_shape<Dense>                  { typedef DenseShape Shape;           };\ntemplate<> struct storage_kind_to_shape<SolverStorage>          { typedef SolverShape Shape;           };\ntemplate<> struct storage_kind_to_shape<PermutationStorage>     { typedef PermutationShape Shape;     };\ntemplate<> struct storage_kind_to_shape<TranspositionsStorage>  { typedef TranspositionsShape Shape;  };\n\n// Evaluators have to be specialized with respect to various criteria such as:\n//  - storage/structure/shape\n//  - scalar type\n//  - etc.\n// Therefore, we need specialization of evaluator providing additional template arguments for each kind of evaluators.\n// We currently distinguish the following kind of evaluators:\n// - unary_evaluator    for expressions taking only one arguments (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate)\n// - 
binary_evaluator   for expression taking two arguments (CwiseBinaryOp)\n// - ternary_evaluator   for expression taking three arguments (CwiseTernaryOp)\n// - product_evaluator  for linear algebra products (Product); special case of binary_evaluator because it requires additional tags for dispatching.\n// - mapbase_evaluator  for Map, Block, Ref\n// - block_evaluator    for Block (special dispatching to a mapbase_evaluator or unary_evaluator)\n\ntemplate< typename T,\n          typename Arg1Kind   = typename evaluator_traits<typename T::Arg1>::Kind,\n          typename Arg2Kind   = typename evaluator_traits<typename T::Arg2>::Kind,\n          typename Arg3Kind   = typename evaluator_traits<typename T::Arg3>::Kind,\n          typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar,\n          typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar,\n          typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar> struct ternary_evaluator;\n\ntemplate< typename T,\n          typename LhsKind   = typename evaluator_traits<typename T::Lhs>::Kind,\n          typename RhsKind   = typename evaluator_traits<typename T::Rhs>::Kind,\n          typename LhsScalar = typename traits<typename T::Lhs>::Scalar,\n          typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct binary_evaluator;\n\ntemplate< typename T,\n          typename Kind   = typename evaluator_traits<typename T::NestedExpression>::Kind,\n          typename Scalar = typename T::Scalar> struct unary_evaluator;\n          \n// evaluator_traits<T> contains traits for evaluator<T> \n\ntemplate<typename T>\nstruct evaluator_traits_base\n{\n  // by default, get evaluator kind and shape from storage\n  typedef typename storage_kind_to_evaluator_kind<typename traits<T>::StorageKind>::Kind Kind;\n  typedef typename storage_kind_to_shape<typename traits<T>::StorageKind>::Shape Shape;\n};\n\n// Default evaluator traits\ntemplate<typename T>\nstruct evaluator_traits : public 
evaluator_traits_base<T>\n{\n};\n\ntemplate<typename T, typename Shape = typename evaluator_traits<T>::Shape >\nstruct evaluator_assume_aliasing {\n  static const bool value = false;\n};\n\n// By default, we assume a unary expression:\ntemplate<typename T>\nstruct evaluator : public unary_evaluator<T>\n{\n  typedef unary_evaluator<T> Base;\n  EIGEN_DEVICE_FUNC explicit evaluator(const T& xpr) : Base(xpr) {}\n};\n\n\n// TODO: Think about const-correctness\ntemplate<typename T>\nstruct evaluator<const T>\n  : evaluator<T>\n{\n  EIGEN_DEVICE_FUNC\n  explicit evaluator(const T& xpr) : evaluator<T>(xpr) {}\n};\n\n// ---------- base class for all evaluators ----------\n\ntemplate<typename ExpressionType>\nstruct evaluator_base\n{\n  // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices.\n  typedef traits<ExpressionType> ExpressionTraits;\n  \n  enum {\n    Alignment = 0\n  };\n  // noncopyable:\n  // Don't make this class inherit noncopyable as this kills EBO (Empty Base Optimization)\n  // and make complex evaluator much larger than then should do.\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE evaluator_base() {}\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ~evaluator_base() {}\nprivate:\n  EIGEN_DEVICE_FUNC evaluator_base(const evaluator_base&);\n  EIGEN_DEVICE_FUNC const evaluator_base& operator=(const evaluator_base&);\n};\n\n// -------------------- Matrix and Array --------------------\n//\n// evaluator<PlainObjectBase> is a common base class for the\n// Matrix and Array evaluators.\n// Here we directly specialize evaluator. 
This is not really a unary expression, and it is, by definition, dense,\n// so no need for more sophisticated dispatching.\n\n// this helper permits to completely eliminate m_outerStride if it is known at compiletime.\ntemplate<typename Scalar,int OuterStride> class plainobjectbase_evaluator_data {\npublic:\n  EIGEN_DEVICE_FUNC plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr)\n  {\n    EIGEN_ONLY_USED_FOR_DEBUG(outerStride);\n    eigen_internal_assert(outerStride==OuterStride);\n  }\n  EIGEN_DEVICE_FUNC Index outerStride() const { return OuterStride; }\n  const Scalar *data;\n};\n\ntemplate<typename Scalar> class plainobjectbase_evaluator_data<Scalar,Dynamic> {\npublic:\n  EIGEN_DEVICE_FUNC plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr), m_outerStride(outerStride) {}\n  EIGEN_DEVICE_FUNC Index outerStride() const { return m_outerStride; }\n  const Scalar *data;\nprotected:\n  Index m_outerStride;\n};\n\ntemplate<typename Derived>\nstruct evaluator<PlainObjectBase<Derived> >\n  : evaluator_base<Derived>\n{\n  typedef PlainObjectBase<Derived> PlainObjectType;\n  typedef typename PlainObjectType::Scalar Scalar;\n  typedef typename PlainObjectType::CoeffReturnType CoeffReturnType;\n\n  enum {\n    IsRowMajor = PlainObjectType::IsRowMajor,\n    IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime,\n    RowsAtCompileTime = PlainObjectType::RowsAtCompileTime,\n    ColsAtCompileTime = PlainObjectType::ColsAtCompileTime,\n    \n    CoeffReadCost = NumTraits<Scalar>::ReadCost,\n    Flags = traits<Derived>::EvaluatorFlags,\n    Alignment = traits<Derived>::Alignment\n  };\n  enum {\n    // We do not need to know the outer stride for vectors\n    OuterStrideAtCompileTime = IsVectorAtCompileTime  ? 0\n                                                      : int(IsRowMajor) ? 
ColsAtCompileTime\n                                                                        : RowsAtCompileTime\n  };\n\n  EIGEN_DEVICE_FUNC evaluator()\n    : m_d(0,OuterStrideAtCompileTime)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n\n  EIGEN_DEVICE_FUNC explicit evaluator(const PlainObjectType& m)\n    : m_d(m.data(),IsVectorAtCompileTime ? 0 : m.outerStride())\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index row, Index col) const\n  {\n    if (IsRowMajor)\n      return m_d.data[row * m_d.outerStride() + col];\n    else\n      return m_d.data[row + col * m_d.outerStride()];\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index index) const\n  {\n    return m_d.data[index];\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index row, Index col)\n  {\n    if (IsRowMajor)\n      return const_cast<Scalar*>(m_d.data)[row * m_d.outerStride() + col];\n    else\n      return const_cast<Scalar*>(m_d.data)[row + col * m_d.outerStride()];\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index index)\n  {\n    return const_cast<Scalar*>(m_d.data)[index];\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index row, Index col) const\n  {\n    if (IsRowMajor)\n      return ploadt<PacketType, LoadMode>(m_d.data + row * m_d.outerStride() + col);\n    else\n      return ploadt<PacketType, LoadMode>(m_d.data + row + col * m_d.outerStride());\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index index) const\n  {\n    return ploadt<PacketType, LoadMode>(m_d.data + index);\n  }\n\n  template<int StoreMode,typename PacketType>\n  EIGEN_STRONG_INLINE\n  void writePacket(Index row, Index col, const PacketType& x)\n  {\n    if (IsRowMajor)\n      return pstoret<Scalar, PacketType, StoreMode>\n\t            
(const_cast<Scalar*>(m_d.data) + row * m_d.outerStride() + col, x);\n    else\n      return pstoret<Scalar, PacketType, StoreMode>\n                    (const_cast<Scalar*>(m_d.data) + row + col * m_d.outerStride(), x);\n  }\n\n  template<int StoreMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  void writePacket(Index index, const PacketType& x)\n  {\n    return pstoret<Scalar, PacketType, StoreMode>(const_cast<Scalar*>(m_d.data) + index, x);\n  }\n\nprotected:\n\n  plainobjectbase_evaluator_data<Scalar,OuterStrideAtCompileTime> m_d;\n};\n\ntemplate<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>\nstruct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >\n  : evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >\n{\n  typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;\n  \n  EIGEN_DEVICE_FUNC evaluator() {}\n\n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)\n    : evaluator<PlainObjectBase<XprType> >(m) \n  { }\n};\n\ntemplate<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>\nstruct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >\n  : evaluator<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >\n{\n  typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;\n\n  EIGEN_DEVICE_FUNC evaluator() {}\n  \n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)\n    : evaluator<PlainObjectBase<XprType> >(m) \n  { }\n};\n\n// -------------------- Transpose --------------------\n\ntemplate<typename ArgType>\nstruct unary_evaluator<Transpose<ArgType>, IndexBased>\n  : evaluator_base<Transpose<ArgType> >\n{\n  typedef Transpose<ArgType> XprType;\n  \n  enum {\n    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,    \n    Flags = evaluator<ArgType>::Flags ^ RowMajorBit,\n    Alignment = evaluator<ArgType>::Alignment\n  };\n\n  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& t) : 
m_argImpl(t.nestedExpression()) {}\n\n  typedef typename XprType::Scalar Scalar;\n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index row, Index col) const\n  {\n    return m_argImpl.coeff(col, row);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index index) const\n  {\n    return m_argImpl.coeff(index);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index row, Index col)\n  {\n    return m_argImpl.coeffRef(col, row);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  typename XprType::Scalar& coeffRef(Index index)\n  {\n    return m_argImpl.coeffRef(index);\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index row, Index col) const\n  {\n    return m_argImpl.template packet<LoadMode,PacketType>(col, row);\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index index) const\n  {\n    return m_argImpl.template packet<LoadMode,PacketType>(index);\n  }\n\n  template<int StoreMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  void writePacket(Index row, Index col, const PacketType& x)\n  {\n    m_argImpl.template writePacket<StoreMode,PacketType>(col, row, x);\n  }\n\n  template<int StoreMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  void writePacket(Index index, const PacketType& x)\n  {\n    m_argImpl.template writePacket<StoreMode,PacketType>(index, x);\n  }\n\nprotected:\n  evaluator<ArgType> m_argImpl;\n};\n\n// -------------------- CwiseNullaryOp --------------------\n// Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator.\n// Likewise, there is not need to more sophisticated dispatching here.\n\ntemplate<typename Scalar,typename NullaryOp,\n         bool has_nullary = has_nullary_operator<NullaryOp>::value,\n         bool has_unary   = has_unary_operator<NullaryOp>::value,\n         
bool has_binary  = has_binary_operator<NullaryOp>::value>\nstruct nullary_wrapper\n{\n  template <typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); }\n  template <typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }\n\n  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp<T>(i,j); }\n  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }\n};\n\ntemplate<typename Scalar,typename NullaryOp>\nstruct nullary_wrapper<Scalar,NullaryOp,true,false,false>\n{\n  template <typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); }\n  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp<T>(); }\n};\n\ntemplate<typename Scalar,typename NullaryOp>\nstruct nullary_wrapper<Scalar,NullaryOp,false,false,true>\n{\n  template <typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); }\n  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp<T>(i,j); }\n};\n\n// We need the following specialization for vector-only functors assigned to a runtime vector,\n// for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd.\n// In this case, i==0 and j is used for the actual iteration.\ntemplate<typename Scalar,typename 
NullaryOp>\nstruct nullary_wrapper<Scalar,NullaryOp,false,true,false>\n{\n  template <typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {\n    eigen_assert(i==0 || j==0);\n    return op(i+j);\n  }\n  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {\n    eigen_assert(i==0 || j==0);\n    return op.template packetOp<T>(i+j);\n  }\n\n  template <typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }\n  template <typename T, typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }\n};\n\ntemplate<typename Scalar,typename NullaryOp>\nstruct nullary_wrapper<Scalar,NullaryOp,false,false,false> {};\n\n#if 0 && EIGEN_COMP_MSVC>0\n// Disable this ugly workaround. 
This is now handled in traits<Ref>::match,\n// but this piece of code might still become handly if some other weird compilation\n// erros pop up again.\n\n// MSVC exhibits a weird compilation error when\n// compiling:\n//    Eigen::MatrixXf A = MatrixXf::Random(3,3);\n//    Ref<const MatrixXf> R = 2.f*A;\n// and that has_*ary_operator<scalar_constant_op<float>> have not been instantiated yet.\n// The \"problem\" is that evaluator<2.f*A> is instantiated by traits<Ref>::match<2.f*A>\n// and at that time has_*ary_operator<T> returns true regardless of T.\n// Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>.\n// The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(),\n// and packet() are really instantiated as implemented below:\n\n// This is a simple wrapper around Index to enforce the re-instantiation of\n// has_*ary_operator when needed.\ntemplate<typename T> struct nullary_wrapper_workaround_msvc {\n  nullary_wrapper_workaround_msvc(const T&);\n  operator T()const;\n};\n\ntemplate<typename Scalar,typename NullaryOp>\nstruct nullary_wrapper<Scalar,NullaryOp,true,true,true>\n{\n  template <typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {\n    return nullary_wrapper<Scalar,NullaryOp,\n    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,\n    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,\n    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i,j);\n  }\n  template <typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const {\n    return nullary_wrapper<Scalar,NullaryOp,\n    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,\n    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,\n    
has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i);\n  }\n\n  template <typename T, typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {\n    return nullary_wrapper<Scalar,NullaryOp,\n    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,\n    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,\n    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i,j);\n  }\n  template <typename T, typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const {\n    return nullary_wrapper<Scalar,NullaryOp,\n    has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,\n    has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,\n    has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i);\n  }\n};\n#endif // MSVC workaround\n\ntemplate<typename NullaryOp, typename PlainObjectType>\nstruct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> >\n  : evaluator_base<CwiseNullaryOp<NullaryOp,PlainObjectType> >\n{\n  typedef CwiseNullaryOp<NullaryOp,PlainObjectType> XprType;\n  typedef typename internal::remove_all<PlainObjectType>::type PlainObjectTypeCleaned;\n  \n  enum {\n    CoeffReadCost = internal::functor_traits<NullaryOp>::Cost,\n    \n    Flags = (evaluator<PlainObjectTypeCleaned>::Flags\n          &  (  HereditaryBits\n              | (functor_has_linear_access<NullaryOp>::ret  ? LinearAccessBit : 0)\n              | (functor_traits<NullaryOp>::PacketAccess    ? PacketAccessBit : 0)))\n          | (functor_traits<NullaryOp>::IsRepeatable ? 
0 : EvalBeforeNestingBit),\n    Alignment = AlignedMax\n  };\n\n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n)\n    : m_functor(n.functor()), m_wrapper()\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n\n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n\n  template <typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(IndexType row, IndexType col) const\n  {\n    return m_wrapper(m_functor, row, col);\n  }\n\n  template <typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(IndexType index) const\n  {\n    return m_wrapper(m_functor,index);\n  }\n\n  template<int LoadMode, typename PacketType, typename IndexType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(IndexType row, IndexType col) const\n  {\n    return m_wrapper.template packetOp<PacketType>(m_functor, row, col);\n  }\n\n  template<int LoadMode, typename PacketType, typename IndexType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(IndexType index) const\n  {\n    return m_wrapper.template packetOp<PacketType>(m_functor, index);\n  }\n\nprotected:\n  const NullaryOp m_functor;\n  const internal::nullary_wrapper<CoeffReturnType,NullaryOp> m_wrapper;\n};\n\n// -------------------- CwiseUnaryOp --------------------\n\ntemplate<typename UnaryOp, typename ArgType>\nstruct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >\n  : evaluator_base<CwiseUnaryOp<UnaryOp, ArgType> >\n{\n  typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;\n  \n  enum {\n    CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,\n    \n    Flags = evaluator<ArgType>::Flags\n          & (HereditaryBits | LinearAccessBit | (functor_traits<UnaryOp>::PacketAccess ? 
PacketAccessBit : 0)),\n    Alignment = evaluator<ArgType>::Alignment\n  };\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  explicit unary_evaluator(const XprType& op) : m_d(op)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n\n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index row, Index col) const\n  {\n    return m_d.func()(m_d.argImpl.coeff(row, col));\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index index) const\n  {\n    return m_d.func()(m_d.argImpl.coeff(index));\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index row, Index col) const\n  {\n    return m_d.func().packetOp(m_d.argImpl.template packet<LoadMode, PacketType>(row, col));\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index index) const\n  {\n    return m_d.func().packetOp(m_d.argImpl.template packet<LoadMode, PacketType>(index));\n  }\n\nprotected:\n\n  // this helper permits to completely eliminate the functor if it is empty\n  class Data : private UnaryOp\n  {\n  public:\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Data(const XprType& xpr) : UnaryOp(xpr.functor()), argImpl(xpr.nestedExpression()) {}\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    const UnaryOp& func() const { return static_cast<const UnaryOp&>(*this); }\n    evaluator<ArgType> argImpl;\n  };\n\n  Data m_d;\n};\n\n// -------------------- CwiseTernaryOp --------------------\n\n// this is a ternary expression\ntemplate<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>\nstruct evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >\n  : public ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >\n{\n  typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;\n  typedef 
ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > Base;\n  \n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}\n};\n\ntemplate<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>\nstruct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased, IndexBased>\n  : evaluator_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >\n{\n  typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;\n  \n  enum {\n    CoeffReadCost = evaluator<Arg1>::CoeffReadCost + evaluator<Arg2>::CoeffReadCost + evaluator<Arg3>::CoeffReadCost + functor_traits<TernaryOp>::Cost,\n    \n    Arg1Flags = evaluator<Arg1>::Flags,\n    Arg2Flags = evaluator<Arg2>::Flags,\n    Arg3Flags = evaluator<Arg3>::Flags,\n    SameType = is_same<typename Arg1::Scalar,typename Arg2::Scalar>::value && is_same<typename Arg1::Scalar,typename Arg3::Scalar>::value,\n    StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && (int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit),\n    Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & (\n        HereditaryBits\n        | (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) &\n           ( (StorageOrdersAgree ? LinearAccessBit : 0)\n           | (functor_traits<TernaryOp>::PacketAccess && StorageOrdersAgree && SameType ? 
PacketAccessBit : 0)\n           )\n        )\n     ),\n    Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit),\n    Alignment = EIGEN_PLAIN_ENUM_MIN(\n        EIGEN_PLAIN_ENUM_MIN(evaluator<Arg1>::Alignment, evaluator<Arg2>::Alignment),\n        evaluator<Arg3>::Alignment)\n  };\n\n  EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr) : m_d(xpr)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<TernaryOp>::Cost);\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n\n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index row, Index col) const\n  {\n    return m_d.func()(m_d.arg1Impl.coeff(row, col), m_d.arg2Impl.coeff(row, col), m_d.arg3Impl.coeff(row, col));\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index index) const\n  {\n    return m_d.func()(m_d.arg1Impl.coeff(index), m_d.arg2Impl.coeff(index), m_d.arg3Impl.coeff(index));\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index row, Index col) const\n  {\n    return m_d.func().packetOp(m_d.arg1Impl.template packet<LoadMode,PacketType>(row, col),\n                               m_d.arg2Impl.template packet<LoadMode,PacketType>(row, col),\n                               m_d.arg3Impl.template packet<LoadMode,PacketType>(row, col));\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index index) const\n  {\n    return m_d.func().packetOp(m_d.arg1Impl.template packet<LoadMode,PacketType>(index),\n                               m_d.arg2Impl.template packet<LoadMode,PacketType>(index),\n                               m_d.arg3Impl.template packet<LoadMode,PacketType>(index));\n  }\n\nprotected:\n  // this helper permits to completely eliminate the functor if it is empty\n  struct Data : private TernaryOp\n  {\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Data(const XprType& xpr) 
: TernaryOp(xpr.functor()), arg1Impl(xpr.arg1()), arg2Impl(xpr.arg2()), arg3Impl(xpr.arg3()) {}\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    const TernaryOp& func() const { return static_cast<const TernaryOp&>(*this); }\n    evaluator<Arg1> arg1Impl;\n    evaluator<Arg2> arg2Impl;\n    evaluator<Arg3> arg3Impl;\n  };\n\n  Data m_d;\n};\n\n// -------------------- CwiseBinaryOp --------------------\n\n// this is a binary expression\ntemplate<typename BinaryOp, typename Lhs, typename Rhs>\nstruct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >\n  : public binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >\n{\n  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;\n  typedef binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > Base;\n  \n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}\n};\n\ntemplate<typename BinaryOp, typename Lhs, typename Rhs>\nstruct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBased>\n  : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >\n{\n  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;\n  \n  enum {\n    CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,\n    \n    LhsFlags = evaluator<Lhs>::Flags,\n    RhsFlags = evaluator<Rhs>::Flags,\n    SameType = is_same<typename Lhs::Scalar,typename Rhs::Scalar>::value,\n    StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit),\n    Flags0 = (int(LhsFlags) | int(RhsFlags)) & (\n        HereditaryBits\n      | (int(LhsFlags) & int(RhsFlags) &\n           ( (StorageOrdersAgree ? LinearAccessBit : 0)\n           | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? 
PacketAccessBit : 0)\n           )\n        )\n     ),\n    Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),\n    Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<Lhs>::Alignment,evaluator<Rhs>::Alignment)\n  };\n\n  EIGEN_DEVICE_FUNC explicit binary_evaluator(const XprType& xpr) : m_d(xpr)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n\n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index row, Index col) const\n  {\n    return m_d.func()(m_d.lhsImpl.coeff(row, col), m_d.rhsImpl.coeff(row, col));\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index index) const\n  {\n    return m_d.func()(m_d.lhsImpl.coeff(index), m_d.rhsImpl.coeff(index));\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index row, Index col) const\n  {\n    return m_d.func().packetOp(m_d.lhsImpl.template packet<LoadMode,PacketType>(row, col),\n                               m_d.rhsImpl.template packet<LoadMode,PacketType>(row, col));\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index index) const\n  {\n    return m_d.func().packetOp(m_d.lhsImpl.template packet<LoadMode,PacketType>(index),\n                               m_d.rhsImpl.template packet<LoadMode,PacketType>(index));\n  }\n\nprotected:\n\n  // this helper permits to completely eliminate the functor if it is empty\n  struct Data : private BinaryOp\n  {\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Data(const XprType& xpr) : BinaryOp(xpr.functor()), lhsImpl(xpr.lhs()), rhsImpl(xpr.rhs()) {}\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    const BinaryOp& func() const { return static_cast<const BinaryOp&>(*this); }\n    evaluator<Lhs> lhsImpl;\n    evaluator<Rhs> rhsImpl;\n  };\n\n  Data m_d;\n};\n\n// -------------------- CwiseUnaryView 
--------------------\n\ntemplate<typename UnaryOp, typename ArgType>\nstruct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased>\n  : evaluator_base<CwiseUnaryView<UnaryOp, ArgType> >\n{\n  typedef CwiseUnaryView<UnaryOp, ArgType> XprType;\n  \n  enum {\n    CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,\n    \n    Flags = (evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)),\n    \n    Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost...\n  };\n\n  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op) : m_d(op)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n\n  typedef typename XprType::Scalar Scalar;\n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index row, Index col) const\n  {\n    return m_d.func()(m_d.argImpl.coeff(row, col));\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index index) const\n  {\n    return m_d.func()(m_d.argImpl.coeff(index));\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index row, Index col)\n  {\n    return m_d.func()(m_d.argImpl.coeffRef(row, col));\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index index)\n  {\n    return m_d.func()(m_d.argImpl.coeffRef(index));\n  }\n\nprotected:\n\n  // this helper permits to completely eliminate the functor if it is empty\n  struct Data : private UnaryOp\n  {\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Data(const XprType& xpr) : UnaryOp(xpr.functor()), argImpl(xpr.nestedExpression()) {}\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    const UnaryOp& func() const { return static_cast<const UnaryOp&>(*this); }\n    evaluator<ArgType> argImpl;\n  };\n\n  Data m_d;\n};\n\n// -------------------- Map --------------------\n\n// FIXME perhaps the 
PlainObjectType could be provided by Derived::PlainObject ?\n// but that might complicate template specialization\ntemplate<typename Derived, typename PlainObjectType>\nstruct mapbase_evaluator;\n\ntemplate<typename Derived, typename PlainObjectType>\nstruct mapbase_evaluator : evaluator_base<Derived>\n{\n  typedef Derived  XprType;\n  typedef typename XprType::PointerType PointerType;\n  typedef typename XprType::Scalar Scalar;\n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n  \n  enum {\n    IsRowMajor = XprType::RowsAtCompileTime,\n    ColsAtCompileTime = XprType::ColsAtCompileTime,\n    CoeffReadCost = NumTraits<Scalar>::ReadCost\n  };\n\n  EIGEN_DEVICE_FUNC explicit mapbase_evaluator(const XprType& map)\n    : m_data(const_cast<PointerType>(map.data())),\n      m_innerStride(map.innerStride()),\n      m_outerStride(map.outerStride())\n  {\n    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(evaluator<Derived>::Flags&PacketAccessBit, internal::inner_stride_at_compile_time<Derived>::ret==1),\n                        PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index row, Index col) const\n  {\n    return m_data[col * colStride() + row * rowStride()];\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index index) const\n  {\n    return m_data[index * m_innerStride.value()];\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index row, Index col)\n  {\n    return m_data[col * colStride() + row * rowStride()];\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index index)\n  {\n    return m_data[index * m_innerStride.value()];\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index row, Index col) const\n  {\n    PointerType ptr = m_data + row * rowStride() + col * colStride();\n    return internal::ploadt<PacketType, 
LoadMode>(ptr);\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index index) const\n  {\n    return internal::ploadt<PacketType, LoadMode>(m_data + index * m_innerStride.value());\n  }\n\n  template<int StoreMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  void writePacket(Index row, Index col, const PacketType& x)\n  {\n    PointerType ptr = m_data + row * rowStride() + col * colStride();\n    return internal::pstoret<Scalar, PacketType, StoreMode>(ptr, x);\n  }\n\n  template<int StoreMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  void writePacket(Index index, const PacketType& x)\n  {\n    internal::pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x);\n  }\nprotected:\n  EIGEN_DEVICE_FUNC\n  inline Index rowStride() const { return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value(); }\n  EIGEN_DEVICE_FUNC\n  inline Index colStride() const { return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value(); }\n\n  PointerType m_data;\n  const internal::variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_innerStride;\n  const internal::variable_if_dynamic<Index, XprType::OuterStrideAtCompileTime> m_outerStride;\n};\n\ntemplate<typename PlainObjectType, int MapOptions, typename StrideType> \nstruct evaluator<Map<PlainObjectType, MapOptions, StrideType> >\n  : public mapbase_evaluator<Map<PlainObjectType, MapOptions, StrideType>, PlainObjectType>\n{\n  typedef Map<PlainObjectType, MapOptions, StrideType> XprType;\n  typedef typename XprType::Scalar Scalar;\n  // TODO: should check for smaller packet types once we can handle multi-sized packet types\n  typedef typename packet_traits<Scalar>::type PacketScalar;\n  \n  enum {\n    InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0\n                             ? 
int(PlainObjectType::InnerStrideAtCompileTime)\n                             : int(StrideType::InnerStrideAtCompileTime),\n    OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0\n                             ? int(PlainObjectType::OuterStrideAtCompileTime)\n                             : int(StrideType::OuterStrideAtCompileTime),\n    HasNoInnerStride = InnerStrideAtCompileTime == 1,\n    HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,\n    HasNoStride = HasNoInnerStride && HasNoOuterStride,\n    IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,\n    \n    PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit),\n    LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit),\n    Flags = int( evaluator<PlainObjectType>::Flags) & (LinearAccessMask&PacketAccessMask),\n    \n    Alignment = int(MapOptions)&int(AlignedMask)\n  };\n\n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map)\n    : mapbase_evaluator<XprType, PlainObjectType>(map) \n  { }\n};\n\n// -------------------- Ref --------------------\n\ntemplate<typename PlainObjectType, int RefOptions, typename StrideType> \nstruct evaluator<Ref<PlainObjectType, RefOptions, StrideType> >\n  : public mapbase_evaluator<Ref<PlainObjectType, RefOptions, StrideType>, PlainObjectType>\n{\n  typedef Ref<PlainObjectType, RefOptions, StrideType> XprType;\n  \n  enum {\n    Flags = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Flags,\n    Alignment = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Alignment\n  };\n\n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& ref)\n    : mapbase_evaluator<XprType, PlainObjectType>(ref) \n  { }\n};\n\n// -------------------- Block --------------------\n\ntemplate<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel,\n         bool HasDirectAccess = internal::has_direct_access<ArgType>::ret> struct block_evaluator;\n         
\ntemplate<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel> \nstruct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >\n  : block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel>\n{\n  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;\n  typedef typename XprType::Scalar Scalar;\n  // TODO: should check for smaller packet types once we can handle multi-sized packet types\n  typedef typename packet_traits<Scalar>::type PacketScalar;\n  \n  enum {\n    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,\n    \n    RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,\n    ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,\n    MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,\n    MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,\n    \n    ArgTypeIsRowMajor = (int(evaluator<ArgType>::Flags)&RowMajorBit) != 0,\n    IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1\n               : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0\n               : ArgTypeIsRowMajor,\n    HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor),\n    InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),\n    InnerStrideAtCompileTime = HasSameStorageOrderAsArgType\n                             ? int(inner_stride_at_compile_time<ArgType>::ret)\n                             : int(outer_stride_at_compile_time<ArgType>::ret),\n    OuterStrideAtCompileTime = HasSameStorageOrderAsArgType\n                             ? int(outer_stride_at_compile_time<ArgType>::ret)\n                             : int(inner_stride_at_compile_time<ArgType>::ret),\n    MaskPacketAccessBit = (InnerStrideAtCompileTime == 1) ? PacketAccessBit : 0,\n    \n    FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? 
LinearAccessBit : 0,    \n    FlagsRowMajorBit = XprType::Flags&RowMajorBit,\n    Flags0 = evaluator<ArgType>::Flags & ( (HereditaryBits & ~RowMajorBit) |\n                                           DirectAccessBit |\n                                           MaskPacketAccessBit),\n    Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit,\n    \n    PacketAlignment = unpacket_traits<PacketScalar>::alignment,\n    Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,\n    Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0)\n  };\n  typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;\n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& block) : block_evaluator_type(block)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n};\n\n// no direct-access => dispatch to a unary evaluator\ntemplate<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>\nstruct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAccess*/ false>\n  : unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >\n{\n  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;\n\n  EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)\n    : unary_evaluator<XprType>(block) \n  {}\n};\n\ntemplate<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>\nstruct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBased>\n  : evaluator_base<Block<ArgType, BlockRows, BlockCols, InnerPanel> >\n{\n  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;\n\n  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& block)\n    : m_argImpl(block.nestedExpression()), \n      m_startRow(block.startRow()), \n      m_startCol(block.startCol()) \n  { }\n \n  typedef typename XprType::Scalar Scalar;\n  typedef typename 
XprType::CoeffReturnType CoeffReturnType;\n\n  enum {\n    RowsAtCompileTime = XprType::RowsAtCompileTime\n  };\n \n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index row, Index col) const\n  { \n    return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col); \n  }\n  \n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index index) const\n  { \n    return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index row, Index col)\n  { \n    return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col); \n  }\n  \n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index index)\n  { \n    return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);\n  }\n \n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index row, Index col) const \n  { \n    return m_argImpl.template packet<LoadMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col); \n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index index) const \n  { \n    return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,\n                                       RowsAtCompileTime == 1 ? index : 0);\n  }\n  \n  template<int StoreMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  void writePacket(Index row, Index col, const PacketType& x) \n  {\n    return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x); \n  }\n  \n  template<int StoreMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  void writePacket(Index index, const PacketType& x) \n  {\n    return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,\n                                             RowsAtCompileTime == 1 ? 
index : 0,\n                                             x);\n  }\n \nprotected:\n  evaluator<ArgType> m_argImpl;\n  const variable_if_dynamic<Index, (ArgType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;\n  const variable_if_dynamic<Index, (ArgType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol;\n};\n\n// TODO: This evaluator does not actually use the child evaluator; \n// all action is via the data() as returned by the Block expression.\n\ntemplate<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel> \nstruct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ true>\n  : mapbase_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>,\n                      typename Block<ArgType, BlockRows, BlockCols, InnerPanel>::PlainObject>\n{\n  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;\n  typedef typename XprType::Scalar Scalar;\n\n  EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)\n    : mapbase_evaluator<XprType, typename XprType::PlainObject>(block) \n  {\n    // TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime\n    eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && \"data is not aligned\");\n  }\n};\n\n\n// -------------------- Select --------------------\n// NOTE shall we introduce a ternary_evaluator?\n\n// TODO enable vectorization for Select\ntemplate<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>\nstruct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >\n  : evaluator_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >\n{\n  typedef Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> XprType;\n  enum {\n    CoeffReadCost = evaluator<ConditionMatrixType>::CoeffReadCost\n                  + 
EIGEN_PLAIN_ENUM_MAX(evaluator<ThenMatrixType>::CoeffReadCost,\n                                         evaluator<ElseMatrixType>::CoeffReadCost),\n\n    Flags = (unsigned int)evaluator<ThenMatrixType>::Flags & evaluator<ElseMatrixType>::Flags & HereditaryBits,\n    \n    Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment)\n  };\n\n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& select)\n    : m_conditionImpl(select.conditionMatrix()),\n      m_thenImpl(select.thenMatrix()),\n      m_elseImpl(select.elseMatrix())\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n \n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index row, Index col) const\n  {\n    if (m_conditionImpl.coeff(row, col))\n      return m_thenImpl.coeff(row, col);\n    else\n      return m_elseImpl.coeff(row, col);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index index) const\n  {\n    if (m_conditionImpl.coeff(index))\n      return m_thenImpl.coeff(index);\n    else\n      return m_elseImpl.coeff(index);\n  }\n \nprotected:\n  evaluator<ConditionMatrixType> m_conditionImpl;\n  evaluator<ThenMatrixType> m_thenImpl;\n  evaluator<ElseMatrixType> m_elseImpl;\n};\n\n\n// -------------------- Replicate --------------------\n\ntemplate<typename ArgType, int RowFactor, int ColFactor> \nstruct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >\n  : evaluator_base<Replicate<ArgType, RowFactor, ColFactor> >\n{\n  typedef Replicate<ArgType, RowFactor, ColFactor> XprType;\n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n  enum {\n    Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? 
Dynamic : RowFactor*ColFactor\n  };\n  typedef typename internal::nested_eval<ArgType,Factor>::type ArgTypeNested;\n  typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;\n  \n  enum {\n    CoeffReadCost = evaluator<ArgTypeNestedCleaned>::CoeffReadCost,\n    LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0,\n    Flags = (evaluator<ArgTypeNestedCleaned>::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits<XprType>::Flags & RowMajorBit),\n    \n    Alignment = evaluator<ArgTypeNestedCleaned>::Alignment\n  };\n\n  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& replicate)\n    : m_arg(replicate.nestedExpression()),\n      m_argImpl(m_arg),\n      m_rows(replicate.nestedExpression().rows()),\n      m_cols(replicate.nestedExpression().cols())\n  {}\n \n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index row, Index col) const\n  {\n    // try to avoid using modulo; this is a pure optimization strategy\n    const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0\n                           : RowFactor==1 ? row\n                           : row % m_rows.value();\n    const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0\n                           : ColFactor==1 ? col\n                           : col % m_cols.value();\n    \n    return m_argImpl.coeff(actual_row, actual_col);\n  }\n  \n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index index) const\n  {\n    // try to avoid using modulo; this is a pure optimization strategy\n    const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1\n                                  ? (ColFactor==1 ?  index : index%m_cols.value())\n                                  : (RowFactor==1 ?  
index : index%m_rows.value());\n    \n    return m_argImpl.coeff(actual_index);\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index row, Index col) const\n  {\n    const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0\n                           : RowFactor==1 ? row\n                           : row % m_rows.value();\n    const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0\n                           : ColFactor==1 ? col\n                           : col % m_cols.value();\n\n    return m_argImpl.template packet<LoadMode,PacketType>(actual_row, actual_col);\n  }\n  \n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index index) const\n  {\n    const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1\n                                  ? (ColFactor==1 ?  index : index%m_cols.value())\n                                  : (RowFactor==1 ?  
index : index%m_rows.value());\n\n    return m_argImpl.template packet<LoadMode,PacketType>(actual_index);\n  }\n \nprotected:\n  const ArgTypeNested m_arg;\n  evaluator<ArgTypeNestedCleaned> m_argImpl;\n  const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows;\n  const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols;\n};\n\n\n// -------------------- PartialReduxExpr --------------------\n\ntemplate< typename ArgType, typename MemberOp, int Direction>\nstruct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> >\n  : evaluator_base<PartialReduxExpr<ArgType, MemberOp, Direction> >\n{\n  typedef PartialReduxExpr<ArgType, MemberOp, Direction> XprType;\n  typedef typename internal::nested_eval<ArgType,1>::type ArgTypeNested;\n  typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;\n  typedef typename ArgType::Scalar InputScalar;\n  typedef typename XprType::Scalar Scalar;\n  enum {\n    TraversalSize = Direction==int(Vertical) ? int(ArgType::RowsAtCompileTime) :  int(ArgType::ColsAtCompileTime)\n  };\n  typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;\n  enum {\n    CoeffReadCost = TraversalSize==Dynamic ? HugeCost\n                  : TraversalSize * evaluator<ArgType>::CoeffReadCost + int(CostOpType::value),\n    \n    Flags = (traits<XprType>::Flags&RowMajorBit) | (evaluator<ArgType>::Flags&(HereditaryBits&(~RowMajorBit))) | LinearAccessBit,\n    \n    Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized\n  };\n\n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType xpr)\n    : m_arg(xpr.nestedExpression()), m_functor(xpr.functor())\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize==Dynamic ? 
HugeCost : int(CostOpType::value));\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n\n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  const Scalar coeff(Index i, Index j) const\n  {\n    if (Direction==Vertical)\n      return m_functor(m_arg.col(j));\n    else\n      return m_functor(m_arg.row(i));\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  const Scalar coeff(Index index) const\n  {\n    if (Direction==Vertical)\n      return m_functor(m_arg.col(index));\n    else\n      return m_functor(m_arg.row(index));\n  }\n\nprotected:\n  typename internal::add_const_on_value_type<ArgTypeNested>::type m_arg;\n  const MemberOp m_functor;\n};\n\n\n// -------------------- MatrixWrapper and ArrayWrapper --------------------\n//\n// evaluator_wrapper_base<T> is a common base class for the\n// MatrixWrapper and ArrayWrapper evaluators.\n\ntemplate<typename XprType>\nstruct evaluator_wrapper_base\n  : evaluator_base<XprType>\n{\n  typedef typename remove_all<typename XprType::NestedExpressionType>::type ArgType;\n  enum {\n    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,\n    Flags = evaluator<ArgType>::Flags,\n    Alignment = evaluator<ArgType>::Alignment\n  };\n\n  EIGEN_DEVICE_FUNC explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}\n\n  typedef typename ArgType::Scalar Scalar;\n  typedef typename ArgType::CoeffReturnType CoeffReturnType;\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index row, Index col) const\n  {\n    return m_argImpl.coeff(row, col);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index index) const\n  {\n    return m_argImpl.coeff(index);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index row, Index col)\n  {\n    return m_argImpl.coeffRef(row, col);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index index)\n  {\n    return m_argImpl.coeffRef(index);\n  }\n\n  
template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index row, Index col) const\n  {\n    return m_argImpl.template packet<LoadMode,PacketType>(row, col);\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index index) const\n  {\n    return m_argImpl.template packet<LoadMode,PacketType>(index);\n  }\n\n  template<int StoreMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  void writePacket(Index row, Index col, const PacketType& x)\n  {\n    m_argImpl.template writePacket<StoreMode>(row, col, x);\n  }\n\n  template<int StoreMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  void writePacket(Index index, const PacketType& x)\n  {\n    m_argImpl.template writePacket<StoreMode>(index, x);\n  }\n\nprotected:\n  evaluator<ArgType> m_argImpl;\n};\n\ntemplate<typename TArgType>\nstruct unary_evaluator<MatrixWrapper<TArgType> >\n  : evaluator_wrapper_base<MatrixWrapper<TArgType> >\n{\n  typedef MatrixWrapper<TArgType> XprType;\n\n  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper)\n    : evaluator_wrapper_base<MatrixWrapper<TArgType> >(wrapper.nestedExpression())\n  { }\n};\n\ntemplate<typename TArgType>\nstruct unary_evaluator<ArrayWrapper<TArgType> >\n  : evaluator_wrapper_base<ArrayWrapper<TArgType> >\n{\n  typedef ArrayWrapper<TArgType> XprType;\n\n  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper)\n    : evaluator_wrapper_base<ArrayWrapper<TArgType> >(wrapper.nestedExpression())\n  { }\n};\n\n\n// -------------------- Reverse --------------------\n\n// defined in Reverse.h:\ntemplate<typename PacketType, bool ReversePacket> struct reverse_packet_cond;\n\ntemplate<typename ArgType, int Direction>\nstruct unary_evaluator<Reverse<ArgType, Direction> >\n  : evaluator_base<Reverse<ArgType, Direction> >\n{\n  typedef Reverse<ArgType, Direction> XprType;\n  typedef typename XprType::Scalar Scalar;\n  typedef typename XprType::CoeffReturnType 
CoeffReturnType;\n\n  enum {\n    IsRowMajor = XprType::IsRowMajor,\n    IsColMajor = !IsRowMajor,\n    ReverseRow = (Direction == Vertical)   || (Direction == BothDirections),\n    ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),\n    ReversePacket = (Direction == BothDirections)\n                    || ((Direction == Vertical)   && IsColMajor)\n                    || ((Direction == Horizontal) && IsRowMajor),\n                    \n    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,\n    \n    // let's enable LinearAccess only with vectorization because of the product overhead\n    // FIXME enable DirectAccess with negative strides?\n    Flags0 = evaluator<ArgType>::Flags,\n    LinearAccess = ( (Direction==BothDirections) && (int(Flags0)&PacketAccessBit) )\n                  || ((ReverseRow && XprType::ColsAtCompileTime==1) || (ReverseCol && XprType::RowsAtCompileTime==1))\n                 ? LinearAccessBit : 0,\n\n    Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess),\n    \n    Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f.\n  };\n\n  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& reverse)\n    : m_argImpl(reverse.nestedExpression()),\n      m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1),\n      m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1)\n  { }\n \n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index row, Index col) const\n  {\n    return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row,\n                           ReverseCol ? m_cols.value() - col - 1 : col);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index index) const\n  {\n    return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index row, Index col)\n  {\n    return m_argImpl.coeffRef(ReverseRow ? 
m_rows.value() - row - 1 : row,\n                              ReverseCol ? m_cols.value() - col - 1 : col);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index index)\n  {\n    return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1);\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index row, Index col) const\n  {\n    enum {\n      PacketSize = unpacket_traits<PacketType>::size,\n      OffsetRow  = ReverseRow && IsColMajor ? PacketSize : 1,\n      OffsetCol  = ReverseCol && IsRowMajor ? PacketSize : 1\n    };\n    typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;\n    return reverse_packet::run(m_argImpl.template packet<LoadMode,PacketType>(\n                                  ReverseRow ? m_rows.value() - row - OffsetRow : row,\n                                  ReverseCol ? m_cols.value() - col - OffsetCol : col));\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  PacketType packet(Index index) const\n  {\n    enum { PacketSize = unpacket_traits<PacketType>::size };\n    return preverse(m_argImpl.template packet<LoadMode,PacketType>(m_rows.value() * m_cols.value() - index - PacketSize));\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  void writePacket(Index row, Index col, const PacketType& x)\n  {\n    // FIXME we could factorize some code with packet(i,j)\n    enum {\n      PacketSize = unpacket_traits<PacketType>::size,\n      OffsetRow  = ReverseRow && IsColMajor ? PacketSize : 1,\n      OffsetCol  = ReverseCol && IsRowMajor ? PacketSize : 1\n    };\n    typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;\n    m_argImpl.template writePacket<LoadMode>(\n                                  ReverseRow ? m_rows.value() - row - OffsetRow : row,\n                                  ReverseCol ? 
m_cols.value() - col - OffsetCol : col,\n                                  reverse_packet::run(x));\n  }\n\n  template<int LoadMode, typename PacketType>\n  EIGEN_STRONG_INLINE\n  void writePacket(Index index, const PacketType& x)\n  {\n    enum { PacketSize = unpacket_traits<PacketType>::size };\n    m_argImpl.template writePacket<LoadMode>\n      (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x));\n  }\n \nprotected:\n  evaluator<ArgType> m_argImpl;\n\n  // If we do not reverse rows, then we do not need to know the number of rows; same for columns\n  // Nonetheless, in this case it is important to set to 1 such that the coeff(index) method works fine for vectors.\n  const variable_if_dynamic<Index, ReverseRow ? ArgType::RowsAtCompileTime : 1> m_rows;\n  const variable_if_dynamic<Index, ReverseCol ? ArgType::ColsAtCompileTime : 1> m_cols;\n};\n\n\n// -------------------- Diagonal --------------------\n\ntemplate<typename ArgType, int DiagIndex>\nstruct evaluator<Diagonal<ArgType, DiagIndex> >\n  : evaluator_base<Diagonal<ArgType, DiagIndex> >\n{\n  typedef Diagonal<ArgType, DiagIndex> XprType;\n  \n  enum {\n    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,\n    \n    Flags = (unsigned int)(evaluator<ArgType>::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit,\n    \n    Alignment = 0\n  };\n\n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& diagonal)\n    : m_argImpl(diagonal.nestedExpression()),\n      m_index(diagonal.index())\n  { }\n \n  typedef typename XprType::Scalar Scalar;\n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index row, Index) const\n  {\n    return m_argImpl.coeff(row + rowOffset(), row + colOffset());\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  CoeffReturnType coeff(Index index) const\n  {\n    return m_argImpl.coeff(index + rowOffset(), index + colOffset());\n  }\n\n  EIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index row, Index)\n  {\n    return m_argImpl.coeffRef(row + rowOffset(), row + colOffset());\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index index)\n  {\n    return m_argImpl.coeffRef(index + rowOffset(), index + colOffset());\n  }\n\nprotected:\n  evaluator<ArgType> m_argImpl;\n  const internal::variable_if_dynamicindex<Index, XprType::DiagIndex> m_index;\n\nprivate:\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); }\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; }\n};\n\n\n//----------------------------------------------------------------------\n// deprecated code\n//----------------------------------------------------------------------\n\n// -------------------- EvalToTemp --------------------\n\n// expression class for evaluating nested expression to a temporary\n\ntemplate<typename ArgType> class EvalToTemp;\n\ntemplate<typename ArgType>\nstruct traits<EvalToTemp<ArgType> >\n  : public traits<ArgType>\n{ };\n\ntemplate<typename ArgType>\nclass EvalToTemp\n  : public dense_xpr_base<EvalToTemp<ArgType> >::type\n{\n public:\n \n  typedef typename dense_xpr_base<EvalToTemp>::type Base;\n  EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp)\n \n  explicit EvalToTemp(const ArgType& arg)\n    : m_arg(arg)\n  { }\n \n  const ArgType& arg() const\n  {\n    return m_arg;\n  }\n\n  Index rows() const \n  {\n    return m_arg.rows();\n  }\n\n  Index cols() const \n  {\n    return m_arg.cols();\n  }\n\n private:\n  const ArgType& m_arg;\n};\n \ntemplate<typename ArgType>\nstruct evaluator<EvalToTemp<ArgType> >\n  : public evaluator<typename ArgType::PlainObject>\n{\n  typedef EvalToTemp<ArgType>                   XprType;\n  typedef typename ArgType::PlainObject         PlainObject;\n  typedef evaluator<PlainObject> Base;\n  \n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& 
xpr)\n    : m_result(xpr.arg())\n  {\n    ::new (static_cast<Base*>(this)) Base(m_result);\n  }\n\n  // This constructor is used when nesting an EvalTo evaluator in another evaluator\n  EIGEN_DEVICE_FUNC evaluator(const ArgType& arg)\n    : m_result(arg)\n  {\n    ::new (static_cast<Base*>(this)) Base(m_result);\n  }\n\nprotected:\n  PlainObject m_result;\n};\n\n} // namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_COREEVALUATORS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/CoreIterators.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_COREITERATORS_H\n#define EIGEN_COREITERATORS_H\n\nnamespace Eigen { \n\n/* This file contains the respective InnerIterator definition of the expressions defined in Eigen/Core\n */\n\nnamespace internal {\n\ntemplate<typename XprType, typename EvaluatorKind>\nclass inner_iterator_selector;\n\n}\n\n/** \\class InnerIterator\n  * \\brief An InnerIterator allows to loop over the element of any matrix expression.\n  * \n  * \\warning To be used with care because an evaluator is constructed every time an InnerIterator iterator is constructed.\n  * \n  * TODO: add a usage example\n  */\ntemplate<typename XprType>\nclass InnerIterator\n{\nprotected:\n  typedef internal::inner_iterator_selector<XprType, typename internal::evaluator_traits<XprType>::Kind> IteratorType;\n  typedef internal::evaluator<XprType> EvaluatorType;\n  typedef typename internal::traits<XprType>::Scalar Scalar;\npublic:\n  /** Construct an iterator over the \\a outerId -th row or column of \\a xpr */\n  InnerIterator(const XprType &xpr, const Index &outerId)\n    : m_eval(xpr), m_iter(m_eval, outerId, xpr.innerSize())\n  {}\n  \n  /// \\returns the value of the current coefficient.\n  EIGEN_STRONG_INLINE Scalar value() const          { return m_iter.value(); }\n  /** Increment the iterator \\c *this to the next non-zero coefficient.\n    * Explicit zeros are not skipped over. 
To skip explicit zeros, see class SparseView\n    */\n  EIGEN_STRONG_INLINE InnerIterator& operator++()   { m_iter.operator++(); return *this; }\n  EIGEN_STRONG_INLINE InnerIterator& operator+=(Index i) { m_iter.operator+=(i); return *this; }\n  EIGEN_STRONG_INLINE InnerIterator operator+(Index i) \n  { InnerIterator result(*this); result+=i; return result; }\n    \n\n  /// \\returns the column or row index of the current coefficient.\n  EIGEN_STRONG_INLINE Index index() const           { return m_iter.index(); }\n  /// \\returns the row index of the current coefficient.\n  EIGEN_STRONG_INLINE Index row() const             { return m_iter.row(); }\n  /// \\returns the column index of the current coefficient.\n  EIGEN_STRONG_INLINE Index col() const             { return m_iter.col(); }\n  /// \\returns \\c true if the iterator \\c *this still references a valid coefficient.\n  EIGEN_STRONG_INLINE operator bool() const         { return m_iter; }\n  \nprotected:\n  EvaluatorType m_eval;\n  IteratorType m_iter;\nprivate:\n  // If you get here, then you're not using the right InnerIterator type, e.g.:\n  //   SparseMatrix<double,RowMajor> A;\n  //   SparseMatrix<double>::InnerIterator it(A,0);\n  template<typename T> InnerIterator(const EigenBase<T>&,Index outer);\n};\n\nnamespace internal {\n\n// Generic inner iterator implementation for dense objects\ntemplate<typename XprType>\nclass inner_iterator_selector<XprType, IndexBased>\n{\nprotected:\n  typedef evaluator<XprType> EvaluatorType;\n  typedef typename traits<XprType>::Scalar Scalar;\n  enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit };\n  \npublic:\n  EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &innerSize)\n    : m_eval(eval), m_inner(0), m_outer(outerId), m_end(innerSize)\n  {}\n\n  EIGEN_STRONG_INLINE Scalar value() const\n  {\n    return (IsRowMajor) ? 
m_eval.coeff(m_outer, m_inner)\n                        : m_eval.coeff(m_inner, m_outer);\n  }\n\n  EIGEN_STRONG_INLINE inner_iterator_selector& operator++() { m_inner++; return *this; }\n\n  EIGEN_STRONG_INLINE Index index() const { return m_inner; }\n  inline Index row() const { return IsRowMajor ? m_outer : index(); }\n  inline Index col() const { return IsRowMajor ? index() : m_outer; }\n\n  EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }\n\nprotected:\n  const EvaluatorType& m_eval;\n  Index m_inner;\n  const Index m_outer;\n  const Index m_end;\n};\n\n// For iterator-based evaluator, inner-iterator is already implemented as\n// evaluator<>::InnerIterator\ntemplate<typename XprType>\nclass inner_iterator_selector<XprType, IteratorBased>\n : public evaluator<XprType>::InnerIterator\n{\nprotected:\n  typedef typename evaluator<XprType>::InnerIterator Base;\n  typedef evaluator<XprType> EvaluatorType;\n  \npublic:\n  EIGEN_STRONG_INLINE inner_iterator_selector(const EvaluatorType &eval, const Index &outerId, const Index &/*innerSize*/)\n    : Base(eval, outerId)\n  {}  \n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_COREITERATORS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/CwiseBinaryOp.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_CWISE_BINARY_OP_H\n#define EIGEN_CWISE_BINARY_OP_H\n\nnamespace Eigen {\n\nnamespace internal {\ntemplate<typename BinaryOp, typename Lhs, typename Rhs>\nstruct traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >\n{\n  // we must not inherit from traits<Lhs> since it has\n  // the potential to cause problems with MSVC\n  typedef typename remove_all<Lhs>::type Ancestor;\n  typedef typename traits<Ancestor>::XprKind XprKind;\n  enum {\n    RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,\n    ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,\n    MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,\n    MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime\n  };\n\n  // even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor),\n  // we still want to handle the case when the result type is different.\n  typedef typename result_of<\n                     BinaryOp(\n                       const typename Lhs::Scalar&,\n                       const typename Rhs::Scalar&\n                     )\n                   >::type Scalar;\n  typedef typename cwise_promote_storage_type<typename traits<Lhs>::StorageKind,\n                                              typename traits<Rhs>::StorageKind,\n                                              BinaryOp>::ret StorageKind;\n  typedef typename promote_index_type<typename traits<Lhs>::StorageIndex,\n                                      typename traits<Rhs>::StorageIndex>::type StorageIndex;\n  typedef 
typename Lhs::Nested LhsNested;\n  typedef typename Rhs::Nested RhsNested;\n  typedef typename remove_reference<LhsNested>::type _LhsNested;\n  typedef typename remove_reference<RhsNested>::type _RhsNested;\n  enum {\n    Flags = cwise_promote_storage_order<typename traits<Lhs>::StorageKind,typename traits<Rhs>::StorageKind,_LhsNested::Flags & RowMajorBit,_RhsNested::Flags & RowMajorBit>::value\n  };\n};\n} // end namespace internal\n\ntemplate<typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind>\nclass CwiseBinaryOpImpl;\n\n/** \\class CwiseBinaryOp\n  * \\ingroup Core_Module\n  *\n  * \\brief Generic expression where a coefficient-wise binary operator is applied to two expressions\n  *\n  * \\tparam BinaryOp template functor implementing the operator\n  * \\tparam LhsType the type of the left-hand side\n  * \\tparam RhsType the type of the right-hand side\n  *\n  * This class represents an expression  where a coefficient-wise binary operator is applied to two expressions.\n  * It is the return type of binary operators, by which we mean only those binary operators where\n  * both the left-hand side and the right-hand side are Eigen expressions.\n  * For example, the return type of matrix1+matrix2 is a CwiseBinaryOp.\n  *\n  * Most of the time, this is the only way that it is used, so you typically don't have to name\n  * CwiseBinaryOp types explicitly.\n  *\n  * \\sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp\n  */\ntemplate<typename BinaryOp, typename LhsType, typename RhsType>\nclass CwiseBinaryOp : \n  public CwiseBinaryOpImpl<\n          BinaryOp, LhsType, RhsType,\n          typename internal::cwise_promote_storage_type<typename internal::traits<LhsType>::StorageKind,\n                                                        typename internal::traits<RhsType>::StorageKind,\n                                                        BinaryOp>::ret>,\n  
internal::no_assignment_operator\n{\n  public:\n    \n    typedef typename internal::remove_all<BinaryOp>::type Functor;\n    typedef typename internal::remove_all<LhsType>::type Lhs;\n    typedef typename internal::remove_all<RhsType>::type Rhs;\n\n    typedef typename CwiseBinaryOpImpl<\n        BinaryOp, LhsType, RhsType,\n        typename internal::cwise_promote_storage_type<typename internal::traits<LhsType>::StorageKind,\n                                                      typename internal::traits<Rhs>::StorageKind,\n                                                      BinaryOp>::ret>::Base Base;\n    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp)\n\n    typedef typename internal::ref_selector<LhsType>::type LhsNested;\n    typedef typename internal::ref_selector<RhsType>::type RhsNested;\n    typedef typename internal::remove_reference<LhsNested>::type _LhsNested;\n    typedef typename internal::remove_reference<RhsNested>::type _RhsNested;\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& aLhs, const Rhs& aRhs, const BinaryOp& func = BinaryOp())\n      : m_lhs(aLhs), m_rhs(aRhs), m_functor(func)\n    {\n      EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar);\n      // require the sizes to match\n      EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs)\n      eigen_assert(aLhs.rows() == aRhs.rows() && aLhs.cols() == aRhs.cols());\n    }\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Index rows() const {\n      // return the fixed size type if available to enable compile time optimizations\n      if (internal::traits<typename internal::remove_all<LhsNested>::type>::RowsAtCompileTime==Dynamic)\n        return m_rhs.rows();\n      else\n        return m_lhs.rows();\n    }\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Index cols() const {\n      // return the fixed size type if available to enable compile time optimizations\n      if (internal::traits<typename 
internal::remove_all<LhsNested>::type>::ColsAtCompileTime==Dynamic)\n        return m_rhs.cols();\n      else\n        return m_lhs.cols();\n    }\n\n    /** \\returns the left hand side nested expression */\n    EIGEN_DEVICE_FUNC\n    const _LhsNested& lhs() const { return m_lhs; }\n    /** \\returns the right hand side nested expression */\n    EIGEN_DEVICE_FUNC\n    const _RhsNested& rhs() const { return m_rhs; }\n    /** \\returns the functor representing the binary operation */\n    EIGEN_DEVICE_FUNC\n    const BinaryOp& functor() const { return m_functor; }\n\n  protected:\n    LhsNested m_lhs;\n    RhsNested m_rhs;\n    const BinaryOp m_functor;\n};\n\n// Generic API dispatcher\ntemplate<typename BinaryOp, typename Lhs, typename Rhs, typename StorageKind>\nclass CwiseBinaryOpImpl\n  : public internal::generic_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type\n{\npublic:\n  typedef typename internal::generic_xpr_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >::type Base;\n};\n\n/** replaces \\c *this by \\c *this - \\a other.\n  *\n  * \\returns a reference to \\c *this\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &\nMatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other)\n{\n  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());\n  return derived();\n}\n\n/** replaces \\c *this by \\c *this + \\a other.\n  *\n  * \\returns a reference to \\c *this\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived &\nMatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other)\n{\n  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());\n  return derived();\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_CWISE_BINARY_OP_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/CwiseNullaryOp.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_CWISE_NULLARY_OP_H\n#define EIGEN_CWISE_NULLARY_OP_H\n\nnamespace Eigen {\n\nnamespace internal {\ntemplate<typename NullaryOp, typename PlainObjectType>\nstruct traits<CwiseNullaryOp<NullaryOp, PlainObjectType> > : traits<PlainObjectType>\n{\n  enum {\n    Flags = traits<PlainObjectType>::Flags & RowMajorBit\n  };\n};\n\n} // namespace internal\n\n/** \\class CwiseNullaryOp\n  * \\ingroup Core_Module\n  *\n  * \\brief Generic expression of a matrix where all coefficients are defined by a functor\n  *\n  * \\tparam NullaryOp template functor implementing the operator\n  * \\tparam PlainObjectType the underlying plain matrix/array type\n  *\n  * This class represents an expression of a generic nullary operator.\n  * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods,\n  * and most of the time this is the only way it is used.\n  *\n  * However, if you want to write a function returning such an expression, you\n  * will need to use this class.\n  *\n  * The functor NullaryOp must expose one of the following method:\n    <table class=\"manual\">\n    <tr            ><td>\\c operator()() </td><td>if the procedural generation does not depend on the coefficient entries (e.g., random numbers)</td></tr>\n    <tr class=\"alt\"><td>\\c operator()(Index i)</td><td>if the procedural generation makes sense for vectors only and that it depends on the coefficient index \\c i (e.g., linspace) </td></tr>\n    <tr            ><td>\\c operator()(Index i,Index j)</td><td>if the procedural generation depends on the matrix coordinates \\c i, \\c j 
(e.g., to generate a checkerboard with 0 and 1)</td></tr>\n    </table>\n  * It is also possible to expose the last two operators if the generation makes sense for matrices but can be optimized for vectors.\n  *\n  * See DenseBase::NullaryExpr(Index,const CustomNullaryOp&) for an example binding\n  * C++11 random number generators.\n  *\n  * A nullary expression can also be used to implement custom sophisticated matrix manipulations\n  * that cannot be covered by the existing set of natively supported matrix manipulations.\n  * See this \\ref TopicCustomizing_NullaryExpr \"page\" for some examples and additional explanations\n  * on the behavior of CwiseNullaryOp.\n  *\n  * \\sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr\n  */\ntemplate<typename NullaryOp, typename PlainObjectType>\nclass CwiseNullaryOp : public internal::dense_xpr_base< CwiseNullaryOp<NullaryOp, PlainObjectType> >::type, internal::no_assignment_operator\n{\n  public:\n\n    typedef typename internal::dense_xpr_base<CwiseNullaryOp>::type Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp)\n\n    EIGEN_DEVICE_FUNC\n    CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp())\n      : m_rows(rows), m_cols(cols), m_functor(func)\n    {\n      eigen_assert(rows >= 0\n            && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)\n            &&  cols >= 0\n            && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));\n    }\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); }\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); }\n\n    /** \\returns the functor representing the nullary operation */\n    EIGEN_DEVICE_FUNC\n    const NullaryOp& functor() const { return m_functor; }\n\n  protected:\n    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;\n    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;\n    
const NullaryOp m_functor;\n};\n\n\n/** \\returns an expression of a matrix defined by a custom functor \\a func\n  *\n  * The parameters \\a rows and \\a cols are the number of rows and of columns of\n  * the returned matrix. Must be compatible with this MatrixBase type.\n  *\n  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,\n  * it is redundant to pass \\a rows and \\a cols as arguments, so Zero() should be used\n  * instead.\n  *\n  * The template parameter \\a CustomNullaryOp is the type of the functor.\n  *\n  * \\sa class CwiseNullaryOp\n  */\ntemplate<typename Derived>\ntemplate<typename CustomNullaryOp>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>\nDenseBase<Derived>::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func)\n{\n  return CwiseNullaryOp<CustomNullaryOp, PlainObject>(rows, cols, func);\n}\n\n/** \\returns an expression of a matrix defined by a custom functor \\a func\n  *\n  * The parameter \\a size is the size of the returned vector.\n  * Must be compatible with this MatrixBase type.\n  *\n  * \\only_for_vectors\n  *\n  * This variant is meant to be used for dynamic-size vector types. 
For fixed-size types,\n  * it is redundant to pass \\a size as argument, so Zero() should be used\n  * instead.\n  *\n  * The template parameter \\a CustomNullaryOp is the type of the functor.\n  *\n  * Here is an example with C++11 random generators: \\include random_cpp11.cpp\n  * Output: \\verbinclude random_cpp11.out\n  * \n  * \\sa class CwiseNullaryOp\n  */\ntemplate<typename Derived>\ntemplate<typename CustomNullaryOp>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>\nDenseBase<Derived>::NullaryExpr(Index size, const CustomNullaryOp& func)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  if(RowsAtCompileTime == 1) return CwiseNullaryOp<CustomNullaryOp, PlainObject>(1, size, func);\n  else return CwiseNullaryOp<CustomNullaryOp, PlainObject>(size, 1, func);\n}\n\n/** \\returns an expression of a matrix defined by a custom functor \\a func\n  *\n  * This variant is only for fixed-size DenseBase types. For dynamic-size types, you\n  * need to use the variants taking size arguments.\n  *\n  * The template parameter \\a CustomNullaryOp is the type of the functor.\n  *\n  * \\sa class CwiseNullaryOp\n  */\ntemplate<typename Derived>\ntemplate<typename CustomNullaryOp>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseNullaryOp<CustomNullaryOp, typename DenseBase<Derived>::PlainObject>\nDenseBase<Derived>::NullaryExpr(const CustomNullaryOp& func)\n{\n  return CwiseNullaryOp<CustomNullaryOp, PlainObject>(RowsAtCompileTime, ColsAtCompileTime, func);\n}\n\n/** \\returns an expression of a constant matrix of value \\a value\n  *\n  * The parameters \\a rows and \\a cols are the number of rows and of columns of\n  * the returned matrix. Must be compatible with this DenseBase type.\n  *\n  * This variant is meant to be used for dynamic-size matrix types. 
For fixed-size types,\n  * it is redundant to pass \\a rows and \\a cols as arguments, so Zero() should be used\n  * instead.\n  *\n  * The template parameter \\a CustomNullaryOp is the type of the functor.\n  *\n  * \\sa class CwiseNullaryOp\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType\nDenseBase<Derived>::Constant(Index rows, Index cols, const Scalar& value)\n{\n  return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_constant_op<Scalar>(value));\n}\n\n/** \\returns an expression of a constant matrix of value \\a value\n  *\n  * The parameter \\a size is the size of the returned vector.\n  * Must be compatible with this DenseBase type.\n  *\n  * \\only_for_vectors\n  *\n  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,\n  * it is redundant to pass \\a size as argument, so Zero() should be used\n  * instead.\n  *\n  * The template parameter \\a CustomNullaryOp is the type of the functor.\n  *\n  * \\sa class CwiseNullaryOp\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType\nDenseBase<Derived>::Constant(Index size, const Scalar& value)\n{\n  return DenseBase<Derived>::NullaryExpr(size, internal::scalar_constant_op<Scalar>(value));\n}\n\n/** \\returns an expression of a constant matrix of value \\a value\n  *\n  * This variant is only for fixed-size DenseBase types. 
For dynamic-size types, you\n  * need to use the variants taking size arguments.\n  *\n  * The template parameter \\a CustomNullaryOp is the type of the functor.\n  *\n  * \\sa class CwiseNullaryOp\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType\nDenseBase<Derived>::Constant(const Scalar& value)\n{\n  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)\n  return DenseBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_constant_op<Scalar>(value));\n}\n\n/** \\deprecated because of accuracy loss. In Eigen 3.3, it is an alias for LinSpaced(Index,const Scalar&,const Scalar&)\n  *\n  * \\sa LinSpaced(Index,Scalar,Scalar), setLinSpaced(Index,const Scalar&,const Scalar&)\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType\nDenseBase<Derived>::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,PacketScalar>(low,high,size));\n}\n\n/** \\deprecated because of accuracy loss. 
In Eigen 3.3, it is an alias for LinSpaced(const Scalar&,const Scalar&)\n  *\n  * \\sa LinSpaced(Scalar,Scalar)\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType\nDenseBase<Derived>::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)\n  return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,PacketScalar>(low,high,Derived::SizeAtCompileTime));\n}\n\n/**\n  * \\brief Sets a linearly spaced vector.\n  *\n  * The function generates 'size' equally spaced values in the closed interval [low,high].\n  * When size is set to 1, a vector of length 1 containing 'high' is returned.\n  *\n  * \\only_for_vectors\n  *\n  * Example: \\include DenseBase_LinSpaced.cpp\n  * Output: \\verbinclude DenseBase_LinSpaced.out\n  *\n  * For integer scalar types, an even spacing is possible if and only if the length of the range,\n  * i.e., \\c high-low is a scalar multiple of \\c size-1, or if \\c size is a scalar multiple of the\n  * number of values \\c high-low+1 (meaning each value can be repeated the same number of time).\n  * If one of these two considions is not satisfied, then \\c high is lowered to the largest value\n  * satisfying one of this constraint.\n  * Here are some examples:\n  *\n  * Example: \\include DenseBase_LinSpacedInt.cpp\n  * Output: \\verbinclude DenseBase_LinSpacedInt.out\n  *\n  * \\sa setLinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType\nDenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return DenseBase<Derived>::NullaryExpr(size, internal::linspaced_op<Scalar,PacketScalar>(low,high,size));\n}\n\n/**\n  * 
\\copydoc DenseBase::LinSpaced(Index, const Scalar&, const Scalar&)\n  * Special version for fixed size types which does not require the size parameter.\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::RandomAccessLinSpacedReturnType\nDenseBase<Derived>::LinSpaced(const Scalar& low, const Scalar& high)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)\n  return DenseBase<Derived>::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op<Scalar,PacketScalar>(low,high,Derived::SizeAtCompileTime));\n}\n\n/** \\returns true if all coefficients in this matrix are approximately equal to \\a val, to within precision \\a prec */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC bool DenseBase<Derived>::isApproxToConstant\n(const Scalar& val, const RealScalar& prec) const\n{\n  typename internal::nested_eval<Derived,1>::type self(derived());\n  for(Index j = 0; j < cols(); ++j)\n    for(Index i = 0; i < rows(); ++i)\n      if(!internal::isApprox(self.coeff(i, j), val, prec))\n        return false;\n  return true;\n}\n\n/** This is just an alias for isApproxToConstant().\n  *\n  * \\returns true if all coefficients in this matrix are approximately equal to \\a value, to within precision \\a prec */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC bool DenseBase<Derived>::isConstant\n(const Scalar& val, const RealScalar& prec) const\n{\n  return isApproxToConstant(val, prec);\n}\n\n/** Alias for setConstant(): sets all coefficients in this expression to \\a val.\n  *\n  * \\sa setConstant(), Constant(), class CwiseNullaryOp\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void DenseBase<Derived>::fill(const Scalar& val)\n{\n  setConstant(val);\n}\n\n/** Sets all coefficients in this expression to value \\a val.\n  *\n  * \\sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class 
CwiseNullaryOp, setZero(), setOnes()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setConstant(const Scalar& val)\n{\n  return derived() = Constant(rows(), cols(), val);\n}\n\n/** Resizes to the given \\a size, and sets all coefficients in this expression to the given value \\a val.\n  *\n  * \\only_for_vectors\n  *\n  * Example: \\include Matrix_setConstant_int.cpp\n  * Output: \\verbinclude Matrix_setConstant_int.out\n  *\n  * \\sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&\nPlainObjectBase<Derived>::setConstant(Index size, const Scalar& val)\n{\n  resize(size);\n  return setConstant(val);\n}\n\n/** Resizes to the given size, and sets all coefficients in this expression to the given value \\a val.\n  *\n  * \\param rows the new number of rows\n  * \\param cols the new number of columns\n  * \\param val the value to which all coefficients are set\n  *\n  * Example: \\include Matrix_setConstant_int_int.cpp\n  * Output: \\verbinclude Matrix_setConstant_int_int.out\n  *\n  * \\sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&)\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&\nPlainObjectBase<Derived>::setConstant(Index rows, Index cols, const Scalar& val)\n{\n  resize(rows, cols);\n  return setConstant(val);\n}\n\n/**\n  * \\brief Sets a linearly spaced vector.\n  *\n  * The function generates 'size' equally spaced values in the closed interval [low,high].\n  * When size is set to 1, a vector of length 1 containing 'high' is returned.\n  *\n  * \\only_for_vectors\n  *\n  * Example: \\include DenseBase_setLinSpaced.cpp\n  * Output: \\verbinclude DenseBase_setLinSpaced.out\n  *\n  * For integer scalar types, do not miss the 
explanations on the definition\n  * of \\link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \\endlink.\n  *\n  * \\sa LinSpaced(Index,const Scalar&,const Scalar&), CwiseNullaryOp\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(Index newSize, const Scalar& low, const Scalar& high)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return derived() = Derived::NullaryExpr(newSize, internal::linspaced_op<Scalar,PacketScalar>(low,high,newSize));\n}\n\n/**\n  * \\brief Sets a linearly spaced vector.\n  *\n  * The function fills \\c *this with equally spaced values in the closed interval [low,high].\n  * When size is set to 1, a vector of length 1 containing 'high' is returned.\n  *\n  * \\only_for_vectors\n  *\n  * For integer scalar types, do not miss the explanations on the definition\n  * of \\link LinSpaced(Index,const Scalar&,const Scalar&) even spacing \\endlink.\n  *\n  * \\sa LinSpaced(Index,const Scalar&,const Scalar&), setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setLinSpaced(const Scalar& low, const Scalar& high)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return setLinSpaced(size(), low, high);\n}\n\n// zero:\n\n/** \\returns an expression of a zero matrix.\n  *\n  * The parameters \\a rows and \\a cols are the number of rows and of columns of\n  * the returned matrix. Must be compatible with this MatrixBase type.\n  *\n  * This variant is meant to be used for dynamic-size matrix types. 
For fixed-size types,\n  * it is redundant to pass \\a rows and \\a cols as arguments, so Zero() should be used\n  * instead.\n  *\n  * Example: \\include MatrixBase_zero_int_int.cpp\n  * Output: \\verbinclude MatrixBase_zero_int_int.out\n  *\n  * \\sa Zero(), Zero(Index)\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType\nDenseBase<Derived>::Zero(Index rows, Index cols)\n{\n  return Constant(rows, cols, Scalar(0));\n}\n\n/** \\returns an expression of a zero vector.\n  *\n  * The parameter \\a size is the size of the returned vector.\n  * Must be compatible with this MatrixBase type.\n  *\n  * \\only_for_vectors\n  *\n  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,\n  * it is redundant to pass \\a size as argument, so Zero() should be used\n  * instead.\n  *\n  * Example: \\include MatrixBase_zero_int.cpp\n  * Output: \\verbinclude MatrixBase_zero_int.out\n  *\n  * \\sa Zero(), Zero(Index,Index)\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType\nDenseBase<Derived>::Zero(Index size)\n{\n  return Constant(size, Scalar(0));\n}\n\n/** \\returns an expression of a fixed-size zero matrix or vector.\n  *\n  * This variant is only for fixed-size MatrixBase types. 
For dynamic-size types, you\n  * need to use the variants taking size arguments.\n  *\n  * Example: \\include MatrixBase_zero.cpp\n  * Output: \\verbinclude MatrixBase_zero.out\n  *\n  * \\sa Zero(Index), Zero(Index,Index)\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType\nDenseBase<Derived>::Zero()\n{\n  return Constant(Scalar(0));\n}\n\n/** \\returns true if *this is approximately equal to the zero matrix,\n  *          within the precision given by \\a prec.\n  *\n  * Example: \\include MatrixBase_isZero.cpp\n  * Output: \\verbinclude MatrixBase_isZero.out\n  *\n  * \\sa class CwiseNullaryOp, Zero()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC bool DenseBase<Derived>::isZero(const RealScalar& prec) const\n{\n  typename internal::nested_eval<Derived,1>::type self(derived());\n  for(Index j = 0; j < cols(); ++j)\n    for(Index i = 0; i < rows(); ++i)\n      if(!internal::isMuchSmallerThan(self.coeff(i, j), static_cast<Scalar>(1), prec))\n        return false;\n  return true;\n}\n\n/** Sets all coefficients in this expression to zero.\n  *\n  * Example: \\include MatrixBase_setZero.cpp\n  * Output: \\verbinclude MatrixBase_setZero.out\n  *\n  * \\sa class CwiseNullaryOp, Zero()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setZero()\n{\n  return setConstant(Scalar(0));\n}\n\n/** Resizes to the given \\a size, and sets all coefficients in this expression to zero.\n  *\n  * \\only_for_vectors\n  *\n  * Example: \\include Matrix_setZero_int.cpp\n  * Output: \\verbinclude Matrix_setZero_int.out\n  *\n  * \\sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&\nPlainObjectBase<Derived>::setZero(Index newSize)\n{\n  resize(newSize);\n  return setConstant(Scalar(0));\n}\n\n/** Resizes to the given size, and sets all 
coefficients in this expression to zero.\n  *\n  * \\param rows the new number of rows\n  * \\param cols the new number of columns\n  *\n  * Example: \\include Matrix_setZero_int_int.cpp\n  * Output: \\verbinclude Matrix_setZero_int_int.out\n  *\n  * \\sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&\nPlainObjectBase<Derived>::setZero(Index rows, Index cols)\n{\n  resize(rows, cols);\n  return setConstant(Scalar(0));\n}\n\n// ones:\n\n/** \\returns an expression of a matrix where all coefficients equal one.\n  *\n  * The parameters \\a rows and \\a cols are the number of rows and of columns of\n  * the returned matrix. Must be compatible with this MatrixBase type.\n  *\n  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,\n  * it is redundant to pass \\a rows and \\a cols as arguments, so Ones() should be used\n  * instead.\n  *\n  * Example: \\include MatrixBase_ones_int_int.cpp\n  * Output: \\verbinclude MatrixBase_ones_int_int.out\n  *\n  * \\sa Ones(), Ones(Index), isOnes(), class Ones\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType\nDenseBase<Derived>::Ones(Index rows, Index cols)\n{\n  return Constant(rows, cols, Scalar(1));\n}\n\n/** \\returns an expression of a vector where all coefficients equal one.\n  *\n  * The parameter \\a newSize is the size of the returned vector.\n  * Must be compatible with this MatrixBase type.\n  *\n  * \\only_for_vectors\n  *\n  * This variant is meant to be used for dynamic-size vector types. 
For fixed-size types,\n  * it is redundant to pass \\a size as argument, so Ones() should be used\n  * instead.\n  *\n  * Example: \\include MatrixBase_ones_int.cpp\n  * Output: \\verbinclude MatrixBase_ones_int.out\n  *\n  * \\sa Ones(), Ones(Index,Index), isOnes(), class Ones\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType\nDenseBase<Derived>::Ones(Index newSize)\n{\n  return Constant(newSize, Scalar(1));\n}\n\n/** \\returns an expression of a fixed-size matrix or vector where all coefficients equal one.\n  *\n  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you\n  * need to use the variants taking size arguments.\n  *\n  * Example: \\include MatrixBase_ones.cpp\n  * Output: \\verbinclude MatrixBase_ones.out\n  *\n  * \\sa Ones(Index), Ones(Index,Index), isOnes(), class Ones\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename DenseBase<Derived>::ConstantReturnType\nDenseBase<Derived>::Ones()\n{\n  return Constant(Scalar(1));\n}\n\n/** \\returns true if *this is approximately equal to the matrix where all coefficients\n  *          are equal to 1, within the precision given by \\a prec.\n  *\n  * Example: \\include MatrixBase_isOnes.cpp\n  * Output: \\verbinclude MatrixBase_isOnes.out\n  *\n  * \\sa class CwiseNullaryOp, Ones()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC bool DenseBase<Derived>::isOnes\n(const RealScalar& prec) const\n{\n  return isApproxToConstant(Scalar(1), prec);\n}\n\n/** Sets all coefficients in this expression to one.\n  *\n  * Example: \\include MatrixBase_setOnes.cpp\n  * Output: \\verbinclude MatrixBase_setOnes.out\n  *\n  * \\sa class CwiseNullaryOp, Ones()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::setOnes()\n{\n  return setConstant(Scalar(1));\n}\n\n/** Resizes to the given \\a newSize, and sets all coefficients in this 
expression to one.\n  *\n  * \\only_for_vectors\n  *\n  * Example: \\include Matrix_setOnes_int.cpp\n  * Output: \\verbinclude Matrix_setOnes_int.out\n  *\n  * \\sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&\nPlainObjectBase<Derived>::setOnes(Index newSize)\n{\n  resize(newSize);\n  return setConstant(Scalar(1));\n}\n\n/** Resizes to the given size, and sets all coefficients in this expression to one.\n  *\n  * \\param rows the new number of rows\n  * \\param cols the new number of columns\n  *\n  * Example: \\include Matrix_setOnes_int_int.cpp\n  * Output: \\verbinclude Matrix_setOnes_int_int.out\n  *\n  * \\sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived&\nPlainObjectBase<Derived>::setOnes(Index rows, Index cols)\n{\n  resize(rows, cols);\n  return setConstant(Scalar(1));\n}\n\n// Identity:\n\n/** \\returns an expression of the identity matrix (not necessarily square).\n  *\n  * The parameters \\a rows and \\a cols are the number of rows and of columns of\n  * the returned matrix. Must be compatible with this MatrixBase type.\n  *\n  * This variant is meant to be used for dynamic-size matrix types. 
For fixed-size types,\n  * it is redundant to pass \\a rows and \\a cols as arguments, so Identity() should be used\n  * instead.\n  *\n  * Example: \\include MatrixBase_identity_int_int.cpp\n  * Output: \\verbinclude MatrixBase_identity_int_int.out\n  *\n  * \\sa Identity(), setIdentity(), isIdentity()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType\nMatrixBase<Derived>::Identity(Index rows, Index cols)\n{\n  return DenseBase<Derived>::NullaryExpr(rows, cols, internal::scalar_identity_op<Scalar>());\n}\n\n/** \\returns an expression of the identity matrix (not necessarily square).\n  *\n  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you\n  * need to use the variant taking size arguments.\n  *\n  * Example: \\include MatrixBase_identity.cpp\n  * Output: \\verbinclude MatrixBase_identity.out\n  *\n  * \\sa Identity(Index,Index), setIdentity(), isIdentity()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::IdentityReturnType\nMatrixBase<Derived>::Identity()\n{\n  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)\n  return MatrixBase<Derived>::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op<Scalar>());\n}\n\n/** \\returns true if *this is approximately equal to the identity matrix\n  *          (not necessarily square),\n  *          within the precision given by \\a prec.\n  *\n  * Example: \\include MatrixBase_isIdentity.cpp\n  * Output: \\verbinclude MatrixBase_isIdentity.out\n  *\n  * \\sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity()\n  */\ntemplate<typename Derived>\nbool MatrixBase<Derived>::isIdentity\n(const RealScalar& prec) const\n{\n  typename internal::nested_eval<Derived,1>::type self(derived());\n  for(Index j = 0; j < cols(); ++j)\n  {\n    for(Index i = 0; i < rows(); ++i)\n    {\n      if(i == j)\n      {\n        
if(!internal::isApprox(self.coeff(i, j), static_cast<Scalar>(1), prec))\n          return false;\n      }\n      else\n      {\n        if(!internal::isMuchSmallerThan(self.coeff(i, j), static_cast<RealScalar>(1), prec))\n          return false;\n      }\n    }\n  }\n  return true;\n}\n\nnamespace internal {\n\ntemplate<typename Derived, bool Big = (Derived::SizeAtCompileTime>=16)>\nstruct setIdentity_impl\n{\n  EIGEN_DEVICE_FUNC\n  static EIGEN_STRONG_INLINE Derived& run(Derived& m)\n  {\n    return m = Derived::Identity(m.rows(), m.cols());\n  }\n};\n\ntemplate<typename Derived>\nstruct setIdentity_impl<Derived, true>\n{\n  EIGEN_DEVICE_FUNC\n  static EIGEN_STRONG_INLINE Derived& run(Derived& m)\n  {\n    m.setZero();\n    const Index size = numext::mini(m.rows(), m.cols());\n    for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1);\n    return m;\n  }\n};\n\n} // end namespace internal\n\n/** Writes the identity expression (not necessarily square) into *this.\n  *\n  * Example: \\include MatrixBase_setIdentity.cpp\n  * Output: \\verbinclude MatrixBase_setIdentity.out\n  *\n  * \\sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity()\n{\n  return internal::setIdentity_impl<Derived>::run(derived());\n}\n\n/** \\brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this.\n  *\n  * \\param rows the new number of rows\n  * \\param cols the new number of columns\n  *\n  * Example: \\include Matrix_setIdentity_int_int.cpp\n  * Output: \\verbinclude Matrix_setIdentity_int_int.out\n  *\n  * \\sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& MatrixBase<Derived>::setIdentity(Index rows, Index cols)\n{\n  derived().resize(rows, cols);\n  return 
setIdentity();\n}\n\n/** \\returns an expression of the i-th unit (basis) vector.\n  *\n  * \\only_for_vectors\n  *\n  * \\sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index newSize, Index i)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return BasisReturnType(SquareMatrixType::Identity(newSize,newSize), i);\n}\n\n/** \\returns an expression of the i-th unit (basis) vector.\n  *\n  * \\only_for_vectors\n  *\n  * This variant is for fixed-size vector only.\n  *\n  * \\sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(Index i)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return BasisReturnType(SquareMatrixType::Identity(),i);\n}\n\n/** \\returns an expression of the X axis unit vector (1{,0}^*)\n  *\n  * \\only_for_vectors\n  *\n  * \\sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX()\n{ return Derived::Unit(0); }\n\n/** \\returns an expression of the Y axis unit vector (0,1{,0}^*)\n  *\n  * \\only_for_vectors\n  *\n  * \\sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY()\n{ return Derived::Unit(1); }\n\n/** \\returns an expression of the Z axis unit vector (0,0,1{,0}^*)\n  *\n  * 
\\only_for_vectors\n  *\n  * \\sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ()\n{ return Derived::Unit(2); }\n\n/** \\returns an expression of the W axis unit vector (0,0,0,1)\n  *\n  * \\only_for_vectors\n  *\n  * \\sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW()\n{ return Derived::Unit(3); }\n\n} // end namespace Eigen\n\n#endif // EIGEN_CWISE_NULLARY_OP_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/CwiseTernaryOp.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_CWISE_TERNARY_OP_H\n#define EIGEN_CWISE_TERNARY_OP_H\n\nnamespace Eigen {\n\nnamespace internal {\ntemplate <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>\nstruct traits<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > {\n  // we must not inherit from traits<Arg1> since it has\n  // the potential to cause problems with MSVC\n  typedef typename remove_all<Arg1>::type Ancestor;\n  typedef typename traits<Ancestor>::XprKind XprKind;\n  enum {\n    RowsAtCompileTime = traits<Ancestor>::RowsAtCompileTime,\n    ColsAtCompileTime = traits<Ancestor>::ColsAtCompileTime,\n    MaxRowsAtCompileTime = traits<Ancestor>::MaxRowsAtCompileTime,\n    MaxColsAtCompileTime = traits<Ancestor>::MaxColsAtCompileTime\n  };\n\n  // even though we require Arg1, Arg2, and Arg3 to have the same scalar type\n  // (see CwiseTernaryOp constructor),\n  // we still want to handle the case when the result type is different.\n  typedef typename result_of<TernaryOp(\n      const typename Arg1::Scalar&, const typename Arg2::Scalar&,\n      const typename Arg3::Scalar&)>::type Scalar;\n\n  typedef typename internal::traits<Arg1>::StorageKind StorageKind;\n  typedef typename internal::traits<Arg1>::StorageIndex StorageIndex;\n\n  typedef typename Arg1::Nested Arg1Nested;\n  typedef typename Arg2::Nested Arg2Nested;\n  typedef typename Arg3::Nested Arg3Nested;\n  typedef typename remove_reference<Arg1Nested>::type _Arg1Nested;\n  typedef typename 
remove_reference<Arg2Nested>::type _Arg2Nested;\n  typedef typename remove_reference<Arg3Nested>::type _Arg3Nested;\n  enum { Flags = _Arg1Nested::Flags & RowMajorBit };\n};\n}  // end namespace internal\n\ntemplate <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3,\n          typename StorageKind>\nclass CwiseTernaryOpImpl;\n\n/** \\class CwiseTernaryOp\n  * \\ingroup Core_Module\n  *\n  * \\brief Generic expression where a coefficient-wise ternary operator is\n * applied to two expressions\n  *\n  * \\tparam TernaryOp template functor implementing the operator\n  * \\tparam Arg1Type the type of the first argument\n  * \\tparam Arg2Type the type of the second argument\n  * \\tparam Arg3Type the type of the third argument\n  *\n  * This class represents an expression where a coefficient-wise ternary\n * operator is applied to three expressions.\n  * It is the return type of ternary operators, by which we mean only those\n * ternary operators where\n  * all three arguments are Eigen expressions.\n  * For example, the return type of betainc(matrix1, matrix2, matrix3) is a\n * CwiseTernaryOp.\n  *\n  * Most of the time, this is the only way that it is used, so you typically\n * don't have to name\n  * CwiseTernaryOp types explicitly.\n  *\n  * \\sa MatrixBase::ternaryExpr(const MatrixBase<Argument2> &, const\n * MatrixBase<Argument3> &, const CustomTernaryOp &) const, class CwiseBinaryOp,\n * class CwiseUnaryOp, class CwiseNullaryOp\n  */\ntemplate <typename TernaryOp, typename Arg1Type, typename Arg2Type,\n          typename Arg3Type>\nclass CwiseTernaryOp : public CwiseTernaryOpImpl<\n                           TernaryOp, Arg1Type, Arg2Type, Arg3Type,\n                           typename internal::traits<Arg1Type>::StorageKind>,\n                       internal::no_assignment_operator\n{\n public:\n  typedef typename internal::remove_all<Arg1Type>::type Arg1;\n  typedef typename internal::remove_all<Arg2Type>::type Arg2;\n  typedef typename 
internal::remove_all<Arg3Type>::type Arg3;\n\n  typedef typename CwiseTernaryOpImpl<\n      TernaryOp, Arg1Type, Arg2Type, Arg3Type,\n      typename internal::traits<Arg1Type>::StorageKind>::Base Base;\n  EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseTernaryOp)\n\n  typedef typename internal::ref_selector<Arg1Type>::type Arg1Nested;\n  typedef typename internal::ref_selector<Arg2Type>::type Arg2Nested;\n  typedef typename internal::ref_selector<Arg3Type>::type Arg3Nested;\n  typedef typename internal::remove_reference<Arg1Nested>::type _Arg1Nested;\n  typedef typename internal::remove_reference<Arg2Nested>::type _Arg2Nested;\n  typedef typename internal::remove_reference<Arg3Nested>::type _Arg3Nested;\n\n  EIGEN_DEVICE_FUNC\n  EIGEN_STRONG_INLINE CwiseTernaryOp(const Arg1& a1, const Arg2& a2,\n                                     const Arg3& a3,\n                                     const TernaryOp& func = TernaryOp())\n      : m_arg1(a1), m_arg2(a2), m_arg3(a3), m_functor(func) {\n    // require the sizes to match\n    EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg2)\n    EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Arg1, Arg3)\n\n    // The index types should match\n    EIGEN_STATIC_ASSERT((internal::is_same<\n                         typename internal::traits<Arg1Type>::StorageKind,\n                         typename internal::traits<Arg2Type>::StorageKind>::value),\n                        STORAGE_KIND_MUST_MATCH)\n    EIGEN_STATIC_ASSERT((internal::is_same<\n                         typename internal::traits<Arg1Type>::StorageKind,\n                         typename internal::traits<Arg3Type>::StorageKind>::value),\n                        STORAGE_KIND_MUST_MATCH)\n\n    eigen_assert(a1.rows() == a2.rows() && a1.cols() == a2.cols() &&\n                 a1.rows() == a3.rows() && a1.cols() == a3.cols());\n  }\n\n  EIGEN_DEVICE_FUNC\n  EIGEN_STRONG_INLINE Index rows() const {\n    // return the fixed size type if available to enable compile time\n    // optimizations\n    if 
(internal::traits<typename internal::remove_all<Arg1Nested>::type>::\n                RowsAtCompileTime == Dynamic &&\n        internal::traits<typename internal::remove_all<Arg2Nested>::type>::\n                RowsAtCompileTime == Dynamic)\n      return m_arg3.rows();\n    else if (internal::traits<typename internal::remove_all<Arg1Nested>::type>::\n                     RowsAtCompileTime == Dynamic &&\n             internal::traits<typename internal::remove_all<Arg3Nested>::type>::\n                     RowsAtCompileTime == Dynamic)\n      return m_arg2.rows();\n    else\n      return m_arg1.rows();\n  }\n  EIGEN_DEVICE_FUNC\n  EIGEN_STRONG_INLINE Index cols() const {\n    // return the fixed size type if available to enable compile time\n    // optimizations\n    if (internal::traits<typename internal::remove_all<Arg1Nested>::type>::\n                ColsAtCompileTime == Dynamic &&\n        internal::traits<typename internal::remove_all<Arg2Nested>::type>::\n                ColsAtCompileTime == Dynamic)\n      return m_arg3.cols();\n    else if (internal::traits<typename internal::remove_all<Arg1Nested>::type>::\n                     ColsAtCompileTime == Dynamic &&\n             internal::traits<typename internal::remove_all<Arg3Nested>::type>::\n                     ColsAtCompileTime == Dynamic)\n      return m_arg2.cols();\n    else\n      return m_arg1.cols();\n  }\n\n  /** \\returns the first argument nested expression */\n  EIGEN_DEVICE_FUNC\n  const _Arg1Nested& arg1() const { return m_arg1; }\n  /** \\returns the second argument nested expression */\n  EIGEN_DEVICE_FUNC\n  const _Arg2Nested& arg2() const { return m_arg2; }\n  /** \\returns the third argument nested expression */\n  EIGEN_DEVICE_FUNC\n  const _Arg3Nested& arg3() const { return m_arg3; }\n  /** \\returns the functor representing the ternary operation */\n  EIGEN_DEVICE_FUNC\n  const TernaryOp& functor() const { return m_functor; }\n\n protected:\n  Arg1Nested m_arg1;\n  Arg2Nested m_arg2;\n  
Arg3Nested m_arg3;\n  const TernaryOp m_functor;\n};\n\n// Generic API dispatcher\ntemplate <typename TernaryOp, typename Arg1, typename Arg2, typename Arg3,\n          typename StorageKind>\nclass CwiseTernaryOpImpl\n    : public internal::generic_xpr_base<\n          CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >::type {\n public:\n  typedef typename internal::generic_xpr_base<\n      CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >::type Base;\n};\n\n}  // end namespace Eigen\n\n#endif  // EIGEN_CWISE_TERNARY_OP_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/CwiseUnaryOp.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_CWISE_UNARY_OP_H\n#define EIGEN_CWISE_UNARY_OP_H\n\nnamespace Eigen { \n\nnamespace internal {\ntemplate<typename UnaryOp, typename XprType>\nstruct traits<CwiseUnaryOp<UnaryOp, XprType> >\n : traits<XprType>\n{\n  typedef typename result_of<\n                     UnaryOp(const typename XprType::Scalar&)\n                   >::type Scalar;\n  typedef typename XprType::Nested XprTypeNested;\n  typedef typename remove_reference<XprTypeNested>::type _XprTypeNested;\n  enum {\n    Flags = _XprTypeNested::Flags & RowMajorBit \n  };\n};\n}\n\ntemplate<typename UnaryOp, typename XprType, typename StorageKind>\nclass CwiseUnaryOpImpl;\n\n/** \\class CwiseUnaryOp\n  * \\ingroup Core_Module\n  *\n  * \\brief Generic expression where a coefficient-wise unary operator is applied to an expression\n  *\n  * \\tparam UnaryOp template functor implementing the operator\n  * \\tparam XprType the type of the expression to which we are applying the unary operator\n  *\n  * This class represents an expression where a unary operator is applied to an expression.\n  * It is the return type of all operations taking exactly 1 input expression, regardless of the\n  * presence of other inputs such as scalars. 
For example, the operator* in the expression 3*matrix\n  * is considered unary, because only the right-hand side is an expression, and its\n  * return type is a specialization of CwiseUnaryOp.\n  *\n  * Most of the time, this is the only way that it is used, so you typically don't have to name\n  * CwiseUnaryOp types explicitly.\n  *\n  * \\sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp\n  */\ntemplate<typename UnaryOp, typename XprType>\nclass CwiseUnaryOp : public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal::traits<XprType>::StorageKind>, internal::no_assignment_operator\n{\n  public:\n\n    typedef typename CwiseUnaryOpImpl<UnaryOp, XprType,typename internal::traits<XprType>::StorageKind>::Base Base;\n    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp)\n    typedef typename internal::ref_selector<XprType>::type XprTypeNested;\n    typedef typename internal::remove_all<XprType>::type NestedExpression;\n\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    explicit CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp())\n      : m_xpr(xpr), m_functor(func) {}\n\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Index rows() const { return m_xpr.rows(); }\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Index cols() const { return m_xpr.cols(); }\n\n    /** \\returns the functor representing the unary operation */\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    const UnaryOp& functor() const { return m_functor; }\n\n    /** \\returns the nested expression */\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    const typename internal::remove_all<XprTypeNested>::type&\n    nestedExpression() const { return m_xpr; }\n\n    /** \\returns the nested expression */\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    typename internal::remove_all<XprTypeNested>::type&\n    nestedExpression() { return m_xpr; }\n\n  protected:\n    XprTypeNested m_xpr;\n    const UnaryOp m_functor;\n};\n\n// Generic API 
dispatcher\ntemplate<typename UnaryOp, typename XprType, typename StorageKind>\nclass CwiseUnaryOpImpl\n  : public internal::generic_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type\n{\npublic:\n  typedef typename internal::generic_xpr_base<CwiseUnaryOp<UnaryOp, XprType> >::type Base;\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_CWISE_UNARY_OP_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/CwiseUnaryView.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_CWISE_UNARY_VIEW_H\n#define EIGEN_CWISE_UNARY_VIEW_H\n\nnamespace Eigen {\n\nnamespace internal {\ntemplate<typename ViewOp, typename MatrixType>\nstruct traits<CwiseUnaryView<ViewOp, MatrixType> >\n : traits<MatrixType>\n{\n  typedef typename result_of<\n                     ViewOp(const typename traits<MatrixType>::Scalar&)\n                   >::type Scalar;\n  typedef typename MatrixType::Nested MatrixTypeNested;\n  typedef typename remove_all<MatrixTypeNested>::type _MatrixTypeNested;\n  enum {\n    FlagsLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,\n    Flags = traits<_MatrixTypeNested>::Flags & (RowMajorBit | FlagsLvalueBit | DirectAccessBit), // FIXME DirectAccessBit should not be handled by expressions\n    MatrixTypeInnerStride =  inner_stride_at_compile_time<MatrixType>::ret,\n    // need to cast the sizeof's from size_t to int explicitly, otherwise:\n    // \"error: no integral type can represent all of the enumerator values\n    InnerStrideAtCompileTime = MatrixTypeInnerStride == Dynamic\n                             ? int(Dynamic)\n                             : int(MatrixTypeInnerStride) * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar)),\n    OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret == Dynamic\n                             ? 
int(Dynamic)\n                             : outer_stride_at_compile_time<MatrixType>::ret * int(sizeof(typename traits<MatrixType>::Scalar) / sizeof(Scalar))\n  };\n};\n}\n\ntemplate<typename ViewOp, typename MatrixType, typename StorageKind>\nclass CwiseUnaryViewImpl;\n\n/** \\class CwiseUnaryView\n  * \\ingroup Core_Module\n  *\n  * \\brief Generic lvalue expression of a coefficient-wise unary operator of a matrix or a vector\n  *\n  * \\tparam ViewOp template functor implementing the view\n  * \\tparam MatrixType the type of the matrix we are applying the unary operator\n  *\n  * This class represents a lvalue expression of a generic unary view operator of a matrix or a vector.\n  * It is the return type of real() and imag(), and most of the time this is the only way it is used.\n  *\n  * \\sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp\n  */\ntemplate<typename ViewOp, typename MatrixType>\nclass CwiseUnaryView : public CwiseUnaryViewImpl<ViewOp, MatrixType, typename internal::traits<MatrixType>::StorageKind>\n{\n  public:\n\n    typedef typename CwiseUnaryViewImpl<ViewOp, MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base;\n    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView)\n    typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;\n    typedef typename internal::remove_all<MatrixType>::type NestedExpression;\n\n    explicit inline CwiseUnaryView(MatrixType& mat, const ViewOp& func = ViewOp())\n      : m_matrix(mat), m_functor(func) {}\n\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView)\n\n    EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); }\n    EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); }\n\n    /** \\returns the functor representing unary operation */\n    const ViewOp& functor() const { return m_functor; }\n\n    /** \\returns the nested expression */\n    const typename internal::remove_all<MatrixTypeNested>::type&\n    
nestedExpression() const { return m_matrix; }\n\n    /** \\returns the nested expression */\n    typename internal::remove_reference<MatrixTypeNested>::type&\n    nestedExpression() { return m_matrix.const_cast_derived(); }\n\n  protected:\n    MatrixTypeNested m_matrix;\n    ViewOp m_functor;\n};\n\n// Generic API dispatcher\ntemplate<typename ViewOp, typename XprType, typename StorageKind>\nclass CwiseUnaryViewImpl\n  : public internal::generic_xpr_base<CwiseUnaryView<ViewOp, XprType> >::type\n{\npublic:\n  typedef typename internal::generic_xpr_base<CwiseUnaryView<ViewOp, XprType> >::type Base;\n};\n\ntemplate<typename ViewOp, typename MatrixType>\nclass CwiseUnaryViewImpl<ViewOp,MatrixType,Dense>\n  : public internal::dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type\n{\n  public:\n\n    typedef CwiseUnaryView<ViewOp, MatrixType> Derived;\n    typedef typename internal::dense_xpr_base< CwiseUnaryView<ViewOp, MatrixType> >::type Base;\n\n    EIGEN_DENSE_PUBLIC_INTERFACE(Derived)\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryViewImpl)\n    \n    EIGEN_DEVICE_FUNC inline Scalar* data() { return &(this->coeffRef(0)); }\n    EIGEN_DEVICE_FUNC inline const Scalar* data() const { return &(this->coeff(0)); }\n\n    EIGEN_DEVICE_FUNC inline Index innerStride() const\n    {\n      return derived().nestedExpression().innerStride() * sizeof(typename internal::traits<MatrixType>::Scalar) / sizeof(Scalar);\n    }\n\n    EIGEN_DEVICE_FUNC inline Index outerStride() const\n    {\n      return derived().nestedExpression().outerStride() * sizeof(typename internal::traits<MatrixType>::Scalar) / sizeof(Scalar);\n    }\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_CWISE_UNARY_VIEW_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/DenseBase.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_DENSEBASE_H\n#define EIGEN_DENSEBASE_H\n\nnamespace Eigen {\n\nnamespace internal {\n  \n// The index type defined by EIGEN_DEFAULT_DENSE_INDEX_TYPE must be a signed type.\n// This dummy function simply aims at checking that at compile time.\nstatic inline void check_DenseIndex_is_signed() {\n  EIGEN_STATIC_ASSERT(NumTraits<DenseIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); \n}\n\n} // end namespace internal\n  \n/** \\class DenseBase\n  * \\ingroup Core_Module\n  *\n  * \\brief Base class for all dense matrices, vectors, and arrays\n  *\n  * This class is the base that is inherited by all dense objects (matrix, vector, arrays,\n  * and related expression types). 
The common Eigen API for dense objects is contained in this class.\n  *\n  * \\tparam Derived is the derived type, e.g., a matrix type or an expression.\n  *\n  * This class can be extended with the help of the plugin mechanism described on the page\n  * \\ref TopicCustomizing_Plugins by defining the preprocessor symbol \\c EIGEN_DENSEBASE_PLUGIN.\n  *\n  * \\sa \\blank \\ref TopicClassHierarchy\n  */\ntemplate<typename Derived> class DenseBase\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n  : public DenseCoeffsBase<Derived>\n#else\n  : public DenseCoeffsBase<Derived,DirectWriteAccessors>\n#endif // not EIGEN_PARSED_BY_DOXYGEN\n{\n  public:\n\n    /** Inner iterator type to iterate over the coefficients of a row or column.\n      * \\sa class InnerIterator\n      */\n    typedef Eigen::InnerIterator<Derived> InnerIterator;\n\n    typedef typename internal::traits<Derived>::StorageKind StorageKind;\n\n    /**\n      * \\brief The type used to store indices\n      * \\details This typedef is relevant for types that store multiple indices such as\n      *          PermutationMatrix or Transpositions, otherwise it defaults to Eigen::Index\n      * \\sa \\blank \\ref TopicPreprocessorDirectives, Eigen::Index, SparseMatrixBase.\n     */\n    typedef typename internal::traits<Derived>::StorageIndex StorageIndex;\n\n    /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex<float>, etc. */\n    typedef typename internal::traits<Derived>::Scalar Scalar;\n    \n    /** The numeric type of the expression' coefficients, e.g. 
float, double, int or std::complex<float>, etc.\n      *\n      * It is an alias for the Scalar type */\n    typedef Scalar value_type;\n    \n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    typedef DenseCoeffsBase<Derived> Base;\n\n    using Base::derived;\n    using Base::const_cast_derived;\n    using Base::rows;\n    using Base::cols;\n    using Base::size;\n    using Base::rowIndexByOuterInner;\n    using Base::colIndexByOuterInner;\n    using Base::coeff;\n    using Base::coeffByOuterInner;\n    using Base::operator();\n    using Base::operator[];\n    using Base::x;\n    using Base::y;\n    using Base::z;\n    using Base::w;\n    using Base::stride;\n    using Base::innerStride;\n    using Base::outerStride;\n    using Base::rowStride;\n    using Base::colStride;\n    typedef typename Base::CoeffReturnType CoeffReturnType;\n\n    enum {\n\n      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,\n        /**< The number of rows at compile-time. This is just a copy of the value provided\n          * by the \\a Derived type. If a value is not known at compile-time,\n          * it is set to the \\a Dynamic constant.\n          * \\sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */\n\n      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,\n        /**< The number of columns at compile-time. This is just a copy of the value provided\n          * by the \\a Derived type. If a value is not known at compile-time,\n          * it is set to the \\a Dynamic constant.\n          * \\sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */\n\n\n      SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,\n                                                   internal::traits<Derived>::ColsAtCompileTime>::ret),\n        /**< This is equal to the number of coefficients, i.e. 
the number of\n          * rows times the number of columns, or to \\a Dynamic if this is not\n          * known at compile-time. \\sa RowsAtCompileTime, ColsAtCompileTime */\n\n      MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,\n        /**< This value is equal to the maximum possible number of rows that this expression\n          * might have. If this expression might have an arbitrarily high number of rows,\n          * this value is set to \\a Dynamic.\n          *\n          * This value is useful to know when evaluating an expression, in order to determine\n          * whether it is possible to avoid doing a dynamic memory allocation.\n          *\n          * \\sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime\n          */\n\n      MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,\n        /**< This value is equal to the maximum possible number of columns that this expression\n          * might have. If this expression might have an arbitrarily high number of columns,\n          * this value is set to \\a Dynamic.\n          *\n          * This value is useful to know when evaluating an expression, in order to determine\n          * whether it is possible to avoid doing a dynamic memory allocation.\n          *\n          * \\sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime\n          */\n\n      MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime,\n                                                      internal::traits<Derived>::MaxColsAtCompileTime>::ret),\n        /**< This value is equal to the maximum possible number of coefficients that this expression\n          * might have. 
If this expression might have an arbitrarily high number of coefficients,\n          * this value is set to \\a Dynamic.\n          *\n          * This value is useful to know when evaluating an expression, in order to determine\n          * whether it is possible to avoid doing a dynamic memory allocation.\n          *\n          * \\sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime\n          */\n\n      IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1\n                           || internal::traits<Derived>::MaxColsAtCompileTime == 1,\n        /**< This is set to true if either the number of rows or the number of\n          * columns is known at compile-time to be equal to 1. Indeed, in that case,\n          * we are dealing with a column-vector (if there is only one column) or with\n          * a row-vector (if there is only one row). */\n\n      Flags = internal::traits<Derived>::Flags,\n        /**< This stores expression \\ref flags flags which may or may not be inherited by new expressions\n          * constructed from this one. See the \\ref flags \"list of flags\".\n          */\n\n      IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */\n\n      InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)\n                             : int(IsRowMajor) ? 
int(ColsAtCompileTime) : int(RowsAtCompileTime),\n\n      InnerStrideAtCompileTime = internal::inner_stride_at_compile_time<Derived>::ret,\n      OuterStrideAtCompileTime = internal::outer_stride_at_compile_time<Derived>::ret\n    };\n    \n    typedef typename internal::find_best_packet<Scalar,SizeAtCompileTime>::type PacketScalar;\n\n    enum { IsPlainObjectBase = 0 };\n    \n    /** The plain matrix type corresponding to this expression.\n      * \\sa PlainObject */\n    typedef Matrix<typename internal::traits<Derived>::Scalar,\n                internal::traits<Derived>::RowsAtCompileTime,\n                internal::traits<Derived>::ColsAtCompileTime,\n                AutoAlign | (internal::traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor),\n                internal::traits<Derived>::MaxRowsAtCompileTime,\n                internal::traits<Derived>::MaxColsAtCompileTime\n          > PlainMatrix;\n    \n    /** The plain array type corresponding to this expression.\n      * \\sa PlainObject */\n    typedef Array<typename internal::traits<Derived>::Scalar,\n                internal::traits<Derived>::RowsAtCompileTime,\n                internal::traits<Derived>::ColsAtCompileTime,\n                AutoAlign | (internal::traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor),\n                internal::traits<Derived>::MaxRowsAtCompileTime,\n                internal::traits<Derived>::MaxColsAtCompileTime\n          > PlainArray;\n\n    /** \\brief The plain matrix or array type corresponding to this expression.\n      *\n      * This is not necessarily exactly the return type of eval(). In the case of plain matrices,\n      * the return type of eval() is a const reference to a matrix, not a matrix! 
It is however guaranteed\n      * that the return type of eval() is either PlainObject or const PlainObject&.\n      */\n    typedef typename internal::conditional<internal::is_same<typename internal::traits<Derived>::XprKind,MatrixXpr >::value,\n                                 PlainMatrix, PlainArray>::type PlainObject;\n\n    /** \\returns the number of nonzero coefficients which is in practice the number\n      * of stored coefficients. */\n    EIGEN_DEVICE_FUNC\n    inline Index nonZeros() const { return size(); }\n\n    /** \\returns the outer size.\n      *\n      * \\note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension\n      * with respect to the \\ref TopicStorageOrders \"storage order\", i.e., the number of columns for a\n      * column-major matrix, and the number of rows for a row-major matrix. */\n    EIGEN_DEVICE_FUNC\n    Index outerSize() const\n    {\n      return IsVectorAtCompileTime ? 1\n           : int(IsRowMajor) ? this->rows() : this->cols();\n    }\n\n    /** \\returns the inner size.\n      *\n      * \\note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension\n      * with respect to the \\ref TopicStorageOrders \"storage order\", i.e., the number of rows for a \n      * column-major matrix, and the number of columns for a row-major matrix. */\n    EIGEN_DEVICE_FUNC\n    Index innerSize() const\n    {\n      return IsVectorAtCompileTime ? this->size()\n           : int(IsRowMajor) ? this->cols() : this->rows();\n    }\n\n    /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are\n      * Matrix::resize() and Array::resize(). 
The present method only asserts that the new size equals the old size, and does\n      * nothing else.\n      */\n    EIGEN_DEVICE_FUNC\n    void resize(Index newSize)\n    {\n      EIGEN_ONLY_USED_FOR_DEBUG(newSize);\n      eigen_assert(newSize == this->size()\n                && \"DenseBase::resize() does not actually allow to resize.\");\n    }\n    /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are\n      * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does\n      * nothing else.\n      */\n    EIGEN_DEVICE_FUNC\n    void resize(Index rows, Index cols)\n    {\n      EIGEN_ONLY_USED_FOR_DEBUG(rows);\n      EIGEN_ONLY_USED_FOR_DEBUG(cols);\n      eigen_assert(rows == this->rows() && cols == this->cols()\n                && \"DenseBase::resize() does not actually allow to resize.\");\n    }\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** \\internal Represents a matrix with all coefficients equal to one another*/\n    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,PlainObject> ConstantReturnType;\n    /** \\internal \\deprecated Represents a vector with linearly spaced coefficients that allows sequential access only. */\n    typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar>,PlainObject> SequentialLinSpacedReturnType;\n    /** \\internal Represents a vector with linearly spaced coefficients that allows random access. */\n    typedef CwiseNullaryOp<internal::linspaced_op<Scalar,PacketScalar>,PlainObject> RandomAccessLinSpacedReturnType;\n    /** \\internal the return type of MatrixBase::eigenvalues() */\n    typedef Matrix<typename NumTraits<typename internal::traits<Derived>::Scalar>::Real, internal::traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType;\n\n#endif // not EIGEN_PARSED_BY_DOXYGEN\n\n    /** Copies \\a other into *this. \\returns a reference to *this. 
*/\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator=(const DenseBase<OtherDerived>& other);\n\n    /** Special case of the template operator=, in order to prevent the compiler\n      * from generating a default operator= (issue hit with g++ 4.1)\n      */\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator=(const DenseBase& other);\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    Derived& operator=(const EigenBase<OtherDerived> &other);\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    Derived& operator+=(const EigenBase<OtherDerived> &other);\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    Derived& operator-=(const EigenBase<OtherDerived> &other);\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    Derived& operator=(const ReturnByValue<OtherDerived>& func);\n\n    /** \\internal\n      * Copies \\a other into *this without evaluating other. \\returns a reference to *this.\n      * \\deprecated */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    Derived& lazyAssign(const DenseBase<OtherDerived>& other);\n\n    EIGEN_DEVICE_FUNC\n    CommaInitializer<Derived> operator<< (const Scalar& s);\n\n    /** \\deprecated it now returns \\c *this */\n    template<unsigned int Added,unsigned int Removed>\n    EIGEN_DEPRECATED\n    const Derived& flagged() const\n    { return derived(); }\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    CommaInitializer<Derived> operator<< (const DenseBase<OtherDerived>& other);\n\n    typedef Transpose<Derived> TransposeReturnType;\n    EIGEN_DEVICE_FUNC\n    TransposeReturnType transpose();\n    typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;\n    EIGEN_DEVICE_FUNC\n    ConstTransposeReturnType transpose() const;\n    EIGEN_DEVICE_FUNC\n    void transposeInPlace();\n\n    EIGEN_DEVICE_FUNC static const ConstantReturnType\n    
Constant(Index rows, Index cols, const Scalar& value);\n    EIGEN_DEVICE_FUNC static const ConstantReturnType\n    Constant(Index size, const Scalar& value);\n    EIGEN_DEVICE_FUNC static const ConstantReturnType\n    Constant(const Scalar& value);\n\n    EIGEN_DEVICE_FUNC static const SequentialLinSpacedReturnType\n    LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high);\n    EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType\n    LinSpaced(Index size, const Scalar& low, const Scalar& high);\n    EIGEN_DEVICE_FUNC static const SequentialLinSpacedReturnType\n    LinSpaced(Sequential_t, const Scalar& low, const Scalar& high);\n    EIGEN_DEVICE_FUNC static const RandomAccessLinSpacedReturnType\n    LinSpaced(const Scalar& low, const Scalar& high);\n\n    template<typename CustomNullaryOp> EIGEN_DEVICE_FUNC\n    static const CwiseNullaryOp<CustomNullaryOp, PlainObject>\n    NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func);\n    template<typename CustomNullaryOp> EIGEN_DEVICE_FUNC\n    static const CwiseNullaryOp<CustomNullaryOp, PlainObject>\n    NullaryExpr(Index size, const CustomNullaryOp& func);\n    template<typename CustomNullaryOp> EIGEN_DEVICE_FUNC\n    static const CwiseNullaryOp<CustomNullaryOp, PlainObject>\n    NullaryExpr(const CustomNullaryOp& func);\n\n    EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(Index rows, Index cols);\n    EIGEN_DEVICE_FUNC static const ConstantReturnType Zero(Index size);\n    EIGEN_DEVICE_FUNC static const ConstantReturnType Zero();\n    EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index rows, Index cols);\n    EIGEN_DEVICE_FUNC static const ConstantReturnType Ones(Index size);\n    EIGEN_DEVICE_FUNC static const ConstantReturnType Ones();\n\n    EIGEN_DEVICE_FUNC void fill(const Scalar& value);\n    EIGEN_DEVICE_FUNC Derived& setConstant(const Scalar& value);\n    EIGEN_DEVICE_FUNC Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high);\n  
  EIGEN_DEVICE_FUNC Derived& setLinSpaced(const Scalar& low, const Scalar& high);\n    EIGEN_DEVICE_FUNC Derived& setZero();\n    EIGEN_DEVICE_FUNC Derived& setOnes();\n    EIGEN_DEVICE_FUNC Derived& setRandom();\n\n    template<typename OtherDerived> EIGEN_DEVICE_FUNC\n    bool isApprox(const DenseBase<OtherDerived>& other,\n                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;\n    EIGEN_DEVICE_FUNC \n    bool isMuchSmallerThan(const RealScalar& other,\n                           const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;\n    template<typename OtherDerived> EIGEN_DEVICE_FUNC\n    bool isMuchSmallerThan(const DenseBase<OtherDerived>& other,\n                           const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;\n\n    EIGEN_DEVICE_FUNC bool isApproxToConstant(const Scalar& value, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;\n    EIGEN_DEVICE_FUNC bool isConstant(const Scalar& value, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;\n    EIGEN_DEVICE_FUNC bool isZero(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;\n    EIGEN_DEVICE_FUNC bool isOnes(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;\n    \n    inline bool hasNaN() const;\n    inline bool allFinite() const;\n\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator*=(const Scalar& other);\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator/=(const Scalar& other);\n\n    typedef typename internal::add_const_on_value_type<typename internal::eval<Derived>::type>::type EvalReturnType;\n    /** \\returns the matrix or vector obtained by evaluating this expression.\n      *\n      * Notice that in the case of a plain matrix or vector (not an expression) this function just returns\n      * a const reference, in order to avoid a useless copy.\n      * \n      * \\warning Be careful with eval() and the auto C++ 
keyword, as detailed in this \\link TopicPitfalls_auto_keyword page \\endlink.\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE EvalReturnType eval() const\n    {\n      // Even though MSVC does not honor strong inlining when the return type\n      // is a dynamic matrix, we desperately need strong inlining for fixed\n      // size types on MSVC.\n      return typename internal::eval<Derived>::type(derived());\n    }\n    \n    /** swaps *this with the expression \\a other.\n      *\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    void swap(const DenseBase<OtherDerived>& other)\n    {\n      EIGEN_STATIC_ASSERT(!OtherDerived::IsPlainObjectBase,THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);\n      eigen_assert(rows()==other.rows() && cols()==other.cols());\n      call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>());\n    }\n\n    /** swaps *this with the matrix or array \\a other.\n      *\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    void swap(PlainObjectBase<OtherDerived>& other)\n    {\n      eigen_assert(rows()==other.rows() && cols()==other.cols());\n      call_assignment(derived(), other.derived(), internal::swap_assign_op<Scalar>());\n    }\n\n    EIGEN_DEVICE_FUNC inline const NestByValue<Derived> nestByValue() const;\n    EIGEN_DEVICE_FUNC inline const ForceAlignedAccess<Derived> forceAlignedAccess() const;\n    EIGEN_DEVICE_FUNC inline ForceAlignedAccess<Derived> forceAlignedAccess();\n    template<bool Enable> EIGEN_DEVICE_FUNC\n    inline const typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf() const;\n    template<bool Enable> EIGEN_DEVICE_FUNC\n    inline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type forceAlignedAccessIf();\n\n    EIGEN_DEVICE_FUNC Scalar sum() const;\n    EIGEN_DEVICE_FUNC Scalar mean() const;\n    EIGEN_DEVICE_FUNC Scalar trace() const;\n\n  
  EIGEN_DEVICE_FUNC Scalar prod() const;\n\n    EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar minCoeff() const;\n    EIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar maxCoeff() const;\n\n    template<typename IndexType> EIGEN_DEVICE_FUNC\n    typename internal::traits<Derived>::Scalar minCoeff(IndexType* row, IndexType* col) const;\n    template<typename IndexType> EIGEN_DEVICE_FUNC\n    typename internal::traits<Derived>::Scalar maxCoeff(IndexType* row, IndexType* col) const;\n    template<typename IndexType> EIGEN_DEVICE_FUNC\n    typename internal::traits<Derived>::Scalar minCoeff(IndexType* index) const;\n    template<typename IndexType> EIGEN_DEVICE_FUNC\n    typename internal::traits<Derived>::Scalar maxCoeff(IndexType* index) const;\n\n    template<typename BinaryOp>\n    EIGEN_DEVICE_FUNC\n    Scalar redux(const BinaryOp& func) const;\n\n    template<typename Visitor>\n    EIGEN_DEVICE_FUNC\n    void visit(Visitor& func) const;\n\n    /** \\returns a WithFormat proxy object allowing to print a matrix the with given\n      * format \\a fmt.\n      *\n      * See class IOFormat for some examples.\n      *\n      * \\sa class IOFormat, class WithFormat\n      */\n    inline const WithFormat<Derived> format(const IOFormat& fmt) const\n    {\n      return WithFormat<Derived>(derived(), fmt);\n    }\n\n    /** \\returns the unique coefficient of a 1x1 expression */\n    EIGEN_DEVICE_FUNC\n    CoeffReturnType value() const\n    {\n      EIGEN_STATIC_ASSERT_SIZE_1x1(Derived)\n      eigen_assert(this->rows() == 1 && this->cols() == 1);\n      return derived().coeff(0,0);\n    }\n\n    EIGEN_DEVICE_FUNC bool all() const;\n    EIGEN_DEVICE_FUNC bool any() const;\n    EIGEN_DEVICE_FUNC Index count() const;\n\n    typedef VectorwiseOp<Derived, Horizontal> RowwiseReturnType;\n    typedef const VectorwiseOp<const Derived, Horizontal> ConstRowwiseReturnType;\n    typedef VectorwiseOp<Derived, Vertical> ColwiseReturnType;\n    typedef const 
VectorwiseOp<const Derived, Vertical> ConstColwiseReturnType;\n\n    /** \\returns a VectorwiseOp wrapper of *this providing additional partial reduction operations\n    *\n    * Example: \\include MatrixBase_rowwise.cpp\n    * Output: \\verbinclude MatrixBase_rowwise.out\n    *\n    * \\sa colwise(), class VectorwiseOp, \\ref TutorialReductionsVisitorsBroadcasting\n    */\n    //Code moved here due to a CUDA compiler bug\n    EIGEN_DEVICE_FUNC inline ConstRowwiseReturnType rowwise() const {\n      return ConstRowwiseReturnType(derived());\n    }\n    EIGEN_DEVICE_FUNC RowwiseReturnType rowwise();\n\n    /** \\returns a VectorwiseOp wrapper of *this providing additional partial reduction operations\n    *\n    * Example: \\include MatrixBase_colwise.cpp\n    * Output: \\verbinclude MatrixBase_colwise.out\n    *\n    * \\sa rowwise(), class VectorwiseOp, \\ref TutorialReductionsVisitorsBroadcasting\n    */\n    EIGEN_DEVICE_FUNC inline ConstColwiseReturnType colwise() const {\n      return ConstColwiseReturnType(derived());\n    }\n    EIGEN_DEVICE_FUNC ColwiseReturnType colwise();\n\n    typedef CwiseNullaryOp<internal::scalar_random_op<Scalar>,PlainObject> RandomReturnType;\n    static const RandomReturnType Random(Index rows, Index cols);\n    static const RandomReturnType Random(Index size);\n    static const RandomReturnType Random();\n\n    template<typename ThenDerived,typename ElseDerived>\n    const Select<Derived,ThenDerived,ElseDerived>\n    select(const DenseBase<ThenDerived>& thenMatrix,\n           const DenseBase<ElseDerived>& elseMatrix) const;\n\n    template<typename ThenDerived>\n    inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>\n    select(const DenseBase<ThenDerived>& thenMatrix, const typename ThenDerived::Scalar& elseScalar) const;\n\n    template<typename ElseDerived>\n    inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >\n    select(const typename ElseDerived::Scalar& 
thenScalar, const DenseBase<ElseDerived>& elseMatrix) const;\n\n    template<int p> RealScalar lpNorm() const;\n\n    template<int RowFactor, int ColFactor>\n    EIGEN_DEVICE_FUNC\n    const Replicate<Derived,RowFactor,ColFactor> replicate() const;\n    /**\n    * \\return an expression of the replication of \\c *this\n    *\n    * Example: \\include MatrixBase_replicate_int_int.cpp\n    * Output: \\verbinclude MatrixBase_replicate_int_int.out\n    *\n    * \\sa VectorwiseOp::replicate(), DenseBase::replicate<int,int>(), class Replicate\n    */\n    //Code moved here due to a CUDA compiler bug\n    EIGEN_DEVICE_FUNC\n    const Replicate<Derived, Dynamic, Dynamic> replicate(Index rowFactor, Index colFactor) const\n    {\n      return Replicate<Derived, Dynamic, Dynamic>(derived(), rowFactor, colFactor);\n    }\n\n    typedef Reverse<Derived, BothDirections> ReverseReturnType;\n    typedef const Reverse<const Derived, BothDirections> ConstReverseReturnType;\n    EIGEN_DEVICE_FUNC ReverseReturnType reverse();\n    /** This is the const version of reverse(). 
*/\n    //Code moved here due to a CUDA compiler bug\n    EIGEN_DEVICE_FUNC ConstReverseReturnType reverse() const\n    {\n      return ConstReverseReturnType(derived());\n    }\n    EIGEN_DEVICE_FUNC void reverseInPlace();\n\n#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase\n#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND)\n#define EIGEN_DOC_UNARY_ADDONS(X,Y)\n#   include \"../plugins/CommonCwiseUnaryOps.h\"\n#   include \"../plugins/BlockMethods.h\"\n#   include \"../plugins/IndexedViewMethods.h\"\n#   ifdef EIGEN_DENSEBASE_PLUGIN\n#     include EIGEN_DENSEBASE_PLUGIN\n#   endif\n#undef EIGEN_CURRENT_STORAGE_BASE_CLASS\n#undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n#undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF\n#undef EIGEN_DOC_UNARY_ADDONS\n\n    // disable the use of evalTo for dense objects with a nice compilation error\n    template<typename Dest>\n    EIGEN_DEVICE_FUNC\n    inline void evalTo(Dest& ) const\n    {\n      EIGEN_STATIC_ASSERT((internal::is_same<Dest,void>::value),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS);\n    }\n\n  protected:\n    /** Default constructor. Do nothing. 
*/\n    EIGEN_DEVICE_FUNC DenseBase()\n    {\n      /* Just checks for self-consistency of the flags.\n       * Only do it when debugging Eigen, as this borders on paranoiac and could slow compilation down\n       */\n#ifdef EIGEN_INTERNAL_DEBUGGING\n      EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor))\n                        && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, int(!IsRowMajor))),\n                          INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION)\n#endif\n    }\n\n  private:\n    EIGEN_DEVICE_FUNC explicit DenseBase(int);\n    EIGEN_DEVICE_FUNC DenseBase(int,int);\n    template<typename OtherDerived> EIGEN_DEVICE_FUNC explicit DenseBase(const DenseBase<OtherDerived>&);\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_DENSEBASE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/DenseCoeffsBase.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_DENSECOEFFSBASE_H\n#define EIGEN_DENSECOEFFSBASE_H\n\nnamespace Eigen {\n\nnamespace internal {\ntemplate<typename T> struct add_const_on_value_type_if_arithmetic\n{\n  typedef typename conditional<is_arithmetic<T>::value, T, typename add_const_on_value_type<T>::type>::type type;\n};\n}\n\n/** \\brief Base class providing read-only coefficient access to matrices and arrays.\n  * \\ingroup Core_Module\n  * \\tparam Derived Type of the derived class\n  * \\tparam #ReadOnlyAccessors Constant indicating read-only access\n  *\n  * This class defines the \\c operator() \\c const function and friends, which can be used to read specific\n  * entries of a matrix or array.\n  * \n  * \\sa DenseCoeffsBase<Derived, WriteAccessors>, DenseCoeffsBase<Derived, DirectAccessors>,\n  *     \\ref TopicClassHierarchy\n  */\ntemplate<typename Derived>\nclass DenseCoeffsBase<Derived,ReadOnlyAccessors> : public EigenBase<Derived>\n{\n  public:\n    typedef typename internal::traits<Derived>::StorageKind StorageKind;\n    typedef typename internal::traits<Derived>::Scalar Scalar;\n    typedef typename internal::packet_traits<Scalar>::type PacketScalar;\n\n    // Explanation for this CoeffReturnType typedef.\n    // - This is the return type of the coeff() method.\n    // - The LvalueBit means exactly that we can offer a coeffRef() method, which means exactly that we can get references\n    // to coeffs, which means exactly that we can have coeff() return a const reference (as opposed to returning a value).\n    // - The is_artihmetic check is required since \"const int\", \"const double\", etc. 
will cause warnings on some systems\n    // while the declaration of \"const T\", where T is a non arithmetic type does not. Always returning \"const Scalar&\" is\n    // not possible, since the underlying expressions might not offer a valid address the reference could be referring to.\n    typedef typename internal::conditional<bool(internal::traits<Derived>::Flags&LvalueBit),\n                         const Scalar&,\n                         typename internal::conditional<internal::is_arithmetic<Scalar>::value, Scalar, const Scalar>::type\n                     >::type CoeffReturnType;\n\n    typedef typename internal::add_const_on_value_type_if_arithmetic<\n                         typename internal::packet_traits<Scalar>::type\n                     >::type PacketReturnType;\n\n    typedef EigenBase<Derived> Base;\n    using Base::rows;\n    using Base::cols;\n    using Base::size;\n    using Base::derived;\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const\n    {\n      return int(Derived::RowsAtCompileTime) == 1 ? 0\n          : int(Derived::ColsAtCompileTime) == 1 ? inner\n          : int(Derived::Flags)&RowMajorBit ? outer\n          : inner;\n    }\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const\n    {\n      return int(Derived::ColsAtCompileTime) == 1 ? 0\n          : int(Derived::RowsAtCompileTime) == 1 ? inner\n          : int(Derived::Flags)&RowMajorBit ? inner\n          : outer;\n    }\n\n    /** Short version: don't use this function, use\n      * \\link operator()(Index,Index) const \\endlink instead.\n      *\n      * Long version: this function is similar to\n      * \\link operator()(Index,Index) const \\endlink, but without the assertion.\n      * Use this for limiting the performance cost of debugging code when doing\n      * repeated coefficient access. 
Only use this when it is guaranteed that the\n      * parameters \\a row and \\a col are in range.\n      *\n      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this\n      * function equivalent to \\link operator()(Index,Index) const \\endlink.\n      *\n      * \\sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const\n    {\n      eigen_internal_assert(row >= 0 && row < rows()\n                         && col >= 0 && col < cols());\n      return internal::evaluator<Derived>(derived()).coeff(row,col);\n    }\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const\n    {\n      return coeff(rowIndexByOuterInner(outer, inner),\n                   colIndexByOuterInner(outer, inner));\n    }\n\n    /** \\returns the coefficient at given the given row and column.\n      *\n      * \\sa operator()(Index,Index), operator[](Index)\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE CoeffReturnType operator()(Index row, Index col) const\n    {\n      eigen_assert(row >= 0 && row < rows()\n          && col >= 0 && col < cols());\n      return coeff(row, col);\n    }\n\n    /** Short version: don't use this function, use\n      * \\link operator[](Index) const \\endlink instead.\n      *\n      * Long version: this function is similar to\n      * \\link operator[](Index) const \\endlink, but without the assertion.\n      * Use this for limiting the performance cost of debugging code when doing\n      * repeated coefficient access. 
Only use this when it is guaranteed that the\n      * parameter \\a index is in range.\n      *\n      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this\n      * function equivalent to \\link operator[](Index) const \\endlink.\n      *\n      * \\sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const\n      */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE CoeffReturnType\n    coeff(Index index) const\n    {\n      EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit,\n                          THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)\n      eigen_internal_assert(index >= 0 && index < size());\n      return internal::evaluator<Derived>(derived()).coeff(index);\n    }\n\n\n    /** \\returns the coefficient at given index.\n      *\n      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.\n      *\n      * \\sa operator[](Index), operator()(Index,Index) const, x() const, y() const,\n      * z() const, w() const\n      */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE CoeffReturnType\n    operator[](Index index) const\n    {\n      EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,\n                          THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)\n      eigen_assert(index >= 0 && index < size());\n      return coeff(index);\n    }\n\n    /** \\returns the coefficient at given index.\n      *\n      * This is synonymous to operator[](Index) const.\n      *\n      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.\n      *\n      * \\sa operator[](Index), operator()(Index,Index) const, x() const, y() const,\n      * z() const, w() const\n      */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE CoeffReturnType\n    operator()(Index index) const\n    {\n      eigen_assert(index >= 0 && index < 
size());\n      return coeff(index);\n    }\n\n    /** equivalent to operator[](0).  */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE CoeffReturnType\n    x() const { return (*this)[0]; }\n\n    /** equivalent to operator[](1).  */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE CoeffReturnType\n    y() const\n    {\n      EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=2, OUT_OF_RANGE_ACCESS);\n      return (*this)[1];\n    }\n\n    /** equivalent to operator[](2).  */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE CoeffReturnType\n    z() const\n    {\n      EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=3, OUT_OF_RANGE_ACCESS);\n      return (*this)[2];\n    }\n\n    /** equivalent to operator[](3).  */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE CoeffReturnType\n    w() const\n    {\n      EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=4, OUT_OF_RANGE_ACCESS);\n      return (*this)[3];\n    }\n\n    /** \\internal\n      * \\returns the packet of coefficients starting at the given row and column. It is your responsibility\n      * to ensure that a packet really starts there. This method is only available on expressions having the\n      * PacketAccessBit.\n      *\n      * The \\a LoadMode parameter may have the value \\a #Aligned or \\a #Unaligned. Its effect is to select\n      * the appropriate vectorization instruction. 
Aligned access is faster, but is only possible for packets\n      * starting at an address which is a multiple of the packet size.\n      */\n\n    template<int LoadMode>\n    EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const\n    {\n      typedef typename internal::packet_traits<Scalar>::type DefaultPacketType;\n      eigen_internal_assert(row >= 0 && row < rows() && col >= 0 && col < cols());\n      return internal::evaluator<Derived>(derived()).template packet<LoadMode,DefaultPacketType>(row,col);\n    }\n\n\n    /** \\internal */\n    template<int LoadMode>\n    EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const\n    {\n      return packet<LoadMode>(rowIndexByOuterInner(outer, inner),\n                              colIndexByOuterInner(outer, inner));\n    }\n\n    /** \\internal\n      * \\returns the packet of coefficients starting at the given index. It is your responsibility\n      * to ensure that a packet really starts there. This method is only available on expressions having the\n      * PacketAccessBit and the LinearAccessBit.\n      *\n      * The \\a LoadMode parameter may have the value \\a #Aligned or \\a #Unaligned. Its effect is to select\n      * the appropriate vectorization instruction. 
Aligned access is faster, but is only possible for packets\n      * starting at an address which is a multiple of the packet size.\n      */\n\n    template<int LoadMode>\n    EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const\n    {\n      EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit,\n                          THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)\n      typedef typename internal::packet_traits<Scalar>::type DefaultPacketType;\n      eigen_internal_assert(index >= 0 && index < size());\n      return internal::evaluator<Derived>(derived()).template packet<LoadMode,DefaultPacketType>(index);\n    }\n\n  protected:\n    // explanation: DenseBase is doing \"using ...\" on the methods from DenseCoeffsBase.\n    // But some methods are only available in the DirectAccess case.\n    // So we add dummy methods here with these names, so that \"using... \" doesn't fail.\n    // It's not private so that the child class DenseBase can access them, and it's not public\n    // either since it's an implementation detail, so has to be protected.\n    void coeffRef();\n    void coeffRefByOuterInner();\n    void writePacket();\n    void writePacketByOuterInner();\n    void copyCoeff();\n    void copyCoeffByOuterInner();\n    void copyPacket();\n    void copyPacketByOuterInner();\n    void stride();\n    void innerStride();\n    void outerStride();\n    void rowStride();\n    void colStride();\n};\n\n/** \\brief Base class providing read/write coefficient access to matrices and arrays.\n  * \\ingroup Core_Module\n  * \\tparam Derived Type of the derived class\n  * \\tparam #WriteAccessors Constant indicating read/write access\n  *\n  * This class defines the non-const \\c operator() function and friends, which can be used to write specific\n  * entries of a matrix or array. 
This class inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which\n  * defines the const variant for reading specific entries.\n  * \n  * \\sa DenseCoeffsBase<Derived, DirectAccessors>, \\ref TopicClassHierarchy\n  */\ntemplate<typename Derived>\nclass DenseCoeffsBase<Derived, WriteAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors>\n{\n  public:\n\n    typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;\n\n    typedef typename internal::traits<Derived>::StorageKind StorageKind;\n    typedef typename internal::traits<Derived>::Scalar Scalar;\n    typedef typename internal::packet_traits<Scalar>::type PacketScalar;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n\n    using Base::coeff;\n    using Base::rows;\n    using Base::cols;\n    using Base::size;\n    using Base::derived;\n    using Base::rowIndexByOuterInner;\n    using Base::colIndexByOuterInner;\n    using Base::operator[];\n    using Base::operator();\n    using Base::x;\n    using Base::y;\n    using Base::z;\n    using Base::w;\n\n    /** Short version: don't use this function, use\n      * \\link operator()(Index,Index) \\endlink instead.\n      *\n      * Long version: this function is similar to\n      * \\link operator()(Index,Index) \\endlink, but without the assertion.\n      * Use this for limiting the performance cost of debugging code when doing\n      * repeated coefficient access. 
Only use this when it is guaranteed that the\n      * parameters \\a row and \\a col are in range.\n      *\n      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this\n      * function equivalent to \\link operator()(Index,Index) \\endlink.\n      *\n      * \\sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index)\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col)\n    {\n      eigen_internal_assert(row >= 0 && row < rows()\n                         && col >= 0 && col < cols());\n      return internal::evaluator<Derived>(derived()).coeffRef(row,col);\n    }\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Scalar&\n    coeffRefByOuterInner(Index outer, Index inner)\n    {\n      return coeffRef(rowIndexByOuterInner(outer, inner),\n                      colIndexByOuterInner(outer, inner));\n    }\n\n    /** \\returns a reference to the coefficient at given the given row and column.\n      *\n      * \\sa operator[](Index)\n      */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Scalar&\n    operator()(Index row, Index col)\n    {\n      eigen_assert(row >= 0 && row < rows()\n          && col >= 0 && col < cols());\n      return coeffRef(row, col);\n    }\n\n\n    /** Short version: don't use this function, use\n      * \\link operator[](Index) \\endlink instead.\n      *\n      * Long version: this function is similar to\n      * \\link operator[](Index) \\endlink, but without the assertion.\n      * Use this for limiting the performance cost of debugging code when doing\n      * repeated coefficient access. 
Only use this when it is guaranteed that the\n      * parameters \\a row and \\a col are in range.\n      *\n      * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this\n      * function equivalent to \\link operator[](Index) \\endlink.\n      *\n      * \\sa operator[](Index), coeff(Index) const, coeffRef(Index,Index)\n      */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Scalar&\n    coeffRef(Index index)\n    {\n      EIGEN_STATIC_ASSERT(internal::evaluator<Derived>::Flags & LinearAccessBit,\n                          THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS)\n      eigen_internal_assert(index >= 0 && index < size());\n      return internal::evaluator<Derived>(derived()).coeffRef(index);\n    }\n\n    /** \\returns a reference to the coefficient at given index.\n      *\n      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.\n      *\n      * \\sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()\n      */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Scalar&\n    operator[](Index index)\n    {\n      EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime,\n                          THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD)\n      eigen_assert(index >= 0 && index < size());\n      return coeffRef(index);\n    }\n\n    /** \\returns a reference to the coefficient at given index.\n      *\n      * This is synonymous to operator[](Index).\n      *\n      * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.\n      *\n      * \\sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w()\n      */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Scalar&\n    operator()(Index index)\n    {\n      eigen_assert(index >= 0 && index < size());\n      return coeffRef(index);\n    }\n\n    /** equivalent to 
operator[](0).  */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Scalar&\n    x() { return (*this)[0]; }\n\n    /** equivalent to operator[](1).  */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Scalar&\n    y()\n    {\n      EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=2, OUT_OF_RANGE_ACCESS);\n      return (*this)[1];\n    }\n\n    /** equivalent to operator[](2).  */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Scalar&\n    z()\n    {\n      EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=3, OUT_OF_RANGE_ACCESS);\n      return (*this)[2];\n    }\n\n    /** equivalent to operator[](3).  */\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Scalar&\n    w()\n    {\n      EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime==-1 || Derived::SizeAtCompileTime>=4, OUT_OF_RANGE_ACCESS);\n      return (*this)[3];\n    }\n};\n\n/** \\brief Base class providing direct read-only coefficient access to matrices and arrays.\n  * \\ingroup Core_Module\n  * \\tparam Derived Type of the derived class\n  * \\tparam #DirectAccessors Constant indicating direct access\n  *\n  * This class defines functions to work with strides which can be used to access entries directly. 
This class\n  * inherits DenseCoeffsBase<Derived, ReadOnlyAccessors> which defines functions to access entries read-only using\n  * \\c operator() .\n  *\n  * \\sa \\blank \\ref TopicClassHierarchy\n  */\ntemplate<typename Derived>\nclass DenseCoeffsBase<Derived, DirectAccessors> : public DenseCoeffsBase<Derived, ReadOnlyAccessors>\n{\n  public:\n\n    typedef DenseCoeffsBase<Derived, ReadOnlyAccessors> Base;\n    typedef typename internal::traits<Derived>::Scalar Scalar;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n\n    using Base::rows;\n    using Base::cols;\n    using Base::size;\n    using Base::derived;\n\n    /** \\returns the pointer increment between two consecutive elements within a slice in the inner direction.\n      *\n      * \\sa outerStride(), rowStride(), colStride()\n      */\n    EIGEN_DEVICE_FUNC\n    inline Index innerStride() const\n    {\n      return derived().innerStride();\n    }\n\n    /** \\returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns\n      *          in a column-major matrix).\n      *\n      * \\sa innerStride(), rowStride(), colStride()\n      */\n    EIGEN_DEVICE_FUNC\n    inline Index outerStride() const\n    {\n      return derived().outerStride();\n    }\n\n    // FIXME shall we remove it ?\n    inline Index stride() const\n    {\n      return Derived::IsVectorAtCompileTime ? innerStride() : outerStride();\n    }\n\n    /** \\returns the pointer increment between two consecutive rows.\n      *\n      * \\sa innerStride(), outerStride(), colStride()\n      */\n    EIGEN_DEVICE_FUNC\n    inline Index rowStride() const\n    {\n      return Derived::IsRowMajor ? outerStride() : innerStride();\n    }\n\n    /** \\returns the pointer increment between two consecutive columns.\n      *\n      * \\sa innerStride(), outerStride(), rowStride()\n      */\n    EIGEN_DEVICE_FUNC\n    inline Index colStride() const\n    {\n      return Derived::IsRowMajor ? 
innerStride() : outerStride();\n    }\n};\n\n/** \\brief Base class providing direct read/write coefficient access to matrices and arrays.\n  * \\ingroup Core_Module\n  * \\tparam Derived Type of the derived class\n  * \\tparam #DirectWriteAccessors Constant indicating direct access\n  *\n  * This class defines functions to work with strides which can be used to access entries directly. This class\n  * inherits DenseCoeffsBase<Derived, WriteAccessors> which defines functions to access entries read/write using\n  * \\c operator().\n  *\n  * \\sa \\blank \\ref TopicClassHierarchy\n  */\ntemplate<typename Derived>\nclass DenseCoeffsBase<Derived, DirectWriteAccessors>\n  : public DenseCoeffsBase<Derived, WriteAccessors>\n{\n  public:\n\n    typedef DenseCoeffsBase<Derived, WriteAccessors> Base;\n    typedef typename internal::traits<Derived>::Scalar Scalar;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n\n    using Base::rows;\n    using Base::cols;\n    using Base::size;\n    using Base::derived;\n\n    /** \\returns the pointer increment between two consecutive elements within a slice in the inner direction.\n      *\n      * \\sa outerStride(), rowStride(), colStride()\n      */\n    EIGEN_DEVICE_FUNC\n    inline Index innerStride() const\n    {\n      return derived().innerStride();\n    }\n\n    /** \\returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns\n      *          in a column-major matrix).\n      *\n      * \\sa innerStride(), rowStride(), colStride()\n      */\n    EIGEN_DEVICE_FUNC\n    inline Index outerStride() const\n    {\n      return derived().outerStride();\n    }\n\n    // FIXME shall we remove it ?\n    inline Index stride() const\n    {\n      return Derived::IsVectorAtCompileTime ? 
innerStride() : outerStride();\n    }\n\n    /** \\returns the pointer increment between two consecutive rows.\n      *\n      * \\sa innerStride(), outerStride(), colStride()\n      */\n    EIGEN_DEVICE_FUNC\n    inline Index rowStride() const\n    {\n      return Derived::IsRowMajor ? outerStride() : innerStride();\n    }\n\n    /** \\returns the pointer increment between two consecutive columns.\n      *\n      * \\sa innerStride(), outerStride(), rowStride()\n      */\n    EIGEN_DEVICE_FUNC\n    inline Index colStride() const\n    {\n      return Derived::IsRowMajor ? innerStride() : outerStride();\n    }\n};\n\nnamespace internal {\n\ntemplate<int Alignment, typename Derived, bool JustReturnZero>\nstruct first_aligned_impl\n{\n  static inline Index run(const Derived&)\n  { return 0; }\n};\n\ntemplate<int Alignment, typename Derived>\nstruct first_aligned_impl<Alignment, Derived, false>\n{\n  static inline Index run(const Derived& m)\n  {\n    return internal::first_aligned<Alignment>(m.data(), m.size());\n  }\n};\n\n/** \\internal \\returns the index of the first element of the array stored by \\a m that is properly aligned with respect to \\a Alignment for vectorization.\n  *\n  * \\tparam Alignment requested alignment in Bytes.\n  *\n  * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. 
See it for more\n  * documentation.\n  */\ntemplate<int Alignment, typename Derived>\nstatic inline Index first_aligned(const DenseBase<Derived>& m)\n{\n  enum { ReturnZero = (int(evaluator<Derived>::Alignment) >= Alignment) || !(Derived::Flags & DirectAccessBit) };\n  return first_aligned_impl<Alignment, Derived, ReturnZero>::run(m.derived());\n}\n\ntemplate<typename Derived>\nstatic inline Index first_default_aligned(const DenseBase<Derived>& m)\n{\n  typedef typename Derived::Scalar Scalar;\n  typedef typename packet_traits<Scalar>::type DefaultPacketType;\n  return internal::first_aligned<int(unpacket_traits<DefaultPacketType>::alignment),Derived>(m);\n}\n\ntemplate<typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>\nstruct inner_stride_at_compile_time\n{\n  enum { ret = traits<Derived>::InnerStrideAtCompileTime };\n};\n\ntemplate<typename Derived>\nstruct inner_stride_at_compile_time<Derived, false>\n{\n  enum { ret = 0 };\n};\n\ntemplate<typename Derived, bool HasDirectAccess = has_direct_access<Derived>::ret>\nstruct outer_stride_at_compile_time\n{\n  enum { ret = traits<Derived>::OuterStrideAtCompileTime };\n};\n\ntemplate<typename Derived>\nstruct outer_stride_at_compile_time<Derived, false>\n{\n  enum { ret = 0 };\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_DENSECOEFFSBASE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/DenseStorage.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2010-2013 Hauke Heibel <hauke.heibel@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_MATRIXSTORAGE_H\n#define EIGEN_MATRIXSTORAGE_H\n\n#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN\n  #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X) X; EIGEN_DENSE_STORAGE_CTOR_PLUGIN;\n#else\n  #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(X)\n#endif\n\nnamespace Eigen {\n\nnamespace internal {\n\nstruct constructor_without_unaligned_array_assert {};\n\ntemplate<typename T, int Size>\nEIGEN_DEVICE_FUNC\nvoid check_static_allocation_size()\n{\n  // if EIGEN_STACK_ALLOCATION_LIMIT is defined to 0, then no limit\n  #if EIGEN_STACK_ALLOCATION_LIMIT\n  EIGEN_STATIC_ASSERT(Size * sizeof(T) <= EIGEN_STACK_ALLOCATION_LIMIT, OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG);\n  #endif\n}\n\n/** \\internal\n  * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned:\n  * to 16 bytes boundary if the total size is a multiple of 16 bytes.\n  */\ntemplate <typename T, int Size, int MatrixOrArrayOptions,\n          int Alignment = (MatrixOrArrayOptions&DontAlign) ? 
0\n                        : compute_default_alignment<T,Size>::value >\nstruct plain_array\n{\n  T array[Size];\n\n  EIGEN_DEVICE_FUNC\n  plain_array()\n  { \n    check_static_allocation_size<T,Size>();\n  }\n\n  EIGEN_DEVICE_FUNC\n  plain_array(constructor_without_unaligned_array_assert)\n  { \n    check_static_allocation_size<T,Size>();\n  }\n};\n\n#if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)\n  #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask)\n#elif EIGEN_GNUC_AT_LEAST(4,7) \n  // GCC 4.7 is too aggressive in its optimizations and remove the alignement test based on the fact the array is declared to be aligned.\n  // See this bug report: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53900\n  // Hiding the origin of the array pointer behind a function argument seems to do the trick even if the function is inlined:\n  template<typename PtrType>\n  EIGEN_ALWAYS_INLINE PtrType eigen_unaligned_array_assert_workaround_gcc47(PtrType array) { return array; }\n  #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \\\n    eigen_assert((internal::UIntPtr(eigen_unaligned_array_assert_workaround_gcc47(array)) & (sizemask)) == 0 \\\n              && \"this assertion is explained here: \" \\\n              \"http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html\" \\\n              \" **** READ THIS WEB PAGE !!! ****\");\n#else\n  #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \\\n    eigen_assert((internal::UIntPtr(array) & (sizemask)) == 0 \\\n              && \"this assertion is explained here: \" \\\n              \"http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html\" \\\n              \" **** READ THIS WEB PAGE !!! 
****\");\n#endif\n\ntemplate <typename T, int Size, int MatrixOrArrayOptions>\nstruct plain_array<T, Size, MatrixOrArrayOptions, 8>\n{\n  EIGEN_ALIGN_TO_BOUNDARY(8) T array[Size];\n\n  EIGEN_DEVICE_FUNC\n  plain_array() \n  {\n    EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(7);\n    check_static_allocation_size<T,Size>();\n  }\n\n  EIGEN_DEVICE_FUNC\n  plain_array(constructor_without_unaligned_array_assert) \n  { \n    check_static_allocation_size<T,Size>();\n  }\n};\n\ntemplate <typename T, int Size, int MatrixOrArrayOptions>\nstruct plain_array<T, Size, MatrixOrArrayOptions, 16>\n{\n  EIGEN_ALIGN_TO_BOUNDARY(16) T array[Size];\n\n  EIGEN_DEVICE_FUNC\n  plain_array() \n  { \n    EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(15);\n    check_static_allocation_size<T,Size>();\n  }\n\n  EIGEN_DEVICE_FUNC\n  plain_array(constructor_without_unaligned_array_assert) \n  { \n    check_static_allocation_size<T,Size>();\n  }\n};\n\ntemplate <typename T, int Size, int MatrixOrArrayOptions>\nstruct plain_array<T, Size, MatrixOrArrayOptions, 32>\n{\n  EIGEN_ALIGN_TO_BOUNDARY(32) T array[Size];\n\n  EIGEN_DEVICE_FUNC\n  plain_array() \n  {\n    EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(31);\n    check_static_allocation_size<T,Size>();\n  }\n\n  EIGEN_DEVICE_FUNC\n  plain_array(constructor_without_unaligned_array_assert) \n  { \n    check_static_allocation_size<T,Size>();\n  }\n};\n\ntemplate <typename T, int Size, int MatrixOrArrayOptions>\nstruct plain_array<T, Size, MatrixOrArrayOptions, 64>\n{\n  EIGEN_ALIGN_TO_BOUNDARY(64) T array[Size];\n\n  EIGEN_DEVICE_FUNC\n  plain_array() \n  { \n    EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(63);\n    check_static_allocation_size<T,Size>();\n  }\n\n  EIGEN_DEVICE_FUNC\n  plain_array(constructor_without_unaligned_array_assert) \n  { \n    check_static_allocation_size<T,Size>();\n  }\n};\n\ntemplate <typename T, int MatrixOrArrayOptions, int Alignment>\nstruct plain_array<T, 0, MatrixOrArrayOptions, Alignment>\n{\n  T array[1];\n  EIGEN_DEVICE_FUNC plain_array() {}\n  
EIGEN_DEVICE_FUNC plain_array(constructor_without_unaligned_array_assert) {}\n};\n\n} // end namespace internal\n\n/** \\internal\n  *\n  * \\class DenseStorage\n  * \\ingroup Core_Module\n  *\n  * \\brief Stores the data of a matrix\n  *\n  * This class stores the data of fixed-size, dynamic-size or mixed matrices\n  * in a way as compact as possible.\n  *\n  * \\sa Matrix\n  */\ntemplate<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseStorage;\n\n// purely fixed-size matrix\ntemplate<typename T, int Size, int _Rows, int _Cols, int _Options> class DenseStorage\n{\n    internal::plain_array<T,Size,_Options> m_data;\n  public:\n    EIGEN_DEVICE_FUNC DenseStorage() {\n      EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size)\n    }\n    EIGEN_DEVICE_FUNC\n    explicit DenseStorage(internal::constructor_without_unaligned_array_assert)\n      : m_data(internal::constructor_without_unaligned_array_assert()) {}\n    EIGEN_DEVICE_FUNC \n    DenseStorage(const DenseStorage& other) : m_data(other.m_data) {\n      EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = Size)\n    }\n    EIGEN_DEVICE_FUNC \n    DenseStorage& operator=(const DenseStorage& other)\n    { \n      if (this != &other) m_data = other.m_data;\n      return *this; \n    }\n    EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) {\n      EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})\n      eigen_internal_assert(size==rows*cols && rows==_Rows && cols==_Cols);\n      EIGEN_UNUSED_VARIABLE(size);\n      EIGEN_UNUSED_VARIABLE(rows);\n      EIGEN_UNUSED_VARIABLE(cols);\n    }\n    EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); }\n    EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}\n    EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}\n    EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {}\n    EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {}\n    EIGEN_DEVICE_FUNC const T *data() const { return 
m_data.array; }\n    EIGEN_DEVICE_FUNC T *data() { return m_data.array; }\n};\n\n// null matrix\ntemplate<typename T, int _Rows, int _Cols, int _Options> class DenseStorage<T, 0, _Rows, _Cols, _Options>\n{\n  public:\n    EIGEN_DEVICE_FUNC DenseStorage() {}\n    EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert) {}\n    EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage&) {}\n    EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage&) { return *this; }\n    EIGEN_DEVICE_FUNC DenseStorage(Index,Index,Index) {}\n    EIGEN_DEVICE_FUNC void swap(DenseStorage& ) {}\n    EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}\n    EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}\n    EIGEN_DEVICE_FUNC void conservativeResize(Index,Index,Index) {}\n    EIGEN_DEVICE_FUNC void resize(Index,Index,Index) {}\n    EIGEN_DEVICE_FUNC const T *data() const { return 0; }\n    EIGEN_DEVICE_FUNC T *data() { return 0; }\n};\n\n// more specializations for null matrices; these are necessary to resolve ambiguities\ntemplate<typename T, int _Options> class DenseStorage<T, 0, Dynamic, Dynamic, _Options>\n: public DenseStorage<T, 0, 0, 0, _Options> { };\n\ntemplate<typename T, int _Rows, int _Options> class DenseStorage<T, 0, _Rows, Dynamic, _Options>\n: public DenseStorage<T, 0, 0, 0, _Options> { };\n\ntemplate<typename T, int _Cols, int _Options> class DenseStorage<T, 0, Dynamic, _Cols, _Options>\n: public DenseStorage<T, 0, 0, 0, _Options> { };\n\n// dynamic-size matrix with fixed-size storage\ntemplate<typename T, int Size, int _Options> class DenseStorage<T, Size, Dynamic, Dynamic, _Options>\n{\n    internal::plain_array<T,Size,_Options> m_data;\n    Index m_rows;\n    Index m_cols;\n  public:\n    EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0), m_cols(0) {}\n    EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert)\n      : m_data(internal::constructor_without_unaligned_array_assert()), 
m_rows(0), m_cols(0) {}\n    EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows), m_cols(other.m_cols) {}\n    EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) \n    { \n      if (this != &other)\n      {\n        m_data = other.m_data;\n        m_rows = other.m_rows;\n        m_cols = other.m_cols;\n      }\n      return *this; \n    }\n    EIGEN_DEVICE_FUNC DenseStorage(Index, Index rows, Index cols) : m_rows(rows), m_cols(cols) {}\n    EIGEN_DEVICE_FUNC void swap(DenseStorage& other)\n    { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }\n    EIGEN_DEVICE_FUNC Index rows() const {return m_rows;}\n    EIGEN_DEVICE_FUNC Index cols() const {return m_cols;}\n    EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index cols) { m_rows = rows; m_cols = cols; }\n    EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index cols) { m_rows = rows; m_cols = cols; }\n    EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }\n    EIGEN_DEVICE_FUNC T *data() { return m_data.array; }\n};\n\n// dynamic-size matrix with fixed-size storage and fixed width\ntemplate<typename T, int Size, int _Cols, int _Options> class DenseStorage<T, Size, Dynamic, _Cols, _Options>\n{\n    internal::plain_array<T,Size,_Options> m_data;\n    Index m_rows;\n  public:\n    EIGEN_DEVICE_FUNC DenseStorage() : m_rows(0) {}\n    EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert)\n      : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {}\n    EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_rows(other.m_rows) {}\n    EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other) \n    {\n      if (this != &other)\n      {\n        m_data = other.m_data;\n        m_rows = other.m_rows;\n      }\n      return *this; \n    }\n    EIGEN_DEVICE_FUNC 
DenseStorage(Index, Index rows, Index) : m_rows(rows) {}\n    EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }\n    EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}\n    EIGEN_DEVICE_FUNC Index cols(void) const {return _Cols;}\n    EIGEN_DEVICE_FUNC void conservativeResize(Index, Index rows, Index) { m_rows = rows; }\n    EIGEN_DEVICE_FUNC void resize(Index, Index rows, Index) { m_rows = rows; }\n    EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }\n    EIGEN_DEVICE_FUNC T *data() { return m_data.array; }\n};\n\n// dynamic-size matrix with fixed-size storage and fixed height\ntemplate<typename T, int Size, int _Rows, int _Options> class DenseStorage<T, Size, _Rows, Dynamic, _Options>\n{\n    internal::plain_array<T,Size,_Options> m_data;\n    Index m_cols;\n  public:\n    EIGEN_DEVICE_FUNC DenseStorage() : m_cols(0) {}\n    EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert)\n      : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {}\n    EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other) : m_data(other.m_data), m_cols(other.m_cols) {}\n    EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)\n    {\n      if (this != &other)\n      {\n        m_data = other.m_data;\n        m_cols = other.m_cols;\n      }\n      return *this;\n    }\n    EIGEN_DEVICE_FUNC DenseStorage(Index, Index, Index cols) : m_cols(cols) {}\n    EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }\n    EIGEN_DEVICE_FUNC Index rows(void) const {return _Rows;}\n    EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}\n    void conservativeResize(Index, Index, Index cols) { m_cols = cols; }\n    void resize(Index, Index, Index cols) { m_cols = cols; }\n    EIGEN_DEVICE_FUNC const T *data() const { return m_data.array; }\n    EIGEN_DEVICE_FUNC T 
*data() { return m_data.array; }\n};\n\n// purely dynamic matrix.\ntemplate<typename T, int _Options> class DenseStorage<T, Dynamic, Dynamic, Dynamic, _Options>\n{\n    T *m_data;\n    Index m_rows;\n    Index m_cols;\n  public:\n    EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0), m_cols(0) {}\n    EIGEN_DEVICE_FUNC explicit DenseStorage(internal::constructor_without_unaligned_array_assert)\n       : m_data(0), m_rows(0), m_cols(0) {}\n    EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols)\n      : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows), m_cols(cols)\n    {\n      EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})\n      eigen_internal_assert(size==rows*cols && rows>=0 && cols >=0);\n    }\n    EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other)\n      : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(other.m_rows*other.m_cols))\n      , m_rows(other.m_rows)\n      , m_cols(other.m_cols)\n    {\n      EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows*m_cols)\n      internal::smart_copy(other.m_data, other.m_data+other.m_rows*other.m_cols, m_data);\n    }\n    EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)\n    {\n      if (this != &other)\n      {\n        DenseStorage tmp(other);\n        this->swap(tmp);\n      }\n      return *this;\n    }\n#if EIGEN_HAS_RVALUE_REFERENCES\n    EIGEN_DEVICE_FUNC\n    DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT\n      : m_data(std::move(other.m_data))\n      , m_rows(std::move(other.m_rows))\n      , m_cols(std::move(other.m_cols))\n    {\n      other.m_data = nullptr;\n      other.m_rows = 0;\n      other.m_cols = 0;\n    }\n    EIGEN_DEVICE_FUNC\n    DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT\n    {\n      using std::swap;\n      swap(m_data, other.m_data);\n      swap(m_rows, other.m_rows);\n      swap(m_cols, other.m_cols);\n      return *this;\n    }\n#endif\n    
EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols); }\n    EIGEN_DEVICE_FUNC void swap(DenseStorage& other)\n    { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }\n    EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}\n    EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}\n    void conservativeResize(Index size, Index rows, Index cols)\n    {\n      m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*m_cols);\n      m_rows = rows;\n      m_cols = cols;\n    }\n    EIGEN_DEVICE_FUNC void resize(Index size, Index rows, Index cols)\n    {\n      if(size != m_rows*m_cols)\n      {\n        internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, m_rows*m_cols);\n        if (size)\n          m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);\n        else\n          m_data = 0;\n        EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})\n      }\n      m_rows = rows;\n      m_cols = cols;\n    }\n    EIGEN_DEVICE_FUNC const T *data() const { return m_data; }\n    EIGEN_DEVICE_FUNC T *data() { return m_data; }\n};\n\n// matrix with dynamic width and fixed height (so that matrix has dynamic size).\ntemplate<typename T, int _Rows, int _Options> class DenseStorage<T, Dynamic, _Rows, Dynamic, _Options>\n{\n    T *m_data;\n    Index m_cols;\n  public:\n    EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_cols(0) {}\n    explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {}\n    EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_cols(cols)\n    {\n      EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})\n      eigen_internal_assert(size==rows*cols && rows==_Rows && cols >=0);\n      
EIGEN_UNUSED_VARIABLE(rows);\n    }\n    EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other)\n      : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(_Rows*other.m_cols))\n      , m_cols(other.m_cols)\n    {\n      EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_cols*_Rows)\n      internal::smart_copy(other.m_data, other.m_data+_Rows*m_cols, m_data);\n    }\n    EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)\n    {\n      if (this != &other)\n      {\n        DenseStorage tmp(other);\n        this->swap(tmp);\n      }\n      return *this;\n    }    \n#if EIGEN_HAS_RVALUE_REFERENCES\n    EIGEN_DEVICE_FUNC\n    DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT\n      : m_data(std::move(other.m_data))\n      , m_cols(std::move(other.m_cols))\n    {\n      other.m_data = nullptr;\n      other.m_cols = 0;\n    }\n    EIGEN_DEVICE_FUNC\n    DenseStorage& operator=(DenseStorage&& other) EIGEN_NOEXCEPT\n    {\n      using std::swap;\n      swap(m_data, other.m_data);\n      swap(m_cols, other.m_cols);\n      return *this;\n    }\n#endif\n    EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols); }\n    EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }\n    EIGEN_DEVICE_FUNC static Index rows(void) {return _Rows;}\n    EIGEN_DEVICE_FUNC Index cols(void) const {return m_cols;}\n    EIGEN_DEVICE_FUNC void conservativeResize(Index size, Index, Index cols)\n    {\n      m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, _Rows*m_cols);\n      m_cols = cols;\n    }\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index size, Index, Index cols)\n    {\n      if(size != _Rows*m_cols)\n      {\n        internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Rows*m_cols);\n        if (size)\n          m_data = 
internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);\n        else\n          m_data = 0;\n        EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})\n      }\n      m_cols = cols;\n    }\n    EIGEN_DEVICE_FUNC const T *data() const { return m_data; }\n    EIGEN_DEVICE_FUNC T *data() { return m_data; }\n};\n\n// matrix with dynamic height and fixed width (so that matrix has dynamic size).\ntemplate<typename T, int _Cols, int _Options> class DenseStorage<T, Dynamic, Dynamic, _Cols, _Options>\n{\n    T *m_data;\n    Index m_rows;\n  public:\n    EIGEN_DEVICE_FUNC DenseStorage() : m_data(0), m_rows(0) {}\n    explicit DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {}\n    EIGEN_DEVICE_FUNC DenseStorage(Index size, Index rows, Index cols) : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size)), m_rows(rows)\n    {\n      EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})\n      eigen_internal_assert(size==rows*cols && rows>=0 && cols == _Cols);\n      EIGEN_UNUSED_VARIABLE(cols);\n    }\n    EIGEN_DEVICE_FUNC DenseStorage(const DenseStorage& other)\n      : m_data(internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(other.m_rows*_Cols))\n      , m_rows(other.m_rows)\n    {\n      EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN(Index size = m_rows*_Cols)\n      internal::smart_copy(other.m_data, other.m_data+other.m_rows*_Cols, m_data);\n    }\n    EIGEN_DEVICE_FUNC DenseStorage& operator=(const DenseStorage& other)\n    {\n      if (this != &other)\n      {\n        DenseStorage tmp(other);\n        this->swap(tmp);\n      }\n      return *this;\n    }    \n#if EIGEN_HAS_RVALUE_REFERENCES\n    EIGEN_DEVICE_FUNC\n    DenseStorage(DenseStorage&& other) EIGEN_NOEXCEPT\n      : m_data(std::move(other.m_data))\n      , m_rows(std::move(other.m_rows))\n    {\n      other.m_data = nullptr;\n      other.m_rows = 0;\n    }\n    EIGEN_DEVICE_FUNC\n    DenseStorage& operator=(DenseStorage&& other) 
EIGEN_NOEXCEPT\n    {\n      using std::swap;\n      swap(m_data, other.m_data);\n      swap(m_rows, other.m_rows);\n      return *this;\n    }\n#endif\n    EIGEN_DEVICE_FUNC ~DenseStorage() { internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows); }\n    EIGEN_DEVICE_FUNC void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }\n    EIGEN_DEVICE_FUNC Index rows(void) const {return m_rows;}\n    EIGEN_DEVICE_FUNC static Index cols(void) {return _Cols;}\n    void conservativeResize(Index size, Index rows, Index)\n    {\n      m_data = internal::conditional_aligned_realloc_new_auto<T,(_Options&DontAlign)==0>(m_data, size, m_rows*_Cols);\n      m_rows = rows;\n    }\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(Index size, Index rows, Index)\n    {\n      if(size != m_rows*_Cols)\n      {\n        internal::conditional_aligned_delete_auto<T,(_Options&DontAlign)==0>(m_data, _Cols*m_rows);\n        if (size)\n          m_data = internal::conditional_aligned_new_auto<T,(_Options&DontAlign)==0>(size);\n        else\n          m_data = 0;\n        EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({})\n      }\n      m_rows = rows;\n    }\n    EIGEN_DEVICE_FUNC const T *data() const { return m_data; }\n    EIGEN_DEVICE_FUNC T *data() { return m_data; }\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_MATRIX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Diagonal.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_DIAGONAL_H\n#define EIGEN_DIAGONAL_H\n\nnamespace Eigen { \n\n/** \\class Diagonal\n  * \\ingroup Core_Module\n  *\n  * \\brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix\n  *\n  * \\param MatrixType the type of the object in which we are taking a sub/main/super diagonal\n  * \\param DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal.\n  *              A positive value means a superdiagonal, a negative value means a subdiagonal.\n  *              You can also use DynamicIndex so the index can be set at runtime.\n  *\n  * The matrix is not required to be square.\n  *\n  * This class represents an expression of the main diagonal, or any sub/super diagonal\n  * of a square matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index) and most of the\n  * time this is the only way it is used.\n  *\n  * \\sa MatrixBase::diagonal(), MatrixBase::diagonal(Index)\n  */\n\nnamespace internal {\ntemplate<typename MatrixType, int DiagIndex>\nstruct traits<Diagonal<MatrixType,DiagIndex> >\n : traits<MatrixType>\n{\n  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;\n  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;\n  typedef typename MatrixType::StorageKind StorageKind;\n  enum {\n    RowsAtCompileTime = (int(DiagIndex) == DynamicIndex || int(MatrixType::SizeAtCompileTime) == Dynamic) ? 
Dynamic\n                      : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),\n                                              MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),\n    ColsAtCompileTime = 1,\n    MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic\n                         : DiagIndex == DynamicIndex ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime,\n                                                                              MatrixType::MaxColsAtCompileTime)\n                         : (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0),\n                                                 MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))),\n    MaxColsAtCompileTime = 1,\n    MaskLvalueBit = is_lvalue<MatrixType>::value ? LvalueBit : 0,\n    Flags = (unsigned int)_MatrixTypeNested::Flags & (RowMajorBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit, // FIXME DirectAccessBit should not be handled by expressions\n    MatrixTypeOuterStride = outer_stride_at_compile_time<MatrixType>::ret,\n    InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride+1,\n    OuterStrideAtCompileTime = 0\n  };\n};\n}\n\ntemplate<typename MatrixType, int _DiagIndex> class Diagonal\n   : public internal::dense_xpr_base< Diagonal<MatrixType,_DiagIndex> >::type\n{\n  public:\n\n    enum { DiagIndex = _DiagIndex };\n    typedef typename internal::dense_xpr_base<Diagonal>::type Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal)\n\n    EIGEN_DEVICE_FUNC\n    explicit inline Diagonal(MatrixType& matrix, Index a_index = DiagIndex) : m_matrix(matrix), m_index(a_index) {}\n\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal)\n\n    EIGEN_DEVICE_FUNC\n    inline Index rows() const\n    {\n      return m_index.value()<0 ? 
numext::mini<Index>(m_matrix.cols(),m_matrix.rows()+m_index.value())\n                               : numext::mini<Index>(m_matrix.rows(),m_matrix.cols()-m_index.value());\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline Index cols() const { return 1; }\n\n    EIGEN_DEVICE_FUNC\n    inline Index innerStride() const\n    {\n      return m_matrix.outerStride() + 1;\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline Index outerStride() const\n    {\n      return 0;\n    }\n\n    typedef typename internal::conditional<\n                       internal::is_lvalue<MatrixType>::value,\n                       Scalar,\n                       const Scalar\n                     >::type ScalarWithConstIfNotLvalue;\n\n    EIGEN_DEVICE_FUNC\n    inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.coeffRef(rowOffset(), colOffset())); }\n    EIGEN_DEVICE_FUNC\n    inline const Scalar* data() const { return &(m_matrix.coeffRef(rowOffset(), colOffset())); }\n\n    EIGEN_DEVICE_FUNC\n    inline Scalar& coeffRef(Index row, Index)\n    {\n      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)\n      return m_matrix.coeffRef(row+rowOffset(), row+colOffset());\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline const Scalar& coeffRef(Index row, Index) const\n    {\n      return m_matrix.coeffRef(row+rowOffset(), row+colOffset());\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline CoeffReturnType coeff(Index row, Index) const\n    {\n      return m_matrix.coeff(row+rowOffset(), row+colOffset());\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline Scalar& coeffRef(Index idx)\n    {\n      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)\n      return m_matrix.coeffRef(idx+rowOffset(), idx+colOffset());\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline const Scalar& coeffRef(Index idx) const\n    {\n      return m_matrix.coeffRef(idx+rowOffset(), idx+colOffset());\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline CoeffReturnType coeff(Index idx) const\n    {\n      return m_matrix.coeff(idx+rowOffset(), idx+colOffset());\n    }\n\n    
EIGEN_DEVICE_FUNC\n    inline const typename internal::remove_all<typename MatrixType::Nested>::type& \n    nestedExpression() const \n    {\n      return m_matrix;\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline Index index() const\n    {\n      return m_index.value();\n    }\n\n  protected:\n    typename internal::ref_selector<MatrixType>::non_const_type m_matrix;\n    const internal::variable_if_dynamicindex<Index, DiagIndex> m_index;\n\n  private:\n    // some compilers may fail to optimize std::max etc in case of compile-time constants...\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); }\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); }\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? m_index.value() : 0; }\n    // trigger a compile-time error if someone try to call packet\n    template<int LoadMode> typename MatrixType::PacketReturnType packet(Index) const;\n    template<int LoadMode> typename MatrixType::PacketReturnType packet(Index,Index) const;\n};\n\n/** \\returns an expression of the main diagonal of the matrix \\c *this\n  *\n  * \\c *this is not required to be square.\n  *\n  * Example: \\include MatrixBase_diagonal.cpp\n  * Output: \\verbinclude MatrixBase_diagonal.out\n  *\n  * \\sa class Diagonal */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::DiagonalReturnType\nMatrixBase<Derived>::diagonal()\n{\n  return DiagonalReturnType(derived());\n}\n\n/** This is the const version of diagonal(). 
*/\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::ConstDiagonalReturnType\nMatrixBase<Derived>::diagonal() const\n{\n  return ConstDiagonalReturnType(derived());\n}\n\n/** \\returns an expression of the \\a DiagIndex-th sub or super diagonal of the matrix \\c *this\n  *\n  * \\c *this is not required to be square.\n  *\n  * The template parameter \\a DiagIndex represent a super diagonal if \\a DiagIndex > 0\n  * and a sub diagonal otherwise. \\a DiagIndex == 0 is equivalent to the main diagonal.\n  *\n  * Example: \\include MatrixBase_diagonal_int.cpp\n  * Output: \\verbinclude MatrixBase_diagonal_int.out\n  *\n  * \\sa MatrixBase::diagonal(), class Diagonal */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::DiagonalDynamicIndexReturnType\nMatrixBase<Derived>::diagonal(Index index)\n{\n  return DiagonalDynamicIndexReturnType(derived(), index);\n}\n\n/** This is the const version of diagonal(Index). */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::ConstDiagonalDynamicIndexReturnType\nMatrixBase<Derived>::diagonal(Index index) const\n{\n  return ConstDiagonalDynamicIndexReturnType(derived(), index);\n}\n\n/** \\returns an expression of the \\a DiagIndex-th sub or super diagonal of the matrix \\c *this\n  *\n  * \\c *this is not required to be square.\n  *\n  * The template parameter \\a DiagIndex represent a super diagonal if \\a DiagIndex > 0\n  * and a sub diagonal otherwise. 
\\a DiagIndex == 0 is equivalent to the main diagonal.\n  *\n  * Example: \\include MatrixBase_diagonal_template_int.cpp\n  * Output: \\verbinclude MatrixBase_diagonal_template_int.out\n  *\n  * \\sa MatrixBase::diagonal(), class Diagonal */\ntemplate<typename Derived>\ntemplate<int Index_>\nEIGEN_DEVICE_FUNC\ninline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Index_>::Type\nMatrixBase<Derived>::diagonal()\n{\n  return typename DiagonalIndexReturnType<Index_>::Type(derived());\n}\n\n/** This is the const version of diagonal<int>(). */\ntemplate<typename Derived>\ntemplate<int Index_>\nEIGEN_DEVICE_FUNC\ninline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Index_>::Type\nMatrixBase<Derived>::diagonal() const\n{\n  return typename ConstDiagonalIndexReturnType<Index_>::Type(derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_DIAGONAL_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/DiagonalMatrix.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_DIAGONALMATRIX_H\n#define EIGEN_DIAGONALMATRIX_H\n\nnamespace Eigen { \n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate<typename Derived>\nclass DiagonalBase : public EigenBase<Derived>\n{\n  public:\n    typedef typename internal::traits<Derived>::DiagonalVectorType DiagonalVectorType;\n    typedef typename DiagonalVectorType::Scalar Scalar;\n    typedef typename DiagonalVectorType::RealScalar RealScalar;\n    typedef typename internal::traits<Derived>::StorageKind StorageKind;\n    typedef typename internal::traits<Derived>::StorageIndex StorageIndex;\n\n    enum {\n      RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,\n      ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,\n      MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,\n      MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,\n      IsVectorAtCompileTime = 0,\n      Flags = NoPreferredStorageOrderBit\n    };\n\n    typedef Matrix<Scalar, RowsAtCompileTime, ColsAtCompileTime, 0, MaxRowsAtCompileTime, MaxColsAtCompileTime> DenseMatrixType;\n    typedef DenseMatrixType DenseType;\n    typedef DiagonalMatrix<Scalar,DiagonalVectorType::SizeAtCompileTime,DiagonalVectorType::MaxSizeAtCompileTime> PlainObject;\n\n    EIGEN_DEVICE_FUNC\n    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }\n    EIGEN_DEVICE_FUNC\n    inline Derived& derived() { return *static_cast<Derived*>(this); }\n\n    EIGEN_DEVICE_FUNC\n    DenseMatrixType toDenseMatrix() const { return derived(); 
}\n\n    EIGEN_DEVICE_FUNC\n    inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); }\n    EIGEN_DEVICE_FUNC\n    inline DiagonalVectorType& diagonal() { return derived().diagonal(); }\n\n    EIGEN_DEVICE_FUNC\n    inline Index rows() const { return diagonal().size(); }\n    EIGEN_DEVICE_FUNC\n    inline Index cols() const { return diagonal().size(); }\n\n    template<typename MatrixDerived>\n    EIGEN_DEVICE_FUNC\n    const Product<Derived,MatrixDerived,LazyProduct>\n    operator*(const MatrixBase<MatrixDerived> &matrix) const\n    {\n      return Product<Derived, MatrixDerived, LazyProduct>(derived(),matrix.derived());\n    }\n\n    typedef DiagonalWrapper<const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const DiagonalVectorType> > InverseReturnType;\n    EIGEN_DEVICE_FUNC\n    inline const InverseReturnType\n    inverse() const\n    {\n      return InverseReturnType(diagonal().cwiseInverse());\n    }\n    \n    EIGEN_DEVICE_FUNC\n    inline const DiagonalWrapper<const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DiagonalVectorType,Scalar,product) >\n    operator*(const Scalar& scalar) const\n    {\n      return DiagonalWrapper<const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DiagonalVectorType,Scalar,product) >(diagonal() * scalar);\n    }\n    EIGEN_DEVICE_FUNC\n    friend inline const DiagonalWrapper<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,DiagonalVectorType,product) >\n    operator*(const Scalar& scalar, const DiagonalBase& other)\n    {\n      return DiagonalWrapper<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,DiagonalVectorType,product) >(scalar * other.diagonal());\n    }\n};\n\n#endif\n\n/** \\class DiagonalMatrix\n  * \\ingroup Core_Module\n  *\n  * \\brief Represents a diagonal matrix with its storage\n  *\n  * \\param _Scalar the type of coefficients\n  * \\param SizeAtCompileTime the dimension of the matrix, or Dynamic\n  * \\param MaxSizeAtCompileTime the dimension of the matrix, or Dynamic. 
This parameter is optional and defaults\n  *        to SizeAtCompileTime. Most of the time, you do not need to specify it.\n  *\n  * \\sa class DiagonalWrapper\n  */\n\nnamespace internal {\ntemplate<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime>\nstruct traits<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime> >\n : traits<Matrix<_Scalar,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >\n{\n  typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType;\n  typedef DiagonalShape StorageKind;\n  enum {\n    Flags = LvalueBit | NoPreferredStorageOrderBit\n  };\n};\n}\ntemplate<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime>\nclass DiagonalMatrix\n  : public DiagonalBase<DiagonalMatrix<_Scalar,SizeAtCompileTime,MaxSizeAtCompileTime> >\n{\n  public:\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    typedef typename internal::traits<DiagonalMatrix>::DiagonalVectorType DiagonalVectorType;\n    typedef const DiagonalMatrix& Nested;\n    typedef _Scalar Scalar;\n    typedef typename internal::traits<DiagonalMatrix>::StorageKind StorageKind;\n    typedef typename internal::traits<DiagonalMatrix>::StorageIndex StorageIndex;\n    #endif\n\n  protected:\n\n    DiagonalVectorType m_diagonal;\n\n  public:\n\n    /** const version of diagonal(). */\n    EIGEN_DEVICE_FUNC\n    inline const DiagonalVectorType& diagonal() const { return m_diagonal; }\n    /** \\returns a reference to the stored vector of diagonal coefficients. */\n    EIGEN_DEVICE_FUNC\n    inline DiagonalVectorType& diagonal() { return m_diagonal; }\n\n    /** Default constructor without initialization */\n    EIGEN_DEVICE_FUNC\n    inline DiagonalMatrix() {}\n\n    /** Constructs a diagonal matrix with given dimension  */\n    EIGEN_DEVICE_FUNC\n    explicit inline DiagonalMatrix(Index dim) : m_diagonal(dim) {}\n\n    /** 2D constructor. 
*/\n    EIGEN_DEVICE_FUNC\n    inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {}\n\n    /** 3D constructor. */\n    EIGEN_DEVICE_FUNC\n    inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x,y,z) {}\n\n    /** Copy constructor. */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    inline DiagonalMatrix(const DiagonalBase<OtherDerived>& other) : m_diagonal(other.diagonal()) {}\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** copy constructor. prevent a default copy constructor from hiding the other templated constructor */\n    inline DiagonalMatrix(const DiagonalMatrix& other) : m_diagonal(other.diagonal()) {}\n    #endif\n\n    /** generic constructor from expression of the diagonal coefficients */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    explicit inline DiagonalMatrix(const MatrixBase<OtherDerived>& other) : m_diagonal(other)\n    {}\n\n    /** Copy operator. */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    DiagonalMatrix& operator=(const DiagonalBase<OtherDerived>& other)\n    {\n      m_diagonal = other.diagonal();\n      return *this;\n    }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** This is a special case of the templated operator=. Its purpose is to\n      * prevent a default operator= from hiding the templated operator=.\n      */\n    EIGEN_DEVICE_FUNC\n    DiagonalMatrix& operator=(const DiagonalMatrix& other)\n    {\n      m_diagonal = other.diagonal();\n      return *this;\n    }\n    #endif\n\n    /** Resizes to given size. */\n    EIGEN_DEVICE_FUNC\n    inline void resize(Index size) { m_diagonal.resize(size); }\n    /** Sets all coefficients to zero. */\n    EIGEN_DEVICE_FUNC\n    inline void setZero() { m_diagonal.setZero(); }\n    /** Resizes and sets all coefficients to zero. 
*/\n    EIGEN_DEVICE_FUNC\n    inline void setZero(Index size) { m_diagonal.setZero(size); }\n    /** Sets this matrix to be the identity matrix of the current size. */\n    EIGEN_DEVICE_FUNC\n    inline void setIdentity() { m_diagonal.setOnes(); }\n    /** Sets this matrix to be the identity matrix of the given size. */\n    EIGEN_DEVICE_FUNC\n    inline void setIdentity(Index size) { m_diagonal.setOnes(size); }\n};\n\n/** \\class DiagonalWrapper\n  * \\ingroup Core_Module\n  *\n  * \\brief Expression of a diagonal matrix\n  *\n  * \\param _DiagonalVectorType the type of the vector of diagonal coefficients\n  *\n  * This class is an expression of a diagonal matrix, but not storing its own vector of diagonal coefficients,\n  * instead wrapping an existing vector expression. It is the return type of MatrixBase::asDiagonal()\n  * and most of the time this is the only way that it is used.\n  *\n  * \\sa class DiagonalMatrix, class DiagonalBase, MatrixBase::asDiagonal()\n  */\n\nnamespace internal {\ntemplate<typename _DiagonalVectorType>\nstruct traits<DiagonalWrapper<_DiagonalVectorType> >\n{\n  typedef _DiagonalVectorType DiagonalVectorType;\n  typedef typename DiagonalVectorType::Scalar Scalar;\n  typedef typename DiagonalVectorType::StorageIndex StorageIndex;\n  typedef DiagonalShape StorageKind;\n  typedef typename traits<DiagonalVectorType>::XprKind XprKind;\n  enum {\n    RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,\n    ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime,\n    MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,\n    MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime,\n    Flags =  (traits<DiagonalVectorType>::Flags & LvalueBit) | NoPreferredStorageOrderBit\n  };\n};\n}\n\ntemplate<typename _DiagonalVectorType>\nclass DiagonalWrapper\n  : public DiagonalBase<DiagonalWrapper<_DiagonalVectorType> >, internal::no_assignment_operator\n{\n  public:\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    typedef 
_DiagonalVectorType DiagonalVectorType;\n    typedef DiagonalWrapper Nested;\n    #endif\n\n    /** Constructor from expression of diagonal coefficients to wrap. */\n    EIGEN_DEVICE_FUNC\n    explicit inline DiagonalWrapper(DiagonalVectorType& a_diagonal) : m_diagonal(a_diagonal) {}\n\n    /** \\returns a const reference to the wrapped expression of diagonal coefficients. */\n    EIGEN_DEVICE_FUNC\n    const DiagonalVectorType& diagonal() const { return m_diagonal; }\n\n  protected:\n    typename DiagonalVectorType::Nested m_diagonal;\n};\n\n/** \\returns a pseudo-expression of a diagonal matrix with *this as vector of diagonal coefficients\n  *\n  * \\only_for_vectors\n  *\n  * Example: \\include MatrixBase_asDiagonal.cpp\n  * Output: \\verbinclude MatrixBase_asDiagonal.out\n  *\n  * \\sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal()\n  **/\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline const DiagonalWrapper<const Derived>\nMatrixBase<Derived>::asDiagonal() const\n{\n  return DiagonalWrapper<const Derived>(derived());\n}\n\n/** \\returns true if *this is approximately equal to a diagonal matrix,\n  *          within the precision given by \\a prec.\n  *\n  * Example: \\include MatrixBase_isDiagonal.cpp\n  * Output: \\verbinclude MatrixBase_isDiagonal.out\n  *\n  * \\sa asDiagonal()\n  */\ntemplate<typename Derived>\nbool MatrixBase<Derived>::isDiagonal(const RealScalar& prec) const\n{\n  if(cols() != rows()) return false;\n  RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1);\n  for(Index j = 0; j < cols(); ++j)\n  {\n    RealScalar absOnDiagonal = numext::abs(coeff(j,j));\n    if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal;\n  }\n  for(Index j = 0; j < cols(); ++j)\n    for(Index i = 0; i < j; ++i)\n    {\n      if(!internal::isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false;\n      if(!internal::isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false;\n    }\n  return 
true;\n}\n\nnamespace internal {\n\ntemplate<> struct storage_kind_to_shape<DiagonalShape> { typedef DiagonalShape Shape; };\n\nstruct Diagonal2Dense {};\n\ntemplate<> struct AssignmentKind<DenseShape,DiagonalShape> { typedef Diagonal2Dense Kind; };\n\n// Diagonal matrix to Dense assignment\ntemplate< typename DstXprType, typename SrcXprType, typename Functor>\nstruct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Dense>\n{\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n    \n    dst.setZero();\n    dst.diagonal() = src.diagonal();\n  }\n  \n  static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)\n  { dst.diagonal() += src.diagonal(); }\n  \n  static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)\n  { dst.diagonal() -= src.diagonal(); }\n};\n\n} // namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_DIAGONALMATRIX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/DiagonalProduct.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_DIAGONALPRODUCT_H\n#define EIGEN_DIAGONALPRODUCT_H\n\nnamespace Eigen { \n\n/** \\returns the diagonal matrix product of \\c *this by the diagonal matrix \\a diagonal.\n  */\ntemplate<typename Derived>\ntemplate<typename DiagonalDerived>\nEIGEN_DEVICE_FUNC inline const Product<Derived, DiagonalDerived, LazyProduct>\nMatrixBase<Derived>::operator*(const DiagonalBase<DiagonalDerived> &a_diagonal) const\n{\n  return Product<Derived, DiagonalDerived, LazyProduct>(derived(),a_diagonal.derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_DIAGONALPRODUCT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Dot.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2008, 2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_DOT_H\n#define EIGEN_DOT_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n// helper function for dot(). The problem is that if we put that in the body of dot(), then upon calling dot\n// with mismatched types, the compiler emits errors about failing to instantiate cwiseProduct BEFORE\n// looking at the static assertions. Thus this is a trick to get better compile errors.\ntemplate<typename T, typename U,\n// the NeedToTranspose condition here is taken straight from Assign.h\n         bool NeedToTranspose = T::IsVectorAtCompileTime\n                && U::IsVectorAtCompileTime\n                && ((int(T::RowsAtCompileTime) == 1 && int(U::ColsAtCompileTime) == 1)\n                      |  // FIXME | instead of || to please GCC 4.4.0 stupid warning \"suggest parentheses around &&\".\n                         // revert to || as soon as not needed anymore.\n                    (int(T::ColsAtCompileTime) == 1 && int(U::RowsAtCompileTime) == 1))\n>\nstruct dot_nocheck\n{\n  typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod;\n  typedef typename conj_prod::result_type ResScalar;\n  EIGEN_DEVICE_FUNC\n  static inline ResScalar run(const MatrixBase<T>& a, const MatrixBase<U>& b)\n  {\n    return a.template binaryExpr<conj_prod>(b).sum();\n  }\n};\n\ntemplate<typename T, typename U>\nstruct dot_nocheck<T, U, true>\n{\n  typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod;\n  typedef typename conj_prod::result_type ResScalar;\n  EIGEN_DEVICE_FUNC\n  static inline ResScalar run(const 
MatrixBase<T>& a, const MatrixBase<U>& b)\n  {\n    return a.transpose().template binaryExpr<conj_prod>(b).sum();\n  }\n};\n\n} // end namespace internal\n\n/** \\fn MatrixBase::dot\n  * \\returns the dot product of *this with other.\n  *\n  * \\only_for_vectors\n  *\n  * \\note If the scalar type is complex numbers, then this function returns the hermitian\n  * (sesquilinear) dot product, conjugate-linear in the first variable and linear in the\n  * second variable.\n  *\n  * \\sa squaredNorm(), norm()\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\ntypename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType\nMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)\n  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)\n#if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG))\n  typedef internal::scalar_conj_product_op<Scalar,typename OtherDerived::Scalar> func;\n  EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar);\n#endif\n  \n  eigen_assert(size() == other.size());\n\n  return internal::dot_nocheck<Derived,OtherDerived>::run(*this, other);\n}\n\n//---------- implementation of L2 norm and related functions ----------\n\n/** \\returns, for vectors, the squared \\em l2 norm of \\c *this, and for matrices the Frobenius norm.\n  * In both cases, it consists in the sum of the square of all the matrix entries.\n  * For vectors, this is also equals to the dot product of \\c *this with itself.\n  *\n  * \\sa dot(), norm(), lpNorm()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::squaredNorm() const\n{\n  return numext::real((*this).cwiseAbs2().sum());\n}\n\n/** \\returns, for vectors, the \\em l2 
norm of \\c *this, and for matrices the Frobenius norm.\n  * In both cases, it consists in the square root of the sum of the square of all the matrix entries.\n  * For vectors, this is also equals to the square root of the dot product of \\c *this with itself.\n  *\n  * \\sa lpNorm(), dot(), squaredNorm()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const\n{\n  return numext::sqrt(squaredNorm());\n}\n\n/** \\returns an expression of the quotient of \\c *this by its own norm.\n  *\n  * \\warning If the input vector is too small (i.e., this->norm()==0),\n  *          then this function returns a copy of the input.\n  *\n  * \\only_for_vectors\n  *\n  * \\sa norm(), normalize()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline const typename MatrixBase<Derived>::PlainObject\nMatrixBase<Derived>::normalized() const\n{\n  typedef typename internal::nested_eval<Derived,2>::type _Nested;\n  _Nested n(derived());\n  RealScalar z = n.squaredNorm();\n  // NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU\n  if(z>RealScalar(0))\n    return n / numext::sqrt(z);\n  else\n    return n;\n}\n\n/** Normalizes the vector, i.e. 
divides it by its own norm.\n  *\n  * \\only_for_vectors\n  *\n  * \\warning If the input vector is too small (i.e., this->norm()==0), then \\c *this is left unchanged.\n  *\n  * \\sa norm(), normalized()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline void MatrixBase<Derived>::normalize()\n{\n  RealScalar z = squaredNorm();\n  // NOTE: after extensive benchmarking, this conditional does not impact performance, at least on recent x86 CPU\n  if(z>RealScalar(0))\n    derived() /= numext::sqrt(z);\n}\n\n/** \\returns an expression of the quotient of \\c *this by its own norm while avoiding underflow and overflow.\n  *\n  * \\only_for_vectors\n  *\n  * This method is analogue to the normalized() method, but it reduces the risk of\n  * underflow and overflow when computing the norm.\n  *\n  * \\warning If the input vector is too small (i.e., this->norm()==0),\n  *          then this function returns a copy of the input.\n  *\n  * \\sa stableNorm(), stableNormalize(), normalized()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline const typename MatrixBase<Derived>::PlainObject\nMatrixBase<Derived>::stableNormalized() const\n{\n  typedef typename internal::nested_eval<Derived,3>::type _Nested;\n  _Nested n(derived());\n  RealScalar w = n.cwiseAbs().maxCoeff();\n  RealScalar z = (n/w).squaredNorm();\n  if(z>RealScalar(0))\n    return n / (numext::sqrt(z)*w);\n  else\n    return n;\n}\n\n/** Normalizes the vector while avoid underflow and overflow\n  *\n  * \\only_for_vectors\n  *\n  * This method is analogue to the normalize() method, but it reduces the risk of\n  * underflow and overflow when computing the norm.\n  *\n  * \\warning If the input vector is too small (i.e., this->norm()==0), then \\c *this is left unchanged.\n  *\n  * \\sa stableNorm(), stableNormalized(), normalize()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline void MatrixBase<Derived>::stableNormalize()\n{\n  RealScalar w = cwiseAbs().maxCoeff();\n  RealScalar z = 
(derived()/w).squaredNorm();\n  if(z>RealScalar(0))\n    derived() /= numext::sqrt(z)*w;\n}\n\n//---------- implementation of other norms ----------\n\nnamespace internal {\n\ntemplate<typename Derived, int p>\nstruct lpNorm_selector\n{\n  typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline RealScalar run(const MatrixBase<Derived>& m)\n  {\n    EIGEN_USING_STD_MATH(pow)\n    return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p);\n  }\n};\n\ntemplate<typename Derived>\nstruct lpNorm_selector<Derived, 1>\n{\n  EIGEN_DEVICE_FUNC\n  static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)\n  {\n    return m.cwiseAbs().sum();\n  }\n};\n\ntemplate<typename Derived>\nstruct lpNorm_selector<Derived, 2>\n{\n  EIGEN_DEVICE_FUNC\n  static inline typename NumTraits<typename traits<Derived>::Scalar>::Real run(const MatrixBase<Derived>& m)\n  {\n    return m.norm();\n  }\n};\n\ntemplate<typename Derived>\nstruct lpNorm_selector<Derived, Infinity>\n{\n  typedef typename NumTraits<typename traits<Derived>::Scalar>::Real RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline RealScalar run(const MatrixBase<Derived>& m)\n  {\n    if(Derived::SizeAtCompileTime==0 || (Derived::SizeAtCompileTime==Dynamic && m.size()==0))\n      return RealScalar(0);\n    return m.cwiseAbs().maxCoeff();\n  }\n};\n\n} // end namespace internal\n\n/** \\returns the \\b coefficient-wise \\f$ \\ell^p \\f$ norm of \\c *this, that is, returns the p-th root of the sum of the p-th powers of the absolute values\n  *          of the coefficients of \\c *this. 
If \\a p is the special value \\a Eigen::Infinity, this function returns the \\f$ \\ell^\\infty \\f$\n  *          norm, that is the maximum of the absolute values of the coefficients of \\c *this.\n  *\n  * In all cases, if \\c *this is empty, then the value 0 is returned.\n  *\n  * \\note For matrices, this function does not compute the <a href=\"https://en.wikipedia.org/wiki/Operator_norm\">operator-norm</a>. That is, if \\c *this is a matrix, then its coefficients are interpreted as a 1D vector. Nonetheless, you can easily compute the 1-norm and \\f$\\infty\\f$-norm matrix operator norms using \\link TutorialReductionsVisitorsBroadcastingReductionsNorm partial reductions \\endlink.\n  *\n  * \\sa norm()\n  */\ntemplate<typename Derived>\ntemplate<int p>\n#ifndef EIGEN_PARSED_BY_DOXYGEN\nEIGEN_DEVICE_FUNC inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real\n#else\nEIGEN_DEVICE_FUNC MatrixBase<Derived>::RealScalar\n#endif\nMatrixBase<Derived>::lpNorm() const\n{\n  return internal::lpNorm_selector<Derived, p>::run(*this);\n}\n\n//---------- implementation of isOrthogonal / isUnitary ----------\n\n/** \\returns true if *this is approximately orthogonal to \\a other,\n  *          within the precision given by \\a prec.\n  *\n  * Example: \\include MatrixBase_isOrthogonal.cpp\n  * Output: \\verbinclude MatrixBase_isOrthogonal.out\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nbool MatrixBase<Derived>::isOrthogonal\n(const MatrixBase<OtherDerived>& other, const RealScalar& prec) const\n{\n  typename internal::nested_eval<Derived,2>::type nested(derived());\n  typename internal::nested_eval<OtherDerived,2>::type otherNested(other.derived());\n  return numext::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm();\n}\n\n/** \\returns true if *this is approximately an unitary matrix,\n  *          within the precision given by \\a prec. 
In the case where the \\a Scalar\n  *          type is real numbers, a unitary matrix is an orthogonal matrix, whence the name.\n  *\n  * \\note This can be used to check whether a family of vectors forms an orthonormal basis.\n  *       Indeed, \\c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an\n  *       orthonormal basis.\n  *\n  * Example: \\include MatrixBase_isUnitary.cpp\n  * Output: \\verbinclude MatrixBase_isUnitary.out\n  */\ntemplate<typename Derived>\nbool MatrixBase<Derived>::isUnitary(const RealScalar& prec) const\n{\n  typename internal::nested_eval<Derived,1>::type self(derived());\n  for(Index i = 0; i < cols(); ++i)\n  {\n    if(!internal::isApprox(self.col(i).squaredNorm(), static_cast<RealScalar>(1), prec))\n      return false;\n    for(Index j = 0; j < i; ++j)\n      if(!internal::isMuchSmallerThan(self.col(i).dot(self.col(j)), static_cast<Scalar>(1), prec))\n        return false;\n  }\n  return true;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_DOT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/EigenBase.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_EIGENBASE_H\n#define EIGEN_EIGENBASE_H\n\nnamespace Eigen {\n\n/** \\class EigenBase\n  * \n  * Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T).\n  *\n  * In other words, an EigenBase object is an object that can be copied into a MatrixBase.\n  *\n  * Besides MatrixBase-derived classes, this also includes special matrix classes such as diagonal matrices, etc.\n  *\n  * Notice that this class is trivial, it is only used to disambiguate overloaded functions.\n  *\n  * \\sa \\blank \\ref TopicClassHierarchy\n  */\ntemplate<typename Derived> struct EigenBase\n{\n//   typedef typename internal::plain_matrix_type<Derived>::type PlainObject;\n  \n  /** \\brief The interface type of indices\n    * \\details To change this, \\c \\#define the preprocessor symbol \\c EIGEN_DEFAULT_DENSE_INDEX_TYPE.\n    * \\deprecated Since Eigen 3.3, its usage is deprecated. 
Use Eigen::Index instead.\n    * \\sa StorageIndex, \\ref TopicPreprocessorDirectives.\n    */\n  typedef Eigen::Index Index;\n\n  // FIXME is it needed?\n  typedef typename internal::traits<Derived>::StorageKind StorageKind;\n\n  /** \\returns a reference to the derived object */\n  EIGEN_DEVICE_FUNC\n  Derived& derived() { return *static_cast<Derived*>(this); }\n  /** \\returns a const reference to the derived object */\n  EIGEN_DEVICE_FUNC\n  const Derived& derived() const { return *static_cast<const Derived*>(this); }\n\n  EIGEN_DEVICE_FUNC\n  inline Derived& const_cast_derived() const\n  { return *static_cast<Derived*>(const_cast<EigenBase*>(this)); }\n  EIGEN_DEVICE_FUNC\n  inline const Derived& const_derived() const\n  { return *static_cast<const Derived*>(this); }\n\n  /** \\returns the number of rows. \\sa cols(), RowsAtCompileTime */\n  EIGEN_DEVICE_FUNC\n  inline Index rows() const { return derived().rows(); }\n  /** \\returns the number of columns. \\sa rows(), ColsAtCompileTime*/\n  EIGEN_DEVICE_FUNC\n  inline Index cols() const { return derived().cols(); }\n  /** \\returns the number of coefficients, which is rows()*cols().\n    * \\sa rows(), cols(), SizeAtCompileTime. 
*/\n  EIGEN_DEVICE_FUNC\n  inline Index size() const { return rows() * cols(); }\n\n  /** \\internal Don't use it, but do the equivalent: \\code dst = *this; \\endcode */\n  template<typename Dest>\n  EIGEN_DEVICE_FUNC\n  inline void evalTo(Dest& dst) const\n  { derived().evalTo(dst); }\n\n  /** \\internal Don't use it, but do the equivalent: \\code dst += *this; \\endcode */\n  template<typename Dest>\n  EIGEN_DEVICE_FUNC\n  inline void addTo(Dest& dst) const\n  {\n    // This is the default implementation,\n    // derived class can reimplement it in a more optimized way.\n    typename Dest::PlainObject res(rows(),cols());\n    evalTo(res);\n    dst += res;\n  }\n\n  /** \\internal Don't use it, but do the equivalent: \\code dst -= *this; \\endcode */\n  template<typename Dest>\n  EIGEN_DEVICE_FUNC\n  inline void subTo(Dest& dst) const\n  {\n    // This is the default implementation,\n    // derived class can reimplement it in a more optimized way.\n    typename Dest::PlainObject res(rows(),cols());\n    evalTo(res);\n    dst -= res;\n  }\n\n  /** \\internal Don't use it, but do the equivalent: \\code dst.applyOnTheRight(*this); \\endcode */\n  template<typename Dest>\n  EIGEN_DEVICE_FUNC inline void applyThisOnTheRight(Dest& dst) const\n  {\n    // This is the default implementation,\n    // derived class can reimplement it in a more optimized way.\n    dst = dst * this->derived();\n  }\n\n  /** \\internal Don't use it, but do the equivalent: \\code dst.applyOnTheLeft(*this); \\endcode */\n  template<typename Dest>\n  EIGEN_DEVICE_FUNC inline void applyThisOnTheLeft(Dest& dst) const\n  {\n    // This is the default implementation,\n    // derived class can reimplement it in a more optimized way.\n    dst = this->derived() * dst;\n  }\n\n};\n\n/***************************************************************************\n* Implementation of matrix base methods\n***************************************************************************/\n\n/** \\brief Copies the 
generic expression \\a other into *this.\n  *\n  * \\details The expression must provide a (templated) evalTo(Derived& dst) const\n  * function which does the actual job. In practice, this allows any user to write\n  * its own special matrix without having to modify MatrixBase\n  *\n  * \\returns a reference to *this.\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\nDerived& DenseBase<Derived>::operator=(const EigenBase<OtherDerived> &other)\n{\n  call_assignment(derived(), other.derived());\n  return derived();\n}\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\nDerived& DenseBase<Derived>::operator+=(const EigenBase<OtherDerived> &other)\n{\n  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());\n  return derived();\n}\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\nDerived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)\n{\n  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());\n  return derived();\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_EIGENBASE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/ForceAlignedAccess.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_FORCEALIGNEDACCESS_H\n#define EIGEN_FORCEALIGNEDACCESS_H\n\nnamespace Eigen {\n\n/** \\class ForceAlignedAccess\n  * \\ingroup Core_Module\n  *\n  * \\brief Enforce aligned packet loads and stores regardless of what is requested\n  *\n  * \\param ExpressionType the type of the object of which we are forcing aligned packet access\n  *\n  * This class is the return type of MatrixBase::forceAlignedAccess()\n  * and most of the time this is the only way it is used.\n  *\n  * \\sa MatrixBase::forceAlignedAccess()\n  */\n\nnamespace internal {\ntemplate<typename ExpressionType>\nstruct traits<ForceAlignedAccess<ExpressionType> > : public traits<ExpressionType>\n{};\n}\n\ntemplate<typename ExpressionType> class ForceAlignedAccess\n  : public internal::dense_xpr_base< ForceAlignedAccess<ExpressionType> >::type\n{\n  public:\n\n    typedef typename internal::dense_xpr_base<ForceAlignedAccess>::type Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(ForceAlignedAccess)\n\n    EIGEN_DEVICE_FUNC explicit inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {}\n\n    EIGEN_DEVICE_FUNC inline Index rows() const { return m_expression.rows(); }\n    EIGEN_DEVICE_FUNC inline Index cols() const { return m_expression.cols(); }\n    EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_expression.outerStride(); }\n    EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_expression.innerStride(); }\n\n    EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index row, Index col) const\n    {\n      return m_expression.coeff(row, col);\n    }\n\n    
EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, Index col)\n    {\n      return m_expression.const_cast_derived().coeffRef(row, col);\n    }\n\n    EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const\n    {\n      return m_expression.coeff(index);\n    }\n\n    EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index)\n    {\n      return m_expression.const_cast_derived().coeffRef(index);\n    }\n\n    template<int LoadMode>\n    inline const PacketScalar packet(Index row, Index col) const\n    {\n      return m_expression.template packet<Aligned>(row, col);\n    }\n\n    template<int LoadMode>\n    inline void writePacket(Index row, Index col, const PacketScalar& x)\n    {\n      m_expression.const_cast_derived().template writePacket<Aligned>(row, col, x);\n    }\n\n    template<int LoadMode>\n    inline const PacketScalar packet(Index index) const\n    {\n      return m_expression.template packet<Aligned>(index);\n    }\n\n    template<int LoadMode>\n    inline void writePacket(Index index, const PacketScalar& x)\n    {\n      m_expression.const_cast_derived().template writePacket<Aligned>(index, x);\n    }\n\n    EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; }\n\n  protected:\n    const ExpressionType& m_expression;\n\n  private:\n    ForceAlignedAccess& operator=(const ForceAlignedAccess&);\n};\n\n/** \\returns an expression of *this with forced aligned access\n  * \\sa forceAlignedAccessIf(),class ForceAlignedAccess\n  */\ntemplate<typename Derived>\ninline const ForceAlignedAccess<Derived>\nMatrixBase<Derived>::forceAlignedAccess() const\n{\n  return ForceAlignedAccess<Derived>(derived());\n}\n\n/** \\returns an expression of *this with forced aligned access\n  * \\sa forceAlignedAccessIf(), class ForceAlignedAccess\n  */\ntemplate<typename Derived>\ninline ForceAlignedAccess<Derived>\nMatrixBase<Derived>::forceAlignedAccess()\n{\n  return ForceAlignedAccess<Derived>(derived());\n}\n\n/** \\returns an 
expression of *this with forced aligned access if \\a Enable is true.\n  * \\sa forceAlignedAccess(), class ForceAlignedAccess\n  */\ntemplate<typename Derived>\ntemplate<bool Enable>\ninline typename internal::add_const_on_value_type<typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type>::type\nMatrixBase<Derived>::forceAlignedAccessIf() const\n{\n  return derived();  // FIXME This should not work but apparently is never used\n}\n\n/** \\returns an expression of *this with forced aligned access if \\a Enable is true.\n  * \\sa forceAlignedAccess(), class ForceAlignedAccess\n  */\ntemplate<typename Derived>\ntemplate<bool Enable>\ninline typename internal::conditional<Enable,ForceAlignedAccess<Derived>,Derived&>::type\nMatrixBase<Derived>::forceAlignedAccessIf()\n{\n  return derived();  // FIXME This should not work but apparently is never used\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_FORCEALIGNEDACCESS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Fuzzy.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_FUZZY_H\n#define EIGEN_FUZZY_H\n\nnamespace Eigen { \n\nnamespace internal\n{\n\ntemplate<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>\nstruct isApprox_selector\n{\n  EIGEN_DEVICE_FUNC\n  static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec)\n  {\n    typename internal::nested_eval<Derived,2>::type nested(x);\n    typename internal::nested_eval<OtherDerived,2>::type otherNested(y);\n    return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * numext::mini(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum());\n  }\n};\n\ntemplate<typename Derived, typename OtherDerived>\nstruct isApprox_selector<Derived, OtherDerived, true>\n{\n  EIGEN_DEVICE_FUNC\n  static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar&)\n  {\n    return x.matrix() == y.matrix();\n  }\n};\n\ntemplate<typename Derived, typename OtherDerived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>\nstruct isMuchSmallerThan_object_selector\n{\n  EIGEN_DEVICE_FUNC\n  static bool run(const Derived& x, const OtherDerived& y, const typename Derived::RealScalar& prec)\n  {\n    return x.cwiseAbs2().sum() <= numext::abs2(prec) * y.cwiseAbs2().sum();\n  }\n};\n\ntemplate<typename Derived, typename OtherDerived>\nstruct isMuchSmallerThan_object_selector<Derived, OtherDerived, true>\n{\n  EIGEN_DEVICE_FUNC\n  static bool run(const Derived& x, const OtherDerived&, const typename 
Derived::RealScalar&)\n  {\n    return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();\n  }\n};\n\ntemplate<typename Derived, bool is_integer = NumTraits<typename Derived::Scalar>::IsInteger>\nstruct isMuchSmallerThan_scalar_selector\n{\n  EIGEN_DEVICE_FUNC\n  static bool run(const Derived& x, const typename Derived::RealScalar& y, const typename Derived::RealScalar& prec)\n  {\n    return x.cwiseAbs2().sum() <= numext::abs2(prec * y);\n  }\n};\n\ntemplate<typename Derived>\nstruct isMuchSmallerThan_scalar_selector<Derived, true>\n{\n  EIGEN_DEVICE_FUNC\n  static bool run(const Derived& x, const typename Derived::RealScalar&, const typename Derived::RealScalar&)\n  {\n    return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix();\n  }\n};\n\n} // end namespace internal\n\n\n/** \\returns \\c true if \\c *this is approximately equal to \\a other, within the precision\n  * determined by \\a prec.\n  *\n  * \\note The fuzzy compares are done multiplicatively. Two vectors \\f$ v \\f$ and \\f$ w \\f$\n  * are considered to be approximately equal within precision \\f$ p \\f$ if\n  * \\f[ \\Vert v - w \\Vert \\leqslant p\\,\\min(\\Vert v\\Vert, \\Vert w\\Vert). \\f]\n  * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm\n  * L2 norm).\n  *\n  * \\note Because of the multiplicativeness of this comparison, one can't use this function\n  * to check whether \\c *this is approximately equal to the zero matrix or vector.\n  * Indeed, \\c isApprox(zero) returns false unless \\c *this itself is exactly the zero matrix\n  * or vector. 
If you want to test whether \\c *this is zero, use internal::isMuchSmallerThan(const\n  * RealScalar&, RealScalar) instead.\n  *\n  * \\sa internal::isMuchSmallerThan(const RealScalar&, RealScalar) const\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC bool DenseBase<Derived>::isApprox(\n  const DenseBase<OtherDerived>& other,\n  const RealScalar& prec\n) const\n{\n  return internal::isApprox_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);\n}\n\n/** \\returns \\c true if the norm of \\c *this is much smaller than \\a other,\n  * within the precision determined by \\a prec.\n  *\n  * \\note The fuzzy compares are done multiplicatively. A vector \\f$ v \\f$ is\n  * considered to be much smaller than \\f$ x \\f$ within precision \\f$ p \\f$ if\n  * \\f[ \\Vert v \\Vert \\leqslant p\\,\\vert x\\vert. \\f]\n  *\n  * For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason,\n  * the value of the reference scalar \\a other should come from the Hilbert-Schmidt norm\n  * of a reference matrix of same dimensions.\n  *\n  * \\sa isApprox(), isMuchSmallerThan(const DenseBase<OtherDerived>&, RealScalar) const\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC bool DenseBase<Derived>::isMuchSmallerThan(\n  const typename NumTraits<Scalar>::Real& other,\n  const RealScalar& prec\n) const\n{\n  return internal::isMuchSmallerThan_scalar_selector<Derived>::run(derived(), other, prec);\n}\n\n/** \\returns \\c true if the norm of \\c *this is much smaller than the norm of \\a other,\n  * within the precision determined by \\a prec.\n  *\n  * \\note The fuzzy compares are done multiplicatively. A vector \\f$ v \\f$ is\n  * considered to be much smaller than a vector \\f$ w \\f$ within precision \\f$ p \\f$ if\n  * \\f[ \\Vert v \\Vert \\leqslant p\\,\\Vert w\\Vert. 
\\f]\n  * For matrices, the comparison is done using the Hilbert-Schmidt norm.\n  *\n  * \\sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC bool DenseBase<Derived>::isMuchSmallerThan(\n  const DenseBase<OtherDerived>& other,\n  const RealScalar& prec\n) const\n{\n  return internal::isMuchSmallerThan_object_selector<Derived, OtherDerived>::run(derived(), other.derived(), prec);\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_FUZZY_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/GeneralProduct.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_GENERAL_PRODUCT_H\n#define EIGEN_GENERAL_PRODUCT_H\n\nnamespace Eigen {\n\nenum {\n  Large = 2,\n  Small = 3\n};\n\nnamespace internal {\n\ntemplate<int Rows, int Cols, int Depth> struct product_type_selector;\n\ntemplate<int Size, int MaxSize> struct product_size_category\n{\n  enum { is_large = MaxSize == Dynamic ||\n                    Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD ||\n                    (Size==Dynamic && MaxSize>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD),\n         value = is_large  ? Large\n               : Size == 1 ? 
1\n                           : Small\n  };\n};\n\ntemplate<typename Lhs, typename Rhs> struct product_type\n{\n  typedef typename remove_all<Lhs>::type _Lhs;\n  typedef typename remove_all<Rhs>::type _Rhs;\n  enum {\n    MaxRows = traits<_Lhs>::MaxRowsAtCompileTime,\n    Rows    = traits<_Lhs>::RowsAtCompileTime,\n    MaxCols = traits<_Rhs>::MaxColsAtCompileTime,\n    Cols    = traits<_Rhs>::ColsAtCompileTime,\n    MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(traits<_Lhs>::MaxColsAtCompileTime,\n                                           traits<_Rhs>::MaxRowsAtCompileTime),\n    Depth = EIGEN_SIZE_MIN_PREFER_FIXED(traits<_Lhs>::ColsAtCompileTime,\n                                        traits<_Rhs>::RowsAtCompileTime)\n  };\n\n  // the splitting into different lines of code here, introducing the _select enums and the typedef below,\n  // is to work around an internal compiler error with gcc 4.1 and 4.2.\nprivate:\n  enum {\n    rows_select = product_size_category<Rows,MaxRows>::value,\n    cols_select = product_size_category<Cols,MaxCols>::value,\n    depth_select = product_size_category<Depth,MaxDepth>::value\n  };\n  typedef product_type_selector<rows_select, cols_select, depth_select> selector;\n\npublic:\n  enum {\n    value = selector::ret,\n    ret = selector::ret\n  };\n#ifdef EIGEN_DEBUG_PRODUCT\n  static void debug()\n  {\n      EIGEN_DEBUG_VAR(Rows);\n      EIGEN_DEBUG_VAR(Cols);\n      EIGEN_DEBUG_VAR(Depth);\n      EIGEN_DEBUG_VAR(rows_select);\n      EIGEN_DEBUG_VAR(cols_select);\n      EIGEN_DEBUG_VAR(depth_select);\n      EIGEN_DEBUG_VAR(value);\n  }\n#endif\n};\n\n/* The following allows to select the kind of product at compile time\n * based on the three dimensions of the product.\n * This is a compile time mapping from {1,Small,Large}^3 -> {product types} */\n// FIXME I'm not sure the current mapping is the ideal one.\ntemplate<int M, int N>  struct product_type_selector<M,N,1>              { enum { ret = OuterProduct }; };\ntemplate<int M>         
struct product_type_selector<M, 1, 1>            { enum { ret = LazyCoeffBasedProductMode }; };\ntemplate<int N>         struct product_type_selector<1, N, 1>            { enum { ret = LazyCoeffBasedProductMode }; };\ntemplate<int Depth>     struct product_type_selector<1,    1,    Depth>  { enum { ret = InnerProduct }; };\ntemplate<>              struct product_type_selector<1,    1,    1>      { enum { ret = InnerProduct }; };\ntemplate<>              struct product_type_selector<Small,1,    Small>  { enum { ret = CoeffBasedProductMode }; };\ntemplate<>              struct product_type_selector<1,    Small,Small>  { enum { ret = CoeffBasedProductMode }; };\ntemplate<>              struct product_type_selector<Small,Small,Small>  { enum { ret = CoeffBasedProductMode }; };\ntemplate<>              struct product_type_selector<Small, Small, 1>    { enum { ret = LazyCoeffBasedProductMode }; };\ntemplate<>              struct product_type_selector<Small, Large, 1>    { enum { ret = LazyCoeffBasedProductMode }; };\ntemplate<>              struct product_type_selector<Large, Small, 1>    { enum { ret = LazyCoeffBasedProductMode }; };\ntemplate<>              struct product_type_selector<1,    Large,Small>  { enum { ret = CoeffBasedProductMode }; };\ntemplate<>              struct product_type_selector<1,    Large,Large>  { enum { ret = GemvProduct }; };\ntemplate<>              struct product_type_selector<1,    Small,Large>  { enum { ret = CoeffBasedProductMode }; };\ntemplate<>              struct product_type_selector<Large,1,    Small>  { enum { ret = CoeffBasedProductMode }; };\ntemplate<>              struct product_type_selector<Large,1,    Large>  { enum { ret = GemvProduct }; };\ntemplate<>              struct product_type_selector<Small,1,    Large>  { enum { ret = CoeffBasedProductMode }; };\ntemplate<>              struct product_type_selector<Small,Small,Large>  { enum { ret = GemmProduct }; };\ntemplate<>              struct 
product_type_selector<Large,Small,Large>  { enum { ret = GemmProduct }; };\ntemplate<>              struct product_type_selector<Small,Large,Large>  { enum { ret = GemmProduct }; };\ntemplate<>              struct product_type_selector<Large,Large,Large>  { enum { ret = GemmProduct }; };\ntemplate<>              struct product_type_selector<Large,Small,Small>  { enum { ret = CoeffBasedProductMode }; };\ntemplate<>              struct product_type_selector<Small,Large,Small>  { enum { ret = CoeffBasedProductMode }; };\ntemplate<>              struct product_type_selector<Large,Large,Small>  { enum { ret = GemmProduct }; };\n\n} // end namespace internal\n\n/***********************************************************************\n*  Implementation of Inner Vector Vector Product\n***********************************************************************/\n\n// FIXME : maybe the \"inner product\" could return a Scalar\n// instead of a 1x1 matrix ??\n// Pro: more natural for the user\n// Cons: this could be a problem if in a meta unrolled algorithm a matrix-matrix\n// product ends up to a row-vector times col-vector product... 
To tackle this use\n// case, we could have a specialization for Block<MatrixType,1,1> with: operator=(Scalar x);\n\n/***********************************************************************\n*  Implementation of Outer Vector Vector Product\n***********************************************************************/\n\n/***********************************************************************\n*  Implementation of General Matrix Vector Product\n***********************************************************************/\n\n/*  According to the shape/flags of the matrix we have to distinghish 3 different cases:\n *   1 - the matrix is col-major, BLAS compatible and M is large => call fast BLAS-like colmajor routine\n *   2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine\n *   3 - all other cases are handled using a simple loop along the outer-storage direction.\n *  Therefore we need a lower level meta selector.\n *  Furthermore, if the matrix is the rhs, then the product has to be transposed.\n */\nnamespace internal {\n\ntemplate<int Side, int StorageOrder, bool BlasCompatible>\nstruct gemv_dense_selector;\n\n} // end namespace internal\n\nnamespace internal {\n\ntemplate<typename Scalar,int Size,int MaxSize,bool Cond> struct gemv_static_vector_if;\n\ntemplate<typename Scalar,int Size,int MaxSize>\nstruct gemv_static_vector_if<Scalar,Size,MaxSize,false>\n{\n  EIGEN_STRONG_INLINE  Scalar* data() { eigen_internal_assert(false && \"should never be called\"); return 0; }\n};\n\ntemplate<typename Scalar,int Size>\nstruct gemv_static_vector_if<Scalar,Size,Dynamic,true>\n{\n  EIGEN_STRONG_INLINE Scalar* data() { return 0; }\n};\n\ntemplate<typename Scalar,int Size,int MaxSize>\nstruct gemv_static_vector_if<Scalar,Size,MaxSize,true>\n{\n  enum {\n    ForceAlignment  = internal::packet_traits<Scalar>::Vectorizable,\n    PacketSize      = internal::packet_traits<Scalar>::size\n  };\n  #if EIGEN_MAX_STATIC_ALIGN_BYTES!=0\n  
internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize),0,EIGEN_PLAIN_ENUM_MIN(AlignedMax,PacketSize)> m_data;\n  EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; }\n  #else\n  // Some architectures cannot align on the stack,\n  // => let's manually enforce alignment by allocating more data and return the address of the first aligned element.\n  internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize)+(ForceAlignment?EIGEN_MAX_ALIGN_BYTES:0),0> m_data;\n  EIGEN_STRONG_INLINE Scalar* data() {\n    return ForceAlignment\n            ? reinterpret_cast<Scalar*>((internal::UIntPtr(m_data.array) & ~(std::size_t(EIGEN_MAX_ALIGN_BYTES-1))) + EIGEN_MAX_ALIGN_BYTES)\n            : m_data.array;\n  }\n  #endif\n};\n\n// The vector is on the left => transposition\ntemplate<int StorageOrder, bool BlasCompatible>\nstruct gemv_dense_selector<OnTheLeft,StorageOrder,BlasCompatible>\n{\n  template<typename Lhs, typename Rhs, typename Dest>\n  static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)\n  {\n    Transpose<Dest> destT(dest);\n    enum { OtherStorageOrder = StorageOrder == RowMajor ? 
ColMajor : RowMajor };\n    gemv_dense_selector<OnTheRight,OtherStorageOrder,BlasCompatible>\n      ::run(rhs.transpose(), lhs.transpose(), destT, alpha);\n  }\n};\n\ntemplate<> struct gemv_dense_selector<OnTheRight,ColMajor,true>\n{\n  template<typename Lhs, typename Rhs, typename Dest>\n  static inline void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)\n  {\n    typedef typename Lhs::Scalar   LhsScalar;\n    typedef typename Rhs::Scalar   RhsScalar;\n    typedef typename Dest::Scalar  ResScalar;\n    typedef typename Dest::RealScalar  RealScalar;\n    \n    typedef internal::blas_traits<Lhs> LhsBlasTraits;\n    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;\n    typedef internal::blas_traits<Rhs> RhsBlasTraits;\n    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;\n  \n    typedef Map<Matrix<ResScalar,Dynamic,1>, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits<ResScalar>::size)> MappedDest;\n\n    ActualLhsType actualLhs = LhsBlasTraits::extract(lhs);\n    ActualRhsType actualRhs = RhsBlasTraits::extract(rhs);\n\n    ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)\n                                  * RhsBlasTraits::extractScalarFactor(rhs);\n\n    // make sure Dest is a compile-time vector type (bug 1166)\n    typedef typename conditional<Dest::IsVectorAtCompileTime, Dest, typename Dest::ColXpr>::type ActualDest;\n\n    enum {\n      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1\n      // on, the other hand it is good for the cache to pack the vector anyways...\n      EvalToDestAtCompileTime = (ActualDest::InnerStrideAtCompileTime==1),\n      ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),\n      MightCannotUseDest = (!EvalToDestAtCompileTime) || ComplexByReal\n    };\n\n    typedef const_blas_data_mapper<LhsScalar,Index,ColMajor> LhsMapper;\n    typedef 
const_blas_data_mapper<RhsScalar,Index,RowMajor> RhsMapper;\n    RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);\n\n    if(!MightCannotUseDest)\n    {\n      // shortcut if we are sure to be able to use dest directly,\n      // this ease the compiler to generate cleaner and more optimzized code for most common cases\n      general_matrix_vector_product\n          <Index,LhsScalar,LhsMapper,ColMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsMapper,RhsBlasTraits::NeedToConjugate>::run(\n          actualLhs.rows(), actualLhs.cols(),\n          LhsMapper(actualLhs.data(), actualLhs.outerStride()),\n          RhsMapper(actualRhs.data(), actualRhs.innerStride()),\n          dest.data(), 1,\n          compatibleAlpha);\n    }\n    else\n    {\n      gemv_static_vector_if<ResScalar,ActualDest::SizeAtCompileTime,ActualDest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;\n\n      const bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));\n      const bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;\n\n      ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),\n                                                    evalToDest ? 
dest.data() : static_dest.data());\n\n      if(!evalToDest)\n      {\n        #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN\n        Index size = dest.size();\n        EIGEN_DENSE_STORAGE_CTOR_PLUGIN\n        #endif\n        if(!alphaIsCompatible)\n        {\n          MappedDest(actualDestPtr, dest.size()).setZero();\n          compatibleAlpha = RhsScalar(1);\n        }\n        else\n          MappedDest(actualDestPtr, dest.size()) = dest;\n      }\n\n      general_matrix_vector_product\n          <Index,LhsScalar,LhsMapper,ColMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsMapper,RhsBlasTraits::NeedToConjugate>::run(\n          actualLhs.rows(), actualLhs.cols(),\n          LhsMapper(actualLhs.data(), actualLhs.outerStride()),\n          RhsMapper(actualRhs.data(), actualRhs.innerStride()),\n          actualDestPtr, 1,\n          compatibleAlpha);\n\n      if (!evalToDest)\n      {\n        if(!alphaIsCompatible)\n          dest.matrix() += actualAlpha * MappedDest(actualDestPtr, dest.size());\n        else\n          dest = MappedDest(actualDestPtr, dest.size());\n      }\n    }\n  }\n};\n\ntemplate<> struct gemv_dense_selector<OnTheRight,RowMajor,true>\n{\n  template<typename Lhs, typename Rhs, typename Dest>\n  static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)\n  {\n    typedef typename Lhs::Scalar   LhsScalar;\n    typedef typename Rhs::Scalar   RhsScalar;\n    typedef typename Dest::Scalar  ResScalar;\n    \n    typedef internal::blas_traits<Lhs> LhsBlasTraits;\n    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;\n    typedef internal::blas_traits<Rhs> RhsBlasTraits;\n    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;\n    typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;\n\n    typename add_const<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(lhs);\n    typename add_const<ActualRhsType>::type actualRhs = 
RhsBlasTraits::extract(rhs);\n\n    ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)\n                                  * RhsBlasTraits::extractScalarFactor(rhs);\n\n    enum {\n      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1\n      // on, the other hand it is good for the cache to pack the vector anyways...\n      DirectlyUseRhs = ActualRhsTypeCleaned::InnerStrideAtCompileTime==1\n    };\n\n    gemv_static_vector_if<RhsScalar,ActualRhsTypeCleaned::SizeAtCompileTime,ActualRhsTypeCleaned::MaxSizeAtCompileTime,!DirectlyUseRhs> static_rhs;\n\n    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(),\n        DirectlyUseRhs ? const_cast<RhsScalar*>(actualRhs.data()) : static_rhs.data());\n\n    if(!DirectlyUseRhs)\n    {\n      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN\n      Index size = actualRhs.size();\n      EIGEN_DENSE_STORAGE_CTOR_PLUGIN\n      #endif\n      Map<typename ActualRhsTypeCleaned::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;\n    }\n\n    typedef const_blas_data_mapper<LhsScalar,Index,RowMajor> LhsMapper;\n    typedef const_blas_data_mapper<RhsScalar,Index,ColMajor> RhsMapper;\n    general_matrix_vector_product\n        <Index,LhsScalar,LhsMapper,RowMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsMapper,RhsBlasTraits::NeedToConjugate>::run(\n        actualLhs.rows(), actualLhs.cols(),\n        LhsMapper(actualLhs.data(), actualLhs.outerStride()),\n        RhsMapper(actualRhsPtr, 1),\n        dest.data(), dest.col(0).innerStride(), //NOTE  if dest is not a vector at compile-time, then dest.innerStride() might be wrong. 
(bug 1166)\n        actualAlpha);\n  }\n};\n\ntemplate<> struct gemv_dense_selector<OnTheRight,ColMajor,false>\n{\n  template<typename Lhs, typename Rhs, typename Dest>\n  static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)\n  {\n    EIGEN_STATIC_ASSERT((!nested_eval<Lhs,1>::Evaluate),EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE);\n    // TODO if rhs is large enough it might be beneficial to make sure that dest is sequentially stored in memory, otherwise use a temp\n    typename nested_eval<Rhs,1>::type actual_rhs(rhs);\n    const Index size = rhs.rows();\n    for(Index k=0; k<size; ++k)\n      dest += (alpha*actual_rhs.coeff(k)) * lhs.col(k);\n  }\n};\n\ntemplate<> struct gemv_dense_selector<OnTheRight,RowMajor,false>\n{\n  template<typename Lhs, typename Rhs, typename Dest>\n  static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)\n  {\n    EIGEN_STATIC_ASSERT((!nested_eval<Lhs,1>::Evaluate),EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE);\n    typename nested_eval<Rhs,Lhs::RowsAtCompileTime>::type actual_rhs(rhs);\n    const Index rows = dest.rows();\n    for(Index i=0; i<rows; ++i)\n      dest.coeffRef(i) += alpha * (lhs.row(i).cwiseProduct(actual_rhs.transpose())).sum();\n  }\n};\n\n} // end namespace internal\n\n/***************************************************************************\n* Implementation of matrix base methods\n***************************************************************************/\n\n/** \\returns the matrix product of \\c *this and \\a other.\n  *\n  * \\note If instead of the matrix product you want the coefficient-wise product, see Cwise::operator*().\n  *\n  * \\sa lazyProduct(), operator*=(const MatrixBase&), Cwise::operator*()\n  */\n#ifndef __CUDACC__\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\ninline const Product<Derived, OtherDerived>\nMatrixBase<Derived>::operator*(const 
MatrixBase<OtherDerived> &other) const\n{\n  // A note regarding the function declaration: In MSVC, this function will sometimes\n  // not be inlined since DenseStorage is an unwindable object for dynamic\n  // matrices and product types are holding a member to store the result.\n  // Thus it does not help tagging this function with EIGEN_STRONG_INLINE.\n  enum {\n    ProductIsValid =  Derived::ColsAtCompileTime==Dynamic\n                   || OtherDerived::RowsAtCompileTime==Dynamic\n                   || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),\n    AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,\n    SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)\n  };\n  // note to the lost user:\n  //    * for a dot product use: v1.dot(v2)\n  //    * for a coeff-wise product use: v1.cwiseProduct(v2)\n  EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),\n    INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)\n  EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),\n    INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)\n  EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)\n#ifdef EIGEN_DEBUG_PRODUCT\n  internal::product_type<Derived,OtherDerived>::debug();\n#endif\n\n  return Product<Derived, OtherDerived>(derived(), other.derived());\n}\n\n#endif // __CUDACC__\n\n/** \\returns an expression of the matrix product of \\c *this and \\a other without implicit evaluation.\n  *\n  * The returned product will behave like any other expressions: the coefficients of the product will be\n  * computed once at a time as requested. This might be useful in some extremely rare cases when only\n  * a small and no coherent fraction of the result's coefficients have to be computed.\n  *\n  * \\warning This version of the matrix product can be much much slower. 
So use it only if you know\n  * what you are doing and that you measured a true speed improvement.\n  *\n  * \\sa operator*(const MatrixBase&)\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nconst Product<Derived,OtherDerived,LazyProduct>\nEIGEN_DEVICE_FUNC MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const\n{\n  enum {\n    ProductIsValid =  Derived::ColsAtCompileTime==Dynamic\n                   || OtherDerived::RowsAtCompileTime==Dynamic\n                   || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),\n    AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,\n    SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)\n  };\n  // note to the lost user:\n  //    * for a dot product use: v1.dot(v2)\n  //    * for a coeff-wise product use: v1.cwiseProduct(v2)\n  EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),\n    INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)\n  EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),\n    INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)\n  EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)\n\n  return Product<Derived,OtherDerived,LazyProduct>(derived(), other.derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_PRODUCT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/GenericPacketMath.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_GENERIC_PACKET_MATH_H\n#define EIGEN_GENERIC_PACKET_MATH_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n/** \\internal\n  * \\file GenericPacketMath.h\n  *\n  * Default implementation for types not supported by the vectorization.\n  * In practice these functions are provided to make easier the writing\n  * of generic vectorized code.\n  */\n\n#ifndef EIGEN_DEBUG_ALIGNED_LOAD\n#define EIGEN_DEBUG_ALIGNED_LOAD\n#endif\n\n#ifndef EIGEN_DEBUG_UNALIGNED_LOAD\n#define EIGEN_DEBUG_UNALIGNED_LOAD\n#endif\n\n#ifndef EIGEN_DEBUG_ALIGNED_STORE\n#define EIGEN_DEBUG_ALIGNED_STORE\n#endif\n\n#ifndef EIGEN_DEBUG_UNALIGNED_STORE\n#define EIGEN_DEBUG_UNALIGNED_STORE\n#endif\n\nstruct default_packet_traits\n{\n  enum {\n    HasHalfPacket = 0,\n\n    HasAdd    = 1,\n    HasSub    = 1,\n    HasMul    = 1,\n    HasNegate = 1,\n    HasAbs    = 1,\n    HasArg    = 0,\n    HasAbs2   = 1,\n    HasMin    = 1,\n    HasMax    = 1,\n    HasConj   = 1,\n    HasSetLinear = 1,\n    HasBlend  = 0,\n\n    HasDiv    = 0,\n    HasSqrt   = 0,\n    HasRsqrt  = 0,\n    HasExp    = 0,\n    HasExpm1  = 0,\n    HasLog    = 0,\n    HasLog1p  = 0,\n    HasLog10  = 0,\n    HasPow    = 0,\n\n    HasSin    = 0,\n    HasCos    = 0,\n    HasTan    = 0,\n    HasASin   = 0,\n    HasACos   = 0,\n    HasATan   = 0,\n    HasSinh   = 0,\n    HasCosh   = 0,\n    HasTanh   = 0,\n    HasLGamma = 0,\n    HasDiGamma = 0,\n    HasZeta = 0,\n    HasPolygamma = 0,\n    HasErf = 0,\n    HasErfc = 0,\n    HasIGamma = 0,\n    HasIGammac = 0,\n    HasBetaInc = 0,\n\n  
  HasRound  = 0,\n    HasFloor  = 0,\n    HasCeil   = 0,\n\n    HasSign   = 0\n  };\n};\n\ntemplate<typename T> struct packet_traits : default_packet_traits\n{\n  typedef T type;\n  typedef T half;\n  enum {\n    Vectorizable = 0,\n    size = 1,\n    AlignedOnScalar = 0,\n    HasHalfPacket = 0\n  };\n  enum {\n    HasAdd    = 0,\n    HasSub    = 0,\n    HasMul    = 0,\n    HasNegate = 0,\n    HasAbs    = 0,\n    HasAbs2   = 0,\n    HasMin    = 0,\n    HasMax    = 0,\n    HasConj   = 0,\n    HasSetLinear = 0\n  };\n};\n\ntemplate<typename T> struct packet_traits<const T> : packet_traits<T> { };\n\ntemplate <typename Src, typename Tgt> struct type_casting_traits {\n  enum {\n    VectorizedCast = 0,\n    SrcCoeffRatio = 1,\n    TgtCoeffRatio = 1\n  };\n};\n\n\n/** \\internal \\returns static_cast<TgtType>(a) (coeff-wise) */\ntemplate <typename SrcPacket, typename TgtPacket>\nEIGEN_DEVICE_FUNC inline TgtPacket\npcast(const SrcPacket& a) {\n  return static_cast<TgtPacket>(a);\n}\ntemplate <typename SrcPacket, typename TgtPacket>\nEIGEN_DEVICE_FUNC inline TgtPacket\npcast(const SrcPacket& a, const SrcPacket& /*b*/) {\n  return static_cast<TgtPacket>(a);\n}\n\ntemplate <typename SrcPacket, typename TgtPacket>\nEIGEN_DEVICE_FUNC inline TgtPacket\npcast(const SrcPacket& a, const SrcPacket& /*b*/, const SrcPacket& /*c*/, const SrcPacket& /*d*/) {\n  return static_cast<TgtPacket>(a);\n}\n\n/** \\internal \\returns a + b (coeff-wise) */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npadd(const Packet& a,\n        const Packet& b) { return a+b; }\n\n/** \\internal \\returns a - b (coeff-wise) */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npsub(const Packet& a,\n        const Packet& b) { return a-b; }\n\n/** \\internal \\returns -a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npnegate(const Packet& a) { return -a; }\n\n/** \\internal \\returns conj(a) (coeff-wise) */\n\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline 
Packet\npconj(const Packet& a) { return numext::conj(a); }\n\n/** \\internal \\returns a * b (coeff-wise) */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npmul(const Packet& a,\n        const Packet& b) { return a*b; }\n\n/** \\internal \\returns a / b (coeff-wise) */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npdiv(const Packet& a,\n        const Packet& b) { return a/b; }\n\n/** \\internal \\returns the min of \\a a and \\a b  (coeff-wise) */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npmin(const Packet& a,\n        const Packet& b) { return numext::mini(a, b); }\n\n/** \\internal \\returns the max of \\a a and \\a b  (coeff-wise) */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npmax(const Packet& a,\n        const Packet& b) { return numext::maxi(a, b); }\n\n/** \\internal \\returns the absolute value of \\a a */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npabs(const Packet& a) { using std::abs; return abs(a); }\n\n/** \\internal \\returns the phase angle of \\a a */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\nparg(const Packet& a) { using numext::arg; return arg(a); }\n\n/** \\internal \\returns the bitwise and of \\a a and \\a b */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npand(const Packet& a, const Packet& b) { return a & b; }\n\n/** \\internal \\returns the bitwise or of \\a a and \\a b */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npor(const Packet& a, const Packet& b) { return a | b; }\n\n/** \\internal \\returns the bitwise xor of \\a a and \\a b */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npxor(const Packet& a, const Packet& b) { return a ^ b; }\n\n/** \\internal \\returns the bitwise andnot of \\a a and \\a b */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npandnot(const Packet& a, const Packet& b) { return a & (!b); }\n\n/** \\internal \\returns a packet version of \\a *from, from must be 16 
bytes aligned */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npload(const typename unpacket_traits<Packet>::type* from) { return *from; }\n\n/** \\internal \\returns a packet version of \\a *from, (un-aligned load) */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\nploadu(const typename unpacket_traits<Packet>::type* from) { return *from; }\n\n/** \\internal \\returns a packet with constant coefficients \\a a, e.g.: (a,a,a,a) */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npset1(const typename unpacket_traits<Packet>::type& a) { return a; }\n\n/** \\internal \\returns a packet with constant coefficients \\a a[0], e.g.: (a[0],a[0],a[0],a[0]) */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npload1(const typename unpacket_traits<Packet>::type  *a) { return pset1<Packet>(*a); }\n\n/** \\internal \\returns a packet with elements of \\a *from duplicated.\n  * For instance, for a packet of 8 elements, 4 scalars will be read from \\a *from and\n  * duplicated to form: {from[0],from[0],from[1],from[1],from[2],from[2],from[3],from[3]}\n  * Currently, this function is only used for scalar * complex products.\n  */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet\nploaddup(const typename unpacket_traits<Packet>::type* from) { return *from; }\n\n/** \\internal \\returns a packet with elements of \\a *from quadrupled.\n  * For instance, for a packet of 8 elements, 2 scalars will be read from \\a *from and\n  * replicated to form: {from[0],from[0],from[0],from[0],from[1],from[1],from[1],from[1]}\n  * Currently, this function is only used in matrix products.\n  * For packet-size smaller or equal to 4, this function is equivalent to pload1 \n  */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\nploadquad(const typename unpacket_traits<Packet>::type* from)\n{ return pload1<Packet>(from); }\n\n/** \\internal equivalent to\n  * \\code\n  * a0 = pload1(a+0);\n  * a1 = pload1(a+1);\n  * a2 = 
pload1(a+2);\n  * a3 = pload1(a+3);\n  * \\endcode\n  * \\sa pset1, pload1, ploaddup, pbroadcast2\n  */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC\ninline void pbroadcast4(const typename unpacket_traits<Packet>::type *a,\n                        Packet& a0, Packet& a1, Packet& a2, Packet& a3)\n{\n  a0 = pload1<Packet>(a+0);\n  a1 = pload1<Packet>(a+1);\n  a2 = pload1<Packet>(a+2);\n  a3 = pload1<Packet>(a+3);\n}\n\n/** \\internal equivalent to\n  * \\code\n  * a0 = pload1(a+0);\n  * a1 = pload1(a+1);\n  * \\endcode\n  * \\sa pset1, pload1, ploaddup, pbroadcast4\n  */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC\ninline void pbroadcast2(const typename unpacket_traits<Packet>::type *a,\n                        Packet& a0, Packet& a1)\n{\n  a0 = pload1<Packet>(a+0);\n  a1 = pload1<Packet>(a+1);\n}\n\n/** \\internal \\brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet\nplset(const typename unpacket_traits<Packet>::type& a) { return a; }\n\n/** \\internal copy the packet \\a from to \\a *to, \\a to must be 16 bytes aligned */\ntemplate<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstore(Scalar* to, const Packet& from)\n{ (*to) = from; }\n\n/** \\internal copy the packet \\a from to \\a *to, (un-aligned store) */\ntemplate<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pstoreu(Scalar* to, const Packet& from)\n{  (*to) = from; }\n\n template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline Packet pgather(const Scalar* from, Index /*stride*/)\n { return ploadu<Packet>(from); }\n\n template<typename Scalar, typename Packet> EIGEN_DEVICE_FUNC inline void pscatter(Scalar* to, const Packet& from, Index /*stride*/)\n { pstore(to, from); }\n\n/** \\internal tries to do cache prefetching of \\a addr */\ntemplate<typename Scalar> EIGEN_DEVICE_FUNC inline void prefetch(const Scalar* addr)\n{\n#ifdef __CUDA_ARCH__\n#if defined(__LP64__)\n  // 
64-bit pointer operand constraint for inlined asm\n  asm(\" prefetch.L1 [ %1 ];\" : \"=l\"(addr) : \"l\"(addr));\n#else\n  // 32-bit pointer operand constraint for inlined asm\n  asm(\" prefetch.L1 [ %1 ];\" : \"=r\"(addr) : \"r\"(addr));\n#endif\n#elif (!EIGEN_COMP_MSVC) && (EIGEN_COMP_GNUC || EIGEN_COMP_CLANG || EIGEN_COMP_ICC)\n  __builtin_prefetch(addr);\n#endif\n}\n\n/** \\internal \\returns the first element of a packet */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type pfirst(const Packet& a)\n{ return a; }\n\n/** \\internal \\returns a packet where the element i contains the sum of the packet of \\a vec[i] */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npreduxp(const Packet* vecs) { return vecs[0]; }\n\n/** \\internal \\returns the sum of the elements of \\a a*/\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux(const Packet& a)\n{ return a; }\n\n/** \\internal \\returns the sum of the elements of \\a a by block of 4 elements.\n  * For a packet {a0, a1, a2, a3, a4, a5, a6, a7}, it returns a half packet {a0+a4, a1+a5, a2+a6, a3+a7}\n  * For packet-size smaller or equal to 4, this boils down to a noop.\n  */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline\ntypename conditional<(unpacket_traits<Packet>::size%8)==0,typename unpacket_traits<Packet>::half,Packet>::type\npredux_downto4(const Packet& a)\n{ return a; }\n\n/** \\internal \\returns the product of the elements of \\a a*/\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_mul(const Packet& a)\n{ return a; }\n\n/** \\internal \\returns the min of the elements of \\a a*/\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(const Packet& a)\n{ return a; }\n\n/** \\internal \\returns the max of the elements of \\a a*/\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline typename 
unpacket_traits<Packet>::type predux_max(const Packet& a)\n{ return a; }\n\n/** \\internal \\returns the reversed elements of \\a a*/\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet preverse(const Packet& a)\n{ return a; }\n\n/** \\internal \\returns \\a a with real and imaginary part flipped (for complex type only) */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet pcplxflip(const Packet& a)\n{\n  // FIXME: uncomment the following in case we drop the internal imag and real functions.\n//   using std::imag;\n//   using std::real;\n  return Packet(imag(a),real(a));\n}\n\n/**************************\n* Special math functions\n***************************/\n\n/** \\internal \\returns the sine of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket psin(const Packet& a) { using std::sin; return sin(a); }\n\n/** \\internal \\returns the cosine of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket pcos(const Packet& a) { using std::cos; return cos(a); }\n\n/** \\internal \\returns the tan of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket ptan(const Packet& a) { using std::tan; return tan(a); }\n\n/** \\internal \\returns the arc sine of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket pasin(const Packet& a) { using std::asin; return asin(a); }\n\n/** \\internal \\returns the arc cosine of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket pacos(const Packet& a) { using std::acos; return acos(a); }\n\n/** \\internal \\returns the arc tangent of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket patan(const Packet& a) { using std::atan; return atan(a); }\n\n/** \\internal \\returns the hyperbolic sine 
of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket psinh(const Packet& a) { using std::sinh; return sinh(a); }\n\n/** \\internal \\returns the hyperbolic cosine of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket pcosh(const Packet& a) { using std::cosh; return cosh(a); }\n\n/** \\internal \\returns the hyperbolic tan of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket ptanh(const Packet& a) { using std::tanh; return tanh(a); }\n\n/** \\internal \\returns the exp of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket pexp(const Packet& a) { using std::exp; return exp(a); }\n\n/** \\internal \\returns the expm1 of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket pexpm1(const Packet& a) { return numext::expm1(a); }\n\n/** \\internal \\returns the log of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket plog(const Packet& a) { using std::log; return log(a); }\n\n/** \\internal \\returns the log1p of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket plog1p(const Packet& a) { return numext::log1p(a); }\n\n/** \\internal \\returns the log10 of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket plog10(const Packet& a) { using std::log10; return log10(a); }\n\n/** \\internal \\returns the square-root of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket psqrt(const Packet& a) { using std::sqrt; return sqrt(a); }\n\n/** \\internal \\returns the reciprocal square-root of \\a a (coeff-wise) */\ntemplate<typename Packet> 
EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket prsqrt(const Packet& a) {\n  return pdiv(pset1<Packet>(1), psqrt(a));\n}\n\n/** \\internal \\returns the rounded value of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket pround(const Packet& a) { using numext::round; return round(a); }\n\n/** \\internal \\returns the floor of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket pfloor(const Packet& a) { using numext::floor; return floor(a); }\n\n/** \\internal \\returns the ceil of \\a a (coeff-wise) */\ntemplate<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS\nPacket pceil(const Packet& a) { using numext::ceil; return ceil(a); }\n\n/***************************************************************************\n* The following functions might not have to be overwritten for vectorized types\n***************************************************************************/\n\n/** \\internal copy a packet with constant coeficient \\a a (e.g., [a,a,a,a]) to \\a *to. \\a to must be 16 bytes aligned */\n// NOTE: this function must really be templated on the packet type (think about different packet types for the same scalar type)\ntemplate<typename Packet>\ninline void pstore1(typename unpacket_traits<Packet>::type* to, const typename unpacket_traits<Packet>::type& a)\n{\n  pstore(to, pset1<Packet>(a));\n}\n\n/** \\internal \\returns a * b + c (coeff-wise) */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npmadd(const Packet&  a,\n         const Packet&  b,\n         const Packet&  c)\n{ return padd(pmul(a, b),c); }\n\n/** \\internal \\returns a packet version of \\a *from.\n  * The pointer \\a from must be aligned on a \\a Alignment bytes boundary. 
*/\ntemplate<typename Packet, int Alignment>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt(const typename unpacket_traits<Packet>::type* from)\n{\n  if(Alignment >= unpacket_traits<Packet>::alignment)\n    return pload<Packet>(from);\n  else\n    return ploadu<Packet>(from);\n}\n\n/** \\internal copy the packet \\a from to \\a *to.\n  * The pointer \\a from must be aligned on a \\a Alignment bytes boundary. */\ntemplate<typename Scalar, typename Packet, int Alignment>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void pstoret(Scalar* to, const Packet& from)\n{\n  if(Alignment >= unpacket_traits<Packet>::alignment)\n    pstore(to, from);\n  else\n    pstoreu(to, from);\n}\n\n/** \\internal \\returns a packet version of \\a *from.\n  * Unlike ploadt, ploadt_ro takes advantage of the read-only memory path on the\n  * hardware if available to speedup the loading of data that won't be modified\n  * by the current computation.\n  */\ntemplate<typename Packet, int LoadMode>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet ploadt_ro(const typename unpacket_traits<Packet>::type* from)\n{\n  return ploadt<Packet, LoadMode>(from);\n}\n\n/** \\internal default implementation of palign() allowing partial specialization */\ntemplate<int Offset,typename PacketType>\nstruct palign_impl\n{\n  // by default data are aligned, so there is nothing to be done :)\n  static inline void run(PacketType&, const PacketType&) {}\n};\n\n/** \\internal update \\a first using the concatenation of the packet_size minus \\a Offset last elements\n  * of \\a first and \\a Offset first elements of \\a second.\n  * \n  * This function is currently only used to optimize matrix-vector products on unligned matrices.\n  * It takes 2 packets that represent a contiguous memory array, and returns a packet starting\n  * at the position \\a Offset. 
For instance, for packets of 4 elements, we have:\n  *  Input:\n  *  - first = {f0,f1,f2,f3}\n  *  - second = {s0,s1,s2,s3}\n  * Output: \n  *   - if Offset==0 then {f0,f1,f2,f3}\n  *   - if Offset==1 then {f1,f2,f3,s0}\n  *   - if Offset==2 then {f2,f3,s0,s1}\n  *   - if Offset==3 then {f3,s0,s1,s3}\n  */\ntemplate<int Offset,typename PacketType>\ninline void palign(PacketType& first, const PacketType& second)\n{\n  palign_impl<Offset,PacketType>::run(first,second);\n}\n\n/***************************************************************************\n* Fast complex products (GCC generates a function call which is very slow)\n***************************************************************************/\n\n// Eigen+CUDA does not support complexes.\n#ifndef __CUDACC__\n\ntemplate<> inline std::complex<float> pmul(const std::complex<float>& a, const std::complex<float>& b)\n{ return std::complex<float>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }\n\ntemplate<> inline std::complex<double> pmul(const std::complex<double>& a, const std::complex<double>& b)\n{ return std::complex<double>(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); }\n\n#endif\n\n\n/***************************************************************************\n * PacketBlock, that is a collection of N packets where the number of words\n * in the packet is a multiple of N.\n***************************************************************************/\ntemplate <typename Packet,int N=unpacket_traits<Packet>::size> struct PacketBlock {\n  Packet packet[N];\n};\n\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet,1>& /*kernel*/) {\n  // Nothing to do in the scalar case, i.e. a 1x1 matrix.\n}\n\n/***************************************************************************\n * Selector, i.e. vector of N boolean values used to select (i.e. 
blend)\n * words from 2 packets.\n***************************************************************************/\ntemplate <size_t N> struct Selector {\n  bool select[N];\n};\n\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npblend(const Selector<unpacket_traits<Packet>::size>& ifPacket, const Packet& thenPacket, const Packet& elsePacket) {\n  return ifPacket.select[0] ? thenPacket : elsePacket;\n}\n\n/** \\internal \\returns \\a a with the first coefficient replaced by the scalar b */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npinsertfirst(const Packet& a, typename unpacket_traits<Packet>::type b)\n{\n  // Default implementation based on pblend.\n  // It must be specialized for higher performance.\n  Selector<unpacket_traits<Packet>::size> mask;\n  mask.select[0] = true;\n  // This for loop should be optimized away by the compiler.\n  for(Index i=1; i<unpacket_traits<Packet>::size; ++i)\n    mask.select[i] = false;\n  return pblend(mask, pset1<Packet>(b), a);\n}\n\n/** \\internal \\returns \\a a with the last coefficient replaced by the scalar b */\ntemplate<typename Packet> EIGEN_DEVICE_FUNC inline Packet\npinsertlast(const Packet& a, typename unpacket_traits<Packet>::type b)\n{\n  // Default implementation based on pblend.\n  // It must be specialized for higher performance.\n  Selector<unpacket_traits<Packet>::size> mask;\n  // This for loop should be optimized away by the compiler.\n  for(Index i=0; i<unpacket_traits<Packet>::size-1; ++i)\n    mask.select[i] = false;\n  mask.select[unpacket_traits<Packet>::size-1] = true;\n  return pblend(mask, pset1<Packet>(b), a);\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_GENERIC_PACKET_MATH_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/GlobalFunctions.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2010-2016 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_GLOBAL_FUNCTIONS_H\n#define EIGEN_GLOBAL_FUNCTIONS_H\n\n#ifdef EIGEN_PARSED_BY_DOXYGEN\n\n#define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME,FUNCTOR,DOC_OP,DOC_DETAILS) \\\n  /** \\returns an expression of the coefficient-wise DOC_OP of \\a x\n\n    DOC_DETAILS\n\n    \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_##NAME\">Math functions</a>, class CwiseUnaryOp\n    */ \\\n  template<typename Derived> \\\n  inline const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> \\\n  NAME(const Eigen::ArrayBase<Derived>& x);\n\n#else\n\n#define EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(NAME,FUNCTOR,DOC_OP,DOC_DETAILS) \\\n  template<typename Derived> \\\n  inline const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> \\\n  (NAME)(const Eigen::ArrayBase<Derived>& x) { \\\n    return Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived>(x.derived()); \\\n  }\n\n#endif // EIGEN_PARSED_BY_DOXYGEN\n\n#define EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(NAME,FUNCTOR) \\\n  \\\n  template<typename Derived> \\\n  struct NAME##_retval<ArrayBase<Derived> > \\\n  { \\\n    typedef const Eigen::CwiseUnaryOp<Eigen::internal::FUNCTOR<typename Derived::Scalar>, const Derived> type; \\\n  }; \\\n  template<typename Derived> \\\n  struct NAME##_impl<ArrayBase<Derived> > \\\n  { \\\n    static inline typename NAME##_retval<ArrayBase<Derived> >::type run(const Eigen::ArrayBase<Derived>& x) \\\n    { \\\n      return typename 
NAME##_retval<ArrayBase<Derived> >::type(x.derived()); \\\n    } \\\n  };\n\nnamespace Eigen\n{\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(real,scalar_real_op,real part,\\sa ArrayBase::real)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(imag,scalar_imag_op,imaginary part,\\sa ArrayBase::imag)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(conj,scalar_conjugate_op,complex conjugate,\\sa ArrayBase::conjugate)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(inverse,scalar_inverse_op,inverse,\\sa ArrayBase::inverse)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sin,scalar_sin_op,sine,\\sa ArrayBase::sin)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cos,scalar_cos_op,cosine,\\sa ArrayBase::cos)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tan,scalar_tan_op,tangent,\\sa ArrayBase::tan)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(atan,scalar_atan_op,arc-tangent,\\sa ArrayBase::atan)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(asin,scalar_asin_op,arc-sine,\\sa ArrayBase::asin)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(acos,scalar_acos_op,arc-consine,\\sa ArrayBase::acos)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sinh,scalar_sinh_op,hyperbolic sine,\\sa ArrayBase::sinh)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cosh,scalar_cosh_op,hyperbolic cosine,\\sa ArrayBase::cosh)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(tanh,scalar_tanh_op,hyperbolic tangent,\\sa ArrayBase::tanh)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(lgamma,scalar_lgamma_op,natural logarithm of the gamma function,\\sa ArrayBase::lgamma)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(digamma,scalar_digamma_op,derivative of lgamma,\\sa ArrayBase::digamma)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erf,scalar_erf_op,error function,\\sa ArrayBase::erf)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(erfc,scalar_erfc_op,complement error function,\\sa ArrayBase::erfc)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(exp,scalar_exp_op,exponential,\\sa ArrayBase::exp)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(expm1,scalar_expm1_op,exponential of a value minus 1,\\sa ArrayBase::expm1)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log,scalar_log_op,natural logarithm,\\sa Eigen::log10 
DOXCOMMA ArrayBase::log)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log1p,scalar_log1p_op,natural logarithm of 1 plus the value,\\sa ArrayBase::log1p)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(log10,scalar_log10_op,base 10 logarithm,\\sa Eigen::log DOXCOMMA ArrayBase::log)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs,scalar_abs_op,absolute value,\\sa ArrayBase::abs DOXCOMMA MatrixBase::cwiseAbs)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(abs2,scalar_abs2_op,squared absolute value,\\sa ArrayBase::abs2 DOXCOMMA MatrixBase::cwiseAbs2)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(arg,scalar_arg_op,complex argument,\\sa ArrayBase::arg)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sqrt,scalar_sqrt_op,square root,\\sa ArrayBase::sqrt DOXCOMMA MatrixBase::cwiseSqrt)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(rsqrt,scalar_rsqrt_op,reciprocal square root,\\sa ArrayBase::rsqrt)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(square,scalar_square_op,square (power 2),\\sa Eigen::abs2 DOXCOMMA Eigen::pow DOXCOMMA ArrayBase::square)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(cube,scalar_cube_op,cube (power 3),\\sa Eigen::pow DOXCOMMA ArrayBase::cube)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(round,scalar_round_op,nearest integer,\\sa Eigen::floor DOXCOMMA Eigen::ceil DOXCOMMA ArrayBase::round)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(floor,scalar_floor_op,nearest integer not greater than the giben value,\\sa Eigen::ceil DOXCOMMA ArrayBase::floor)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(ceil,scalar_ceil_op,nearest integer not less than the giben value,\\sa Eigen::floor DOXCOMMA ArrayBase::ceil)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isnan,scalar_isnan_op,not-a-number test,\\sa Eigen::isinf DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isnan)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isinf,scalar_isinf_op,infinite value test,\\sa Eigen::isnan DOXCOMMA Eigen::isfinite DOXCOMMA ArrayBase::isinf)\n  EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(isfinite,scalar_isfinite_op,finite value test,\\sa Eigen::isinf DOXCOMMA Eigen::isnan DOXCOMMA ArrayBase::isfinite)\n  
EIGEN_ARRAY_DECLARE_GLOBAL_UNARY(sign,scalar_sign_op,sign (or 0),\\sa ArrayBase::sign)\n  \n  /** \\returns an expression of the coefficient-wise power of \\a x to the given constant \\a exponent.\n    *\n    * \\tparam ScalarExponent is the scalar type of \\a exponent. It must be compatible with the scalar type of the given expression (\\c Derived::Scalar).\n    *\n    * \\sa ArrayBase::pow()\n    *\n    * \\relates ArrayBase\n    */\n#ifdef EIGEN_PARSED_BY_DOXYGEN\n  template<typename Derived,typename ScalarExponent>\n  inline const CwiseBinaryOp<internal::scalar_pow_op<Derived::Scalar,ScalarExponent>,Derived,Constant<ScalarExponent> >\n  pow(const Eigen::ArrayBase<Derived>& x, const ScalarExponent& exponent);\n#else\n  template<typename Derived,typename ScalarExponent>\n  inline typename internal::enable_if<   !(internal::is_same<typename Derived::Scalar,ScalarExponent>::value) && EIGEN_SCALAR_BINARY_SUPPORTED(pow,typename Derived::Scalar,ScalarExponent),\n          const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,ScalarExponent,pow) >::type\n  pow(const Eigen::ArrayBase<Derived>& x, const ScalarExponent& exponent) {\n    return x.derived().pow(exponent);\n  }\n\n  template<typename Derived>\n  inline const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename Derived::Scalar,pow)\n  pow(const Eigen::ArrayBase<Derived>& x, const typename Derived::Scalar& exponent) {\n    return x.derived().pow(exponent);\n  }\n#endif\n\n  /** \\returns an expression of the coefficient-wise power of \\a x to the given array of \\a exponents.\n    *\n    * This function computes the coefficient-wise power.\n    *\n    * Example: \\include Cwise_array_power_array.cpp\n    * Output: \\verbinclude Cwise_array_power_array.out\n    * \n    * \\sa ArrayBase::pow()\n    *\n    * \\relates ArrayBase\n    */\n  template<typename Derived,typename ExponentDerived>\n  inline const Eigen::CwiseBinaryOp<Eigen::internal::scalar_pow_op<typename Derived::Scalar, typename 
ExponentDerived::Scalar>, const Derived, const ExponentDerived>\n  pow(const Eigen::ArrayBase<Derived>& x, const Eigen::ArrayBase<ExponentDerived>& exponents) \n  {\n    return Eigen::CwiseBinaryOp<Eigen::internal::scalar_pow_op<typename Derived::Scalar, typename ExponentDerived::Scalar>, const Derived, const ExponentDerived>(\n      x.derived(),\n      exponents.derived()\n    );\n  }\n  \n  /** \\returns an expression of the coefficient-wise power of the scalar \\a x to the given array of \\a exponents.\n    *\n    * This function computes the coefficient-wise power between a scalar and an array of exponents.\n    *\n    * \\tparam Scalar is the scalar type of \\a x. It must be compatible with the scalar type of the given array expression (\\c Derived::Scalar).\n    *\n    * Example: \\include Cwise_scalar_power_array.cpp\n    * Output: \\verbinclude Cwise_scalar_power_array.out\n    * \n    * \\sa ArrayBase::pow()\n    *\n    * \\relates ArrayBase\n    */\n#ifdef EIGEN_PARSED_BY_DOXYGEN\n  template<typename Scalar,typename Derived>\n  inline const CwiseBinaryOp<internal::scalar_pow_op<Scalar,Derived::Scalar>,Constant<Scalar>,Derived>\n  pow(const Scalar& x,const Eigen::ArrayBase<Derived>& x);\n#else\n  template<typename Scalar, typename Derived>\n  inline typename internal::enable_if<   !(internal::is_same<typename Derived::Scalar,Scalar>::value) && EIGEN_SCALAR_BINARY_SUPPORTED(pow,Scalar,typename Derived::Scalar),\n          const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,Derived,pow) >::type\n  pow(const Scalar& x, const Eigen::ArrayBase<Derived>& exponents)\n  {\n    return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,Derived,pow)(\n            typename internal::plain_constant_type<Derived,Scalar>::type(exponents.rows(), exponents.cols(), x), exponents.derived() );\n  }\n\n  template<typename Derived>\n  inline const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename Derived::Scalar,Derived,pow)\n  pow(const typename Derived::Scalar& x, const 
Eigen::ArrayBase<Derived>& exponents)\n  {\n    return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename Derived::Scalar,Derived,pow)(\n      typename internal::plain_constant_type<Derived,typename Derived::Scalar>::type(exponents.rows(), exponents.cols(), x), exponents.derived() );\n  }\n#endif\n\n\n  namespace internal\n  {\n    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real,scalar_real_op)\n    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(imag,scalar_imag_op)\n    EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs2,scalar_abs2_op)\n  }\n}\n\n// TODO: cleanly disable those functions that are not supported on Array (numext::real_ref, internal::random, internal::isApprox...)\n\n#endif // EIGEN_GLOBAL_FUNCTIONS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/IO.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_IO_H\n#define EIGEN_IO_H\n\nnamespace Eigen { \n\nenum { DontAlignCols = 1 };\nenum { StreamPrecision = -1,\n       FullPrecision = -2 };\n\nnamespace internal {\ntemplate<typename Derived>\nstd::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt);\n}\n\n/** \\class IOFormat\n  * \\ingroup Core_Module\n  *\n  * \\brief Stores a set of parameters controlling the way matrices are printed\n  *\n  * List of available parameters:\n  *  - \\b precision number of digits for floating point values, or one of the special constants \\c StreamPrecision and \\c FullPrecision.\n  *                 The default is the special value \\c StreamPrecision which means to use the\n  *                 stream's own precision setting, as set for instance using \\c cout.precision(3). 
The other special value\n  *                 \\c FullPrecision means that the number of digits will be computed to match the full precision of each floating-point\n  *                 type.\n  *  - \\b flags an OR-ed combination of flags, the default value is 0, the only currently available flag is \\c DontAlignCols which\n  *             allows to disable the alignment of columns, resulting in faster code.\n  *  - \\b coeffSeparator string printed between two coefficients of the same row\n  *  - \\b rowSeparator string printed between two rows\n  *  - \\b rowPrefix string printed at the beginning of each row\n  *  - \\b rowSuffix string printed at the end of each row\n  *  - \\b matPrefix string printed at the beginning of the matrix\n  *  - \\b matSuffix string printed at the end of the matrix\n  *\n  * Example: \\include IOFormat.cpp\n  * Output: \\verbinclude IOFormat.out\n  *\n  * \\sa DenseBase::format(), class WithFormat\n  */\nstruct IOFormat\n{\n  /** Default constructor, see class IOFormat for the meaning of the parameters */\n  IOFormat(int _precision = StreamPrecision, int _flags = 0,\n    const std::string& _coeffSeparator = \" \",\n    const std::string& _rowSeparator = \"\\n\", const std::string& _rowPrefix=\"\", const std::string& _rowSuffix=\"\",\n    const std::string& _matPrefix=\"\", const std::string& _matSuffix=\"\")\n  : matPrefix(_matPrefix), matSuffix(_matSuffix), rowPrefix(_rowPrefix), rowSuffix(_rowSuffix), rowSeparator(_rowSeparator),\n    rowSpacer(\"\"), coeffSeparator(_coeffSeparator), precision(_precision), flags(_flags)\n  {\n    // TODO check if rowPrefix, rowSuffix or rowSeparator contains a newline\n    // don't add rowSpacer if columns are not to be aligned\n    if((flags & DontAlignCols))\n      return;\n    int i = int(matSuffix.length())-1;\n    while (i>=0 && matSuffix[i]!='\\n')\n    {\n      rowSpacer += ' ';\n      i--;\n    }\n  }\n  std::string matPrefix, matSuffix;\n  std::string rowPrefix, rowSuffix, rowSeparator, 
rowSpacer;\n  std::string coeffSeparator;\n  int precision;\n  int flags;\n};\n\n/** \\class WithFormat\n  * \\ingroup Core_Module\n  *\n  * \\brief Pseudo expression providing matrix output with given format\n  *\n  * \\tparam ExpressionType the type of the object on which IO stream operations are performed\n  *\n  * This class represents an expression with stream operators controlled by a given IOFormat.\n  * It is the return type of DenseBase::format()\n  * and most of the time this is the only way it is used.\n  *\n  * See class IOFormat for some examples.\n  *\n  * \\sa DenseBase::format(), class IOFormat\n  */\ntemplate<typename ExpressionType>\nclass WithFormat\n{\n  public:\n\n    WithFormat(const ExpressionType& matrix, const IOFormat& format)\n      : m_matrix(matrix), m_format(format)\n    {}\n\n    friend std::ostream & operator << (std::ostream & s, const WithFormat& wf)\n    {\n      return internal::print_matrix(s, wf.m_matrix.eval(), wf.m_format);\n    }\n\n  protected:\n    typename ExpressionType::Nested m_matrix;\n    IOFormat m_format;\n};\n\nnamespace internal {\n\n// NOTE: This helper is kept for backward compatibility with previous code specializing\n//       this internal::significant_decimals_impl structure. 
In the future we should directly\n//       call digits10() which has been introduced in July 2016 in 3.3.\ntemplate<typename Scalar>\nstruct significant_decimals_impl\n{\n  static inline int run()\n  {\n    return NumTraits<Scalar>::digits10();\n  }\n};\n\n/** \\internal\n  * print the matrix \\a _m to the output stream \\a s using the output format \\a fmt */\ntemplate<typename Derived>\nstd::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt)\n{\n  if(_m.size() == 0)\n  {\n    s << fmt.matPrefix << fmt.matSuffix;\n    return s;\n  }\n  \n  typename Derived::Nested m = _m;\n  typedef typename Derived::Scalar Scalar;\n\n  Index width = 0;\n\n  std::streamsize explicit_precision;\n  if(fmt.precision == StreamPrecision)\n  {\n    explicit_precision = 0;\n  }\n  else if(fmt.precision == FullPrecision)\n  {\n    if (NumTraits<Scalar>::IsInteger)\n    {\n      explicit_precision = 0;\n    }\n    else\n    {\n      explicit_precision = significant_decimals_impl<Scalar>::run();\n    }\n  }\n  else\n  {\n    explicit_precision = fmt.precision;\n  }\n\n  std::streamsize old_precision = 0;\n  if(explicit_precision) old_precision = s.precision(explicit_precision);\n\n  bool align_cols = !(fmt.flags & DontAlignCols);\n  if(align_cols)\n  {\n    // compute the largest width\n    for(Index j = 0; j < m.cols(); ++j)\n      for(Index i = 0; i < m.rows(); ++i)\n      {\n        std::stringstream sstr;\n        sstr.copyfmt(s);\n        sstr << m.coeff(i,j);\n        width = std::max<Index>(width, Index(sstr.str().length()));\n      }\n  }\n  s << fmt.matPrefix;\n  for(Index i = 0; i < m.rows(); ++i)\n  {\n    if (i)\n      s << fmt.rowSpacer;\n    s << fmt.rowPrefix;\n    if(width) s.width(width);\n    s << m.coeff(i, 0);\n    for(Index j = 1; j < m.cols(); ++j)\n    {\n      s << fmt.coeffSeparator;\n      if (width) s.width(width);\n      s << m.coeff(i, j);\n    }\n    s << fmt.rowSuffix;\n    if( i < m.rows() - 1)\n      s << fmt.rowSeparator;\n  
}\n  s << fmt.matSuffix;\n  if(explicit_precision) s.precision(old_precision);\n  return s;\n}\n\n} // end namespace internal\n\n/** \\relates DenseBase\n  *\n  * Outputs the matrix, to the given stream.\n  *\n  * If you wish to print the matrix with a format different than the default, use DenseBase::format().\n  *\n  * It is also possible to change the default format by defining EIGEN_DEFAULT_IO_FORMAT before including Eigen headers.\n  * If not defined, this will automatically be defined to Eigen::IOFormat(), that is the Eigen::IOFormat with default parameters.\n  *\n  * \\sa DenseBase::format()\n  */\ntemplate<typename Derived>\nstd::ostream & operator <<\n(std::ostream & s,\n const DenseBase<Derived> & m)\n{\n  return internal::print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT);\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_IO_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/IndexedView.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_INDEXED_VIEW_H\n#define EIGEN_INDEXED_VIEW_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<typename XprType, typename RowIndices, typename ColIndices>\nstruct traits<IndexedView<XprType, RowIndices, ColIndices> >\n : traits<XprType>\n{\n  enum {\n    RowsAtCompileTime = int(array_size<RowIndices>::value),\n    ColsAtCompileTime = int(array_size<ColIndices>::value),\n    MaxRowsAtCompileTime = RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime) : int(traits<XprType>::MaxRowsAtCompileTime),\n    MaxColsAtCompileTime = ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime) : int(traits<XprType>::MaxColsAtCompileTime),\n\n    XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,\n    IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1\n               : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0\n               : XprTypeIsRowMajor,\n\n    RowIncr = int(get_compile_time_incr<RowIndices>::value),\n    ColIncr = int(get_compile_time_incr<ColIndices>::value),\n    InnerIncr = IsRowMajor ? ColIncr : RowIncr,\n    OuterIncr = IsRowMajor ? RowIncr : ColIncr,\n\n    HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),\n    XprInnerStride = HasSameStorageOrderAsXprType ? int(inner_stride_at_compile_time<XprType>::ret) : int(outer_stride_at_compile_time<XprType>::ret),\n    XprOuterstride = HasSameStorageOrderAsXprType ? int(outer_stride_at_compile_time<XprType>::ret) : int(inner_stride_at_compile_time<XprType>::ret),\n\n    InnerSize = XprTypeIsRowMajor ? 
ColsAtCompileTime : RowsAtCompileTime,\n    IsBlockAlike = InnerIncr==1 && OuterIncr==1,\n    IsInnerPannel = HasSameStorageOrderAsXprType && is_same<AllRange<InnerSize>,typename conditional<XprTypeIsRowMajor,ColIndices,RowIndices>::type>::value,\n\n    InnerStrideAtCompileTime = InnerIncr<0 || InnerIncr==DynamicIndex || XprInnerStride==Dynamic ? Dynamic : XprInnerStride * InnerIncr,\n    OuterStrideAtCompileTime = OuterIncr<0 || OuterIncr==DynamicIndex || XprOuterstride==Dynamic ? Dynamic : XprOuterstride * OuterIncr,\n\n    ReturnAsScalar = is_same<RowIndices,SingleRange>::value && is_same<ColIndices,SingleRange>::value,\n    ReturnAsBlock = (!ReturnAsScalar) && IsBlockAlike,\n    ReturnAsIndexedView = (!ReturnAsScalar) && (!ReturnAsBlock),\n\n    // FIXME we deal with compile-time strides if and only if we have DirectAccessBit flag,\n    // but this is too strict regarding negative strides...\n    DirectAccessMask = (int(InnerIncr)!=UndefinedIncr && int(OuterIncr)!=UndefinedIncr && InnerIncr>=0 && OuterIncr>=0) ? DirectAccessBit : 0,\n    FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,\n    FlagsLvalueBit = is_lvalue<XprType>::value ? 
LvalueBit : 0,\n    Flags = (traits<XprType>::Flags & (HereditaryBits | DirectAccessMask)) | FlagsLvalueBit | FlagsRowMajorBit\n  };\n\n  typedef Block<XprType,RowsAtCompileTime,ColsAtCompileTime,IsInnerPannel> BlockType;\n};\n\n}\n\ntemplate<typename XprType, typename RowIndices, typename ColIndices, typename StorageKind>\nclass IndexedViewImpl;\n\n\n/** \\class IndexedView\n  * \\ingroup Core_Module\n  *\n  * \\brief Expression of a non-sequential sub-matrix defined by arbitrary sequences of row and column indices\n  *\n  * \\tparam XprType the type of the expression in which we are taking the intersections of sub-rows and sub-columns\n  * \\tparam RowIndices the type of the object defining the sequence of row indices\n  * \\tparam ColIndices the type of the object defining the sequence of column indices\n  *\n  * This class represents an expression of a sub-matrix (or sub-vector) defined as the intersection\n  * of sub-sets of rows and columns, that are themself defined by generic sequences of row indices \\f$ \\{r_0,r_1,..r_{m-1}\\} \\f$\n  * and column indices \\f$ \\{c_0,c_1,..c_{n-1} \\}\\f$. 
Let \\f$ A \\f$  be the nested matrix, then the resulting matrix \\f$ B \\f$ has \\c m\n  * rows and \\c n columns, and its entries are given by: \\f$ B(i,j) = A(r_i,c_j) \\f$.\n  *\n  * The \\c RowIndices and \\c ColIndices types must be compatible with the following API:\n  * \\code\n  * <integral type> operator[](Index) const;\n  * Index size() const;\n  * \\endcode\n  *\n  * Typical supported types thus include:\n  *  - std::vector<int>\n  *  - std::valarray<int>\n  *  - std::array<int>\n  *  - Plain C arrays: int[N]\n  *  - Eigen::ArrayXi\n  *  - decltype(ArrayXi::LinSpaced(...))\n  *  - Any view/expressions of the previous types\n  *  - Eigen::ArithmeticSequence\n  *  - Eigen::internal::AllRange      (helper for Eigen::all)\n  *  - Eigen::internal::SingleRange  (helper for single index)\n  *  - etc.\n  *\n  * In typical usages of %Eigen, this class should never be used directly. It is the return type of\n  * DenseBase::operator()(const RowIndices&, const ColIndices&).\n  *\n  * \\sa class Block\n  */\ntemplate<typename XprType, typename RowIndices, typename ColIndices>\nclass IndexedView : public IndexedViewImpl<XprType, RowIndices, ColIndices, typename internal::traits<XprType>::StorageKind>\n{\npublic:\n  typedef typename IndexedViewImpl<XprType, RowIndices, ColIndices, typename internal::traits<XprType>::StorageKind>::Base Base;\n  EIGEN_GENERIC_PUBLIC_INTERFACE(IndexedView)\n  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(IndexedView)\n\n  typedef typename internal::ref_selector<XprType>::non_const_type MatrixTypeNested;\n  typedef typename internal::remove_all<XprType>::type NestedExpression;\n\n  template<typename T0, typename T1>\n  IndexedView(XprType& xpr, const T0& rowIndices, const T1& colIndices)\n    : m_xpr(xpr), m_rowIndices(rowIndices), m_colIndices(colIndices)\n  {}\n\n  /** \\returns number of rows */\n  Index rows() const { return internal::size(m_rowIndices); }\n\n  /** \\returns number of columns */\n  Index cols() const { return 
internal::size(m_colIndices); }\n\n  /** \\returns the nested expression */\n  const typename internal::remove_all<XprType>::type&\n  nestedExpression() const { return m_xpr; }\n\n  /** \\returns the nested expression */\n  typename internal::remove_reference<XprType>::type&\n  nestedExpression() { return m_xpr.const_cast_derived(); }\n\n  /** \\returns a const reference to the object storing/generating the row indices */\n  const RowIndices& rowIndices() const { return m_rowIndices; }\n\n  /** \\returns a const reference to the object storing/generating the column indices */\n  const ColIndices& colIndices() const { return m_colIndices; }\n\nprotected:\n  MatrixTypeNested m_xpr;\n  RowIndices m_rowIndices;\n  ColIndices m_colIndices;\n};\n\n\n// Generic API dispatcher\ntemplate<typename XprType, typename RowIndices, typename ColIndices, typename StorageKind>\nclass IndexedViewImpl\n  : public internal::generic_xpr_base<IndexedView<XprType, RowIndices, ColIndices> >::type\n{\npublic:\n  typedef typename internal::generic_xpr_base<IndexedView<XprType, RowIndices, ColIndices> >::type Base;\n};\n\nnamespace internal {\n\n\ntemplate<typename ArgType, typename RowIndices, typename ColIndices>\nstruct unary_evaluator<IndexedView<ArgType, RowIndices, ColIndices>, IndexBased>\n  : evaluator_base<IndexedView<ArgType, RowIndices, ColIndices> >\n{\n  typedef IndexedView<ArgType, RowIndices, ColIndices> XprType;\n\n  enum {\n    CoeffReadCost = evaluator<ArgType>::CoeffReadCost /* TODO + cost of row/col index */,\n\n    Flags = (evaluator<ArgType>::Flags & (HereditaryBits /*| LinearAccessBit | DirectAccessBit*/)),\n\n    Alignment = 0\n  };\n\n  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_xpr(xpr)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n\n  typedef typename XprType::Scalar Scalar;\n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  
CoeffReturnType coeff(Index row, Index col) const\n  {\n    return m_argImpl.coeff(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  Scalar& coeffRef(Index row, Index col)\n  {\n    return m_argImpl.coeffRef(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);\n  }\n\nprotected:\n\n  evaluator<ArgType> m_argImpl;\n  const XprType& m_xpr;\n\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_INDEXED_VIEW_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Inverse.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_INVERSE_H\n#define EIGEN_INVERSE_H\n\nnamespace Eigen { \n\ntemplate<typename XprType,typename StorageKind> class InverseImpl;\n\nnamespace internal {\n\ntemplate<typename XprType>\nstruct traits<Inverse<XprType> >\n  : traits<typename XprType::PlainObject>\n{\n  typedef typename XprType::PlainObject PlainObject;\n  typedef traits<PlainObject> BaseTraits;\n  enum {\n    Flags = BaseTraits::Flags & RowMajorBit\n  };\n};\n\n} // end namespace internal\n\n/** \\class Inverse\n  *\n  * \\brief Expression of the inverse of another expression\n  *\n  * \\tparam XprType the type of the expression we are taking the inverse\n  *\n  * This class represents an abstract expression of A.inverse()\n  * and most of the time this is the only way it is used.\n  *\n  */\ntemplate<typename XprType>\nclass Inverse : public InverseImpl<XprType,typename internal::traits<XprType>::StorageKind>\n{\npublic:\n  typedef typename XprType::StorageIndex StorageIndex;\n  typedef typename XprType::PlainObject                       PlainObject;\n  typedef typename XprType::Scalar                            Scalar;\n  typedef typename internal::ref_selector<XprType>::type      XprTypeNested;\n  typedef typename internal::remove_all<XprTypeNested>::type  XprTypeNestedCleaned;\n  typedef typename internal::ref_selector<Inverse>::type Nested;\n  typedef typename internal::remove_all<XprType>::type NestedExpression;\n  \n  explicit EIGEN_DEVICE_FUNC Inverse(const XprType &xpr)\n    : m_xpr(xpr)\n  {}\n\n  EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); }\n  EIGEN_DEVICE_FUNC Index cols() const { 
return m_xpr.cols(); }\n\n  EIGEN_DEVICE_FUNC const XprTypeNestedCleaned& nestedExpression() const { return m_xpr; }\n\nprotected:\n  XprTypeNested m_xpr;\n};\n\n// Generic API dispatcher\ntemplate<typename XprType, typename StorageKind>\nclass InverseImpl\n  : public internal::generic_xpr_base<Inverse<XprType> >::type\n{\npublic:\n  typedef typename internal::generic_xpr_base<Inverse<XprType> >::type Base;\n  typedef typename XprType::Scalar Scalar;\nprivate:\n\n  Scalar coeff(Index row, Index col) const;\n  Scalar coeff(Index i) const;\n};\n\nnamespace internal {\n\n/** \\internal\n  * \\brief Default evaluator for Inverse expression.\n  * \n  * This default evaluator for Inverse expression simply evaluate the inverse into a temporary\n  * by a call to internal::call_assignment_no_alias.\n  * Therefore, inverse implementers only have to specialize Assignment<Dst,Inverse<...>, ...> for\n  * there own nested expression.\n  *\n  * \\sa class Inverse\n  */\ntemplate<typename ArgType>\nstruct unary_evaluator<Inverse<ArgType> >\n  : public evaluator<typename Inverse<ArgType>::PlainObject>\n{\n  typedef Inverse<ArgType> InverseType;\n  typedef typename InverseType::PlainObject PlainObject;\n  typedef evaluator<PlainObject> Base;\n  \n  enum { Flags = Base::Flags | EvalBeforeNestingBit };\n\n  unary_evaluator(const InverseType& inv_xpr)\n    : m_result(inv_xpr.rows(), inv_xpr.cols())\n  {\n    ::new (static_cast<Base*>(this)) Base(m_result);\n    internal::call_assignment_no_alias(m_result, inv_xpr);\n  }\n  \nprotected:\n  PlainObject m_result;\n};\n  \n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_INVERSE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Map.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_MAP_H\n#define EIGEN_MAP_H\n\nnamespace Eigen { \n\nnamespace internal {\ntemplate<typename PlainObjectType, int MapOptions, typename StrideType>\nstruct traits<Map<PlainObjectType, MapOptions, StrideType> >\n  : public traits<PlainObjectType>\n{\n  typedef traits<PlainObjectType> TraitsBase;\n  enum {\n    InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0\n                             ? int(PlainObjectType::InnerStrideAtCompileTime)\n                             : int(StrideType::InnerStrideAtCompileTime),\n    OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0\n                             ? int(PlainObjectType::OuterStrideAtCompileTime)\n                             : int(StrideType::OuterStrideAtCompileTime),\n    Alignment = int(MapOptions)&int(AlignedMask),\n    Flags0 = TraitsBase::Flags & (~NestByRefBit),\n    Flags = is_lvalue<PlainObjectType>::value ? int(Flags0) : (int(Flags0) & ~LvalueBit)\n  };\nprivate:\n  enum { Options }; // Expressions don't have Options\n};\n}\n\n/** \\class Map\n  * \\ingroup Core_Module\n  *\n  * \\brief A matrix or vector expression mapping an existing array of data.\n  *\n  * \\tparam PlainObjectType the equivalent matrix type of the mapped data\n  * \\tparam MapOptions specifies the pointer alignment in bytes. It can be: \\c #Aligned128, , \\c #Aligned64, \\c #Aligned32, \\c #Aligned16, \\c #Aligned8 or \\c #Unaligned.\n  *                The default is \\c #Unaligned.\n  * \\tparam StrideType optionally specifies strides. 
By default, Map assumes the memory layout\n  *                   of an ordinary, contiguous array. This can be overridden by specifying strides.\n  *                   The type passed here must be a specialization of the Stride template, see examples below.\n  *\n  * This class represents a matrix or vector expression mapping an existing array of data.\n  * It can be used to let Eigen interface without any overhead with non-Eigen data structures,\n  * such as plain C arrays or structures from other libraries. By default, it assumes that the\n  * data is laid out contiguously in memory. You can however override this by explicitly specifying\n  * inner and outer strides.\n  *\n  * Here's an example of simply mapping a contiguous array as a \\ref TopicStorageOrders \"column-major\" matrix:\n  * \\include Map_simple.cpp\n  * Output: \\verbinclude Map_simple.out\n  *\n  * If you need to map non-contiguous arrays, you can do so by specifying strides:\n  *\n  * Here's an example of mapping an array as a vector, specifying an inner stride, that is, the pointer\n  * increment between two consecutive coefficients. Here, we're specifying the inner stride as a compile-time\n  * fixed value.\n  * \\include Map_inner_stride.cpp\n  * Output: \\verbinclude Map_inner_stride.out\n  *\n  * Here's an example of mapping an array while specifying an outer stride. Here, since we're mapping\n  * as a column-major matrix, 'outer stride' means the pointer increment between two consecutive columns.\n  * Here, we're specifying the outer stride as a runtime parameter. 
Note that here \\c OuterStride<> is\n  * a short version of \\c OuterStride<Dynamic> because the default template parameter of OuterStride\n  * is  \\c Dynamic\n  * \\include Map_outer_stride.cpp\n  * Output: \\verbinclude Map_outer_stride.out\n  *\n  * For more details and for an example of specifying both an inner and an outer stride, see class Stride.\n  *\n  * \\b Tip: to change the array of data mapped by a Map object, you can use the C++\n  * placement new syntax:\n  *\n  * Example: \\include Map_placement_new.cpp\n  * Output: \\verbinclude Map_placement_new.out\n  *\n  * This class is the return type of PlainObjectBase::Map() but can also be used directly.\n  *\n  * \\sa PlainObjectBase::Map(), \\ref TopicStorageOrders\n  */\ntemplate<typename PlainObjectType, int MapOptions, typename StrideType> class Map\n  : public MapBase<Map<PlainObjectType, MapOptions, StrideType> >\n{\n  public:\n\n    typedef MapBase<Map> Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(Map)\n\n    typedef typename Base::PointerType PointerType;\n    typedef PointerType PointerArgType;\n    EIGEN_DEVICE_FUNC\n    inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; }\n\n    EIGEN_DEVICE_FUNC\n    inline Index innerStride() const\n    {\n      return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline Index outerStride() const\n    {\n      return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()\n           : IsVectorAtCompileTime ? this->size()\n           : int(Flags)&RowMajorBit ? 
this->cols()\n           : this->rows();\n    }\n\n    /** Constructor in the fixed-size case.\n      *\n      * \\param dataPtr pointer to the array to map\n      * \\param stride optional Stride object, passing the strides.\n      */\n    EIGEN_DEVICE_FUNC\n    explicit inline Map(PointerArgType dataPtr, const StrideType& stride = StrideType())\n      : Base(cast_to_pointer_type(dataPtr)), m_stride(stride)\n    {\n      PlainObjectType::Base::_check_template_params();\n    }\n\n    /** Constructor in the dynamic-size vector case.\n      *\n      * \\param dataPtr pointer to the array to map\n      * \\param size the size of the vector expression\n      * \\param stride optional Stride object, passing the strides.\n      */\n    EIGEN_DEVICE_FUNC\n    inline Map(PointerArgType dataPtr, Index size, const StrideType& stride = StrideType())\n      : Base(cast_to_pointer_type(dataPtr), size), m_stride(stride)\n    {\n      PlainObjectType::Base::_check_template_params();\n    }\n\n    /** Constructor in the dynamic-size matrix case.\n      *\n      * \\param dataPtr pointer to the array to map\n      * \\param rows the number of rows of the matrix expression\n      * \\param cols the number of columns of the matrix expression\n      * \\param stride optional Stride object, passing the strides.\n      */\n    EIGEN_DEVICE_FUNC\n    inline Map(PointerArgType dataPtr, Index rows, Index cols, const StrideType& stride = StrideType())\n      : Base(cast_to_pointer_type(dataPtr), rows, cols), m_stride(stride)\n    {\n      PlainObjectType::Base::_check_template_params();\n    }\n\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)\n\n  protected:\n    StrideType m_stride;\n};\n\n\n} // end namespace Eigen\n\n#endif // EIGEN_MAP_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/MapBase.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_MAPBASE_H\n#define EIGEN_MAPBASE_H\n\n#define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \\\n      EIGEN_STATIC_ASSERT((int(internal::evaluator<Derived>::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \\\n                          YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT)\n\nnamespace Eigen { \n\n/** \\ingroup Core_Module\n  *\n  * \\brief Base class for dense Map and Block expression with direct access\n  *\n  * This base class provides the const low-level accessors (e.g. 
coeff, coeffRef) of dense\n  * Map and Block objects with direct access.\n  * Typical users do not have to directly deal with this class.\n  *\n  * This class can be extended by through the macro plugin \\c EIGEN_MAPBASE_PLUGIN.\n  * See \\link TopicCustomizing_Plugins customizing Eigen \\endlink for details.\n  *\n  * The \\c Derived class has to provide the following two methods describing the memory layout:\n  *  \\code Index innerStride() const; \\endcode\n  *  \\code Index outerStride() const; \\endcode\n  *\n  * \\sa class Map, class Block\n  */\ntemplate<typename Derived> class MapBase<Derived, ReadOnlyAccessors>\n  : public internal::dense_xpr_base<Derived>::type\n{\n  public:\n\n    typedef typename internal::dense_xpr_base<Derived>::type Base;\n    enum {\n      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,\n      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,\n      SizeAtCompileTime = Base::SizeAtCompileTime\n    };\n\n    typedef typename internal::traits<Derived>::StorageKind StorageKind;\n    typedef typename internal::traits<Derived>::Scalar Scalar;\n    typedef typename internal::packet_traits<Scalar>::type PacketScalar;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    typedef typename internal::conditional<\n                         bool(internal::is_lvalue<Derived>::value),\n                         Scalar *,\n                         const Scalar *>::type\n                     PointerType;\n\n    using Base::derived;\n//    using Base::RowsAtCompileTime;\n//    using Base::ColsAtCompileTime;\n//    using Base::SizeAtCompileTime;\n    using Base::MaxRowsAtCompileTime;\n    using Base::MaxColsAtCompileTime;\n    using Base::MaxSizeAtCompileTime;\n    using Base::IsVectorAtCompileTime;\n    using Base::Flags;\n    using Base::IsRowMajor;\n\n    using Base::rows;\n    using Base::cols;\n    using Base::size;\n    using Base::coeff;\n    using Base::coeffRef;\n    using Base::lazyAssign;\n    using 
Base::eval;\n\n    using Base::innerStride;\n    using Base::outerStride;\n    using Base::rowStride;\n    using Base::colStride;\n\n    // bug 217 - compile error on ICC 11.1\n    using Base::operator=;\n\n    typedef typename Base::CoeffReturnType CoeffReturnType;\n\n    /** \\copydoc DenseBase::rows() */\n    EIGEN_DEVICE_FUNC inline Index rows() const { return m_rows.value(); }\n    /** \\copydoc DenseBase::cols() */\n    EIGEN_DEVICE_FUNC inline Index cols() const { return m_cols.value(); }\n\n    /** Returns a pointer to the first coefficient of the matrix or vector.\n      *\n      * \\note When addressing this data, make sure to honor the strides returned by innerStride() and outerStride().\n      *\n      * \\sa innerStride(), outerStride()\n      */\n    EIGEN_DEVICE_FUNC inline const Scalar* data() const { return m_data; }\n\n    /** \\copydoc PlainObjectBase::coeff(Index,Index) const */\n    EIGEN_DEVICE_FUNC\n    inline const Scalar& coeff(Index rowId, Index colId) const\n    {\n      return m_data[colId * colStride() + rowId * rowStride()];\n    }\n\n    /** \\copydoc PlainObjectBase::coeff(Index) const */\n    EIGEN_DEVICE_FUNC\n    inline const Scalar& coeff(Index index) const\n    {\n      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)\n      return m_data[index * innerStride()];\n    }\n\n    /** \\copydoc PlainObjectBase::coeffRef(Index,Index) const */\n    EIGEN_DEVICE_FUNC\n    inline const Scalar& coeffRef(Index rowId, Index colId) const\n    {\n      return this->m_data[colId * colStride() + rowId * rowStride()];\n    }\n\n    /** \\copydoc PlainObjectBase::coeffRef(Index) const */\n    EIGEN_DEVICE_FUNC\n    inline const Scalar& coeffRef(Index index) const\n    {\n      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)\n      return this->m_data[index * innerStride()];\n    }\n\n    /** \\internal */\n    template<int LoadMode>\n    inline PacketScalar packet(Index rowId, Index colId) const\n    {\n      return internal::ploadt<PacketScalar, 
LoadMode>\n               (m_data + (colId * colStride() + rowId * rowStride()));\n    }\n\n    /** \\internal */\n    template<int LoadMode>\n    inline PacketScalar packet(Index index) const\n    {\n      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)\n      return internal::ploadt<PacketScalar, LoadMode>(m_data + index * innerStride());\n    }\n\n    /** \\internal Constructor for fixed size matrices or vectors */\n    EIGEN_DEVICE_FUNC\n    explicit inline MapBase(PointerType dataPtr) : m_data(dataPtr), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)\n    {\n      EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)\n      checkSanity<Derived>();\n    }\n\n    /** \\internal Constructor for dynamically sized vectors */\n    EIGEN_DEVICE_FUNC\n    inline MapBase(PointerType dataPtr, Index vecSize)\n            : m_data(dataPtr),\n              m_rows(RowsAtCompileTime == Dynamic ? vecSize : Index(RowsAtCompileTime)),\n              m_cols(ColsAtCompileTime == Dynamic ? vecSize : Index(ColsAtCompileTime))\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n      eigen_assert(vecSize >= 0);\n      eigen_assert(dataPtr == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == vecSize);\n      checkSanity<Derived>();\n    }\n\n    /** \\internal Constructor for dynamically sized matrices */\n    EIGEN_DEVICE_FUNC\n    inline MapBase(PointerType dataPtr, Index rows, Index cols)\n            : m_data(dataPtr), m_rows(rows), m_cols(cols)\n    {\n      eigen_assert( (dataPtr == 0)\n              || (   rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)\n                  && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)));\n      checkSanity<Derived>();\n    }\n\n    #ifdef EIGEN_MAPBASE_PLUGIN\n    #include EIGEN_MAPBASE_PLUGIN\n    #endif\n\n  protected:\n\n    template<typename T>\n    EIGEN_DEVICE_FUNC\n    void checkSanity(typename internal::enable_if<(internal::traits<T>::Alignment>0),void*>::type = 0) const\n    
{\n#if EIGEN_MAX_ALIGN_BYTES>0\n      eigen_assert((   ((internal::UIntPtr(m_data) % internal::traits<Derived>::Alignment) == 0)\n                    || (cols() * rows() * innerStride() * sizeof(Scalar)) < internal::traits<Derived>::Alignment ) && \"data is not aligned\");\n#endif\n    }\n\n    template<typename T>\n    EIGEN_DEVICE_FUNC\n    void checkSanity(typename internal::enable_if<internal::traits<T>::Alignment==0,void*>::type = 0) const\n    {}\n\n    PointerType m_data;\n    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_rows;\n    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_cols;\n};\n\n/** \\ingroup Core_Module\n  *\n  * \\brief Base class for non-const dense Map and Block expression with direct access\n  *\n  * This base class provides the non-const low-level accessors (e.g. coeff and coeffRef) of\n  * dense Map and Block objects with direct access.\n  * It inherits MapBase<Derived, ReadOnlyAccessors> which defines the const variant for reading specific entries.\n  *\n  * \\sa class Map, class Block\n  */\ntemplate<typename Derived> class MapBase<Derived, WriteAccessors>\n  : public MapBase<Derived, ReadOnlyAccessors>\n{\n    typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;\n  public:\n\n    typedef MapBase<Derived, ReadOnlyAccessors> Base;\n\n    typedef typename Base::Scalar Scalar;\n    typedef typename Base::PacketScalar PacketScalar;\n    typedef typename Base::StorageIndex StorageIndex;\n    typedef typename Base::PointerType PointerType;\n\n    using Base::derived;\n    using Base::rows;\n    using Base::cols;\n    using Base::size;\n    using Base::coeff;\n    using Base::coeffRef;\n\n    using Base::innerStride;\n    using Base::outerStride;\n    using Base::rowStride;\n    using Base::colStride;\n\n    typedef typename internal::conditional<\n                    internal::is_lvalue<Derived>::value,\n                    Scalar,\n                    const Scalar\n                  >::type 
ScalarWithConstIfNotLvalue;\n\n    EIGEN_DEVICE_FUNC\n    inline const Scalar* data() const { return this->m_data; }\n    EIGEN_DEVICE_FUNC\n    inline ScalarWithConstIfNotLvalue* data() { return this->m_data; } // no const-cast here so non-const-correct code will give a compile error\n\n    EIGEN_DEVICE_FUNC\n    inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col)\n    {\n      return this->m_data[col * colStride() + row * rowStride()];\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline ScalarWithConstIfNotLvalue& coeffRef(Index index)\n    {\n      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)\n      return this->m_data[index * innerStride()];\n    }\n\n    template<int StoreMode>\n    inline void writePacket(Index row, Index col, const PacketScalar& val)\n    {\n      internal::pstoret<Scalar, PacketScalar, StoreMode>\n               (this->m_data + (col * colStride() + row * rowStride()), val);\n    }\n\n    template<int StoreMode>\n    inline void writePacket(Index index, const PacketScalar& val)\n    {\n      EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived)\n      internal::pstoret<Scalar, PacketScalar, StoreMode>\n                (this->m_data + index * innerStride(), val);\n    }\n\n    EIGEN_DEVICE_FUNC explicit inline MapBase(PointerType dataPtr) : Base(dataPtr) {}\n    EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index vecSize) : Base(dataPtr, vecSize) {}\n    EIGEN_DEVICE_FUNC inline MapBase(PointerType dataPtr, Index rows, Index cols) : Base(dataPtr, rows, cols) {}\n\n    EIGEN_DEVICE_FUNC\n    Derived& operator=(const MapBase& other)\n    {\n      ReadOnlyMapBase::Base::operator=(other);\n      return derived();\n    }\n\n    // In theory we could simply refer to Base:Base::operator=, but MSVC does not like Base::Base,\n    // see bugs 821 and 920.\n    using ReadOnlyMapBase::Base::operator=;\n};\n\n#undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS\n\n} // end namespace Eigen\n\n#endif // EIGEN_MAPBASE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/MathFunctions.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_MATHFUNCTIONS_H\n#define EIGEN_MATHFUNCTIONS_H\n\n// source: http://www.geom.uiuc.edu/~huberty/math5337/groupe/digits.html\n// TODO this should better be moved to NumTraits\n#define EIGEN_PI 3.141592653589793238462643383279502884197169399375105820974944592307816406L\n\nnamespace Eigen {\n\n// On WINCE, std::abs is defined for int only, so let's defined our own overloads:\n// This issue has been confirmed with MSVC 2008 only, but the issue might exist for more recent versions too.\n#if EIGEN_OS_WINCE && EIGEN_COMP_MSVC && EIGEN_COMP_MSVC<=1500\nlong        abs(long        x) { return (labs(x));  }\ndouble      abs(double      x) { return (fabs(x));  }\nfloat       abs(float       x) { return (fabsf(x)); }\nlong double abs(long double x) { return (fabsl(x)); }\n#endif\n\nnamespace internal {\n\n/** \\internal \\class global_math_functions_filtering_base\n  *\n  * What it does:\n  * Defines a typedef 'type' as follows:\n  * - if type T has a member typedef Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl, then\n  *   global_math_functions_filtering_base<T>::type is a typedef for it.\n  * - otherwise, global_math_functions_filtering_base<T>::type is a typedef for T.\n  *\n  * How it's used:\n  * To allow to defined the global math functions (like sin...) in certain cases, like the Array expressions.\n  * When you do sin(array1+array2), the object array1+array2 has a complicated expression type, all what you want to know\n  * is that it inherits ArrayBase. 
So we implement a partial specialization of sin_impl for ArrayBase<Derived>.\n  * So we must make sure to use sin_impl<ArrayBase<Derived> > and not sin_impl<Derived>, otherwise our partial specialization\n  * won't be used. How does sin know that? That's exactly what global_math_functions_filtering_base tells it.\n  *\n  * How it's implemented:\n  * SFINAE in the style of enable_if. Highly susceptible of breaking compilers. With GCC, it sure does work, but if you replace\n  * the typename dummy by an integer template parameter, it doesn't work anymore!\n  */\n\ntemplate<typename T, typename dummy = void>\nstruct global_math_functions_filtering_base\n{\n  typedef T type;\n};\n\ntemplate<typename T> struct always_void { typedef void type; };\n\ntemplate<typename T>\nstruct global_math_functions_filtering_base\n  <T,\n   typename always_void<typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl>::type\n  >\n{\n  typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type;\n};\n\n#define EIGEN_MATHFUNC_IMPL(func, scalar) Eigen::internal::func##_impl<typename Eigen::internal::global_math_functions_filtering_base<scalar>::type>\n#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename Eigen::internal::func##_retval<typename Eigen::internal::global_math_functions_filtering_base<scalar>::type>::type\n\n/****************************************************************************\n* Implementation of real                                                 *\n****************************************************************************/\n\ntemplate<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>\nstruct real_default_impl\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline RealScalar run(const Scalar& x)\n  {\n    return x;\n  }\n};\n\ntemplate<typename Scalar>\nstruct real_default_impl<Scalar,true>\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline 
RealScalar run(const Scalar& x)\n  {\n    using std::real;\n    return real(x);\n  }\n};\n\ntemplate<typename Scalar> struct real_impl : real_default_impl<Scalar> {};\n\n#ifdef __CUDA_ARCH__\ntemplate<typename T>\nstruct real_impl<std::complex<T> >\n{\n  typedef T RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline T run(const std::complex<T>& x)\n  {\n    return x.real();\n  }\n};\n#endif\n\ntemplate<typename Scalar>\nstruct real_retval\n{\n  typedef typename NumTraits<Scalar>::Real type;\n};\n\n/****************************************************************************\n* Implementation of imag                                                 *\n****************************************************************************/\n\ntemplate<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>\nstruct imag_default_impl\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline RealScalar run(const Scalar&)\n  {\n    return RealScalar(0);\n  }\n};\n\ntemplate<typename Scalar>\nstruct imag_default_impl<Scalar,true>\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline RealScalar run(const Scalar& x)\n  {\n    using std::imag;\n    return imag(x);\n  }\n};\n\ntemplate<typename Scalar> struct imag_impl : imag_default_impl<Scalar> {};\n\n#ifdef __CUDA_ARCH__\ntemplate<typename T>\nstruct imag_impl<std::complex<T> >\n{\n  typedef T RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline T run(const std::complex<T>& x)\n  {\n    return x.imag();\n  }\n};\n#endif\n\ntemplate<typename Scalar>\nstruct imag_retval\n{\n  typedef typename NumTraits<Scalar>::Real type;\n};\n\n/****************************************************************************\n* Implementation of real_ref                                             *\n****************************************************************************/\n\ntemplate<typename Scalar>\nstruct real_ref_impl\n{\n  typedef typename NumTraits<Scalar>::Real 
RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline RealScalar& run(Scalar& x)\n  {\n    return reinterpret_cast<RealScalar*>(&x)[0];\n  }\n  EIGEN_DEVICE_FUNC\n  static inline const RealScalar& run(const Scalar& x)\n  {\n    return reinterpret_cast<const RealScalar*>(&x)[0];\n  }\n};\n\ntemplate<typename Scalar>\nstruct real_ref_retval\n{\n  typedef typename NumTraits<Scalar>::Real & type;\n};\n\n/****************************************************************************\n* Implementation of imag_ref                                             *\n****************************************************************************/\n\ntemplate<typename Scalar, bool IsComplex>\nstruct imag_ref_default_impl\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline RealScalar& run(Scalar& x)\n  {\n    return reinterpret_cast<RealScalar*>(&x)[1];\n  }\n  EIGEN_DEVICE_FUNC\n  static inline const RealScalar& run(const Scalar& x)\n  {\n    return reinterpret_cast<RealScalar*>(&x)[1];\n  }\n};\n\ntemplate<typename Scalar>\nstruct imag_ref_default_impl<Scalar, false>\n{\n  EIGEN_DEVICE_FUNC\n  static inline Scalar run(Scalar&)\n  {\n    return Scalar(0);\n  }\n  EIGEN_DEVICE_FUNC\n  static inline const Scalar run(const Scalar&)\n  {\n    return Scalar(0);\n  }\n};\n\ntemplate<typename Scalar>\nstruct imag_ref_impl : imag_ref_default_impl<Scalar, NumTraits<Scalar>::IsComplex> {};\n\ntemplate<typename Scalar>\nstruct imag_ref_retval\n{\n  typedef typename NumTraits<Scalar>::Real & type;\n};\n\n/****************************************************************************\n* Implementation of conj                                                 *\n****************************************************************************/\n\ntemplate<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>\nstruct conj_impl\n{\n  EIGEN_DEVICE_FUNC\n  static inline Scalar run(const Scalar& x)\n  {\n    return x;\n  }\n};\n\ntemplate<typename 
Scalar>\nstruct conj_impl<Scalar,true>\n{\n  EIGEN_DEVICE_FUNC\n  static inline Scalar run(const Scalar& x)\n  {\n    using std::conj;\n    return conj(x);\n  }\n};\n\ntemplate<typename Scalar>\nstruct conj_retval\n{\n  typedef Scalar type;\n};\n\n/****************************************************************************\n* Implementation of abs2                                                 *\n****************************************************************************/\n\ntemplate<typename Scalar,bool IsComplex>\nstruct abs2_impl_default\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline RealScalar run(const Scalar& x)\n  {\n    return x*x;\n  }\n};\n\ntemplate<typename Scalar>\nstruct abs2_impl_default<Scalar, true> // IsComplex\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline RealScalar run(const Scalar& x)\n  {\n    return real(x)*real(x) + imag(x)*imag(x);\n  }\n};\n\ntemplate<typename Scalar>\nstruct abs2_impl\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline RealScalar run(const Scalar& x)\n  {\n    return abs2_impl_default<Scalar,NumTraits<Scalar>::IsComplex>::run(x);\n  }\n};\n\ntemplate<typename Scalar>\nstruct abs2_retval\n{\n  typedef typename NumTraits<Scalar>::Real type;\n};\n\n/****************************************************************************\n* Implementation of norm1                                                *\n****************************************************************************/\n\ntemplate<typename Scalar, bool IsComplex>\nstruct norm1_default_impl\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline RealScalar run(const Scalar& x)\n  {\n    EIGEN_USING_STD_MATH(abs);\n    return abs(real(x)) + abs(imag(x));\n  }\n};\n\ntemplate<typename Scalar>\nstruct norm1_default_impl<Scalar, false>\n{\n  EIGEN_DEVICE_FUNC\n  static inline Scalar 
run(const Scalar& x)\n  {\n    EIGEN_USING_STD_MATH(abs);\n    return abs(x);\n  }\n};\n\ntemplate<typename Scalar>\nstruct norm1_impl : norm1_default_impl<Scalar, NumTraits<Scalar>::IsComplex> {};\n\ntemplate<typename Scalar>\nstruct norm1_retval\n{\n  typedef typename NumTraits<Scalar>::Real type;\n};\n\n/****************************************************************************\n* Implementation of hypot                                                *\n****************************************************************************/\n\ntemplate<typename Scalar>\nstruct hypot_impl\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  static inline RealScalar run(const Scalar& x, const Scalar& y)\n  {\n    EIGEN_USING_STD_MATH(abs);\n    EIGEN_USING_STD_MATH(sqrt);\n    RealScalar _x = abs(x);\n    RealScalar _y = abs(y);\n    Scalar p, qp;\n    if(_x>_y)\n    {\n      p = _x;\n      qp = _y / p;\n    }\n    else\n    {\n      p = _y;\n      qp = _x / p;\n    }\n    if(p==RealScalar(0)) return RealScalar(0);\n    return p * sqrt(RealScalar(1) + qp*qp);\n  }\n};\n\ntemplate<typename Scalar>\nstruct hypot_retval\n{\n  typedef typename NumTraits<Scalar>::Real type;\n};\n\n/****************************************************************************\n* Implementation of cast                                                 *\n****************************************************************************/\n\ntemplate<typename OldType, typename NewType>\nstruct cast_impl\n{\n  EIGEN_DEVICE_FUNC\n  static inline NewType run(const OldType& x)\n  {\n    return static_cast<NewType>(x);\n  }\n};\n\n// here, for once, we're plainly returning NewType: we don't want cast to do weird things.\n\ntemplate<typename OldType, typename NewType>\nEIGEN_DEVICE_FUNC\ninline NewType cast(const OldType& x)\n{\n  return cast_impl<OldType, NewType>::run(x);\n}\n\n/****************************************************************************\n* Implementation of round                       
                            *\n****************************************************************************/\n\n#if EIGEN_HAS_CXX11_MATH\n  template<typename Scalar>\n  struct round_impl {\n    static inline Scalar run(const Scalar& x)\n    {\n      EIGEN_STATIC_ASSERT((!NumTraits<Scalar>::IsComplex), NUMERIC_TYPE_MUST_BE_REAL)\n      EIGEN_USING_STD_MATH(round);\n      return round(x);\n    }\n  };\n#else\n  template<typename Scalar>\n  struct round_impl\n  {\n    static inline Scalar run(const Scalar& x)\n    {\n      EIGEN_STATIC_ASSERT((!NumTraits<Scalar>::IsComplex), NUMERIC_TYPE_MUST_BE_REAL)\n      EIGEN_USING_STD_MATH(floor);\n      EIGEN_USING_STD_MATH(ceil);\n      return (x > Scalar(0)) ? floor(x + Scalar(0.5)) : ceil(x - Scalar(0.5));\n    }\n  };\n#endif\n\ntemplate<typename Scalar>\nstruct round_retval\n{\n  typedef Scalar type;\n};\n\n/****************************************************************************\n* Implementation of arg                                                     *\n****************************************************************************/\n\n#if EIGEN_HAS_CXX11_MATH\n  template<typename Scalar>\n  struct arg_impl {\n    static inline Scalar run(const Scalar& x)\n    {\n      EIGEN_USING_STD_MATH(arg);\n      return arg(x);\n    }\n  };\n#else\n  template<typename Scalar, bool IsComplex = NumTraits<Scalar>::IsComplex>\n  struct arg_default_impl\n  {\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    EIGEN_DEVICE_FUNC\n    static inline RealScalar run(const Scalar& x)\n    {\n      return (x < Scalar(0)) ? 
Scalar(EIGEN_PI) : Scalar(0); }\n  };\n\n  template<typename Scalar>\n  struct arg_default_impl<Scalar,true>\n  {\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    EIGEN_DEVICE_FUNC\n    static inline RealScalar run(const Scalar& x)\n    {\n      EIGEN_USING_STD_MATH(arg);\n      return arg(x);\n    }\n  };\n\n  template<typename Scalar> struct arg_impl : arg_default_impl<Scalar> {};\n#endif\n\ntemplate<typename Scalar>\nstruct arg_retval\n{\n  typedef typename NumTraits<Scalar>::Real type;\n};\n\n/****************************************************************************\n* Implementation of expm1                                                   *\n****************************************************************************/\n\n// This implementation is based on GSL Math's expm1.\nnamespace std_fallback {\n  // fallback expm1 implementation in case there is no expm1(Scalar) function in namespace of Scalar,\n  // or that there is no suitable std::expm1 function available. Implementation\n  // attributed to Kahan. 
See: http://www.plunk.org/~hatch/rightway.php.\n  template<typename Scalar>\n  EIGEN_DEVICE_FUNC inline Scalar expm1(const Scalar& x) {\n    EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n\n    EIGEN_USING_STD_MATH(exp);\n    Scalar u = exp(x);\n    if (u == Scalar(1)) {\n      return x;\n    }\n    Scalar um1 = u - RealScalar(1);\n    if (um1 == Scalar(-1)) {\n      return RealScalar(-1);\n    }\n\n    EIGEN_USING_STD_MATH(log);\n    return (u - RealScalar(1)) * x / log(u);\n  }\n}\n\ntemplate<typename Scalar>\nstruct expm1_impl {\n  EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x)\n  {\n    EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)\n    #if EIGEN_HAS_CXX11_MATH\n    using std::expm1;\n    #endif\n    using std_fallback::expm1;\n    return expm1(x);\n  }\n};\n\n\ntemplate<typename Scalar>\nstruct expm1_retval\n{\n  typedef Scalar type;\n};\n\n/****************************************************************************\n* Implementation of log1p                                                   *\n****************************************************************************/\n\nnamespace std_fallback {\n  // fallback log1p implementation in case there is no log1p(Scalar) function in namespace of Scalar,\n  // or that there is no suitable std::log1p function available\n  template<typename Scalar>\n  EIGEN_DEVICE_FUNC inline Scalar log1p(const Scalar& x) {\n    EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    EIGEN_USING_STD_MATH(log);\n    Scalar x1p = RealScalar(1) + x;\n    return ( x1p == Scalar(1) ) ? 
x : x * ( log(x1p) / (x1p - RealScalar(1)) );\n  }\n}\n\ntemplate<typename Scalar>\nstruct log1p_impl {\n  EIGEN_DEVICE_FUNC static inline Scalar run(const Scalar& x)\n  {\n    EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar)\n    #if EIGEN_HAS_CXX11_MATH\n    using std::log1p;\n    #endif\n    using std_fallback::log1p;\n    return log1p(x);\n  }\n};\n\n\ntemplate<typename Scalar>\nstruct log1p_retval\n{\n  typedef Scalar type;\n};\n\n/****************************************************************************\n* Implementation of pow                                                  *\n****************************************************************************/\n\ntemplate<typename ScalarX,typename ScalarY, bool IsInteger = NumTraits<ScalarX>::IsInteger&&NumTraits<ScalarY>::IsInteger>\nstruct pow_impl\n{\n  //typedef Scalar retval;\n  typedef typename ScalarBinaryOpTraits<ScalarX,ScalarY,internal::scalar_pow_op<ScalarX,ScalarY> >::ReturnType result_type;\n  static EIGEN_DEVICE_FUNC inline result_type run(const ScalarX& x, const ScalarY& y)\n  {\n    EIGEN_USING_STD_MATH(pow);\n    return pow(x, y);\n  }\n};\n\ntemplate<typename ScalarX,typename ScalarY>\nstruct pow_impl<ScalarX,ScalarY, true>\n{\n  typedef ScalarX result_type;\n  static EIGEN_DEVICE_FUNC inline ScalarX run(ScalarX x, ScalarY y)\n  {\n    ScalarX res(1);\n    eigen_assert(!NumTraits<ScalarY>::IsSigned || y >= 0);\n    if(y & 1) res *= x;\n    y >>= 1;\n    while(y)\n    {\n      x *= x;\n      if(y&1) res *= x;\n      y >>= 1;\n    }\n    return res;\n  }\n};\n\n/****************************************************************************\n* Implementation of random                                               *\n****************************************************************************/\n\ntemplate<typename Scalar,\n         bool IsComplex,\n         bool IsInteger>\nstruct random_default_impl {};\n\ntemplate<typename Scalar>\nstruct random_impl : random_default_impl<Scalar, 
NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {};\n\ntemplate<typename Scalar>\nstruct random_retval\n{\n  typedef Scalar type;\n};\n\ntemplate<typename Scalar> inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y);\ntemplate<typename Scalar> inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random();\n\ntemplate<typename Scalar>\nstruct random_default_impl<Scalar, false, false>\n{\n  static inline Scalar run(const Scalar& x, const Scalar& y)\n  {\n    return x + (y-x) * Scalar(std::rand()) / Scalar(RAND_MAX);\n  }\n  static inline Scalar run()\n  {\n    return run(Scalar(NumTraits<Scalar>::IsSigned ? -1 : 0), Scalar(1));\n  }\n};\n\nenum {\n  meta_floor_log2_terminate,\n  meta_floor_log2_move_up,\n  meta_floor_log2_move_down,\n  meta_floor_log2_bogus\n};\n\ntemplate<unsigned int n, int lower, int upper> struct meta_floor_log2_selector\n{\n  enum { middle = (lower + upper) / 2,\n         value = (upper <= lower + 1) ? int(meta_floor_log2_terminate)\n               : (n < (1 << middle)) ? int(meta_floor_log2_move_down)\n               : (n==0) ? 
int(meta_floor_log2_bogus)\n               : int(meta_floor_log2_move_up)\n  };\n};\n\ntemplate<unsigned int n,\n         int lower = 0,\n         int upper = sizeof(unsigned int) * CHAR_BIT - 1,\n         int selector = meta_floor_log2_selector<n, lower, upper>::value>\nstruct meta_floor_log2 {};\n\ntemplate<unsigned int n, int lower, int upper>\nstruct meta_floor_log2<n, lower, upper, meta_floor_log2_move_down>\n{\n  enum { value = meta_floor_log2<n, lower, meta_floor_log2_selector<n, lower, upper>::middle>::value };\n};\n\ntemplate<unsigned int n, int lower, int upper>\nstruct meta_floor_log2<n, lower, upper, meta_floor_log2_move_up>\n{\n  enum { value = meta_floor_log2<n, meta_floor_log2_selector<n, lower, upper>::middle, upper>::value };\n};\n\ntemplate<unsigned int n, int lower, int upper>\nstruct meta_floor_log2<n, lower, upper, meta_floor_log2_terminate>\n{\n  enum { value = (n >= ((unsigned int)(1) << (lower+1))) ? lower+1 : lower };\n};\n\ntemplate<unsigned int n, int lower, int upper>\nstruct meta_floor_log2<n, lower, upper, meta_floor_log2_bogus>\n{\n  // no value, error at compile time\n};\n\ntemplate<typename Scalar>\nstruct random_default_impl<Scalar, false, true>\n{\n  static inline Scalar run(const Scalar& x, const Scalar& y)\n  {\n    typedef typename conditional<NumTraits<Scalar>::IsSigned,std::ptrdiff_t,std::size_t>::type ScalarX;\n    if(y<x)\n      return x;\n    // the following difference might overflow on a 32 bits system,\n    // but since y>=x the result converted to an unsigned long is still correct.\n    std::size_t range = ScalarX(y)-ScalarX(x);\n    std::size_t offset = 0;\n    // rejection sampling\n    std::size_t divisor = 1;\n    std::size_t multiplier = 1;\n    if(range<RAND_MAX) divisor = (std::size_t(RAND_MAX)+1)/(range+1);\n    else               multiplier = 1 + range/(std::size_t(RAND_MAX)+1);\n    do {\n      offset = (std::size_t(std::rand()) * multiplier) / divisor;\n    } while (offset > range);\n    return 
Scalar(ScalarX(x) + offset);\n  }\n\n  static inline Scalar run()\n  {\n#ifdef EIGEN_MAKING_DOCS\n    return run(Scalar(NumTraits<Scalar>::IsSigned ? -10 : 0), Scalar(10));\n#else\n    enum { rand_bits = meta_floor_log2<(unsigned int)(RAND_MAX)+1>::value,\n           scalar_bits = sizeof(Scalar) * CHAR_BIT,\n           shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits)),\n           offset = NumTraits<Scalar>::IsSigned ? (1 << (EIGEN_PLAIN_ENUM_MIN(rand_bits,scalar_bits)-1)) : 0\n    };\n    return Scalar((std::rand() >> shift) - offset);\n#endif\n  }\n};\n\ntemplate<typename Scalar>\nstruct random_default_impl<Scalar, true, false>\n{\n  static inline Scalar run(const Scalar& x, const Scalar& y)\n  {\n    return Scalar(random(real(x), real(y)),\n                  random(imag(x), imag(y)));\n  }\n  static inline Scalar run()\n  {\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    return Scalar(random<RealScalar>(), random<RealScalar>());\n  }\n};\n\ntemplate<typename Scalar>\ninline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y)\n{\n  return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y);\n}\n\ntemplate<typename Scalar>\ninline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()\n{\n  return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();\n}\n\n// Implementatin of is* functions\n\n// std::is* do not work with fast-math and gcc, std::is* are available on MSVC 2013 and newer, as well as in clang.\n#if (EIGEN_HAS_CXX11_MATH && !(EIGEN_COMP_GNUC_STRICT && __FINITE_MATH_ONLY__)) || (EIGEN_COMP_MSVC>=1800) || (EIGEN_COMP_CLANG)\n#define EIGEN_USE_STD_FPCLASSIFY 1\n#else\n#define EIGEN_USE_STD_FPCLASSIFY 0\n#endif\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC\ntypename internal::enable_if<internal::is_integral<T>::value,bool>::type\nisnan_impl(const T&) { return false; }\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC\ntypename internal::enable_if<internal::is_integral<T>::value,bool>::type\nisinf_impl(const T&) { return false; 
}\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC\ntypename internal::enable_if<internal::is_integral<T>::value,bool>::type\nisfinite_impl(const T&) { return true; }\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC\ntypename internal::enable_if<(!internal::is_integral<T>::value)&&(!NumTraits<T>::IsComplex),bool>::type\nisfinite_impl(const T& x)\n{\n  #ifdef __CUDA_ARCH__\n    return (::isfinite)(x);\n  #elif EIGEN_USE_STD_FPCLASSIFY\n    using std::isfinite;\n    return isfinite EIGEN_NOT_A_MACRO (x);\n  #else\n    return x<=NumTraits<T>::highest() && x>=NumTraits<T>::lowest();\n  #endif\n}\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC\ntypename internal::enable_if<(!internal::is_integral<T>::value)&&(!NumTraits<T>::IsComplex),bool>::type\nisinf_impl(const T& x)\n{\n  #ifdef __CUDA_ARCH__\n    return (::isinf)(x);\n  #elif EIGEN_USE_STD_FPCLASSIFY\n    using std::isinf;\n    return isinf EIGEN_NOT_A_MACRO (x);\n  #else\n    return x>NumTraits<T>::highest() || x<NumTraits<T>::lowest();\n  #endif\n}\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC\ntypename internal::enable_if<(!internal::is_integral<T>::value)&&(!NumTraits<T>::IsComplex),bool>::type\nisnan_impl(const T& x)\n{\n  #ifdef __CUDA_ARCH__\n    return (::isnan)(x);\n  #elif EIGEN_USE_STD_FPCLASSIFY\n    using std::isnan;\n    return isnan EIGEN_NOT_A_MACRO (x);\n  #else\n    return x != x;\n  #endif\n}\n\n#if (!EIGEN_USE_STD_FPCLASSIFY)\n\n#if EIGEN_COMP_MSVC\n\ntemplate<typename T> EIGEN_DEVICE_FUNC bool isinf_msvc_helper(T x)\n{\n  return _fpclass(x)==_FPCLASS_NINF || _fpclass(x)==_FPCLASS_PINF;\n}\n\n//MSVC defines a _isnan builtin function, but for double only\nEIGEN_DEVICE_FUNC inline bool isnan_impl(const long double& x) { return _isnan(x)!=0; }\nEIGEN_DEVICE_FUNC inline bool isnan_impl(const double& x)      { return _isnan(x)!=0; }\nEIGEN_DEVICE_FUNC inline bool isnan_impl(const float& x)       { return _isnan(x)!=0; }\n\nEIGEN_DEVICE_FUNC inline bool isinf_impl(const long double& x) { return isinf_msvc_helper(x); 
}\nEIGEN_DEVICE_FUNC inline bool isinf_impl(const double& x)      { return isinf_msvc_helper(x); }\nEIGEN_DEVICE_FUNC inline bool isinf_impl(const float& x)       { return isinf_msvc_helper(x); }\n\n#elif (defined __FINITE_MATH_ONLY__ && __FINITE_MATH_ONLY__ && EIGEN_COMP_GNUC)\n\n#if EIGEN_GNUC_AT_LEAST(5,0)\n  #define EIGEN_TMP_NOOPT_ATTRIB EIGEN_DEVICE_FUNC inline __attribute__((optimize(\"no-finite-math-only\")))\n#else\n  // NOTE the inline qualifier and noinline attribute are both needed: the former is to avoid linking issue (duplicate symbol),\n  //      while the second prevent too aggressive optimizations in fast-math mode:\n  #define EIGEN_TMP_NOOPT_ATTRIB EIGEN_DEVICE_FUNC inline __attribute__((noinline,optimize(\"no-finite-math-only\")))\n#endif\n\ntemplate<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const long double& x) { return __builtin_isnan(x); }\ntemplate<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const double& x)      { return __builtin_isnan(x); }\ntemplate<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const float& x)       { return __builtin_isnan(x); }\ntemplate<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const double& x)      { return __builtin_isinf(x); }\ntemplate<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const float& x)       { return __builtin_isinf(x); }\ntemplate<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const long double& x) { return __builtin_isinf(x); }\n\n#undef EIGEN_TMP_NOOPT_ATTRIB\n\n#endif\n\n#endif\n\n// The following overload are defined at the end of this file\ntemplate<typename T> EIGEN_DEVICE_FUNC bool isfinite_impl(const std::complex<T>& x);\ntemplate<typename T> EIGEN_DEVICE_FUNC bool isnan_impl(const std::complex<T>& x);\ntemplate<typename T> EIGEN_DEVICE_FUNC bool isinf_impl(const std::complex<T>& x);\n\ntemplate<typename T> T generic_fast_tanh_float(const T& a_x);\n\n} // end namespace internal\n\n/****************************************************************************\n* Generic math functions                                  
                  *\n****************************************************************************/\n\nnamespace numext {\n\n#if !defined(__CUDA_ARCH__) && !defined(__SYCL_DEVICE_ONLY__)\ntemplate<typename T>\nEIGEN_DEVICE_FUNC\nEIGEN_ALWAYS_INLINE T mini(const T& x, const T& y)\n{\n  EIGEN_USING_STD_MATH(min);\n  return min EIGEN_NOT_A_MACRO (x,y);\n}\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC\nEIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y)\n{\n  EIGEN_USING_STD_MATH(max);\n  return max EIGEN_NOT_A_MACRO (x,y);\n}\n\n\n#elif defined(__SYCL_DEVICE_ONLY__)\ntemplate<typename T>\nEIGEN_ALWAYS_INLINE T mini(const T& x, const T& y)\n{\n\n  return y < x ? y : x;\n}\n\ntemplate<typename T>\nEIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y)\n{\n\n  return x < y ? y : x;\n}\n\nEIGEN_ALWAYS_INLINE int mini(const int& x, const int& y)\n{\n  return cl::sycl::min(x,y);\n}\n\nEIGEN_ALWAYS_INLINE int maxi(const int& x, const int& y)\n{\n  return cl::sycl::max(x,y);\n}\n\nEIGEN_ALWAYS_INLINE unsigned int mini(const unsigned int& x, const unsigned int& y)\n{\n  return cl::sycl::min(x,y);\n}\n\nEIGEN_ALWAYS_INLINE unsigned int maxi(const unsigned int& x, const unsigned int& y)\n{\n  return cl::sycl::max(x,y);\n}\n\nEIGEN_ALWAYS_INLINE  long mini(const long & x, const long & y)\n{\n  return cl::sycl::min(x,y);\n}\n\nEIGEN_ALWAYS_INLINE  long maxi(const long & x, const long & y)\n{\n  return cl::sycl::max(x,y);\n}\n\nEIGEN_ALWAYS_INLINE unsigned long mini(const unsigned long& x, const unsigned long& y)\n{\n  return cl::sycl::min(x,y);\n}\n\nEIGEN_ALWAYS_INLINE unsigned long maxi(const unsigned long& x, const unsigned long& y)\n{\n  return cl::sycl::max(x,y);\n}\n\n\nEIGEN_ALWAYS_INLINE float mini(const float& x, const float& y)\n{\n  return cl::sycl::fmin(x,y);\n}\n\nEIGEN_ALWAYS_INLINE float maxi(const float& x, const float& y)\n{\n  return cl::sycl::fmax(x,y);\n}\n\nEIGEN_ALWAYS_INLINE double mini(const double& x, const double& y)\n{\n  return 
cl::sycl::fmin(x,y);\n}\n\nEIGEN_ALWAYS_INLINE double maxi(const double& x, const double& y)\n{\n  return cl::sycl::fmax(x,y);\n}\n\n#else\ntemplate<typename T>\nEIGEN_DEVICE_FUNC\nEIGEN_ALWAYS_INLINE T mini(const T& x, const T& y)\n{\n  return y < x ? y : x;\n}\ntemplate<>\nEIGEN_DEVICE_FUNC\nEIGEN_ALWAYS_INLINE float mini(const float& x, const float& y)\n{\n  return fminf(x, y);\n}\ntemplate<typename T>\nEIGEN_DEVICE_FUNC\nEIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y)\n{\n  return x < y ? y : x;\n}\ntemplate<>\nEIGEN_DEVICE_FUNC\nEIGEN_ALWAYS_INLINE float maxi(const float& x, const float& y)\n{\n  return fmaxf(x, y);\n}\n#endif\n\n\ntemplate<typename Scalar>\nEIGEN_DEVICE_FUNC\ninline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x)\n{\n  return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x);\n}\n\ntemplate<typename Scalar>\nEIGEN_DEVICE_FUNC\ninline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x)\n{\n  return internal::real_ref_impl<Scalar>::run(x);\n}\n\ntemplate<typename Scalar>\nEIGEN_DEVICE_FUNC\ninline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x)\n{\n  return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x);\n}\n\ntemplate<typename Scalar>\nEIGEN_DEVICE_FUNC\ninline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x)\n{\n  return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x);\n}\n\ntemplate<typename Scalar>\nEIGEN_DEVICE_FUNC\ninline EIGEN_MATHFUNC_RETVAL(arg, Scalar) arg(const Scalar& x)\n{\n  return EIGEN_MATHFUNC_IMPL(arg, Scalar)::run(x);\n}\n\ntemplate<typename Scalar>\nEIGEN_DEVICE_FUNC\ninline typename internal::add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x)\n{\n  return internal::imag_ref_impl<Scalar>::run(x);\n}\n\ntemplate<typename Scalar>\nEIGEN_DEVICE_FUNC\ninline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x)\n{\n  return EIGEN_MATHFUNC_IMPL(imag_ref, 
Scalar)::run(x);\n}\n\ntemplate<typename Scalar>\nEIGEN_DEVICE_FUNC\ninline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x)\n{\n  return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x);\n}\n\ntemplate<typename Scalar>\nEIGEN_DEVICE_FUNC\ninline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x)\n{\n  return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x);\n}\n\ntemplate<typename Scalar>\nEIGEN_DEVICE_FUNC\ninline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x)\n{\n  return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x);\n}\n\ntemplate<typename Scalar>\nEIGEN_DEVICE_FUNC\ninline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y)\n{\n  return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y);\n}\n\ntemplate<typename Scalar>\nEIGEN_DEVICE_FUNC\ninline EIGEN_MATHFUNC_RETVAL(log1p, Scalar) log1p(const Scalar& x)\n{\n  return EIGEN_MATHFUNC_IMPL(log1p, Scalar)::run(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   log1p(float x) { return cl::sycl::log1p(x); }\nEIGEN_ALWAYS_INLINE double  log1p(double x) { return cl::sycl::log1p(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat log1p(const float &x) { return ::log1pf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble log1p(const double &x) { return ::log1p(x); }\n#endif\n\ntemplate<typename ScalarX,typename ScalarY>\nEIGEN_DEVICE_FUNC\ninline typename internal::pow_impl<ScalarX,ScalarY>::result_type pow(const ScalarX& x, const ScalarY& y)\n{\n  return internal::pow_impl<ScalarX,ScalarY>::run(x, y);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   pow(float x, float y) { return cl::sycl::pow(x, y); }\nEIGEN_ALWAYS_INLINE double  pow(double x, double y) { return cl::sycl::pow(x, y); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\ntemplate<typename T> EIGEN_DEVICE_FUNC bool (isnan)   (const T &x) { return internal::isnan_impl(x); }\ntemplate<typename T> 
EIGEN_DEVICE_FUNC bool (isinf)   (const T &x) { return internal::isinf_impl(x); }\ntemplate<typename T> EIGEN_DEVICE_FUNC bool (isfinite)(const T &x) { return internal::isfinite_impl(x); }\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   isnan(float x) { return cl::sycl::isnan(x); }\nEIGEN_ALWAYS_INLINE double  isnan(double x) { return cl::sycl::isnan(x); }\nEIGEN_ALWAYS_INLINE float   isinf(float x) { return cl::sycl::isinf(x); }\nEIGEN_ALWAYS_INLINE double  isinf(double x) { return cl::sycl::isinf(x); }\nEIGEN_ALWAYS_INLINE float   isfinite(float x) { return cl::sycl::isfinite(x); }\nEIGEN_ALWAYS_INLINE double  isfinite(double x) { return cl::sycl::isfinite(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\ntemplate<typename Scalar>\nEIGEN_DEVICE_FUNC\ninline EIGEN_MATHFUNC_RETVAL(round, Scalar) round(const Scalar& x)\n{\n  return EIGEN_MATHFUNC_IMPL(round, Scalar)::run(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   round(float x) { return cl::sycl::round(x); }\nEIGEN_ALWAYS_INLINE double  round(double x) { return cl::sycl::round(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC\nT (floor)(const T& x)\n{\n  EIGEN_USING_STD_MATH(floor);\n  return floor(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   floor(float x) { return cl::sycl::floor(x); }\nEIGEN_ALWAYS_INLINE double  floor(double x) { return cl::sycl::floor(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat floor(const float &x) { return ::floorf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble floor(const double &x) { return ::floor(x); }\n#endif\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC\nT (ceil)(const T& x)\n{\n  EIGEN_USING_STD_MATH(ceil);\n  return ceil(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   ceil(float x) { return cl::sycl::ceil(x); }\nEIGEN_ALWAYS_INLINE double  ceil(double 
x) { return cl::sycl::ceil(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat ceil(const float &x) { return ::ceilf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble ceil(const double &x) { return ::ceil(x); }\n#endif\n\n\n/** Log base 2 for 32 bits positive integers.\n  * Conveniently returns 0 for x==0. */\ninline int log2(int x)\n{\n  eigen_assert(x>=0);\n  unsigned int v(x);\n  static const int table[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };\n  v |= v >> 1;\n  v |= v >> 2;\n  v |= v >> 4;\n  v |= v >> 8;\n  v |= v >> 16;\n  return table[(v * 0x07C4ACDDU) >> 27];\n}\n\n/** \\returns the square root of \\a x.\n  *\n  * It is essentially equivalent to \\code using std::sqrt; return sqrt(x); \\endcode,\n  * but slightly faster for float/double and some compilers (e.g., gcc), thanks to\n  * specializations when SSE is enabled.\n  *\n  * It's usage is justified in performance critical functions, like norm/normalize.\n  */\ntemplate<typename T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nT sqrt(const T &x)\n{\n  EIGEN_USING_STD_MATH(sqrt);\n  return sqrt(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   sqrt(float x) { return cl::sycl::sqrt(x); }\nEIGEN_ALWAYS_INLINE double  sqrt(double x) { return cl::sycl::sqrt(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nT log(const T &x) {\n  EIGEN_USING_STD_MATH(log);\n  return log(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   log(float x) { return cl::sycl::log(x); }\nEIGEN_ALWAYS_INLINE double  log(double x) { return cl::sycl::log(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat log(const float &x) { return ::logf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC 
EIGEN_ALWAYS_INLINE\ndouble log(const double &x) { return ::log(x); }\n#endif\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ntypename NumTraits<T>::Real abs(const T &x) {\n  EIGEN_USING_STD_MATH(abs);\n  return abs(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   abs(float x) { return cl::sycl::fabs(x); }\nEIGEN_ALWAYS_INLINE double  abs(double x) { return cl::sycl::fabs(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat abs(const float &x) { return ::fabsf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble abs(const double &x) { return ::fabs(x); }\n\ntemplate <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat abs(const std::complex<float>& x) {\n  return ::hypotf(x.real(), x.imag());\n}\n\ntemplate <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble abs(const std::complex<double>& x) {\n  return ::hypot(x.real(), x.imag());\n}\n#endif\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nT exp(const T &x) {\n  EIGEN_USING_STD_MATH(exp);\n  return exp(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   exp(float x) { return cl::sycl::exp(x); }\nEIGEN_ALWAYS_INLINE double  exp(double x) { return cl::sycl::exp(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat exp(const float &x) { return ::expf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble exp(const double &x) { return ::exp(x); }\n#endif\n\ntemplate<typename Scalar>\nEIGEN_DEVICE_FUNC\ninline EIGEN_MATHFUNC_RETVAL(expm1, Scalar) expm1(const Scalar& x)\n{\n  return EIGEN_MATHFUNC_IMPL(expm1, Scalar)::run(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   expm1(float x) { return cl::sycl::expm1(x); }\nEIGEN_ALWAYS_INLINE double  expm1(double x) { return cl::sycl::expm1(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate<> 
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat expm1(const float &x) { return ::expm1f(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble expm1(const double &x) { return ::expm1(x); }\n#endif\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nT cos(const T &x) {\n  EIGEN_USING_STD_MATH(cos);\n  return cos(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   cos(float x) { return cl::sycl::cos(x); }\nEIGEN_ALWAYS_INLINE double  cos(double x) { return cl::sycl::cos(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat cos(const float &x) { return ::cosf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble cos(const double &x) { return ::cos(x); }\n#endif\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nT sin(const T &x) {\n  EIGEN_USING_STD_MATH(sin);\n  return sin(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   sin(float x) { return cl::sycl::sin(x); }\nEIGEN_ALWAYS_INLINE double  sin(double x) { return cl::sycl::sin(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat sin(const float &x) { return ::sinf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble sin(const double &x) { return ::sin(x); }\n#endif\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nT tan(const T &x) {\n  EIGEN_USING_STD_MATH(tan);\n  return tan(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   tan(float x) { return cl::sycl::tan(x); }\nEIGEN_ALWAYS_INLINE double  tan(double x) { return cl::sycl::tan(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat tan(const float &x) { return ::tanf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble tan(const double &x) { return ::tan(x); }\n#endif\n\ntemplate<typename 
T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nT acos(const T &x) {\n  EIGEN_USING_STD_MATH(acos);\n  return acos(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   acos(float x) { return cl::sycl::acos(x); }\nEIGEN_ALWAYS_INLINE double  acos(double x) { return cl::sycl::acos(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat acos(const float &x) { return ::acosf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble acos(const double &x) { return ::acos(x); }\n#endif\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nT asin(const T &x) {\n  EIGEN_USING_STD_MATH(asin);\n  return asin(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   asin(float x) { return cl::sycl::asin(x); }\nEIGEN_ALWAYS_INLINE double  asin(double x) { return cl::sycl::asin(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat asin(const float &x) { return ::asinf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble asin(const double &x) { return ::asin(x); }\n#endif\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nT atan(const T &x) {\n  EIGEN_USING_STD_MATH(atan);\n  return atan(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   atan(float x) { return cl::sycl::atan(x); }\nEIGEN_ALWAYS_INLINE double  atan(double x) { return cl::sycl::atan(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat atan(const float &x) { return ::atanf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble atan(const double &x) { return ::atan(x); }\n#endif\n\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nT cosh(const T &x) {\n  EIGEN_USING_STD_MATH(cosh);\n  return cosh(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   cosh(float x) { return 
cl::sycl::cosh(x); }\nEIGEN_ALWAYS_INLINE double  cosh(double x) { return cl::sycl::cosh(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat cosh(const float &x) { return ::coshf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble cosh(const double &x) { return ::cosh(x); }\n#endif\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nT sinh(const T &x) {\n  EIGEN_USING_STD_MATH(sinh);\n  return sinh(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   sinh(float x) { return cl::sycl::sinh(x); }\nEIGEN_ALWAYS_INLINE double  sinh(double x) { return cl::sycl::sinh(x); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat sinh(const float &x) { return ::sinhf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble sinh(const double &x) { return ::sinh(x); }\n#endif\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nT tanh(const T &x) {\n  EIGEN_USING_STD_MATH(tanh);\n  return tanh(x);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   tanh(float x) { return cl::sycl::tanh(x); }\nEIGEN_ALWAYS_INLINE double  tanh(double x) { return cl::sycl::tanh(x); }\n#elif (!defined(__CUDACC__)) && EIGEN_FAST_MATH\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat tanh(float x) { return internal::generic_fast_tanh_float(x); }\n#endif\n\n#ifdef __CUDACC__\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat tanh(const float &x) { return ::tanhf(x); }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble tanh(const double &x) { return ::tanh(x); }\n#endif\n\ntemplate <typename T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nT fmod(const T& a, const T& b) {\n  EIGEN_USING_STD_MATH(fmod);\n  return fmod(a, b);\n}\n\n#if defined(__SYCL_DEVICE_ONLY__)\nEIGEN_ALWAYS_INLINE float   fmod(float x, float y) { return cl::sycl::fmod(x, y); }\nEIGEN_ALWAYS_INLINE double  
fmod(double x, double y) { return cl::sycl::fmod(x, y); }\n#endif // defined(__SYCL_DEVICE_ONLY__)\n\n#ifdef __CUDACC__\ntemplate <>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat fmod(const float& a, const float& b) {\n  return ::fmodf(a, b);\n}\n\ntemplate <>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble fmod(const double& a, const double& b) {\n  return ::fmod(a, b);\n}\n#endif\n\n} // end namespace numext\n\nnamespace internal {\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC bool isfinite_impl(const std::complex<T>& x)\n{\n  return (numext::isfinite)(numext::real(x)) && (numext::isfinite)(numext::imag(x));\n}\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC bool isnan_impl(const std::complex<T>& x)\n{\n  return (numext::isnan)(numext::real(x)) || (numext::isnan)(numext::imag(x));\n}\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC bool isinf_impl(const std::complex<T>& x)\n{\n  return ((numext::isinf)(numext::real(x)) || (numext::isinf)(numext::imag(x))) && (!(numext::isnan)(x));\n}\n\n/****************************************************************************\n* Implementation of fuzzy comparisons                                       *\n****************************************************************************/\n\ntemplate<typename Scalar,\n         bool IsComplex,\n         bool IsInteger>\nstruct scalar_fuzzy_default_impl {};\n\ntemplate<typename Scalar>\nstruct scalar_fuzzy_default_impl<Scalar, false, false>\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  template<typename OtherScalar> EIGEN_DEVICE_FUNC\n  static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)\n  {\n    return numext::abs(x) <= numext::abs(y) * prec;\n  }\n  EIGEN_DEVICE_FUNC\n  static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)\n  {\n    return numext::abs(x - y) <= numext::mini(numext::abs(x), numext::abs(y)) * prec;\n  }\n  EIGEN_DEVICE_FUNC\n  static inline bool isApproxOrLessThan(const Scalar& x, const 
Scalar& y, const RealScalar& prec)\n  {\n    return x <= y || isApprox(x, y, prec);\n  }\n};\n\ntemplate<typename Scalar>\nstruct scalar_fuzzy_default_impl<Scalar, false, true>\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  template<typename OtherScalar> EIGEN_DEVICE_FUNC\n  static inline bool isMuchSmallerThan(const Scalar& x, const Scalar&, const RealScalar&)\n  {\n    return x == Scalar(0);\n  }\n  EIGEN_DEVICE_FUNC\n  static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar&)\n  {\n    return x == y;\n  }\n  EIGEN_DEVICE_FUNC\n  static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar&)\n  {\n    return x <= y;\n  }\n};\n\ntemplate<typename Scalar>\nstruct scalar_fuzzy_default_impl<Scalar, true, false>\n{\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  template<typename OtherScalar> EIGEN_DEVICE_FUNC\n  static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec)\n  {\n    return numext::abs2(x) <= numext::abs2(y) * prec * prec;\n  }\n  EIGEN_DEVICE_FUNC\n  static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec)\n  {\n    return numext::abs2(x - y) <= numext::mini(numext::abs2(x), numext::abs2(y)) * prec * prec;\n  }\n};\n\ntemplate<typename Scalar>\nstruct scalar_fuzzy_impl : scalar_fuzzy_default_impl<Scalar, NumTraits<Scalar>::IsComplex, NumTraits<Scalar>::IsInteger> {};\n\ntemplate<typename Scalar, typename OtherScalar> EIGEN_DEVICE_FUNC\ninline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y,\n                              const typename NumTraits<Scalar>::Real &precision = NumTraits<Scalar>::dummy_precision())\n{\n  return scalar_fuzzy_impl<Scalar>::template isMuchSmallerThan<OtherScalar>(x, y, precision);\n}\n\ntemplate<typename Scalar> EIGEN_DEVICE_FUNC\ninline bool isApprox(const Scalar& x, const Scalar& y,\n                     const typename NumTraits<Scalar>::Real &precision = 
NumTraits<Scalar>::dummy_precision())\n{\n  return scalar_fuzzy_impl<Scalar>::isApprox(x, y, precision);\n}\n\ntemplate<typename Scalar> EIGEN_DEVICE_FUNC\ninline bool isApproxOrLessThan(const Scalar& x, const Scalar& y,\n                               const typename NumTraits<Scalar>::Real &precision = NumTraits<Scalar>::dummy_precision())\n{\n  return scalar_fuzzy_impl<Scalar>::isApproxOrLessThan(x, y, precision);\n}\n\n/******************************************\n***  The special case of the  bool type ***\n******************************************/\n\ntemplate<> struct random_impl<bool>\n{\n  static inline bool run()\n  {\n    return random<int>(0,1)==0 ? false : true;\n  }\n};\n\ntemplate<> struct scalar_fuzzy_impl<bool>\n{\n  typedef bool RealScalar;\n\n  template<typename OtherScalar> EIGEN_DEVICE_FUNC\n  static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&)\n  {\n    return !x;\n  }\n\n  EIGEN_DEVICE_FUNC\n  static inline bool isApprox(bool x, bool y, bool)\n  {\n    return x == y;\n  }\n\n  EIGEN_DEVICE_FUNC\n  static inline bool isApproxOrLessThan(const bool& x, const bool& y, const bool&)\n  {\n    return (!x) || y;\n  }\n\n};\n\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_MATHFUNCTIONS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/MathFunctionsImpl.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com)\n// Copyright (C) 2016 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_MATHFUNCTIONSIMPL_H\n#define EIGEN_MATHFUNCTIONSIMPL_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n/** \\internal \\returns the hyperbolic tan of \\a a (coeff-wise)\n    Doesn't do anything fancy, just a 13/6-degree rational interpolant which\n    is accurate up to a couple of ulp in the range [-9, 9], outside of which\n    the tanh(x) = +/-1.\n\n    This implementation works on both scalars and packets.\n*/\ntemplate<typename T>\nT generic_fast_tanh_float(const T& a_x)\n{\n  // Clamp the inputs to the range [-9, 9] since anything outside\n  // this range is +/-1.0f in single-precision.\n  const T plus_9 = pset1<T>(9.f);\n  const T minus_9 = pset1<T>(-9.f);\n  const T x = pmax(pmin(a_x, plus_9), minus_9);\n  // The monomial coefficients of the numerator polynomial (odd).\n  const T alpha_1 = pset1<T>(4.89352455891786e-03f);\n  const T alpha_3 = pset1<T>(6.37261928875436e-04f);\n  const T alpha_5 = pset1<T>(1.48572235717979e-05f);\n  const T alpha_7 = pset1<T>(5.12229709037114e-08f);\n  const T alpha_9 = pset1<T>(-8.60467152213735e-11f);\n  const T alpha_11 = pset1<T>(2.00018790482477e-13f);\n  const T alpha_13 = pset1<T>(-2.76076847742355e-16f);\n\n  // The monomial coefficients of the denominator polynomial (even).\n  const T beta_0 = pset1<T>(4.89352518554385e-03f);\n  const T beta_2 = pset1<T>(2.26843463243900e-03f);\n  const T beta_4 = pset1<T>(1.18534705686654e-04f);\n  const T beta_6 = pset1<T>(1.19825839466702e-06f);\n\n  // Since the polynomials are odd/even, we need x^2.\n  const T x2 = pmul(x, 
x);\n\n  // Evaluate the numerator polynomial p.\n  T p = pmadd(x2, alpha_13, alpha_11);\n  p = pmadd(x2, p, alpha_9);\n  p = pmadd(x2, p, alpha_7);\n  p = pmadd(x2, p, alpha_5);\n  p = pmadd(x2, p, alpha_3);\n  p = pmadd(x2, p, alpha_1);\n  p = pmul(x, p);\n\n  // Evaluate the denominator polynomial p.\n  T q = pmadd(x2, beta_6, beta_4);\n  q = pmadd(x2, q, beta_2);\n  q = pmadd(x2, q, beta_0);\n\n  // Divide the numerator by the denominator.\n  return pdiv(p, q);\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_MATHFUNCTIONSIMPL_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Matrix.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_MATRIX_H\n#define EIGEN_MATRIX_H\n\nnamespace Eigen {\n\nnamespace internal {\ntemplate<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>\nstruct traits<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >\n{\nprivate:\n  enum { size = internal::size_at_compile_time<_Rows,_Cols>::ret };\n  typedef typename find_best_packet<_Scalar,size>::type PacketScalar;\n  enum {\n      row_major_bit = _Options&RowMajor ? RowMajorBit : 0,\n      is_dynamic_size_storage = _MaxRows==Dynamic || _MaxCols==Dynamic,\n      max_size = is_dynamic_size_storage ? Dynamic : _MaxRows*_MaxCols,\n      default_alignment = compute_default_alignment<_Scalar,max_size>::value,\n      actual_alignment = ((_Options&DontAlign)==0) ? default_alignment : 0,\n      required_alignment = unpacket_traits<PacketScalar>::alignment,\n      packet_access_bit = (packet_traits<_Scalar>::Vectorizable && (EIGEN_UNALIGNED_VECTORIZE || (actual_alignment>=required_alignment))) ? PacketAccessBit : 0\n    };\n    \npublic:\n  typedef _Scalar Scalar;\n  typedef Dense StorageKind;\n  typedef Eigen::Index StorageIndex;\n  typedef MatrixXpr XprKind;\n  enum {\n    RowsAtCompileTime = _Rows,\n    ColsAtCompileTime = _Cols,\n    MaxRowsAtCompileTime = _MaxRows,\n    MaxColsAtCompileTime = _MaxCols,\n    Flags = compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret,\n    Options = _Options,\n    InnerStrideAtCompileTime = 1,\n    OuterStrideAtCompileTime = (Options&RowMajor) ? 
ColsAtCompileTime : RowsAtCompileTime,\n    \n    // FIXME, the following flag in only used to define NeedsToAlign in PlainObjectBase\n    EvaluatorFlags = LinearAccessBit | DirectAccessBit | packet_access_bit | row_major_bit,\n    Alignment = actual_alignment\n  };\n};\n}\n\n/** \\class Matrix\n  * \\ingroup Core_Module\n  *\n  * \\brief The matrix class, also used for vectors and row-vectors\n  *\n  * The %Matrix class is the work-horse for all \\em dense (\\ref dense \"note\") matrices and vectors within Eigen.\n  * Vectors are matrices with one column, and row-vectors are matrices with one row.\n  *\n  * The %Matrix class encompasses \\em both fixed-size and dynamic-size objects (\\ref fixedsize \"note\").\n  *\n  * The first three template parameters are required:\n  * \\tparam _Scalar Numeric type, e.g. float, double, int or std::complex<float>.\n  *                 User defined scalar types are supported as well (see \\ref user_defined_scalars \"here\").\n  * \\tparam _Rows Number of rows, or \\b Dynamic\n  * \\tparam _Cols Number of columns, or \\b Dynamic\n  *\n  * The remaining template parameters are optional -- in most cases you don't have to worry about them.\n  * \\tparam _Options A combination of either \\b #RowMajor or \\b #ColMajor, and of either\n  *                 \\b #AutoAlign or \\b #DontAlign.\n  *                 The former controls \\ref TopicStorageOrders \"storage order\", and defaults to column-major. The latter controls alignment, which is required\n  *                 for vectorization. It defaults to aligning matrices except for fixed sizes that aren't a multiple of the packet size.\n  * \\tparam _MaxRows Maximum number of rows. Defaults to \\a _Rows (\\ref maxrows \"note\").\n  * \\tparam _MaxCols Maximum number of columns. Defaults to \\a _Cols (\\ref maxrows \"note\").\n  *\n  * Eigen provides a number of typedefs covering the usual cases. 
Here are some examples:\n  *\n  * \\li \\c Matrix2d is a 2x2 square matrix of doubles (\\c Matrix<double, 2, 2>)\n  * \\li \\c Vector4f is a vector of 4 floats (\\c Matrix<float, 4, 1>)\n  * \\li \\c RowVector3i is a row-vector of 3 ints (\\c Matrix<int, 1, 3>)\n  *\n  * \\li \\c MatrixXf is a dynamic-size matrix of floats (\\c Matrix<float, Dynamic, Dynamic>)\n  * \\li \\c VectorXf is a dynamic-size vector of floats (\\c Matrix<float, Dynamic, 1>)\n  *\n  * \\li \\c Matrix2Xf is a partially fixed-size (dynamic-size) matrix of floats (\\c Matrix<float, 2, Dynamic>)\n  * \\li \\c MatrixX3d is a partially dynamic-size (fixed-size) matrix of double (\\c Matrix<double, Dynamic, 3>)\n  *\n  * See \\link matrixtypedefs this page \\endlink for a complete list of predefined \\em %Matrix and \\em Vector typedefs.\n  *\n  * You can access elements of vectors and matrices using normal subscripting:\n  *\n  * \\code\n  * Eigen::VectorXd v(10);\n  * v[0] = 0.1;\n  * v[1] = 0.2;\n  * v(0) = 0.3;\n  * v(1) = 0.4;\n  *\n  * Eigen::MatrixXi m(10, 10);\n  * m(0, 1) = 1;\n  * m(0, 2) = 2;\n  * m(0, 3) = 3;\n  * \\endcode\n  *\n  * This class can be extended with the help of the plugin mechanism described on the page\n  * \\ref TopicCustomizing_Plugins by defining the preprocessor symbol \\c EIGEN_MATRIX_PLUGIN.\n  *\n  * <i><b>Some notes:</b></i>\n  *\n  * <dl>\n  * <dt><b>\\anchor dense Dense versus sparse:</b></dt>\n  * <dd>This %Matrix class handles dense, not sparse matrices and vectors. For sparse matrices and vectors, see the Sparse module.\n  *\n  * Dense matrices and vectors are plain usual arrays of coefficients. All the coefficients are stored, in an ordinary contiguous array.\n  * This is unlike Sparse matrices and vectors where the coefficients are stored as a list of nonzero coefficients.</dd>\n  *\n  * <dt><b>\\anchor fixedsize Fixed-size versus dynamic-size:</b></dt>\n  * <dd>Fixed-size means that the numbers of rows and columns are known are compile-time. 
In this case, Eigen allocates the array\n  * of coefficients as a fixed-size array, as a class member. This makes sense for very small matrices, typically up to 4x4, sometimes up\n  * to 16x16. Larger matrices should be declared as dynamic-size even if one happens to know their size at compile-time.\n  *\n  * Dynamic-size means that the numbers of rows or columns are not necessarily known at compile-time. In this case they are runtime\n  * variables, and the array of coefficients is allocated dynamically on the heap.\n  *\n  * Note that \\em dense matrices, be they Fixed-size or Dynamic-size, <em>do not</em> expand dynamically in the sense of a std::map.\n  * If you want this behavior, see the Sparse module.</dd>\n  *\n  * <dt><b>\\anchor maxrows _MaxRows and _MaxCols:</b></dt>\n  * <dd>In most cases, one just leaves these parameters to the default values.\n  * These parameters mean the maximum size of rows and columns that the matrix may have. They are useful in cases\n  * when the exact numbers of rows and columns are not known are compile-time, but it is known at compile-time that they cannot\n  * exceed a certain value. 
This happens when taking dynamic-size blocks inside fixed-size matrices: in this case _MaxRows and _MaxCols\n  * are the dimensions of the original matrix, while _Rows and _Cols are Dynamic.</dd>\n  * </dl>\n  *\n  * <i><b>ABI and storage layout</b></i>\n  *\n  * The table below summarizes the ABI of some possible Matrix instances which is fixed thorough the lifetime of Eigen 3.\n  * <table  class=\"manual\">\n  * <tr><th>Matrix type</th><th>Equivalent C structure</th></tr>\n  * <tr><td>\\code Matrix<T,Dynamic,Dynamic> \\endcode</td><td>\\code\n  * struct {\n  *   T *data;                  // with (size_t(data)%EIGEN_MAX_ALIGN_BYTES)==0\n  *   Eigen::Index rows, cols;\n  *  };\n  * \\endcode</td></tr>\n  * <tr class=\"alt\"><td>\\code\n  * Matrix<T,Dynamic,1>\n  * Matrix<T,1,Dynamic> \\endcode</td><td>\\code\n  * struct {\n  *   T *data;                  // with (size_t(data)%EIGEN_MAX_ALIGN_BYTES)==0\n  *   Eigen::Index size;\n  *  };\n  * \\endcode</td></tr>\n  * <tr><td>\\code Matrix<T,Rows,Cols> \\endcode</td><td>\\code\n  * struct {\n  *   T data[Rows*Cols];        // with (size_t(data)%A(Rows*Cols*sizeof(T)))==0\n  *  };\n  * \\endcode</td></tr>\n  * <tr class=\"alt\"><td>\\code Matrix<T,Dynamic,Dynamic,0,MaxRows,MaxCols> \\endcode</td><td>\\code\n  * struct {\n  *   T data[MaxRows*MaxCols];  // with (size_t(data)%A(MaxRows*MaxCols*sizeof(T)))==0\n  *   Eigen::Index rows, cols;\n  *  };\n  * \\endcode</td></tr>\n  * </table>\n  * Note that in this table Rows, Cols, MaxRows and MaxCols are all positive integers. 
A(S) is defined to the largest possible power-of-two\n  * smaller to EIGEN_MAX_STATIC_ALIGN_BYTES.\n  *\n  * \\see MatrixBase for the majority of the API methods for matrices, \\ref TopicClassHierarchy,\n  * \\ref TopicStorageOrders\n  */\n\ntemplate<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>\nclass Matrix\n  : public PlainObjectBase<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >\n{\n  public:\n\n    /** \\brief Base class typedef.\n      * \\sa PlainObjectBase\n      */\n    typedef PlainObjectBase<Matrix> Base;\n\n    enum { Options = _Options };\n\n    EIGEN_DENSE_PUBLIC_INTERFACE(Matrix)\n\n    typedef typename Base::PlainObject PlainObject;\n\n    using Base::base;\n    using Base::coeffRef;\n\n    /**\n      * \\brief Assigns matrices to each other.\n      *\n      * \\note This is a special case of the templated operator=. Its purpose is\n      * to prevent a default operator= from hiding the templated operator=.\n      *\n      * \\callgraph\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Matrix& operator=(const Matrix& other)\n    {\n      return Base::_set(other);\n    }\n\n    /** \\internal\n      * \\brief Copies the value of the expression \\a other into \\c *this with automatic resizing.\n      *\n      * *this might be resized to match the dimensions of \\a other. 
If *this was a null matrix (not already initialized),\n      * it will be initialized.\n      *\n      * Note that copying a row-vector into a vector (and conversely) is allowed.\n      * The resizing, if any, is then done in the appropriate way so that row-vectors\n      * remain row-vectors and vectors remain vectors.\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Matrix& operator=(const DenseBase<OtherDerived>& other)\n    {\n      return Base::_set(other);\n    }\n\n    /* Here, doxygen failed to copy the brief information when using \\copydoc */\n\n    /**\n      * \\brief Copies the generic expression \\a other into *this.\n      * \\copydetails DenseBase::operator=(const EigenBase<OtherDerived> &other)\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Matrix& operator=(const EigenBase<OtherDerived> &other)\n    {\n      return Base::operator=(other);\n    }\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Matrix& operator=(const ReturnByValue<OtherDerived>& func)\n    {\n      return Base::operator=(func);\n    }\n\n    /** \\brief Default constructor.\n      *\n      * For fixed-size matrices, does nothing.\n      *\n      * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix\n      * is called a null matrix. 
This constructor is the unique way to create null matrices: resizing\n      * a matrix to 0 is not supported.\n      *\n      * \\sa resize(Index,Index)\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Matrix() : Base()\n    {\n      Base::_check_template_params();\n      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED\n    }\n\n    // FIXME is it still needed\n    EIGEN_DEVICE_FUNC\n    explicit Matrix(internal::constructor_without_unaligned_array_assert)\n      : Base(internal::constructor_without_unaligned_array_assert())\n    { Base::_check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED }\n\n#if EIGEN_HAS_RVALUE_REFERENCES\n    EIGEN_DEVICE_FUNC\n    Matrix(Matrix&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_constructible<Scalar>::value)\n      : Base(std::move(other))\n    {\n      Base::_check_template_params();\n      if (RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic)\n        Base::_set_noalias(other);\n    }\n    EIGEN_DEVICE_FUNC\n    Matrix& operator=(Matrix&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable<Scalar>::value)\n    {\n      other.swap(*this);\n      return *this;\n    }\n#endif\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n\n    // This constructor is for both 1x1 matrices and dynamic vectors\n    template<typename T>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE explicit Matrix(const T& x)\n    {\n      Base::_check_template_params();\n      Base::template _init1<T>(x);\n    }\n\n    template<typename T0, typename T1>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Matrix(const T0& x, const T1& y)\n    {\n      Base::_check_template_params();\n      Base::template _init2<T0,T1>(x, y);\n    }\n    #else\n    /** \\brief Constructs a fixed-sized matrix initialized with coefficients starting at \\a data */\n    EIGEN_DEVICE_FUNC\n    explicit Matrix(const Scalar *data);\n\n    /** \\brief Constructs a vector or row-vector with given dimension. 
\\only_for_vectors\n      *\n      * This is useful for dynamic-size vectors. For fixed-size vectors,\n      * it is redundant to pass these parameters, so one should use the default constructor\n      * Matrix() instead.\n      * \n      * \\warning This constructor is disabled for fixed-size \\c 1x1 matrices. For instance,\n      * calling Matrix<double,1,1>(1) will call the initialization constructor: Matrix(const Scalar&).\n      * For fixed-size \\c 1x1 matrices it is therefore recommended to use the default\n      * constructor Matrix() instead, especially when using one of the non standard\n      * \\c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\\c NAN} macros (see \\ref TopicPreprocessorDirectives).\n      */\n    EIGEN_STRONG_INLINE explicit Matrix(Index dim);\n    /** \\brief Constructs an initialized 1x1 matrix with the given coefficient */\n    Matrix(const Scalar& x);\n    /** \\brief Constructs an uninitialized matrix with \\a rows rows and \\a cols columns.\n      *\n      * This is useful for dynamic-size matrices. For fixed-size matrices,\n      * it is redundant to pass these parameters, so one should use the default constructor\n      * Matrix() instead.\n      * \n      * \\warning This constructor is disabled for fixed-size \\c 1x2 and \\c 2x1 vectors. 
For instance,\n      * calling Matrix2f(2,1) will call the initialization constructor: Matrix(const Scalar& x, const Scalar& y).\n      * For fixed-size \\c 1x2 or \\c 2x1 vectors it is therefore recommended to use the default\n      * constructor Matrix() instead, especially when using one of the non standard\n      * \\c EIGEN_INITIALIZE_MATRICES_BY_{ZERO,\\c NAN} macros (see \\ref TopicPreprocessorDirectives).\n      */\n    EIGEN_DEVICE_FUNC\n    Matrix(Index rows, Index cols);\n    \n    /** \\brief Constructs an initialized 2D vector with given coefficients */\n    Matrix(const Scalar& x, const Scalar& y);\n    #endif\n\n    /** \\brief Constructs an initialized 3D vector with given coefficients */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z)\n    {\n      Base::_check_template_params();\n      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 3)\n      m_storage.data()[0] = x;\n      m_storage.data()[1] = y;\n      m_storage.data()[2] = z;\n    }\n    /** \\brief Constructs an initialized 4D vector with given coefficients */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w)\n    {\n      Base::_check_template_params();\n      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 4)\n      m_storage.data()[0] = x;\n      m_storage.data()[1] = y;\n      m_storage.data()[2] = z;\n      m_storage.data()[3] = w;\n    }\n\n\n    /** \\brief Copy constructor */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Matrix(const Matrix& other) : Base(other)\n    { }\n\n    /** \\brief Copy constructor for generic expressions.\n      * \\sa MatrixBase::operator=(const EigenBase<OtherDerived>&)\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Matrix(const EigenBase<OtherDerived> &other)\n      : Base(other.derived())\n    { }\n\n    EIGEN_DEVICE_FUNC inline Index innerStride() const { return 1; }\n    
EIGEN_DEVICE_FUNC inline Index outerStride() const { return this->innerSize(); }\n\n    /////////// Geometry module ///////////\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    explicit Matrix(const RotationBase<OtherDerived,ColsAtCompileTime>& r);\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    Matrix& operator=(const RotationBase<OtherDerived,ColsAtCompileTime>& r);\n\n    // allow to extend Matrix outside Eigen\n    #ifdef EIGEN_MATRIX_PLUGIN\n    #include EIGEN_MATRIX_PLUGIN\n    #endif\n\n  protected:\n    template <typename Derived, typename OtherDerived, bool IsVector>\n    friend struct internal::conservative_resize_like_impl;\n\n    using Base::m_storage;\n};\n\n/** \\defgroup matrixtypedefs Global matrix typedefs\n  *\n  * \\ingroup Core_Module\n  *\n  * Eigen defines several typedef shortcuts for most common matrix and vector types.\n  *\n  * The general patterns are the following:\n  *\n  * \\c MatrixSizeType where \\c Size can be \\c 2,\\c 3,\\c 4 for fixed size square matrices or \\c X for dynamic size,\n  * and where \\c Type can be \\c i for integer, \\c f for float, \\c d for double, \\c cf for complex float, \\c cd\n  * for complex double.\n  *\n  * For example, \\c Matrix3d is a fixed-size 3x3 matrix type of doubles, and \\c MatrixXf is a dynamic-size matrix of floats.\n  *\n  * There are also \\c VectorSizeType and \\c RowVectorSizeType which are self-explanatory. 
For example, \\c Vector4cf is\n  * a fixed-size vector of 4 complex floats.\n  *\n  * \\sa class Matrix\n  */\n\n#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix)   \\\n/** \\ingroup matrixtypedefs */                                    \\\ntypedef Matrix<Type, Size, Size> Matrix##SizeSuffix##TypeSuffix;  \\\n/** \\ingroup matrixtypedefs */                                    \\\ntypedef Matrix<Type, Size, 1>    Vector##SizeSuffix##TypeSuffix;  \\\n/** \\ingroup matrixtypedefs */                                    \\\ntypedef Matrix<Type, 1, Size>    RowVector##SizeSuffix##TypeSuffix;\n\n#define EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, Size)         \\\n/** \\ingroup matrixtypedefs */                                    \\\ntypedef Matrix<Type, Size, Dynamic> Matrix##Size##X##TypeSuffix;  \\\n/** \\ingroup matrixtypedefs */                                    \\\ntypedef Matrix<Type, Dynamic, Size> Matrix##X##Size##TypeSuffix;\n\n#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \\\nEIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \\\nEIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \\\nEIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \\\nEIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \\\nEIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \\\nEIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \\\nEIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 4)\n\nEIGEN_MAKE_TYPEDEFS_ALL_SIZES(int,                  i)\nEIGEN_MAKE_TYPEDEFS_ALL_SIZES(float,                f)\nEIGEN_MAKE_TYPEDEFS_ALL_SIZES(double,               d)\nEIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<float>,  cf)\nEIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)\n\n#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES\n#undef EIGEN_MAKE_TYPEDEFS\n#undef EIGEN_MAKE_FIXED_TYPEDEFS\n\n} // end namespace Eigen\n\n#endif // EIGEN_MATRIX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/MatrixBase.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_MATRIXBASE_H\n#define EIGEN_MATRIXBASE_H\n\nnamespace Eigen {\n\n/** \\class MatrixBase\n  * \\ingroup Core_Module\n  *\n  * \\brief Base class for all dense matrices, vectors, and expressions\n  *\n  * This class is the base that is inherited by all matrix, vector, and related expression\n  * types. Most of the Eigen API is contained in this class, and its base classes. Other important\n  * classes for the Eigen API are Matrix, and VectorwiseOp.\n  *\n  * Note that some methods are defined in other modules such as the \\ref LU_Module LU module\n  * for all functions related to matrix inversions.\n  *\n  * \\tparam Derived is the derived type, e.g. a matrix type, or an expression, etc.\n  *\n  * When writing a function taking Eigen objects as argument, if you want your function\n  * to take as argument any matrix, vector, or expression, just let it take a\n  * MatrixBase argument. 
As an example, here is a function printFirstRow which, given\n  * a matrix, vector, or expression \\a x, prints the first row of \\a x.\n  *\n  * \\code\n    template<typename Derived>\n    void printFirstRow(const Eigen::MatrixBase<Derived>& x)\n    {\n      cout << x.row(0) << endl;\n    }\n  * \\endcode\n  *\n  * This class can be extended with the help of the plugin mechanism described on the page\n  * \\ref TopicCustomizing_Plugins by defining the preprocessor symbol \\c EIGEN_MATRIXBASE_PLUGIN.\n  *\n  * \\sa \\blank \\ref TopicClassHierarchy\n  */\ntemplate<typename Derived> class MatrixBase\n  : public DenseBase<Derived>\n{\n  public:\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    typedef MatrixBase StorageBaseType;\n    typedef typename internal::traits<Derived>::StorageKind StorageKind;\n    typedef typename internal::traits<Derived>::StorageIndex StorageIndex;\n    typedef typename internal::traits<Derived>::Scalar Scalar;\n    typedef typename internal::packet_traits<Scalar>::type PacketScalar;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n\n    typedef DenseBase<Derived> Base;\n    using Base::RowsAtCompileTime;\n    using Base::ColsAtCompileTime;\n    using Base::SizeAtCompileTime;\n    using Base::MaxRowsAtCompileTime;\n    using Base::MaxColsAtCompileTime;\n    using Base::MaxSizeAtCompileTime;\n    using Base::IsVectorAtCompileTime;\n    using Base::Flags;\n\n    using Base::derived;\n    using Base::const_cast_derived;\n    using Base::rows;\n    using Base::cols;\n    using Base::size;\n    using Base::coeff;\n    using Base::coeffRef;\n    using Base::lazyAssign;\n    using Base::eval;\n    using Base::operator-;\n    using Base::operator+=;\n    using Base::operator-=;\n    using Base::operator*=;\n    using Base::operator/=;\n\n    typedef typename Base::CoeffReturnType CoeffReturnType;\n    typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType;\n    typedef typename Base::RowXpr RowXpr;\n    typedef typename 
Base::ColXpr ColXpr;\n#endif // not EIGEN_PARSED_BY_DOXYGEN\n\n\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** type of the equivalent square matrix */\n    typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),\n                          EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;\n#endif // not EIGEN_PARSED_BY_DOXYGEN\n\n    /** \\returns the size of the main diagonal, which is min(rows(),cols()).\n      * \\sa rows(), cols(), SizeAtCompileTime. */\n    EIGEN_DEVICE_FUNC\n    inline Index diagonalSize() const { return (numext::mini)(rows(),cols()); }\n\n    typedef typename Base::PlainObject PlainObject;\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** \\internal Represents a matrix with all coefficients equal to one another*/\n    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,PlainObject> ConstantReturnType;\n    /** \\internal the return type of MatrixBase::adjoint() */\n    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,\n                        CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, ConstTransposeReturnType>,\n                        ConstTransposeReturnType\n                     >::type AdjointReturnType;\n    /** \\internal Return type of eigenvalues() */\n    typedef Matrix<std::complex<RealScalar>, internal::traits<Derived>::ColsAtCompileTime, 1, ColMajor> EigenvaluesReturnType;\n    /** \\internal the return type of identity */\n    typedef CwiseNullaryOp<internal::scalar_identity_op<Scalar>,PlainObject> IdentityReturnType;\n    /** \\internal the return type of unit vectors */\n    typedef Block<const CwiseNullaryOp<internal::scalar_identity_op<Scalar>, SquareMatrixType>,\n                  internal::traits<Derived>::RowsAtCompileTime,\n                  internal::traits<Derived>::ColsAtCompileTime> BasisReturnType;\n#endif // not EIGEN_PARSED_BY_DOXYGEN\n\n#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::MatrixBase\n#define EIGEN_DOC_UNARY_ADDONS(X,Y)\n#   include 
\"../plugins/CommonCwiseBinaryOps.h\"\n#   include \"../plugins/MatrixCwiseUnaryOps.h\"\n#   include \"../plugins/MatrixCwiseBinaryOps.h\"\n#   ifdef EIGEN_MATRIXBASE_PLUGIN\n#     include EIGEN_MATRIXBASE_PLUGIN\n#   endif\n#undef EIGEN_CURRENT_STORAGE_BASE_CLASS\n#undef EIGEN_DOC_UNARY_ADDONS\n\n    /** Special case of the template operator=, in order to prevent the compiler\n      * from generating a default operator= (issue hit with g++ 4.1)\n      */\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator=(const MatrixBase& other);\n\n    // We cannot inherit here via Base::operator= since it is causing\n    // trouble with MSVC.\n\n    template <typename OtherDerived>\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator=(const DenseBase<OtherDerived>& other);\n\n    template <typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    Derived& operator=(const EigenBase<OtherDerived>& other);\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    Derived& operator=(const ReturnByValue<OtherDerived>& other);\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator+=(const MatrixBase<OtherDerived>& other);\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n    Derived& operator-=(const MatrixBase<OtherDerived>& other);\n\n#ifdef __CUDACC__\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    const Product<Derived,OtherDerived,LazyProduct>\n    operator*(const MatrixBase<OtherDerived> &other) const\n    { return this->lazyProduct(other); }\n#else\n\n    template<typename OtherDerived>\n    const Product<Derived,OtherDerived>\n    operator*(const MatrixBase<OtherDerived> &other) const;\n\n#endif\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    const Product<Derived,OtherDerived,LazyProduct>\n    lazyProduct(const MatrixBase<OtherDerived> &other) const;\n\n    template<typename OtherDerived>\n    Derived& operator*=(const 
EigenBase<OtherDerived>& other);\n\n    template<typename OtherDerived>\n    void applyOnTheLeft(const EigenBase<OtherDerived>& other);\n\n    template<typename OtherDerived>\n    void applyOnTheRight(const EigenBase<OtherDerived>& other);\n\n    template<typename DiagonalDerived>\n    EIGEN_DEVICE_FUNC\n    const Product<Derived, DiagonalDerived, LazyProduct>\n    operator*(const DiagonalBase<DiagonalDerived> &diagonal) const;\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType\n    dot(const MatrixBase<OtherDerived>& other) const;\n\n    EIGEN_DEVICE_FUNC RealScalar squaredNorm() const;\n    EIGEN_DEVICE_FUNC RealScalar norm() const;\n    RealScalar stableNorm() const;\n    RealScalar blueNorm() const;\n    RealScalar hypotNorm() const;\n    EIGEN_DEVICE_FUNC const PlainObject normalized() const;\n    EIGEN_DEVICE_FUNC const PlainObject stableNormalized() const;\n    EIGEN_DEVICE_FUNC void normalize();\n    EIGEN_DEVICE_FUNC void stableNormalize();\n\n    EIGEN_DEVICE_FUNC const AdjointReturnType adjoint() const;\n    EIGEN_DEVICE_FUNC void adjointInPlace();\n\n    typedef Diagonal<Derived> DiagonalReturnType;\n    EIGEN_DEVICE_FUNC\n    DiagonalReturnType diagonal();\n\n    typedef typename internal::add_const<Diagonal<const Derived> >::type ConstDiagonalReturnType;\n    EIGEN_DEVICE_FUNC\n    ConstDiagonalReturnType diagonal() const;\n\n    template<int Index> struct DiagonalIndexReturnType { typedef Diagonal<Derived,Index> Type; };\n    template<int Index> struct ConstDiagonalIndexReturnType { typedef const Diagonal<const Derived,Index> Type; };\n\n    template<int Index>\n    EIGEN_DEVICE_FUNC\n    typename DiagonalIndexReturnType<Index>::Type diagonal();\n\n    template<int Index>\n    EIGEN_DEVICE_FUNC\n    typename ConstDiagonalIndexReturnType<Index>::Type diagonal() const;\n\n    typedef 
Diagonal<Derived,DynamicIndex> DiagonalDynamicIndexReturnType;\n    typedef typename internal::add_const<Diagonal<const Derived,DynamicIndex> >::type ConstDiagonalDynamicIndexReturnType;\n\n    EIGEN_DEVICE_FUNC\n    DiagonalDynamicIndexReturnType diagonal(Index index);\n    EIGEN_DEVICE_FUNC\n    ConstDiagonalDynamicIndexReturnType diagonal(Index index) const;\n\n    template<unsigned int Mode> struct TriangularViewReturnType { typedef TriangularView<Derived, Mode> Type; };\n    template<unsigned int Mode> struct ConstTriangularViewReturnType { typedef const TriangularView<const Derived, Mode> Type; };\n\n    template<unsigned int Mode>\n    EIGEN_DEVICE_FUNC\n    typename TriangularViewReturnType<Mode>::Type triangularView();\n    template<unsigned int Mode>\n    EIGEN_DEVICE_FUNC\n    typename ConstTriangularViewReturnType<Mode>::Type triangularView() const;\n\n    template<unsigned int UpLo> struct SelfAdjointViewReturnType { typedef SelfAdjointView<Derived, UpLo> Type; };\n    template<unsigned int UpLo> struct ConstSelfAdjointViewReturnType { typedef const SelfAdjointView<const Derived, UpLo> Type; };\n\n    template<unsigned int UpLo>\n    EIGEN_DEVICE_FUNC\n    typename SelfAdjointViewReturnType<UpLo>::Type selfadjointView();\n    template<unsigned int UpLo>\n    EIGEN_DEVICE_FUNC\n    typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;\n\n    const SparseView<Derived> sparseView(const Scalar& m_reference = Scalar(0),\n                                         const typename NumTraits<Scalar>::Real& m_epsilon = NumTraits<Scalar>::dummy_precision()) const;\n    EIGEN_DEVICE_FUNC static const IdentityReturnType Identity();\n    EIGEN_DEVICE_FUNC static const IdentityReturnType Identity(Index rows, Index cols);\n    EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index size, Index i);\n    EIGEN_DEVICE_FUNC static const BasisReturnType Unit(Index i);\n    EIGEN_DEVICE_FUNC static const BasisReturnType UnitX();\n    EIGEN_DEVICE_FUNC 
static const BasisReturnType UnitY();\n    EIGEN_DEVICE_FUNC static const BasisReturnType UnitZ();\n    EIGEN_DEVICE_FUNC static const BasisReturnType UnitW();\n\n    EIGEN_DEVICE_FUNC\n    const DiagonalWrapper<const Derived> asDiagonal() const;\n    const PermutationWrapper<const Derived> asPermutation() const;\n\n    EIGEN_DEVICE_FUNC\n    Derived& setIdentity();\n    EIGEN_DEVICE_FUNC\n    Derived& setIdentity(Index rows, Index cols);\n\n    bool isIdentity(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;\n    bool isDiagonal(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;\n\n    bool isUpperTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;\n    bool isLowerTriangular(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;\n\n    template<typename OtherDerived>\n    bool isOrthogonal(const MatrixBase<OtherDerived>& other,\n                      const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;\n    bool isUnitary(const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;\n\n    /** \\returns true if each coefficients of \\c *this and \\a other are all exactly equal.\n      * \\warning When using floating point scalar values you probably should rather use a\n      *          fuzzy comparison such as isApprox()\n      * \\sa isApprox(), operator!= */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC inline bool operator==(const MatrixBase<OtherDerived>& other) const\n    { return cwiseEqual(other).all(); }\n\n    /** \\returns true if at least one pair of coefficients of \\c *this and \\a other are not exactly equal to each other.\n      * \\warning When using floating point scalar values you probably should rather use a\n      *          fuzzy comparison such as isApprox()\n      * \\sa isApprox(), operator== */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC inline bool operator!=(const MatrixBase<OtherDerived>& other) 
const\n    { return cwiseNotEqual(other).any(); }\n\n    NoAlias<Derived,Eigen::MatrixBase > noalias();\n\n    // TODO forceAlignedAccess is temporarily disabled\n    // Need to find a nicer workaround.\n    inline const Derived& forceAlignedAccess() const { return derived(); }\n    inline Derived& forceAlignedAccess() { return derived(); }\n    template<bool Enable> inline const Derived& forceAlignedAccessIf() const { return derived(); }\n    template<bool Enable> inline Derived& forceAlignedAccessIf() { return derived(); }\n\n    EIGEN_DEVICE_FUNC Scalar trace() const;\n\n    template<int p> EIGEN_DEVICE_FUNC RealScalar lpNorm() const;\n\n    EIGEN_DEVICE_FUNC MatrixBase<Derived>& matrix() { return *this; }\n    EIGEN_DEVICE_FUNC const MatrixBase<Derived>& matrix() const { return *this; }\n\n    /** \\returns an \\link Eigen::ArrayBase Array \\endlink expression of this matrix\n      * \\sa ArrayBase::matrix() */\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ArrayWrapper<Derived> array() { return ArrayWrapper<Derived>(derived()); }\n    /** \\returns a const \\link Eigen::ArrayBase Array \\endlink expression of this matrix\n      * \\sa ArrayBase::matrix() */\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const ArrayWrapper<const Derived> array() const { return ArrayWrapper<const Derived>(derived()); }\n\n/////////// LU module ///////////\n\n    inline const FullPivLU<PlainObject> fullPivLu() const;\n    inline const PartialPivLU<PlainObject> partialPivLu() const;\n\n    inline const PartialPivLU<PlainObject> lu() const;\n\n    inline const Inverse<Derived> inverse() const;\n\n    template<typename ResultType>\n    inline void computeInverseAndDetWithCheck(\n      ResultType& inverse,\n      typename ResultType::Scalar& determinant,\n      bool& invertible,\n      const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()\n    ) const;\n    template<typename ResultType>\n    inline void computeInverseWithCheck(\n      ResultType& inverse,\n      
bool& invertible,\n      const RealScalar& absDeterminantThreshold = NumTraits<Scalar>::dummy_precision()\n    ) const;\n    Scalar determinant() const;\n\n/////////// Cholesky module ///////////\n\n    inline const LLT<PlainObject>  llt() const;\n    inline const LDLT<PlainObject> ldlt() const;\n\n/////////// QR module ///////////\n\n    inline const HouseholderQR<PlainObject> householderQr() const;\n    inline const ColPivHouseholderQR<PlainObject> colPivHouseholderQr() const;\n    inline const FullPivHouseholderQR<PlainObject> fullPivHouseholderQr() const;\n    inline const CompleteOrthogonalDecomposition<PlainObject> completeOrthogonalDecomposition() const;\n\n/////////// Eigenvalues module ///////////\n\n    inline EigenvaluesReturnType eigenvalues() const;\n    inline RealScalar operatorNorm() const;\n\n/////////// SVD module ///////////\n\n    inline JacobiSVD<PlainObject> jacobiSvd(unsigned int computationOptions = 0) const;\n    inline BDCSVD<PlainObject>    bdcSvd(unsigned int computationOptions = 0) const;\n\n/////////// Geometry module ///////////\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /// \\internal helper struct to form the return type of the cross product\n    template<typename OtherDerived> struct cross_product_return_type {\n      typedef typename ScalarBinaryOpTraits<typename internal::traits<Derived>::Scalar,typename internal::traits<OtherDerived>::Scalar>::ReturnType Scalar;\n      typedef Matrix<Scalar,MatrixBase::RowsAtCompileTime,MatrixBase::ColsAtCompileTime> type;\n    };\n    #endif // EIGEN_PARSED_BY_DOXYGEN\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    inline typename cross_product_return_type<OtherDerived>::type\n#else\n    inline PlainObject\n#endif\n    cross(const MatrixBase<OtherDerived>& other) const;\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    inline PlainObject cross3(const MatrixBase<OtherDerived>& other) const;\n\n    EIGEN_DEVICE_FUNC\n    inline 
PlainObject unitOrthogonal(void) const;\n\n    EIGEN_DEVICE_FUNC\n    inline Matrix<Scalar,3,1> eulerAngles(Index a0, Index a1, Index a2) const;\n\n    // put this as separate enum value to work around possible GCC 4.3 bug (?)\n    enum { HomogeneousReturnTypeDirection = ColsAtCompileTime==1&&RowsAtCompileTime==1 ? ((internal::traits<Derived>::Flags&RowMajorBit)==RowMajorBit ? Horizontal : Vertical)\n                                          : ColsAtCompileTime==1 ? Vertical : Horizontal };\n    typedef Homogeneous<Derived, HomogeneousReturnTypeDirection> HomogeneousReturnType;\n    EIGEN_DEVICE_FUNC\n    inline HomogeneousReturnType homogeneous() const;\n\n    enum {\n      SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1\n    };\n    typedef Block<const Derived,\n                  internal::traits<Derived>::ColsAtCompileTime==1 ? SizeMinusOne : 1,\n                  internal::traits<Derived>::ColsAtCompileTime==1 ? 1 : SizeMinusOne> ConstStartMinusOne;\n    typedef EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(ConstStartMinusOne,Scalar,quotient) HNormalizedReturnType;\n    EIGEN_DEVICE_FUNC\n    inline const HNormalizedReturnType hnormalized() const;\n\n////////// Householder module ///////////\n\n    void makeHouseholderInPlace(Scalar& tau, RealScalar& beta);\n    template<typename EssentialPart>\n    void makeHouseholder(EssentialPart& essential,\n                         Scalar& tau, RealScalar& beta) const;\n    template<typename EssentialPart>\n    void applyHouseholderOnTheLeft(const EssentialPart& essential,\n                                   const Scalar& tau,\n                                   Scalar* workspace);\n    template<typename EssentialPart>\n    void applyHouseholderOnTheRight(const EssentialPart& essential,\n                                    const Scalar& tau,\n                                    Scalar* workspace);\n\n///////// Jacobi module /////////\n\n    template<typename OtherScalar>\n    void applyOnTheLeft(Index 
p, Index q, const JacobiRotation<OtherScalar>& j);\n    template<typename OtherScalar>\n    void applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j);\n\n///////// SparseCore module /////////\n\n    template<typename OtherDerived>\n    EIGEN_STRONG_INLINE const typename SparseMatrixBase<OtherDerived>::template CwiseProductDenseReturnType<Derived>::Type\n    cwiseProduct(const SparseMatrixBase<OtherDerived> &other) const\n    {\n      return other.cwiseProduct(derived());\n    }\n\n///////// MatrixFunctions module /////////\n\n    typedef typename internal::stem_function<Scalar>::type StemFunction;\n    const MatrixExponentialReturnValue<Derived> exp() const;\n    const MatrixFunctionReturnValue<Derived> matrixFunction(StemFunction f) const;\n    const MatrixFunctionReturnValue<Derived> cosh() const;\n    const MatrixFunctionReturnValue<Derived> sinh() const;\n    const MatrixFunctionReturnValue<Derived> cos() const;\n    const MatrixFunctionReturnValue<Derived> sin() const;\n    const MatrixSquareRootReturnValue<Derived> sqrt() const;\n    const MatrixLogarithmReturnValue<Derived> log() const;\n    const MatrixPowerReturnValue<Derived> pow(const RealScalar& p) const;\n    const MatrixComplexPowerReturnValue<Derived> pow(const std::complex<RealScalar>& p) const;\n\n  protected:\n    EIGEN_DEVICE_FUNC MatrixBase() : Base() {}\n\n  private:\n    EIGEN_DEVICE_FUNC explicit MatrixBase(int);\n    EIGEN_DEVICE_FUNC MatrixBase(int,int);\n    template<typename OtherDerived> EIGEN_DEVICE_FUNC explicit MatrixBase(const MatrixBase<OtherDerived>&);\n  protected:\n    // mixing arrays and matrices is not legal\n    template<typename OtherDerived> Derived& operator+=(const ArrayBase<OtherDerived>& )\n    {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}\n    // mixing arrays and matrices is not legal\n    template<typename OtherDerived> Derived& operator-=(const 
ArrayBase<OtherDerived>& )\n    {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;}\n};\n\n\n/***************************************************************************\n* Implementation of matrix base methods\n***************************************************************************/\n\n/** replaces \\c *this by \\c *this * \\a other.\n  *\n  * \\returns a reference to \\c *this\n  *\n  * Example: \\include MatrixBase_applyOnTheRight.cpp\n  * Output: \\verbinclude MatrixBase_applyOnTheRight.out\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\ninline Derived&\nMatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)\n{\n  other.derived().applyThisOnTheRight(derived());\n  return derived();\n}\n\n/** replaces \\c *this by \\c *this * \\a other. It is equivalent to MatrixBase::operator*=().\n  *\n  * Example: \\include MatrixBase_applyOnTheRight.cpp\n  * Output: \\verbinclude MatrixBase_applyOnTheRight.out\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\ninline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)\n{\n  other.derived().applyThisOnTheRight(derived());\n}\n\n/** replaces \\c *this by \\a other * \\c *this.\n  *\n  * Example: \\include MatrixBase_applyOnTheLeft.cpp\n  * Output: \\verbinclude MatrixBase_applyOnTheLeft.out\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\ninline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)\n{\n  other.derived().applyThisOnTheLeft(derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_MATRIXBASE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/NestByValue.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_NESTBYVALUE_H\n#define EIGEN_NESTBYVALUE_H\n\nnamespace Eigen {\n\nnamespace internal {\ntemplate<typename ExpressionType>\nstruct traits<NestByValue<ExpressionType> > : public traits<ExpressionType>\n{};\n}\n\n/** \\class NestByValue\n  * \\ingroup Core_Module\n  *\n  * \\brief Expression which must be nested by value\n  *\n  * \\tparam ExpressionType the type of the object of which we are requiring nesting-by-value\n  *\n  * This class is the return type of MatrixBase::nestByValue()\n  * and most of the time this is the only way it is used.\n  *\n  * \\sa MatrixBase::nestByValue()\n  */\ntemplate<typename ExpressionType> class NestByValue\n  : public internal::dense_xpr_base< NestByValue<ExpressionType> >::type\n{\n  public:\n\n    typedef typename internal::dense_xpr_base<NestByValue>::type Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(NestByValue)\n\n    EIGEN_DEVICE_FUNC explicit inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {}\n\n    EIGEN_DEVICE_FUNC inline Index rows() const { return m_expression.rows(); }\n    EIGEN_DEVICE_FUNC inline Index cols() const { return m_expression.cols(); }\n    EIGEN_DEVICE_FUNC inline Index outerStride() const { return m_expression.outerStride(); }\n    EIGEN_DEVICE_FUNC inline Index innerStride() const { return m_expression.innerStride(); }\n\n    EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index row, Index col) const\n    {\n      return m_expression.coeff(row, col);\n    }\n\n    EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index row, 
Index col)\n    {\n      return m_expression.const_cast_derived().coeffRef(row, col);\n    }\n\n    EIGEN_DEVICE_FUNC inline const CoeffReturnType coeff(Index index) const\n    {\n      return m_expression.coeff(index);\n    }\n\n    EIGEN_DEVICE_FUNC inline Scalar& coeffRef(Index index)\n    {\n      return m_expression.const_cast_derived().coeffRef(index);\n    }\n\n    template<int LoadMode>\n    EIGEN_DEVICE_FUNC inline const PacketScalar packet(Index row, Index col) const\n    {\n      return m_expression.template packet<LoadMode>(row, col);\n    }\n\n    template<int LoadMode>\n    EIGEN_DEVICE_FUNC inline void writePacket(Index row, Index col, const PacketScalar& x)\n    {\n      m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);\n    }\n\n    template<int LoadMode>\n    EIGEN_DEVICE_FUNC inline const PacketScalar packet(Index index) const\n    {\n      return m_expression.template packet<LoadMode>(index);\n    }\n\n    template<int LoadMode>\n    EIGEN_DEVICE_FUNC inline void writePacket(Index index, const PacketScalar& x)\n    {\n      m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);\n    }\n\n    EIGEN_DEVICE_FUNC operator const ExpressionType&() const { return m_expression; }\n\n  protected:\n    const ExpressionType m_expression;\n};\n\n/** \\returns an expression of the temporary version of *this.\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline const NestByValue<Derived>\nDenseBase<Derived>::nestByValue() const\n{\n  return NestByValue<Derived>(derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_NESTBYVALUE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/NoAlias.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_NOALIAS_H\n#define EIGEN_NOALIAS_H\n\nnamespace Eigen {\n\n/** \\class NoAlias\n  * \\ingroup Core_Module\n  *\n  * \\brief Pseudo expression providing an operator = assuming no aliasing\n  *\n  * \\tparam ExpressionType the type of the object on which to do the lazy assignment\n  *\n  * This class represents an expression with special assignment operators\n  * assuming no aliasing between the target expression and the source expression.\n  * More precisely it alloas to bypass the EvalBeforeAssignBit flag of the source expression.\n  * It is the return type of MatrixBase::noalias()\n  * and most of the time this is the only way it is used.\n  *\n  * \\sa MatrixBase::noalias()\n  */\ntemplate<typename ExpressionType, template <typename> class StorageBase>\nclass NoAlias\n{\n  public:\n    typedef typename ExpressionType::Scalar Scalar;\n    \n    explicit NoAlias(ExpressionType& expression) : m_expression(expression) {}\n    \n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase<OtherDerived>& other)\n    {\n      call_assignment_no_alias(m_expression, other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());\n      return m_expression;\n    }\n    \n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase<OtherDerived>& other)\n    {\n      call_assignment_no_alias(m_expression, other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());\n      return m_expression;\n    }\n    \n    
template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase<OtherDerived>& other)\n    {\n      call_assignment_no_alias(m_expression, other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());\n      return m_expression;\n    }\n\n    EIGEN_DEVICE_FUNC\n    ExpressionType& expression() const\n    {\n      return m_expression;\n    }\n\n  protected:\n    ExpressionType& m_expression;\n};\n\n/** \\returns a pseudo expression of \\c *this with an operator= assuming\n  * no aliasing between \\c *this and the source expression.\n  *\n  * More precisely, noalias() allows to bypass the EvalBeforeAssignBit flag.\n  * Currently, even though several expressions may alias, only product\n  * expressions have this flag. Therefore, noalias() is only usefull when\n  * the source expression contains a matrix product.\n  *\n  * Here are some examples where noalias is usefull:\n  * \\code\n  * D.noalias()  = A * B;\n  * D.noalias() += A.transpose() * B;\n  * D.noalias() -= 2 * A * B.adjoint();\n  * \\endcode\n  *\n  * On the other hand the following example will lead to a \\b wrong result:\n  * \\code\n  * A.noalias() = A * B;\n  * \\endcode\n  * because the result matrix A is also an operand of the matrix product. Therefore,\n  * there is no alternative than evaluating A * B in a temporary, that is the default\n  * behavior when you write:\n  * \\code\n  * A = A * B;\n  * \\endcode\n  *\n  * \\sa class NoAlias\n  */\ntemplate<typename Derived>\nNoAlias<Derived,MatrixBase> MatrixBase<Derived>::noalias()\n{\n  return NoAlias<Derived, Eigen::MatrixBase >(derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_NOALIAS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/NumTraits.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_NUMTRAITS_H\n#define EIGEN_NUMTRAITS_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n// default implementation of digits10(), based on numeric_limits if specialized,\n// 0 for integer types, and log10(epsilon()) otherwise.\ntemplate< typename T,\n          bool use_numeric_limits = std::numeric_limits<T>::is_specialized,\n          bool is_integer = NumTraits<T>::IsInteger>\nstruct default_digits10_impl\n{\n  static int run() { return std::numeric_limits<T>::digits10; }\n};\n\ntemplate<typename T>\nstruct default_digits10_impl<T,false,false> // Floating point\n{\n  static int run() {\n    using std::log10;\n    using std::ceil;\n    typedef typename NumTraits<T>::Real Real;\n    return int(ceil(-log10(NumTraits<Real>::epsilon())));\n  }\n};\n\ntemplate<typename T>\nstruct default_digits10_impl<T,false,true> // Integer\n{\n  static int run() { return 0; }\n};\n\n} // end namespace internal\n\n/** \\class NumTraits\n  * \\ingroup Core_Module\n  *\n  * \\brief Holds information about the various numeric (i.e. scalar) types allowed by Eigen.\n  *\n  * \\tparam T the numeric type at hand\n  *\n  * This class stores enums, typedefs and static methods giving information about a numeric type.\n  *\n  * The provided data consists of:\n  * \\li A typedef \\c Real, giving the \"real part\" type of \\a T. If \\a T is already real,\n  *     then \\c Real is just a typedef to \\a T. 
If \\a T is \\c std::complex<U> then \\c Real\n  *     is a typedef to \\a U.\n  * \\li A typedef \\c NonInteger, giving the type that should be used for operations producing non-integral values,\n  *     such as quotients, square roots, etc. If \\a T is a floating-point type, then this typedef just gives\n  *     \\a T again. Note however that many Eigen functions such as internal::sqrt simply refuse to\n  *     take integers. Outside of a few cases, Eigen doesn't do automatic type promotion. Thus, this typedef is\n  *     only intended as a helper for code that needs to explicitly promote types.\n  * \\li A typedef \\c Literal giving the type to use for numeric literals such as \"2\" or \"0.5\". For instance, for \\c std::complex<U>, Literal is defined as \\c U.\n  *     Of course, this type must be fully compatible with \\a T. In doubt, just use \\a T here.\n  * \\li A typedef \\a Nested giving the type to use to nest a value inside of the expression tree. If you don't know what\n  *     this means, just use \\a T here.\n  * \\li An enum value \\a IsComplex. It is equal to 1 if \\a T is a \\c std::complex\n  *     type, and to 0 otherwise.\n  * \\li An enum value \\a IsInteger. It is equal to \\c 1 if \\a T is an integer type such as \\c int,\n  *     and to \\c 0 otherwise.\n  * \\li Enum values ReadCost, AddCost and MulCost representing a rough estimate of the number of CPU cycles needed\n  *     to by move / add / mul instructions respectively, assuming the data is already stored in CPU registers.\n  *     Stay vague here. No need to do architecture-specific stuff. If you don't know what this means, just use \\c Eigen::HugeCost.\n  * \\li An enum value \\a IsSigned. It is equal to \\c 1 if \\a T is a signed type and to 0 if \\a T is unsigned.\n  * \\li An enum value \\a RequireInitialization. It is equal to \\c 1 if the constructor of the numeric type \\a T must\n  *     be called, and to 0 if it is safe not to call it. 
Default is 0 if \\a T is an arithmetic type, and 1 otherwise.\n  * \\li An epsilon() function which, unlike <a href=\"http://en.cppreference.com/w/cpp/types/numeric_limits/epsilon\">std::numeric_limits::epsilon()</a>,\n  *     it returns a \\a Real instead of a \\a T.\n  * \\li A dummy_precision() function returning a weak epsilon value. It is mainly used as a default\n  *     value by the fuzzy comparison operators.\n  * \\li highest() and lowest() functions returning the highest and lowest possible values respectively.\n  * \\li digits10() function returning the number of decimal digits that can be represented without change. This is\n  *     the analogue of <a href=\"http://en.cppreference.com/w/cpp/types/numeric_limits/digits10\">std::numeric_limits<T>::digits10</a>\n  *     which is used as the default implementation if specialized.\n  */\n\ntemplate<typename T> struct GenericNumTraits\n{\n  enum {\n    IsInteger = std::numeric_limits<T>::is_integer,\n    IsSigned = std::numeric_limits<T>::is_signed,\n    IsComplex = 0,\n    RequireInitialization = internal::is_arithmetic<T>::value ? 
0 : 1,\n    ReadCost = 1,\n    AddCost = 1,\n    MulCost = 1\n  };\n\n  typedef T Real;\n  typedef typename internal::conditional<\n                     IsInteger,\n                     typename internal::conditional<sizeof(T)<=2, float, double>::type,\n                     T\n                   >::type NonInteger;\n  typedef T Nested;\n  typedef T Literal;\n\n  EIGEN_DEVICE_FUNC\n  static inline Real epsilon()\n  {\n    return numext::numeric_limits<T>::epsilon();\n  }\n\n  EIGEN_DEVICE_FUNC\n  static inline int digits10()\n  {\n    return internal::default_digits10_impl<T>::run();\n  }\n\n  EIGEN_DEVICE_FUNC\n  static inline Real dummy_precision()\n  {\n    // make sure to override this for floating-point types\n    return Real(0);\n  }\n\n\n  EIGEN_DEVICE_FUNC\n  static inline T highest() {\n    return (numext::numeric_limits<T>::max)();\n  }\n\n  EIGEN_DEVICE_FUNC\n  static inline T lowest()  {\n    return IsInteger ? (numext::numeric_limits<T>::min)() : (-(numext::numeric_limits<T>::max)());\n  }\n\n  EIGEN_DEVICE_FUNC\n  static inline T infinity() {\n    return numext::numeric_limits<T>::infinity();\n  }\n\n  EIGEN_DEVICE_FUNC\n  static inline T quiet_NaN() {\n    return numext::numeric_limits<T>::quiet_NaN();\n  }\n};\n\ntemplate<typename T> struct NumTraits : GenericNumTraits<T>\n{};\n\ntemplate<> struct NumTraits<float>\n  : GenericNumTraits<float>\n{\n  EIGEN_DEVICE_FUNC\n  static inline float dummy_precision() { return 1e-5f; }\n};\n\ntemplate<> struct NumTraits<double> : GenericNumTraits<double>\n{\n  EIGEN_DEVICE_FUNC\n  static inline double dummy_precision() { return 1e-12; }\n};\n\ntemplate<> struct NumTraits<long double>\n  : GenericNumTraits<long double>\n{\n  static inline long double dummy_precision() { return 1e-15l; }\n};\n\ntemplate<typename _Real> struct NumTraits<std::complex<_Real> >\n  : GenericNumTraits<std::complex<_Real> >\n{\n  typedef _Real Real;\n  typedef typename NumTraits<_Real>::Literal Literal;\n  enum {\n    IsComplex = 1,\n    
RequireInitialization = NumTraits<_Real>::RequireInitialization,\n    ReadCost = 2 * NumTraits<_Real>::ReadCost,\n    AddCost = 2 * NumTraits<Real>::AddCost,\n    MulCost = 4 * NumTraits<Real>::MulCost + 2 * NumTraits<Real>::AddCost\n  };\n\n  EIGEN_DEVICE_FUNC\n  static inline Real epsilon() { return NumTraits<Real>::epsilon(); }\n  EIGEN_DEVICE_FUNC\n  static inline Real dummy_precision() { return NumTraits<Real>::dummy_precision(); }\n  EIGEN_DEVICE_FUNC\n  static inline int digits10() { return NumTraits<Real>::digits10(); }\n};\n\ntemplate<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>\nstruct NumTraits<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >\n{\n  typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> ArrayType;\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  typedef Array<RealScalar, Rows, Cols, Options, MaxRows, MaxCols> Real;\n  typedef typename NumTraits<Scalar>::NonInteger NonIntegerScalar;\n  typedef Array<NonIntegerScalar, Rows, Cols, Options, MaxRows, MaxCols> NonInteger;\n  typedef ArrayType & Nested;\n  typedef typename NumTraits<Scalar>::Literal Literal;\n\n  enum {\n    IsComplex = NumTraits<Scalar>::IsComplex,\n    IsInteger = NumTraits<Scalar>::IsInteger,\n    IsSigned  = NumTraits<Scalar>::IsSigned,\n    RequireInitialization = 1,\n    ReadCost = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::ReadCost,\n    AddCost  = ArrayType::SizeAtCompileTime==Dynamic ? HugeCost : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::AddCost,\n    MulCost  = ArrayType::SizeAtCompileTime==Dynamic ? 
HugeCost : ArrayType::SizeAtCompileTime * NumTraits<Scalar>::MulCost\n  };\n\n  EIGEN_DEVICE_FUNC\n  static inline RealScalar epsilon() { return NumTraits<RealScalar>::epsilon(); }\n  EIGEN_DEVICE_FUNC\n  static inline RealScalar dummy_precision() { return NumTraits<RealScalar>::dummy_precision(); }\n};\n\ntemplate<> struct NumTraits<std::string>\n  : GenericNumTraits<std::string>\n{\n  enum {\n    RequireInitialization = 1,\n    ReadCost = HugeCost,\n    AddCost  = HugeCost,\n    MulCost  = HugeCost\n  };\n\n  static inline int digits10() { return 0; }\n\nprivate:\n  static inline std::string epsilon();\n  static inline std::string dummy_precision();\n  static inline std::string lowest();\n  static inline std::string highest();\n  static inline std::string infinity();\n  static inline std::string quiet_NaN();\n};\n\n// Empty specialization for void to allow template specialization based on NumTraits<T>::Real with T==void and SFINAE.\ntemplate<> struct NumTraits<void> {};\n\n} // end namespace Eigen\n\n#endif // EIGEN_NUMTRAITS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/PermutationMatrix.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2009-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PERMUTATIONMATRIX_H\n#define EIGEN_PERMUTATIONMATRIX_H\n\nnamespace Eigen { \n\nnamespace internal {\n\nenum PermPermProduct_t {PermPermProduct};\n\n} // end namespace internal\n\n/** \\class PermutationBase\n  * \\ingroup Core_Module\n  *\n  * \\brief Base class for permutations\n  *\n  * \\tparam Derived the derived class\n  *\n  * This class is the base class for all expressions representing a permutation matrix,\n  * internally stored as a vector of integers.\n  * The convention followed here is that if \\f$ \\sigma \\f$ is a permutation, the corresponding permutation matrix\n  * \\f$ P_\\sigma \\f$ is such that if \\f$ (e_1,\\ldots,e_p) \\f$ is the canonical basis, we have:\n  *  \\f[ P_\\sigma(e_i) = e_{\\sigma(i)}. \\f]\n  * This convention ensures that for any two permutations \\f$ \\sigma, \\tau \\f$, we have:\n  *  \\f[ P_{\\sigma\\circ\\tau} = P_\\sigma P_\\tau. 
\\f]\n  *\n  * Permutation matrices are square and invertible.\n  *\n  * Notice that in addition to the member functions and operators listed here, there also are non-member\n  * operator* to multiply any kind of permutation object with any kind of matrix expression (MatrixBase)\n  * on either side.\n  *\n  * \\sa class PermutationMatrix, class PermutationWrapper\n  */\ntemplate<typename Derived>\nclass PermutationBase : public EigenBase<Derived>\n{\n    typedef internal::traits<Derived> Traits;\n    typedef EigenBase<Derived> Base;\n  public:\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    typedef typename Traits::IndicesType IndicesType;\n    enum {\n      Flags = Traits::Flags,\n      RowsAtCompileTime = Traits::RowsAtCompileTime,\n      ColsAtCompileTime = Traits::ColsAtCompileTime,\n      MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = Traits::MaxColsAtCompileTime\n    };\n    typedef typename Traits::StorageIndex StorageIndex;\n    typedef Matrix<StorageIndex,RowsAtCompileTime,ColsAtCompileTime,0,MaxRowsAtCompileTime,MaxColsAtCompileTime>\n            DenseMatrixType;\n    typedef PermutationMatrix<IndicesType::SizeAtCompileTime,IndicesType::MaxSizeAtCompileTime,StorageIndex>\n            PlainPermutationType;\n    typedef PlainPermutationType PlainObject;\n    using Base::derived;\n    typedef Inverse<Derived> InverseReturnType;\n    typedef void Scalar;\n    #endif\n\n    /** Copies the other permutation into *this */\n    template<typename OtherDerived>\n    Derived& operator=(const PermutationBase<OtherDerived>& other)\n    {\n      indices() = other.indices();\n      return derived();\n    }\n\n    /** Assignment from the Transpositions \\a tr */\n    template<typename OtherDerived>\n    Derived& operator=(const TranspositionsBase<OtherDerived>& tr)\n    {\n      setIdentity(tr.size());\n      for(Index k=size()-1; k>=0; --k)\n        applyTranspositionOnTheRight(k,tr.coeff(k));\n      return derived();\n    }\n\n    
#ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** This is a special case of the templated operator=. Its purpose is to\n      * prevent a default operator= from hiding the templated operator=.\n      */\n    Derived& operator=(const PermutationBase& other)\n    {\n      indices() = other.indices();\n      return derived();\n    }\n    #endif\n\n    /** \\returns the number of rows */\n    inline Index rows() const { return Index(indices().size()); }\n\n    /** \\returns the number of columns */\n    inline Index cols() const { return Index(indices().size()); }\n\n    /** \\returns the size of a side of the respective square matrix, i.e., the number of indices */\n    inline Index size() const { return Index(indices().size()); }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    template<typename DenseDerived>\n    void evalTo(MatrixBase<DenseDerived>& other) const\n    {\n      other.setZero();\n      for (Index i=0; i<rows(); ++i)\n        other.coeffRef(indices().coeff(i),i) = typename DenseDerived::Scalar(1);\n    }\n    #endif\n\n    /** \\returns a Matrix object initialized from this permutation matrix. Notice that it\n      * is inefficient to return this Matrix object by value. For efficiency, favor using\n      * the Matrix constructor taking EigenBase objects.\n      */\n    DenseMatrixType toDenseMatrix() const\n    {\n      return derived();\n    }\n\n    /** const version of indices(). */\n    const IndicesType& indices() const { return derived().indices(); }\n    /** \\returns a reference to the stored array representing the permutation. 
*/\n    IndicesType& indices() { return derived().indices(); }\n\n    /** Resizes to given size.\n      */\n    inline void resize(Index newSize)\n    {\n      indices().resize(newSize);\n    }\n\n    /** Sets *this to be the identity permutation matrix */\n    void setIdentity()\n    {\n      StorageIndex n = StorageIndex(size());\n      for(StorageIndex i = 0; i < n; ++i)\n        indices().coeffRef(i) = i;\n    }\n\n    /** Sets *this to be the identity permutation matrix of given size.\n      */\n    void setIdentity(Index newSize)\n    {\n      resize(newSize);\n      setIdentity();\n    }\n\n    /** Multiplies *this by the transposition \\f$(ij)\\f$ on the left.\n      *\n      * \\returns a reference to *this.\n      *\n      * \\warning This is much slower than applyTranspositionOnTheRight(Index,Index):\n      * this has linear complexity and requires a lot of branching.\n      *\n      * \\sa applyTranspositionOnTheRight(Index,Index)\n      */\n    Derived& applyTranspositionOnTheLeft(Index i, Index j)\n    {\n      eigen_assert(i>=0 && j>=0 && i<size() && j<size());\n      for(Index k = 0; k < size(); ++k)\n      {\n        if(indices().coeff(k) == i) indices().coeffRef(k) = StorageIndex(j);\n        else if(indices().coeff(k) == j) indices().coeffRef(k) = StorageIndex(i);\n      }\n      return derived();\n    }\n\n    /** Multiplies *this by the transposition \\f$(ij)\\f$ on the right.\n      *\n      * \\returns a reference to *this.\n      *\n      * This is a fast operation, it only consists in swapping two indices.\n      *\n      * \\sa applyTranspositionOnTheLeft(Index,Index)\n      */\n    Derived& applyTranspositionOnTheRight(Index i, Index j)\n    {\n      eigen_assert(i>=0 && j>=0 && i<size() && j<size());\n      std::swap(indices().coeffRef(i), indices().coeffRef(j));\n      return derived();\n    }\n\n    /** \\returns the inverse permutation matrix.\n      *\n      * \\note \\blank \\note_try_to_help_rvo\n      */\n    inline 
InverseReturnType inverse() const\n    { return InverseReturnType(derived()); }\n    /** \\returns the tranpose permutation matrix.\n      *\n      * \\note \\blank \\note_try_to_help_rvo\n      */\n    inline InverseReturnType transpose() const\n    { return InverseReturnType(derived()); }\n\n    /**** multiplication helpers to hopefully get RVO ****/\n\n  \n#ifndef EIGEN_PARSED_BY_DOXYGEN\n  protected:\n    template<typename OtherDerived>\n    void assignTranspose(const PermutationBase<OtherDerived>& other)\n    {\n      for (Index i=0; i<rows();++i) indices().coeffRef(other.indices().coeff(i)) = i;\n    }\n    template<typename Lhs,typename Rhs>\n    void assignProduct(const Lhs& lhs, const Rhs& rhs)\n    {\n      eigen_assert(lhs.cols() == rhs.rows());\n      for (Index i=0; i<rows();++i) indices().coeffRef(i) = lhs.indices().coeff(rhs.indices().coeff(i));\n    }\n#endif\n\n  public:\n\n    /** \\returns the product permutation matrix.\n      *\n      * \\note \\blank \\note_try_to_help_rvo\n      */\n    template<typename Other>\n    inline PlainPermutationType operator*(const PermutationBase<Other>& other) const\n    { return PlainPermutationType(internal::PermPermProduct, derived(), other.derived()); }\n\n    /** \\returns the product of a permutation with another inverse permutation.\n      *\n      * \\note \\blank \\note_try_to_help_rvo\n      */\n    template<typename Other>\n    inline PlainPermutationType operator*(const InverseImpl<Other,PermutationStorage>& other) const\n    { return PlainPermutationType(internal::PermPermProduct, *this, other.eval()); }\n\n    /** \\returns the product of an inverse permutation with another permutation.\n      *\n      * \\note \\blank \\note_try_to_help_rvo\n      */\n    template<typename Other> friend\n    inline PlainPermutationType operator*(const InverseImpl<Other, PermutationStorage>& other, const PermutationBase& perm)\n    { return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); }\n    
\n    /** \\returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the permutation.\n      *\n      * This function is O(\\c n) procedure allocating a buffer of \\c n booleans.\n      */\n    Index determinant() const\n    {\n      Index res = 1;\n      Index n = size();\n      Matrix<bool,RowsAtCompileTime,1,0,MaxRowsAtCompileTime> mask(n);\n      mask.fill(false);\n      Index r = 0;\n      while(r < n)\n      {\n        // search for the next seed\n        while(r<n && mask[r]) r++;\n        if(r>=n)\n          break;\n        // we got one, let's follow it until we are back to the seed\n        Index k0 = r++;\n        mask.coeffRef(k0) = true;\n        for(Index k=indices().coeff(k0); k!=k0; k=indices().coeff(k))\n        {\n          mask.coeffRef(k) = true;\n          res = -res;\n        }\n      }\n      return res;\n    }\n\n  protected:\n\n};\n\nnamespace internal {\ntemplate<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex>\nstruct traits<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex> >\n : traits<Matrix<_StorageIndex,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >\n{\n  typedef PermutationStorage StorageKind;\n  typedef Matrix<_StorageIndex, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;\n  typedef _StorageIndex StorageIndex;\n  typedef void Scalar;\n};\n}\n\n/** \\class PermutationMatrix\n  * \\ingroup Core_Module\n  *\n  * \\brief Permutation matrix\n  *\n  * \\tparam SizeAtCompileTime the number of rows/cols, or Dynamic\n  * \\tparam MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. 
Most of the time, you should not have to specify it.\n  * \\tparam _StorageIndex the integer type of the indices\n  *\n  * This class represents a permutation matrix, internally stored as a vector of integers.\n  *\n  * \\sa class PermutationBase, class PermutationWrapper, class DiagonalMatrix\n  */\ntemplate<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex>\nclass PermutationMatrix : public PermutationBase<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex> >\n{\n    typedef PermutationBase<PermutationMatrix> Base;\n    typedef internal::traits<PermutationMatrix> Traits;\n  public:\n\n    typedef const PermutationMatrix& Nested;\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    typedef typename Traits::IndicesType IndicesType;\n    typedef typename Traits::StorageIndex StorageIndex;\n    #endif\n\n    inline PermutationMatrix()\n    {}\n\n    /** Constructs an uninitialized permutation matrix of given size.\n      */\n    explicit inline PermutationMatrix(Index size) : m_indices(size)\n    {\n      eigen_internal_assert(size <= NumTraits<StorageIndex>::highest());\n    }\n\n    /** Copy constructor. */\n    template<typename OtherDerived>\n    inline PermutationMatrix(const PermutationBase<OtherDerived>& other)\n      : m_indices(other.indices()) {}\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** Standard copy constructor. Defined only to prevent a default copy constructor\n      * from hiding the other templated constructor */\n    inline PermutationMatrix(const PermutationMatrix& other) : m_indices(other.indices()) {}\n    #endif\n\n    /** Generic constructor from expression of the indices. 
The indices\n      * array has the meaning that the permutations sends each integer i to indices[i].\n      *\n      * \\warning It is your responsibility to check that the indices array that you passes actually\n      * describes a permutation, i.e., each value between 0 and n-1 occurs exactly once, where n is the\n      * array's size.\n      */\n    template<typename Other>\n    explicit inline PermutationMatrix(const MatrixBase<Other>& indices) : m_indices(indices)\n    {}\n\n    /** Convert the Transpositions \\a tr to a permutation matrix */\n    template<typename Other>\n    explicit PermutationMatrix(const TranspositionsBase<Other>& tr)\n      : m_indices(tr.size())\n    {\n      *this = tr;\n    }\n\n    /** Copies the other permutation into *this */\n    template<typename Other>\n    PermutationMatrix& operator=(const PermutationBase<Other>& other)\n    {\n      m_indices = other.indices();\n      return *this;\n    }\n\n    /** Assignment from the Transpositions \\a tr */\n    template<typename Other>\n    PermutationMatrix& operator=(const TranspositionsBase<Other>& tr)\n    {\n      return Base::operator=(tr.derived());\n    }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** This is a special case of the templated operator=. Its purpose is to\n      * prevent a default operator= from hiding the templated operator=.\n      */\n    PermutationMatrix& operator=(const PermutationMatrix& other)\n    {\n      m_indices = other.m_indices;\n      return *this;\n    }\n    #endif\n\n    /** const version of indices(). */\n    const IndicesType& indices() const { return m_indices; }\n    /** \\returns a reference to the stored array representing the permutation. 
*/\n    IndicesType& indices() { return m_indices; }\n\n\n    /**** multiplication helpers to hopefully get RVO ****/\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    template<typename Other>\n    PermutationMatrix(const InverseImpl<Other,PermutationStorage>& other)\n      : m_indices(other.derived().nestedExpression().size())\n    {\n      eigen_internal_assert(m_indices.size() <= NumTraits<StorageIndex>::highest());\n      StorageIndex end = StorageIndex(m_indices.size());\n      for (StorageIndex i=0; i<end;++i)\n        m_indices.coeffRef(other.derived().nestedExpression().indices().coeff(i)) = i;\n    }\n    template<typename Lhs,typename Rhs>\n    PermutationMatrix(internal::PermPermProduct_t, const Lhs& lhs, const Rhs& rhs)\n      : m_indices(lhs.indices().size())\n    {\n      Base::assignProduct(lhs,rhs);\n    }\n#endif\n\n  protected:\n\n    IndicesType m_indices;\n};\n\n\nnamespace internal {\ntemplate<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex, int _PacketAccess>\nstruct traits<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex>,_PacketAccess> >\n : traits<Matrix<_StorageIndex,SizeAtCompileTime,SizeAtCompileTime,0,MaxSizeAtCompileTime,MaxSizeAtCompileTime> >\n{\n  typedef PermutationStorage StorageKind;\n  typedef Map<const Matrix<_StorageIndex, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1>, _PacketAccess> IndicesType;\n  typedef _StorageIndex StorageIndex;\n  typedef void Scalar;\n};\n}\n\ntemplate<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex, int _PacketAccess>\nclass Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex>,_PacketAccess>\n  : public PermutationBase<Map<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime, _StorageIndex>,_PacketAccess> >\n{\n    typedef PermutationBase<Map> Base;\n    typedef internal::traits<Map> Traits;\n  public:\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    typedef typename Traits::IndicesType IndicesType;\n    
typedef typename IndicesType::Scalar StorageIndex;\n    #endif\n\n    inline Map(const StorageIndex* indicesPtr)\n      : m_indices(indicesPtr)\n    {}\n\n    inline Map(const StorageIndex* indicesPtr, Index size)\n      : m_indices(indicesPtr,size)\n    {}\n\n    /** Copies the other permutation into *this */\n    template<typename Other>\n    Map& operator=(const PermutationBase<Other>& other)\n    { return Base::operator=(other.derived()); }\n\n    /** Assignment from the Transpositions \\a tr */\n    template<typename Other>\n    Map& operator=(const TranspositionsBase<Other>& tr)\n    { return Base::operator=(tr.derived()); }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** This is a special case of the templated operator=. Its purpose is to\n      * prevent a default operator= from hiding the templated operator=.\n      */\n    Map& operator=(const Map& other)\n    {\n      m_indices = other.m_indices;\n      return *this;\n    }\n    #endif\n\n    /** const version of indices(). */\n    const IndicesType& indices() const { return m_indices; }\n    /** \\returns a reference to the stored array representing the permutation. 
*/\n    IndicesType& indices() { return m_indices; }\n\n  protected:\n\n    IndicesType m_indices;\n};\n\ntemplate<typename _IndicesType> class TranspositionsWrapper;\nnamespace internal {\ntemplate<typename _IndicesType>\nstruct traits<PermutationWrapper<_IndicesType> >\n{\n  typedef PermutationStorage StorageKind;\n  typedef void Scalar;\n  typedef typename _IndicesType::Scalar StorageIndex;\n  typedef _IndicesType IndicesType;\n  enum {\n    RowsAtCompileTime = _IndicesType::SizeAtCompileTime,\n    ColsAtCompileTime = _IndicesType::SizeAtCompileTime,\n    MaxRowsAtCompileTime = IndicesType::MaxSizeAtCompileTime,\n    MaxColsAtCompileTime = IndicesType::MaxSizeAtCompileTime,\n    Flags = 0\n  };\n};\n}\n\n/** \\class PermutationWrapper\n  * \\ingroup Core_Module\n  *\n  * \\brief Class to view a vector of integers as a permutation matrix\n  *\n  * \\tparam _IndicesType the type of the vector of integer (can be any compatible expression)\n  *\n  * This class allows to view any vector expression of integers as a permutation matrix.\n  *\n  * \\sa class PermutationBase, class PermutationMatrix\n  */\ntemplate<typename _IndicesType>\nclass PermutationWrapper : public PermutationBase<PermutationWrapper<_IndicesType> >\n{\n    typedef PermutationBase<PermutationWrapper> Base;\n    typedef internal::traits<PermutationWrapper> Traits;\n  public:\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    typedef typename Traits::IndicesType IndicesType;\n    #endif\n\n    inline PermutationWrapper(const IndicesType& indices)\n      : m_indices(indices)\n    {}\n\n    /** const version of indices(). 
*/\n    const typename internal::remove_all<typename IndicesType::Nested>::type&\n    indices() const { return m_indices; }\n\n  protected:\n\n    typename IndicesType::Nested m_indices;\n};\n\n\n/** \\returns the matrix with the permutation applied to the columns.\n  */\ntemplate<typename MatrixDerived, typename PermutationDerived>\nEIGEN_DEVICE_FUNC\nconst Product<MatrixDerived, PermutationDerived, AliasFreeProduct>\noperator*(const MatrixBase<MatrixDerived> &matrix,\n          const PermutationBase<PermutationDerived>& permutation)\n{\n  return Product<MatrixDerived, PermutationDerived, AliasFreeProduct>\n            (matrix.derived(), permutation.derived());\n}\n\n/** \\returns the matrix with the permutation applied to the rows.\n  */\ntemplate<typename PermutationDerived, typename MatrixDerived>\nEIGEN_DEVICE_FUNC\nconst Product<PermutationDerived, MatrixDerived, AliasFreeProduct>\noperator*(const PermutationBase<PermutationDerived> &permutation,\n          const MatrixBase<MatrixDerived>& matrix)\n{\n  return Product<PermutationDerived, MatrixDerived, AliasFreeProduct>\n            (permutation.derived(), matrix.derived());\n}\n\n\ntemplate<typename PermutationType>\nclass InverseImpl<PermutationType, PermutationStorage>\n  : public EigenBase<Inverse<PermutationType> >\n{\n    typedef typename PermutationType::PlainPermutationType PlainPermutationType;\n    typedef internal::traits<PermutationType> PermTraits;\n  protected:\n    InverseImpl() {}\n  public:\n    typedef Inverse<PermutationType> InverseType;\n    using EigenBase<Inverse<PermutationType> >::derived;\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    typedef typename PermutationType::DenseMatrixType DenseMatrixType;\n    enum {\n      RowsAtCompileTime = PermTraits::RowsAtCompileTime,\n      ColsAtCompileTime = PermTraits::ColsAtCompileTime,\n      MaxRowsAtCompileTime = PermTraits::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = PermTraits::MaxColsAtCompileTime\n    };\n    #endif\n\n    #ifndef 
EIGEN_PARSED_BY_DOXYGEN\n    template<typename DenseDerived>\n    void evalTo(MatrixBase<DenseDerived>& other) const\n    {\n      other.setZero();\n      for (Index i=0; i<derived().rows();++i)\n        other.coeffRef(i, derived().nestedExpression().indices().coeff(i)) = typename DenseDerived::Scalar(1);\n    }\n    #endif\n\n    /** \\return the equivalent permutation matrix */\n    PlainPermutationType eval() const { return derived(); }\n\n    DenseMatrixType toDenseMatrix() const { return derived(); }\n\n    /** \\returns the matrix with the inverse permutation applied to the columns.\n      */\n    template<typename OtherDerived> friend\n    const Product<OtherDerived, InverseType, AliasFreeProduct>\n    operator*(const MatrixBase<OtherDerived>& matrix, const InverseType& trPerm)\n    {\n      return Product<OtherDerived, InverseType, AliasFreeProduct>(matrix.derived(), trPerm.derived());\n    }\n\n    /** \\returns the matrix with the inverse permutation applied to the rows.\n      */\n    template<typename OtherDerived>\n    const Product<InverseType, OtherDerived, AliasFreeProduct>\n    operator*(const MatrixBase<OtherDerived>& matrix) const\n    {\n      return Product<InverseType, OtherDerived, AliasFreeProduct>(derived(), matrix.derived());\n    }\n};\n\ntemplate<typename Derived>\nconst PermutationWrapper<const Derived> MatrixBase<Derived>::asPermutation() const\n{\n  return derived();\n}\n\nnamespace internal {\n\ntemplate<> struct AssignmentKind<DenseShape,PermutationShape> { typedef EigenBase2EigenBase Kind; };\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_PERMUTATIONMATRIX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/PlainObjectBase.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_DENSESTORAGEBASE_H\n#define EIGEN_DENSESTORAGEBASE_H\n\n#if defined(EIGEN_INITIALIZE_MATRICES_BY_ZERO)\n# define EIGEN_INITIALIZE_COEFFS\n# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=Scalar(0);\n#elif defined(EIGEN_INITIALIZE_MATRICES_BY_NAN)\n# define EIGEN_INITIALIZE_COEFFS\n# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED for(int i=0;i<base().size();++i) coeffRef(i)=std::numeric_limits<Scalar>::quiet_NaN();\n#else\n# undef EIGEN_INITIALIZE_COEFFS\n# define EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED\n#endif\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<int MaxSizeAtCompileTime> struct check_rows_cols_for_overflow {\n  template<typename Index>\n  EIGEN_DEVICE_FUNC\n  static EIGEN_ALWAYS_INLINE void run(Index, Index)\n  {\n  }\n};\n\ntemplate<> struct check_rows_cols_for_overflow<Dynamic> {\n  template<typename Index>\n  EIGEN_DEVICE_FUNC\n  static EIGEN_ALWAYS_INLINE void run(Index rows, Index cols)\n  {\n    // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242\n    // we assume Index is signed\n    Index max_index = (std::size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed\n    bool error = (rows == 0 || cols == 0) ? 
false\n               : (rows > max_index / cols);\n    if (error)\n      throw_std_bad_alloc();\n  }\n};\n\ntemplate <typename Derived,\n          typename OtherDerived = Derived,\n          bool IsVector = bool(Derived::IsVectorAtCompileTime) && bool(OtherDerived::IsVectorAtCompileTime)>\nstruct conservative_resize_like_impl;\n\ntemplate<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers> struct matrix_swap_impl;\n\n} // end namespace internal\n\n#ifdef EIGEN_PARSED_BY_DOXYGEN\nnamespace doxygen {\n\n// This is a workaround to doxygen not being able to understand the inheritance logic\n// when it is hidden by the dense_xpr_base helper struct.\n// Moreover, doxygen fails to include members that are not documented in the declaration body of\n// MatrixBase if we inherits MatrixBase<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >,\n// this is why we simply inherits MatrixBase, though this does not make sense.\n\n/** This class is just a workaround for Doxygen and it does not not actually exist. */\ntemplate<typename Derived> struct dense_xpr_base_dispatcher;\n/** This class is just a workaround for Doxygen and it does not not actually exist. */\ntemplate<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>\nstruct dense_xpr_base_dispatcher<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >\n    : public MatrixBase {};\n/** This class is just a workaround for Doxygen and it does not not actually exist. 
*/\ntemplate<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>\nstruct dense_xpr_base_dispatcher<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> >\n    : public ArrayBase {};\n\n} // namespace doxygen\n\n/** \\class PlainObjectBase\n  * \\ingroup Core_Module\n  * \\brief %Dense storage base class for matrices and arrays.\n  *\n  * This class can be extended with the help of the plugin mechanism described on the page\n  * \\ref TopicCustomizing_Plugins by defining the preprocessor symbol \\c EIGEN_PLAINOBJECTBASE_PLUGIN.\n  *\n  * \\tparam Derived is the derived type, e.g., a Matrix or Array\n  *\n  * \\sa \\ref TopicClassHierarchy\n  */\ntemplate<typename Derived>\nclass PlainObjectBase : public doxygen::dense_xpr_base_dispatcher<Derived>\n#else\ntemplate<typename Derived>\nclass PlainObjectBase : public internal::dense_xpr_base<Derived>::type\n#endif\n{\n  public:\n    enum { Options = internal::traits<Derived>::Options };\n    typedef typename internal::dense_xpr_base<Derived>::type Base;\n\n    typedef typename internal::traits<Derived>::StorageKind StorageKind;\n    typedef typename internal::traits<Derived>::Scalar Scalar;\n    \n    typedef typename internal::packet_traits<Scalar>::type PacketScalar;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    typedef Derived DenseType;\n\n    using Base::RowsAtCompileTime;\n    using Base::ColsAtCompileTime;\n    using Base::SizeAtCompileTime;\n    using Base::MaxRowsAtCompileTime;\n    using Base::MaxColsAtCompileTime;\n    using Base::MaxSizeAtCompileTime;\n    using Base::IsVectorAtCompileTime;\n    using Base::Flags;\n\n    template<typename PlainObjectType, int MapOptions, typename StrideType> friend class Eigen::Map;\n    friend  class Eigen::Map<Derived, Unaligned>;\n    typedef Eigen::Map<Derived, Unaligned>  MapType;\n    friend  class Eigen::Map<const Derived, Unaligned>;\n    typedef const Eigen::Map<const Derived, Unaligned> ConstMapType;\n#if 
EIGEN_MAX_ALIGN_BYTES>0\n    // for EIGEN_MAX_ALIGN_BYTES==0, AlignedMax==Unaligned, and many compilers generate warnings for friend-ing a class twice.\n    friend  class Eigen::Map<Derived, AlignedMax>;\n    friend  class Eigen::Map<const Derived, AlignedMax>;\n#endif\n    typedef Eigen::Map<Derived, AlignedMax> AlignedMapType;\n    typedef const Eigen::Map<const Derived, AlignedMax> ConstAlignedMapType;\n    template<typename StrideType> struct StridedMapType { typedef Eigen::Map<Derived, Unaligned, StrideType> type; };\n    template<typename StrideType> struct StridedConstMapType { typedef Eigen::Map<const Derived, Unaligned, StrideType> type; };\n    template<typename StrideType> struct StridedAlignedMapType { typedef Eigen::Map<Derived, AlignedMax, StrideType> type; };\n    template<typename StrideType> struct StridedConstAlignedMapType { typedef Eigen::Map<const Derived, AlignedMax, StrideType> type; };\n\n  protected:\n    DenseStorage<Scalar, Base::MaxSizeAtCompileTime, Base::RowsAtCompileTime, Base::ColsAtCompileTime, Options> m_storage;\n\n  public:\n    enum { NeedsToAlign = (SizeAtCompileTime != Dynamic) && (internal::traits<Derived>::Alignment>0) };\n    EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)\n\n    EIGEN_DEVICE_FUNC\n    Base& base() { return *static_cast<Base*>(this); }\n    EIGEN_DEVICE_FUNC\n    const Base& base() const { return *static_cast<const Base*>(this); }\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); }\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); }\n\n    /** This is an overloaded version of DenseCoeffsBase<Derived,ReadOnlyAccessors>::coeff(Index,Index) const\n      * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts.\n      *\n      * See DenseCoeffsBase<Derived,ReadOnlyAccessors>::coeff(Index) const for details. 
*/\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE const Scalar& coeff(Index rowId, Index colId) const\n    {\n      if(Flags & RowMajorBit)\n        return m_storage.data()[colId + rowId * m_storage.cols()];\n      else // column-major\n        return m_storage.data()[rowId + colId * m_storage.rows()];\n    }\n\n    /** This is an overloaded version of DenseCoeffsBase<Derived,ReadOnlyAccessors>::coeff(Index) const\n      * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts.\n      *\n      * See DenseCoeffsBase<Derived,ReadOnlyAccessors>::coeff(Index) const for details. */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const\n    {\n      return m_storage.data()[index];\n    }\n\n    /** This is an overloaded version of DenseCoeffsBase<Derived,WriteAccessors>::coeffRef(Index,Index) const\n      * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts.\n      *\n      * See DenseCoeffsBase<Derived,WriteAccessors>::coeffRef(Index,Index) const for details. */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Scalar& coeffRef(Index rowId, Index colId)\n    {\n      if(Flags & RowMajorBit)\n        return m_storage.data()[colId + rowId * m_storage.cols()];\n      else // column-major\n        return m_storage.data()[rowId + colId * m_storage.rows()];\n    }\n\n    /** This is an overloaded version of DenseCoeffsBase<Derived,WriteAccessors>::coeffRef(Index) const\n      * provided to by-pass the creation of an evaluator of the expression, thus saving compilation efforts.\n      *\n      * See DenseCoeffsBase<Derived,WriteAccessors>::coeffRef(Index) const for details. */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)\n    {\n      return m_storage.data()[index];\n    }\n\n    /** This is the const version of coeffRef(Index,Index) which is thus synonym of coeff(Index,Index).\n      * It is provided for convenience. 
*/\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE const Scalar& coeffRef(Index rowId, Index colId) const\n    {\n      if(Flags & RowMajorBit)\n        return m_storage.data()[colId + rowId * m_storage.cols()];\n      else // column-major\n        return m_storage.data()[rowId + colId * m_storage.rows()];\n    }\n\n    /** This is the const version of coeffRef(Index) which is thus synonym of coeff(Index).\n      * It is provided for convenience. */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const\n    {\n      return m_storage.data()[index];\n    }\n\n    /** \\internal */\n    template<int LoadMode>\n    EIGEN_STRONG_INLINE PacketScalar packet(Index rowId, Index colId) const\n    {\n      return internal::ploadt<PacketScalar, LoadMode>\n               (m_storage.data() + (Flags & RowMajorBit\n                                   ? colId + rowId * m_storage.cols()\n                                   : rowId + colId * m_storage.rows()));\n    }\n\n    /** \\internal */\n    template<int LoadMode>\n    EIGEN_STRONG_INLINE PacketScalar packet(Index index) const\n    {\n      return internal::ploadt<PacketScalar, LoadMode>(m_storage.data() + index);\n    }\n\n    /** \\internal */\n    template<int StoreMode>\n    EIGEN_STRONG_INLINE void writePacket(Index rowId, Index colId, const PacketScalar& val)\n    {\n      internal::pstoret<Scalar, PacketScalar, StoreMode>\n              (m_storage.data() + (Flags & RowMajorBit\n                                   ? 
colId + rowId * m_storage.cols()\n                                   : rowId + colId * m_storage.rows()), val);\n    }\n\n    /** \\internal */\n    template<int StoreMode>\n    EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& val)\n    {\n      internal::pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, val);\n    }\n\n    /** \\returns a const pointer to the data array of this matrix */\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const\n    { return m_storage.data(); }\n\n    /** \\returns a pointer to the data array of this matrix */\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data()\n    { return m_storage.data(); }\n\n    /** Resizes \\c *this to a \\a rows x \\a cols matrix.\n      *\n      * This method is intended for dynamic-size matrices, although it is legal to call it on any\n      * matrix as long as fixed dimensions are left unchanged. If you only want to change the number\n      * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t).\n      *\n      * If the current number of coefficients of \\c *this exactly matches the\n      * product \\a rows * \\a cols, then no memory allocation is performed and\n      * the current values are left unchanged. 
In all other cases, including\n      * shrinking, the data is reallocated and all previous values are lost.\n      *\n      * Example: \\include Matrix_resize_int_int.cpp\n      * Output: \\verbinclude Matrix_resize_int_int.out\n      *\n      * \\sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t)\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void resize(Index rows, Index cols)\n    {\n      eigen_assert(   EIGEN_IMPLIES(RowsAtCompileTime!=Dynamic,rows==RowsAtCompileTime)\n                   && EIGEN_IMPLIES(ColsAtCompileTime!=Dynamic,cols==ColsAtCompileTime)\n                   && EIGEN_IMPLIES(RowsAtCompileTime==Dynamic && MaxRowsAtCompileTime!=Dynamic,rows<=MaxRowsAtCompileTime)\n                   && EIGEN_IMPLIES(ColsAtCompileTime==Dynamic && MaxColsAtCompileTime!=Dynamic,cols<=MaxColsAtCompileTime)\n                   && rows>=0 && cols>=0 && \"Invalid sizes when resizing a matrix or array.\");\n      internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(rows, cols);\n      #ifdef EIGEN_INITIALIZE_COEFFS\n        Index size = rows*cols;\n        bool size_changed = size != this->size();\n        m_storage.resize(size, rows, cols);\n        if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED\n      #else\n        m_storage.resize(rows*cols, rows, cols);\n      #endif\n    }\n\n    /** Resizes \\c *this to a vector of length \\a size\n      *\n      * \\only_for_vectors. This method does not work for\n      * partially dynamic matrices when the static dimension is anything other\n      * than 1. 
For example it will not work with Matrix<double, 2, Dynamic>.\n      *\n      * Example: \\include Matrix_resize_int.cpp\n      * Output: \\verbinclude Matrix_resize_int.out\n      *\n      * \\sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t)\n      */\n    EIGEN_DEVICE_FUNC\n    inline void resize(Index size)\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase)\n      eigen_assert(((SizeAtCompileTime == Dynamic && (MaxSizeAtCompileTime==Dynamic || size<=MaxSizeAtCompileTime)) || SizeAtCompileTime == size) && size>=0);\n      #ifdef EIGEN_INITIALIZE_COEFFS\n        bool size_changed = size != this->size();\n      #endif\n      if(RowsAtCompileTime == 1)\n        m_storage.resize(size, 1, size);\n      else\n        m_storage.resize(size, size, 1);\n      #ifdef EIGEN_INITIALIZE_COEFFS\n        if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED\n      #endif\n    }\n\n    /** Resizes the matrix, changing only the number of columns. For the parameter of type NoChange_t, just pass the special value \\c NoChange\n      * as in the example below.\n      *\n      * Example: \\include Matrix_resize_NoChange_int.cpp\n      * Output: \\verbinclude Matrix_resize_NoChange_int.out\n      *\n      * \\sa resize(Index,Index)\n      */\n    EIGEN_DEVICE_FUNC\n    inline void resize(NoChange_t, Index cols)\n    {\n      resize(rows(), cols);\n    }\n\n    /** Resizes the matrix, changing only the number of rows. 
For the parameter of type NoChange_t, just pass the special value \\c NoChange\n      * as in the example below.\n      *\n      * Example: \\include Matrix_resize_int_NoChange.cpp\n      * Output: \\verbinclude Matrix_resize_int_NoChange.out\n      *\n      * \\sa resize(Index,Index)\n      */\n    EIGEN_DEVICE_FUNC\n    inline void resize(Index rows, NoChange_t)\n    {\n      resize(rows, cols());\n    }\n\n    /** Resizes \\c *this to have the same dimensions as \\a other.\n      * Takes care of doing all the checking that's needed.\n      *\n      * Note that copying a row-vector into a vector (and conversely) is allowed.\n      * The resizing, if any, is then done in the appropriate way so that row-vectors\n      * remain row-vectors and vectors remain vectors.\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC \n    EIGEN_STRONG_INLINE void resizeLike(const EigenBase<OtherDerived>& _other)\n    {\n      const OtherDerived& other = _other.derived();\n      internal::check_rows_cols_for_overflow<MaxSizeAtCompileTime>::run(other.rows(), other.cols());\n      const Index othersize = other.rows()*other.cols();\n      if(RowsAtCompileTime == 1)\n      {\n        eigen_assert(other.rows() == 1 || other.cols() == 1);\n        resize(1, othersize);\n      }\n      else if(ColsAtCompileTime == 1)\n      {\n        eigen_assert(other.rows() == 1 || other.cols() == 1);\n        resize(othersize, 1);\n      }\n      else resize(other.rows(), other.cols());\n    }\n\n    /** Resizes the matrix to \\a rows x \\a cols while leaving old values untouched.\n      *\n      * The method is intended for matrices of dynamic size. If you only want to change the number\n      * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or\n      * conservativeResize(Index, NoChange_t).\n      *\n      * Matrices are resized relative to the top-left element. 
In case values need to be \n      * appended to the matrix they will be uninitialized.\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols)\n    {\n      internal::conservative_resize_like_impl<Derived>::run(*this, rows, cols);\n    }\n\n    /** Resizes the matrix to \\a rows x \\a cols while leaving old values untouched.\n      *\n      * As opposed to conservativeResize(Index rows, Index cols), this version leaves\n      * the number of columns unchanged.\n      *\n      * In case the matrix is growing, new rows will be uninitialized.\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t)\n    {\n      // Note: see the comment in conservativeResize(Index,Index)\n      conservativeResize(rows, cols());\n    }\n\n    /** Resizes the matrix to \\a rows x \\a cols while leaving old values untouched.\n      *\n      * As opposed to conservativeResize(Index rows, Index cols), this version leaves\n      * the number of rows unchanged.\n      *\n      * In case the matrix is growing, new columns will be uninitialized.\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols)\n    {\n      // Note: see the comment in conservativeResize(Index,Index)\n      conservativeResize(rows(), cols);\n    }\n\n    /** Resizes the vector to \\a size while retaining old values.\n      *\n      * \\only_for_vectors. This method does not work for\n      * partially dynamic matrices when the static dimension is anything other\n      * than 1. 
For example it will not work with Matrix<double, 2, Dynamic>.\n      *\n      * When values are appended, they will be uninitialized.\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void conservativeResize(Index size)\n    {\n      internal::conservative_resize_like_impl<Derived>::run(*this, size);\n    }\n\n    /** Resizes the matrix to \\a rows x \\a cols of \\c other, while leaving old values untouched.\n      *\n      * The method is intended for matrices of dynamic size. If you only want to change the number\n      * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or\n      * conservativeResize(Index, NoChange_t).\n      *\n      * Matrices are resized relative to the top-left element. In case values need to be \n      * appended to the matrix they will copied from \\c other.\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void conservativeResizeLike(const DenseBase<OtherDerived>& other)\n    {\n      internal::conservative_resize_like_impl<Derived,OtherDerived>::run(*this, other);\n    }\n\n    /** This is a special case of the templated operator=. 
Its purpose is to\n      * prevent a default operator= from hiding the templated operator=.\n      */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Derived& operator=(const PlainObjectBase& other)\n    {\n      return _set(other);\n    }\n\n    /** \\sa MatrixBase::lazyAssign() */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Derived& lazyAssign(const DenseBase<OtherDerived>& other)\n    {\n      _resize_to_match(other);\n      return Base::lazyAssign(other.derived());\n    }\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE Derived& operator=(const ReturnByValue<OtherDerived>& func)\n    {\n      resize(func.rows(), func.cols());\n      return Base::operator=(func);\n    }\n\n    // Prevent user from trying to instantiate PlainObjectBase objects\n    // by making all its constructor protected. See bug 1074.\n  protected:\n\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE PlainObjectBase() : m_storage()\n    {\n//       _check_template_params();\n//       EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED\n    }\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    // FIXME is it still needed ?\n    /** \\internal */\n    EIGEN_DEVICE_FUNC\n    explicit PlainObjectBase(internal::constructor_without_unaligned_array_assert)\n      : m_storage(internal::constructor_without_unaligned_array_assert())\n    {\n//       _check_template_params(); EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED\n    }\n#endif\n\n#if EIGEN_HAS_RVALUE_REFERENCES\n    EIGEN_DEVICE_FUNC\n    PlainObjectBase(PlainObjectBase&& other) EIGEN_NOEXCEPT\n      : m_storage( std::move(other.m_storage) )\n    {\n    }\n\n    EIGEN_DEVICE_FUNC\n    PlainObjectBase& operator=(PlainObjectBase&& other) EIGEN_NOEXCEPT\n    {\n      using std::swap;\n      swap(m_storage, other.m_storage);\n      return *this;\n    }\n#endif\n\n    /** Copy constructor */\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE PlainObjectBase(const PlainObjectBase& 
other)\n      : Base(), m_storage(other.m_storage) { }\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE PlainObjectBase(Index size, Index rows, Index cols)\n      : m_storage(size, rows, cols)\n    {\n//       _check_template_params();\n//       EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED\n    }\n\n    /** \\sa PlainObjectBase::operator=(const EigenBase<OtherDerived>&) */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE PlainObjectBase(const DenseBase<OtherDerived> &other)\n      : m_storage()\n    {\n      _check_template_params();\n      resizeLike(other);\n      _set_noalias(other);\n    }\n\n    /** \\sa PlainObjectBase::operator=(const EigenBase<OtherDerived>&) */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase<OtherDerived> &other)\n      : m_storage()\n    {\n      _check_template_params();\n      resizeLike(other);\n      *this = other.derived();\n    }\n    /** \\brief Copy constructor with in-place evaluation */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE PlainObjectBase(const ReturnByValue<OtherDerived>& other)\n    {\n      _check_template_params();\n      // FIXME this does not automatically transpose vectors if necessary\n      resize(other.rows(), other.cols());\n      other.evalTo(this->derived());\n    }\n\n  public:\n\n    /** \\brief Copies the generic expression \\a other into *this.\n      * \\copydetails DenseBase::operator=(const EigenBase<OtherDerived> &other)\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC \n    EIGEN_STRONG_INLINE Derived& operator=(const EigenBase<OtherDerived> &other)\n    {\n      _resize_to_match(other);\n      Base::operator=(other.derived());\n      return this->derived();\n    }\n\n    /** \\name Map\n      * These are convenience functions returning Map objects. 
The Map() static functions return unaligned Map objects,\n      * while the AlignedMap() functions return aligned Map objects and thus should be called only with 16-byte-aligned\n      * \\a data pointers.\n      *\n      * \\see class Map\n      */\n    //@{\n    static inline ConstMapType Map(const Scalar* data)\n    { return ConstMapType(data); }\n    static inline MapType Map(Scalar* data)\n    { return MapType(data); }\n    static inline ConstMapType Map(const Scalar* data, Index size)\n    { return ConstMapType(data, size); }\n    static inline MapType Map(Scalar* data, Index size)\n    { return MapType(data, size); }\n    static inline ConstMapType Map(const Scalar* data, Index rows, Index cols)\n    { return ConstMapType(data, rows, cols); }\n    static inline MapType Map(Scalar* data, Index rows, Index cols)\n    { return MapType(data, rows, cols); }\n\n    static inline ConstAlignedMapType MapAligned(const Scalar* data)\n    { return ConstAlignedMapType(data); }\n    static inline AlignedMapType MapAligned(Scalar* data)\n    { return AlignedMapType(data); }\n    static inline ConstAlignedMapType MapAligned(const Scalar* data, Index size)\n    { return ConstAlignedMapType(data, size); }\n    static inline AlignedMapType MapAligned(Scalar* data, Index size)\n    { return AlignedMapType(data, size); }\n    static inline ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols)\n    { return ConstAlignedMapType(data, rows, cols); }\n    static inline AlignedMapType MapAligned(Scalar* data, Index rows, Index cols)\n    { return AlignedMapType(data, rows, cols); }\n\n    template<int Outer, int Inner>\n    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, const Stride<Outer, Inner>& stride)\n    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, stride); }\n    template<int Outer, int Inner>\n    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* 
data, const Stride<Outer, Inner>& stride)\n    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, stride); }\n    template<int Outer, int Inner>\n    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, Index size, const Stride<Outer, Inner>& stride)\n    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, size, stride); }\n    template<int Outer, int Inner>\n    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, Index size, const Stride<Outer, Inner>& stride)\n    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, size, stride); }\n    template<int Outer, int Inner>\n    static inline typename StridedConstMapType<Stride<Outer, Inner> >::type Map(const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)\n    { return typename StridedConstMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }\n    template<int Outer, int Inner>\n    static inline typename StridedMapType<Stride<Outer, Inner> >::type Map(Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)\n    { return typename StridedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }\n\n    template<int Outer, int Inner>\n    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, const Stride<Outer, Inner>& stride)\n    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, stride); }\n    template<int Outer, int Inner>\n    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, const Stride<Outer, Inner>& stride)\n    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, stride); }\n    template<int Outer, int Inner>\n    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, Index size, const Stride<Outer, Inner>& 
stride)\n    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, size, stride); }\n    template<int Outer, int Inner>\n    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, Index size, const Stride<Outer, Inner>& stride)\n    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, size, stride); }\n    template<int Outer, int Inner>\n    static inline typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type MapAligned(const Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)\n    { return typename StridedConstAlignedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }\n    template<int Outer, int Inner>\n    static inline typename StridedAlignedMapType<Stride<Outer, Inner> >::type MapAligned(Scalar* data, Index rows, Index cols, const Stride<Outer, Inner>& stride)\n    { return typename StridedAlignedMapType<Stride<Outer, Inner> >::type(data, rows, cols, stride); }\n    //@}\n\n    using Base::setConstant;\n    EIGEN_DEVICE_FUNC Derived& setConstant(Index size, const Scalar& val);\n    EIGEN_DEVICE_FUNC Derived& setConstant(Index rows, Index cols, const Scalar& val);\n\n    using Base::setZero;\n    EIGEN_DEVICE_FUNC Derived& setZero(Index size);\n    EIGEN_DEVICE_FUNC Derived& setZero(Index rows, Index cols);\n\n    using Base::setOnes;\n    EIGEN_DEVICE_FUNC Derived& setOnes(Index size);\n    EIGEN_DEVICE_FUNC Derived& setOnes(Index rows, Index cols);\n\n    using Base::setRandom;\n    Derived& setRandom(Index size);\n    Derived& setRandom(Index rows, Index cols);\n\n    #ifdef EIGEN_PLAINOBJECTBASE_PLUGIN\n    #include EIGEN_PLAINOBJECTBASE_PLUGIN\n    #endif\n\n  protected:\n    /** \\internal Resizes *this in preparation for assigning \\a other to it.\n      * Takes care of doing all the checking that's needed.\n      *\n      * Note that copying a row-vector into a vector (and conversely) is allowed.\n      * The 
resizing, if any, is then done in the appropriate way so that row-vectors\n      * remain row-vectors and vectors remain vectors.\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC \n    EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase<OtherDerived>& other)\n    {\n      #ifdef EIGEN_NO_AUTOMATIC_RESIZING\n      eigen_assert((this->size()==0 || (IsVectorAtCompileTime ? (this->size() == other.size())\n                 : (rows() == other.rows() && cols() == other.cols())))\n        && \"Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined\");\n      EIGEN_ONLY_USED_FOR_DEBUG(other);\n      #else\n      resizeLike(other);\n      #endif\n    }\n\n    /**\n      * \\brief Copies the value of the expression \\a other into \\c *this with automatic resizing.\n      *\n      * *this might be resized to match the dimensions of \\a other. If *this was a null matrix (not already initialized),\n      * it will be initialized.\n      *\n      * Note that copying a row-vector into a vector (and conversely) is allowed.\n      * The resizing, if any, is then done in the appropriate way so that row-vectors\n      * remain row-vectors and vectors remain vectors.\n      *\n      * \\sa operator=(const MatrixBase<OtherDerived>&), _set_noalias()\n      *\n      * \\internal\n      */\n    // aliasing is dealt once in internall::call_assignment\n    // so at this stage we have to assume aliasing... 
and resising has to be done later.\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC \n    EIGEN_STRONG_INLINE Derived& _set(const DenseBase<OtherDerived>& other)\n    {\n      internal::call_assignment(this->derived(), other.derived());\n      return this->derived();\n    }\n\n    /** \\internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which\n      * is the case when creating a new matrix) so one can enforce lazy evaluation.\n      *\n      * \\sa operator=(const MatrixBase<OtherDerived>&), _set()\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC \n    EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase<OtherDerived>& other)\n    {\n      // I don't think we need this resize call since the lazyAssign will anyways resize\n      // and lazyAssign will be called by the assign selector.\n      //_resize_to_match(other);\n      // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because\n      // it wouldn't allow to copy a row-vector into a column-vector.\n      internal::call_assignment_no_alias(this->derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());\n      return this->derived();\n    }\n\n    template<typename T0, typename T1>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if<Base::SizeAtCompileTime!=2,T0>::type* = 0)\n    {\n      EIGEN_STATIC_ASSERT(bool(NumTraits<T0>::IsInteger) &&\n                          bool(NumTraits<T1>::IsInteger),\n                          FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)\n      resize(rows,cols);\n    }\n    \n    template<typename T0, typename T1>\n    EIGEN_DEVICE_FUNC \n    EIGEN_STRONG_INLINE void _init2(const T0& val0, const T1& val1, typename internal::enable_if<Base::SizeAtCompileTime==2,T0>::type* = 0)\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)\n      
m_storage.data()[0] = Scalar(val0);\n      m_storage.data()[1] = Scalar(val1);\n    }\n    \n    template<typename T0, typename T1>\n    EIGEN_DEVICE_FUNC \n    EIGEN_STRONG_INLINE void _init2(const Index& val0, const Index& val1,\n                                    typename internal::enable_if<    (!internal::is_same<Index,Scalar>::value)\n                                                                  && (internal::is_same<T0,Index>::value)\n                                                                  && (internal::is_same<T1,Index>::value)\n                                                                  && Base::SizeAtCompileTime==2,T1>::type* = 0)\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2)\n      m_storage.data()[0] = Scalar(val0);\n      m_storage.data()[1] = Scalar(val1);\n    }\n\n    // The argument is convertible to the Index type and we either have a non 1x1 Matrix, or a dynamic-sized Array,\n    // then the argument is meant to be the size of the object.\n    template<typename T>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void _init1(Index size, typename internal::enable_if<    (Base::SizeAtCompileTime!=1 || !internal::is_convertible<T, Scalar>::value)\n                                                                              && ((!internal::is_same<typename internal::traits<Derived>::XprKind,ArrayXpr>::value || Base::SizeAtCompileTime==Dynamic)),T>::type* = 0)\n    {\n      // NOTE MSVC 2008 complains if we directly put bool(NumTraits<T>::IsInteger) as the EIGEN_STATIC_ASSERT argument.\n      const bool is_integer = NumTraits<T>::IsInteger;\n      EIGEN_UNUSED_VARIABLE(is_integer);\n      EIGEN_STATIC_ASSERT(is_integer,\n                          FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED)\n      resize(size);\n    }\n    \n    // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type can be implicitely converted)\n    
template<typename T>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void _init1(const Scalar& val0, typename internal::enable_if<Base::SizeAtCompileTime==1 && internal::is_convertible<T, Scalar>::value,T>::type* = 0)\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1)\n      m_storage.data()[0] = val0;\n    }\n    \n    // We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type match the index type)\n    template<typename T>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void _init1(const Index& val0,\n                                    typename internal::enable_if<    (!internal::is_same<Index,Scalar>::value)\n                                                                  && (internal::is_same<Index,T>::value)\n                                                                  && Base::SizeAtCompileTime==1\n                                                                  && internal::is_convertible<T, Scalar>::value,T*>::type* = 0)\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 1)\n      m_storage.data()[0] = Scalar(val0);\n    }\n\n    // Initialize a fixed size matrix from a pointer to raw data\n    template<typename T>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void _init1(const Scalar* data){\n      this->_set_noalias(ConstMapType(data));\n    }\n\n    // Initialize an arbitrary matrix from a dense expression\n    template<typename T, typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void _init1(const DenseBase<OtherDerived>& other){\n      this->_set_noalias(other);\n    }\n\n    // Initialize an arbitrary matrix from an object convertible to the Derived type.\n    template<typename T>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void _init1(const Derived& other){\n      this->_set_noalias(other);\n    }\n\n    // Initialize an arbitrary matrix from a generic Eigen expression\n    template<typename T, typename 
OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void _init1(const EigenBase<OtherDerived>& other){\n      this->derived() = other;\n    }\n\n    template<typename T, typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void _init1(const ReturnByValue<OtherDerived>& other)\n    {\n      resize(other.rows(), other.cols());\n      other.evalTo(this->derived());\n    }\n\n    template<typename T, typename OtherDerived, int ColsAtCompileTime>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void _init1(const RotationBase<OtherDerived,ColsAtCompileTime>& r)\n    {\n      this->derived() = r;\n    }\n    \n    // For fixed-size Array<Scalar,...>\n    template<typename T>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void _init1(const Scalar& val0,\n                                    typename internal::enable_if<    Base::SizeAtCompileTime!=Dynamic\n                                                                  && Base::SizeAtCompileTime!=1\n                                                                  && internal::is_convertible<T, Scalar>::value\n                                                                  && internal::is_same<typename internal::traits<Derived>::XprKind,ArrayXpr>::value,T>::type* = 0)\n    {\n      Base::setConstant(val0);\n    }\n    \n    // For fixed-size Array<Index,...>\n    template<typename T>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void _init1(const Index& val0,\n                                    typename internal::enable_if<    (!internal::is_same<Index,Scalar>::value)\n                                                                  && (internal::is_same<Index,T>::value)\n                                                                  && Base::SizeAtCompileTime!=Dynamic\n                                                                  && Base::SizeAtCompileTime!=1\n                                                                  && internal::is_convertible<T, Scalar>::value\n          
                                                        && internal::is_same<typename internal::traits<Derived>::XprKind,ArrayXpr>::value,T*>::type* = 0)\n    {\n      Base::setConstant(val0);\n    }\n    \n    template<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>\n    friend struct internal::matrix_swap_impl;\n\n  public:\n    \n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** \\internal\n      * \\brief Override DenseBase::swap() since for dynamic-sized matrices\n      * of same type it is enough to swap the data pointers.\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    void swap(DenseBase<OtherDerived> & other)\n    {\n      enum { SwapPointers = internal::is_same<Derived, OtherDerived>::value && Base::SizeAtCompileTime==Dynamic };\n      internal::matrix_swap_impl<Derived, OtherDerived, bool(SwapPointers)>::run(this->derived(), other.derived());\n    }\n    \n    /** \\internal\n      * \\brief const version forwarded to DenseBase::swap\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    void swap(DenseBase<OtherDerived> const & other)\n    { Base::swap(other.derived()); }\n    \n    EIGEN_DEVICE_FUNC \n    static EIGEN_STRONG_INLINE void _check_template_params()\n    {\n      EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (Options&RowMajor)==RowMajor)\n                        && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (Options&RowMajor)==0)\n                        && ((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0))\n                        && ((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0))\n                        && ((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0))\n                        && ((MaxColsAtCompileTime == Dynamic) || (MaxColsAtCompileTime >= 0))\n                        && (MaxRowsAtCompileTime == RowsAtCompileTime || RowsAtCompileTime==Dynamic)\n                        && 
(MaxColsAtCompileTime == ColsAtCompileTime || ColsAtCompileTime==Dynamic)\n                        && (Options & (DontAlign|RowMajor)) == Options),\n        INVALID_MATRIX_TEMPLATE_PARAMETERS)\n    }\n\n    enum { IsPlainObjectBase = 1 };\n#endif\n};\n\nnamespace internal {\n\ntemplate <typename Derived, typename OtherDerived, bool IsVector>\nstruct conservative_resize_like_impl\n{\n  static void run(DenseBase<Derived>& _this, Index rows, Index cols)\n  {\n    if (_this.rows() == rows && _this.cols() == cols) return;\n    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)\n\n    if ( ( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows\n         (!Derived::IsRowMajor && _this.rows() == rows) )  // column-major and we change only the number of columns\n    {\n      internal::check_rows_cols_for_overflow<Derived::MaxSizeAtCompileTime>::run(rows, cols);\n      _this.derived().m_storage.conservativeResize(rows*cols,rows,cols);\n    }\n    else\n    {\n      // The storage order does not allow us to use reallocation.\n      typename Derived::PlainObject tmp(rows,cols);\n      const Index common_rows = numext::mini(rows, _this.rows());\n      const Index common_cols = numext::mini(cols, _this.cols());\n      tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);\n      _this.derived().swap(tmp);\n    }\n  }\n\n  static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other)\n  {\n    if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;\n\n    // Note: Here is space for improvement. Basically, for conservativeResize(Index,Index),\n    // neither RowsAtCompileTime or ColsAtCompileTime must be Dynamic. If only one of the\n    // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or\n    // conservativeResize(NoChange_t, Index cols). 
For these methods new static asserts like\n    // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good.\n    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)\n    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived)\n\n    if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows\n         (!Derived::IsRowMajor && _this.rows() == other.rows()) )  // column-major and we change only the number of columns\n    {\n      const Index new_rows = other.rows() - _this.rows();\n      const Index new_cols = other.cols() - _this.cols();\n      _this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols());\n      if (new_rows>0)\n        _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows);\n      else if (new_cols>0)\n        _this.bottomRightCorner(other.rows(), new_cols) = other.rightCols(new_cols);\n    }\n    else\n    {\n      // The storage order does not allow us to use reallocation.\n      typename Derived::PlainObject tmp(other);\n      const Index common_rows = numext::mini(tmp.rows(), _this.rows());\n      const Index common_cols = numext::mini(tmp.cols(), _this.cols());\n      tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);\n      _this.derived().swap(tmp);\n    }\n  }\n};\n\n// Here, the specialization for vectors inherits from the general matrix case\n// to allow calling .conservativeResize(rows,cols) on vectors.\ntemplate <typename Derived, typename OtherDerived>\nstruct conservative_resize_like_impl<Derived,OtherDerived,true>\n  : conservative_resize_like_impl<Derived,OtherDerived,false>\n{\n  using conservative_resize_like_impl<Derived,OtherDerived,false>::run;\n  \n  static void run(DenseBase<Derived>& _this, Index size)\n  {\n    const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size;\n    const Index new_cols = Derived::RowsAtCompileTime==1 ? 
size : 1;\n    _this.derived().m_storage.conservativeResize(size,new_rows,new_cols);\n  }\n\n  static void run(DenseBase<Derived>& _this, const DenseBase<OtherDerived>& other)\n  {\n    if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;\n\n    const Index num_new_elements = other.size() - _this.size();\n\n    const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows();\n    const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1;\n    _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols);\n\n    if (num_new_elements > 0)\n      _this.tail(num_new_elements) = other.tail(num_new_elements);\n  }\n};\n\ntemplate<typename MatrixTypeA, typename MatrixTypeB, bool SwapPointers>\nstruct matrix_swap_impl\n{\n  EIGEN_DEVICE_FUNC\n  static inline void run(MatrixTypeA& a, MatrixTypeB& b)\n  {\n    a.base().swap(b);\n  }\n};\n\ntemplate<typename MatrixTypeA, typename MatrixTypeB>\nstruct matrix_swap_impl<MatrixTypeA, MatrixTypeB, true>\n{\n  EIGEN_DEVICE_FUNC\n  static inline void run(MatrixTypeA& a, MatrixTypeB& b)\n  {\n    static_cast<typename MatrixTypeA::Base&>(a).m_storage.swap(static_cast<typename MatrixTypeB::Base&>(b).m_storage);\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_DENSESTORAGEBASE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Product.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PRODUCT_H\n#define EIGEN_PRODUCT_H\n\nnamespace Eigen {\n\ntemplate<typename Lhs, typename Rhs, int Option, typename StorageKind> class ProductImpl;\n\nnamespace internal {\n\ntemplate<typename Lhs, typename Rhs, int Option>\nstruct traits<Product<Lhs, Rhs, Option> >\n{\n  typedef typename remove_all<Lhs>::type LhsCleaned;\n  typedef typename remove_all<Rhs>::type RhsCleaned;\n  typedef traits<LhsCleaned> LhsTraits;\n  typedef traits<RhsCleaned> RhsTraits;\n  \n  typedef MatrixXpr XprKind;\n  \n  typedef typename ScalarBinaryOpTraits<typename traits<LhsCleaned>::Scalar, typename traits<RhsCleaned>::Scalar>::ReturnType Scalar;\n  typedef typename product_promote_storage_type<typename LhsTraits::StorageKind,\n                                                typename RhsTraits::StorageKind,\n                                                internal::product_type<Lhs,Rhs>::ret>::ret StorageKind;\n  typedef typename promote_index_type<typename LhsTraits::StorageIndex,\n                                      typename RhsTraits::StorageIndex>::type StorageIndex;\n  \n  enum {\n    RowsAtCompileTime    = LhsTraits::RowsAtCompileTime,\n    ColsAtCompileTime    = RhsTraits::ColsAtCompileTime,\n    MaxRowsAtCompileTime = LhsTraits::MaxRowsAtCompileTime,\n    MaxColsAtCompileTime = RhsTraits::MaxColsAtCompileTime,\n    \n    // FIXME: only needed by GeneralMatrixMatrixTriangular\n    InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsTraits::ColsAtCompileTime, RhsTraits::RowsAtCompileTime),\n    \n    // The storage order is somewhat arbitrary here. 
The correct one will be determined through the evaluator.\n    Flags = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? RowMajorBit\n          : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0\n          : (   ((LhsTraits::Flags&NoPreferredStorageOrderBit) && (RhsTraits::Flags&RowMajorBit))\n             || ((RhsTraits::Flags&NoPreferredStorageOrderBit) && (LhsTraits::Flags&RowMajorBit)) ) ? RowMajorBit\n          : NoPreferredStorageOrderBit\n  };\n};\n\n} // end namespace internal\n\n/** \\class Product\n  * \\ingroup Core_Module\n  *\n  * \\brief Expression of the product of two arbitrary matrices or vectors\n  *\n  * \\tparam _Lhs the type of the left-hand side expression\n  * \\tparam _Rhs the type of the right-hand side expression\n  *\n  * This class represents an expression of the product of two arbitrary matrices.\n  *\n  * The other template parameters are:\n  * \\tparam Option     can be DefaultProduct, AliasFreeProduct, or LazyProduct\n  *\n  */\ntemplate<typename _Lhs, typename _Rhs, int Option>\nclass Product : public ProductImpl<_Lhs,_Rhs,Option,\n                                   typename internal::product_promote_storage_type<typename internal::traits<_Lhs>::StorageKind,\n                                                                                   typename internal::traits<_Rhs>::StorageKind,\n                                                                                   internal::product_type<_Lhs,_Rhs>::ret>::ret>\n{\n  public:\n    \n    typedef _Lhs Lhs;\n    typedef _Rhs Rhs;\n    \n    typedef typename ProductImpl<\n        Lhs, Rhs, Option,\n        typename internal::product_promote_storage_type<typename internal::traits<Lhs>::StorageKind,\n                                                        typename internal::traits<Rhs>::StorageKind,\n                                                        internal::product_type<Lhs,Rhs>::ret>::ret>::Base Base;\n    EIGEN_GENERIC_PUBLIC_INTERFACE(Product)\n\n    typedef 
typename internal::ref_selector<Lhs>::type LhsNested;\n    typedef typename internal::ref_selector<Rhs>::type RhsNested;\n    typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;\n    typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;\n\n    EIGEN_DEVICE_FUNC Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs)\n    {\n      eigen_assert(lhs.cols() == rhs.rows()\n        && \"invalid matrix product\"\n        && \"if you wanted a coeff-wise or a dot product use the respective explicit functions\");\n    }\n\n    EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); }\n    EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); }\n\n    EIGEN_DEVICE_FUNC const LhsNestedCleaned& lhs() const { return m_lhs; }\n    EIGEN_DEVICE_FUNC const RhsNestedCleaned& rhs() const { return m_rhs; }\n\n  protected:\n\n    LhsNested m_lhs;\n    RhsNested m_rhs;\n};\n\nnamespace internal {\n  \ntemplate<typename Lhs, typename Rhs, int Option, int ProductTag = internal::product_type<Lhs,Rhs>::ret>\nclass dense_product_base\n : public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type\n{};\n\n/** Convertion to scalar for inner-products */\ntemplate<typename Lhs, typename Rhs, int Option>\nclass dense_product_base<Lhs, Rhs, Option, InnerProduct>\n : public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type\n{\n  typedef Product<Lhs,Rhs,Option> ProductXpr;\n  typedef typename internal::dense_xpr_base<ProductXpr>::type Base;\npublic:\n  using Base::derived;\n  typedef typename Base::Scalar Scalar;\n  \n  operator const Scalar() const\n  {\n    return internal::evaluator<ProductXpr>(derived()).coeff(0,0);\n  }\n};\n\n} // namespace internal\n\n// Generic API dispatcher\ntemplate<typename Lhs, typename Rhs, int Option, typename StorageKind>\nclass ProductImpl : public internal::generic_xpr_base<Product<Lhs,Rhs,Option>, MatrixXpr, StorageKind>::type\n{\n  public:\n    typedef typename 
internal::generic_xpr_base<Product<Lhs,Rhs,Option>, MatrixXpr, StorageKind>::type Base;\n};\n\ntemplate<typename Lhs, typename Rhs, int Option>\nclass ProductImpl<Lhs,Rhs,Option,Dense>\n  : public internal::dense_product_base<Lhs,Rhs,Option>\n{\n    typedef Product<Lhs, Rhs, Option> Derived;\n    \n  public:\n    \n    typedef typename internal::dense_product_base<Lhs, Rhs, Option> Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(Derived)\n  protected:\n    enum {\n      IsOneByOne = (RowsAtCompileTime == 1 || RowsAtCompileTime == Dynamic) && \n                   (ColsAtCompileTime == 1 || ColsAtCompileTime == Dynamic),\n      EnableCoeff = IsOneByOne || Option==LazyProduct\n    };\n    \n  public:\n  \n    EIGEN_DEVICE_FUNC Scalar coeff(Index row, Index col) const\n    {\n      EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);\n      eigen_assert( (Option==LazyProduct) || (this->rows() == 1 && this->cols() == 1) );\n      \n      return internal::evaluator<Derived>(derived()).coeff(row,col);\n    }\n\n    EIGEN_DEVICE_FUNC Scalar coeff(Index i) const\n    {\n      EIGEN_STATIC_ASSERT(EnableCoeff, THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS);\n      eigen_assert( (Option==LazyProduct) || (this->rows() == 1 && this->cols() == 1) );\n      \n      return internal::evaluator<Derived>(derived()).coeff(i);\n    }\n    \n  \n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_PRODUCT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/ProductEvaluators.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\n#ifndef EIGEN_PRODUCTEVALUATORS_H\n#define EIGEN_PRODUCTEVALUATORS_H\n\nnamespace Eigen {\n  \nnamespace internal {\n\n/** \\internal\n  * Evaluator of a product expression.\n  * Since products require special treatments to handle all possible cases,\n  * we simply deffer the evaluation logic to a product_evaluator class\n  * which offers more partial specialization possibilities.\n  * \n  * \\sa class product_evaluator\n  */\ntemplate<typename Lhs, typename Rhs, int Options>\nstruct evaluator<Product<Lhs, Rhs, Options> > \n : public product_evaluator<Product<Lhs, Rhs, Options> >\n{\n  typedef Product<Lhs, Rhs, Options> XprType;\n  typedef product_evaluator<XprType> Base;\n  \n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}\n};\n \n// Catch \"scalar * ( A * B )\" and transform it to \"(A*scalar) * B\"\n// TODO we should apply that rule only if that's really helpful\ntemplate<typename Lhs, typename Rhs, typename Scalar1, typename Scalar2, typename Plain1>\nstruct evaluator_assume_aliasing<CwiseBinaryOp<internal::scalar_product_op<Scalar1,Scalar2>,\n                                               const CwiseNullaryOp<internal::scalar_constant_op<Scalar1>, Plain1>,\n                                               const Product<Lhs, Rhs, DefaultProduct> > >\n{\n  static const bool value = true;\n};\ntemplate<typename Lhs, typename Rhs, typename Scalar1, typename Scalar2, typename Plain1>\nstruct 
evaluator<CwiseBinaryOp<internal::scalar_product_op<Scalar1,Scalar2>,\n                               const CwiseNullaryOp<internal::scalar_constant_op<Scalar1>, Plain1>,\n                               const Product<Lhs, Rhs, DefaultProduct> > >\n : public evaluator<Product<EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar1,Lhs,product), Rhs, DefaultProduct> >\n{\n  typedef CwiseBinaryOp<internal::scalar_product_op<Scalar1,Scalar2>,\n                               const CwiseNullaryOp<internal::scalar_constant_op<Scalar1>, Plain1>,\n                               const Product<Lhs, Rhs, DefaultProduct> > XprType;\n  typedef evaluator<Product<EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar1,Lhs,product), Rhs, DefaultProduct> > Base;\n\n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)\n    : Base(xpr.lhs().functor().m_other * xpr.rhs().lhs() * xpr.rhs().rhs())\n  {}\n};\n\n\ntemplate<typename Lhs, typename Rhs, int DiagIndex>\nstruct evaluator<Diagonal<const Product<Lhs, Rhs, DefaultProduct>, DiagIndex> > \n : public evaluator<Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex> >\n{\n  typedef Diagonal<const Product<Lhs, Rhs, DefaultProduct>, DiagIndex> XprType;\n  typedef evaluator<Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex> > Base;\n  \n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)\n    : Base(Diagonal<const Product<Lhs, Rhs, LazyProduct>, DiagIndex>(\n        Product<Lhs, Rhs, LazyProduct>(xpr.nestedExpression().lhs(), xpr.nestedExpression().rhs()),\n        xpr.index() ))\n  {}\n};\n\n\n// Helper class to perform a matrix product with the destination at hand.\n// Depending on the sizes of the factors, there are different evaluation strategies\n// as controlled by internal::product_type.\ntemplate< typename Lhs, typename Rhs,\n          typename LhsShape = typename evaluator_traits<Lhs>::Shape,\n          typename RhsShape = typename evaluator_traits<Rhs>::Shape,\n          int ProductType = 
internal::product_type<Lhs,Rhs>::value>\nstruct generic_product_impl;\n\ntemplate<typename Lhs, typename Rhs>\nstruct evaluator_assume_aliasing<Product<Lhs, Rhs, DefaultProduct> > {\n  static const bool value = true;\n};\n\n// This is the default evaluator implementation for products:\n// It creates a temporary and call generic_product_impl\ntemplate<typename Lhs, typename Rhs, int Options, int ProductTag, typename LhsShape, typename RhsShape>\nstruct product_evaluator<Product<Lhs, Rhs, Options>, ProductTag, LhsShape, RhsShape>\n  : public evaluator<typename Product<Lhs, Rhs, Options>::PlainObject>\n{\n  typedef Product<Lhs, Rhs, Options> XprType;\n  typedef typename XprType::PlainObject PlainObject;\n  typedef evaluator<PlainObject> Base;\n  enum {\n    Flags = Base::Flags | EvalBeforeNestingBit\n  };\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  explicit product_evaluator(const XprType& xpr)\n    : m_result(xpr.rows(), xpr.cols())\n  {\n    ::new (static_cast<Base*>(this)) Base(m_result);\n    \n// FIXME shall we handle nested_eval here?,\n// if so, then we must take care at removing the call to nested_eval in the specializations (e.g., in permutation_matrix_product, transposition_matrix_product, etc.)\n//     typedef typename internal::nested_eval<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;\n//     typedef typename internal::nested_eval<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;\n//     typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;\n//     typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;\n//     \n//     const LhsNested lhs(xpr.lhs());\n//     const RhsNested rhs(xpr.rhs());\n//   \n//     generic_product_impl<LhsNestedCleaned, RhsNestedCleaned>::evalTo(m_result, lhs, rhs);\n\n    generic_product_impl<Lhs, Rhs, LhsShape, RhsShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());\n  }\n  \nprotected:  \n  PlainObject m_result;\n};\n\n// The following three shortcuts are enabled only if the scalar types 
match excatly.\n// TODO: we could enable them for different scalar types when the product is not vectorized.\n\n// Dense = Product\ntemplate< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar>\nstruct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::assign_op<Scalar,Scalar>, Dense2Dense,\n  typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type>\n{\n  typedef Product<Lhs,Rhs,Options> SrcXprType;\n  static EIGEN_STRONG_INLINE\n  void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n    // FIXME shall we handle nested_eval here?\n    generic_product_impl<Lhs, Rhs>::evalTo(dst, src.lhs(), src.rhs());\n  }\n};\n\n// Dense += Product\ntemplate< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar>\nstruct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::add_assign_op<Scalar,Scalar>, Dense2Dense,\n  typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type>\n{\n  typedef Product<Lhs,Rhs,Options> SrcXprType;\n  static EIGEN_STRONG_INLINE\n  void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,Scalar> &)\n  {\n    eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());\n    // FIXME shall we handle nested_eval here?\n    generic_product_impl<Lhs, Rhs>::addTo(dst, src.lhs(), src.rhs());\n  }\n};\n\n// Dense -= Product\ntemplate< typename DstXprType, typename Lhs, typename Rhs, int Options, typename Scalar>\nstruct Assignment<DstXprType, Product<Lhs,Rhs,Options>, internal::sub_assign_op<Scalar,Scalar>, Dense2Dense,\n  typename enable_if<(Options==DefaultProduct || Options==AliasFreeProduct)>::type>\n{\n  typedef Product<Lhs,Rhs,Options> SrcXprType;\n  static EIGEN_STRONG_INLINE\n  void run(DstXprType &dst, 
const SrcXprType &src, const internal::sub_assign_op<Scalar,Scalar> &)\n  {\n    eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());\n    // FIXME shall we handle nested_eval here?\n    generic_product_impl<Lhs, Rhs>::subTo(dst, src.lhs(), src.rhs());\n  }\n};\n\n\n// Dense ?= scalar * Product\n// TODO we should apply that rule if that's really helpful\n// for instance, this is not good for inner products\ntemplate< typename DstXprType, typename Lhs, typename Rhs, typename AssignFunc, typename Scalar, typename ScalarBis, typename Plain>\nstruct Assignment<DstXprType, CwiseBinaryOp<internal::scalar_product_op<ScalarBis,Scalar>, const CwiseNullaryOp<internal::scalar_constant_op<ScalarBis>,Plain>,\n                                           const Product<Lhs,Rhs,DefaultProduct> >, AssignFunc, Dense2Dense>\n{\n  typedef CwiseBinaryOp<internal::scalar_product_op<ScalarBis,Scalar>,\n                        const CwiseNullaryOp<internal::scalar_constant_op<ScalarBis>,Plain>,\n                        const Product<Lhs,Rhs,DefaultProduct> > SrcXprType;\n  static EIGEN_STRONG_INLINE\n  void run(DstXprType &dst, const SrcXprType &src, const AssignFunc& func)\n  {\n    call_assignment_no_alias(dst, (src.lhs().functor().m_other * src.rhs().lhs())*src.rhs().rhs(), func);\n  }\n};\n\n//----------------------------------------\n// Catch \"Dense ?= xpr + Product<>\" expression to save one temporary\n// FIXME we could probably enable these rules for any product, i.e., not only Dense and DefaultProduct\n\ntemplate<typename OtherXpr, typename Lhs, typename Rhs>\nstruct evaluator_assume_aliasing<CwiseBinaryOp<internal::scalar_sum_op<typename OtherXpr::Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, const OtherXpr,\n                                               const Product<Lhs,Rhs,DefaultProduct> >, DenseShape > {\n  static const bool value = true;\n};\n\ntemplate<typename DstXprType, typename OtherXpr, typename ProductType, typename Func1, typename 
Func2>\nstruct assignment_from_xpr_op_product\n{\n  template<typename SrcXprType, typename InitialFunc>\n  static EIGEN_STRONG_INLINE\n  void run(DstXprType &dst, const SrcXprType &src, const InitialFunc& /*func*/)\n  {\n    call_assignment_no_alias(dst, src.lhs(), Func1());\n    call_assignment_no_alias(dst, src.rhs(), Func2());\n  }\n};\n\n#define EIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(ASSIGN_OP,BINOP,ASSIGN_OP2) \\\n  template< typename DstXprType, typename OtherXpr, typename Lhs, typename Rhs, typename DstScalar, typename SrcScalar, typename OtherScalar,typename ProdScalar> \\\n  struct Assignment<DstXprType, CwiseBinaryOp<internal::BINOP<OtherScalar,ProdScalar>, const OtherXpr, \\\n                                            const Product<Lhs,Rhs,DefaultProduct> >, internal::ASSIGN_OP<DstScalar,SrcScalar>, Dense2Dense> \\\n    : assignment_from_xpr_op_product<DstXprType, OtherXpr, Product<Lhs,Rhs,DefaultProduct>, internal::ASSIGN_OP<DstScalar,OtherScalar>, internal::ASSIGN_OP2<DstScalar,ProdScalar> > \\\n  {}\n\nEIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(assign_op,    scalar_sum_op,add_assign_op);\nEIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(add_assign_op,scalar_sum_op,add_assign_op);\nEIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(sub_assign_op,scalar_sum_op,sub_assign_op);\n\nEIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(assign_op,    scalar_difference_op,sub_assign_op);\nEIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(add_assign_op,scalar_difference_op,sub_assign_op);\nEIGEN_CATCH_ASSIGN_XPR_OP_PRODUCT(sub_assign_op,scalar_difference_op,add_assign_op);\n\n//----------------------------------------\n\ntemplate<typename Lhs, typename Rhs>\nstruct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,InnerProduct>\n{\n  template<typename Dst>\n  static inline void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    dst.coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum();\n  }\n  \n  template<typename Dst>\n  static inline void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    dst.coeffRef(0,0) += 
(lhs.transpose().cwiseProduct(rhs)).sum();\n  }\n  \n  template<typename Dst>\n  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  { dst.coeffRef(0,0) -= (lhs.transpose().cwiseProduct(rhs)).sum(); }\n};\n\n\n/***********************************************************************\n*  Implementation of outer dense * dense vector product\n***********************************************************************/\n\n// Column major result\ntemplate<typename Dst, typename Lhs, typename Rhs, typename Func>\nvoid outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const false_type&)\n{\n  evaluator<Rhs> rhsEval(rhs);\n  typename nested_eval<Lhs,Rhs::SizeAtCompileTime>::type actual_lhs(lhs);\n  // FIXME if cols is large enough, then it might be useful to make sure that lhs is sequentially stored\n  // FIXME not very good if rhs is real and lhs complex while alpha is real too\n  const Index cols = dst.cols();\n  for (Index j=0; j<cols; ++j)\n    func(dst.col(j), rhsEval.coeff(Index(0),j) * actual_lhs);\n}\n\n// Row major result\ntemplate<typename Dst, typename Lhs, typename Rhs, typename Func>\nvoid outer_product_selector_run(Dst& dst, const Lhs &lhs, const Rhs &rhs, const Func& func, const true_type&)\n{\n  evaluator<Lhs> lhsEval(lhs);\n  typename nested_eval<Rhs,Lhs::SizeAtCompileTime>::type actual_rhs(rhs);\n  // FIXME if rows is large enough, then it might be useful to make sure that rhs is sequentially stored\n  // FIXME not very good if lhs is real and rhs complex while alpha is real too\n  const Index rows = dst.rows();\n  for (Index i=0; i<rows; ++i)\n    func(dst.row(i), lhsEval.coeff(i,Index(0)) * actual_rhs);\n}\n\ntemplate<typename Lhs, typename Rhs>\nstruct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,OuterProduct>\n{\n  template<typename T> struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {};\n  typedef typename Product<Lhs,Rhs>::Scalar 
Scalar;\n  \n  // TODO it would be nice to be able to exploit our *_assign_op functors for that purpose\n  struct set  { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived()  = src; } };\n  struct add  { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } };\n  struct sub  { template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() -= src; } };\n  struct adds {\n    Scalar m_scale;\n    explicit adds(const Scalar& s) : m_scale(s) {}\n    template<typename Dst, typename Src> void operator()(const Dst& dst, const Src& src) const {\n      dst.const_cast_derived() += m_scale * src;\n    }\n  };\n  \n  template<typename Dst>\n  static inline void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    internal::outer_product_selector_run(dst, lhs, rhs, set(), is_row_major<Dst>());\n  }\n  \n  template<typename Dst>\n  static inline void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    internal::outer_product_selector_run(dst, lhs, rhs, add(), is_row_major<Dst>());\n  }\n  \n  template<typename Dst>\n  static inline void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    internal::outer_product_selector_run(dst, lhs, rhs, sub(), is_row_major<Dst>());\n  }\n  \n  template<typename Dst>\n  static inline void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)\n  {\n    internal::outer_product_selector_run(dst, lhs, rhs, adds(alpha), is_row_major<Dst>());\n  }\n  \n};\n\n\n// This base class provides default implementations for evalTo, addTo, subTo, in terms of scaleAndAddTo\ntemplate<typename Lhs, typename Rhs, typename Derived>\nstruct generic_product_impl_base\n{\n  typedef typename Product<Lhs,Rhs>::Scalar Scalar;\n  \n  template<typename Dst>\n  static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  
{ dst.setZero(); scaleAndAddTo(dst, lhs, rhs, Scalar(1)); }\n\n  template<typename Dst>\n  static EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  { scaleAndAddTo(dst,lhs, rhs, Scalar(1)); }\n\n  template<typename Dst>\n  static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  { scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); }\n  \n  template<typename Dst>\n  static EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)\n  { Derived::scaleAndAddTo(dst,lhs,rhs,alpha); }\n\n};\n\ntemplate<typename Lhs, typename Rhs>\nstruct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemvProduct>\n  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemvProduct> >\n{\n  typedef typename nested_eval<Lhs,1>::type LhsNested;\n  typedef typename nested_eval<Rhs,1>::type RhsNested;\n  typedef typename Product<Lhs,Rhs>::Scalar Scalar;\n  enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight };\n  typedef typename internal::remove_all<typename internal::conditional<int(Side)==OnTheRight,LhsNested,RhsNested>::type>::type MatrixType;\n\n  template<typename Dest>\n  static EIGEN_STRONG_INLINE void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)\n  {\n    LhsNested actual_lhs(lhs);\n    RhsNested actual_rhs(rhs);\n    internal::gemv_dense_selector<Side,\n                            (int(MatrixType::Flags)&RowMajorBit) ? 
RowMajor : ColMajor,\n                            bool(internal::blas_traits<MatrixType>::HasUsableDirectAccess)\n                           >::run(actual_lhs, actual_rhs, dst, alpha);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs>\nstruct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> \n{\n  typedef typename Product<Lhs,Rhs>::Scalar Scalar;\n  \n  template<typename Dst>\n  static EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    // Same as: dst.noalias() = lhs.lazyProduct(rhs);\n    // but easier on the compiler side\n    call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::assign_op<typename Dst::Scalar,Scalar>());\n  }\n  \n  template<typename Dst>\n  static EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    // dst.noalias() += lhs.lazyProduct(rhs);\n    call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::add_assign_op<typename Dst::Scalar,Scalar>());\n  }\n  \n  template<typename Dst>\n  static EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    // dst.noalias() -= lhs.lazyProduct(rhs);\n    call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::sub_assign_op<typename Dst::Scalar,Scalar>());\n  }\n  \n//   template<typename Dst>\n//   static inline void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)\n//   { dst.noalias() += alpha * lhs.lazyProduct(rhs); }\n};\n\n// This specialization enforces the use of a coefficient-based evaluation strategy\ntemplate<typename Lhs, typename Rhs>\nstruct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,LazyCoeffBasedProductMode>\n  : generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> {};\n\n// Case 2: Evaluate coeff by coeff\n//\n// This is mostly taken from CoeffBasedProduct.h\n// The main difference is that we add an extra argument to the etor_product_*_impl::run() function\n// for the inner dimension of the product, 
because evaluator object do not know their size.\n\ntemplate<int Traversal, int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>\nstruct etor_product_coeff_impl;\n\ntemplate<int StorageOrder, int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>\nstruct etor_product_packet_impl;\n\ntemplate<typename Lhs, typename Rhs, int ProductTag>\nstruct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape, DenseShape>\n    : evaluator_base<Product<Lhs, Rhs, LazyProduct> >\n{\n  typedef Product<Lhs, Rhs, LazyProduct> XprType;\n  typedef typename XprType::Scalar Scalar;\n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  explicit product_evaluator(const XprType& xpr)\n    : m_lhs(xpr.lhs()),\n      m_rhs(xpr.rhs()),\n      m_lhsImpl(m_lhs),     // FIXME the creation of the evaluator objects should result in a no-op, but check that!\n      m_rhsImpl(m_rhs),     //       Moreover, they are only useful for the packet path, so we could completely disable them when not needed,\n                            //       or perhaps declare them on the fly on the packet method... 
We have experiment to check what's best.\n      m_innerDim(xpr.lhs().cols())\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits<Scalar>::MulCost);\n    EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits<Scalar>::AddCost);\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n#if 0\n    std::cerr << \"LhsOuterStrideBytes=  \" << LhsOuterStrideBytes << \"\\n\";\n    std::cerr << \"RhsOuterStrideBytes=  \" << RhsOuterStrideBytes << \"\\n\";\n    std::cerr << \"LhsAlignment=         \" << LhsAlignment << \"\\n\";\n    std::cerr << \"RhsAlignment=         \" << RhsAlignment << \"\\n\";\n    std::cerr << \"CanVectorizeLhs=      \" << CanVectorizeLhs << \"\\n\";\n    std::cerr << \"CanVectorizeRhs=      \" << CanVectorizeRhs << \"\\n\";\n    std::cerr << \"CanVectorizeInner=    \" << CanVectorizeInner << \"\\n\";\n    std::cerr << \"EvalToRowMajor=       \" << EvalToRowMajor << \"\\n\";\n    std::cerr << \"Alignment=            \" << Alignment << \"\\n\";\n    std::cerr << \"Flags=                \" << Flags << \"\\n\";\n#endif\n  }\n\n  // Everything below here is taken from CoeffBasedProduct.h\n\n  typedef typename internal::nested_eval<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;\n  typedef typename internal::nested_eval<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;\n  \n  typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;\n  typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;\n\n  typedef evaluator<LhsNestedCleaned> LhsEtorType;\n  typedef evaluator<RhsNestedCleaned> RhsEtorType;\n\n  enum {\n    RowsAtCompileTime = LhsNestedCleaned::RowsAtCompileTime,\n    ColsAtCompileTime = RhsNestedCleaned::ColsAtCompileTime,\n    InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(LhsNestedCleaned::ColsAtCompileTime, RhsNestedCleaned::RowsAtCompileTime),\n    MaxRowsAtCompileTime = LhsNestedCleaned::MaxRowsAtCompileTime,\n    MaxColsAtCompileTime = RhsNestedCleaned::MaxColsAtCompileTime\n  };\n\n  typedef typename 
find_best_packet<Scalar,RowsAtCompileTime>::type LhsVecPacketType;\n  typedef typename find_best_packet<Scalar,ColsAtCompileTime>::type RhsVecPacketType;\n\n  enum {\n      \n    LhsCoeffReadCost = LhsEtorType::CoeffReadCost,\n    RhsCoeffReadCost = RhsEtorType::CoeffReadCost,\n    CoeffReadCost = InnerSize==0 ? NumTraits<Scalar>::ReadCost\n                  : InnerSize == Dynamic ? HugeCost\n                  : InnerSize * (NumTraits<Scalar>::MulCost + LhsCoeffReadCost + RhsCoeffReadCost)\n                    + (InnerSize - 1) * NumTraits<Scalar>::AddCost,\n\n    Unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT,\n    \n    LhsFlags = LhsEtorType::Flags,\n    RhsFlags = RhsEtorType::Flags,\n    \n    LhsRowMajor = LhsFlags & RowMajorBit,\n    RhsRowMajor = RhsFlags & RowMajorBit,\n\n    LhsVecPacketSize = unpacket_traits<LhsVecPacketType>::size,\n    RhsVecPacketSize = unpacket_traits<RhsVecPacketType>::size,\n\n    // Here, we don't care about alignment larger than the usable packet size.\n    LhsAlignment = EIGEN_PLAIN_ENUM_MIN(LhsEtorType::Alignment,LhsVecPacketSize*int(sizeof(typename LhsNestedCleaned::Scalar))),\n    RhsAlignment = EIGEN_PLAIN_ENUM_MIN(RhsEtorType::Alignment,RhsVecPacketSize*int(sizeof(typename RhsNestedCleaned::Scalar))),\n      \n    SameType = is_same<typename LhsNestedCleaned::Scalar,typename RhsNestedCleaned::Scalar>::value,\n\n    CanVectorizeRhs = bool(RhsRowMajor) && (RhsFlags & PacketAccessBit) && (ColsAtCompileTime!=1),\n    CanVectorizeLhs = (!LhsRowMajor) && (LhsFlags & PacketAccessBit) && (RowsAtCompileTime!=1),\n\n    EvalToRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1\n                    : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0\n                    : (bool(RhsRowMajor) && !CanVectorizeLhs),\n\n    Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & ~RowMajorBit)\n          | (EvalToRowMajor ? 
RowMajorBit : 0)\n          // TODO enable vectorization for mixed types\n          | (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0)\n          | (XprType::IsVectorAtCompileTime ? LinearAccessBit : 0),\n          \n    LhsOuterStrideBytes = int(LhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename LhsNestedCleaned::Scalar)),\n    RhsOuterStrideBytes = int(RhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename RhsNestedCleaned::Scalar)),\n\n    Alignment = bool(CanVectorizeLhs) ? (LhsOuterStrideBytes<=0 || (int(LhsOuterStrideBytes) % EIGEN_PLAIN_ENUM_MAX(1,LhsAlignment))!=0 ? 0 : LhsAlignment)\n              : bool(CanVectorizeRhs) ? (RhsOuterStrideBytes<=0 || (int(RhsOuterStrideBytes) % EIGEN_PLAIN_ENUM_MAX(1,RhsAlignment))!=0 ? 0 : RhsAlignment)\n              : 0,\n\n    /* CanVectorizeInner deserves special explanation. It does not affect the product flags. It is not used outside\n     * of Product. If the Product itself is not a packet-access expression, there is still a chance that the inner\n     * loop of the product might be vectorized. This is the meaning of CanVectorizeInner. Since it doesn't affect\n     * the Flags, it is safe to make this value depend on ActualPacketAccessBit, that doesn't affect the ABI.\n     */\n    CanVectorizeInner =    SameType\n                        && LhsRowMajor\n                        && (!RhsRowMajor)\n                        && (LhsFlags & RhsFlags & ActualPacketAccessBit)\n                        && (InnerSize % packet_traits<Scalar>::size == 0)\n  };\n  \n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const\n  {\n    return (m_lhs.row(row).transpose().cwiseProduct( m_rhs.col(col) )).sum();\n  }\n\n  /* Allow index-based non-packet access. 
It is impossible though to allow index-based packed access,\n   * which is why we don't set the LinearAccessBit.\n   * TODO: this seems possible when the result is a vector\n   */\n  EIGEN_DEVICE_FUNC const CoeffReturnType coeff(Index index) const\n  {\n    const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 0 : index;\n    const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? index : 0;\n    return (m_lhs.row(row).transpose().cwiseProduct( m_rhs.col(col) )).sum();\n  }\n\n  template<int LoadMode, typename PacketType>\n  const PacketType packet(Index row, Index col) const\n  {\n    PacketType res;\n    typedef etor_product_packet_impl<bool(int(Flags)&RowMajorBit) ? RowMajor : ColMajor,\n                                     Unroll ? int(InnerSize) : Dynamic,\n                                     LhsEtorType, RhsEtorType, PacketType, LoadMode> PacketImpl;\n    PacketImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res);\n    return res;\n  }\n\n  template<int LoadMode, typename PacketType>\n  const PacketType packet(Index index) const\n  {\n    const Index row = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 0 : index;\n    const Index col = (RowsAtCompileTime == 1 || MaxRowsAtCompileTime==1) ? 
index : 0;\n    return packet<LoadMode,PacketType>(row,col);\n  }\n\nprotected:\n  typename internal::add_const_on_value_type<LhsNested>::type m_lhs;\n  typename internal::add_const_on_value_type<RhsNested>::type m_rhs;\n  \n  LhsEtorType m_lhsImpl;\n  RhsEtorType m_rhsImpl;\n\n  // TODO: Get rid of m_innerDim if known at compile time\n  Index m_innerDim;\n};\n\ntemplate<typename Lhs, typename Rhs>\nstruct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, LazyCoeffBasedProductMode, DenseShape, DenseShape>\n  : product_evaluator<Product<Lhs, Rhs, LazyProduct>, CoeffBasedProductMode, DenseShape, DenseShape>\n{\n  typedef Product<Lhs, Rhs, DefaultProduct> XprType;\n  typedef Product<Lhs, Rhs, LazyProduct> BaseProduct;\n  typedef product_evaluator<BaseProduct, CoeffBasedProductMode, DenseShape, DenseShape> Base;\n  enum {\n    Flags = Base::Flags | EvalBeforeNestingBit\n  };\n  EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)\n    : Base(BaseProduct(xpr.lhs(),xpr.rhs()))\n  {}\n};\n\n/****************************************\n*** Coeff based product, Packet path  ***\n****************************************/\n\ntemplate<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>\nstruct etor_product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>\n{\n  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res)\n  {\n    etor_product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, innerDim, res);\n    res =  pmadd(pset1<Packet>(lhs.coeff(row, Index(UnrollingIndex-1))), rhs.template packet<LoadMode,Packet>(Index(UnrollingIndex-1), col), res);\n  }\n};\n\ntemplate<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>\nstruct etor_product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>\n{\n  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const 
Rhs& rhs, Index innerDim, Packet &res)\n  {\n    etor_product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, innerDim, res);\n    res =  pmadd(lhs.template packet<LoadMode,Packet>(row, Index(UnrollingIndex-1)), pset1<Packet>(rhs.coeff(Index(UnrollingIndex-1), col)), res);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename Packet, int LoadMode>\nstruct etor_product_packet_impl<RowMajor, 1, Lhs, Rhs, Packet, LoadMode>\n{\n  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res)\n  {\n    res = pmul(pset1<Packet>(lhs.coeff(row, Index(0))),rhs.template packet<LoadMode,Packet>(Index(0), col));\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename Packet, int LoadMode>\nstruct etor_product_packet_impl<ColMajor, 1, Lhs, Rhs, Packet, LoadMode>\n{\n  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res)\n  {\n    res = pmul(lhs.template packet<LoadMode,Packet>(row, Index(0)), pset1<Packet>(rhs.coeff(Index(0), col)));\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename Packet, int LoadMode>\nstruct etor_product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>\n{\n  static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res)\n  {\n    res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename Packet, int LoadMode>\nstruct etor_product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>\n{\n  static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res)\n  {\n    res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename Packet, int LoadMode>\nstruct etor_product_packet_impl<RowMajor, Dynamic, Lhs, 
Rhs, Packet, LoadMode>\n{\n  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res)\n  {\n    res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));\n    for(Index i = 0; i < innerDim; ++i)\n      res =  pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode,Packet>(i, col), res);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename Packet, int LoadMode>\nstruct etor_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>\n{\n  static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res)\n  {\n    res = pset1<Packet>(typename unpacket_traits<Packet>::type(0));\n    for(Index i = 0; i < innerDim; ++i)\n      res =  pmadd(lhs.template packet<LoadMode,Packet>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);\n  }\n};\n\n\n/***************************************************************************\n* Triangular products\n***************************************************************************/\ntemplate<int Mode, bool LhsIsTriangular,\n         typename Lhs, bool LhsIsVector,\n         typename Rhs, bool RhsIsVector>\nstruct triangular_product_impl;\n\ntemplate<typename Lhs, typename Rhs, int ProductTag>\nstruct generic_product_impl<Lhs,Rhs,TriangularShape,DenseShape,ProductTag>\n  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,TriangularShape,DenseShape,ProductTag> >\n{\n  typedef typename Product<Lhs,Rhs>::Scalar Scalar;\n  \n  template<typename Dest>\n  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)\n  {\n    triangular_product_impl<Lhs::Mode,true,typename Lhs::MatrixType,false,Rhs, Rhs::ColsAtCompileTime==1>\n        ::run(dst, lhs.nestedExpression(), rhs, alpha);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductTag>\nstruct generic_product_impl<Lhs,Rhs,DenseShape,TriangularShape,ProductTag>\n: 
generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,TriangularShape,ProductTag> >\n{\n  typedef typename Product<Lhs,Rhs>::Scalar Scalar;\n  \n  template<typename Dest>\n  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)\n  {\n    triangular_product_impl<Rhs::Mode,false,Lhs,Lhs::RowsAtCompileTime==1, typename Rhs::MatrixType, false>::run(dst, lhs, rhs.nestedExpression(), alpha);\n  }\n};\n\n\n/***************************************************************************\n* SelfAdjoint products\n***************************************************************************/\ntemplate <typename Lhs, int LhsMode, bool LhsIsVector,\n          typename Rhs, int RhsMode, bool RhsIsVector>\nstruct selfadjoint_product_impl;\n\ntemplate<typename Lhs, typename Rhs, int ProductTag>\nstruct generic_product_impl<Lhs,Rhs,SelfAdjointShape,DenseShape,ProductTag>\n  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SelfAdjointShape,DenseShape,ProductTag> >\n{\n  typedef typename Product<Lhs,Rhs>::Scalar Scalar;\n  \n  template<typename Dest>\n  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)\n  {\n    selfadjoint_product_impl<typename Lhs::MatrixType,Lhs::Mode,false,Rhs,0,Rhs::IsVectorAtCompileTime>::run(dst, lhs.nestedExpression(), rhs, alpha);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductTag>\nstruct generic_product_impl<Lhs,Rhs,DenseShape,SelfAdjointShape,ProductTag>\n: generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SelfAdjointShape,ProductTag> >\n{\n  typedef typename Product<Lhs,Rhs>::Scalar Scalar;\n  \n  template<typename Dest>\n  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)\n  {\n    selfadjoint_product_impl<Lhs,0,Lhs::IsVectorAtCompileTime,typename Rhs::MatrixType,Rhs::Mode,false>::run(dst, lhs, rhs.nestedExpression(), alpha);\n  
}\n};\n\n\n/***************************************************************************\n* Diagonal products\n***************************************************************************/\n  \ntemplate<typename MatrixType, typename DiagonalType, typename Derived, int ProductOrder>\nstruct diagonal_product_evaluator_base\n  : evaluator_base<Derived>\n{\n   typedef typename ScalarBinaryOpTraits<typename MatrixType::Scalar, typename DiagonalType::Scalar>::ReturnType Scalar;\npublic:\n  enum {\n    CoeffReadCost = NumTraits<Scalar>::MulCost + evaluator<MatrixType>::CoeffReadCost + evaluator<DiagonalType>::CoeffReadCost,\n    \n    MatrixFlags = evaluator<MatrixType>::Flags,\n    DiagFlags = evaluator<DiagonalType>::Flags,\n    _StorageOrder = MatrixFlags & RowMajorBit ? RowMajor : ColMajor,\n    _ScalarAccessOnDiag =  !((int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheLeft)\n                           ||(int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheRight)),\n    _SameTypes = is_same<typename MatrixType::Scalar, typename DiagonalType::Scalar>::value,\n    // FIXME currently we need same types, but in the future the next rule should be the one\n    //_Vectorizable = bool(int(MatrixFlags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagFlags)&PacketAccessBit))),\n    _Vectorizable = bool(int(MatrixFlags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagFlags)&PacketAccessBit))),\n    _LinearAccessMask = (MatrixType::RowsAtCompileTime==1 || MatrixType::ColsAtCompileTime==1) ? LinearAccessBit : 0,\n    Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixFlags)) | (_Vectorizable ? 
PacketAccessBit : 0),\n    Alignment = evaluator<MatrixType>::Alignment\n  };\n  \n  diagonal_product_evaluator_base(const MatrixType &mat, const DiagonalType &diag)\n    : m_diagImpl(diag), m_matImpl(mat)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits<Scalar>::MulCost);\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n  \n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const\n  {\n    return m_diagImpl.coeff(idx) * m_matImpl.coeff(idx);\n  }\n  \nprotected:\n  template<int LoadMode,typename PacketType>\n  EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::true_type) const\n  {\n    return internal::pmul(m_matImpl.template packet<LoadMode,PacketType>(row, col),\n                          internal::pset1<PacketType>(m_diagImpl.coeff(id)));\n  }\n  \n  template<int LoadMode,typename PacketType>\n  EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::false_type) const\n  {\n    enum {\n      InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime,\n      DiagonalPacketLoadMode = EIGEN_PLAIN_ENUM_MIN(LoadMode,((InnerSize%16) == 0) ? 
int(Aligned16) : int(evaluator<DiagonalType>::Alignment)) // FIXME hardcoded 16!!\n    };\n    return internal::pmul(m_matImpl.template packet<LoadMode,PacketType>(row, col),\n                          m_diagImpl.template packet<DiagonalPacketLoadMode,PacketType>(id));\n  }\n  \n  evaluator<DiagonalType> m_diagImpl;\n  evaluator<MatrixType>   m_matImpl;\n};\n\n// diagonal * dense\ntemplate<typename Lhs, typename Rhs, int ProductKind, int ProductTag>\nstruct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DiagonalShape, DenseShape>\n  : diagonal_product_evaluator_base<Rhs, typename Lhs::DiagonalVectorType, Product<Lhs, Rhs, LazyProduct>, OnTheLeft>\n{\n  typedef diagonal_product_evaluator_base<Rhs, typename Lhs::DiagonalVectorType, Product<Lhs, Rhs, LazyProduct>, OnTheLeft> Base;\n  using Base::m_diagImpl;\n  using Base::m_matImpl;\n  using Base::coeff;\n  typedef typename Base::Scalar Scalar;\n  \n  typedef Product<Lhs, Rhs, ProductKind> XprType;\n  typedef typename XprType::PlainObject PlainObject;\n  \n  enum {\n    StorageOrder = int(Rhs::Flags) & RowMajorBit ? 
RowMajor : ColMajor\n  };\n\n  EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)\n    : Base(xpr.rhs(), xpr.lhs().diagonal())\n  {\n  }\n  \n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const\n  {\n    return m_diagImpl.coeff(row) * m_matImpl.coeff(row, col);\n  }\n  \n#ifndef __CUDACC__\n  template<int LoadMode,typename PacketType>\n  EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const\n  {\n    // FIXME: NVCC used to complain about the template keyword, but we have to check whether this is still the case.\n    // See also similar calls below.\n    return this->template packet_impl<LoadMode,PacketType>(row,col, row,\n                                 typename internal::conditional<int(StorageOrder)==RowMajor, internal::true_type, internal::false_type>::type());\n  }\n  \n  template<int LoadMode,typename PacketType>\n  EIGEN_STRONG_INLINE PacketType packet(Index idx) const\n  {\n    return packet<LoadMode,PacketType>(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx);\n  }\n#endif\n};\n\n// dense * diagonal\ntemplate<typename Lhs, typename Rhs, int ProductKind, int ProductTag>\nstruct product_evaluator<Product<Lhs, Rhs, ProductKind>, ProductTag, DenseShape, DiagonalShape>\n  : diagonal_product_evaluator_base<Lhs, typename Rhs::DiagonalVectorType, Product<Lhs, Rhs, LazyProduct>, OnTheRight>\n{\n  typedef diagonal_product_evaluator_base<Lhs, typename Rhs::DiagonalVectorType, Product<Lhs, Rhs, LazyProduct>, OnTheRight> Base;\n  using Base::m_diagImpl;\n  using Base::m_matImpl;\n  using Base::coeff;\n  typedef typename Base::Scalar Scalar;\n  \n  typedef Product<Lhs, Rhs, ProductKind> XprType;\n  typedef typename XprType::PlainObject PlainObject;\n  \n  enum { StorageOrder = int(Lhs::Flags) & RowMajorBit ? 
RowMajor : ColMajor };\n\n  EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)\n    : Base(xpr.lhs(), xpr.rhs().diagonal())\n  {\n  }\n  \n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const\n  {\n    return m_matImpl.coeff(row, col) * m_diagImpl.coeff(col);\n  }\n  \n#ifndef __CUDACC__\n  template<int LoadMode,typename PacketType>\n  EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const\n  {\n    return this->template packet_impl<LoadMode,PacketType>(row,col, col,\n                                 typename internal::conditional<int(StorageOrder)==ColMajor, internal::true_type, internal::false_type>::type());\n  }\n  \n  template<int LoadMode,typename PacketType>\n  EIGEN_STRONG_INLINE PacketType packet(Index idx) const\n  {\n    return packet<LoadMode,PacketType>(int(StorageOrder)==ColMajor?idx:0,int(StorageOrder)==ColMajor?0:idx);\n  }\n#endif\n};\n\n/***************************************************************************\n* Products with permutation matrices\n***************************************************************************/\n\n/** \\internal\n  * \\class permutation_matrix_product\n  * Internal helper class implementing the product between a permutation matrix and a matrix.\n  * This class is specialized for DenseShape below and for SparseShape in SparseCore/SparsePermutation.h\n  */\ntemplate<typename ExpressionType, int Side, bool Transposed, typename ExpressionShape>\nstruct permutation_matrix_product;\n\ntemplate<typename ExpressionType, int Side, bool Transposed>\nstruct permutation_matrix_product<ExpressionType, Side, Transposed, DenseShape>\n{\n    typedef typename nested_eval<ExpressionType, 1>::type MatrixType;\n    typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;\n\n    template<typename Dest, typename PermutationType>\n    static inline void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr)\n    {\n      MatrixType mat(xpr);\n      const 
Index n = Side==OnTheLeft ? mat.rows() : mat.cols();\n      // FIXME we need an is_same for expression that is not sensitive to constness. For instance\n      // is_same_xpr<Block<const Matrix>, Block<Matrix> >::value should be true.\n      //if(is_same<MatrixTypeCleaned,Dest>::value && extract_data(dst) == extract_data(mat))\n      if(is_same_dense(dst, mat))\n      {\n        // apply the permutation inplace\n        Matrix<bool,PermutationType::RowsAtCompileTime,1,0,PermutationType::MaxRowsAtCompileTime> mask(perm.size());\n        mask.fill(false);\n        Index r = 0;\n        while(r < perm.size())\n        {\n          // search for the next seed\n          while(r<perm.size() && mask[r]) r++;\n          if(r>=perm.size())\n            break;\n          // we got one, let's follow it until we are back to the seed\n          Index k0 = r++;\n          Index kPrev = k0;\n          mask.coeffRef(k0) = true;\n          for(Index k=perm.indices().coeff(k0); k!=k0; k=perm.indices().coeff(k))\n          {\n                  Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>(dst, k)\n            .swap(Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>\n                       (dst,((Side==OnTheLeft) ^ Transposed) ? k0 : kPrev));\n\n            mask.coeffRef(k) = true;\n            kPrev = k;\n          }\n        }\n      }\n      else\n      {\n        for(Index i = 0; i < n; ++i)\n        {\n          Block<Dest, Side==OnTheLeft ? 1 : Dest::RowsAtCompileTime, Side==OnTheRight ? 1 : Dest::ColsAtCompileTime>\n               (dst, ((Side==OnTheLeft) ^ Transposed) ? perm.indices().coeff(i) : i)\n\n          =\n\n          Block<const MatrixTypeCleaned,Side==OnTheLeft ? 1 : MatrixTypeCleaned::RowsAtCompileTime,Side==OnTheRight ? 1 : MatrixTypeCleaned::ColsAtCompileTime>\n               (mat, ((Side==OnTheRight) ^ Transposed) ? 
perm.indices().coeff(i) : i);\n        }\n      }\n    }\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>\nstruct generic_product_impl<Lhs, Rhs, PermutationShape, MatrixShape, ProductTag>\n{\n  template<typename Dest>\n  static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    permutation_matrix_product<Rhs, OnTheLeft, false, MatrixShape>::run(dst, lhs, rhs);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>\nstruct generic_product_impl<Lhs, Rhs, MatrixShape, PermutationShape, ProductTag>\n{\n  template<typename Dest>\n  static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    permutation_matrix_product<Lhs, OnTheRight, false, MatrixShape>::run(dst, rhs, lhs);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>\nstruct generic_product_impl<Inverse<Lhs>, Rhs, PermutationShape, MatrixShape, ProductTag>\n{\n  template<typename Dest>\n  static void evalTo(Dest& dst, const Inverse<Lhs>& lhs, const Rhs& rhs)\n  {\n    permutation_matrix_product<Rhs, OnTheLeft, true, MatrixShape>::run(dst, lhs.nestedExpression(), rhs);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>\nstruct generic_product_impl<Lhs, Inverse<Rhs>, MatrixShape, PermutationShape, ProductTag>\n{\n  template<typename Dest>\n  static void evalTo(Dest& dst, const Lhs& lhs, const Inverse<Rhs>& rhs)\n  {\n    permutation_matrix_product<Lhs, OnTheRight, true, MatrixShape>::run(dst, rhs.nestedExpression(), lhs);\n  }\n};\n\n\n/***************************************************************************\n* Products with transpositions matrices\n***************************************************************************/\n\n// FIXME could we unify Transpositions and Permutation into a single \"shape\"??\n\n/** \\internal\n  * \\class transposition_matrix_product\n  * Internal helper class implementing the product between a permutation matrix and a 
matrix.\n  */\ntemplate<typename ExpressionType, int Side, bool Transposed, typename ExpressionShape>\nstruct transposition_matrix_product\n{\n  typedef typename nested_eval<ExpressionType, 1>::type MatrixType;\n  typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;\n  \n  template<typename Dest, typename TranspositionType>\n  static inline void run(Dest& dst, const TranspositionType& tr, const ExpressionType& xpr)\n  {\n    MatrixType mat(xpr);\n    typedef typename TranspositionType::StorageIndex StorageIndex;\n    const Index size = tr.size();\n    StorageIndex j = 0;\n\n    if(!is_same_dense(dst,mat))\n      dst = mat;\n\n    for(Index k=(Transposed?size-1:0) ; Transposed?k>=0:k<size ; Transposed?--k:++k)\n      if(Index(j=tr.coeff(k))!=k)\n      {\n        if(Side==OnTheLeft)        dst.row(k).swap(dst.row(j));\n        else if(Side==OnTheRight)  dst.col(k).swap(dst.col(j));\n      }\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>\nstruct generic_product_impl<Lhs, Rhs, TranspositionsShape, MatrixShape, ProductTag>\n{\n  template<typename Dest>\n  static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    transposition_matrix_product<Rhs, OnTheLeft, false, MatrixShape>::run(dst, lhs, rhs);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>\nstruct generic_product_impl<Lhs, Rhs, MatrixShape, TranspositionsShape, ProductTag>\n{\n  template<typename Dest>\n  static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    transposition_matrix_product<Lhs, OnTheRight, false, MatrixShape>::run(dst, rhs, lhs);\n  }\n};\n\n\ntemplate<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>\nstruct generic_product_impl<Transpose<Lhs>, Rhs, TranspositionsShape, MatrixShape, ProductTag>\n{\n  template<typename Dest>\n  static void evalTo(Dest& dst, const Transpose<Lhs>& lhs, const Rhs& rhs)\n  {\n    transposition_matrix_product<Rhs, OnTheLeft, true, 
MatrixShape>::run(dst, lhs.nestedExpression(), rhs);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductTag, typename MatrixShape>\nstruct generic_product_impl<Lhs, Transpose<Rhs>, MatrixShape, TranspositionsShape, ProductTag>\n{\n  template<typename Dest>\n  static void evalTo(Dest& dst, const Lhs& lhs, const Transpose<Rhs>& rhs)\n  {\n    transposition_matrix_product<Lhs, OnTheRight, true, MatrixShape>::run(dst, rhs.nestedExpression(), lhs);\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_PRODUCT_EVALUATORS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Random.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_RANDOM_H\n#define EIGEN_RANDOM_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename Scalar> struct scalar_random_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_random_op)\n  inline const Scalar operator() () const { return random<Scalar>(); }\n};\n\ntemplate<typename Scalar>\nstruct functor_traits<scalar_random_op<Scalar> >\n{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false, IsRepeatable = false }; };\n\n} // end namespace internal\n\n/** \\returns a random matrix expression\n  *\n  * Numbers are uniformly spread through their whole definition range for integer types,\n  * and in the [-1:1] range for floating point scalar types.\n  * \n  * The parameters \\a rows and \\a cols are the number of rows and of columns of\n  * the returned matrix. Must be compatible with this MatrixBase type.\n  *\n  * \\not_reentrant\n  * \n  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,\n  * it is redundant to pass \\a rows and \\a cols as arguments, so Random() should be used\n  * instead.\n  * \n  *\n  * Example: \\include MatrixBase_random_int_int.cpp\n  * Output: \\verbinclude MatrixBase_random_int_int.out\n  *\n  * This expression has the \"evaluate before nesting\" flag so that it will be evaluated into\n  * a temporary matrix whenever it is nested in a larger expression. 
This prevents unexpected\n  * behavior with expressions involving random matrices.\n  * \n  * See DenseBase::NullaryExpr(Index, const CustomNullaryOp&) for an example using C++11 random generators.\n  *\n  * \\sa DenseBase::setRandom(), DenseBase::Random(Index), DenseBase::Random()\n  */\ntemplate<typename Derived>\ninline const typename DenseBase<Derived>::RandomReturnType\nDenseBase<Derived>::Random(Index rows, Index cols)\n{\n  return NullaryExpr(rows, cols, internal::scalar_random_op<Scalar>());\n}\n\n/** \\returns a random vector expression\n  *\n  * Numbers are uniformly spread through their whole definition range for integer types,\n  * and in the [-1:1] range for floating point scalar types.\n  *\n  * The parameter \\a size is the size of the returned vector.\n  * Must be compatible with this MatrixBase type.\n  *\n  * \\only_for_vectors\n  * \\not_reentrant\n  *\n  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,\n  * it is redundant to pass \\a size as argument, so Random() should be used\n  * instead.\n  *\n  * Example: \\include MatrixBase_random_int.cpp\n  * Output: \\verbinclude MatrixBase_random_int.out\n  *\n  * This expression has the \"evaluate before nesting\" flag so that it will be evaluated into\n  * a temporary vector whenever it is nested in a larger expression. 
This prevents unexpected\n  * behavior with expressions involving random matrices.\n  *\n  * \\sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random()\n  */\ntemplate<typename Derived>\ninline const typename DenseBase<Derived>::RandomReturnType\nDenseBase<Derived>::Random(Index size)\n{\n  return NullaryExpr(size, internal::scalar_random_op<Scalar>());\n}\n\n/** \\returns a fixed-size random matrix or vector expression\n  *\n  * Numbers are uniformly spread through their whole definition range for integer types,\n  * and in the [-1:1] range for floating point scalar types.\n  * \n  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you\n  * need to use the variants taking size arguments.\n  *\n  * Example: \\include MatrixBase_random.cpp\n  * Output: \\verbinclude MatrixBase_random.out\n  *\n  * This expression has the \"evaluate before nesting\" flag so that it will be evaluated into\n  * a temporary matrix whenever it is nested in a larger expression. 
This prevents unexpected\n  * behavior with expressions involving random matrices.\n  * \n  * \\not_reentrant\n  *\n  * \\sa DenseBase::setRandom(), DenseBase::Random(Index,Index), DenseBase::Random(Index)\n  */\ntemplate<typename Derived>\ninline const typename DenseBase<Derived>::RandomReturnType\nDenseBase<Derived>::Random()\n{\n  return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_random_op<Scalar>());\n}\n\n/** Sets all coefficients in this expression to random values.\n  *\n  * Numbers are uniformly spread through their whole definition range for integer types,\n  * and in the [-1:1] range for floating point scalar types.\n  * \n  * \\not_reentrant\n  * \n  * Example: \\include MatrixBase_setRandom.cpp\n  * Output: \\verbinclude MatrixBase_setRandom.out\n  *\n  * \\sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index)\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline Derived& DenseBase<Derived>::setRandom()\n{\n  return *this = Random(rows(), cols());\n}\n\n/** Resizes to the given \\a newSize, and sets all coefficients in this expression to random values.\n  *\n  * Numbers are uniformly spread through their whole definition range for integer types,\n  * and in the [-1:1] range for floating point scalar types.\n  * \n  * \\only_for_vectors\n  * \\not_reentrant\n  *\n  * Example: \\include Matrix_setRandom_int.cpp\n  * Output: \\verbinclude Matrix_setRandom_int.out\n  *\n  * \\sa DenseBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, DenseBase::Random()\n  */\ntemplate<typename Derived>\nEIGEN_STRONG_INLINE Derived&\nPlainObjectBase<Derived>::setRandom(Index newSize)\n{\n  resize(newSize);\n  return setRandom();\n}\n\n/** Resizes to the given size, and sets all coefficients in this expression to random values.\n  *\n  * Numbers are uniformly spread through their whole definition range for integer types,\n  * and in the [-1:1] range for floating point scalar types.\n  *\n  * \\not_reentrant\n  * \n  * 
\\param rows the new number of rows\n  * \\param cols the new number of columns\n  *\n  * Example: \\include Matrix_setRandom_int_int.cpp\n  * Output: \\verbinclude Matrix_setRandom_int_int.out\n  *\n  * \\sa DenseBase::setRandom(), setRandom(Index), class CwiseNullaryOp, DenseBase::Random()\n  */\ntemplate<typename Derived>\nEIGEN_STRONG_INLINE Derived&\nPlainObjectBase<Derived>::setRandom(Index rows, Index cols)\n{\n  resize(rows, cols);\n  return setRandom();\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_RANDOM_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Redux.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_REDUX_H\n#define EIGEN_REDUX_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n// TODO\n//  * implement other kind of vectorization\n//  * factorize code\n\n/***************************************************************************\n* Part 1 : the logic deciding a strategy for vectorization and unrolling\n***************************************************************************/\n\ntemplate<typename Func, typename Derived>\nstruct redux_traits\n{\npublic:\n    typedef typename find_best_packet<typename Derived::Scalar,Derived::SizeAtCompileTime>::type PacketType;\n  enum {\n    PacketSize = unpacket_traits<PacketType>::size,\n    InnerMaxSize = int(Derived::IsRowMajor)\n                 ? Derived::MaxColsAtCompileTime\n                 : Derived::MaxRowsAtCompileTime\n  };\n\n  enum {\n    MightVectorize = (int(Derived::Flags)&ActualPacketAccessBit)\n                  && (functor_traits<Func>::PacketAccess),\n    MayLinearVectorize = bool(MightVectorize) && (int(Derived::Flags)&LinearAccessBit),\n    MaySliceVectorize  = bool(MightVectorize) && int(InnerMaxSize)>=3*PacketSize\n  };\n\npublic:\n  enum {\n    Traversal = int(MayLinearVectorize) ? int(LinearVectorizedTraversal)\n              : int(MaySliceVectorize)  ? int(SliceVectorizedTraversal)\n                                        : int(DefaultTraversal)\n  };\n\npublic:\n  enum {\n    Cost = Derived::SizeAtCompileTime == Dynamic ? 
HugeCost\n         : Derived::SizeAtCompileTime * Derived::CoeffReadCost + (Derived::SizeAtCompileTime-1) * functor_traits<Func>::Cost,\n    UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize))\n  };\n\npublic:\n  enum {\n    Unrolling = Cost <= UnrollingLimit ? CompleteUnrolling : NoUnrolling\n  };\n  \n#ifdef EIGEN_DEBUG_ASSIGN\n  static void debug()\n  {\n    std::cerr << \"Xpr: \" << typeid(typename Derived::XprType).name() << std::endl;\n    std::cerr.setf(std::ios::hex, std::ios::basefield);\n    EIGEN_DEBUG_VAR(Derived::Flags)\n    std::cerr.unsetf(std::ios::hex);\n    EIGEN_DEBUG_VAR(InnerMaxSize)\n    EIGEN_DEBUG_VAR(PacketSize)\n    EIGEN_DEBUG_VAR(MightVectorize)\n    EIGEN_DEBUG_VAR(MayLinearVectorize)\n    EIGEN_DEBUG_VAR(MaySliceVectorize)\n    EIGEN_DEBUG_VAR(Traversal)\n    EIGEN_DEBUG_VAR(UnrollingLimit)\n    EIGEN_DEBUG_VAR(Unrolling)\n    std::cerr << std::endl;\n  }\n#endif\n};\n\n/***************************************************************************\n* Part 2 : unrollers\n***************************************************************************/\n\n/*** no vectorization ***/\n\ntemplate<typename Func, typename Derived, int Start, int Length>\nstruct redux_novec_unroller\n{\n  enum {\n    HalfLength = Length/2\n  };\n\n  typedef typename Derived::Scalar Scalar;\n\n  EIGEN_DEVICE_FUNC\n  static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)\n  {\n    return func(redux_novec_unroller<Func, Derived, Start, HalfLength>::run(mat,func),\n                redux_novec_unroller<Func, Derived, Start+HalfLength, Length-HalfLength>::run(mat,func));\n  }\n};\n\ntemplate<typename Func, typename Derived, int Start>\nstruct redux_novec_unroller<Func, Derived, Start, 1>\n{\n  enum {\n    outer = Start / Derived::InnerSizeAtCompileTime,\n    inner = Start % Derived::InnerSizeAtCompileTime\n  };\n\n  typedef typename Derived::Scalar Scalar;\n\n  EIGEN_DEVICE_FUNC\n  static 
EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func&)\n  {\n    return mat.coeffByOuterInner(outer, inner);\n  }\n};\n\n// This is actually dead code and will never be called. It is required\n// to prevent false warnings regarding failed inlining though\n// for 0 length run() will never be called at all.\ntemplate<typename Func, typename Derived, int Start>\nstruct redux_novec_unroller<Func, Derived, Start, 0>\n{\n  typedef typename Derived::Scalar Scalar;\n  EIGEN_DEVICE_FUNC \n  static EIGEN_STRONG_INLINE Scalar run(const Derived&, const Func&) { return Scalar(); }\n};\n\n/*** vectorization ***/\n\ntemplate<typename Func, typename Derived, int Start, int Length>\nstruct redux_vec_unroller\n{\n  enum {\n    PacketSize = redux_traits<Func, Derived>::PacketSize,\n    HalfLength = Length/2\n  };\n\n  typedef typename Derived::Scalar Scalar;\n  typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;\n\n  static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func& func)\n  {\n    return func.packetOp(\n            redux_vec_unroller<Func, Derived, Start, HalfLength>::run(mat,func),\n            redux_vec_unroller<Func, Derived, Start+HalfLength, Length-HalfLength>::run(mat,func) );\n  }\n};\n\ntemplate<typename Func, typename Derived, int Start>\nstruct redux_vec_unroller<Func, Derived, Start, 1>\n{\n  enum {\n    index = Start * redux_traits<Func, Derived>::PacketSize,\n    outer = index / int(Derived::InnerSizeAtCompileTime),\n    inner = index % int(Derived::InnerSizeAtCompileTime),\n    alignment = Derived::Alignment\n  };\n\n  typedef typename Derived::Scalar Scalar;\n  typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;\n\n  static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func&)\n  {\n    return mat.template packetByOuterInner<alignment,PacketScalar>(outer, inner);\n  }\n};\n\n/***************************************************************************\n* Part 3 : implementation of 
all cases\n***************************************************************************/\n\ntemplate<typename Func, typename Derived,\n         int Traversal = redux_traits<Func, Derived>::Traversal,\n         int Unrolling = redux_traits<Func, Derived>::Unrolling\n>\nstruct redux_impl;\n\ntemplate<typename Func, typename Derived>\nstruct redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>\n{\n  typedef typename Derived::Scalar Scalar;\n  EIGEN_DEVICE_FUNC\n  static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)\n  {\n    eigen_assert(mat.rows()>0 && mat.cols()>0 && \"you are using an empty matrix\");\n    Scalar res;\n    res = mat.coeffByOuterInner(0, 0);\n    for(Index i = 1; i < mat.innerSize(); ++i)\n      res = func(res, mat.coeffByOuterInner(0, i));\n    for(Index i = 1; i < mat.outerSize(); ++i)\n      for(Index j = 0; j < mat.innerSize(); ++j)\n        res = func(res, mat.coeffByOuterInner(i, j));\n    return res;\n  }\n};\n\ntemplate<typename Func, typename Derived>\nstruct redux_impl<Func,Derived, DefaultTraversal, CompleteUnrolling>\n  : public redux_novec_unroller<Func,Derived, 0, Derived::SizeAtCompileTime>\n{};\n\ntemplate<typename Func, typename Derived>\nstruct redux_impl<Func, Derived, LinearVectorizedTraversal, NoUnrolling>\n{\n  typedef typename Derived::Scalar Scalar;\n  typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;\n\n  static Scalar run(const Derived &mat, const Func& func)\n  {\n    const Index size = mat.size();\n    \n    const Index packetSize = redux_traits<Func, Derived>::PacketSize;\n    const int packetAlignment = unpacket_traits<PacketScalar>::alignment;\n    enum {\n      alignment0 = (bool(Derived::Flags & DirectAccessBit) && bool(packet_traits<Scalar>::AlignedOnScalar)) ? 
int(packetAlignment) : int(Unaligned),\n      alignment = EIGEN_PLAIN_ENUM_MAX(alignment0, Derived::Alignment)\n    };\n    const Index alignedStart = internal::first_default_aligned(mat.nestedExpression());\n    const Index alignedSize2 = ((size-alignedStart)/(2*packetSize))*(2*packetSize);\n    const Index alignedSize = ((size-alignedStart)/(packetSize))*(packetSize);\n    const Index alignedEnd2 = alignedStart + alignedSize2;\n    const Index alignedEnd  = alignedStart + alignedSize;\n    Scalar res;\n    if(alignedSize)\n    {\n      PacketScalar packet_res0 = mat.template packet<alignment,PacketScalar>(alignedStart);\n      if(alignedSize>packetSize) // we have at least two packets to partly unroll the loop\n      {\n        PacketScalar packet_res1 = mat.template packet<alignment,PacketScalar>(alignedStart+packetSize);\n        for(Index index = alignedStart + 2*packetSize; index < alignedEnd2; index += 2*packetSize)\n        {\n          packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment,PacketScalar>(index));\n          packet_res1 = func.packetOp(packet_res1, mat.template packet<alignment,PacketScalar>(index+packetSize));\n        }\n\n        packet_res0 = func.packetOp(packet_res0,packet_res1);\n        if(alignedEnd>alignedEnd2)\n          packet_res0 = func.packetOp(packet_res0, mat.template packet<alignment,PacketScalar>(alignedEnd2));\n      }\n      res = func.predux(packet_res0);\n\n      for(Index index = 0; index < alignedStart; ++index)\n        res = func(res,mat.coeff(index));\n\n      for(Index index = alignedEnd; index < size; ++index)\n        res = func(res,mat.coeff(index));\n    }\n    else // too small to vectorize anything.\n         // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.\n    {\n      res = mat.coeff(0);\n      for(Index index = 1; index < size; ++index)\n        res = func(res,mat.coeff(index));\n    }\n\n    return res;\n  }\n};\n\n// NOTE: for 
SliceVectorizedTraversal we simply bypass unrolling\ntemplate<typename Func, typename Derived, int Unrolling>\nstruct redux_impl<Func, Derived, SliceVectorizedTraversal, Unrolling>\n{\n  typedef typename Derived::Scalar Scalar;\n  typedef typename redux_traits<Func, Derived>::PacketType PacketType;\n\n  EIGEN_DEVICE_FUNC static Scalar run(const Derived &mat, const Func& func)\n  {\n    eigen_assert(mat.rows()>0 && mat.cols()>0 && \"you are using an empty matrix\");\n    const Index innerSize = mat.innerSize();\n    const Index outerSize = mat.outerSize();\n    enum {\n      packetSize = redux_traits<Func, Derived>::PacketSize\n    };\n    const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize;\n    Scalar res;\n    if(packetedInnerSize)\n    {\n      PacketType packet_res = mat.template packet<Unaligned,PacketType>(0,0);\n      for(Index j=0; j<outerSize; ++j)\n        for(Index i=(j==0?packetSize:0); i<packetedInnerSize; i+=Index(packetSize))\n          packet_res = func.packetOp(packet_res, mat.template packetByOuterInner<Unaligned,PacketType>(j,i));\n\n      res = func.predux(packet_res);\n      for(Index j=0; j<outerSize; ++j)\n        for(Index i=packetedInnerSize; i<innerSize; ++i)\n          res = func(res, mat.coeffByOuterInner(j,i));\n    }\n    else // too small to vectorize anything.\n         // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.\n    {\n      res = redux_impl<Func, Derived, DefaultTraversal, NoUnrolling>::run(mat, func);\n    }\n\n    return res;\n  }\n};\n\ntemplate<typename Func, typename Derived>\nstruct redux_impl<Func, Derived, LinearVectorizedTraversal, CompleteUnrolling>\n{\n  typedef typename Derived::Scalar Scalar;\n\n  typedef typename redux_traits<Func, Derived>::PacketType PacketScalar;\n  enum {\n    PacketSize = redux_traits<Func, Derived>::PacketSize,\n    Size = Derived::SizeAtCompileTime,\n    VectorizedSize = (Size / PacketSize) * PacketSize\n  };\n  
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func)\n  {\n    eigen_assert(mat.rows()>0 && mat.cols()>0 && \"you are using an empty matrix\");\n    if (VectorizedSize > 0) {\n      Scalar res = func.predux(redux_vec_unroller<Func, Derived, 0, Size / PacketSize>::run(mat,func));\n      if (VectorizedSize != Size)\n        res = func(res,redux_novec_unroller<Func, Derived, VectorizedSize, Size-VectorizedSize>::run(mat,func));\n      return res;\n    }\n    else {\n      return redux_novec_unroller<Func, Derived, 0, Size>::run(mat,func);\n    }\n  }\n};\n\n// evaluator adaptor\ntemplate<typename _XprType>\nclass redux_evaluator\n{\npublic:\n  typedef _XprType XprType;\n  EIGEN_DEVICE_FUNC explicit redux_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {}\n  \n  typedef typename XprType::Scalar Scalar;\n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n  typedef typename XprType::PacketScalar PacketScalar;\n  typedef typename XprType::PacketReturnType PacketReturnType;\n  \n  enum {\n    MaxRowsAtCompileTime = XprType::MaxRowsAtCompileTime,\n    MaxColsAtCompileTime = XprType::MaxColsAtCompileTime,\n    // TODO we should not remove DirectAccessBit and rather find an elegant way to query the alignment offset at runtime from the evaluator\n    Flags = evaluator<XprType>::Flags & ~DirectAccessBit,\n    IsRowMajor = XprType::IsRowMajor,\n    SizeAtCompileTime = XprType::SizeAtCompileTime,\n    InnerSizeAtCompileTime = XprType::InnerSizeAtCompileTime,\n    CoeffReadCost = evaluator<XprType>::CoeffReadCost,\n    Alignment = evaluator<XprType>::Alignment\n  };\n  \n  EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); }\n  EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); }\n  EIGEN_DEVICE_FUNC Index size() const { return m_xpr.size(); }\n  EIGEN_DEVICE_FUNC Index innerSize() const { return m_xpr.innerSize(); }\n  EIGEN_DEVICE_FUNC Index outerSize() const { return m_xpr.outerSize(); }\n\n  
EIGEN_DEVICE_FUNC\n  CoeffReturnType coeff(Index row, Index col) const\n  { return m_evaluator.coeff(row, col); }\n\n  EIGEN_DEVICE_FUNC\n  CoeffReturnType coeff(Index index) const\n  { return m_evaluator.coeff(index); }\n\n  template<int LoadMode, typename PacketType>\n  PacketType packet(Index row, Index col) const\n  { return m_evaluator.template packet<LoadMode,PacketType>(row, col); }\n\n  template<int LoadMode, typename PacketType>\n  PacketType packet(Index index) const\n  { return m_evaluator.template packet<LoadMode,PacketType>(index); }\n  \n  EIGEN_DEVICE_FUNC\n  CoeffReturnType coeffByOuterInner(Index outer, Index inner) const\n  { return m_evaluator.coeff(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }\n  \n  template<int LoadMode, typename PacketType>\n  PacketType packetByOuterInner(Index outer, Index inner) const\n  { return m_evaluator.template packet<LoadMode,PacketType>(IsRowMajor ? outer : inner, IsRowMajor ? inner : outer); }\n  \n  const XprType & nestedExpression() const { return m_xpr; }\n  \nprotected:\n  internal::evaluator<XprType> m_evaluator;\n  const XprType &m_xpr;\n};\n\n} // end namespace internal\n\n/***************************************************************************\n* Part 4 : public API\n***************************************************************************/\n\n\n/** \\returns the result of a full redux operation on the whole matrix or vector using \\a func\n  *\n  * The template parameter \\a BinaryOp is the type of the functor \\a func which must be\n  * an associative operator. 
Both current C++98 and C++11 functor styles are handled.\n  *\n  * \\sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise()\n  */\ntemplate<typename Derived>\ntemplate<typename Func>\nEIGEN_DEVICE_FUNC typename internal::traits<Derived>::Scalar\nDenseBase<Derived>::redux(const Func& func) const\n{\n  eigen_assert(this->rows()>0 && this->cols()>0 && \"you are using an empty matrix\");\n\n  typedef typename internal::redux_evaluator<Derived> ThisEvaluator;\n  ThisEvaluator thisEval(derived());\n  \n  return internal::redux_impl<Func, ThisEvaluator>::run(thisEval, func);\n}\n\n/** \\returns the minimum of all coefficients of \\c *this.\n  * \\warning the result is undefined if \\c *this contains NaN.\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar\nDenseBase<Derived>::minCoeff() const\n{\n  return derived().redux(Eigen::internal::scalar_min_op<Scalar,Scalar>());\n}\n\n/** \\returns the maximum of all coefficients of \\c *this.\n  * \\warning the result is undefined if \\c *this contains NaN.\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar\nDenseBase<Derived>::maxCoeff() const\n{\n  return derived().redux(Eigen::internal::scalar_max_op<Scalar,Scalar>());\n}\n\n/** \\returns the sum of all coefficients of \\c *this\n  *\n  * If \\c *this is empty, then the value 0 is returned.\n  *\n  * \\sa trace(), prod(), mean()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar\nDenseBase<Derived>::sum() const\n{\n  if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))\n    return Scalar(0);\n  return derived().redux(Eigen::internal::scalar_sum_op<Scalar,Scalar>());\n}\n\n/** \\returns the mean of all coefficients of *this\n*\n* \\sa trace(), prod(), sum()\n*/\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar\nDenseBase<Derived>::mean() const\n{\n#ifdef __INTEL_COMPILER\n  #pragma warning push\n  #pragma warning ( disable : 2259 )\n#endif\n  return Scalar(derived().redux(Eigen::internal::scalar_sum_op<Scalar,Scalar>())) / Scalar(this->size());\n#ifdef __INTEL_COMPILER\n  #pragma warning pop\n#endif\n}\n\n/** \\returns the product of all coefficients of *this\n  *\n  * Example: \\include MatrixBase_prod.cpp\n  * Output: \\verbinclude MatrixBase_prod.out\n  *\n  * \\sa sum(), mean(), trace()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar\nDenseBase<Derived>::prod() const\n{\n  if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0))\n    return Scalar(1);\n  return derived().redux(Eigen::internal::scalar_product_op<Scalar>());\n}\n\n/** \\returns the trace of \\c *this, i.e. the sum of the coefficients on the main diagonal.\n  *\n  * \\c *this can be any matrix, not necessarily square.\n  *\n  * \\sa diagonal(), sum()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::traits<Derived>::Scalar\nMatrixBase<Derived>::trace() const\n{\n  return derived().diagonal().sum();\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_REDUX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Ref.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_REF_H\n#define EIGEN_REF_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename _PlainObjectType, int _Options, typename _StrideType>\nstruct traits<Ref<_PlainObjectType, _Options, _StrideType> >\n  : public traits<Map<_PlainObjectType, _Options, _StrideType> >\n{\n  typedef _PlainObjectType PlainObjectType;\n  typedef _StrideType StrideType;\n  enum {\n    Options = _Options,\n    Flags = traits<Map<_PlainObjectType, _Options, _StrideType> >::Flags | NestByRefBit,\n    Alignment = traits<Map<_PlainObjectType, _Options, _StrideType> >::Alignment\n  };\n\n  template<typename Derived> struct match {\n    enum {\n      HasDirectAccess = internal::has_direct_access<Derived>::ret,\n      StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),\n      InnerStrideMatch = int(StrideType::InnerStrideAtCompileTime)==int(Dynamic)\n                      || int(StrideType::InnerStrideAtCompileTime)==int(Derived::InnerStrideAtCompileTime)\n                      || (int(StrideType::InnerStrideAtCompileTime)==0 && int(Derived::InnerStrideAtCompileTime)==1),\n      OuterStrideMatch = Derived::IsVectorAtCompileTime\n                      || int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime),\n      // NOTE, this indirection of evaluator<Derived>::Alignment is needed\n      // to workaround a very strange bug in MSVC related to the instantiation\n      // of has_*ary_operator in 
evaluator<CwiseNullaryOp>.\n      // This line is surprisingly very sensitive. For instance, simply adding parenthesis\n      // as \"DerivedAlignment = (int(evaluator<Derived>::Alignment)),\" will make MSVC fail...\n      DerivedAlignment = int(evaluator<Derived>::Alignment),\n      AlignmentMatch = (int(traits<PlainObjectType>::Alignment)==int(Unaligned)) || (DerivedAlignment >= int(Alignment)), // FIXME the first condition is not very clear, it should be replaced by the required alignment\n      ScalarTypeMatch = internal::is_same<typename PlainObjectType::Scalar, typename Derived::Scalar>::value,\n      MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch && ScalarTypeMatch\n    };\n    typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;\n  };\n  \n};\n\ntemplate<typename Derived>\nstruct traits<RefBase<Derived> > : public traits<Derived> {};\n\n}\n\ntemplate<typename Derived> class RefBase\n : public MapBase<Derived>\n{\n  typedef typename internal::traits<Derived>::PlainObjectType PlainObjectType;\n  typedef typename internal::traits<Derived>::StrideType StrideType;\n\npublic:\n\n  typedef MapBase<Derived> Base;\n  EIGEN_DENSE_PUBLIC_INTERFACE(RefBase)\n\n  EIGEN_DEVICE_FUNC inline Index innerStride() const\n  {\n    return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1;\n  }\n\n  EIGEN_DEVICE_FUNC inline Index outerStride() const\n  {\n    return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer()\n         : IsVectorAtCompileTime ? this->size()\n         : int(Flags)&RowMajorBit ? 
this->cols()\n         : this->rows();\n  }\n\n  EIGEN_DEVICE_FUNC RefBase()\n    : Base(0,RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime),\n      // Stride<> does not allow default ctor for Dynamic strides, so let' initialize it with dummy values:\n      m_stride(StrideType::OuterStrideAtCompileTime==Dynamic?0:StrideType::OuterStrideAtCompileTime,\n               StrideType::InnerStrideAtCompileTime==Dynamic?0:StrideType::InnerStrideAtCompileTime)\n  {}\n  \n  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(RefBase)\n\nprotected:\n\n  typedef Stride<StrideType::OuterStrideAtCompileTime,StrideType::InnerStrideAtCompileTime> StrideBase;\n\n  template<typename Expression>\n  EIGEN_DEVICE_FUNC void construct(Expression& expr)\n  {\n    if(PlainObjectType::RowsAtCompileTime==1)\n    {\n      eigen_assert(expr.rows()==1 || expr.cols()==1);\n      ::new (static_cast<Base*>(this)) Base(expr.data(), 1, expr.size());\n    }\n    else if(PlainObjectType::ColsAtCompileTime==1)\n    {\n      eigen_assert(expr.rows()==1 || expr.cols()==1);\n      ::new (static_cast<Base*>(this)) Base(expr.data(), expr.size(), 1);\n    }\n    else\n      ::new (static_cast<Base*>(this)) Base(expr.data(), expr.rows(), expr.cols());\n    \n    if(Expression::IsVectorAtCompileTime && (!PlainObjectType::IsVectorAtCompileTime) && ((Expression::Flags&RowMajorBit)!=(PlainObjectType::Flags&RowMajorBit)))\n      ::new (&m_stride) StrideBase(expr.innerStride(), StrideType::InnerStrideAtCompileTime==0?0:1);\n    else\n      ::new (&m_stride) StrideBase(StrideType::OuterStrideAtCompileTime==0?0:expr.outerStride(),\n                                   StrideType::InnerStrideAtCompileTime==0?0:expr.innerStride());    \n  }\n\n  StrideBase m_stride;\n};\n\n/** \\class Ref\n  * \\ingroup Core_Module\n  *\n  * \\brief A matrix or vector expression mapping an existing expression\n  *\n  * \\tparam PlainObjectType the equivalent matrix type of the mapped data\n  * \\tparam Options 
specifies the pointer alignment in bytes. It can be: \\c #Aligned128, , \\c #Aligned64, \\c #Aligned32, \\c #Aligned16, \\c #Aligned8 or \\c #Unaligned.\n  *                 The default is \\c #Unaligned.\n  * \\tparam StrideType optionally specifies strides. By default, Ref implies a contiguous storage along the inner dimension (inner stride==1),\n  *                   but accepts a variable outer stride (leading dimension).\n  *                   This can be overridden by specifying strides.\n  *                   The type passed here must be a specialization of the Stride template, see examples below.\n  *\n  * This class provides a way to write non-template functions taking Eigen objects as parameters while limiting the number of copies.\n  * A Ref<> object can represent either a const expression or a l-value:\n  * \\code\n  * // in-out argument:\n  * void foo1(Ref<VectorXf> x);\n  *\n  * // read-only const argument:\n  * void foo2(const Ref<const VectorXf>& x);\n  * \\endcode\n  *\n  * In the in-out case, the input argument must satisfy the constraints of the actual Ref<> type, otherwise a compilation issue will be triggered.\n  * By default, a Ref<VectorXf> can reference any dense vector expression of float having a contiguous memory layout.\n  * Likewise, a Ref<MatrixXf> can reference any column-major dense matrix expression of float whose column's elements are contiguously stored with\n  * the possibility to have a constant space in-between each column, i.e. 
the inner stride must be equal to 1, but the outer stride (or leading dimension)\n  * can be greater than the number of rows.\n  *\n  * In the const case, if the input expression does not match the above requirement, then it is evaluated into a temporary before being passed to the function.\n  * Here are some examples:\n  * \\code\n  * MatrixXf A;\n  * VectorXf a;\n  * foo1(a.head());             // OK\n  * foo1(A.col());              // OK\n  * foo1(A.row());              // Compilation error because here innerstride!=1\n  * foo2(A.row());              // Compilation error because A.row() is a 1xN object while foo2 is expecting a Nx1 object\n  * foo2(A.row().transpose());  // The row is copied into a contiguous temporary\n  * foo2(2*a);                  // The expression is evaluated into a temporary\n  * foo2(A.col().segment(2,4)); // No temporary\n  * \\endcode\n  *\n  * The range of inputs that can be referenced without temporary can be enlarged using the last two template parameters.\n  * Here is an example accepting an innerstride!=1:\n  * \\code\n  * // in-out argument:\n  * void foo3(Ref<VectorXf,0,InnerStride<> > x);\n  * foo3(A.row());              // OK\n  * \\endcode\n  * The downside here is that the function foo3 might be significantly slower than foo1 because it won't be able to exploit vectorization, and will involve more\n  * expensive address computations even if the input is contiguously stored in memory. To overcome this issue, one might propose to overload internally calling a\n  * template function, e.g.:\n  * \\code\n  * // in the .h:\n  * void foo(const Ref<MatrixXf>& A);\n  * void foo(const Ref<MatrixXf,0,Stride<> >& A);\n  *\n  * // in the .cpp:\n  * template<typename TypeOfA> void foo_impl(const TypeOfA& A) {\n  *     ... 
// crazy code goes here\n  * }\n  * void foo(const Ref<MatrixXf>& A) { foo_impl(A); }\n  * void foo(const Ref<MatrixXf,0,Stride<> >& A) { foo_impl(A); }\n  * \\endcode\n  *\n  * See also the following stackoverflow questions for further references:\n  *  - <a href=\"http://stackoverflow.com/questions/21132538/correct-usage-of-the-eigenref-class\">Correct usage of the Eigen::Ref<> class</a>\n  *\n  * \\sa PlainObjectBase::Map(), \\ref TopicStorageOrders\n  */\ntemplate<typename PlainObjectType, int Options, typename StrideType> class Ref\n  : public RefBase<Ref<PlainObjectType, Options, StrideType> >\n{\n  private:\n    typedef internal::traits<Ref> Traits;\n    template<typename Derived>\n    EIGEN_DEVICE_FUNC inline Ref(const PlainObjectBase<Derived>& expr,\n                                 typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0);\n  public:\n\n    typedef RefBase<Ref> Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(Ref)\n\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    template<typename Derived>\n    EIGEN_DEVICE_FUNC inline Ref(PlainObjectBase<Derived>& expr,\n                                 typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)\n    {\n      EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);\n      Base::construct(expr.derived());\n    }\n    template<typename Derived>\n    EIGEN_DEVICE_FUNC inline Ref(const DenseBase<Derived>& expr,\n                                 typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)\n    #else\n    /** Implicit constructor from any dense expression */\n    template<typename Derived>\n    inline Ref(DenseBase<Derived>& expr)\n    #endif\n    {\n      EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);\n      
EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);\n      EIGEN_STATIC_ASSERT(!Derived::IsPlainObjectBase,THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);\n      Base::construct(expr.const_cast_derived());\n    }\n\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Ref)\n\n};\n\n// this is the const ref version\ntemplate<typename TPlainObjectType, int Options, typename StrideType> class Ref<const TPlainObjectType, Options, StrideType>\n  : public RefBase<Ref<const TPlainObjectType, Options, StrideType> >\n{\n    typedef internal::traits<Ref> Traits;\n  public:\n\n    typedef RefBase<Ref> Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(Ref)\n\n    template<typename Derived>\n    EIGEN_DEVICE_FUNC inline Ref(const DenseBase<Derived>& expr,\n                                 typename internal::enable_if<bool(Traits::template match<Derived>::ScalarTypeMatch),Derived>::type* = 0)\n    {\n//      std::cout << match_helper<Derived>::HasDirectAccess << \",\" << match_helper<Derived>::OuterStrideMatch << \",\" << match_helper<Derived>::InnerStrideMatch << \"\\n\";\n//      std::cout << int(StrideType::OuterStrideAtCompileTime) << \" - \" << int(Derived::OuterStrideAtCompileTime) << \"\\n\";\n//      std::cout << int(StrideType::InnerStrideAtCompileTime) << \" - \" << int(Derived::InnerStrideAtCompileTime) << \"\\n\";\n      construct(expr.derived(), typename Traits::template match<Derived>::type());\n    }\n\n    EIGEN_DEVICE_FUNC inline Ref(const Ref& other) : Base(other) {\n      // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy\n    }\n\n    template<typename OtherRef>\n    EIGEN_DEVICE_FUNC inline Ref(const RefBase<OtherRef>& other) {\n      construct(other.derived(), typename Traits::template match<OtherRef>::type());\n    }\n\n  protected:\n\n    template<typename Expression>\n    EIGEN_DEVICE_FUNC void construct(const Expression& expr,internal::true_type)\n    {\n      
Base::construct(expr);\n    }\n\n    template<typename Expression>\n    EIGEN_DEVICE_FUNC void construct(const Expression& expr, internal::false_type)\n    {\n      internal::call_assignment_no_alias(m_object,expr,internal::assign_op<Scalar,Scalar>());\n      Base::construct(m_object);\n    }\n\n  protected:\n    TPlainObjectType m_object;\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_REF_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Replicate.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_REPLICATE_H\n#define EIGEN_REPLICATE_H\n\nnamespace Eigen { \n\nnamespace internal {\ntemplate<typename MatrixType,int RowFactor,int ColFactor>\nstruct traits<Replicate<MatrixType,RowFactor,ColFactor> >\n : traits<MatrixType>\n{\n  typedef typename MatrixType::Scalar Scalar;\n  typedef typename traits<MatrixType>::StorageKind StorageKind;\n  typedef typename traits<MatrixType>::XprKind XprKind;\n  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;\n  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;\n  enum {\n    RowsAtCompileTime = RowFactor==Dynamic || int(MatrixType::RowsAtCompileTime)==Dynamic\n                      ? Dynamic\n                      : RowFactor * MatrixType::RowsAtCompileTime,\n    ColsAtCompileTime = ColFactor==Dynamic || int(MatrixType::ColsAtCompileTime)==Dynamic\n                      ? Dynamic\n                      : ColFactor * MatrixType::ColsAtCompileTime,\n   //FIXME we don't propagate the max sizes !!!\n    MaxRowsAtCompileTime = RowsAtCompileTime,\n    MaxColsAtCompileTime = ColsAtCompileTime,\n    IsRowMajor = MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1 ? 1\n               : MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0\n               : (MatrixType::Flags & RowMajorBit) ? 1 : 0,\n    \n    // FIXME enable DirectAccess with negative strides?\n    Flags = IsRowMajor ? 
RowMajorBit : 0\n  };\n};\n}\n\n/**\n  * \\class Replicate\n  * \\ingroup Core_Module\n  *\n  * \\brief Expression of the multiple replication of a matrix or vector\n  *\n  * \\tparam MatrixType the type of the object we are replicating\n  * \\tparam RowFactor number of repetitions at compile time along the vertical direction, can be Dynamic.\n  * \\tparam ColFactor number of repetitions at compile time along the horizontal direction, can be Dynamic.\n  *\n  * This class represents an expression of the multiple replication of a matrix or vector.\n  * It is the return type of DenseBase::replicate() and most of the time\n  * this is the only way it is used.\n  *\n  * \\sa DenseBase::replicate()\n  */\ntemplate<typename MatrixType,int RowFactor,int ColFactor> class Replicate\n  : public internal::dense_xpr_base< Replicate<MatrixType,RowFactor,ColFactor> >::type\n{\n    typedef typename internal::traits<Replicate>::MatrixTypeNested MatrixTypeNested;\n    typedef typename internal::traits<Replicate>::_MatrixTypeNested _MatrixTypeNested;\n  public:\n\n    typedef typename internal::dense_xpr_base<Replicate>::type Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(Replicate)\n    typedef typename internal::remove_all<MatrixType>::type NestedExpression;\n\n    template<typename OriginalMatrixType>\n    EIGEN_DEVICE_FUNC\n    inline explicit Replicate(const OriginalMatrixType& matrix)\n      : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor)\n    {\n      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),\n                          THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)\n      eigen_assert(RowFactor!=Dynamic && ColFactor!=Dynamic);\n    }\n\n    template<typename OriginalMatrixType>\n    EIGEN_DEVICE_FUNC\n    inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor)\n      : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor)\n  
  {\n      EIGEN_STATIC_ASSERT((internal::is_same<typename internal::remove_const<MatrixType>::type,OriginalMatrixType>::value),\n                          THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE)\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); }\n    EIGEN_DEVICE_FUNC\n    inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); }\n\n    EIGEN_DEVICE_FUNC\n    const _MatrixTypeNested& nestedExpression() const\n    { \n      return m_matrix; \n    }\n\n  protected:\n    MatrixTypeNested m_matrix;\n    const internal::variable_if_dynamic<Index, RowFactor> m_rowFactor;\n    const internal::variable_if_dynamic<Index, ColFactor> m_colFactor;\n};\n\n/**\n  * \\return an expression of the replication of \\c *this\n  *\n  * Example: \\include MatrixBase_replicate.cpp\n  * Output: \\verbinclude MatrixBase_replicate.out\n  *\n  * \\sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate\n  */\ntemplate<typename Derived>\ntemplate<int RowFactor, int ColFactor>\nEIGEN_DEVICE_FUNC const Replicate<Derived,RowFactor,ColFactor>\nDenseBase<Derived>::replicate() const\n{\n  return Replicate<Derived,RowFactor,ColFactor>(derived());\n}\n\n/**\n  * \\return an expression of the replication of each column (or row) of \\c *this\n  *\n  * Example: \\include DirectionWise_replicate_int.cpp\n  * Output: \\verbinclude DirectionWise_replicate_int.out\n  *\n  * \\sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate\n  */\ntemplate<typename ExpressionType, int Direction>\nEIGEN_DEVICE_FUNC const typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType\nVectorwiseOp<ExpressionType,Direction>::replicate(Index factor) const\n{\n  return typename VectorwiseOp<ExpressionType,Direction>::ReplicateReturnType\n          (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1);\n}\n\n} // end namespace Eigen\n\n#endif 
// EIGEN_REPLICATE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/ReturnByValue.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_RETURNBYVALUE_H\n#define EIGEN_RETURNBYVALUE_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<typename Derived>\nstruct traits<ReturnByValue<Derived> >\n  : public traits<typename traits<Derived>::ReturnType>\n{\n  enum {\n    // We're disabling the DirectAccess because e.g. the constructor of\n    // the Block-with-DirectAccess expression requires to have a coeffRef method.\n    // Also, we don't want to have to implement the stride stuff.\n    Flags = (traits<typename traits<Derived>::ReturnType>::Flags\n             | EvalBeforeNestingBit) & ~DirectAccessBit\n  };\n};\n\n/* The ReturnByValue object doesn't even have a coeff() method.\n * So the only way that nesting it in an expression can work, is by evaluating it into a plain matrix.\n * So internal::nested always gives the plain return matrix type.\n *\n * FIXME: I don't understand why we need this specialization: isn't this taken care of by the EvalBeforeNestingBit ??\n * Answer: EvalBeforeNestingBit should be deprecated since we have the evaluators\n */\ntemplate<typename Derived,int n,typename PlainObject>\nstruct nested_eval<ReturnByValue<Derived>, n, PlainObject>\n{\n  typedef typename traits<Derived>::ReturnType type;\n};\n\n} // end namespace internal\n\n/** \\class ReturnByValue\n  * \\ingroup Core_Module\n  *\n  */\ntemplate<typename Derived> class ReturnByValue\n  : public internal::dense_xpr_base< ReturnByValue<Derived> >::type, internal::no_assignment_operator\n{\n  public:\n    typedef typename 
internal::traits<Derived>::ReturnType ReturnType;\n\n    typedef typename internal::dense_xpr_base<ReturnByValue>::type Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue)\n\n    template<typename Dest>\n    EIGEN_DEVICE_FUNC\n    inline void evalTo(Dest& dst) const\n    { static_cast<const Derived*>(this)->evalTo(dst); }\n    EIGEN_DEVICE_FUNC inline Index rows() const { return static_cast<const Derived*>(this)->rows(); }\n    EIGEN_DEVICE_FUNC inline Index cols() const { return static_cast<const Derived*>(this)->cols(); }\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n#define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT\n    class Unusable{\n      Unusable(const Unusable&) {}\n      Unusable& operator=(const Unusable&) {return *this;}\n    };\n    const Unusable& coeff(Index) const { return *reinterpret_cast<const Unusable*>(this); }\n    const Unusable& coeff(Index,Index) const { return *reinterpret_cast<const Unusable*>(this); }\n    Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); }\n    Unusable& coeffRef(Index,Index) { return *reinterpret_cast<Unusable*>(this); }\n#undef Unusable\n#endif\n};\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)\n{\n  other.evalTo(derived());\n  return derived();\n}\n\nnamespace internal {\n\n// Expression is evaluated in a temporary; default implementation of Assignment is bypassed so that\n// when a ReturnByValue expression is assigned, the evaluator is not constructed.\n// TODO: Finalize port to new regime; ReturnByValue should not exist in the expression world\n  \ntemplate<typename Derived>\nstruct evaluator<ReturnByValue<Derived> >\n  : public evaluator<typename internal::traits<Derived>::ReturnType>\n{\n  typedef ReturnByValue<Derived> XprType;\n  typedef typename internal::traits<Derived>::ReturnType 
PlainObject;\n  typedef evaluator<PlainObject> Base;\n  \n  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)\n    : m_result(xpr.rows(), xpr.cols())\n  {\n    ::new (static_cast<Base*>(this)) Base(m_result);\n    xpr.evalTo(m_result);\n  }\n\nprotected:\n  PlainObject m_result;\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_RETURNBYVALUE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Reverse.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2009 Ricard Marxer <email@ricardmarxer.com>\n// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_REVERSE_H\n#define EIGEN_REVERSE_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename MatrixType, int Direction>\nstruct traits<Reverse<MatrixType, Direction> >\n : traits<MatrixType>\n{\n  typedef typename MatrixType::Scalar Scalar;\n  typedef typename traits<MatrixType>::StorageKind StorageKind;\n  typedef typename traits<MatrixType>::XprKind XprKind;\n  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;\n  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;\n  enum {\n    RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n    ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,\n    Flags = _MatrixTypeNested::Flags & (RowMajorBit | LvalueBit)\n  };\n};\n\ntemplate<typename PacketType, bool ReversePacket> struct reverse_packet_cond\n{\n  static inline PacketType run(const PacketType& x) { return preverse(x); }\n};\n\ntemplate<typename PacketType> struct reverse_packet_cond<PacketType,false>\n{\n  static inline PacketType run(const PacketType& x) { return x; }\n};\n\n} // end namespace internal \n\n/** \\class Reverse\n  * \\ingroup Core_Module\n  *\n  * \\brief Expression of the reverse of a vector or matrix\n  *\n  * \\tparam MatrixType the type of the object of which we are taking the reverse\n  * \\tparam Direction defines the direction of the reverse 
operation, can be Vertical, Horizontal, or BothDirections\n  *\n  * This class represents an expression of the reverse of a vector.\n  * It is the return type of MatrixBase::reverse() and VectorwiseOp::reverse()\n  * and most of the time this is the only way it is used.\n  *\n  * \\sa MatrixBase::reverse(), VectorwiseOp::reverse()\n  */\ntemplate<typename MatrixType, int Direction> class Reverse\n  : public internal::dense_xpr_base< Reverse<MatrixType, Direction> >::type\n{\n  public:\n\n    typedef typename internal::dense_xpr_base<Reverse>::type Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(Reverse)\n    typedef typename internal::remove_all<MatrixType>::type NestedExpression;\n    using Base::IsRowMajor;\n\n  protected:\n    enum {\n      PacketSize = internal::packet_traits<Scalar>::size,\n      IsColMajor = !IsRowMajor,\n      ReverseRow = (Direction == Vertical)   || (Direction == BothDirections),\n      ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),\n      OffsetRow  = ReverseRow && IsColMajor ? PacketSize : 1,\n      OffsetCol  = ReverseCol && IsRowMajor ? 
PacketSize : 1,\n      ReversePacket = (Direction == BothDirections)\n                    || ((Direction == Vertical)   && IsColMajor)\n                    || ((Direction == Horizontal) && IsRowMajor)\n    };\n    typedef internal::reverse_packet_cond<PacketScalar,ReversePacket> reverse_packet;\n  public:\n\n    EIGEN_DEVICE_FUNC explicit inline Reverse(const MatrixType& matrix) : m_matrix(matrix) { }\n\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse)\n\n    EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows(); }\n    EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols(); }\n\n    EIGEN_DEVICE_FUNC inline Index innerStride() const\n    {\n      return -m_matrix.innerStride();\n    }\n\n    EIGEN_DEVICE_FUNC const typename internal::remove_all<typename MatrixType::Nested>::type&\n    nestedExpression() const \n    {\n      return m_matrix;\n    }\n\n  protected:\n    typename MatrixType::Nested m_matrix;\n};\n\n/** \\returns an expression of the reverse of *this.\n  *\n  * Example: \\include MatrixBase_reverse.cpp\n  * Output: \\verbinclude MatrixBase_reverse.out\n  *\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::ReverseReturnType\nDenseBase<Derived>::reverse()\n{\n  return ReverseReturnType(derived());\n}\n\n\n//reverse const overload moved DenseBase.h due to a CUDA compiler bug\n\n/** This is the \"in place\" version of reverse: it reverses \\c *this.\n  *\n  * In most cases it is probably better to simply use the reversed expression\n  * of a matrix. 
However, when reversing the matrix data itself is really needed,\n  * then this \"in-place\" version is probably the right choice because it provides\n  * the following additional benefits:\n  *  - less error prone: doing the same operation with .reverse() requires special care:\n  *    \\code m = m.reverse().eval(); \\endcode\n  *  - this API enables reverse operations without the need for a temporary\n  *  - it allows future optimizations (cache friendliness, etc.)\n  *\n  * \\sa VectorwiseOp::reverseInPlace(), reverse() */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline void DenseBase<Derived>::reverseInPlace()\n{\n  if(cols()>rows())\n  {\n    Index half = cols()/2;\n    leftCols(half).swap(rightCols(half).reverse());\n    if((cols()%2)==1)\n    {\n      Index half2 = rows()/2;\n      col(half).head(half2).swap(col(half).tail(half2).reverse());\n    }\n  }\n  else\n  {\n    Index half = rows()/2;\n    topRows(half).swap(bottomRows(half).reverse());\n    if((rows()%2)==1)\n    {\n      Index half2 = cols()/2;\n      row(half).head(half2).swap(row(half).tail(half2).reverse());\n    }\n  }\n}\n\nnamespace internal {\n  \ntemplate<int Direction>\nstruct vectorwise_reverse_inplace_impl;\n\ntemplate<>\nstruct vectorwise_reverse_inplace_impl<Vertical>\n{\n  template<typename ExpressionType>\n  static void run(ExpressionType &xpr)\n  {\n    Index half = xpr.rows()/2;\n    xpr.topRows(half).swap(xpr.bottomRows(half).colwise().reverse());\n  }\n};\n\ntemplate<>\nstruct vectorwise_reverse_inplace_impl<Horizontal>\n{\n  template<typename ExpressionType>\n  static void run(ExpressionType &xpr)\n  {\n    Index half = xpr.cols()/2;\n    xpr.leftCols(half).swap(xpr.rightCols(half).rowwise().reverse());\n  }\n};\n\n} // end namespace internal\n\n/** This is the \"in place\" version of VectorwiseOp::reverse: it reverses each column or row of \\c *this.\n  *\n  * In most cases it is probably better to simply use the reversed expression\n  * of a matrix. 
However, when reversing the matrix data itself is really needed,\n  * then this \"in-place\" version is probably the right choice because it provides\n  * the following additional benefits:\n  *  - less error prone: doing the same operation with .reverse() requires special care:\n  *    \\code m = m.reverse().eval(); \\endcode\n  *  - this API enables reverse operations without the need for a temporary\n  *\n  * \\sa DenseBase::reverseInPlace(), reverse() */\ntemplate<typename ExpressionType, int Direction>\nEIGEN_DEVICE_FUNC void VectorwiseOp<ExpressionType,Direction>::reverseInPlace()\n{\n  internal::vectorwise_reverse_inplace_impl<Direction>::run(_expression().const_cast_derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_REVERSE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Select.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SELECT_H\n#define EIGEN_SELECT_H\n\nnamespace Eigen { \n\n/** \\class Select\n  * \\ingroup Core_Module\n  *\n  * \\brief Expression of a coefficient wise version of the C++ ternary operator ?:\n  *\n  * \\param ConditionMatrixType the type of the \\em condition expression which must be a boolean matrix\n  * \\param ThenMatrixType the type of the \\em then expression\n  * \\param ElseMatrixType the type of the \\em else expression\n  *\n  * This class represents an expression of a coefficient wise version of the C++ ternary operator ?:.\n  * It is the return type of DenseBase::select() and most of the time this is the only way it is used.\n  *\n  * \\sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const\n  */\n\nnamespace internal {\ntemplate<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>\nstruct traits<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >\n : traits<ThenMatrixType>\n{\n  typedef typename traits<ThenMatrixType>::Scalar Scalar;\n  typedef Dense StorageKind;\n  typedef typename traits<ThenMatrixType>::XprKind XprKind;\n  typedef typename ConditionMatrixType::Nested ConditionMatrixNested;\n  typedef typename ThenMatrixType::Nested ThenMatrixNested;\n  typedef typename ElseMatrixType::Nested ElseMatrixNested;\n  enum {\n    RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime,\n    ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime,\n    MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime,\n    MaxColsAtCompileTime = 
ConditionMatrixType::MaxColsAtCompileTime,\n    Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & RowMajorBit\n  };\n};\n}\n\ntemplate<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>\nclass Select : public internal::dense_xpr_base< Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >::type,\n               internal::no_assignment_operator\n{\n  public:\n\n    typedef typename internal::dense_xpr_base<Select>::type Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(Select)\n\n    inline EIGEN_DEVICE_FUNC\n    Select(const ConditionMatrixType& a_conditionMatrix,\n           const ThenMatrixType& a_thenMatrix,\n           const ElseMatrixType& a_elseMatrix)\n      : m_condition(a_conditionMatrix), m_then(a_thenMatrix), m_else(a_elseMatrix)\n    {\n      eigen_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows());\n      eigen_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols());\n    }\n\n    inline EIGEN_DEVICE_FUNC Index rows() const { return m_condition.rows(); }\n    inline EIGEN_DEVICE_FUNC Index cols() const { return m_condition.cols(); }\n\n    inline EIGEN_DEVICE_FUNC\n    const Scalar coeff(Index i, Index j) const\n    {\n      if (m_condition.coeff(i,j))\n        return m_then.coeff(i,j);\n      else\n        return m_else.coeff(i,j);\n    }\n\n    inline EIGEN_DEVICE_FUNC\n    const Scalar coeff(Index i) const\n    {\n      if (m_condition.coeff(i))\n        return m_then.coeff(i);\n      else\n        return m_else.coeff(i);\n    }\n\n    inline EIGEN_DEVICE_FUNC const ConditionMatrixType& conditionMatrix() const\n    {\n      return m_condition;\n    }\n\n    inline EIGEN_DEVICE_FUNC const ThenMatrixType& thenMatrix() const\n    {\n      return m_then;\n    }\n\n    inline EIGEN_DEVICE_FUNC const ElseMatrixType& elseMatrix() const\n    {\n      return m_else;\n    }\n\n  protected:\n    typename ConditionMatrixType::Nested m_condition;\n    
typename ThenMatrixType::Nested m_then;\n    typename ElseMatrixType::Nested m_else;\n};\n\n\n/** \\returns a matrix where each coefficient (i,j) is equal to \\a thenMatrix(i,j)\n  * if \\c *this(i,j), and \\a elseMatrix(i,j) otherwise.\n  *\n  * Example: \\include MatrixBase_select.cpp\n  * Output: \\verbinclude MatrixBase_select.out\n  *\n  * \\sa class Select\n  */\ntemplate<typename Derived>\ntemplate<typename ThenDerived,typename ElseDerived>\ninline const Select<Derived,ThenDerived,ElseDerived>\nDenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,\n                            const DenseBase<ElseDerived>& elseMatrix) const\n{\n  return Select<Derived,ThenDerived,ElseDerived>(derived(), thenMatrix.derived(), elseMatrix.derived());\n}\n\n/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with\n  * the \\em else expression being a scalar value.\n  *\n  * \\sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select\n  */\ntemplate<typename Derived>\ntemplate<typename ThenDerived>\ninline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>\nDenseBase<Derived>::select(const DenseBase<ThenDerived>& thenMatrix,\n                           const typename ThenDerived::Scalar& elseScalar) const\n{\n  return Select<Derived,ThenDerived,typename ThenDerived::ConstantReturnType>(\n    derived(), thenMatrix.derived(), ThenDerived::Constant(rows(),cols(),elseScalar));\n}\n\n/** Version of DenseBase::select(const DenseBase&, const DenseBase&) with\n  * the \\em then expression being a scalar value.\n  *\n  * \\sa DenseBase::select(const DenseBase<ThenDerived>&, const DenseBase<ElseDerived>&) const, class Select\n  */\ntemplate<typename Derived>\ntemplate<typename ElseDerived>\ninline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >\nDenseBase<Derived>::select(const typename ElseDerived::Scalar& thenScalar,\n                           const 
DenseBase<ElseDerived>& elseMatrix) const\n{\n  return Select<Derived,typename ElseDerived::ConstantReturnType,ElseDerived>(\n    derived(), ElseDerived::Constant(rows(),cols(),thenScalar), elseMatrix.derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SELECT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/SelfAdjointView.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SELFADJOINTMATRIX_H\n#define EIGEN_SELFADJOINTMATRIX_H\n\nnamespace Eigen { \n\n/** \\class SelfAdjointView\n  * \\ingroup Core_Module\n  *\n  *\n  * \\brief Expression of a selfadjoint matrix from a triangular part of a dense matrix\n  *\n  * \\param MatrixType the type of the dense matrix storing the coefficients\n  * \\param TriangularPart can be either \\c #Lower or \\c #Upper\n  *\n  * This class is an expression of a sefladjoint matrix from a triangular part of a matrix\n  * with given dense storage of the coefficients. It is the return type of MatrixBase::selfadjointView()\n  * and most of the time this is the only way that it is used.\n  *\n  * \\sa class TriangularBase, MatrixBase::selfadjointView()\n  */\n\nnamespace internal {\ntemplate<typename MatrixType, unsigned int UpLo>\nstruct traits<SelfAdjointView<MatrixType, UpLo> > : traits<MatrixType>\n{\n  typedef typename ref_selector<MatrixType>::non_const_type MatrixTypeNested;\n  typedef typename remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;\n  typedef MatrixType ExpressionType;\n  typedef typename MatrixType::PlainObject FullMatrixType;\n  enum {\n    Mode = UpLo | SelfAdjoint,\n    FlagsLvalueBit = is_lvalue<MatrixType>::value ? 
LvalueBit : 0,\n    Flags =  MatrixTypeNestedCleaned::Flags & (HereditaryBits|FlagsLvalueBit)\n           & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit)) // FIXME these flags should be preserved\n  };\n};\n}\n\n\ntemplate<typename _MatrixType, unsigned int UpLo> class SelfAdjointView\n  : public TriangularBase<SelfAdjointView<_MatrixType, UpLo> >\n{\n  public:\n\n    typedef _MatrixType MatrixType;\n    typedef TriangularBase<SelfAdjointView> Base;\n    typedef typename internal::traits<SelfAdjointView>::MatrixTypeNested MatrixTypeNested;\n    typedef typename internal::traits<SelfAdjointView>::MatrixTypeNestedCleaned MatrixTypeNestedCleaned;\n    typedef MatrixTypeNestedCleaned NestedExpression;\n\n    /** \\brief The type of coefficients in this matrix */\n    typedef typename internal::traits<SelfAdjointView>::Scalar Scalar; \n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType;\n\n    enum {\n      Mode = internal::traits<SelfAdjointView>::Mode,\n      Flags = internal::traits<SelfAdjointView>::Flags,\n      TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? 
Upper : 0)\n    };\n    typedef typename MatrixType::PlainObject PlainObject;\n\n    EIGEN_DEVICE_FUNC\n    explicit inline SelfAdjointView(MatrixType& matrix) : m_matrix(matrix)\n    {}\n\n    EIGEN_DEVICE_FUNC\n    inline Index rows() const { return m_matrix.rows(); }\n    EIGEN_DEVICE_FUNC\n    inline Index cols() const { return m_matrix.cols(); }\n    EIGEN_DEVICE_FUNC\n    inline Index outerStride() const { return m_matrix.outerStride(); }\n    EIGEN_DEVICE_FUNC\n    inline Index innerStride() const { return m_matrix.innerStride(); }\n\n    /** \\sa MatrixBase::coeff()\n      * \\warning the coordinates must fit into the referenced triangular part\n      */\n    EIGEN_DEVICE_FUNC\n    inline Scalar coeff(Index row, Index col) const\n    {\n      Base::check_coordinates_internal(row, col);\n      return m_matrix.coeff(row, col);\n    }\n\n    /** \\sa MatrixBase::coeffRef()\n      * \\warning the coordinates must fit into the referenced triangular part\n      */\n    EIGEN_DEVICE_FUNC\n    inline Scalar& coeffRef(Index row, Index col)\n    {\n      EIGEN_STATIC_ASSERT_LVALUE(SelfAdjointView);\n      Base::check_coordinates_internal(row, col);\n      return m_matrix.coeffRef(row, col);\n    }\n\n    /** \\internal */\n    EIGEN_DEVICE_FUNC\n    const MatrixTypeNestedCleaned& _expression() const { return m_matrix; }\n\n    EIGEN_DEVICE_FUNC\n    const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }\n    EIGEN_DEVICE_FUNC\n    MatrixTypeNestedCleaned& nestedExpression() { return m_matrix; }\n\n    /** Efficient triangular matrix times vector/matrix product */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    const Product<SelfAdjointView,OtherDerived>\n    operator*(const MatrixBase<OtherDerived>& rhs) const\n    {\n      return Product<SelfAdjointView,OtherDerived>(*this, rhs.derived());\n    }\n\n    /** Efficient vector/matrix times triangular matrix product */\n    template<typename OtherDerived> friend\n    
EIGEN_DEVICE_FUNC\n    const Product<OtherDerived,SelfAdjointView>\n    operator*(const MatrixBase<OtherDerived>& lhs, const SelfAdjointView& rhs)\n    {\n      return Product<OtherDerived,SelfAdjointView>(lhs.derived(),rhs);\n    }\n    \n    friend EIGEN_DEVICE_FUNC\n    const SelfAdjointView<const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(Scalar,MatrixType,product),UpLo>\n    operator*(const Scalar& s, const SelfAdjointView& mat)\n    {\n      return (s*mat.nestedExpression()).template selfadjointView<UpLo>();\n    }\n\n    /** Perform a symmetric rank 2 update of the selfadjoint matrix \\c *this:\n      * \\f$ this = this + \\alpha u v^* + conj(\\alpha) v u^* \\f$\n      * \\returns a reference to \\c *this\n      *\n      * The vectors \\a u and \\c v \\b must be column vectors, however they can be\n      * a adjoint expression without any overhead. Only the meaningful triangular\n      * part of the matrix is updated, the rest is left unchanged.\n      *\n      * \\sa rankUpdate(const MatrixBase<DerivedU>&, Scalar)\n      */\n    template<typename DerivedU, typename DerivedV>\n    EIGEN_DEVICE_FUNC\n    SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, const Scalar& alpha = Scalar(1));\n\n    /** Perform a symmetric rank K update of the selfadjoint matrix \\c *this:\n      * \\f$ this = this + \\alpha ( u u^* ) \\f$ where \\a u is a vector or matrix.\n      *\n      * \\returns a reference to \\c *this\n      *\n      * Note that to perform \\f$ this = this + \\alpha ( u^* u ) \\f$ you can simply\n      * call this function with u.adjoint().\n      *\n      * \\sa rankUpdate(const MatrixBase<DerivedU>&, const MatrixBase<DerivedV>&, Scalar)\n      */\n    template<typename DerivedU>\n    EIGEN_DEVICE_FUNC\n    SelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));\n\n    /** \\returns an expression of a triangular view extracted from the current selfadjoint view of a given triangular 
part\n      *\n      * The parameter \\a TriMode can have the following values: \\c #Upper, \\c #StrictlyUpper, \\c #UnitUpper,\n      * \\c #Lower, \\c #StrictlyLower, \\c #UnitLower.\n      *\n      * If \\c TriMode references the same triangular part than \\c *this, then this method simply return a \\c TriangularView of the nested expression,\n      * otherwise, the nested expression is first transposed, thus returning a \\c TriangularView<Transpose<MatrixType>> object.\n      *\n      * \\sa MatrixBase::triangularView(), class TriangularView\n      */\n    template<unsigned int TriMode>\n    EIGEN_DEVICE_FUNC\n    typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)),\n                                   TriangularView<MatrixType,TriMode>,\n                                   TriangularView<typename MatrixType::AdjointReturnType,TriMode> >::type\n    triangularView() const\n    {\n      typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)), MatrixType&, typename MatrixType::ConstTransposeReturnType>::type tmp1(m_matrix);\n      typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)), MatrixType&, typename MatrixType::AdjointReturnType>::type tmp2(tmp1);\n      return typename internal::conditional<(TriMode&(Upper|Lower))==(UpLo&(Upper|Lower)),\n                                   TriangularView<MatrixType,TriMode>,\n                                   TriangularView<typename MatrixType::AdjointReturnType,TriMode> >::type(tmp2);\n    }\n\n    typedef SelfAdjointView<const MatrixConjugateReturnType,Mode> ConjugateReturnType;\n    /** \\sa MatrixBase::conjugate() const */\n    EIGEN_DEVICE_FUNC\n    inline const ConjugateReturnType conjugate() const\n    { return ConjugateReturnType(m_matrix.conjugate()); }\n\n    typedef SelfAdjointView<const typename MatrixType::AdjointReturnType,TransposeMode> AdjointReturnType;\n    /** \\sa MatrixBase::adjoint() const */\n    EIGEN_DEVICE_FUNC\n    inline const 
AdjointReturnType adjoint() const\n    { return AdjointReturnType(m_matrix.adjoint()); }\n\n    typedef SelfAdjointView<typename MatrixType::TransposeReturnType,TransposeMode> TransposeReturnType;\n     /** \\sa MatrixBase::transpose() */\n    EIGEN_DEVICE_FUNC\n    inline TransposeReturnType transpose()\n    {\n      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)\n      typename MatrixType::TransposeReturnType tmp(m_matrix);\n      return TransposeReturnType(tmp);\n    }\n\n    typedef SelfAdjointView<const typename MatrixType::ConstTransposeReturnType,TransposeMode> ConstTransposeReturnType;\n    /** \\sa MatrixBase::transpose() const */\n    EIGEN_DEVICE_FUNC\n    inline const ConstTransposeReturnType transpose() const\n    {\n      return ConstTransposeReturnType(m_matrix.transpose());\n    }\n\n    /** \\returns a const expression of the main diagonal of the matrix \\c *this\n      *\n      * This method simply returns the diagonal of the nested expression, thus by-passing the SelfAdjointView decorator.\n      *\n      * \\sa MatrixBase::diagonal(), class Diagonal */\n    EIGEN_DEVICE_FUNC\n    typename MatrixType::ConstDiagonalReturnType diagonal() const\n    {\n      return typename MatrixType::ConstDiagonalReturnType(m_matrix);\n    }\n\n/////////// Cholesky module ///////////\n\n    const LLT<PlainObject, UpLo> llt() const;\n    const LDLT<PlainObject, UpLo> ldlt() const;\n\n/////////// Eigenvalue module ///////////\n\n    /** Real part of #Scalar */\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    /** Return type of eigenvalues() */\n    typedef Matrix<RealScalar, internal::traits<MatrixType>::ColsAtCompileTime, 1> EigenvaluesReturnType;\n\n    EIGEN_DEVICE_FUNC\n    EigenvaluesReturnType eigenvalues() const;\n    EIGEN_DEVICE_FUNC\n    RealScalar operatorNorm() const;\n\n  protected:\n    MatrixTypeNested m_matrix;\n};\n\n\n// template<typename OtherDerived, typename MatrixType, unsigned int UpLo>\n// 
internal::selfadjoint_matrix_product_returntype<OtherDerived,SelfAdjointView<MatrixType,UpLo> >\n// operator*(const MatrixBase<OtherDerived>& lhs, const SelfAdjointView<MatrixType,UpLo>& rhs)\n// {\n//   return internal::matrix_selfadjoint_product_returntype<OtherDerived,SelfAdjointView<MatrixType,UpLo> >(lhs.derived(),rhs);\n// }\n\n// selfadjoint to dense matrix\n\nnamespace internal {\n\n// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>\n//      in the future selfadjoint-ness should be defined by the expression traits\n//      such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)\ntemplate<typename MatrixType, unsigned int Mode>\nstruct evaluator_traits<SelfAdjointView<MatrixType,Mode> >\n{\n  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;\n  typedef SelfAdjointShape Shape;\n};\n\ntemplate<int UpLo, int SetOpposite, typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT, typename Functor, int Version>\nclass triangular_dense_assignment_kernel<UpLo,SelfAdjoint,SetOpposite,DstEvaluatorTypeT,SrcEvaluatorTypeT,Functor,Version>\n  : public generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version>\n{\nprotected:\n  typedef generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version> Base;\n  typedef typename Base::DstXprType DstXprType;\n  typedef typename Base::SrcXprType SrcXprType;\n  using Base::m_dst;\n  using Base::m_src;\n  using Base::m_functor;\npublic:\n  \n  typedef typename Base::DstEvaluatorType DstEvaluatorType;\n  typedef typename Base::SrcEvaluatorType SrcEvaluatorType;\n  typedef typename Base::Scalar Scalar;\n  typedef typename Base::AssignmentTraits AssignmentTraits;\n  \n  \n  EIGEN_DEVICE_FUNC triangular_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)\n    : Base(dst, src, func, 
dstExpr)\n  {}\n  \n  EIGEN_DEVICE_FUNC void assignCoeff(Index row, Index col)\n  {\n    eigen_internal_assert(row!=col);\n    Scalar tmp = m_src.coeff(row,col);\n    m_functor.assignCoeff(m_dst.coeffRef(row,col), tmp);\n    m_functor.assignCoeff(m_dst.coeffRef(col,row), numext::conj(tmp));\n  }\n  \n  EIGEN_DEVICE_FUNC void assignDiagonalCoeff(Index id)\n  {\n    Base::assignCoeff(id,id);\n  }\n  \n  EIGEN_DEVICE_FUNC void assignOppositeCoeff(Index, Index)\n  { eigen_internal_assert(false && \"should never be called\"); }\n};\n\n} // end namespace internal\n\n/***************************************************************************\n* Implementation of MatrixBase methods\n***************************************************************************/\n\n/** This is the const version of MatrixBase::selfadjointView() */\ntemplate<typename Derived>\ntemplate<unsigned int UpLo>\nEIGEN_DEVICE_FUNC typename MatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type\nMatrixBase<Derived>::selfadjointView() const\n{\n  return typename ConstSelfAdjointViewReturnType<UpLo>::Type(derived());\n}\n\n/** \\returns an expression of a symmetric/self-adjoint view extracted from the upper or lower triangular part of the current matrix\n  *\n  * The parameter \\a UpLo can be either \\c #Upper or \\c #Lower\n  *\n  * Example: \\include MatrixBase_selfadjointView.cpp\n  * Output: \\verbinclude MatrixBase_selfadjointView.out\n  *\n  * \\sa class SelfAdjointView\n  */\ntemplate<typename Derived>\ntemplate<unsigned int UpLo>\nEIGEN_DEVICE_FUNC typename MatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type\nMatrixBase<Derived>::selfadjointView()\n{\n  return typename SelfAdjointViewReturnType<UpLo>::Type(derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SELFADJOINTMATRIX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/SelfCwiseBinaryOp.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SELFCWISEBINARYOP_H\n#define EIGEN_SELFCWISEBINARYOP_H\n\nnamespace Eigen { \n\n// TODO generalize the scalar type of 'other'\n\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator*=(const Scalar& other)\n{\n  typedef typename Derived::PlainObject PlainObject;\n  internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::mul_assign_op<Scalar,Scalar>());\n  return derived();\n}\n\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator+=(const Scalar& other)\n{\n  typedef typename Derived::PlainObject PlainObject;\n  internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::add_assign_op<Scalar,Scalar>());\n  return derived();\n}\n\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& ArrayBase<Derived>::operator-=(const Scalar& other)\n{\n  typedef typename Derived::PlainObject PlainObject;\n  internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::sub_assign_op<Scalar,Scalar>());\n  return derived();\n}\n\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& DenseBase<Derived>::operator/=(const Scalar& other)\n{\n  typedef typename Derived::PlainObject PlainObject;\n  internal::call_assignment(this->derived(), PlainObject::Constant(rows(),cols(),other), internal::div_assign_op<Scalar,Scalar>());\n  return derived();\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SELFCWISEBINARYOP_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Solve.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SOLVE_H\n#define EIGEN_SOLVE_H\n\nnamespace Eigen {\n\ntemplate<typename Decomposition, typename RhsType, typename StorageKind> class SolveImpl;\n  \n/** \\class Solve\n  * \\ingroup Core_Module\n  *\n  * \\brief Pseudo expression representing a solving operation\n  *\n  * \\tparam Decomposition the type of the matrix or decomposion object\n  * \\tparam Rhstype the type of the right-hand side\n  *\n  * This class represents an expression of A.solve(B)\n  * and most of the time this is the only way it is used.\n  *\n  */\nnamespace internal {\n\n// this solve_traits class permits to determine the evaluation type with respect to storage kind (Dense vs Sparse)\ntemplate<typename Decomposition, typename RhsType,typename StorageKind> struct solve_traits;\n\ntemplate<typename Decomposition, typename RhsType>\nstruct solve_traits<Decomposition,RhsType,Dense>\n{\n  typedef Matrix<typename RhsType::Scalar,\n                 Decomposition::ColsAtCompileTime,\n                 RhsType::ColsAtCompileTime,\n                 RhsType::PlainObject::Options,\n                 Decomposition::MaxColsAtCompileTime,\n                 RhsType::MaxColsAtCompileTime> PlainObject;  \n};\n\ntemplate<typename Decomposition, typename RhsType>\nstruct traits<Solve<Decomposition, RhsType> >\n  : traits<typename solve_traits<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind>::PlainObject>\n{\n  typedef typename solve_traits<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind>::PlainObject PlainObject;\n  typedef typename promote_index_type<typename 
Decomposition::StorageIndex, typename RhsType::StorageIndex>::type StorageIndex;\n  typedef traits<PlainObject> BaseTraits;\n  enum {\n    Flags = BaseTraits::Flags & RowMajorBit,\n    CoeffReadCost = HugeCost\n  };\n};\n\n}\n\n\ntemplate<typename Decomposition, typename RhsType>\nclass Solve : public SolveImpl<Decomposition,RhsType,typename internal::traits<RhsType>::StorageKind>\n{\npublic:\n  typedef typename internal::traits<Solve>::PlainObject PlainObject;\n  typedef typename internal::traits<Solve>::StorageIndex StorageIndex;\n  \n  Solve(const Decomposition &dec, const RhsType &rhs)\n    : m_dec(dec), m_rhs(rhs)\n  {}\n  \n  EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); }\n  EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); }\n\n  EIGEN_DEVICE_FUNC const Decomposition& dec() const { return m_dec; }\n  EIGEN_DEVICE_FUNC const RhsType&       rhs() const { return m_rhs; }\n\nprotected:\n  const Decomposition &m_dec;\n  const RhsType       &m_rhs;\n};\n\n\n// Specialization of the Solve expression for dense results\ntemplate<typename Decomposition, typename RhsType>\nclass SolveImpl<Decomposition,RhsType,Dense>\n  : public MatrixBase<Solve<Decomposition,RhsType> >\n{\n  typedef Solve<Decomposition,RhsType> Derived;\n  \npublic:\n  \n  typedef MatrixBase<Solve<Decomposition,RhsType> > Base;\n  EIGEN_DENSE_PUBLIC_INTERFACE(Derived)\n\nprivate:\n  \n  Scalar coeff(Index row, Index col) const;\n  Scalar coeff(Index i) const;\n};\n\n// Generic API dispatcher\ntemplate<typename Decomposition, typename RhsType, typename StorageKind>\nclass SolveImpl : public internal::generic_xpr_base<Solve<Decomposition,RhsType>, MatrixXpr, StorageKind>::type\n{\n  public:\n    typedef typename internal::generic_xpr_base<Solve<Decomposition,RhsType>, MatrixXpr, StorageKind>::type Base;\n};\n\nnamespace internal {\n\n// Evaluator of Solve -> eval into a temporary\ntemplate<typename Decomposition, typename RhsType>\nstruct evaluator<Solve<Decomposition,RhsType> 
>\n  : public evaluator<typename Solve<Decomposition,RhsType>::PlainObject>\n{\n  typedef Solve<Decomposition,RhsType> SolveType;\n  typedef typename SolveType::PlainObject PlainObject;\n  typedef evaluator<PlainObject> Base;\n\n  enum { Flags = Base::Flags | EvalBeforeNestingBit };\n  \n  EIGEN_DEVICE_FUNC explicit evaluator(const SolveType& solve)\n    : m_result(solve.rows(), solve.cols())\n  {\n    ::new (static_cast<Base*>(this)) Base(m_result);\n    solve.dec()._solve_impl(solve.rhs(), m_result);\n  }\n  \nprotected:  \n  PlainObject m_result;\n};\n\n// Specialization for \"dst = dec.solve(rhs)\"\n// NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere\ntemplate<typename DstXprType, typename DecType, typename RhsType, typename Scalar>\nstruct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Dense2Dense>\n{\n  typedef Solve<DecType,RhsType> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n\n    src.dec()._solve_impl(src.rhs(), dst);\n  }\n};\n\n// Specialization for \"dst = dec.transpose().solve(rhs)\"\ntemplate<typename DstXprType, typename DecType, typename RhsType, typename Scalar>\nstruct Assignment<DstXprType, Solve<Transpose<const DecType>,RhsType>, internal::assign_op<Scalar,Scalar>, Dense2Dense>\n{\n  typedef Solve<Transpose<const DecType>,RhsType> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n\n    src.dec().nestedExpression().template 
_solve_impl_transposed<false>(src.rhs(), dst);\n  }\n};\n\n// Specialization for \"dst = dec.adjoint().solve(rhs)\"\ntemplate<typename DstXprType, typename DecType, typename RhsType, typename Scalar>\nstruct Assignment<DstXprType, Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,RhsType>,\n                  internal::assign_op<Scalar,Scalar>, Dense2Dense>\n{\n  typedef Solve<CwiseUnaryOp<internal::scalar_conjugate_op<typename DecType::Scalar>, const Transpose<const DecType> >,RhsType> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n    \n    src.dec().nestedExpression().nestedExpression().template _solve_impl_transposed<true>(src.rhs(), dst);\n  }\n};\n\n} // end namepsace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SOLVE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/SolveTriangular.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SOLVETRIANGULAR_H\n#define EIGEN_SOLVETRIANGULAR_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n// Forward declarations:\n// The following two routines are implemented in the products/TriangularSolver*.h files\ntemplate<typename LhsScalar, typename RhsScalar, typename Index, int Side, int Mode, bool Conjugate, int StorageOrder>\nstruct triangular_solve_vector;\n\ntemplate <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder, int OtherStorageOrder>\nstruct triangular_solve_matrix;\n\n// small helper struct extracting some traits on the underlying solver operation\ntemplate<typename Lhs, typename Rhs, int Side>\nclass trsolve_traits\n{\n  private:\n    enum {\n      RhsIsVectorAtCompileTime = (Side==OnTheLeft ? Rhs::ColsAtCompileTime : Rhs::RowsAtCompileTime)==1\n    };\n  public:\n    enum {\n      Unrolling   = (RhsIsVectorAtCompileTime && Rhs::SizeAtCompileTime != Dynamic && Rhs::SizeAtCompileTime <= 8)\n                  ? CompleteUnrolling : NoUnrolling,\n      RhsVectors  = RhsIsVectorAtCompileTime ? 
1 : Dynamic\n    };\n};\n\ntemplate<typename Lhs, typename Rhs,\n  int Side, // can be OnTheLeft/OnTheRight\n  int Mode, // can be Upper/Lower | UnitDiag\n  int Unrolling = trsolve_traits<Lhs,Rhs,Side>::Unrolling,\n  int RhsVectors = trsolve_traits<Lhs,Rhs,Side>::RhsVectors\n  >\nstruct triangular_solver_selector;\n\ntemplate<typename Lhs, typename Rhs, int Side, int Mode>\nstruct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,1>\n{\n  typedef typename Lhs::Scalar LhsScalar;\n  typedef typename Rhs::Scalar RhsScalar;\n  typedef blas_traits<Lhs> LhsProductTraits;\n  typedef typename LhsProductTraits::ExtractType ActualLhsType;\n  typedef Map<Matrix<RhsScalar,Dynamic,1>, Aligned> MappedRhs;\n  static void run(const Lhs& lhs, Rhs& rhs)\n  {\n    ActualLhsType actualLhs = LhsProductTraits::extract(lhs);\n\n    // FIXME find a way to allow an inner stride if packet_traits<Scalar>::size==1\n\n    bool useRhsDirectly = Rhs::InnerStrideAtCompileTime==1 || rhs.innerStride()==1;\n\n    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhs,rhs.size(),\n                                                  (useRhsDirectly ? rhs.data() : 0));\n                                                  \n    if(!useRhsDirectly)\n      MappedRhs(actualRhs,rhs.size()) = rhs;\n\n    triangular_solve_vector<LhsScalar, RhsScalar, Index, Side, Mode, LhsProductTraits::NeedToConjugate,\n                            (int(Lhs::Flags) & RowMajorBit) ? 
RowMajor : ColMajor>\n      ::run(actualLhs.cols(), actualLhs.data(), actualLhs.outerStride(), actualRhs);\n\n    if(!useRhsDirectly)\n      rhs = MappedRhs(actualRhs, rhs.size());\n  }\n};\n\n// the rhs is a matrix\ntemplate<typename Lhs, typename Rhs, int Side, int Mode>\nstruct triangular_solver_selector<Lhs,Rhs,Side,Mode,NoUnrolling,Dynamic>\n{\n  typedef typename Rhs::Scalar Scalar;\n  typedef blas_traits<Lhs> LhsProductTraits;\n  typedef typename LhsProductTraits::DirectLinearAccessType ActualLhsType;\n\n  static void run(const Lhs& lhs, Rhs& rhs)\n  {\n    typename internal::add_const_on_value_type<ActualLhsType>::type actualLhs = LhsProductTraits::extract(lhs);\n\n    const Index size = lhs.rows();\n    const Index othersize = Side==OnTheLeft? rhs.cols() : rhs.rows();\n\n    typedef internal::gemm_blocking_space<(Rhs::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,\n              Rhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxRowsAtCompileTime,4> BlockingType;\n\n    BlockingType blocking(rhs.rows(), rhs.cols(), size, 1, false);\n\n    triangular_solve_matrix<Scalar,Index,Side,Mode,LhsProductTraits::NeedToConjugate,(int(Lhs::Flags) & RowMajorBit) ? RowMajor : ColMajor,\n                               (Rhs::Flags&RowMajorBit) ? 
RowMajor : ColMajor>\n      ::run(size, othersize, &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &rhs.coeffRef(0,0), rhs.outerStride(), blocking);\n  }\n};\n\n/***************************************************************************\n* meta-unrolling implementation\n***************************************************************************/\n\ntemplate<typename Lhs, typename Rhs, int Mode, int LoopIndex, int Size,\n         bool Stop = LoopIndex==Size>\nstruct triangular_solver_unroller;\n\ntemplate<typename Lhs, typename Rhs, int Mode, int LoopIndex, int Size>\nstruct triangular_solver_unroller<Lhs,Rhs,Mode,LoopIndex,Size,false> {\n  enum {\n    IsLower = ((Mode&Lower)==Lower),\n    DiagIndex  = IsLower ? LoopIndex : Size - LoopIndex - 1,\n    StartIndex = IsLower ? 0         : DiagIndex+1\n  };\n  static void run(const Lhs& lhs, Rhs& rhs)\n  {\n    if (LoopIndex>0)\n      rhs.coeffRef(DiagIndex) -= lhs.row(DiagIndex).template segment<LoopIndex>(StartIndex).transpose()\n                                .cwiseProduct(rhs.template segment<LoopIndex>(StartIndex)).sum();\n\n    if(!(Mode & UnitDiag))\n      rhs.coeffRef(DiagIndex) /= lhs.coeff(DiagIndex,DiagIndex);\n\n    triangular_solver_unroller<Lhs,Rhs,Mode,LoopIndex+1,Size>::run(lhs,rhs);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, int Mode, int LoopIndex, int Size>\nstruct triangular_solver_unroller<Lhs,Rhs,Mode,LoopIndex,Size,true> {\n  static void run(const Lhs&, Rhs&) {}\n};\n\ntemplate<typename Lhs, typename Rhs, int Mode>\nstruct triangular_solver_selector<Lhs,Rhs,OnTheLeft,Mode,CompleteUnrolling,1> {\n  static void run(const Lhs& lhs, Rhs& rhs)\n  { triangular_solver_unroller<Lhs,Rhs,Mode,0,Rhs::SizeAtCompileTime>::run(lhs,rhs); }\n};\n\ntemplate<typename Lhs, typename Rhs, int Mode>\nstruct triangular_solver_selector<Lhs,Rhs,OnTheRight,Mode,CompleteUnrolling,1> {\n  static void run(const Lhs& lhs, Rhs& rhs)\n  {\n    Transpose<const Lhs> trLhs(lhs);\n    Transpose<Rhs> trRhs(rhs);\n    \n    
triangular_solver_unroller<Transpose<const Lhs>,Transpose<Rhs>,\n                              ((Mode&Upper)==Upper ? Lower : Upper) | (Mode&UnitDiag),\n                              0,Rhs::SizeAtCompileTime>::run(trLhs,trRhs);\n  }\n};\n\n} // end namespace internal\n\n/***************************************************************************\n* TriangularView methods\n***************************************************************************/\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate<typename MatrixType, unsigned int Mode>\ntemplate<int Side, typename OtherDerived>\nEIGEN_DEVICE_FUNC void TriangularViewImpl<MatrixType,Mode,Dense>::solveInPlace(const MatrixBase<OtherDerived>& _other) const\n{\n  OtherDerived& other = _other.const_cast_derived();\n  eigen_assert( derived().cols() == derived().rows() && ((Side==OnTheLeft && derived().cols() == other.rows()) || (Side==OnTheRight && derived().cols() == other.cols())) );\n  eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));\n\n  enum { copy = (internal::traits<OtherDerived>::Flags & RowMajorBit)  && OtherDerived::IsVectorAtCompileTime && OtherDerived::SizeAtCompileTime!=1};\n  typedef typename internal::conditional<copy,\n    typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;\n  OtherCopy otherCopy(other);\n\n  internal::triangular_solver_selector<MatrixType, typename internal::remove_reference<OtherCopy>::type,\n    Side, Mode>::run(derived().nestedExpression(), otherCopy);\n\n  if (copy)\n    other = otherCopy;\n}\n\ntemplate<typename Derived, unsigned int Mode>\ntemplate<int Side, typename Other>\nconst internal::triangular_solve_retval<Side,TriangularView<Derived,Mode>,Other>\nTriangularViewImpl<Derived,Mode,Dense>::solve(const MatrixBase<Other>& other) const\n{\n  return internal::triangular_solve_retval<Side,TriangularViewType,Other>(derived(), other.derived());\n}\n#endif\n\nnamespace internal {\n\n\ntemplate<int Side, typename 
TriangularType, typename Rhs>\nstruct traits<triangular_solve_retval<Side, TriangularType, Rhs> >\n{\n  typedef typename internal::plain_matrix_type_column_major<Rhs>::type ReturnType;\n};\n\ntemplate<int Side, typename TriangularType, typename Rhs> struct triangular_solve_retval\n : public ReturnByValue<triangular_solve_retval<Side, TriangularType, Rhs> >\n{\n  typedef typename remove_all<typename Rhs::Nested>::type RhsNestedCleaned;\n  typedef ReturnByValue<triangular_solve_retval> Base;\n\n  triangular_solve_retval(const TriangularType& tri, const Rhs& rhs)\n    : m_triangularMatrix(tri), m_rhs(rhs)\n  {}\n\n  inline Index rows() const { return m_rhs.rows(); }\n  inline Index cols() const { return m_rhs.cols(); }\n\n  template<typename Dest> inline void evalTo(Dest& dst) const\n  {\n    if(!is_same_dense(dst,m_rhs))\n      dst = m_rhs;\n    m_triangularMatrix.template solveInPlace<Side>(dst);\n  }\n\n  protected:\n    const TriangularType& m_triangularMatrix;\n    typename Rhs::Nested m_rhs;\n};\n\n} // namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SOLVETRIANGULAR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/SolverBase.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SOLVERBASE_H\n#define EIGEN_SOLVERBASE_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n\n\n} // end namespace internal\n\n/** \\class SolverBase\n  * \\brief A base class for matrix decomposition and solvers\n  *\n  * \\tparam Derived the actual type of the decomposition/solver.\n  *\n  * Any matrix decomposition inheriting this base class provide the following API:\n  *\n  * \\code\n  * MatrixType A, b, x;\n  * DecompositionType dec(A);\n  * x = dec.solve(b);             // solve A   * x = b\n  * x = dec.transpose().solve(b); // solve A^T * x = b\n  * x = dec.adjoint().solve(b);   // solve A'  * x = b\n  * \\endcode\n  *\n  * \\warning Currently, any other usage of transpose() and adjoint() are not supported and will produce compilation errors.\n  *\n  * \\sa class PartialPivLU, class FullPivLU\n  */\ntemplate<typename Derived>\nclass SolverBase : public EigenBase<Derived>\n{\n  public:\n\n    typedef EigenBase<Derived> Base;\n    typedef typename internal::traits<Derived>::Scalar Scalar;\n    typedef Scalar CoeffReturnType;\n\n    enum {\n      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,\n      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,\n      SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,\n                                                          internal::traits<Derived>::ColsAtCompileTime>::ret),\n      MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,\n      
MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime,\n                                                             internal::traits<Derived>::MaxColsAtCompileTime>::ret),\n      IsVectorAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime == 1\n                           || internal::traits<Derived>::MaxColsAtCompileTime == 1\n    };\n\n    /** Default constructor */\n    SolverBase()\n    {}\n\n    ~SolverBase()\n    {}\n\n    using Base::derived;\n\n    /** \\returns an expression of the solution x of \\f$ A x = b \\f$ using the current decomposition of A.\n      */\n    template<typename Rhs>\n    inline const Solve<Derived, Rhs>\n    solve(const MatrixBase<Rhs>& b) const\n    {\n      eigen_assert(derived().rows()==b.rows() && \"solve(): invalid number of rows of the right hand side matrix b\");\n      return Solve<Derived, Rhs>(derived(), b.derived());\n    }\n\n    /** \\internal the return type of transpose() */\n    typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;\n    /** \\returns an expression of the transposed of the factored matrix.\n      *\n      * A typical usage is to solve for the transposed problem A^T x = b:\n      * \\code x = dec.transpose().solve(b); \\endcode\n      *\n      * \\sa adjoint(), solve()\n      */\n    inline ConstTransposeReturnType transpose() const\n    {\n      return ConstTransposeReturnType(derived());\n    }\n\n    /** \\internal the return type of adjoint() */\n    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,\n                        CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, ConstTransposeReturnType>,\n                        ConstTransposeReturnType\n                     >::type AdjointReturnType;\n    /** \\returns an expression of the adjoint of the factored matrix\n      *\n      * A typical usage is to solve for the adjoint problem A' x = b:\n      * \\code x = 
dec.adjoint().solve(b); \\endcode\n      *\n      * For real scalar types, this function is equivalent to transpose().\n      *\n      * \\sa transpose(), solve()\n      */\n    inline AdjointReturnType adjoint() const\n    {\n      return AdjointReturnType(derived().transpose());\n    }\n\n  protected:\n};\n\nnamespace internal {\n\ntemplate<typename Derived>\nstruct generic_xpr_base<Derived, MatrixXpr, SolverStorage>\n{\n  typedef SolverBase<Derived> type;\n\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SOLVERBASE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/StableNorm.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_STABLENORM_H\n#define EIGEN_STABLENORM_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename ExpressionType, typename Scalar>\ninline void stable_norm_kernel(const ExpressionType& bl, Scalar& ssq, Scalar& scale, Scalar& invScale)\n{\n  Scalar maxCoeff = bl.cwiseAbs().maxCoeff();\n  \n  if(maxCoeff>scale)\n  {\n    ssq = ssq * numext::abs2(scale/maxCoeff);\n    Scalar tmp = Scalar(1)/maxCoeff;\n    if(tmp > NumTraits<Scalar>::highest())\n    {\n      invScale = NumTraits<Scalar>::highest();\n      scale = Scalar(1)/invScale;\n    }\n    else if(maxCoeff>NumTraits<Scalar>::highest()) // we got a INF\n    {\n      invScale = Scalar(1);\n      scale = maxCoeff;\n    }\n    else\n    {\n      scale = maxCoeff;\n      invScale = tmp;\n    }\n  }\n  else if(maxCoeff!=maxCoeff) // we got a NaN\n  {\n    scale = maxCoeff;\n  }\n  \n  // TODO if the maxCoeff is much much smaller than the current scale,\n  // then we can neglect this sub vector\n  if(scale>Scalar(0)) // if scale==0, then bl is 0 \n    ssq += (bl*invScale).squaredNorm();\n}\n\ntemplate<typename Derived>\ninline typename NumTraits<typename traits<Derived>::Scalar>::Real\nblueNorm_impl(const EigenBase<Derived>& _vec)\n{\n  typedef typename Derived::RealScalar RealScalar;  \n  using std::pow;\n  using std::sqrt;\n  using std::abs;\n  const Derived& vec(_vec.derived());\n  static bool initialized = false;\n  static RealScalar b1, b2, s1m, s2m, rbig, relerr;\n  if(!initialized)\n  {\n    int ibeta, it, iemin, iemax, iexp;\n    RealScalar eps;\n    // This program calculates the machine-dependent constants\n 
   // bl, b2, slm, s2m, relerr overfl\n    // from the \"basic\" machine-dependent numbers\n    // nbig, ibeta, it, iemin, iemax, rbig.\n    // The following define the basic machine-dependent constants.\n    // For portability, the PORT subprograms \"ilmaeh\" and \"rlmach\"\n    // are used. For any specific computer, each of the assignment\n    // statements can be replaced\n    ibeta = std::numeric_limits<RealScalar>::radix;                 // base for floating-point numbers\n    it    = std::numeric_limits<RealScalar>::digits;                // number of base-beta digits in mantissa\n    iemin = std::numeric_limits<RealScalar>::min_exponent;          // minimum exponent\n    iemax = std::numeric_limits<RealScalar>::max_exponent;          // maximum exponent\n    rbig  = (std::numeric_limits<RealScalar>::max)();               // largest floating-point number\n\n    iexp  = -((1-iemin)/2);\n    b1    = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // lower boundary of midrange\n    iexp  = (iemax + 1 - it)/2;\n    b2    = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // upper boundary of midrange\n\n    iexp  = (2-iemin)/2;\n    s1m   = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // scaling factor for lower range\n    iexp  = - ((iemax+it)/2);\n    s2m   = RealScalar(pow(RealScalar(ibeta),RealScalar(iexp)));    // scaling factor for upper range\n\n    eps     = RealScalar(pow(double(ibeta), 1-it));\n    relerr  = sqrt(eps);                                            // tolerance for neglecting asml\n    initialized = true;\n  }\n  Index n = vec.size();\n  RealScalar ab2 = b2 / RealScalar(n);\n  RealScalar asml = RealScalar(0);\n  RealScalar amed = RealScalar(0);\n  RealScalar abig = RealScalar(0);\n  for(typename Derived::InnerIterator it(vec, 0); it; ++it)\n  {\n    RealScalar ax = abs(it.value());\n    if(ax > ab2)     abig += numext::abs2(ax*s2m);\n    else if(ax < b1) asml += numext::abs2(ax*s1m);\n    else             amed += 
numext::abs2(ax);\n  }\n  if(amed!=amed)\n    return amed;  // we got a NaN\n  if(abig > RealScalar(0))\n  {\n    abig = sqrt(abig);\n    if(abig > rbig) // overflow, or *this contains INF values\n      return abig;  // return INF\n    if(amed > RealScalar(0))\n    {\n      abig = abig/s2m;\n      amed = sqrt(amed);\n    }\n    else\n      return abig/s2m;\n  }\n  else if(asml > RealScalar(0))\n  {\n    if (amed > RealScalar(0))\n    {\n      abig = sqrt(amed);\n      amed = sqrt(asml) / s1m;\n    }\n    else\n      return sqrt(asml)/s1m;\n  }\n  else\n    return sqrt(amed);\n  asml = numext::mini(abig, amed);\n  abig = numext::maxi(abig, amed);\n  if(asml <= abig*relerr)\n    return abig;\n  else\n    return abig * sqrt(RealScalar(1) + numext::abs2(asml/abig));\n}\n\n} // end namespace internal\n\n/** \\returns the \\em l2 norm of \\c *this avoiding underflow and overflow.\n  * This version use a blockwise two passes algorithm:\n  *  1 - find the absolute largest coefficient \\c s\n  *  2 - compute \\f$ s \\Vert \\frac{*this}{s} \\Vert \\f$ in a standard way\n  *\n  * For architecture/scalar types supporting vectorization, this version\n  * is faster than blueNorm(). 
Otherwise the blueNorm() is much faster.\n  *\n  * \\sa norm(), blueNorm(), hypotNorm()\n  */\ntemplate<typename Derived>\ninline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real\nMatrixBase<Derived>::stableNorm() const\n{\n  using std::sqrt;\n  using std::abs;\n  const Index blockSize = 4096;\n  RealScalar scale(0);\n  RealScalar invScale(1);\n  RealScalar ssq(0); // sum of square\n  \n  typedef typename internal::nested_eval<Derived,2>::type DerivedCopy;\n  typedef typename internal::remove_all<DerivedCopy>::type DerivedCopyClean;\n  DerivedCopy copy(derived());\n  \n  enum {\n    CanAlign = (   (int(DerivedCopyClean::Flags)&DirectAccessBit)\n                || (int(internal::evaluator<DerivedCopyClean>::Alignment)>0) // FIXME Alignment)>0 might not be enough\n               ) && (blockSize*sizeof(Scalar)*2<EIGEN_STACK_ALLOCATION_LIMIT)\n                 && (EIGEN_MAX_STATIC_ALIGN_BYTES>0) // if we cannot allocate on the stack, then let's not bother about this optimization\n  };\n  typedef typename internal::conditional<CanAlign, Ref<const Matrix<Scalar,Dynamic,1,0,blockSize,1>, internal::evaluator<DerivedCopyClean>::Alignment>,\n                                                   typename DerivedCopyClean::ConstSegmentReturnType>::type SegmentWrapper;\n  Index n = size();\n  \n  if(n==1)\n    return abs(this->coeff(0));\n  \n  Index bi = internal::first_default_aligned(copy);\n  if (bi>0)\n    internal::stable_norm_kernel(copy.head(bi), ssq, scale, invScale);\n  for (; bi<n; bi+=blockSize)\n    internal::stable_norm_kernel(SegmentWrapper(copy.segment(bi,numext::mini(blockSize, n - bi))), ssq, scale, invScale);\n  return scale * sqrt(ssq);\n}\n\n/** \\returns the \\em l2 norm of \\c *this using the Blue's algorithm.\n  * A Portable Fortran Program to Find the Euclidean Norm of a Vector,\n  * ACM TOMS, Vol 4, Issue 1, 1978.\n  *\n  * For architecture/scalar types without vectorization, this version\n  * is much faster than stableNorm(). 
Otherwise the stableNorm() is faster.\n  *\n  * \\sa norm(), stableNorm(), hypotNorm()\n  */\ntemplate<typename Derived>\ninline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real\nMatrixBase<Derived>::blueNorm() const\n{\n  return internal::blueNorm_impl(*this);\n}\n\n/** \\returns the \\em l2 norm of \\c *this avoiding undeflow and overflow.\n  * This version use a concatenation of hypot() calls, and it is very slow.\n  *\n  * \\sa norm(), stableNorm()\n  */\ntemplate<typename Derived>\ninline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real\nMatrixBase<Derived>::hypotNorm() const\n{\n  return this->cwiseAbs().redux(internal::scalar_hypot_op<RealScalar>());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_STABLENORM_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Stride.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_STRIDE_H\n#define EIGEN_STRIDE_H\n\nnamespace Eigen { \n\n/** \\class Stride\n  * \\ingroup Core_Module\n  *\n  * \\brief Holds strides information for Map\n  *\n  * This class holds the strides information for mapping arrays with strides with class Map.\n  *\n  * It holds two values: the inner stride and the outer stride.\n  *\n  * The inner stride is the pointer increment between two consecutive entries within a given row of a\n  * row-major matrix or within a given column of a column-major matrix.\n  *\n  * The outer stride is the pointer increment between two consecutive rows of a row-major matrix or\n  * between two consecutive columns of a column-major matrix.\n  *\n  * These two values can be passed either at compile-time as template parameters, or at runtime as\n  * arguments to the constructor.\n  *\n  * Indeed, this class takes two template parameters:\n  *  \\tparam _OuterStrideAtCompileTime the outer stride, or Dynamic if you want to specify it at runtime.\n  *  \\tparam _InnerStrideAtCompileTime the inner stride, or Dynamic if you want to specify it at runtime.\n  *\n  * Here is an example:\n  * \\include Map_general_stride.cpp\n  * Output: \\verbinclude Map_general_stride.out\n  *\n  * \\sa class InnerStride, class OuterStride, \\ref TopicStorageOrders\n  */\ntemplate<int _OuterStrideAtCompileTime, int _InnerStrideAtCompileTime>\nclass Stride\n{\n  public:\n    typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n    enum {\n      InnerStrideAtCompileTime = _InnerStrideAtCompileTime,\n      OuterStrideAtCompileTime = _OuterStrideAtCompileTime\n    
};\n\n    /** Default constructor, for use when strides are fixed at compile time */\n    EIGEN_DEVICE_FUNC\n    Stride()\n      : m_outer(OuterStrideAtCompileTime), m_inner(InnerStrideAtCompileTime)\n    {\n      eigen_assert(InnerStrideAtCompileTime != Dynamic && OuterStrideAtCompileTime != Dynamic);\n    }\n\n    /** Constructor allowing to pass the strides at runtime */\n    EIGEN_DEVICE_FUNC\n    Stride(Index outerStride, Index innerStride)\n      : m_outer(outerStride), m_inner(innerStride)\n    {\n      eigen_assert(innerStride>=0 && outerStride>=0);\n    }\n\n    /** Copy constructor */\n    EIGEN_DEVICE_FUNC\n    Stride(const Stride& other)\n      : m_outer(other.outer()), m_inner(other.inner())\n    {}\n\n    /** \\returns the outer stride */\n    EIGEN_DEVICE_FUNC\n    inline Index outer() const { return m_outer.value(); }\n    /** \\returns the inner stride */\n    EIGEN_DEVICE_FUNC\n    inline Index inner() const { return m_inner.value(); }\n\n  protected:\n    internal::variable_if_dynamic<Index, OuterStrideAtCompileTime> m_outer;\n    internal::variable_if_dynamic<Index, InnerStrideAtCompileTime> m_inner;\n};\n\n/** \\brief Convenience specialization of Stride to specify only an inner stride\n  * See class Map for some examples */\ntemplate<int Value>\nclass InnerStride : public Stride<0, Value>\n{\n    typedef Stride<0, Value> Base;\n  public:\n    EIGEN_DEVICE_FUNC InnerStride() : Base() {}\n    EIGEN_DEVICE_FUNC InnerStride(Index v) : Base(0, v) {} // FIXME making this explicit could break valid code\n};\n\n/** \\brief Convenience specialization of Stride to specify only an outer stride\n  * See class Map for some examples */\ntemplate<int Value>\nclass OuterStride : public Stride<Value, 0>\n{\n    typedef Stride<Value, 0> Base;\n  public:\n    EIGEN_DEVICE_FUNC OuterStride() : Base() {}\n    EIGEN_DEVICE_FUNC OuterStride(Index v) : Base(v,0) {} // FIXME making this explicit could break valid code\n};\n\n} // end namespace Eigen\n\n#endif // 
EIGEN_STRIDE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Swap.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SWAP_H\n#define EIGEN_SWAP_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n// Overload default assignPacket behavior for swapping them\ntemplate<typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT>\nclass generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, swap_assign_op<typename DstEvaluatorTypeT::Scalar>, Specialized>\n : public generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, swap_assign_op<typename DstEvaluatorTypeT::Scalar>, BuiltIn>\n{\nprotected:\n  typedef generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, swap_assign_op<typename DstEvaluatorTypeT::Scalar>, BuiltIn> Base;\n  using Base::m_dst;\n  using Base::m_src;\n  using Base::m_functor;\n  \npublic:\n  typedef typename Base::Scalar Scalar;\n  typedef typename Base::DstXprType DstXprType;\n  typedef swap_assign_op<Scalar> Functor;\n  \n  EIGEN_DEVICE_FUNC generic_dense_assignment_kernel(DstEvaluatorTypeT &dst, const SrcEvaluatorTypeT &src, const Functor &func, DstXprType& dstExpr)\n    : Base(dst, src, func, dstExpr)\n  {}\n  \n  template<int StoreMode, int LoadMode, typename PacketType>\n  void assignPacket(Index row, Index col)\n  {\n    PacketType tmp = m_src.template packet<LoadMode,PacketType>(row,col);\n    const_cast<SrcEvaluatorTypeT&>(m_src).template writePacket<LoadMode>(row,col, m_dst.template packet<StoreMode,PacketType>(row,col));\n    m_dst.template writePacket<StoreMode>(row,col,tmp);\n  }\n  \n  template<int StoreMode, int LoadMode, typename PacketType>\n  void assignPacket(Index index)\n  {\n    PacketType tmp = m_src.template 
packet<LoadMode,PacketType>(index);\n    const_cast<SrcEvaluatorTypeT&>(m_src).template writePacket<LoadMode>(index, m_dst.template packet<StoreMode,PacketType>(index));\n    m_dst.template writePacket<StoreMode>(index,tmp);\n  }\n  \n  // TODO find a simple way not to have to copy/paste this function from generic_dense_assignment_kernel, by simple I mean no CRTP (Gael)\n  template<int StoreMode, int LoadMode, typename PacketType>\n  void assignPacketByOuterInner(Index outer, Index inner)\n  {\n    Index row = Base::rowIndexByOuterInner(outer, inner); \n    Index col = Base::colIndexByOuterInner(outer, inner);\n    assignPacket<StoreMode,LoadMode,PacketType>(row, col);\n  }\n};\n\n} // namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SWAP_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Transpose.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_TRANSPOSE_H\n#define EIGEN_TRANSPOSE_H\n\nnamespace Eigen { \n\nnamespace internal {\ntemplate<typename MatrixType>\nstruct traits<Transpose<MatrixType> > : public traits<MatrixType>\n{\n  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;\n  typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedPlain;\n  enum {\n    RowsAtCompileTime = MatrixType::ColsAtCompileTime,\n    ColsAtCompileTime = MatrixType::RowsAtCompileTime,\n    MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime,\n    MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n    FlagsLvalueBit = is_lvalue<MatrixType>::value ? 
LvalueBit : 0,\n    Flags0 = traits<MatrixTypeNestedPlain>::Flags & ~(LvalueBit | NestByRefBit),\n    Flags1 = Flags0 | FlagsLvalueBit,\n    Flags = Flags1 ^ RowMajorBit,\n    InnerStrideAtCompileTime = inner_stride_at_compile_time<MatrixType>::ret,\n    OuterStrideAtCompileTime = outer_stride_at_compile_time<MatrixType>::ret\n  };\n};\n}\n\ntemplate<typename MatrixType, typename StorageKind> class TransposeImpl;\n\n/** \\class Transpose\n  * \\ingroup Core_Module\n  *\n  * \\brief Expression of the transpose of a matrix\n  *\n  * \\tparam MatrixType the type of the object of which we are taking the transpose\n  *\n  * This class represents an expression of the transpose of a matrix.\n  * It is the return type of MatrixBase::transpose() and MatrixBase::adjoint()\n  * and most of the time this is the only way it is used.\n  *\n  * \\sa MatrixBase::transpose(), MatrixBase::adjoint()\n  */\ntemplate<typename MatrixType> class Transpose\n  : public TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>\n{\n  public:\n\n    typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;\n\n    typedef typename TransposeImpl<MatrixType,typename internal::traits<MatrixType>::StorageKind>::Base Base;\n    EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose)\n    typedef typename internal::remove_all<MatrixType>::type NestedExpression;\n\n    EIGEN_DEVICE_FUNC\n    explicit inline Transpose(MatrixType& matrix) : m_matrix(matrix) {}\n\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)\n\n    EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.cols(); }\n    EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.rows(); }\n\n    /** \\returns the nested expression */\n    EIGEN_DEVICE_FUNC\n    const typename internal::remove_all<MatrixTypeNested>::type&\n    nestedExpression() const { return m_matrix; }\n\n    /** \\returns the nested expression */\n    EIGEN_DEVICE_FUNC\n    typename 
internal::remove_reference<MatrixTypeNested>::type&\n    nestedExpression() { return m_matrix; }\n\n    /** \\internal */\n    void resize(Index nrows, Index ncols) {\n      m_matrix.resize(ncols,nrows);\n    }\n\n  protected:\n    typename internal::ref_selector<MatrixType>::non_const_type m_matrix;\n};\n\nnamespace internal {\n\ntemplate<typename MatrixType, bool HasDirectAccess = has_direct_access<MatrixType>::ret>\nstruct TransposeImpl_base\n{\n  typedef typename dense_xpr_base<Transpose<MatrixType> >::type type;\n};\n\ntemplate<typename MatrixType>\nstruct TransposeImpl_base<MatrixType, false>\n{\n  typedef typename dense_xpr_base<Transpose<MatrixType> >::type type;\n};\n\n} // end namespace internal\n\n// Generic API dispatcher\ntemplate<typename XprType, typename StorageKind>\nclass TransposeImpl\n  : public internal::generic_xpr_base<Transpose<XprType> >::type\n{\npublic:\n  typedef typename internal::generic_xpr_base<Transpose<XprType> >::type Base;\n};\n\ntemplate<typename MatrixType> class TransposeImpl<MatrixType,Dense>\n  : public internal::TransposeImpl_base<MatrixType>::type\n{\n  public:\n\n    typedef typename internal::TransposeImpl_base<MatrixType>::type Base;\n    using Base::coeffRef;\n    EIGEN_DENSE_PUBLIC_INTERFACE(Transpose<MatrixType>)\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(TransposeImpl)\n\n    EIGEN_DEVICE_FUNC inline Index innerStride() const { return derived().nestedExpression().innerStride(); }\n    EIGEN_DEVICE_FUNC inline Index outerStride() const { return derived().nestedExpression().outerStride(); }\n\n    typedef typename internal::conditional<\n                       internal::is_lvalue<MatrixType>::value,\n                       Scalar,\n                       const Scalar\n                     >::type ScalarWithConstIfNotLvalue;\n\n    EIGEN_DEVICE_FUNC inline ScalarWithConstIfNotLvalue* data() { return derived().nestedExpression().data(); }\n    EIGEN_DEVICE_FUNC inline const Scalar* data() const { return 
derived().nestedExpression().data(); }\n\n    // FIXME: shall we keep the const version of coeffRef?\n    EIGEN_DEVICE_FUNC\n    inline const Scalar& coeffRef(Index rowId, Index colId) const\n    {\n      return derived().nestedExpression().coeffRef(colId, rowId);\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline const Scalar& coeffRef(Index index) const\n    {\n      return derived().nestedExpression().coeffRef(index);\n    }\n};\n\n/** \\returns an expression of the transpose of *this.\n  *\n  * Example: \\include MatrixBase_transpose.cpp\n  * Output: \\verbinclude MatrixBase_transpose.out\n  *\n  * \\warning If you want to replace a matrix by its own transpose, do \\b NOT do this:\n  * \\code\n  * m = m.transpose(); // bug!!! caused by aliasing effect\n  * \\endcode\n  * Instead, use the transposeInPlace() method:\n  * \\code\n  * m.transposeInPlace();\n  * \\endcode\n  * which gives Eigen good opportunities for optimization, or alternatively you can also do:\n  * \\code\n  * m = m.transpose().eval();\n  * \\endcode\n  *\n  * \\sa transposeInPlace(), adjoint() */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline Transpose<Derived>\nDenseBase<Derived>::transpose()\n{\n  return TransposeReturnType(derived());\n}\n\n/** This is the const version of transpose().\n  *\n  * Make sure you read the warning for transpose() !\n  *\n  * \\sa transposeInPlace(), adjoint() */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::ConstTransposeReturnType\nDenseBase<Derived>::transpose() const\n{\n  return ConstTransposeReturnType(derived());\n}\n\n/** \\returns an expression of the adjoint (i.e. conjugate transpose) of *this.\n  *\n  * Example: \\include MatrixBase_adjoint.cpp\n  * Output: \\verbinclude MatrixBase_adjoint.out\n  *\n  * \\warning If you want to replace a matrix by its own adjoint, do \\b NOT do this:\n  * \\code\n  * m = m.adjoint(); // bug!!! 
caused by aliasing effect\n  * \\endcode\n  * Instead, use the adjointInPlace() method:\n  * \\code\n  * m.adjointInPlace();\n  * \\endcode\n  * which gives Eigen good opportunities for optimization, or alternatively you can also do:\n  * \\code\n  * m = m.adjoint().eval();\n  * \\endcode\n  *\n  * \\sa adjointInPlace(), transpose(), conjugate(), class Transpose, class internal::scalar_conjugate_op */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline const typename MatrixBase<Derived>::AdjointReturnType\nMatrixBase<Derived>::adjoint() const\n{\n  return AdjointReturnType(this->transpose());\n}\n\n/***************************************************************************\n* \"in place\" transpose implementation\n***************************************************************************/\n\nnamespace internal {\n\ntemplate<typename MatrixType,\n  bool IsSquare = (MatrixType::RowsAtCompileTime == MatrixType::ColsAtCompileTime) && MatrixType::RowsAtCompileTime!=Dynamic,\n  bool MatchPacketSize =\n        (int(MatrixType::RowsAtCompileTime) == int(internal::packet_traits<typename MatrixType::Scalar>::size))\n    &&  (internal::evaluator<MatrixType>::Flags&PacketAccessBit) >\nstruct inplace_transpose_selector;\n\ntemplate<typename MatrixType>\nstruct inplace_transpose_selector<MatrixType,true,false> { // square matrix\n  static void run(MatrixType& m) {\n    m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());\n  }\n};\n\n// TODO: vectorized path is currently limited to LargestPacketSize x LargestPacketSize cases only.\ntemplate<typename MatrixType>\nstruct inplace_transpose_selector<MatrixType,true,true> { // PacketSize x PacketSize\n  static void run(MatrixType& m) {\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename internal::packet_traits<typename MatrixType::Scalar>::type Packet;\n    const Index PacketSize = internal::packet_traits<Scalar>::size;\n    const Index Alignment = 
internal::evaluator<MatrixType>::Alignment;\n    PacketBlock<Packet> A;\n    for (Index i=0; i<PacketSize; ++i)\n      A.packet[i] = m.template packetByOuterInner<Alignment>(i,0);\n    internal::ptranspose(A);\n    for (Index i=0; i<PacketSize; ++i)\n      m.template writePacket<Alignment>(m.rowIndexByOuterInner(i,0), m.colIndexByOuterInner(i,0), A.packet[i]);\n  }\n};\n\ntemplate<typename MatrixType,bool MatchPacketSize>\nstruct inplace_transpose_selector<MatrixType,false,MatchPacketSize> { // non square matrix\n  static void run(MatrixType& m) {\n    if (m.rows()==m.cols())\n      m.matrix().template triangularView<StrictlyUpper>().swap(m.matrix().transpose());\n    else\n      m = m.transpose().eval();\n  }\n};\n\n} // end namespace internal\n\n/** This is the \"in place\" version of transpose(): it replaces \\c *this by its own transpose.\n  * Thus, doing\n  * \\code\n  * m.transposeInPlace();\n  * \\endcode\n  * has the same effect on m as doing\n  * \\code\n  * m = m.transpose().eval();\n  * \\endcode\n  * and is faster and also safer because in the latter line of code, forgetting the eval() results\n  * in a bug caused by \\ref TopicAliasing \"aliasing\".\n  *\n  * Notice however that this method is only useful if you want to replace a matrix by its own transpose.\n  * If you just need the transpose of a matrix, use transpose().\n  *\n  * \\note if the matrix is not square, then \\c *this must be a resizable matrix. 
\n  * This excludes (non-square) fixed-size matrices, block-expressions and maps.\n  *\n  * \\sa transpose(), adjoint(), adjointInPlace() */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline void DenseBase<Derived>::transposeInPlace()\n{\n  eigen_assert((rows() == cols() || (RowsAtCompileTime == Dynamic && ColsAtCompileTime == Dynamic))\n               && \"transposeInPlace() called on a non-square non-resizable matrix\");\n  internal::inplace_transpose_selector<Derived>::run(derived());\n}\n\n/***************************************************************************\n* \"in place\" adjoint implementation\n***************************************************************************/\n\n/** This is the \"in place\" version of adjoint(): it replaces \\c *this by its own transpose.\n  * Thus, doing\n  * \\code\n  * m.adjointInPlace();\n  * \\endcode\n  * has the same effect on m as doing\n  * \\code\n  * m = m.adjoint().eval();\n  * \\endcode\n  * and is faster and also safer because in the latter line of code, forgetting the eval() results\n  * in a bug caused by aliasing.\n  *\n  * Notice however that this method is only useful if you want to replace a matrix by its own adjoint.\n  * If you just need the adjoint of a matrix, use adjoint().\n  *\n  * \\note if the matrix is not square, then \\c *this must be a resizable matrix.\n  * This excludes (non-square) fixed-size matrices, block-expressions and maps.\n  *\n  * \\sa transpose(), adjoint(), transposeInPlace() */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline void MatrixBase<Derived>::adjointInPlace()\n{\n  derived() = adjoint().eval();\n}\n\n#ifndef EIGEN_NO_DEBUG\n\n// The following is to detect aliasing problems in most common cases.\n\nnamespace internal {\n\ntemplate<bool DestIsTransposed, typename OtherDerived>\nstruct check_transpose_aliasing_compile_time_selector\n{\n  enum { ret = bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed };\n};\n\ntemplate<bool DestIsTransposed, 
typename BinOp, typename DerivedA, typename DerivedB>\nstruct check_transpose_aliasing_compile_time_selector<DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> >\n{\n  enum { ret =    bool(blas_traits<DerivedA>::IsTransposed) != DestIsTransposed\n               || bool(blas_traits<DerivedB>::IsTransposed) != DestIsTransposed\n  };\n};\n\ntemplate<typename Scalar, bool DestIsTransposed, typename OtherDerived>\nstruct check_transpose_aliasing_run_time_selector\n{\n  static bool run(const Scalar* dest, const OtherDerived& src)\n  {\n    return (bool(blas_traits<OtherDerived>::IsTransposed) != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src));\n  }\n};\n\ntemplate<typename Scalar, bool DestIsTransposed, typename BinOp, typename DerivedA, typename DerivedB>\nstruct check_transpose_aliasing_run_time_selector<Scalar,DestIsTransposed,CwiseBinaryOp<BinOp,DerivedA,DerivedB> >\n{\n  static bool run(const Scalar* dest, const CwiseBinaryOp<BinOp,DerivedA,DerivedB>& src)\n  {\n    return ((blas_traits<DerivedA>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.lhs())))\n        || ((blas_traits<DerivedB>::IsTransposed != DestIsTransposed) && (dest!=0 && dest==(const Scalar*)extract_data(src.rhs())));\n  }\n};\n\n// the following selector, checkTransposeAliasing_impl, based on MightHaveTransposeAliasing,\n// is because when the condition controlling the assert is known at compile time, ICC emits a warning.\n// This is actually a good warning: in expressions that don't have any transposing, the condition is\n// known at compile time to be false, and using that, we can avoid generating the code of the assert again\n// and again for all these expressions that don't need it.\n\ntemplate<typename Derived, typename OtherDerived,\n         bool MightHaveTransposeAliasing\n                 = check_transpose_aliasing_compile_time_selector\n                     <blas_traits<Derived>::IsTransposed,OtherDerived>::ret\n        
>\nstruct checkTransposeAliasing_impl\n{\n    static void run(const Derived& dst, const OtherDerived& other)\n    {\n        eigen_assert((!check_transpose_aliasing_run_time_selector\n                      <typename Derived::Scalar,blas_traits<Derived>::IsTransposed,OtherDerived>\n                      ::run(extract_data(dst), other))\n          && \"aliasing detected during transposition, use transposeInPlace() \"\n             \"or evaluate the rhs into a temporary using .eval()\");\n\n    }\n};\n\ntemplate<typename Derived, typename OtherDerived>\nstruct checkTransposeAliasing_impl<Derived, OtherDerived, false>\n{\n    static void run(const Derived&, const OtherDerived&)\n    {\n    }\n};\n\ntemplate<typename Dst, typename Src>\nvoid check_for_aliasing(const Dst &dst, const Src &src)\n{\n  internal::checkTransposeAliasing_impl<Dst, Src>::run(dst, src);\n}\n\n} // end namespace internal\n\n#endif // EIGEN_NO_DEBUG\n\n} // end namespace Eigen\n\n#endif // EIGEN_TRANSPOSE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Transpositions.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2010-2011 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_TRANSPOSITIONS_H\n#define EIGEN_TRANSPOSITIONS_H\n\nnamespace Eigen { \n\ntemplate<typename Derived>\nclass TranspositionsBase\n{\n    typedef internal::traits<Derived> Traits;\n    \n  public:\n\n    typedef typename Traits::IndicesType IndicesType;\n    typedef typename IndicesType::Scalar StorageIndex;\n    typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n\n    Derived& derived() { return *static_cast<Derived*>(this); }\n    const Derived& derived() const { return *static_cast<const Derived*>(this); }\n\n    /** Copies the \\a other transpositions into \\c *this */\n    template<typename OtherDerived>\n    Derived& operator=(const TranspositionsBase<OtherDerived>& other)\n    {\n      indices() = other.indices();\n      return derived();\n    }\n    \n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** This is a special case of the templated operator=. 
Its purpose is to\n      * prevent a default operator= from hiding the templated operator=.\n      */\n    Derived& operator=(const TranspositionsBase& other)\n    {\n      indices() = other.indices();\n      return derived();\n    }\n    #endif\n\n    /** \\returns the number of transpositions */\n    Index size() const { return indices().size(); }\n    /** \\returns the number of rows of the equivalent permutation matrix */\n    Index rows() const { return indices().size(); }\n    /** \\returns the number of columns of the equivalent permutation matrix */\n    Index cols() const { return indices().size(); }\n\n    /** Direct access to the underlying index vector */\n    inline const StorageIndex& coeff(Index i) const { return indices().coeff(i); }\n    /** Direct access to the underlying index vector */\n    inline StorageIndex& coeffRef(Index i) { return indices().coeffRef(i); }\n    /** Direct access to the underlying index vector */\n    inline const StorageIndex& operator()(Index i) const { return indices()(i); }\n    /** Direct access to the underlying index vector */\n    inline StorageIndex& operator()(Index i) { return indices()(i); }\n    /** Direct access to the underlying index vector */\n    inline const StorageIndex& operator[](Index i) const { return indices()(i); }\n    /** Direct access to the underlying index vector */\n    inline StorageIndex& operator[](Index i) { return indices()(i); }\n\n    /** const version of indices(). */\n    const IndicesType& indices() const { return derived().indices(); }\n    /** \\returns a reference to the stored array representing the transpositions. */\n    IndicesType& indices() { return derived().indices(); }\n\n    /** Resizes to given size. 
*/\n    inline void resize(Index newSize)\n    {\n      indices().resize(newSize);\n    }\n\n    /** Sets \\c *this to represents an identity transformation */\n    void setIdentity()\n    {\n      for(StorageIndex i = 0; i < indices().size(); ++i)\n        coeffRef(i) = i;\n    }\n\n    // FIXME: do we want such methods ?\n    // might be usefull when the target matrix expression is complex, e.g.:\n    // object.matrix().block(..,..,..,..) = trans * object.matrix().block(..,..,..,..);\n    /*\n    template<typename MatrixType>\n    void applyForwardToRows(MatrixType& mat) const\n    {\n      for(Index k=0 ; k<size() ; ++k)\n        if(m_indices(k)!=k)\n          mat.row(k).swap(mat.row(m_indices(k)));\n    }\n\n    template<typename MatrixType>\n    void applyBackwardToRows(MatrixType& mat) const\n    {\n      for(Index k=size()-1 ; k>=0 ; --k)\n        if(m_indices(k)!=k)\n          mat.row(k).swap(mat.row(m_indices(k)));\n    }\n    */\n\n    /** \\returns the inverse transformation */\n    inline Transpose<TranspositionsBase> inverse() const\n    { return Transpose<TranspositionsBase>(derived()); }\n\n    /** \\returns the tranpose transformation */\n    inline Transpose<TranspositionsBase> transpose() const\n    { return Transpose<TranspositionsBase>(derived()); }\n\n  protected:\n};\n\nnamespace internal {\ntemplate<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex>\nstruct traits<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex> >\n : traits<PermutationMatrix<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex> >\n{\n  typedef Matrix<_StorageIndex, SizeAtCompileTime, 1, 0, MaxSizeAtCompileTime, 1> IndicesType;\n  typedef TranspositionsStorage StorageKind;\n};\n}\n\n/** \\class Transpositions\n  * \\ingroup Core_Module\n  *\n  * \\brief Represents a sequence of transpositions (row/column interchange)\n  *\n  * \\tparam SizeAtCompileTime the number of transpositions, or Dynamic\n  * \\tparam MaxSizeAtCompileTime the 
maximum number of transpositions, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it.\n  *\n  * This class represents a permutation transformation as a sequence of \\em n transpositions\n  * \\f$[T_{n-1} \\ldots T_{i} \\ldots T_{0}]\\f$. It is internally stored as a vector of integers \\c indices.\n  * Each transposition \\f$ T_{i} \\f$ applied on the left of a matrix (\\f$ T_{i} M\\f$) interchanges\n  * the rows \\c i and \\c indices[i] of the matrix \\c M.\n  * A transposition applied on the right (e.g., \\f$ M T_{i}\\f$) yields a column interchange.\n  *\n  * Compared to the class PermutationMatrix, such a sequence of transpositions is what is\n  * computed during a decomposition with pivoting, and it is faster when applying the permutation in-place.\n  *\n  * To apply a sequence of transpositions to a matrix, simply use the operator * as in the following example:\n  * \\code\n  * Transpositions tr;\n  * MatrixXf mat;\n  * mat = tr * mat;\n  * \\endcode\n  * In this example, we detect that the matrix appears on both side, and so the transpositions\n  * are applied in-place without any temporary or extra copy.\n  *\n  * \\sa class PermutationMatrix\n  */\n\ntemplate<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex>\nclass Transpositions : public TranspositionsBase<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex> >\n{\n    typedef internal::traits<Transpositions> Traits;\n  public:\n\n    typedef TranspositionsBase<Transpositions> Base;\n    typedef typename Traits::IndicesType IndicesType;\n    typedef typename IndicesType::Scalar StorageIndex;\n\n    inline Transpositions() {}\n\n    /** Copy constructor. */\n    template<typename OtherDerived>\n    inline Transpositions(const TranspositionsBase<OtherDerived>& other)\n      : m_indices(other.indices()) {}\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** Standard copy constructor. 
Defined only to prevent a default copy constructor\n      * from hiding the other templated constructor */\n    inline Transpositions(const Transpositions& other) : m_indices(other.indices()) {}\n    #endif\n\n    /** Generic constructor from expression of the transposition indices. */\n    template<typename Other>\n    explicit inline Transpositions(const MatrixBase<Other>& indices) : m_indices(indices)\n    {}\n\n    /** Copies the \\a other transpositions into \\c *this */\n    template<typename OtherDerived>\n    Transpositions& operator=(const TranspositionsBase<OtherDerived>& other)\n    {\n      return Base::operator=(other);\n    }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** This is a special case of the templated operator=. Its purpose is to\n      * prevent a default operator= from hiding the templated operator=.\n      */\n    Transpositions& operator=(const Transpositions& other)\n    {\n      m_indices = other.m_indices;\n      return *this;\n    }\n    #endif\n\n    /** Constructs an uninitialized permutation matrix of given size.\n      */\n    inline Transpositions(Index size) : m_indices(size)\n    {}\n\n    /** const version of indices(). */\n    const IndicesType& indices() const { return m_indices; }\n    /** \\returns a reference to the stored array representing the transpositions. 
*/\n    IndicesType& indices() { return m_indices; }\n\n  protected:\n\n    IndicesType m_indices;\n};\n\n\nnamespace internal {\ntemplate<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex, int _PacketAccess>\nstruct traits<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex>,_PacketAccess> >\n : traits<PermutationMatrix<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex> >\n{\n  typedef Map<const Matrix<_StorageIndex,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1>, _PacketAccess> IndicesType;\n  typedef _StorageIndex StorageIndex;\n  typedef TranspositionsStorage StorageKind;\n};\n}\n\ntemplate<int SizeAtCompileTime, int MaxSizeAtCompileTime, typename _StorageIndex, int PacketAccess>\nclass Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex>,PacketAccess>\n : public TranspositionsBase<Map<Transpositions<SizeAtCompileTime,MaxSizeAtCompileTime,_StorageIndex>,PacketAccess> >\n{\n    typedef internal::traits<Map> Traits;\n  public:\n\n    typedef TranspositionsBase<Map> Base;\n    typedef typename Traits::IndicesType IndicesType;\n    typedef typename IndicesType::Scalar StorageIndex;\n\n    explicit inline Map(const StorageIndex* indicesPtr)\n      : m_indices(indicesPtr)\n    {}\n\n    inline Map(const StorageIndex* indicesPtr, Index size)\n      : m_indices(indicesPtr,size)\n    {}\n\n    /** Copies the \\a other transpositions into \\c *this */\n    template<typename OtherDerived>\n    Map& operator=(const TranspositionsBase<OtherDerived>& other)\n    {\n      return Base::operator=(other);\n    }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** This is a special case of the templated operator=. Its purpose is to\n      * prevent a default operator= from hiding the templated operator=.\n      */\n    Map& operator=(const Map& other)\n    {\n      m_indices = other.m_indices;\n      return *this;\n    }\n    #endif\n\n    /** const version of indices(). 
*/\n    const IndicesType& indices() const { return m_indices; }\n    \n    /** \\returns a reference to the stored array representing the transpositions. */\n    IndicesType& indices() { return m_indices; }\n\n  protected:\n\n    IndicesType m_indices;\n};\n\nnamespace internal {\ntemplate<typename _IndicesType>\nstruct traits<TranspositionsWrapper<_IndicesType> >\n : traits<PermutationWrapper<_IndicesType> >\n{\n  typedef TranspositionsStorage StorageKind;\n};\n}\n\ntemplate<typename _IndicesType>\nclass TranspositionsWrapper\n : public TranspositionsBase<TranspositionsWrapper<_IndicesType> >\n{\n    typedef internal::traits<TranspositionsWrapper> Traits;\n  public:\n\n    typedef TranspositionsBase<TranspositionsWrapper> Base;\n    typedef typename Traits::IndicesType IndicesType;\n    typedef typename IndicesType::Scalar StorageIndex;\n\n    explicit inline TranspositionsWrapper(IndicesType& indices)\n      : m_indices(indices)\n    {}\n\n    /** Copies the \\a other transpositions into \\c *this */\n    template<typename OtherDerived>\n    TranspositionsWrapper& operator=(const TranspositionsBase<OtherDerived>& other)\n    {\n      return Base::operator=(other);\n    }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** This is a special case of the templated operator=. Its purpose is to\n      * prevent a default operator= from hiding the templated operator=.\n      */\n    TranspositionsWrapper& operator=(const TranspositionsWrapper& other)\n    {\n      m_indices = other.m_indices;\n      return *this;\n    }\n    #endif\n\n    /** const version of indices(). */\n    const IndicesType& indices() const { return m_indices; }\n\n    /** \\returns a reference to the stored array representing the transpositions. 
*/\n    IndicesType& indices() { return m_indices; }\n\n  protected:\n\n    typename IndicesType::Nested m_indices;\n};\n\n\n\n/** \\returns the \\a matrix with the \\a transpositions applied to the columns.\n  */\ntemplate<typename MatrixDerived, typename TranspositionsDerived>\nEIGEN_DEVICE_FUNC\nconst Product<MatrixDerived, TranspositionsDerived, AliasFreeProduct>\noperator*(const MatrixBase<MatrixDerived> &matrix,\n          const TranspositionsBase<TranspositionsDerived>& transpositions)\n{\n  return Product<MatrixDerived, TranspositionsDerived, AliasFreeProduct>\n            (matrix.derived(), transpositions.derived());\n}\n\n/** \\returns the \\a matrix with the \\a transpositions applied to the rows.\n  */\ntemplate<typename TranspositionsDerived, typename MatrixDerived>\nEIGEN_DEVICE_FUNC\nconst Product<TranspositionsDerived, MatrixDerived, AliasFreeProduct>\noperator*(const TranspositionsBase<TranspositionsDerived> &transpositions,\n          const MatrixBase<MatrixDerived>& matrix)\n{\n  return Product<TranspositionsDerived, MatrixDerived, AliasFreeProduct>\n            (transpositions.derived(), matrix.derived());\n}\n\n// Template partial specialization for transposed/inverse transpositions\n\nnamespace internal {\n\ntemplate<typename Derived>\nstruct traits<Transpose<TranspositionsBase<Derived> > >\n : traits<Derived>\n{};\n\n} // end namespace internal\n\ntemplate<typename TranspositionsDerived>\nclass Transpose<TranspositionsBase<TranspositionsDerived> >\n{\n    typedef TranspositionsDerived TranspositionType;\n    typedef typename TranspositionType::IndicesType IndicesType;\n  public:\n\n    explicit Transpose(const TranspositionType& t) : m_transpositions(t) {}\n\n    Index size() const { return m_transpositions.size(); }\n    Index rows() const { return m_transpositions.size(); }\n    Index cols() const { return m_transpositions.size(); }\n\n    /** \\returns the \\a matrix with the inverse transpositions applied to the columns.\n      */\n    
template<typename OtherDerived> friend\n    const Product<OtherDerived, Transpose, AliasFreeProduct>\n    operator*(const MatrixBase<OtherDerived>& matrix, const Transpose& trt)\n    {\n      return Product<OtherDerived, Transpose, AliasFreeProduct>(matrix.derived(), trt.derived());\n    }\n\n    /** \\returns the \\a matrix with the inverse transpositions applied to the rows.\n      */\n    template<typename OtherDerived>\n    const Product<Transpose, OtherDerived, AliasFreeProduct>\n    operator*(const MatrixBase<OtherDerived>& matrix) const\n    {\n      return Product<Transpose, OtherDerived, AliasFreeProduct>(*this, matrix.derived());\n    }\n    \n    const TranspositionType& nestedExpression() const { return m_transpositions; }\n\n  protected:\n    const TranspositionType& m_transpositions;\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_TRANSPOSITIONS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/TriangularMatrix.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_TRIANGULARMATRIX_H\n#define EIGEN_TRIANGULARMATRIX_H\n\nnamespace Eigen { \n\nnamespace internal {\n  \ntemplate<int Side, typename TriangularType, typename Rhs> struct triangular_solve_retval;\n  \n}\n\n/** \\class TriangularBase\n  * \\ingroup Core_Module\n  *\n  * \\brief Base class for triangular part in a matrix\n  */\ntemplate<typename Derived> class TriangularBase : public EigenBase<Derived>\n{\n  public:\n\n    enum {\n      Mode = internal::traits<Derived>::Mode,\n      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,\n      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,\n      MaxRowsAtCompileTime = internal::traits<Derived>::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = internal::traits<Derived>::MaxColsAtCompileTime,\n      \n      SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,\n                                                   internal::traits<Derived>::ColsAtCompileTime>::ret),\n      /**< This is equal to the number of coefficients, i.e. the number of\n          * rows times the number of columns, or to \\a Dynamic if this is not\n          * known at compile-time. 
\\sa RowsAtCompileTime, ColsAtCompileTime */\n      \n      MaxSizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::MaxRowsAtCompileTime,\n                                                   internal::traits<Derived>::MaxColsAtCompileTime>::ret)\n        \n    };\n    typedef typename internal::traits<Derived>::Scalar Scalar;\n    typedef typename internal::traits<Derived>::StorageKind StorageKind;\n    typedef typename internal::traits<Derived>::StorageIndex StorageIndex;\n    typedef typename internal::traits<Derived>::FullMatrixType DenseMatrixType;\n    typedef DenseMatrixType DenseType;\n    typedef Derived const& Nested;\n\n    EIGEN_DEVICE_FUNC\n    inline TriangularBase() { eigen_assert(!((Mode&UnitDiag) && (Mode&ZeroDiag))); }\n\n    EIGEN_DEVICE_FUNC\n    inline Index rows() const { return derived().rows(); }\n    EIGEN_DEVICE_FUNC\n    inline Index cols() const { return derived().cols(); }\n    EIGEN_DEVICE_FUNC\n    inline Index outerStride() const { return derived().outerStride(); }\n    EIGEN_DEVICE_FUNC\n    inline Index innerStride() const { return derived().innerStride(); }\n    \n    // dummy resize function\n    void resize(Index rows, Index cols)\n    {\n      EIGEN_UNUSED_VARIABLE(rows);\n      EIGEN_UNUSED_VARIABLE(cols);\n      eigen_assert(rows==this->rows() && cols==this->cols());\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline Scalar coeff(Index row, Index col) const  { return derived().coeff(row,col); }\n    EIGEN_DEVICE_FUNC\n    inline Scalar& coeffRef(Index row, Index col) { return derived().coeffRef(row,col); }\n\n    /** \\see MatrixBase::copyCoeff(row,col)\n      */\n    template<typename Other>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, Other& other)\n    {\n      derived().coeffRef(row, col) = other.coeff(row, col);\n    }\n\n    EIGEN_DEVICE_FUNC\n    inline Scalar operator()(Index row, Index col) const\n    {\n      check_coordinates(row, col);\n      return 
coeff(row,col);\n    }\n    EIGEN_DEVICE_FUNC\n    inline Scalar& operator()(Index row, Index col)\n    {\n      check_coordinates(row, col);\n      return coeffRef(row,col);\n    }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    EIGEN_DEVICE_FUNC\n    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }\n    EIGEN_DEVICE_FUNC\n    inline Derived& derived() { return *static_cast<Derived*>(this); }\n    #endif // not EIGEN_PARSED_BY_DOXYGEN\n\n    template<typename DenseDerived>\n    EIGEN_DEVICE_FUNC\n    void evalTo(MatrixBase<DenseDerived> &other) const;\n    template<typename DenseDerived>\n    EIGEN_DEVICE_FUNC\n    void evalToLazy(MatrixBase<DenseDerived> &other) const;\n\n    EIGEN_DEVICE_FUNC\n    DenseMatrixType toDenseMatrix() const\n    {\n      DenseMatrixType res(rows(), cols());\n      evalToLazy(res);\n      return res;\n    }\n\n  protected:\n\n    void check_coordinates(Index row, Index col) const\n    {\n      EIGEN_ONLY_USED_FOR_DEBUG(row);\n      EIGEN_ONLY_USED_FOR_DEBUG(col);\n      eigen_assert(col>=0 && col<cols() && row>=0 && row<rows());\n      const int mode = int(Mode) & ~SelfAdjoint;\n      EIGEN_ONLY_USED_FOR_DEBUG(mode);\n      eigen_assert((mode==Upper && col>=row)\n                || (mode==Lower && col<=row)\n                || ((mode==StrictlyUpper || mode==UnitUpper) && col>row)\n                || ((mode==StrictlyLower || mode==UnitLower) && col<row));\n    }\n\n    #ifdef EIGEN_INTERNAL_DEBUGGING\n    void check_coordinates_internal(Index row, Index col) const\n    {\n      check_coordinates(row, col);\n    }\n    #else\n    void check_coordinates_internal(Index , Index ) const {}\n    #endif\n\n};\n\n/** \\class TriangularView\n  * \\ingroup Core_Module\n  *\n  * \\brief Expression of a triangular part in a matrix\n  *\n  * \\param MatrixType the type of the object in which we are taking the triangular part\n  * \\param Mode the kind of triangular matrix expression to construct. 
Can be #Upper,\n  *             #Lower, #UnitUpper, #UnitLower, #StrictlyUpper, or #StrictlyLower.\n  *             This is in fact a bit field; it must have either #Upper or #Lower, \n  *             and additionally it may have #UnitDiag or #ZeroDiag or neither.\n  *\n  * This class represents a triangular part of a matrix, not necessarily square. Strictly speaking, for rectangular\n  * matrices one should speak of \"trapezoid\" parts. This class is the return type\n  * of MatrixBase::triangularView() and SparseMatrixBase::triangularView(), and most of the time this is the only way it is used.\n  *\n  * \\sa MatrixBase::triangularView()\n  */\nnamespace internal {\ntemplate<typename MatrixType, unsigned int _Mode>\nstruct traits<TriangularView<MatrixType, _Mode> > : traits<MatrixType>\n{\n  typedef typename ref_selector<MatrixType>::non_const_type MatrixTypeNested;\n  typedef typename remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;\n  typedef typename remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;\n  typedef typename MatrixType::PlainObject FullMatrixType;\n  typedef MatrixType ExpressionType;\n  enum {\n    Mode = _Mode,\n    FlagsLvalueBit = is_lvalue<MatrixType>::value ? 
LvalueBit : 0,\n    Flags = (MatrixTypeNestedCleaned::Flags & (HereditaryBits | FlagsLvalueBit) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit)))\n  };\n};\n}\n\ntemplate<typename _MatrixType, unsigned int _Mode, typename StorageKind> class TriangularViewImpl;\n\ntemplate<typename _MatrixType, unsigned int _Mode> class TriangularView\n  : public TriangularViewImpl<_MatrixType, _Mode, typename internal::traits<_MatrixType>::StorageKind >\n{\n  public:\n\n    typedef TriangularViewImpl<_MatrixType, _Mode, typename internal::traits<_MatrixType>::StorageKind > Base;\n    typedef typename internal::traits<TriangularView>::Scalar Scalar;\n    typedef _MatrixType MatrixType;\n\n  protected:\n    typedef typename internal::traits<TriangularView>::MatrixTypeNested MatrixTypeNested;\n    typedef typename internal::traits<TriangularView>::MatrixTypeNestedNonRef MatrixTypeNestedNonRef;\n\n    typedef typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type MatrixConjugateReturnType;\n    \n  public:\n\n    typedef typename internal::traits<TriangularView>::StorageKind StorageKind;\n    typedef typename internal::traits<TriangularView>::MatrixTypeNestedCleaned NestedExpression;\n\n    enum {\n      Mode = _Mode,\n      Flags = internal::traits<TriangularView>::Flags,\n      TransposeMode = (Mode & Upper ? Lower : 0)\n                    | (Mode & Lower ? 
Upper : 0)\n                    | (Mode & (UnitDiag))\n                    | (Mode & (ZeroDiag)),\n      IsVectorAtCompileTime = false\n    };\n\n    EIGEN_DEVICE_FUNC\n    explicit inline TriangularView(MatrixType& matrix) : m_matrix(matrix)\n    {}\n    \n    using Base::operator=;\n    TriangularView& operator=(const TriangularView &other)\n    { return Base::operator=(other); }\n\n    /** \\copydoc EigenBase::rows() */\n    EIGEN_DEVICE_FUNC\n    inline Index rows() const { return m_matrix.rows(); }\n    /** \\copydoc EigenBase::cols() */\n    EIGEN_DEVICE_FUNC\n    inline Index cols() const { return m_matrix.cols(); }\n\n    /** \\returns a const reference to the nested expression */\n    EIGEN_DEVICE_FUNC\n    const NestedExpression& nestedExpression() const { return m_matrix; }\n\n    /** \\returns a reference to the nested expression */\n    EIGEN_DEVICE_FUNC\n    NestedExpression& nestedExpression() { return m_matrix; }\n    \n    typedef TriangularView<const MatrixConjugateReturnType,Mode> ConjugateReturnType;\n    /** \\sa MatrixBase::conjugate() const */\n    EIGEN_DEVICE_FUNC\n    inline const ConjugateReturnType conjugate() const\n    { return ConjugateReturnType(m_matrix.conjugate()); }\n\n    typedef TriangularView<const typename MatrixType::AdjointReturnType,TransposeMode> AdjointReturnType;\n    /** \\sa MatrixBase::adjoint() const */\n    EIGEN_DEVICE_FUNC\n    inline const AdjointReturnType adjoint() const\n    { return AdjointReturnType(m_matrix.adjoint()); }\n\n    typedef TriangularView<typename MatrixType::TransposeReturnType,TransposeMode> TransposeReturnType;\n     /** \\sa MatrixBase::transpose() */\n    EIGEN_DEVICE_FUNC\n    inline TransposeReturnType transpose()\n    {\n      EIGEN_STATIC_ASSERT_LVALUE(MatrixType)\n      typename MatrixType::TransposeReturnType tmp(m_matrix);\n      return TransposeReturnType(tmp);\n    }\n    \n    typedef TriangularView<const typename MatrixType::ConstTransposeReturnType,TransposeMode> 
ConstTransposeReturnType;\n    /** \\sa MatrixBase::transpose() const */\n    EIGEN_DEVICE_FUNC\n    inline const ConstTransposeReturnType transpose() const\n    {\n      return ConstTransposeReturnType(m_matrix.transpose());\n    }\n\n    template<typename Other>\n    EIGEN_DEVICE_FUNC\n    inline const Solve<TriangularView, Other> \n    solve(const MatrixBase<Other>& other) const\n    { return Solve<TriangularView, Other>(*this, other.derived()); }\n    \n  // workaround MSVC ICE\n  #if EIGEN_COMP_MSVC\n    template<int Side, typename Other>\n    EIGEN_DEVICE_FUNC\n    inline const internal::triangular_solve_retval<Side,TriangularView, Other>\n    solve(const MatrixBase<Other>& other) const\n    { return Base::template solve<Side>(other); }\n  #else\n    using Base::solve;\n  #endif\n\n    /** \\returns a selfadjoint view of the referenced triangular part which must be either \\c #Upper or \\c #Lower.\n      *\n      * This is a shortcut for \\code this->nestedExpression().selfadjointView<(*this)::Mode>() \\endcode\n      * \\sa MatrixBase::selfadjointView() */\n    EIGEN_DEVICE_FUNC\n    SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView()\n    {\n      EIGEN_STATIC_ASSERT((Mode&(UnitDiag|ZeroDiag))==0,PROGRAMMING_ERROR);\n      return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix);\n    }\n\n    /** This is the const version of selfadjointView() */\n    EIGEN_DEVICE_FUNC\n    const SelfAdjointView<MatrixTypeNestedNonRef,Mode> selfadjointView() const\n    {\n      EIGEN_STATIC_ASSERT((Mode&(UnitDiag|ZeroDiag))==0,PROGRAMMING_ERROR);\n      return SelfAdjointView<MatrixTypeNestedNonRef,Mode>(m_matrix);\n    }\n\n\n    /** \\returns the determinant of the triangular matrix\n      * \\sa MatrixBase::determinant() */\n    EIGEN_DEVICE_FUNC\n    Scalar determinant() const\n    {\n      if (Mode & UnitDiag)\n        return 1;\n      else if (Mode & ZeroDiag)\n        return 0;\n      else\n        return m_matrix.diagonal().prod();\n    }\n      \n  
protected:\n\n    MatrixTypeNested m_matrix;\n};\n\n/** \\ingroup Core_Module\n  *\n  * \\brief Base class for a triangular part in a \\b dense matrix\n  *\n  * This class is an abstract base class of class TriangularView, and objects of type TriangularViewImpl cannot be instantiated.\n  * It extends class TriangularView with additional methods which available for dense expressions only.\n  *\n  * \\sa class TriangularView, MatrixBase::triangularView()\n  */\ntemplate<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_MatrixType,_Mode,Dense>\n  : public TriangularBase<TriangularView<_MatrixType, _Mode> >\n{\n  public:\n\n    typedef TriangularView<_MatrixType, _Mode> TriangularViewType;\n    typedef TriangularBase<TriangularViewType> Base;\n    typedef typename internal::traits<TriangularViewType>::Scalar Scalar;\n\n    typedef _MatrixType MatrixType;\n    typedef typename MatrixType::PlainObject DenseMatrixType;\n    typedef DenseMatrixType PlainObject;\n\n  public:\n    using Base::evalToLazy;\n    using Base::derived;\n\n    typedef typename internal::traits<TriangularViewType>::StorageKind StorageKind;\n\n    enum {\n      Mode = _Mode,\n      Flags = internal::traits<TriangularViewType>::Flags\n    };\n\n    /** \\returns the outer-stride of the underlying dense matrix\n      * \\sa DenseCoeffsBase::outerStride() */\n    EIGEN_DEVICE_FUNC\n    inline Index outerStride() const { return derived().nestedExpression().outerStride(); }\n    /** \\returns the inner-stride of the underlying dense matrix\n      * \\sa DenseCoeffsBase::innerStride() */\n    EIGEN_DEVICE_FUNC\n    inline Index innerStride() const { return derived().nestedExpression().innerStride(); }\n\n    /** \\sa MatrixBase::operator+=() */\n    template<typename Other>\n    EIGEN_DEVICE_FUNC\n    TriangularViewType&  operator+=(const DenseBase<Other>& other) {\n      internal::call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar,typename 
Other::Scalar>());\n      return derived();\n    }\n    /** \\sa MatrixBase::operator-=() */\n    template<typename Other>\n    EIGEN_DEVICE_FUNC\n    TriangularViewType&  operator-=(const DenseBase<Other>& other) {\n      internal::call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename Other::Scalar>());\n      return derived();\n    }\n    \n    /** \\sa MatrixBase::operator*=() */\n    EIGEN_DEVICE_FUNC\n    TriangularViewType&  operator*=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = derived().nestedExpression() * other; }\n    /** \\sa DenseBase::operator/=() */\n    EIGEN_DEVICE_FUNC\n    TriangularViewType&  operator/=(const typename internal::traits<MatrixType>::Scalar& other) { return *this = derived().nestedExpression() / other; }\n\n    /** \\sa MatrixBase::fill() */\n    EIGEN_DEVICE_FUNC\n    void fill(const Scalar& value) { setConstant(value); }\n    /** \\sa MatrixBase::setConstant() */\n    EIGEN_DEVICE_FUNC\n    TriangularViewType& setConstant(const Scalar& value)\n    { return *this = MatrixType::Constant(derived().rows(), derived().cols(), value); }\n    /** \\sa MatrixBase::setZero() */\n    EIGEN_DEVICE_FUNC\n    TriangularViewType& setZero() { return setConstant(Scalar(0)); }\n    /** \\sa MatrixBase::setOnes() */\n    EIGEN_DEVICE_FUNC\n    TriangularViewType& setOnes() { return setConstant(Scalar(1)); }\n\n    /** \\sa MatrixBase::coeff()\n      * \\warning the coordinates must fit into the referenced triangular part\n      */\n    EIGEN_DEVICE_FUNC\n    inline Scalar coeff(Index row, Index col) const\n    {\n      Base::check_coordinates_internal(row, col);\n      return derived().nestedExpression().coeff(row, col);\n    }\n\n    /** \\sa MatrixBase::coeffRef()\n      * \\warning the coordinates must fit into the referenced triangular part\n      */\n    EIGEN_DEVICE_FUNC\n    inline Scalar& coeffRef(Index row, Index col)\n    {\n      
EIGEN_STATIC_ASSERT_LVALUE(TriangularViewType);\n      Base::check_coordinates_internal(row, col);\n      return derived().nestedExpression().coeffRef(row, col);\n    }\n\n    /** Assigns a triangular matrix to a triangular part of a dense matrix */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    TriangularViewType& operator=(const TriangularBase<OtherDerived>& other);\n\n    /** Shortcut for\\code *this = other.other.triangularView<(*this)::Mode>() \\endcode */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    TriangularViewType& operator=(const MatrixBase<OtherDerived>& other);\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    EIGEN_DEVICE_FUNC\n    TriangularViewType& operator=(const TriangularViewImpl& other)\n    { return *this = other.derived().nestedExpression(); }\n\n    /** \\deprecated */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    void lazyAssign(const TriangularBase<OtherDerived>& other);\n\n    /** \\deprecated */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    void lazyAssign(const MatrixBase<OtherDerived>& other);\n#endif\n\n    /** Efficient triangular matrix times vector/matrix product */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    const Product<TriangularViewType,OtherDerived>\n    operator*(const MatrixBase<OtherDerived>& rhs) const\n    {\n      return Product<TriangularViewType,OtherDerived>(derived(), rhs.derived());\n    }\n\n    /** Efficient vector/matrix times triangular matrix product */\n    template<typename OtherDerived> friend\n    EIGEN_DEVICE_FUNC\n    const Product<OtherDerived,TriangularViewType>\n    operator*(const MatrixBase<OtherDerived>& lhs, const TriangularViewImpl& rhs)\n    {\n      return Product<OtherDerived,TriangularViewType>(lhs.derived(),rhs.derived());\n    }\n\n    /** \\returns the product of the inverse of \\c *this with \\a other, \\a *this being triangular.\n      *\n      * This function computes the inverse-matrix matrix product 
inverse(\\c *this) * \\a other if\n      * \\a Side==OnTheLeft (the default), or the right-inverse-multiply  \\a other * inverse(\\c *this) if\n      * \\a Side==OnTheRight.\n      *\n      * Note that the template parameter \\c Side can be ommitted, in which case \\c Side==OnTheLeft\n      *\n      * The matrix \\c *this must be triangular and invertible (i.e., all the coefficients of the\n      * diagonal must be non zero). It works as a forward (resp. backward) substitution if \\c *this\n      * is an upper (resp. lower) triangular matrix.\n      *\n      * Example: \\include Triangular_solve.cpp\n      * Output: \\verbinclude Triangular_solve.out\n      *\n      * This function returns an expression of the inverse-multiply and can works in-place if it is assigned\n      * to the same matrix or vector \\a other.\n      *\n      * For users coming from BLAS, this function (and more specifically solveInPlace()) offer\n      * all the operations supported by the \\c *TRSV and \\c *TRSM BLAS routines.\n      *\n      * \\sa TriangularView::solveInPlace()\n      */\n    template<int Side, typename Other>\n    inline const internal::triangular_solve_retval<Side,TriangularViewType, Other>\n    solve(const MatrixBase<Other>& other) const;\n\n    /** \"in-place\" version of TriangularView::solve() where the result is written in \\a other\n      *\n      * \\warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here.\n      * This function will const_cast it, so constness isn't honored here.\n      *\n      * Note that the template parameter \\c Side can be ommitted, in which case \\c Side==OnTheLeft\n      *\n      * See TriangularView:solve() for the details.\n      */\n    template<int Side, typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    void solveInPlace(const MatrixBase<OtherDerived>& other) const;\n\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    void solveInPlace(const MatrixBase<OtherDerived>& 
other) const\n    { return solveInPlace<OnTheLeft>(other); }\n\n    /** Swaps the coefficients of the common triangular parts of two matrices */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n#ifdef EIGEN_PARSED_BY_DOXYGEN\n    void swap(TriangularBase<OtherDerived> &other)\n#else\n    void swap(TriangularBase<OtherDerived> const & other)\n#endif\n    {\n      EIGEN_STATIC_ASSERT_LVALUE(OtherDerived);\n      call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>());\n    }\n\n    /** \\deprecated\n      * Shortcut for \\code (*this).swap(other.triangularView<(*this)::Mode>()) \\endcode */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    void swap(MatrixBase<OtherDerived> const & other)\n    {\n      EIGEN_STATIC_ASSERT_LVALUE(OtherDerived);\n      call_assignment(derived(), other.const_cast_derived(), internal::swap_assign_op<Scalar>());\n    }\n\n    template<typename RhsType, typename DstType>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const {\n      if(!internal::is_same_dense(dst,rhs))\n        dst = rhs;\n      this->solveInPlace(dst);\n    }\n\n    template<typename ProductType>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE TriangularViewType& _assignProduct(const ProductType& prod, const Scalar& alpha, bool beta);\n};\n\n/***************************************************************************\n* Implementation of triangular evaluation/assignment\n***************************************************************************/\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n// FIXME should we keep that possibility\ntemplate<typename MatrixType, unsigned int Mode>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC inline TriangularView<MatrixType, Mode>&\nTriangularViewImpl<MatrixType, Mode, Dense>::operator=(const MatrixBase<OtherDerived>& other)\n{\n  internal::call_assignment_no_alias(derived(), other.derived(), internal::assign_op<Scalar,typename 
OtherDerived::Scalar>());\n  return derived();\n}\n\n// FIXME should we keep that possibility\ntemplate<typename MatrixType, unsigned int Mode>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const MatrixBase<OtherDerived>& other)\n{\n  internal::call_assignment_no_alias(derived(), other.template triangularView<Mode>());\n}\n\n\n\ntemplate<typename MatrixType, unsigned int Mode>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC inline TriangularView<MatrixType, Mode>&\nTriangularViewImpl<MatrixType, Mode, Dense>::operator=(const TriangularBase<OtherDerived>& other)\n{\n  eigen_assert(Mode == int(OtherDerived::Mode));\n  internal::call_assignment(derived(), other.derived());\n  return derived();\n}\n\ntemplate<typename MatrixType, unsigned int Mode>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC void TriangularViewImpl<MatrixType, Mode, Dense>::lazyAssign(const TriangularBase<OtherDerived>& other)\n{\n  eigen_assert(Mode == int(OtherDerived::Mode));\n  internal::call_assignment_no_alias(derived(), other.derived());\n}\n#endif\n\n/***************************************************************************\n* Implementation of TriangularBase methods\n***************************************************************************/\n\n/** Assigns a triangular or selfadjoint matrix to a dense matrix.\n  * If the matrix is triangular, the opposite part is set to zero. 
*/\ntemplate<typename Derived>\ntemplate<typename DenseDerived>\nEIGEN_DEVICE_FUNC void TriangularBase<Derived>::evalTo(MatrixBase<DenseDerived> &other) const\n{\n  evalToLazy(other.derived());\n}\n\n/***************************************************************************\n* Implementation of TriangularView methods\n***************************************************************************/\n\n/***************************************************************************\n* Implementation of MatrixBase methods\n***************************************************************************/\n\n/**\n  * \\returns an expression of a triangular view extracted from the current matrix\n  *\n  * The parameter \\a Mode can have the following values: \\c #Upper, \\c #StrictlyUpper, \\c #UnitUpper,\n  * \\c #Lower, \\c #StrictlyLower, \\c #UnitLower.\n  *\n  * Example: \\include MatrixBase_triangularView.cpp\n  * Output: \\verbinclude MatrixBase_triangularView.out\n  *\n  * \\sa class TriangularView\n  */\ntemplate<typename Derived>\ntemplate<unsigned int Mode>\nEIGEN_DEVICE_FUNC\ntypename MatrixBase<Derived>::template TriangularViewReturnType<Mode>::Type\nMatrixBase<Derived>::triangularView()\n{\n  return typename TriangularViewReturnType<Mode>::Type(derived());\n}\n\n/** This is the const version of MatrixBase::triangularView() */\ntemplate<typename Derived>\ntemplate<unsigned int Mode>\nEIGEN_DEVICE_FUNC\ntypename MatrixBase<Derived>::template ConstTriangularViewReturnType<Mode>::Type\nMatrixBase<Derived>::triangularView() const\n{\n  return typename ConstTriangularViewReturnType<Mode>::Type(derived());\n}\n\n/** \\returns true if *this is approximately equal to an upper triangular matrix,\n  *          within the precision given by \\a prec.\n  *\n  * \\sa isLowerTriangular()\n  */\ntemplate<typename Derived>\nbool MatrixBase<Derived>::isUpperTriangular(const RealScalar& prec) const\n{\n  RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1);\n  for(Index j = 0; j < 
cols(); ++j)\n  {\n    Index maxi = numext::mini(j, rows()-1);\n    for(Index i = 0; i <= maxi; ++i)\n    {\n      RealScalar absValue = numext::abs(coeff(i,j));\n      if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue;\n    }\n  }\n  RealScalar threshold = maxAbsOnUpperPart * prec;\n  for(Index j = 0; j < cols(); ++j)\n    for(Index i = j+1; i < rows(); ++i)\n      if(numext::abs(coeff(i, j)) > threshold) return false;\n  return true;\n}\n\n/** \\returns true if *this is approximately equal to a lower triangular matrix,\n  *          within the precision given by \\a prec.\n  *\n  * \\sa isUpperTriangular()\n  */\ntemplate<typename Derived>\nbool MatrixBase<Derived>::isLowerTriangular(const RealScalar& prec) const\n{\n  RealScalar maxAbsOnLowerPart = static_cast<RealScalar>(-1);\n  for(Index j = 0; j < cols(); ++j)\n    for(Index i = j; i < rows(); ++i)\n    {\n      RealScalar absValue = numext::abs(coeff(i,j));\n      if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue;\n    }\n  RealScalar threshold = maxAbsOnLowerPart * prec;\n  for(Index j = 1; j < cols(); ++j)\n  {\n    Index maxi = numext::mini(j, rows()-1);\n    for(Index i = 0; i < maxi; ++i)\n      if(numext::abs(coeff(i, j)) > threshold) return false;\n  }\n  return true;\n}\n\n\n/***************************************************************************\n****************************************************************************\n* Evaluators and Assignment of triangular expressions\n***************************************************************************\n***************************************************************************/\n\nnamespace internal {\n\n  \n// TODO currently a triangular expression has the form TriangularView<.,.>\n//      in the future triangular-ness should be defined by the expression traits\n//      such that Transpose<TriangularView<.,.> > is valid. 
(currently TriangularBase::transpose() is overloaded to make it work)\ntemplate<typename MatrixType, unsigned int Mode>\nstruct evaluator_traits<TriangularView<MatrixType,Mode> >\n{\n  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;\n  typedef typename glue_shapes<typename evaluator_traits<MatrixType>::Shape, TriangularShape>::type Shape;\n};\n\ntemplate<typename MatrixType, unsigned int Mode>\nstruct unary_evaluator<TriangularView<MatrixType,Mode>, IndexBased>\n : evaluator<typename internal::remove_all<MatrixType>::type>\n{\n  typedef TriangularView<MatrixType,Mode> XprType;\n  typedef evaluator<typename internal::remove_all<MatrixType>::type> Base;\n  unary_evaluator(const XprType &xpr) : Base(xpr.nestedExpression()) {}\n};\n\n// Additional assignment kinds:\nstruct Triangular2Triangular    {};\nstruct Triangular2Dense         {};\nstruct Dense2Triangular         {};\n\n\ntemplate<typename Kernel, unsigned int Mode, int UnrollCount, bool ClearOpposite> struct triangular_assignment_loop;\n\n \n/** \\internal Specialization of the dense assignment kernel for triangular matrices.\n  * The main difference is that the triangular, diagonal, and opposite parts are processed through three different functions.\n  * \\tparam UpLo must be either Lower or Upper\n  * \\tparam Mode must be either 0, UnitDiag, ZeroDiag, or SelfAdjoint\n  */\ntemplate<int UpLo, int Mode, int SetOpposite, typename DstEvaluatorTypeT, typename SrcEvaluatorTypeT, typename Functor, int Version = Specialized>\nclass triangular_dense_assignment_kernel : public generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version>\n{\nprotected:\n  typedef generic_dense_assignment_kernel<DstEvaluatorTypeT, SrcEvaluatorTypeT, Functor, Version> Base;\n  typedef typename Base::DstXprType DstXprType;\n  typedef typename Base::SrcXprType SrcXprType;\n  using Base::m_dst;\n  using Base::m_src;\n  using Base::m_functor;\npublic:\n  \n  typedef 
typename Base::DstEvaluatorType DstEvaluatorType;\n  typedef typename Base::SrcEvaluatorType SrcEvaluatorType;\n  typedef typename Base::Scalar Scalar;\n  typedef typename Base::AssignmentTraits AssignmentTraits;\n  \n  \n  EIGEN_DEVICE_FUNC triangular_dense_assignment_kernel(DstEvaluatorType &dst, const SrcEvaluatorType &src, const Functor &func, DstXprType& dstExpr)\n    : Base(dst, src, func, dstExpr)\n  {}\n  \n#ifdef EIGEN_INTERNAL_DEBUGGING\n  EIGEN_DEVICE_FUNC void assignCoeff(Index row, Index col)\n  {\n    eigen_internal_assert(row!=col);\n    Base::assignCoeff(row,col);\n  }\n#else\n  using Base::assignCoeff;\n#endif\n  \n  EIGEN_DEVICE_FUNC void assignDiagonalCoeff(Index id)\n  {\n         if(Mode==UnitDiag && SetOpposite) m_functor.assignCoeff(m_dst.coeffRef(id,id), Scalar(1));\n    else if(Mode==ZeroDiag && SetOpposite) m_functor.assignCoeff(m_dst.coeffRef(id,id), Scalar(0));\n    else if(Mode==0)                       Base::assignCoeff(id,id);\n  }\n  \n  EIGEN_DEVICE_FUNC void assignOppositeCoeff(Index row, Index col)\n  { \n    eigen_internal_assert(row!=col);\n    if(SetOpposite)\n      m_functor.assignCoeff(m_dst.coeffRef(row,col), Scalar(0));\n  }\n};\n\ntemplate<int Mode, bool SetOpposite, typename DstXprType, typename SrcXprType, typename Functor>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nvoid call_triangular_assignment_loop(DstXprType& dst, const SrcXprType& src, const Functor &func)\n{\n  typedef evaluator<DstXprType> DstEvaluatorType;\n  typedef evaluator<SrcXprType> SrcEvaluatorType;\n\n  SrcEvaluatorType srcEvaluator(src);\n\n  Index dstRows = src.rows();\n  Index dstCols = src.cols();\n  if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n    dst.resize(dstRows, dstCols);\n  DstEvaluatorType dstEvaluator(dst);\n    \n  typedef triangular_dense_assignment_kernel< Mode&(Lower|Upper),Mode&(UnitDiag|ZeroDiag|SelfAdjoint),SetOpposite,\n                                              DstEvaluatorType,SrcEvaluatorType,Functor> Kernel;\n  Kernel 
kernel(dstEvaluator, srcEvaluator, func, dst.const_cast_derived());\n  \n  enum {\n      unroll = DstXprType::SizeAtCompileTime != Dynamic\n            && SrcEvaluatorType::CoeffReadCost < HugeCost\n            && DstXprType::SizeAtCompileTime * (DstEvaluatorType::CoeffReadCost+SrcEvaluatorType::CoeffReadCost) / 2 <= EIGEN_UNROLLING_LIMIT\n    };\n  \n  triangular_assignment_loop<Kernel, Mode, unroll ? int(DstXprType::SizeAtCompileTime) : Dynamic, SetOpposite>::run(kernel);\n}\n\ntemplate<int Mode, bool SetOpposite, typename DstXprType, typename SrcXprType>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nvoid call_triangular_assignment_loop(DstXprType& dst, const SrcXprType& src)\n{\n  call_triangular_assignment_loop<Mode,SetOpposite>(dst, src, internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>());\n}\n\ntemplate<> struct AssignmentKind<TriangularShape,TriangularShape> { typedef Triangular2Triangular Kind; };\ntemplate<> struct AssignmentKind<DenseShape,TriangularShape>      { typedef Triangular2Dense      Kind; };\ntemplate<> struct AssignmentKind<TriangularShape,DenseShape>      { typedef Dense2Triangular      Kind; };\n\n\ntemplate< typename DstXprType, typename SrcXprType, typename Functor>\nstruct Assignment<DstXprType, SrcXprType, Functor, Triangular2Triangular>\n{\n  EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)\n  {\n    eigen_assert(int(DstXprType::Mode) == int(SrcXprType::Mode));\n    \n    call_triangular_assignment_loop<DstXprType::Mode, false>(dst, src, func);  \n  }\n};\n\ntemplate< typename DstXprType, typename SrcXprType, typename Functor>\nstruct Assignment<DstXprType, SrcXprType, Functor, Triangular2Dense>\n{\n  EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)\n  {\n    call_triangular_assignment_loop<SrcXprType::Mode, (SrcXprType::Mode&SelfAdjoint)==0>(dst, src, func);  \n  }\n};\n\ntemplate< typename DstXprType, typename SrcXprType, 
typename Functor>\nstruct Assignment<DstXprType, SrcXprType, Functor, Dense2Triangular>\n{\n  EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)\n  {\n    call_triangular_assignment_loop<DstXprType::Mode, false>(dst, src, func);  \n  }\n};\n\n\ntemplate<typename Kernel, unsigned int Mode, int UnrollCount, bool SetOpposite>\nstruct triangular_assignment_loop\n{\n  // FIXME: this is not very clean, perhaps this information should be provided by the kernel?\n  typedef typename Kernel::DstEvaluatorType DstEvaluatorType;\n  typedef typename DstEvaluatorType::XprType DstXprType;\n  \n  enum {\n    col = (UnrollCount-1) / DstXprType::RowsAtCompileTime,\n    row = (UnrollCount-1) % DstXprType::RowsAtCompileTime\n  };\n  \n  typedef typename Kernel::Scalar Scalar;\n\n  EIGEN_DEVICE_FUNC\n  static inline void run(Kernel &kernel)\n  {\n    triangular_assignment_loop<Kernel, Mode, UnrollCount-1, SetOpposite>::run(kernel);\n    \n    if(row==col)\n      kernel.assignDiagonalCoeff(row);\n    else if( ((Mode&Lower) && row>col) || ((Mode&Upper) && row<col) )\n      kernel.assignCoeff(row,col);\n    else if(SetOpposite)\n      kernel.assignOppositeCoeff(row,col);\n  }\n};\n\n// prevent buggy user code from causing an infinite recursion\ntemplate<typename Kernel, unsigned int Mode, bool SetOpposite>\nstruct triangular_assignment_loop<Kernel, Mode, 0, SetOpposite>\n{\n  EIGEN_DEVICE_FUNC\n  static inline void run(Kernel &) {}\n};\n\n\n\n// TODO: experiment with a recursive assignment procedure splitting the current\n//       triangular part into one rectangular and two triangular parts.\n\n\ntemplate<typename Kernel, unsigned int Mode, bool SetOpposite>\nstruct triangular_assignment_loop<Kernel, Mode, Dynamic, SetOpposite>\n{\n  typedef typename Kernel::Scalar Scalar;\n  EIGEN_DEVICE_FUNC\n  static inline void run(Kernel &kernel)\n  {\n    for(Index j = 0; j < kernel.cols(); ++j)\n    {\n      Index maxi = numext::mini(j, kernel.rows());\n  
    Index i = 0;\n      if (((Mode&Lower) && SetOpposite) || (Mode&Upper))\n      {\n        for(; i < maxi; ++i)\n          if(Mode&Upper) kernel.assignCoeff(i, j);\n          else           kernel.assignOppositeCoeff(i, j);\n      }\n      else\n        i = maxi;\n      \n      if(i<kernel.rows()) // then i==j\n        kernel.assignDiagonalCoeff(i++);\n      \n      if (((Mode&Upper) && SetOpposite) || (Mode&Lower))\n      {\n        for(; i < kernel.rows(); ++i)\n          if(Mode&Lower) kernel.assignCoeff(i, j);\n          else           kernel.assignOppositeCoeff(i, j);\n      }\n    }\n  }\n};\n\n} // end namespace internal\n\n/** Assigns a triangular or selfadjoint matrix to a dense matrix.\n  * If the matrix is triangular, the opposite part is set to zero. */\ntemplate<typename Derived>\ntemplate<typename DenseDerived>\nEIGEN_DEVICE_FUNC void TriangularBase<Derived>::evalToLazy(MatrixBase<DenseDerived> &other) const\n{\n  other.derived().resize(this->rows(), this->cols());\n  internal::call_triangular_assignment_loop<Derived::Mode,(Derived::Mode&SelfAdjoint)==0 /* SetOpposite */>(other.derived(), derived().nestedExpression());\n}\n\nnamespace internal {\n  \n// Triangular = Product\ntemplate< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>\nstruct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular>\n{\n  typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename SrcXprType::Scalar> &)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n\n    dst._assignProduct(src, 1, 0);\n  }\n};\n\n// Triangular += Product\ntemplate< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>\nstruct Assignment<DstXprType, 
Product<Lhs,Rhs,DefaultProduct>, internal::add_assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular>\n{\n  typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<Scalar,typename SrcXprType::Scalar> &)\n  {\n    dst._assignProduct(src, 1, 1);\n  }\n};\n\n// Triangular -= Product\ntemplate< typename DstXprType, typename Lhs, typename Rhs, typename Scalar>\nstruct Assignment<DstXprType, Product<Lhs,Rhs,DefaultProduct>, internal::sub_assign_op<Scalar,typename Product<Lhs,Rhs,DefaultProduct>::Scalar>, Dense2Triangular>\n{\n  typedef Product<Lhs,Rhs,DefaultProduct> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<Scalar,typename SrcXprType::Scalar> &)\n  {\n    dst._assignProduct(src, -1, 1);\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_TRIANGULARMATRIX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/VectorBlock.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_VECTORBLOCK_H\n#define EIGEN_VECTORBLOCK_H\n\nnamespace Eigen { \n\nnamespace internal {\ntemplate<typename VectorType, int Size>\nstruct traits<VectorBlock<VectorType, Size> >\n  : public traits<Block<VectorType,\n                     traits<VectorType>::Flags & RowMajorBit ? 1 : Size,\n                     traits<VectorType>::Flags & RowMajorBit ? Size : 1> >\n{\n};\n}\n\n/** \\class VectorBlock\n  * \\ingroup Core_Module\n  *\n  * \\brief Expression of a fixed-size or dynamic-size sub-vector\n  *\n  * \\tparam VectorType the type of the object in which we are taking a sub-vector\n  * \\tparam Size size of the sub-vector we are taking at compile time (optional)\n  *\n  * This class represents an expression of either a fixed-size or dynamic-size sub-vector.\n  * It is the return type of DenseBase::segment(Index,Index) and DenseBase::segment<int>(Index) and\n  * most of the time this is the only way it is used.\n  *\n  * However, if you want to directly maniputate sub-vector expressions,\n  * for instance if you want to write a function returning such an expression, you\n  * will need to use this class.\n  *\n  * Here is an example illustrating the dynamic case:\n  * \\include class_VectorBlock.cpp\n  * Output: \\verbinclude class_VectorBlock.out\n  *\n  * \\note Even though this expression has dynamic size, in the case where \\a VectorType\n  * has fixed size, this expression inherits a fixed maximal size which means that evaluating\n  * it does not cause a dynamic memory allocation.\n  *\n  * Here 
is an example illustrating the fixed-size case:\n  * \\include class_FixedVectorBlock.cpp\n  * Output: \\verbinclude class_FixedVectorBlock.out\n  *\n  * \\sa class Block, DenseBase::segment(Index,Index,Index,Index), DenseBase::segment(Index,Index)\n  */\ntemplate<typename VectorType, int Size> class VectorBlock\n  : public Block<VectorType,\n                     internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size,\n                     internal::traits<VectorType>::Flags & RowMajorBit ? Size : 1>\n{\n    typedef Block<VectorType,\n                     internal::traits<VectorType>::Flags & RowMajorBit ? 1 : Size,\n                     internal::traits<VectorType>::Flags & RowMajorBit ? Size : 1> Base;\n    enum {\n      IsColVector = !(internal::traits<VectorType>::Flags & RowMajorBit)\n    };\n  public:\n    EIGEN_DENSE_PUBLIC_INTERFACE(VectorBlock)\n\n    using Base::operator=;\n\n    /** Dynamic-size constructor\n      */\n    EIGEN_DEVICE_FUNC\n    inline VectorBlock(VectorType& vector, Index start, Index size)\n      : Base(vector,\n             IsColVector ? start : 0, IsColVector ? 0 : start,\n             IsColVector ? size  : 1, IsColVector ? 1 : size)\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);\n    }\n\n    /** Fixed-size constructor\n      */\n    EIGEN_DEVICE_FUNC\n    inline VectorBlock(VectorType& vector, Index start)\n      : Base(vector, IsColVector ? start : 0, IsColVector ? 0 : start)\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorBlock);\n    }\n};\n\n\n} // end namespace Eigen\n\n#endif // EIGEN_VECTORBLOCK_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/VectorwiseOp.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PARTIAL_REDUX_H\n#define EIGEN_PARTIAL_REDUX_H\n\nnamespace Eigen {\n\n/** \\class PartialReduxExpr\n  * \\ingroup Core_Module\n  *\n  * \\brief Generic expression of a partially reduxed matrix\n  *\n  * \\tparam MatrixType the type of the matrix we are applying the redux operation\n  * \\tparam MemberOp type of the member functor\n  * \\tparam Direction indicates the direction of the redux (#Vertical or #Horizontal)\n  *\n  * This class represents an expression of a partial redux operator of a matrix.\n  * It is the return type of some VectorwiseOp functions,\n  * and most of the time this is the only way it is used.\n  *\n  * \\sa class VectorwiseOp\n  */\n\ntemplate< typename MatrixType, typename MemberOp, int Direction>\nclass PartialReduxExpr;\n\nnamespace internal {\ntemplate<typename MatrixType, typename MemberOp, int Direction>\nstruct traits<PartialReduxExpr<MatrixType, MemberOp, Direction> >\n : traits<MatrixType>\n{\n  typedef typename MemberOp::result_type Scalar;\n  typedef typename traits<MatrixType>::StorageKind StorageKind;\n  typedef typename traits<MatrixType>::XprKind XprKind;\n  typedef typename MatrixType::Scalar InputScalar;\n  enum {\n    RowsAtCompileTime = Direction==Vertical   ? 1 : MatrixType::RowsAtCompileTime,\n    ColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::ColsAtCompileTime,\n    MaxRowsAtCompileTime = Direction==Vertical   ? 1 : MatrixType::MaxRowsAtCompileTime,\n    MaxColsAtCompileTime = Direction==Horizontal ? 
1 : MatrixType::MaxColsAtCompileTime,\n    Flags = RowsAtCompileTime == 1 ? RowMajorBit : 0,\n    TraversalSize = Direction==Vertical ? MatrixType::RowsAtCompileTime :  MatrixType::ColsAtCompileTime\n  };\n};\n}\n\ntemplate< typename MatrixType, typename MemberOp, int Direction>\nclass PartialReduxExpr : public internal::dense_xpr_base< PartialReduxExpr<MatrixType, MemberOp, Direction> >::type,\n                         internal::no_assignment_operator\n{\n  public:\n\n    typedef typename internal::dense_xpr_base<PartialReduxExpr>::type Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(PartialReduxExpr)\n\n    EIGEN_DEVICE_FUNC\n    explicit PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp())\n      : m_matrix(mat), m_functor(func) {}\n\n    EIGEN_DEVICE_FUNC\n    Index rows() const { return (Direction==Vertical   ? 1 : m_matrix.rows()); }\n    EIGEN_DEVICE_FUNC\n    Index cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); }\n\n    EIGEN_DEVICE_FUNC\n    typename MatrixType::Nested nestedExpression() const { return m_matrix; }\n\n    EIGEN_DEVICE_FUNC\n    const MemberOp& functor() const { return m_functor; }\n\n  protected:\n    typename MatrixType::Nested m_matrix;\n    const MemberOp m_functor;\n};\n\n#define EIGEN_MEMBER_FUNCTOR(MEMBER,COST)                               \\\n  template <typename ResultType>                                        \\\n  struct member_##MEMBER {                                              \\\n    EIGEN_EMPTY_STRUCT_CTOR(member_##MEMBER)                            \\\n    typedef ResultType result_type;                                     \\\n    template<typename Scalar, int Size> struct Cost                     \\\n    { enum { value = COST }; };                                         \\\n    template<typename XprType>                                          \\\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE                               \\\n    ResultType operator()(const XprType& mat) const         
            \\\n    { return mat.MEMBER(); } \\\n  }\n\nnamespace internal {\n\nEIGEN_MEMBER_FUNCTOR(squaredNorm, Size * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);\nEIGEN_MEMBER_FUNCTOR(norm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);\nEIGEN_MEMBER_FUNCTOR(stableNorm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);\nEIGEN_MEMBER_FUNCTOR(blueNorm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);\nEIGEN_MEMBER_FUNCTOR(hypotNorm, (Size-1) * functor_traits<scalar_hypot_op<Scalar> >::Cost );\nEIGEN_MEMBER_FUNCTOR(sum, (Size-1)*NumTraits<Scalar>::AddCost);\nEIGEN_MEMBER_FUNCTOR(mean, (Size-1)*NumTraits<Scalar>::AddCost + NumTraits<Scalar>::MulCost);\nEIGEN_MEMBER_FUNCTOR(minCoeff, (Size-1)*NumTraits<Scalar>::AddCost);\nEIGEN_MEMBER_FUNCTOR(maxCoeff, (Size-1)*NumTraits<Scalar>::AddCost);\nEIGEN_MEMBER_FUNCTOR(all, (Size-1)*NumTraits<Scalar>::AddCost);\nEIGEN_MEMBER_FUNCTOR(any, (Size-1)*NumTraits<Scalar>::AddCost);\nEIGEN_MEMBER_FUNCTOR(count, (Size-1)*NumTraits<Scalar>::AddCost);\nEIGEN_MEMBER_FUNCTOR(prod, (Size-1)*NumTraits<Scalar>::MulCost);\n\ntemplate <int p, typename ResultType>\nstruct member_lpnorm {\n  typedef ResultType result_type;\n  template<typename Scalar, int Size> struct Cost\n  { enum { value = (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost }; };\n  EIGEN_DEVICE_FUNC member_lpnorm() {}\n  template<typename XprType>\n  EIGEN_DEVICE_FUNC inline ResultType operator()(const XprType& mat) const\n  { return mat.template lpNorm<p>(); }\n};\n\ntemplate <typename BinaryOp, typename Scalar>\nstruct member_redux {\n  typedef typename result_of<\n                     BinaryOp(const Scalar&,const Scalar&)\n                   >::type  result_type;\n  template<typename _Scalar, int Size> struct Cost\n  { enum { value = (Size-1) * functor_traits<BinaryOp>::Cost }; };\n  EIGEN_DEVICE_FUNC explicit member_redux(const BinaryOp 
func) : m_functor(func) {}\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC inline result_type operator()(const DenseBase<Derived>& mat) const\n  { return mat.redux(m_functor); }\n  const BinaryOp m_functor;\n};\n}\n\n/** \\class VectorwiseOp\n  * \\ingroup Core_Module\n  *\n  * \\brief Pseudo expression providing partial reduction operations\n  *\n  * \\tparam ExpressionType the type of the object on which to do partial reductions\n  * \\tparam Direction indicates the direction of the redux (#Vertical or #Horizontal)\n  *\n  * This class represents a pseudo expression with partial reduction features.\n  * It is the return type of DenseBase::colwise() and DenseBase::rowwise()\n  * and most of the time this is the only way it is used.\n  *\n  * Example: \\include MatrixBase_colwise.cpp\n  * Output: \\verbinclude MatrixBase_colwise.out\n  *\n  * \\sa DenseBase::colwise(), DenseBase::rowwise(), class PartialReduxExpr\n  */\ntemplate<typename ExpressionType, int Direction> class VectorwiseOp\n{\n  public:\n\n    typedef typename ExpressionType::Scalar Scalar;\n    typedef typename ExpressionType::RealScalar RealScalar;\n    typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n    typedef typename internal::ref_selector<ExpressionType>::non_const_type ExpressionTypeNested;\n    typedef typename internal::remove_all<ExpressionTypeNested>::type ExpressionTypeNestedCleaned;\n\n    template<template<typename _Scalar> class Functor,\n                      typename Scalar_=Scalar> struct ReturnType\n    {\n      typedef PartialReduxExpr<ExpressionType,\n                               Functor<Scalar_>,\n                               Direction\n                              > Type;\n    };\n\n    template<typename BinaryOp> struct ReduxReturnType\n    {\n      typedef PartialReduxExpr<ExpressionType,\n                               internal::member_redux<BinaryOp,Scalar>,\n                               Direction\n                              > Type;\n    };\n\n   
 enum {\n      isVertical   = (Direction==Vertical) ? 1 : 0,\n      isHorizontal = (Direction==Horizontal) ? 1 : 0\n    };\n\n  protected:\n\n    typedef typename internal::conditional<isVertical,\n                               typename ExpressionType::ColXpr,\n                               typename ExpressionType::RowXpr>::type SubVector;\n    /** \\internal\n      * \\returns the i-th subvector according to the \\c Direction */\n    EIGEN_DEVICE_FUNC\n    SubVector subVector(Index i)\n    {\n      return SubVector(m_matrix.derived(),i);\n    }\n\n    /** \\internal\n      * \\returns the number of subvectors in the direction \\c Direction */\n    EIGEN_DEVICE_FUNC\n    Index subVectors() const\n    { return isVertical?m_matrix.cols():m_matrix.rows(); }\n\n    template<typename OtherDerived> struct ExtendedType {\n      typedef Replicate<OtherDerived,\n                        isVertical   ? 1 : ExpressionType::RowsAtCompileTime,\n                        isHorizontal ? 1 : ExpressionType::ColsAtCompileTime> Type;\n    };\n\n    /** \\internal\n      * Replicates a vector to match the size of \\c *this */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    typename ExtendedType<OtherDerived>::Type\n    extendedTo(const DenseBase<OtherDerived>& other) const\n    {\n      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isVertical, OtherDerived::MaxColsAtCompileTime==1),\n                          YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED)\n      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isHorizontal, OtherDerived::MaxRowsAtCompileTime==1),\n                          YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED)\n      return typename ExtendedType<OtherDerived>::Type\n                      (other.derived(),\n                       isVertical   ? 1 : m_matrix.rows(),\n                       isHorizontal ? 
1 : m_matrix.cols());\n    }\n\n    template<typename OtherDerived> struct OppositeExtendedType {\n      typedef Replicate<OtherDerived,\n                        isHorizontal ? 1 : ExpressionType::RowsAtCompileTime,\n                        isVertical   ? 1 : ExpressionType::ColsAtCompileTime> Type;\n    };\n\n    /** \\internal\n      * Replicates a vector in the opposite direction to match the size of \\c *this */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    typename OppositeExtendedType<OtherDerived>::Type\n    extendedToOpposite(const DenseBase<OtherDerived>& other) const\n    {\n      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isHorizontal, OtherDerived::MaxColsAtCompileTime==1),\n                          YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED)\n      EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(isVertical, OtherDerived::MaxRowsAtCompileTime==1),\n                          YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED)\n      return typename OppositeExtendedType<OtherDerived>::Type\n                      (other.derived(),\n                       isHorizontal  ? 1 : m_matrix.rows(),\n                       isVertical    ? 1 : m_matrix.cols());\n    }\n\n  public:\n    EIGEN_DEVICE_FUNC\n    explicit inline VectorwiseOp(ExpressionType& matrix) : m_matrix(matrix) {}\n\n    /** \\internal */\n    EIGEN_DEVICE_FUNC\n    inline const ExpressionType& _expression() const { return m_matrix; }\n\n    /** \\returns a row or column vector expression of \\c *this reduxed by \\a func\n      *\n      * The template parameter \\a BinaryOp is the type of the functor\n      * of the custom redux operator. 
Note that func must be an associative operator.\n      *\n      * \\sa class VectorwiseOp, DenseBase::colwise(), DenseBase::rowwise()\n      */\n    template<typename BinaryOp>\n    EIGEN_DEVICE_FUNC\n    const typename ReduxReturnType<BinaryOp>::Type\n    redux(const BinaryOp& func = BinaryOp()) const\n    { return typename ReduxReturnType<BinaryOp>::Type(_expression(), internal::member_redux<BinaryOp,Scalar>(func)); }\n\n    typedef typename ReturnType<internal::member_minCoeff>::Type MinCoeffReturnType;\n    typedef typename ReturnType<internal::member_maxCoeff>::Type MaxCoeffReturnType;\n    typedef typename ReturnType<internal::member_squaredNorm,RealScalar>::Type SquaredNormReturnType;\n    typedef typename ReturnType<internal::member_norm,RealScalar>::Type NormReturnType;\n    typedef typename ReturnType<internal::member_blueNorm,RealScalar>::Type BlueNormReturnType;\n    typedef typename ReturnType<internal::member_stableNorm,RealScalar>::Type StableNormReturnType;\n    typedef typename ReturnType<internal::member_hypotNorm,RealScalar>::Type HypotNormReturnType;\n    typedef typename ReturnType<internal::member_sum>::Type SumReturnType;\n    typedef typename ReturnType<internal::member_mean>::Type MeanReturnType;\n    typedef typename ReturnType<internal::member_all>::Type AllReturnType;\n    typedef typename ReturnType<internal::member_any>::Type AnyReturnType;\n    typedef PartialReduxExpr<ExpressionType, internal::member_count<Index>, Direction> CountReturnType;\n    typedef typename ReturnType<internal::member_prod>::Type ProdReturnType;\n    typedef Reverse<const ExpressionType, Direction> ConstReverseReturnType;\n    typedef Reverse<ExpressionType, Direction> ReverseReturnType;\n\n    template<int p> struct LpNormReturnType {\n      typedef PartialReduxExpr<ExpressionType, internal::member_lpnorm<p,RealScalar>,Direction> Type;\n    };\n\n    /** \\returns a row (or column) vector expression of the smallest coefficient\n      * of each column (or row) 
of the referenced expression.\n      *\n      * \\warning the result is undefined if \\c *this contains NaN.\n      *\n      * Example: \\include PartialRedux_minCoeff.cpp\n      * Output: \\verbinclude PartialRedux_minCoeff.out\n      *\n      * \\sa DenseBase::minCoeff() */\n    EIGEN_DEVICE_FUNC\n    const MinCoeffReturnType minCoeff() const\n    { return MinCoeffReturnType(_expression()); }\n\n    /** \\returns a row (or column) vector expression of the largest coefficient\n      * of each column (or row) of the referenced expression.\n      *\n      * \\warning the result is undefined if \\c *this contains NaN.\n      *\n      * Example: \\include PartialRedux_maxCoeff.cpp\n      * Output: \\verbinclude PartialRedux_maxCoeff.out\n      *\n      * \\sa DenseBase::maxCoeff() */\n    EIGEN_DEVICE_FUNC\n    const MaxCoeffReturnType maxCoeff() const\n    { return MaxCoeffReturnType(_expression()); }\n\n    /** \\returns a row (or column) vector expression of the squared norm\n      * of each column (or row) of the referenced expression.\n      * This is a vector with real entries, even if the original matrix has complex entries.\n      *\n      * Example: \\include PartialRedux_squaredNorm.cpp\n      * Output: \\verbinclude PartialRedux_squaredNorm.out\n      *\n      * \\sa DenseBase::squaredNorm() */\n    EIGEN_DEVICE_FUNC\n    const SquaredNormReturnType squaredNorm() const\n    { return SquaredNormReturnType(_expression()); }\n\n    /** \\returns a row (or column) vector expression of the norm\n      * of each column (or row) of the referenced expression.\n      * This is a vector with real entries, even if the original matrix has complex entries.\n      *\n      * Example: \\include PartialRedux_norm.cpp\n      * Output: \\verbinclude PartialRedux_norm.out\n      *\n      * \\sa DenseBase::norm() */\n    EIGEN_DEVICE_FUNC\n    const NormReturnType norm() const\n    { return NormReturnType(_expression()); }\n\n    /** \\returns a row (or column) vector 
expression of the norm\n      * of each column (or row) of the referenced expression.\n      * This is a vector with real entries, even if the original matrix has complex entries.\n      *\n      * Example: \\include PartialRedux_norm.cpp\n      * Output: \\verbinclude PartialRedux_norm.out\n      *\n      * \\sa DenseBase::norm() */\n    template<int p>\n    EIGEN_DEVICE_FUNC\n    const typename LpNormReturnType<p>::Type lpNorm() const\n    { return typename LpNormReturnType<p>::Type(_expression()); }\n\n\n    /** \\returns a row (or column) vector expression of the norm\n      * of each column (or row) of the referenced expression, using\n      * Blue's algorithm.\n      * This is a vector with real entries, even if the original matrix has complex entries.\n      *\n      * \\sa DenseBase::blueNorm() */\n    EIGEN_DEVICE_FUNC\n    const BlueNormReturnType blueNorm() const\n    { return BlueNormReturnType(_expression()); }\n\n\n    /** \\returns a row (or column) vector expression of the norm\n      * of each column (or row) of the referenced expression, avoiding\n      * underflow and overflow.\n      * This is a vector with real entries, even if the original matrix has complex entries.\n      *\n      * \\sa DenseBase::stableNorm() */\n    EIGEN_DEVICE_FUNC\n    const StableNormReturnType stableNorm() const\n    { return StableNormReturnType(_expression()); }\n\n\n    /** \\returns a row (or column) vector expression of the norm\n      * of each column (or row) of the referenced expression, avoiding\n      * underflow and overflow using a concatenation of hypot() calls.\n      * This is a vector with real entries, even if the original matrix has complex entries.\n      *\n      * \\sa DenseBase::hypotNorm() */\n    EIGEN_DEVICE_FUNC\n    const HypotNormReturnType hypotNorm() const\n    { return HypotNormReturnType(_expression()); }\n\n    /** \\returns a row (or column) vector expression of the sum\n      * of each column (or row) of the referenced expression.\n 
     *\n      * Example: \\include PartialRedux_sum.cpp\n      * Output: \\verbinclude PartialRedux_sum.out\n      *\n      * \\sa DenseBase::sum() */\n    EIGEN_DEVICE_FUNC\n    const SumReturnType sum() const\n    { return SumReturnType(_expression()); }\n\n    /** \\returns a row (or column) vector expression of the mean\n    * of each column (or row) of the referenced expression.\n    *\n    * \\sa DenseBase::mean() */\n    EIGEN_DEVICE_FUNC\n    const MeanReturnType mean() const\n    { return MeanReturnType(_expression()); }\n\n    /** \\returns a row (or column) vector expression representing\n      * whether \\b all coefficients of each respective column (or row) are \\c true.\n      * This expression can be assigned to a vector with entries of type \\c bool.\n      *\n      * \\sa DenseBase::all() */\n    EIGEN_DEVICE_FUNC\n    const AllReturnType all() const\n    { return AllReturnType(_expression()); }\n\n    /** \\returns a row (or column) vector expression representing\n      * whether \\b at \\b least one coefficient of each respective column (or row) is \\c true.\n      * This expression can be assigned to a vector with entries of type \\c bool.\n      *\n      * \\sa DenseBase::any() */\n    EIGEN_DEVICE_FUNC\n    const AnyReturnType any() const\n    { return AnyReturnType(_expression()); }\n\n    /** \\returns a row (or column) vector expression representing\n      * the number of \\c true coefficients of each respective column (or row).\n      * This expression can be assigned to a vector whose entries have the same type as is used to\n      * index entries of the original matrix; for dense matrices, this is \\c std::ptrdiff_t .\n      *\n      * Example: \\include PartialRedux_count.cpp\n      * Output: \\verbinclude PartialRedux_count.out\n      *\n      * \\sa DenseBase::count() */\n    EIGEN_DEVICE_FUNC\n    const CountReturnType count() const\n    { return CountReturnType(_expression()); }\n\n    /** \\returns a row (or column) vector 
expression of the product\n      * of each column (or row) of the referenced expression.\n      *\n      * Example: \\include PartialRedux_prod.cpp\n      * Output: \\verbinclude PartialRedux_prod.out\n      *\n      * \\sa DenseBase::prod() */\n    EIGEN_DEVICE_FUNC\n    const ProdReturnType prod() const\n    { return ProdReturnType(_expression()); }\n\n\n    /** \\returns a matrix expression\n      * where each column (or row) are reversed.\n      *\n      * Example: \\include Vectorwise_reverse.cpp\n      * Output: \\verbinclude Vectorwise_reverse.out\n      *\n      * \\sa DenseBase::reverse() */\n    EIGEN_DEVICE_FUNC\n    const ConstReverseReturnType reverse() const\n    { return ConstReverseReturnType( _expression() ); }\n\n    /** \\returns a writable matrix expression\n      * where each column (or row) are reversed.\n      *\n      * \\sa reverse() const */\n    EIGEN_DEVICE_FUNC\n    ReverseReturnType reverse()\n    { return ReverseReturnType( _expression() ); }\n\n    typedef Replicate<ExpressionType,(isVertical?Dynamic:1),(isHorizontal?Dynamic:1)> ReplicateReturnType;\n    EIGEN_DEVICE_FUNC\n    const ReplicateReturnType replicate(Index factor) const;\n\n    /**\n      * \\return an expression of the replication of each column (or row) of \\c *this\n      *\n      * Example: \\include DirectionWise_replicate.cpp\n      * Output: \\verbinclude DirectionWise_replicate.out\n      *\n      * \\sa VectorwiseOp::replicate(Index), DenseBase::replicate(), class Replicate\n      */\n    // NOTE implemented here because of sunstudio's compilation errors\n    // isVertical*Factor+isHorizontal instead of (isVertical?Factor:1) to handle CUDA bug with ternary operator\n    template<int Factor> const Replicate<ExpressionType,isVertical*Factor+isHorizontal,isHorizontal*Factor+isVertical>\n    EIGEN_DEVICE_FUNC\n    replicate(Index factor = Factor) const\n    {\n      return Replicate<ExpressionType,(isVertical?Factor:1),(isHorizontal?Factor:1)>\n          
(_expression(),isVertical?factor:1,isHorizontal?factor:1);\n    }\n\n/////////// Artithmetic operators ///////////\n\n    /** Copies the vector \\a other to each subvector of \\c *this */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    ExpressionType& operator=(const DenseBase<OtherDerived>& other)\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)\n      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)\n      //eigen_assert((m_matrix.isNull()) == (other.isNull())); FIXME\n      return const_cast<ExpressionType&>(m_matrix = extendedTo(other.derived()));\n    }\n\n    /** Adds the vector \\a other to each subvector of \\c *this */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    ExpressionType& operator+=(const DenseBase<OtherDerived>& other)\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)\n      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)\n      return const_cast<ExpressionType&>(m_matrix += extendedTo(other.derived()));\n    }\n\n    /** Substracts the vector \\a other to each subvector of \\c *this */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    ExpressionType& operator-=(const DenseBase<OtherDerived>& other)\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)\n      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)\n      return const_cast<ExpressionType&>(m_matrix -= extendedTo(other.derived()));\n    }\n\n    /** Multiples each subvector of \\c *this by the vector \\a other */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    ExpressionType& operator*=(const DenseBase<OtherDerived>& other)\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)\n      EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType)\n      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)\n      m_matrix *= extendedTo(other.derived());\n      return const_cast<ExpressionType&>(m_matrix);\n    }\n\n    /** Divides each subvector of \\c 
*this by the vector \\a other */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    ExpressionType& operator/=(const DenseBase<OtherDerived>& other)\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)\n      EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType)\n      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)\n      m_matrix /= extendedTo(other.derived());\n      return const_cast<ExpressionType&>(m_matrix);\n    }\n\n    /** Returns the expression of the sum of the vector \\a other to each subvector of \\c *this */\n    template<typename OtherDerived> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC\n    CwiseBinaryOp<internal::scalar_sum_op<Scalar,typename OtherDerived::Scalar>,\n                  const ExpressionTypeNestedCleaned,\n                  const typename ExtendedType<OtherDerived>::Type>\n    operator+(const DenseBase<OtherDerived>& other) const\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)\n      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)\n      return m_matrix + extendedTo(other.derived());\n    }\n\n    /** Returns the expression of the difference between each subvector of \\c *this and the vector \\a other */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    CwiseBinaryOp<internal::scalar_difference_op<Scalar,typename OtherDerived::Scalar>,\n                  const ExpressionTypeNestedCleaned,\n                  const typename ExtendedType<OtherDerived>::Type>\n    operator-(const DenseBase<OtherDerived>& other) const\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)\n      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)\n      return m_matrix - extendedTo(other.derived());\n    }\n\n    /** Returns the expression where each subvector is the product of the vector \\a other\n      * by the corresponding subvector of \\c *this */\n    template<typename OtherDerived> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC\n    
CwiseBinaryOp<internal::scalar_product_op<Scalar>,\n                  const ExpressionTypeNestedCleaned,\n                  const typename ExtendedType<OtherDerived>::Type>\n    EIGEN_DEVICE_FUNC\n    operator*(const DenseBase<OtherDerived>& other) const\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)\n      EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType)\n      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)\n      return m_matrix * extendedTo(other.derived());\n    }\n\n    /** Returns the expression where each subvector is the quotient of the corresponding\n      * subvector of \\c *this by the vector \\a other */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    CwiseBinaryOp<internal::scalar_quotient_op<Scalar>,\n                  const ExpressionTypeNestedCleaned,\n                  const typename ExtendedType<OtherDerived>::Type>\n    operator/(const DenseBase<OtherDerived>& other) const\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)\n      EIGEN_STATIC_ASSERT_ARRAYXPR(ExpressionType)\n      EIGEN_STATIC_ASSERT_SAME_XPR_KIND(ExpressionType, OtherDerived)\n      return m_matrix / extendedTo(other.derived());\n    }\n\n    /** \\returns an expression where each column (or row) of the referenced matrix are normalized.\n      * The referenced matrix is \\b not modified.\n      * \\sa MatrixBase::normalized(), normalize()\n      */\n    EIGEN_DEVICE_FUNC\n    CwiseBinaryOp<internal::scalar_quotient_op<Scalar>,\n                  const ExpressionTypeNestedCleaned,\n                  const typename OppositeExtendedType<typename ReturnType<internal::member_norm,RealScalar>::Type>::Type>\n    normalized() const { return m_matrix.cwiseQuotient(extendedToOpposite(this->norm())); }\n\n\n    /** Normalize in-place each row or columns of the referenced matrix.\n      * \\sa MatrixBase::normalize(), normalized()\n      */\n    EIGEN_DEVICE_FUNC void normalize() {\n      m_matrix = this->normalized();\n    }\n\n    
EIGEN_DEVICE_FUNC inline void reverseInPlace();\n\n/////////// Geometry module ///////////\n\n    typedef Homogeneous<ExpressionType,Direction> HomogeneousReturnType;\n    EIGEN_DEVICE_FUNC\n    HomogeneousReturnType homogeneous() const;\n\n    typedef typename ExpressionType::PlainObject CrossReturnType;\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC\n    const CrossReturnType cross(const MatrixBase<OtherDerived>& other) const;\n\n    enum {\n      HNormalized_Size = Direction==Vertical ? internal::traits<ExpressionType>::RowsAtCompileTime\n                                             : internal::traits<ExpressionType>::ColsAtCompileTime,\n      HNormalized_SizeMinusOne = HNormalized_Size==Dynamic ? Dynamic : HNormalized_Size-1\n    };\n    typedef Block<const ExpressionType,\n                  Direction==Vertical   ? int(HNormalized_SizeMinusOne)\n                                        : int(internal::traits<ExpressionType>::RowsAtCompileTime),\n                  Direction==Horizontal ? int(HNormalized_SizeMinusOne)\n                                        : int(internal::traits<ExpressionType>::ColsAtCompileTime)>\n            HNormalized_Block;\n    typedef Block<const ExpressionType,\n                  Direction==Vertical   ? 1 : int(internal::traits<ExpressionType>::RowsAtCompileTime),\n                  Direction==Horizontal ? 1 : int(internal::traits<ExpressionType>::ColsAtCompileTime)>\n            HNormalized_Factors;\n    typedef CwiseBinaryOp<internal::scalar_quotient_op<typename internal::traits<ExpressionType>::Scalar>,\n                const HNormalized_Block,\n                const Replicate<HNormalized_Factors,\n                  Direction==Vertical   ? HNormalized_SizeMinusOne : 1,\n                  Direction==Horizontal ? 
HNormalized_SizeMinusOne : 1> >\n            HNormalizedReturnType;\n\n    EIGEN_DEVICE_FUNC\n    const HNormalizedReturnType hnormalized() const;\n\n  protected:\n    ExpressionTypeNested m_matrix;\n};\n\n//const colwise moved to DenseBase.h due to CUDA compiler bug\n\n\n/** \\returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations\n  *\n  * \\sa rowwise(), class VectorwiseOp, \\ref TutorialReductionsVisitorsBroadcasting\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::ColwiseReturnType\nDenseBase<Derived>::colwise()\n{\n  return ColwiseReturnType(derived());\n}\n\n//const rowwise moved to DenseBase.h due to CUDA compiler bug\n\n\n/** \\returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations\n  *\n  * \\sa colwise(), class VectorwiseOp, \\ref TutorialReductionsVisitorsBroadcasting\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::RowwiseReturnType\nDenseBase<Derived>::rowwise()\n{\n  return RowwiseReturnType(derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_PARTIAL_REDUX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/Visitor.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_VISITOR_H\n#define EIGEN_VISITOR_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename Visitor, typename Derived, int UnrollCount>\nstruct visitor_impl\n{\n  enum {\n    col = (UnrollCount-1) / Derived::RowsAtCompileTime,\n    row = (UnrollCount-1) % Derived::RowsAtCompileTime\n  };\n\n  EIGEN_DEVICE_FUNC\n  static inline void run(const Derived &mat, Visitor& visitor)\n  {\n    visitor_impl<Visitor, Derived, UnrollCount-1>::run(mat, visitor);\n    visitor(mat.coeff(row, col), row, col);\n  }\n};\n\ntemplate<typename Visitor, typename Derived>\nstruct visitor_impl<Visitor, Derived, 1>\n{\n  EIGEN_DEVICE_FUNC\n  static inline void run(const Derived &mat, Visitor& visitor)\n  {\n    return visitor.init(mat.coeff(0, 0), 0, 0);\n  }\n};\n\ntemplate<typename Visitor, typename Derived>\nstruct visitor_impl<Visitor, Derived, Dynamic>\n{\n  EIGEN_DEVICE_FUNC\n  static inline void run(const Derived& mat, Visitor& visitor)\n  {\n    visitor.init(mat.coeff(0,0), 0, 0);\n    for(Index i = 1; i < mat.rows(); ++i)\n      visitor(mat.coeff(i, 0), i, 0);\n    for(Index j = 1; j < mat.cols(); ++j)\n      for(Index i = 0; i < mat.rows(); ++i)\n        visitor(mat.coeff(i, j), i, j);\n  }\n};\n\n// evaluator adaptor\ntemplate<typename XprType>\nclass visitor_evaluator\n{\npublic:\n  EIGEN_DEVICE_FUNC\n  explicit visitor_evaluator(const XprType &xpr) : m_evaluator(xpr), m_xpr(xpr) {}\n  \n  typedef typename XprType::Scalar Scalar;\n  typedef typename XprType::CoeffReturnType CoeffReturnType;\n  \n  enum {\n    RowsAtCompileTime = XprType::RowsAtCompileTime,\n    CoeffReadCost = 
internal::evaluator<XprType>::CoeffReadCost\n  };\n  \n  EIGEN_DEVICE_FUNC Index rows() const { return m_xpr.rows(); }\n  EIGEN_DEVICE_FUNC Index cols() const { return m_xpr.cols(); }\n  EIGEN_DEVICE_FUNC Index size() const { return m_xpr.size(); }\n\n  EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index row, Index col) const\n  { return m_evaluator.coeff(row, col); }\n  \nprotected:\n  internal::evaluator<XprType> m_evaluator;\n  const XprType &m_xpr;\n};\n} // end namespace internal\n\n/** Applies the visitor \\a visitor to the whole coefficients of the matrix or vector.\n  *\n  * The template parameter \\a Visitor is the type of the visitor and provides the following interface:\n  * \\code\n  * struct MyVisitor {\n  *   // called for the first coefficient\n  *   void init(const Scalar& value, Index i, Index j);\n  *   // called for all other coefficients\n  *   void operator() (const Scalar& value, Index i, Index j);\n  * };\n  * \\endcode\n  *\n  * \\note compared to one or two \\em for \\em loops, visitors offer automatic\n  * unrolling for small fixed size matrix.\n  *\n  * \\sa minCoeff(Index*,Index*), maxCoeff(Index*,Index*), DenseBase::redux()\n  */\ntemplate<typename Derived>\ntemplate<typename Visitor>\nEIGEN_DEVICE_FUNC\nvoid DenseBase<Derived>::visit(Visitor& visitor) const\n{\n  typedef typename internal::visitor_evaluator<Derived> ThisEvaluator;\n  ThisEvaluator thisEval(derived());\n  \n  enum {\n    unroll =  SizeAtCompileTime != Dynamic\n           && SizeAtCompileTime * ThisEvaluator::CoeffReadCost + (SizeAtCompileTime-1) * internal::functor_traits<Visitor>::Cost <= EIGEN_UNROLLING_LIMIT\n  };\n  return internal::visitor_impl<Visitor, ThisEvaluator, unroll ? 
int(SizeAtCompileTime) : Dynamic>::run(thisEval, visitor);\n}\n\nnamespace internal {\n\n/** \\internal\n  * \\brief Base class to implement min and max visitors\n  */\ntemplate <typename Derived>\nstruct coeff_visitor\n{\n  typedef typename Derived::Scalar Scalar;\n  Index row, col;\n  Scalar res;\n  EIGEN_DEVICE_FUNC\n  inline void init(const Scalar& value, Index i, Index j)\n  {\n    res = value;\n    row = i;\n    col = j;\n  }\n};\n\n/** \\internal\n  * \\brief Visitor computing the min coefficient with its value and coordinates\n  *\n  * \\sa DenseBase::minCoeff(Index*, Index*)\n  */\ntemplate <typename Derived>\nstruct min_coeff_visitor : coeff_visitor<Derived>\n{\n  typedef typename Derived::Scalar Scalar;\n  EIGEN_DEVICE_FUNC\n  void operator() (const Scalar& value, Index i, Index j)\n  {\n    if(value < this->res)\n    {\n      this->res = value;\n      this->row = i;\n      this->col = j;\n    }\n  }\n};\n\ntemplate<typename Scalar>\nstruct functor_traits<min_coeff_visitor<Scalar> > {\n  enum {\n    Cost = NumTraits<Scalar>::AddCost\n  };\n};\n\n/** \\internal\n  * \\brief Visitor computing the max coefficient with its value and coordinates\n  *\n  * \\sa DenseBase::maxCoeff(Index*, Index*)\n  */\ntemplate <typename Derived>\nstruct max_coeff_visitor : coeff_visitor<Derived>\n{\n  typedef typename Derived::Scalar Scalar; \n  EIGEN_DEVICE_FUNC\n  void operator() (const Scalar& value, Index i, Index j)\n  {\n    if(value > this->res)\n    {\n      this->res = value;\n      this->row = i;\n      this->col = j;\n    }\n  }\n};\n\ntemplate<typename Scalar>\nstruct functor_traits<max_coeff_visitor<Scalar> > {\n  enum {\n    Cost = NumTraits<Scalar>::AddCost\n  };\n};\n\n} // end namespace internal\n\n/** \\fn DenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const\n  * \\returns the minimum of all coefficients of *this and puts in *row and *col its location.\n  * \\warning the result is undefined if \\c *this contains NaN.\n  *\n  * \\sa 
DenseBase::minCoeff(Index*), DenseBase::maxCoeff(Index*,Index*), DenseBase::visit(), DenseBase::minCoeff()\n  */\ntemplate<typename Derived>\ntemplate<typename IndexType>\nEIGEN_DEVICE_FUNC\ntypename internal::traits<Derived>::Scalar\nDenseBase<Derived>::minCoeff(IndexType* rowId, IndexType* colId) const\n{\n  internal::min_coeff_visitor<Derived> minVisitor;\n  this->visit(minVisitor);\n  *rowId = minVisitor.row;\n  if (colId) *colId = minVisitor.col;\n  return minVisitor.res;\n}\n\n/** \\returns the minimum of all coefficients of *this and puts in *index its location.\n  * \\warning the result is undefined if \\c *this contains NaN. \n  *\n  * \\sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::minCoeff()\n  */\ntemplate<typename Derived>\ntemplate<typename IndexType>\nEIGEN_DEVICE_FUNC\ntypename internal::traits<Derived>::Scalar\nDenseBase<Derived>::minCoeff(IndexType* index) const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  internal::min_coeff_visitor<Derived> minVisitor;\n  this->visit(minVisitor);\n  *index = IndexType((RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row);\n  return minVisitor.res;\n}\n\n/** \\fn DenseBase<Derived>::maxCoeff(IndexType* rowId, IndexType* colId) const\n  * \\returns the maximum of all coefficients of *this and puts in *row and *col its location.\n  * \\warning the result is undefined if \\c *this contains NaN. 
\n  *\n  * \\sa DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visit(), DenseBase::maxCoeff()\n  */\ntemplate<typename Derived>\ntemplate<typename IndexType>\nEIGEN_DEVICE_FUNC\ntypename internal::traits<Derived>::Scalar\nDenseBase<Derived>::maxCoeff(IndexType* rowPtr, IndexType* colPtr) const\n{\n  internal::max_coeff_visitor<Derived> maxVisitor;\n  this->visit(maxVisitor);\n  *rowPtr = maxVisitor.row;\n  if (colPtr) *colPtr = maxVisitor.col;\n  return maxVisitor.res;\n}\n\n/** \\returns the maximum of all coefficients of *this and puts in *index its location.\n  * \\warning the result is undefined if \\c *this contains NaN.\n  *\n  * \\sa DenseBase::maxCoeff(IndexType*,IndexType*), DenseBase::minCoeff(IndexType*,IndexType*), DenseBase::visitor(), DenseBase::maxCoeff()\n  */\ntemplate<typename Derived>\ntemplate<typename IndexType>\nEIGEN_DEVICE_FUNC\ntypename internal::traits<Derived>::Scalar\nDenseBase<Derived>::maxCoeff(IndexType* index) const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  internal::max_coeff_visitor<Derived> maxVisitor;\n  this->visit(maxVisitor);\n  *index = (RowsAtCompileTime==1) ? maxVisitor.col : maxVisitor.row;\n  return maxVisitor.res;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_VISITOR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/AVX/Complex.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com)\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_COMPLEX_AVX_H\n#define EIGEN_COMPLEX_AVX_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n//---------- float ----------\nstruct Packet4cf\n{\n  EIGEN_STRONG_INLINE Packet4cf() {}\n  EIGEN_STRONG_INLINE explicit Packet4cf(const __m256& a) : v(a) {}\n  __m256  v;\n};\n\ntemplate<> struct packet_traits<std::complex<float> >  : default_packet_traits\n{\n  typedef Packet4cf type;\n  typedef Packet2cf half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 4,\n    HasHalfPacket = 1,\n\n    HasAdd    = 1,\n    HasSub    = 1,\n    HasMul    = 1,\n    HasDiv    = 1,\n    HasNegate = 1,\n    HasAbs    = 0,\n    HasAbs2   = 0,\n    HasMin    = 0,\n    HasMax    = 0,\n    HasSetLinear = 0\n  };\n};\n\ntemplate<> struct unpacket_traits<Packet4cf> { typedef std::complex<float> type; enum {size=4, alignment=Aligned32}; typedef Packet2cf half; };\n\ntemplate<> EIGEN_STRONG_INLINE Packet4cf padd<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_add_ps(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet4cf psub<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_sub_ps(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet4cf pnegate(const Packet4cf& a)\n{\n  return Packet4cf(pnegate(a.v));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4cf pconj(const Packet4cf& a)\n{\n  const __m256 mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000,0x00000000,0x80000000));\n  return Packet4cf(_mm256_xor_ps(a.v,mask));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4cf 
pmul<Packet4cf>(const Packet4cf& a, const Packet4cf& b)\n{\n  __m256 tmp1 = _mm256_mul_ps(_mm256_moveldup_ps(a.v), b.v);\n  __m256 tmp2 = _mm256_mul_ps(_mm256_movehdup_ps(a.v), _mm256_permute_ps(b.v, _MM_SHUFFLE(2,3,0,1)));\n  __m256 result = _mm256_addsub_ps(tmp1, tmp2);\n  return Packet4cf(result);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4cf pand   <Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_and_ps(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet4cf por    <Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_or_ps(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet4cf pxor   <Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_xor_ps(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet4cf pandnot<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_andnot_ps(a.v,b.v)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4cf pload <Packet4cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet4cf(pload<Packet8f>(&numext::real_ref(*from))); }\ntemplate<> EIGEN_STRONG_INLINE Packet4cf ploadu<Packet4cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet4cf(ploadu<Packet8f>(&numext::real_ref(*from))); }\n\n\ntemplate<> EIGEN_STRONG_INLINE Packet4cf pset1<Packet4cf>(const std::complex<float>& from)\n{\n  return Packet4cf(_mm256_castpd_ps(_mm256_broadcast_sd((const double*)(const void*)&from)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4cf ploaddup<Packet4cf>(const std::complex<float>* from)\n{\n  // FIXME The following might be optimized using _mm256_movedup_pd\n  Packet2cf a = ploaddup<Packet2cf>(from);\n  Packet2cf b = ploaddup<Packet2cf>(from+1);\n  return  Packet4cf(_mm256_insertf128_ps(_mm256_castps128_ps256(a.v), b.v, 1));\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float>* to, const Packet4cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), from.v); 
}\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float>* to, const Packet4cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), from.v); }\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet4cf pgather<std::complex<float>, Packet4cf>(const std::complex<float>* from, Index stride)\n{\n  return Packet4cf(_mm256_set_ps(std::imag(from[3*stride]), std::real(from[3*stride]),\n                                 std::imag(from[2*stride]), std::real(from[2*stride]),\n                                 std::imag(from[1*stride]), std::real(from[1*stride]),\n                                 std::imag(from[0*stride]), std::real(from[0*stride])));\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet4cf>(std::complex<float>* to, const Packet4cf& from, Index stride)\n{\n  __m128 low = _mm256_extractf128_ps(from.v, 0);\n  to[stride*0] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 0)),\n                                     _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1)));\n  to[stride*1] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(low, low, 2)),\n                                     _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3)));\n\n  __m128 high = _mm256_extractf128_ps(from.v, 1);\n  to[stride*2] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(high, high, 0)),\n                                     _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1)));\n  to[stride*3] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(high, high, 2)),\n                                     _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3)));\n\n}\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<float>  pfirst<Packet4cf>(const Packet4cf& a)\n{\n  return pfirst(Packet2cf(_mm256_castps256_ps128(a.v)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4cf preverse(const Packet4cf& a) {\n  __m128 low  = _mm256_extractf128_ps(a.v, 0);\n  __m128 high = _mm256_extractf128_ps(a.v, 1);\n  __m128d lowd  = _mm_castps_pd(low);\n  __m128d highd = 
_mm_castps_pd(high);\n  low  = _mm_castpd_ps(_mm_shuffle_pd(lowd,lowd,0x1));\n  high = _mm_castpd_ps(_mm_shuffle_pd(highd,highd,0x1));\n  __m256 result = _mm256_setzero_ps();\n  result = _mm256_insertf128_ps(result, low, 1);\n  result = _mm256_insertf128_ps(result, high, 0);\n  return Packet4cf(result);\n}\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet4cf>(const Packet4cf& a)\n{\n  return predux(padd(Packet2cf(_mm256_extractf128_ps(a.v,0)),\n                     Packet2cf(_mm256_extractf128_ps(a.v,1))));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4cf preduxp<Packet4cf>(const Packet4cf* vecs)\n{\n  Packet8f t0 = _mm256_shuffle_ps(vecs[0].v, vecs[0].v, _MM_SHUFFLE(3, 1, 2 ,0));\n  Packet8f t1 = _mm256_shuffle_ps(vecs[1].v, vecs[1].v, _MM_SHUFFLE(3, 1, 2 ,0));\n  t0 = _mm256_hadd_ps(t0,t1);\n  Packet8f t2 = _mm256_shuffle_ps(vecs[2].v, vecs[2].v, _MM_SHUFFLE(3, 1, 2 ,0));\n  Packet8f t3 = _mm256_shuffle_ps(vecs[3].v, vecs[3].v, _MM_SHUFFLE(3, 1, 2 ,0));\n  t2 = _mm256_hadd_ps(t2,t3);\n  \n  t1 = _mm256_permute2f128_ps(t0,t2, 0 + (2<<4));\n  t3 = _mm256_permute2f128_ps(t0,t2, 1 + (3<<4));\n\n  return Packet4cf(_mm256_add_ps(t1,t3));\n}\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet4cf>(const Packet4cf& a)\n{\n  return predux_mul(pmul(Packet2cf(_mm256_extractf128_ps(a.v, 0)),\n                         Packet2cf(_mm256_extractf128_ps(a.v, 1))));\n}\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet4cf>\n{\n  static EIGEN_STRONG_INLINE void run(Packet4cf& first, const Packet4cf& second)\n  {\n    if (Offset==0) return;\n    palign_impl<Offset*2,Packet8f>::run(first.v, second.v);\n  }\n};\n\ntemplate<> struct conj_helper<Packet4cf, Packet4cf, false,true>\n{\n  EIGEN_STRONG_INLINE Packet4cf pmadd(const Packet4cf& x, const Packet4cf& y, const Packet4cf& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet4cf pmul(const Packet4cf& a, const Packet4cf& b) const\n  {\n    return internal::pmul(a, 
pconj(b));\n  }\n};\n\ntemplate<> struct conj_helper<Packet4cf, Packet4cf, true,false>\n{\n  EIGEN_STRONG_INLINE Packet4cf pmadd(const Packet4cf& x, const Packet4cf& y, const Packet4cf& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet4cf pmul(const Packet4cf& a, const Packet4cf& b) const\n  {\n    return internal::pmul(pconj(a), b);\n  }\n};\n\ntemplate<> struct conj_helper<Packet4cf, Packet4cf, true,true>\n{\n  EIGEN_STRONG_INLINE Packet4cf pmadd(const Packet4cf& x, const Packet4cf& y, const Packet4cf& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet4cf pmul(const Packet4cf& a, const Packet4cf& b) const\n  {\n    return pconj(internal::pmul(a, b));\n  }\n};\n\ntemplate<> struct conj_helper<Packet8f, Packet4cf, false,false>\n{\n  EIGEN_STRONG_INLINE Packet4cf pmadd(const Packet8f& x, const Packet4cf& y, const Packet4cf& c) const\n  { return padd(c, pmul(x,y)); }\n\n  EIGEN_STRONG_INLINE Packet4cf pmul(const Packet8f& x, const Packet4cf& y) const\n  { return Packet4cf(Eigen::internal::pmul(x, y.v)); }\n};\n\ntemplate<> struct conj_helper<Packet4cf, Packet8f, false,false>\n{\n  EIGEN_STRONG_INLINE Packet4cf pmadd(const Packet4cf& x, const Packet8f& y, const Packet4cf& c) const\n  { return padd(c, pmul(x,y)); }\n\n  EIGEN_STRONG_INLINE Packet4cf pmul(const Packet4cf& x, const Packet8f& y) const\n  { return Packet4cf(Eigen::internal::pmul(x.v, y)); }\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet4cf pdiv<Packet4cf>(const Packet4cf& a, const Packet4cf& b)\n{\n  Packet4cf num = pmul(a, pconj(b));\n  __m256 tmp = _mm256_mul_ps(b.v, b.v);\n  __m256 tmp2    = _mm256_shuffle_ps(tmp,tmp,0xB1);\n  __m256 denom = _mm256_add_ps(tmp, tmp2);\n  return Packet4cf(_mm256_div_ps(num.v, denom));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4cf pcplxflip<Packet4cf>(const Packet4cf& x)\n{\n  return Packet4cf(_mm256_shuffle_ps(x.v, x.v, _MM_SHUFFLE(2, 3, 0 ,1)));\n}\n\n//---------- double ----------\nstruct Packet2cd\n{\n  EIGEN_STRONG_INLINE 
Packet2cd() {}\n  EIGEN_STRONG_INLINE explicit Packet2cd(const __m256d& a) : v(a) {}\n  __m256d  v;\n};\n\ntemplate<> struct packet_traits<std::complex<double> >  : default_packet_traits\n{\n  typedef Packet2cd type;\n  typedef Packet1cd half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 0,\n    size = 2,\n    HasHalfPacket = 1,\n\n    HasAdd    = 1,\n    HasSub    = 1,\n    HasMul    = 1,\n    HasDiv    = 1,\n    HasNegate = 1,\n    HasAbs    = 0,\n    HasAbs2   = 0,\n    HasMin    = 0,\n    HasMax    = 0,\n    HasSetLinear = 0\n  };\n};\n\ntemplate<> struct unpacket_traits<Packet2cd> { typedef std::complex<double> type; enum {size=2, alignment=Aligned32}; typedef Packet1cd half; };\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cd padd<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_add_pd(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cd psub<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_sub_pd(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cd pnegate(const Packet2cd& a) { return Packet2cd(pnegate(a.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cd pconj(const Packet2cd& a)\n{\n  const __m256d mask = _mm256_castsi256_pd(_mm256_set_epi32(0x80000000,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0));\n  return Packet2cd(_mm256_xor_pd(a.v,mask));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cd pmul<Packet2cd>(const Packet2cd& a, const Packet2cd& b)\n{\n  __m256d tmp1 = _mm256_shuffle_pd(a.v,a.v,0x0);\n  __m256d even = _mm256_mul_pd(tmp1, b.v);\n  __m256d tmp2 = _mm256_shuffle_pd(a.v,a.v,0xF);\n  __m256d tmp3 = _mm256_shuffle_pd(b.v,b.v,0x5);\n  __m256d odd  = _mm256_mul_pd(tmp2, tmp3);\n  return Packet2cd(_mm256_addsub_pd(even, odd));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cd pand   <Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_and_pd(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cd por    <Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return 
Packet2cd(_mm256_or_pd(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cd pxor   <Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_xor_pd(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cd pandnot<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_andnot_pd(a.v,b.v)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cd pload <Packet2cd>(const std::complex<double>* from)\n{ EIGEN_DEBUG_ALIGNED_LOAD return Packet2cd(pload<Packet4d>((const double*)from)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cd ploadu<Packet2cd>(const std::complex<double>* from)\n{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cd(ploadu<Packet4d>((const double*)from)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cd pset1<Packet2cd>(const std::complex<double>& from)\n{\n  // in case casting to a __m128d* is really not safe, then we can still fallback to this version: (much slower though)\n//   return Packet2cd(_mm256_loadu2_m128d((const double*)&from,(const double*)&from));\n    return Packet2cd(_mm256_broadcast_pd((const __m128d*)(const void*)&from));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cd ploaddup<Packet2cd>(const std::complex<double>* from) { return pset1<Packet2cd>(*from); }\n\ntemplate<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> *   to, const Packet2cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> *   to, const Packet2cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet2cd pgather<std::complex<double>, Packet2cd>(const std::complex<double>* from, Index stride)\n{\n  return Packet2cd(_mm256_set_pd(std::imag(from[1*stride]), std::real(from[1*stride]),\n\t\t\t\t std::imag(from[0*stride]), std::real(from[0*stride])));\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet2cd>(std::complex<double>* to, 
const Packet2cd& from, Index stride)\n{\n  __m128d low = _mm256_extractf128_pd(from.v, 0);\n  to[stride*0] = std::complex<double>(_mm_cvtsd_f64(low), _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1)));\n  __m128d high = _mm256_extractf128_pd(from.v, 1);\n  to[stride*1] = std::complex<double>(_mm_cvtsd_f64(high), _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double> pfirst<Packet2cd>(const Packet2cd& a)\n{\n  __m128d low = _mm256_extractf128_pd(a.v, 0);\n  EIGEN_ALIGN16 double res[2];\n  _mm_store_pd(res, low);\n  return std::complex<double>(res[0],res[1]);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cd preverse(const Packet2cd& a) {\n  __m256d result = _mm256_permute2f128_pd(a.v, a.v, 1);\n  return Packet2cd(result);\n}\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet2cd>(const Packet2cd& a)\n{\n  return predux(padd(Packet1cd(_mm256_extractf128_pd(a.v,0)),\n                     Packet1cd(_mm256_extractf128_pd(a.v,1))));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cd preduxp<Packet2cd>(const Packet2cd* vecs)\n{\n  Packet4d t0 = _mm256_permute2f128_pd(vecs[0].v,vecs[1].v, 0 + (2<<4));\n  Packet4d t1 = _mm256_permute2f128_pd(vecs[0].v,vecs[1].v, 1 + (3<<4));\n\n  return Packet2cd(_mm256_add_pd(t0,t1));\n}\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet2cd>(const Packet2cd& a)\n{\n  return predux(pmul(Packet1cd(_mm256_extractf128_pd(a.v,0)),\n                     Packet1cd(_mm256_extractf128_pd(a.v,1))));\n}\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet2cd>\n{\n  static EIGEN_STRONG_INLINE void run(Packet2cd& first, const Packet2cd& second)\n  {\n    if (Offset==0) return;\n    palign_impl<Offset*2,Packet4d>::run(first.v, second.v);\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cd, Packet2cd, false,true>\n{\n  EIGEN_STRONG_INLINE Packet2cd pmadd(const Packet2cd& x, const Packet2cd& y, const Packet2cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE 
Packet2cd pmul(const Packet2cd& a, const Packet2cd& b) const\n  {\n    return internal::pmul(a, pconj(b));\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cd, Packet2cd, true,false>\n{\n  EIGEN_STRONG_INLINE Packet2cd pmadd(const Packet2cd& x, const Packet2cd& y, const Packet2cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet2cd pmul(const Packet2cd& a, const Packet2cd& b) const\n  {\n    return internal::pmul(pconj(a), b);\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cd, Packet2cd, true,true>\n{\n  EIGEN_STRONG_INLINE Packet2cd pmadd(const Packet2cd& x, const Packet2cd& y, const Packet2cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet2cd pmul(const Packet2cd& a, const Packet2cd& b) const\n  {\n    return pconj(internal::pmul(a, b));\n  }\n};\n\ntemplate<> struct conj_helper<Packet4d, Packet2cd, false,false>\n{\n  EIGEN_STRONG_INLINE Packet2cd pmadd(const Packet4d& x, const Packet2cd& y, const Packet2cd& c) const\n  { return padd(c, pmul(x,y)); }\n\n  EIGEN_STRONG_INLINE Packet2cd pmul(const Packet4d& x, const Packet2cd& y) const\n  { return Packet2cd(Eigen::internal::pmul(x, y.v)); }\n};\n\ntemplate<> struct conj_helper<Packet2cd, Packet4d, false,false>\n{\n  EIGEN_STRONG_INLINE Packet2cd pmadd(const Packet2cd& x, const Packet4d& y, const Packet2cd& c) const\n  { return padd(c, pmul(x,y)); }\n\n  EIGEN_STRONG_INLINE Packet2cd pmul(const Packet2cd& x, const Packet4d& y) const\n  { return Packet2cd(Eigen::internal::pmul(x.v, y)); }\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cd pdiv<Packet2cd>(const Packet2cd& a, const Packet2cd& b)\n{\n  Packet2cd num = pmul(a, pconj(b));\n  __m256d tmp = _mm256_mul_pd(b.v, b.v);\n  __m256d denom = _mm256_hadd_pd(tmp, tmp);\n  return Packet2cd(_mm256_div_pd(num.v, denom));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cd pcplxflip<Packet2cd>(const Packet2cd& x)\n{\n  return Packet2cd(_mm256_shuffle_pd(x.v, x.v, 0x5));\n}\n\nEIGEN_DEVICE_FUNC inline 
void\nptranspose(PacketBlock<Packet4cf,4>& kernel) {\n  __m256d P0 = _mm256_castps_pd(kernel.packet[0].v);\n  __m256d P1 = _mm256_castps_pd(kernel.packet[1].v);\n  __m256d P2 = _mm256_castps_pd(kernel.packet[2].v);\n  __m256d P3 = _mm256_castps_pd(kernel.packet[3].v);\n\n  __m256d T0 = _mm256_shuffle_pd(P0, P1, 15);\n  __m256d T1 = _mm256_shuffle_pd(P0, P1, 0);\n  __m256d T2 = _mm256_shuffle_pd(P2, P3, 15);\n  __m256d T3 = _mm256_shuffle_pd(P2, P3, 0);\n\n  kernel.packet[1].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T0, T2, 32));\n  kernel.packet[3].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T0, T2, 49));\n  kernel.packet[0].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T1, T3, 32));\n  kernel.packet[2].v = _mm256_castpd_ps(_mm256_permute2f128_pd(T1, T3, 49));\n}\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet2cd,2>& kernel) {\n  __m256d tmp = _mm256_permute2f128_pd(kernel.packet[0].v, kernel.packet[1].v, 0+(2<<4));\n  kernel.packet[1].v = _mm256_permute2f128_pd(kernel.packet[0].v, kernel.packet[1].v, 1+(3<<4));\n kernel.packet[0].v = tmp;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4cf pinsertfirst(const Packet4cf& a, std::complex<float> b)\n{\n  return Packet4cf(_mm256_blend_ps(a.v,pset1<Packet4cf>(b).v,1|2));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cd pinsertfirst(const Packet2cd& a, std::complex<double> b)\n{\n  return Packet2cd(_mm256_blend_pd(a.v,pset1<Packet2cd>(b).v,1|2));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4cf pinsertlast(const Packet4cf& a, std::complex<float> b)\n{\n  return Packet4cf(_mm256_blend_ps(a.v,pset1<Packet4cf>(b).v,(1<<7)|(1<<6)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cd pinsertlast(const Packet2cd& a, std::complex<double> b)\n{\n  return Packet2cd(_mm256_blend_pd(a.v,pset1<Packet2cd>(b).v,(1<<3)|(1<<2)));\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_COMPLEX_AVX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/AVX/MathFunctions.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2014 Pedro Gonnet (pedro.gonnet@gmail.com)\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_MATH_FUNCTIONS_AVX_H\n#define EIGEN_MATH_FUNCTIONS_AVX_H\n\n/* The sin, cos, exp, and log functions of this file are loosely derived from\n * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/\n */\n\nnamespace Eigen {\n\nnamespace internal {\n\ninline Packet8i pshiftleft(Packet8i v, int n)\n{\n#ifdef EIGEN_VECTORIZE_AVX2\n  return _mm256_slli_epi32(v, n);\n#else\n  __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(v, 0), n);\n  __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(v, 1), n);\n  return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);\n#endif\n}\n\ninline Packet8f pshiftright(Packet8f v, int n)\n{\n#ifdef EIGEN_VECTORIZE_AVX2\n  return _mm256_cvtepi32_ps(_mm256_srli_epi32(_mm256_castps_si256(v), n));\n#else\n  __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(_mm256_castps_si256(v), 0), n);\n  __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(_mm256_castps_si256(v), 1), n);\n  return _mm256_cvtepi32_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1));\n#endif\n}\n\n// Sine function\n// Computes sin(x) by wrapping x to the interval [-Pi/4,3*Pi/4] and\n// evaluating interpolants in [-Pi/4,Pi/4] or [Pi/4,3*Pi/4]. 
The interpolants\n// are (anti-)symmetric and thus have only odd/even coefficients\ntemplate <>\nEIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f\npsin<Packet8f>(const Packet8f& _x) {\n  Packet8f x = _x;\n\n  // Some useful values.\n  _EIGEN_DECLARE_CONST_Packet8i(one, 1);\n  _EIGEN_DECLARE_CONST_Packet8f(one, 1.0f);\n  _EIGEN_DECLARE_CONST_Packet8f(two, 2.0f);\n  _EIGEN_DECLARE_CONST_Packet8f(one_over_four, 0.25f);\n  _EIGEN_DECLARE_CONST_Packet8f(one_over_pi, 3.183098861837907e-01f);\n  _EIGEN_DECLARE_CONST_Packet8f(neg_pi_first, -3.140625000000000e+00f);\n  _EIGEN_DECLARE_CONST_Packet8f(neg_pi_second, -9.670257568359375e-04f);\n  _EIGEN_DECLARE_CONST_Packet8f(neg_pi_third, -6.278329571784980e-07f);\n  _EIGEN_DECLARE_CONST_Packet8f(four_over_pi, 1.273239544735163e+00f);\n\n  // Map x from [-Pi/4,3*Pi/4] to z in [-1,3] and subtract the shifted period.\n  Packet8f z = pmul(x, p8f_one_over_pi);\n  Packet8f shift = _mm256_floor_ps(padd(z, p8f_one_over_four));\n  x = pmadd(shift, p8f_neg_pi_first, x);\n  x = pmadd(shift, p8f_neg_pi_second, x);\n  x = pmadd(shift, p8f_neg_pi_third, x);\n  z = pmul(x, p8f_four_over_pi);\n\n  // Make a mask for the entries that need flipping, i.e. wherever the shift\n  // is odd.\n  Packet8i shift_ints = _mm256_cvtps_epi32(shift);\n  Packet8i shift_isodd = _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(shift_ints), _mm256_castsi256_ps(p8i_one)));\n  Packet8i sign_flip_mask = pshiftleft(shift_isodd, 31);\n\n  // Create a mask for which interpolant to use, i.e. 
if z > 1, then the mask\n  // is set to ones for that entry.\n  Packet8f ival_mask = _mm256_cmp_ps(z, p8f_one, _CMP_GT_OQ);\n\n  // Evaluate the polynomial for the interval [1,3] in z.\n  _EIGEN_DECLARE_CONST_Packet8f(coeff_right_0, 9.999999724233232e-01f);\n  _EIGEN_DECLARE_CONST_Packet8f(coeff_right_2, -3.084242535619928e-01f);\n  _EIGEN_DECLARE_CONST_Packet8f(coeff_right_4, 1.584991525700324e-02f);\n  _EIGEN_DECLARE_CONST_Packet8f(coeff_right_6, -3.188805084631342e-04f);\n  Packet8f z_minus_two = psub(z, p8f_two);\n  Packet8f z_minus_two2 = pmul(z_minus_two, z_minus_two);\n  Packet8f right = pmadd(p8f_coeff_right_6, z_minus_two2, p8f_coeff_right_4);\n  right = pmadd(right, z_minus_two2, p8f_coeff_right_2);\n  right = pmadd(right, z_minus_two2, p8f_coeff_right_0);\n\n  // Evaluate the polynomial for the interval [-1,1] in z.\n  _EIGEN_DECLARE_CONST_Packet8f(coeff_left_1, 7.853981525427295e-01f);\n  _EIGEN_DECLARE_CONST_Packet8f(coeff_left_3, -8.074536727092352e-02f);\n  _EIGEN_DECLARE_CONST_Packet8f(coeff_left_5, 2.489871967827018e-03f);\n  _EIGEN_DECLARE_CONST_Packet8f(coeff_left_7, -3.587725841214251e-05f);\n  Packet8f z2 = pmul(z, z);\n  Packet8f left = pmadd(p8f_coeff_left_7, z2, p8f_coeff_left_5);\n  left = pmadd(left, z2, p8f_coeff_left_3);\n  left = pmadd(left, z2, p8f_coeff_left_1);\n  left = pmul(left, z);\n\n  // Assemble the results, i.e. select the left and right polynomials.\n  left = _mm256_andnot_ps(ival_mask, left);\n  right = _mm256_and_ps(ival_mask, right);\n  Packet8f res = _mm256_or_ps(left, right);\n\n  // Flip the sign on the odd intervals and return the result.\n  res = _mm256_xor_ps(res, _mm256_castsi256_ps(sign_flip_mask));\n  return res;\n}\n\n// Natural logarithm\n// Computes log(x) as log(2^e * m) = C*e + log(m), where the constant C =log(2)\n// and m is in the range [sqrt(1/2),sqrt(2)). 
In this range, the logarithm can\n// be easily approximated by a polynomial centered on m=1 for stability.\n// TODO(gonnet): Further reduce the interval allowing for lower-degree\n//               polynomial interpolants -> ... -> profit!\ntemplate <>\nEIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f\nplog<Packet8f>(const Packet8f& _x) {\n  Packet8f x = _x;\n  _EIGEN_DECLARE_CONST_Packet8f(1, 1.0f);\n  _EIGEN_DECLARE_CONST_Packet8f(half, 0.5f);\n  _EIGEN_DECLARE_CONST_Packet8f(126f, 126.0f);\n\n  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(inv_mant_mask, ~0x7f800000);\n\n  // The smallest non denormalized float number.\n  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(min_norm_pos, 0x00800000);\n  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(minus_inf, 0xff800000);\n\n  // Polynomial coefficients.\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_SQRTHF, 0.707106781186547524f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p0, 7.0376836292E-2f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p1, -1.1514610310E-1f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p2, 1.1676998740E-1f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p3, -1.2420140846E-1f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p4, +1.4249322787E-1f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p5, -1.6668057665E-1f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p6, +2.0000714765E-1f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p7, -2.4999993993E-1f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_p8, +3.3333331174E-1f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_q1, -2.12194440e-4f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_log_q2, 0.693359375f);\n\n  Packet8f invalid_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_NGE_UQ); // not greater equal is true if x is NaN\n  Packet8f iszero_mask = _mm256_cmp_ps(x, _mm256_setzero_ps(), _CMP_EQ_OQ);\n\n  // Truncate input values to the minimum positive normal.\n  x = pmax(x, p8f_min_norm_pos);\n\n  Packet8f emm0 = pshiftright(x,23);\n  Packet8f e = _mm256_sub_ps(emm0, 
p8f_126f);\n\n  // Set the exponents to -1, i.e. x are in the range [0.5,1).\n  x = _mm256_and_ps(x, p8f_inv_mant_mask);\n  x = _mm256_or_ps(x, p8f_half);\n\n  // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))\n  // and shift by -1. The values are then centered around 0, which improves\n  // the stability of the polynomial evaluation.\n  //   if( x < SQRTHF ) {\n  //     e -= 1;\n  //     x = x + x - 1.0;\n  //   } else { x = x - 1.0; }\n  Packet8f mask = _mm256_cmp_ps(x, p8f_cephes_SQRTHF, _CMP_LT_OQ);\n  Packet8f tmp = _mm256_and_ps(x, mask);\n  x = psub(x, p8f_1);\n  e = psub(e, _mm256_and_ps(p8f_1, mask));\n  x = padd(x, tmp);\n\n  Packet8f x2 = pmul(x, x);\n  Packet8f x3 = pmul(x2, x);\n\n  // Evaluate the polynomial approximant of degree 8 in three parts, probably\n  // to improve instruction-level parallelism.\n  Packet8f y, y1, y2;\n  y = pmadd(p8f_cephes_log_p0, x, p8f_cephes_log_p1);\n  y1 = pmadd(p8f_cephes_log_p3, x, p8f_cephes_log_p4);\n  y2 = pmadd(p8f_cephes_log_p6, x, p8f_cephes_log_p7);\n  y = pmadd(y, x, p8f_cephes_log_p2);\n  y1 = pmadd(y1, x, p8f_cephes_log_p5);\n  y2 = pmadd(y2, x, p8f_cephes_log_p8);\n  y = pmadd(y, x3, y1);\n  y = pmadd(y, x3, y2);\n  y = pmul(y, x3);\n\n  // Add the logarithm of the exponent back to the result of the interpolation.\n  y1 = pmul(e, p8f_cephes_log_q1);\n  tmp = pmul(x2, p8f_half);\n  y = padd(y, y1);\n  x = psub(x, tmp);\n  y2 = pmul(e, p8f_cephes_log_q2);\n  x = padd(x, y);\n  x = padd(x, y2);\n\n  // Filter out invalid inputs, i.e. negative arg will be NAN, 0 will be -INF.\n  return _mm256_or_ps(\n      _mm256_andnot_ps(iszero_mask, _mm256_or_ps(x, invalid_mask)),\n      _mm256_and_ps(iszero_mask, p8f_minus_inf));\n}\n\n// Exponential function. Works by writing \"x = m*log(2) + r\" where\n// \"m = floor(x/log(2)+1/2)\" and \"r\" is the remainder. 
The result is then\n// \"exp(x) = 2^m*exp(r)\" where exp(r) is in the range [-1,1).\ntemplate <>\nEIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f\npexp<Packet8f>(const Packet8f& _x) {\n  _EIGEN_DECLARE_CONST_Packet8f(1, 1.0f);\n  _EIGEN_DECLARE_CONST_Packet8f(half, 0.5f);\n  _EIGEN_DECLARE_CONST_Packet8f(127, 127.0f);\n\n  _EIGEN_DECLARE_CONST_Packet8f(exp_hi, 88.3762626647950f);\n  _EIGEN_DECLARE_CONST_Packet8f(exp_lo, -88.3762626647949f);\n\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_LOG2EF, 1.44269504088896341f);\n\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p0, 1.9875691500E-4f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p1, 1.3981999507E-3f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p2, 8.3334519073E-3f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p3, 4.1665795894E-2f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p4, 1.6666665459E-1f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_p5, 5.0000001201E-1f);\n\n  // Clamp x.\n  Packet8f x = pmax(pmin(_x, p8f_exp_hi), p8f_exp_lo);\n\n  // Express exp(x) as exp(m*ln(2) + r), start by extracting\n  // m = floor(x/ln(2) + 0.5).\n  Packet8f m = _mm256_floor_ps(pmadd(x, p8f_cephes_LOG2EF, p8f_half));\n\n// Get r = x - m*ln(2). If no FMA instructions are available, m*ln(2) is\n// subtracted out in two parts, m*C1+m*C2 = m*ln(2), to avoid accumulating\n// truncation errors. 
Note that we don't use the \"pmadd\" function here to\n// ensure that a precision-preserving FMA instruction is used.\n#ifdef EIGEN_VECTORIZE_FMA\n  _EIGEN_DECLARE_CONST_Packet8f(nln2, -0.6931471805599453f);\n  Packet8f r = _mm256_fmadd_ps(m, p8f_nln2, x);\n#else\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_C1, 0.693359375f);\n  _EIGEN_DECLARE_CONST_Packet8f(cephes_exp_C2, -2.12194440e-4f);\n  Packet8f r = psub(x, pmul(m, p8f_cephes_exp_C1));\n  r = psub(r, pmul(m, p8f_cephes_exp_C2));\n#endif\n\n  Packet8f r2 = pmul(r, r);\n\n  // TODO(gonnet): Split into odd/even polynomials and try to exploit\n  //               instruction-level parallelism.\n  Packet8f y = p8f_cephes_exp_p0;\n  y = pmadd(y, r, p8f_cephes_exp_p1);\n  y = pmadd(y, r, p8f_cephes_exp_p2);\n  y = pmadd(y, r, p8f_cephes_exp_p3);\n  y = pmadd(y, r, p8f_cephes_exp_p4);\n  y = pmadd(y, r, p8f_cephes_exp_p5);\n  y = pmadd(y, r2, r);\n  y = padd(y, p8f_1);\n\n  // Build emm0 = 2^m.\n  Packet8i emm0 = _mm256_cvttps_epi32(padd(m, p8f_127));\n  emm0 = pshiftleft(emm0, 23);\n\n  // Return 2^m * exp(r).\n  return pmax(pmul(y, _mm256_castsi256_ps(emm0)), _x);\n}\n\n// Hyperbolic Tangent function.\ntemplate <>\nEIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f\nptanh<Packet8f>(const Packet8f& x) {\n  return internal::generic_fast_tanh_float(x);\n}\n\ntemplate <>\nEIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4d\npexp<Packet4d>(const Packet4d& _x) {\n  Packet4d x = _x;\n\n  _EIGEN_DECLARE_CONST_Packet4d(1, 1.0);\n  _EIGEN_DECLARE_CONST_Packet4d(2, 2.0);\n  _EIGEN_DECLARE_CONST_Packet4d(half, 0.5);\n\n  _EIGEN_DECLARE_CONST_Packet4d(exp_hi, 709.437);\n  _EIGEN_DECLARE_CONST_Packet4d(exp_lo, -709.436139303);\n\n  _EIGEN_DECLARE_CONST_Packet4d(cephes_LOG2EF, 1.4426950408889634073599);\n\n  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p0, 1.26177193074810590878e-4);\n  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p1, 3.02994407707441961300e-2);\n  
_EIGEN_DECLARE_CONST_Packet4d(cephes_exp_p2, 9.99999999999999999910e-1);\n\n  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q0, 3.00198505138664455042e-6);\n  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q1, 2.52448340349684104192e-3);\n  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q2, 2.27265548208155028766e-1);\n  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_q3, 2.00000000000000000009e0);\n\n  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_C1, 0.693145751953125);\n  _EIGEN_DECLARE_CONST_Packet4d(cephes_exp_C2, 1.42860682030941723212e-6);\n  _EIGEN_DECLARE_CONST_Packet4i(1023, 1023);\n\n  Packet4d tmp, fx;\n\n  // clamp x\n  x = pmax(pmin(x, p4d_exp_hi), p4d_exp_lo);\n  // Express exp(x) as exp(g + n*log(2)).\n  fx = pmadd(p4d_cephes_LOG2EF, x, p4d_half);\n\n  // Get the integer modulus of log(2), i.e. the \"n\" described above.\n  fx = _mm256_floor_pd(fx);\n\n  // Get the remainder modulo log(2), i.e. the \"g\" described above. Subtract\n  // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last\n  // digits right.\n  tmp = pmul(fx, p4d_cephes_exp_C1);\n  Packet4d z = pmul(fx, p4d_cephes_exp_C2);\n  x = psub(x, tmp);\n  x = psub(x, z);\n\n  Packet4d x2 = pmul(x, x);\n\n  // Evaluate the numerator polynomial of the rational interpolant.\n  Packet4d px = p4d_cephes_exp_p0;\n  px = pmadd(px, x2, p4d_cephes_exp_p1);\n  px = pmadd(px, x2, p4d_cephes_exp_p2);\n  px = pmul(px, x);\n\n  // Evaluate the denominator polynomial of the rational interpolant.\n  Packet4d qx = p4d_cephes_exp_q0;\n  qx = pmadd(qx, x2, p4d_cephes_exp_q1);\n  qx = pmadd(qx, x2, p4d_cephes_exp_q2);\n  qx = pmadd(qx, x2, p4d_cephes_exp_q3);\n\n  // I don't really get this bit, copied from the SSE2 routines, so...\n  // TODO(gonnet): Figure out what is going on here, perhaps find a better\n  // rational interpolant?\n  x = _mm256_div_pd(px, psub(qx, px));\n  x = pmadd(p4d_2, x, p4d_1);\n\n  // Build e=2^n by constructing the exponents in a 128-bit vector and\n  // shifting them to where they belong in 
double-precision values.\n  __m128i emm0 = _mm256_cvtpd_epi32(fx);\n  emm0 = _mm_add_epi32(emm0, p4i_1023);\n  emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(3, 1, 2, 0));\n  __m128i lo = _mm_slli_epi64(emm0, 52);\n  __m128i hi = _mm_slli_epi64(_mm_srli_epi64(emm0, 32), 52);\n  __m256i e = _mm256_insertf128_si256(_mm256_setzero_si256(), lo, 0);\n  e = _mm256_insertf128_si256(e, hi, 1);\n\n  // Construct the result 2^n * exp(g) = e * x. The max is used to catch\n  // non-finite values in the input.\n  return pmax(pmul(x, _mm256_castsi256_pd(e)), _x);\n}\n\n// Functions for sqrt.\n// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step\n// of Newton's method, at a cost of 1-2 bits of precision as opposed to the\n// exact solution. It does not handle +inf, or denormalized numbers correctly.\n// The main advantage of this approach is not just speed, but also the fact that\n// it can be inlined and pipelined with other computations, further reducing its\n// effective latency. 
This is similar to Quake3's fast inverse square root.\n// For detail see here: http://www.beyond3d.com/content/articles/8/\n#if EIGEN_FAST_MATH\ntemplate <>\nEIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8f\npsqrt<Packet8f>(const Packet8f& _x) {\n  Packet8f half = pmul(_x, pset1<Packet8f>(.5f));\n  Packet8f denormal_mask = _mm256_and_ps(\n      _mm256_cmp_ps(_x, pset1<Packet8f>((std::numeric_limits<float>::min)()),\n                    _CMP_LT_OQ),\n      _mm256_cmp_ps(_x, _mm256_setzero_ps(), _CMP_GE_OQ));\n\n  // Compute approximate reciprocal sqrt.\n  Packet8f x = _mm256_rsqrt_ps(_x);\n  // Do a single step of Newton's iteration.\n  x = pmul(x, psub(pset1<Packet8f>(1.5f), pmul(half, pmul(x,x))));\n  // Flush results for denormals to zero.\n  return _mm256_andnot_ps(denormal_mask, pmul(_x,x));\n}\n#else\ntemplate <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket8f psqrt<Packet8f>(const Packet8f& x) {\n  return _mm256_sqrt_ps(x);\n}\n#endif\ntemplate <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4d psqrt<Packet4d>(const Packet4d& x) {\n  return _mm256_sqrt_pd(x);\n}\n#if EIGEN_FAST_MATH\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket8f prsqrt<Packet8f>(const Packet8f& _x) {\n  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(inf, 0x7f800000);\n  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(nan, 0x7fc00000);\n  _EIGEN_DECLARE_CONST_Packet8f(one_point_five, 1.5f);\n  _EIGEN_DECLARE_CONST_Packet8f(minus_half, -0.5f);\n  _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(flt_min, 0x00800000);\n\n  Packet8f neg_half = pmul(_x, p8f_minus_half);\n\n  // select only the inverse sqrt of positive normal inputs (denormals are\n  // flushed to zero and cause infs as well).\n  Packet8f le_zero_mask = _mm256_cmp_ps(_x, p8f_flt_min, _CMP_LT_OQ);\n  Packet8f x = _mm256_andnot_ps(le_zero_mask, _mm256_rsqrt_ps(_x));\n\n  // Fill in NaNs and Infs for the negative/zero entries.\n  Packet8f 
neg_mask = _mm256_cmp_ps(_x, _mm256_setzero_ps(), _CMP_LT_OQ);\n  Packet8f zero_mask = _mm256_andnot_ps(neg_mask, le_zero_mask);\n  Packet8f infs_and_nans = _mm256_or_ps(_mm256_and_ps(neg_mask, p8f_nan),\n                                        _mm256_and_ps(zero_mask, p8f_inf));\n\n  // Do a single step of Newton's iteration.\n  x = pmul(x, pmadd(neg_half, pmul(x, x), p8f_one_point_five));\n\n  // Insert NaNs and Infs in all the right places.\n  return _mm256_or_ps(x, infs_and_nans);\n}\n\n#else\ntemplate <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket8f prsqrt<Packet8f>(const Packet8f& x) {\n  _EIGEN_DECLARE_CONST_Packet8f(one, 1.0f);\n  return _mm256_div_ps(p8f_one, _mm256_sqrt_ps(x));\n}\n#endif\n\ntemplate <> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4d prsqrt<Packet4d>(const Packet4d& x) {\n  _EIGEN_DECLARE_CONST_Packet4d(one, 1.0);\n  return _mm256_div_pd(p4d_one, _mm256_sqrt_pd(x));\n}\n\n\n}  // end namespace internal\n\n}  // end namespace Eigen\n\n#endif  // EIGEN_MATH_FUNCTIONS_AVX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/AVX/PacketMath.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com)\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PACKET_MATH_AVX_H\n#define EIGEN_PACKET_MATH_AVX_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD\n#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8\n#endif\n\n#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS\n#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))\n#endif\n\n#ifdef __FMA__\n#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD\n#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD\n#endif\n#endif\n\ntypedef __m256  Packet8f;\ntypedef __m256i Packet8i;\ntypedef __m256d Packet4d;\n\ntemplate<> struct is_arithmetic<__m256>  { enum { value = true }; };\ntemplate<> struct is_arithmetic<__m256i> { enum { value = true }; };\ntemplate<> struct is_arithmetic<__m256d> { enum { value = true }; };\n\n#define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \\\n  const Packet8f p8f_##NAME = pset1<Packet8f>(X)\n\n#define _EIGEN_DECLARE_CONST_Packet4d(NAME,X) \\\n  const Packet4d p4d_##NAME = pset1<Packet4d>(X)\n\n#define _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(NAME,X) \\\n  const Packet8f p8f_##NAME = _mm256_castsi256_ps(pset1<Packet8i>(X))\n\n#define _EIGEN_DECLARE_CONST_Packet8i(NAME,X) \\\n  const Packet8i p8i_##NAME = pset1<Packet8i>(X)\n\n// Use the packet_traits defined in AVX512/PacketMath.h instead if we're going\n// to leverage AVX512 instructions.\n#ifndef EIGEN_VECTORIZE_AVX512\ntemplate<> struct packet_traits<float>  : default_packet_traits\n{\n  typedef Packet8f type;\n  typedef Packet4f half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=8,\n    HasHalfPacket = 1,\n\n    HasDiv  = 1,\n    HasSin  = 
EIGEN_FAST_MATH,\n    HasCos  = 0,\n    HasLog  = 1,\n    HasExp  = 1,\n    HasSqrt = 1,\n    HasRsqrt = 1,\n    HasTanh  = EIGEN_FAST_MATH,\n    HasBlend = 1,\n    HasRound = 1,\n    HasFloor = 1,\n    HasCeil = 1\n  };\n};\ntemplate<> struct packet_traits<double> : default_packet_traits\n{\n  typedef Packet4d type;\n  typedef Packet2d half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=4,\n    HasHalfPacket = 1,\n\n    HasDiv  = 1,\n    HasExp  = 1,\n    HasSqrt = 1,\n    HasRsqrt = 1,\n    HasBlend = 1,\n    HasRound = 1,\n    HasFloor = 1,\n    HasCeil = 1\n  };\n};\n#endif\n\ntemplate<> struct scalar_div_cost<float,true> { enum { value = 14 }; };\ntemplate<> struct scalar_div_cost<double,true> { enum { value = 16 }; };\n\n/* Proper support for integers is only provided by AVX2. In the meantime, we'll\n   use SSE instructions and packets to deal with integers.\ntemplate<> struct packet_traits<int>    : default_packet_traits\n{\n  typedef Packet8i type;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=8\n  };\n};\n*/\n\ntemplate<> struct unpacket_traits<Packet8f> { typedef float  type; typedef Packet4f half; enum {size=8, alignment=Aligned32}; };\ntemplate<> struct unpacket_traits<Packet4d> { typedef double type; typedef Packet2d half; enum {size=4, alignment=Aligned32}; };\ntemplate<> struct unpacket_traits<Packet8i> { typedef int    type; typedef Packet4i half; enum {size=8, alignment=Aligned32}; };\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float&  from) { return _mm256_set1_ps(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int&    from) { return _mm256_set1_epi32(from); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float*  from) { return _mm256_broadcast_ss(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* 
from) { return _mm256_broadcast_sd(from); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(const float& a) { return _mm256_add_ps(_mm256_set1_ps(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) { return _mm256_add_pd(_mm256_set1_pd(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_add_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_add_pd(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a)\n{\n  return _mm256_sub_ps(_mm256_set1_ps(0.0),a);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4d pnegate(const Packet4d& a)\n{\n  return _mm256_sub_pd(_mm256_set1_pd(0.0),a);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pconj(const Packet8f& a) { return a; }\ntemplate<> EIGEN_STRONG_INLINE Packet4d pconj(const Packet4d& a) { return a; }\ntemplate<> EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) { return a; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_mul_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_mul_pd(a,b); }\n\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_div_pd(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, const Packet8i& /*b*/)\n{ eigen_assert(false && \"packet 
integer division are not supported by AVX\");\n  return pset1<Packet8i>(0);\n}\n\n#ifdef __FMA__\ntemplate<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {\n#if ( EIGEN_COMP_GNUC_STRICT || (EIGEN_COMP_CLANG && (EIGEN_COMP_CLANG<308)) )\n  // clang stupidly generates a vfmadd213ps instruction plus some vmovaps on registers,\n  // and gcc stupidly generates a vfmadd132ps instruction,\n  // so let's enforce it to generate a vfmadd231ps instruction since the most common use case is to accumulate\n  // the result of the product.\n  Packet8f res = c;\n  __asm__(\"vfmadd231ps %[a], %[b], %[c]\" : [c] \"+x\" (res) : [a] \"x\" (a), [b] \"x\" (b));\n  return res;\n#else\n  return _mm256_fmadd_ps(a,b,c);\n#endif\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {\n#if ( EIGEN_COMP_GNUC_STRICT || (EIGEN_COMP_CLANG && (EIGEN_COMP_CLANG<308)) )\n  // see above\n  Packet4d res = c;\n  __asm__(\"vfmadd231pd %[a], %[b], %[c]\" : [c] \"+x\" (res) : [a] \"x\" (a), [b] \"x\" (b));\n  return res;\n#else\n  return _mm256_fmadd_pd(a,b,c);\n#endif\n}\n#endif\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) {\n  // Arguments are swapped to match NaN propagation behavior of std::min.\n  return _mm256_min_ps(b,a);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) {\n  // Arguments are swapped to match NaN propagation behavior of std::min.\n  return _mm256_min_pd(b,a);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) {\n  // Arguments are swapped to match NaN propagation behavior of std::max.\n  return _mm256_max_ps(b,a);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) {\n  // Arguments are swapped to match NaN propagation behavior of std::max.\n  return _mm256_max_pd(b,a);\n}\ntemplate<> 
EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) { return _mm256_ceil_ps(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { return _mm256_ceil_pd(a); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) { return _mm256_floor_ps(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) { return _mm256_floor_pd(a); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_and_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_and_pd(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }\ntemplate<> 
EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }\n\n// Loads 4 floats from memory a returns the packet {a0, a0  a1, a1, a2, a2, a3, a3}\ntemplate<> EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from)\n{\n  // TODO try to find a way to avoid the need of a temporary register\n//   Packet8f tmp  = _mm256_castps128_ps256(_mm_loadu_ps(from));\n//   tmp = _mm256_insertf128_ps(tmp, _mm_movehl_ps(_mm256_castps256_ps128(tmp),_mm256_castps256_ps128(tmp)), 1);\n//   return _mm256_unpacklo_ps(tmp,tmp);\n\n  // _mm256_insertf128_ps is very slow on Haswell, thus:\n  Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);\n  // mimic an \"inplace\" permutation of the lower 128bits using a blend\n  tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15);\n  // then we can perform a consistent permutation on the global register to get everything in shape:\n  return  _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2));\n}\n// Loads 2 doubles from memory a returns the packet {a0, a0  a1, a1}\ntemplate<> EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from)\n{\n  Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);\n  return  _mm256_permute_pd(tmp, 3<<2);\n}\n\n// Loads 2 floats from memory a returns the packet {a0, a0  a0, a0, a1, a1, a1, a1}\ntemplate<> EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const 
float* from)\n{\n  Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));\n  return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }\ntemplate<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }\ntemplate<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }\n\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<float>(float*   to, const Packet8f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<int>(int*       to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }\n\n// NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available\n// NOTE: for the record the following seems to be slower: return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4);\ntemplate<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)\n{\n  return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],\n                       from[3*stride], from[2*stride], from[1*stride], from[0*stride]);\n}\ntemplate<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride)\n{\n  return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride)\n{\n  __m128 low = _mm256_extractf128_ps(from, 0);\n  to[stride*0] = 
_mm_cvtss_f32(low);\n  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));\n  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));\n  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));\n\n  __m128 high = _mm256_extractf128_ps(from, 1);\n  to[stride*4] = _mm_cvtss_f32(high);\n  to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));\n  to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));\n  to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));\n}\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride)\n{\n  __m128d low = _mm256_extractf128_pd(from, 0);\n  to[stride*0] = _mm_cvtsd_f64(low);\n  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));\n  __m128d high = _mm256_extractf128_pd(from, 1);\n  to[stride*2] = _mm_cvtsd_f64(high);\n  to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstore1<Packet8f>(float* to, const float& a)\n{\n  Packet8f pa = pset1<Packet8f>(a);\n  pstore(to, pa);\n}\ntemplate<> EIGEN_STRONG_INLINE void pstore1<Packet4d>(double* to, const double& a)\n{\n  Packet4d pa = pset1<Packet4d>(a);\n  pstore(to, pa);\n}\ntemplate<> EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a)\n{\n  Packet8i pa = pset1<Packet8i>(a);\n  pstore(to, pa);\n}\n\n#ifndef EIGEN_VECTORIZE_AVX512\ntemplate<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }\ntemplate<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }\ntemplate<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }\n#endif\n\ntemplate<> EIGEN_STRONG_INLINE float  pfirst<Packet8f>(const Packet8f& a) {\n  return _mm_cvtss_f32(_mm256_castps256_ps128(a));\n}\ntemplate<> EIGEN_STRONG_INLINE double pfirst<Packet4d>(const Packet4d& a) {\n  return 
_mm_cvtsd_f64(_mm256_castpd256_pd128(a));\n}\ntemplate<> EIGEN_STRONG_INLINE int    pfirst<Packet8i>(const Packet8i& a) {\n  return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));\n}\n\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a)\n{\n  __m256 tmp = _mm256_shuffle_ps(a,a,0x1b);\n  return _mm256_permute2f128_ps(tmp, tmp, 1);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a)\n{\n   __m256d tmp = _mm256_shuffle_pd(a,a,5);\n  return _mm256_permute2f128_pd(tmp, tmp, 1);\n\n  __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);\n    return _mm256_permute_pd(swap_halves,5);\n}\n\n// pabs should be ok\ntemplate<> EIGEN_STRONG_INLINE Packet8f pabs(const Packet8f& a)\n{\n  const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));\n  return _mm256_and_ps(a,mask);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a)\n{\n  const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));\n  return _mm256_and_pd(a,mask);\n}\n\n// preduxp should be ok\n// FIXME: why is this ok? 
why isn't the simply implementation working as expected?\ntemplate<> EIGEN_STRONG_INLINE Packet8f preduxp<Packet8f>(const Packet8f* vecs)\n{\n    __m256 hsum1 = _mm256_hadd_ps(vecs[0], vecs[1]);\n    __m256 hsum2 = _mm256_hadd_ps(vecs[2], vecs[3]);\n    __m256 hsum3 = _mm256_hadd_ps(vecs[4], vecs[5]);\n    __m256 hsum4 = _mm256_hadd_ps(vecs[6], vecs[7]);\n\n    __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);\n    __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);\n    __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);\n    __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);\n\n    __m256 perm1 =  _mm256_permute2f128_ps(hsum5, hsum5, 0x23);\n    __m256 perm2 =  _mm256_permute2f128_ps(hsum6, hsum6, 0x23);\n    __m256 perm3 =  _mm256_permute2f128_ps(hsum7, hsum7, 0x23);\n    __m256 perm4 =  _mm256_permute2f128_ps(hsum8, hsum8, 0x23);\n\n    __m256 sum1 = _mm256_add_ps(perm1, hsum5);\n    __m256 sum2 = _mm256_add_ps(perm2, hsum6);\n    __m256 sum3 = _mm256_add_ps(perm3, hsum7);\n    __m256 sum4 = _mm256_add_ps(perm4, hsum8);\n\n    __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);\n    __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);\n\n    __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);\n    return final;\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4d preduxp<Packet4d>(const Packet4d* vecs)\n{\n Packet4d tmp0, tmp1;\n\n  tmp0 = _mm256_hadd_pd(vecs[0], vecs[1]);\n  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));\n\n  tmp1 = _mm256_hadd_pd(vecs[2], vecs[3]);\n  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));\n\n  return _mm256_blend_pd(tmp0, tmp1, 0xC);\n}\n\ntemplate<> EIGEN_STRONG_INLINE float predux<Packet8f>(const Packet8f& a)\n{\n  return predux(Packet4f(_mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1))));\n}\ntemplate<> EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a)\n{\n  return predux(Packet2d(_mm_add_pd(_mm256_castpd256_pd128(a),_mm256_extractf128_pd(a,1))));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f 
predux_downto4<Packet8f>(const Packet8f& a)\n{\n  return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));\n}\n\ntemplate<> EIGEN_STRONG_INLINE float predux_mul<Packet8f>(const Packet8f& a)\n{\n  Packet8f tmp;\n  tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1));\n  tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));\n  return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));\n}\ntemplate<> EIGEN_STRONG_INLINE double predux_mul<Packet4d>(const Packet4d& a)\n{\n  Packet4d tmp;\n  tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1));\n  return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE float predux_min<Packet8f>(const Packet8f& a)\n{\n  Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1));\n  tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));\n  return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));\n}\ntemplate<> EIGEN_STRONG_INLINE double predux_min<Packet4d>(const Packet4d& a)\n{\n  Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1));\n  return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE float predux_max<Packet8f>(const Packet8f& a)\n{\n  Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1));\n  tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));\n  return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE double predux_max<Packet4d>(const Packet4d& a)\n{\n  Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1));\n  return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));\n}\n\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet8f>\n{\n  static EIGEN_STRONG_INLINE void run(Packet8f& first, const Packet8f& second)\n  {\n    if (Offset==1)\n    {\n      first = _mm256_blend_ps(first, second, 1);\n      Packet8f tmp1 = _mm256_permute_ps (first, 
_MM_SHUFFLE(0,3,2,1));\n      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);\n      first = _mm256_blend_ps(tmp1, tmp2, 0x88);\n    }\n    else if (Offset==2)\n    {\n      first = _mm256_blend_ps(first, second, 3);\n      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));\n      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);\n      first = _mm256_blend_ps(tmp1, tmp2, 0xcc);\n    }\n    else if (Offset==3)\n    {\n      first = _mm256_blend_ps(first, second, 7);\n      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));\n      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);\n      first = _mm256_blend_ps(tmp1, tmp2, 0xee);\n    }\n    else if (Offset==4)\n    {\n      first = _mm256_blend_ps(first, second, 15);\n      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(3,2,1,0));\n      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);\n      first = _mm256_permute_ps(tmp2, _MM_SHUFFLE(3,2,1,0));\n    }\n    else if (Offset==5)\n    {\n      first = _mm256_blend_ps(first, second, 31);\n      first = _mm256_permute2f128_ps(first, first, 1);\n      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));\n      first = _mm256_permute2f128_ps(tmp, tmp, 1);\n      first = _mm256_blend_ps(tmp, first, 0x88);\n    }\n    else if (Offset==6)\n    {\n      first = _mm256_blend_ps(first, second, 63);\n      first = _mm256_permute2f128_ps(first, first, 1);\n      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));\n      first = _mm256_permute2f128_ps(tmp, tmp, 1);\n      first = _mm256_blend_ps(tmp, first, 0xcc);\n    }\n    else if (Offset==7)\n    {\n      first = _mm256_blend_ps(first, second, 127);\n      first = _mm256_permute2f128_ps(first, first, 1);\n      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));\n      first = _mm256_permute2f128_ps(tmp, tmp, 1);\n      first = _mm256_blend_ps(tmp, first, 0xee);\n    }\n  }\n};\n\ntemplate<int Offset>\nstruct 
palign_impl<Offset,Packet4d>\n{\n  static EIGEN_STRONG_INLINE void run(Packet4d& first, const Packet4d& second)\n  {\n    if (Offset==1)\n    {\n      first = _mm256_blend_pd(first, second, 1);\n      __m256d tmp = _mm256_permute_pd(first, 5);\n      first = _mm256_permute2f128_pd(tmp, tmp, 1);\n      first = _mm256_blend_pd(tmp, first, 0xA);\n    }\n    else if (Offset==2)\n    {\n      first = _mm256_blend_pd(first, second, 3);\n      first = _mm256_permute2f128_pd(first, first, 1);\n    }\n    else if (Offset==3)\n    {\n      first = _mm256_blend_pd(first, second, 7);\n      __m256d tmp = _mm256_permute_pd(first, 5);\n      first = _mm256_permute2f128_pd(tmp, tmp, 1);\n      first = _mm256_blend_pd(tmp, first, 5);\n    }\n  }\n};\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet8f,8>& kernel) {\n  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);\n  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);\n  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);\n  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);\n  __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);\n  __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);\n  __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);\n  __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);\n  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));\n  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));\n  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));\n  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));\n  __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0));\n  __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2));\n  __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0));\n  __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2));\n  kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);\n  kernel.packet[1] = _mm256_permute2f128_ps(S1, 
S5, 0x20);\n  kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);\n  kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);\n  kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);\n  kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);\n  kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);\n  kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);\n}\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet8f,4>& kernel) {\n  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);\n  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);\n  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);\n  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);\n\n  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));\n  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));\n  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));\n  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));\n\n  kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);\n  kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);\n  kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);\n  kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);\n}\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet4d,4>& kernel) {\n  __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);\n  __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);\n  __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);\n  __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);\n\n  kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);\n  kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);\n  kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);\n  kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) {\n  const 
__m256 zero = _mm256_setzero_ps();\n  const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4], ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);\n  __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ);\n  return _mm256_blendv_ps(thenPacket, elsePacket, false_mask);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket, const Packet4d& elsePacket) {\n  const __m256d zero = _mm256_setzero_pd();\n  const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);\n  __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ);\n  return _mm256_blendv_pd(thenPacket, elsePacket, false_mask);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pinsertfirst(const Packet8f& a, float b)\n{\n  return _mm256_blend_ps(a,pset1<Packet8f>(b),1);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4d pinsertfirst(const Packet4d& a, double b)\n{\n  return _mm256_blend_pd(a,pset1<Packet4d>(b),1);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pinsertlast(const Packet8f& a, float b)\n{\n  return _mm256_blend_ps(a,pset1<Packet8f>(b),(1<<7));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4d pinsertlast(const Packet4d& a, double b)\n{\n  return _mm256_blend_pd(a,pset1<Packet4d>(b),(1<<3));\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_PACKET_MATH_AVX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/AVX/TypeCasting.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_TYPE_CASTING_AVX_H\n#define EIGEN_TYPE_CASTING_AVX_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n// For now we use SSE to handle integers, so we can't use AVX instructions to cast\n// from int to float\ntemplate <>\nstruct type_casting_traits<float, int> {\n  enum {\n    VectorizedCast = 0,\n    SrcCoeffRatio = 1,\n    TgtCoeffRatio = 1\n  };\n};\n\ntemplate <>\nstruct type_casting_traits<int, float> {\n  enum {\n    VectorizedCast = 0,\n    SrcCoeffRatio = 1,\n    TgtCoeffRatio = 1\n  };\n};\n\n\n\ntemplate<> EIGEN_STRONG_INLINE Packet8i pcast<Packet8f, Packet8i>(const Packet8f& a) {\n  return _mm256_cvtps_epi32(a);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8i, Packet8f>(const Packet8i& a) {\n  return _mm256_cvtepi32_ps(a);\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_TYPE_CASTING_AVX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/AVX512/MathFunctions.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2016 Pedro Gonnet (pedro.gonnet@gmail.com)\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_\n#define THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_\n\nnamespace Eigen {\n\nnamespace internal {\n\n// Disable the code for older versions of gcc that don't support many of the required avx512 instrinsics.\n#if EIGEN_GNUC_AT_LEAST(5, 3)\n\n#define _EIGEN_DECLARE_CONST_Packet16f(NAME, X) \\\n  const Packet16f p16f_##NAME = pset1<Packet16f>(X)\n\n#define _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(NAME, X) \\\n  const Packet16f p16f_##NAME = (__m512)pset1<Packet16i>(X)\n\n#define _EIGEN_DECLARE_CONST_Packet8d(NAME, X) \\\n  const Packet8d p8d_##NAME = pset1<Packet8d>(X)\n\n#define _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(NAME, X) \\\n  const Packet8d p8d_##NAME = _mm512_castsi512_pd(_mm512_set1_epi64(X))\n\n// Natural logarithm\n// Computes log(x) as log(2^e * m) = C*e + log(m), where the constant C =log(2)\n// and m is in the range [sqrt(1/2),sqrt(2)). 
In this range, the logarithm can\n// be easily approximated by a polynomial centered on m=1 for stability.\n#if defined(EIGEN_VECTORIZE_AVX512DQ)\ntemplate <>\nEIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f\nplog<Packet16f>(const Packet16f& _x) {\n  Packet16f x = _x;\n  _EIGEN_DECLARE_CONST_Packet16f(1, 1.0f);\n  _EIGEN_DECLARE_CONST_Packet16f(half, 0.5f);\n  _EIGEN_DECLARE_CONST_Packet16f(126f, 126.0f);\n\n  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(inv_mant_mask, ~0x7f800000);\n\n  // The smallest non denormalized float number.\n  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(min_norm_pos, 0x00800000);\n  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(minus_inf, 0xff800000);\n  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(nan, 0x7fc00000);\n\n  // Polynomial coefficients.\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_SQRTHF, 0.707106781186547524f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p0, 7.0376836292E-2f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p1, -1.1514610310E-1f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p2, 1.1676998740E-1f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p3, -1.2420140846E-1f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p4, +1.4249322787E-1f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p5, -1.6668057665E-1f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p6, +2.0000714765E-1f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p7, -2.4999993993E-1f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_p8, +3.3333331174E-1f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_q1, -2.12194440e-4f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_log_q2, 0.693359375f);\n\n  // invalid_mask is set to true when x is NaN\n  __mmask16 invalid_mask =\n      _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_NGE_UQ);\n  __mmask16 iszero_mask =\n      _mm512_cmp_ps_mask(x, _mm512_setzero_ps(), _CMP_EQ_UQ);\n\n  // Truncate input values to the minimum positive normal.\n  x = pmax(x, p16f_min_norm_pos);\n\n  // Extract the shifted exponents.\n  Packet16f 
emm0 = _mm512_cvtepi32_ps(_mm512_srli_epi32((__m512i)x, 23));\n  Packet16f e = _mm512_sub_ps(emm0, p16f_126f);\n\n  // Set the exponents to -1, i.e. x are in the range [0.5,1).\n  x = _mm512_and_ps(x, p16f_inv_mant_mask);\n  x = _mm512_or_ps(x, p16f_half);\n\n  // part2: Shift the inputs from the range [0.5,1) to [sqrt(1/2),sqrt(2))\n  // and shift by -1. The values are then centered around 0, which improves\n  // the stability of the polynomial evaluation.\n  //   if( x < SQRTHF ) {\n  //     e -= 1;\n  //     x = x + x - 1.0;\n  //   } else { x = x - 1.0; }\n  __mmask16 mask = _mm512_cmp_ps_mask(x, p16f_cephes_SQRTHF, _CMP_LT_OQ);\n  Packet16f tmp = _mm512_mask_blend_ps(mask, x, _mm512_setzero_ps());\n  x = psub(x, p16f_1);\n  e = psub(e, _mm512_mask_blend_ps(mask, p16f_1, _mm512_setzero_ps()));\n  x = padd(x, tmp);\n\n  Packet16f x2 = pmul(x, x);\n  Packet16f x3 = pmul(x2, x);\n\n  // Evaluate the polynomial approximant of degree 8 in three parts, probably\n  // to improve instruction-level parallelism.\n  Packet16f y, y1, y2;\n  y = pmadd(p16f_cephes_log_p0, x, p16f_cephes_log_p1);\n  y1 = pmadd(p16f_cephes_log_p3, x, p16f_cephes_log_p4);\n  y2 = pmadd(p16f_cephes_log_p6, x, p16f_cephes_log_p7);\n  y = pmadd(y, x, p16f_cephes_log_p2);\n  y1 = pmadd(y1, x, p16f_cephes_log_p5);\n  y2 = pmadd(y2, x, p16f_cephes_log_p8);\n  y = pmadd(y, x3, y1);\n  y = pmadd(y, x3, y2);\n  y = pmul(y, x3);\n\n  // Add the logarithm of the exponent back to the result of the interpolation.\n  y1 = pmul(e, p16f_cephes_log_q1);\n  tmp = pmul(x2, p16f_half);\n  y = padd(y, y1);\n  x = psub(x, tmp);\n  y2 = pmul(e, p16f_cephes_log_q2);\n  x = padd(x, y);\n  x = padd(x, y2);\n\n  // Filter out invalid inputs, i.e. negative arg will be NAN, 0 will be -INF.\n  return _mm512_mask_blend_ps(iszero_mask, p16f_minus_inf,\n                              _mm512_mask_blend_ps(invalid_mask, p16f_nan, x));\n}\n#endif\n\n// Exponential function. 
Works by writing \"x = m*log(2) + r\" where\n// \"m = floor(x/log(2)+1/2)\" and \"r\" is the remainder. The result is then\n// \"exp(x) = 2^m*exp(r)\" where exp(r) is in the range [-1,1).\ntemplate <>\nEIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f\npexp<Packet16f>(const Packet16f& _x) {\n  _EIGEN_DECLARE_CONST_Packet16f(1, 1.0f);\n  _EIGEN_DECLARE_CONST_Packet16f(half, 0.5f);\n  _EIGEN_DECLARE_CONST_Packet16f(127, 127.0f);\n\n  _EIGEN_DECLARE_CONST_Packet16f(exp_hi, 88.3762626647950f);\n  _EIGEN_DECLARE_CONST_Packet16f(exp_lo, -88.3762626647949f);\n\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_LOG2EF, 1.44269504088896341f);\n\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p0, 1.9875691500E-4f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p1, 1.3981999507E-3f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p2, 8.3334519073E-3f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p3, 4.1665795894E-2f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p4, 1.6666665459E-1f);\n  _EIGEN_DECLARE_CONST_Packet16f(cephes_exp_p5, 5.0000001201E-1f);\n\n  // Clamp x.\n  Packet16f x = pmax(pmin(_x, p16f_exp_hi), p16f_exp_lo);\n\n  // Express exp(x) as exp(m*ln(2) + r), start by extracting\n  // m = floor(x/ln(2) + 0.5).\n  Packet16f m = _mm512_floor_ps(pmadd(x, p16f_cephes_LOG2EF, p16f_half));\n\n  // Get r = x - m*ln(2). 
Note that we can do this without losing more than one\n  // ulp precision due to the FMA instruction.\n  _EIGEN_DECLARE_CONST_Packet16f(nln2, -0.6931471805599453f);\n  Packet16f r = _mm512_fmadd_ps(m, p16f_nln2, x);\n  Packet16f r2 = pmul(r, r);\n\n  // TODO(gonnet): Split into odd/even polynomials and try to exploit\n  //               instruction-level parallelism.\n  Packet16f y = p16f_cephes_exp_p0;\n  y = pmadd(y, r, p16f_cephes_exp_p1);\n  y = pmadd(y, r, p16f_cephes_exp_p2);\n  y = pmadd(y, r, p16f_cephes_exp_p3);\n  y = pmadd(y, r, p16f_cephes_exp_p4);\n  y = pmadd(y, r, p16f_cephes_exp_p5);\n  y = pmadd(y, r2, r);\n  y = padd(y, p16f_1);\n\n  // Build emm0 = 2^m.\n  Packet16i emm0 = _mm512_cvttps_epi32(padd(m, p16f_127));\n  emm0 = _mm512_slli_epi32(emm0, 23);\n\n  // Return 2^m * exp(r).\n  return pmax(pmul(y, _mm512_castsi512_ps(emm0)), _x);\n}\n\n/*template <>\nEIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d\npexp<Packet8d>(const Packet8d& _x) {\n  Packet8d x = _x;\n\n  _EIGEN_DECLARE_CONST_Packet8d(1, 1.0);\n  _EIGEN_DECLARE_CONST_Packet8d(2, 2.0);\n\n  _EIGEN_DECLARE_CONST_Packet8d(exp_hi, 709.437);\n  _EIGEN_DECLARE_CONST_Packet8d(exp_lo, -709.436139303);\n\n  _EIGEN_DECLARE_CONST_Packet8d(cephes_LOG2EF, 1.4426950408889634073599);\n\n  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p0, 1.26177193074810590878e-4);\n  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p1, 3.02994407707441961300e-2);\n  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_p2, 9.99999999999999999910e-1);\n\n  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q0, 3.00198505138664455042e-6);\n  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q1, 2.52448340349684104192e-3);\n  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q2, 2.27265548208155028766e-1);\n  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_q3, 2.00000000000000000009e0);\n\n  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_C1, 0.693145751953125);\n  _EIGEN_DECLARE_CONST_Packet8d(cephes_exp_C2, 1.42860682030941723212e-6);\n\n  // clamp x\n  x = 
pmax(pmin(x, p8d_exp_hi), p8d_exp_lo);\n\n  // Express exp(x) as exp(g + n*log(2)).\n  const Packet8d n =\n      _mm512_mul_round_pd(p8d_cephes_LOG2EF, x, _MM_FROUND_TO_NEAREST_INT);\n\n  // Get the remainder modulo log(2), i.e. the \"g\" described above. Subtract\n  // n*log(2) out in two steps, i.e. n*C1 + n*C2, C1+C2=log2 to get the last\n  // digits right.\n  const Packet8d nC1 = pmul(n, p8d_cephes_exp_C1);\n  const Packet8d nC2 = pmul(n, p8d_cephes_exp_C2);\n  x = psub(x, nC1);\n  x = psub(x, nC2);\n\n  const Packet8d x2 = pmul(x, x);\n\n  // Evaluate the numerator polynomial of the rational interpolant.\n  Packet8d px = p8d_cephes_exp_p0;\n  px = pmadd(px, x2, p8d_cephes_exp_p1);\n  px = pmadd(px, x2, p8d_cephes_exp_p2);\n  px = pmul(px, x);\n\n  // Evaluate the denominator polynomial of the rational interpolant.\n  Packet8d qx = p8d_cephes_exp_q0;\n  qx = pmadd(qx, x2, p8d_cephes_exp_q1);\n  qx = pmadd(qx, x2, p8d_cephes_exp_q2);\n  qx = pmadd(qx, x2, p8d_cephes_exp_q3);\n\n  // I don't really get this bit, copied from the SSE2 routines, so...\n  // TODO(gonnet): Figure out what is going on here, perhaps find a better\n  // rational interpolant?\n  x = _mm512_div_pd(px, psub(qx, px));\n  x = pmadd(p8d_2, x, p8d_1);\n\n  // Build e=2^n.\n  const Packet8d e = _mm512_castsi512_pd(_mm512_slli_epi64(\n      _mm512_add_epi64(_mm512_cvtpd_epi64(n), _mm512_set1_epi64(1023)), 52));\n\n  // Construct the result 2^n * exp(g) = e * x. The max is used to catch\n  // non-finite values in the input.\n  return pmax(pmul(x, e), _x);\n  }*/\n\n// Functions for sqrt.\n// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step\n// of Newton's method, at a cost of 1-2 bits of precision as opposed to the\n// exact solution. 
The main advantage of this approach is not just speed, but\n// also the fact that it can be inlined and pipelined with other computations,\n// further reducing its effective latency.\n#if EIGEN_FAST_MATH\ntemplate <>\nEIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f\npsqrt<Packet16f>(const Packet16f& _x) {\n  _EIGEN_DECLARE_CONST_Packet16f(one_point_five, 1.5f);\n  _EIGEN_DECLARE_CONST_Packet16f(minus_half, -0.5f);\n  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(flt_min, 0x00800000);\n\n  Packet16f neg_half = pmul(_x, p16f_minus_half);\n\n  // select only the inverse sqrt of positive normal inputs (denormals are\n  // flushed to zero and cause infs as well).\n  __mmask16 non_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_GE_OQ);\n  Packet16f x = _mm512_mask_blend_ps(non_zero_mask, _mm512_rsqrt14_ps(_x),\n                                     _mm512_setzero_ps());\n\n  // Do a single step of Newton's iteration.\n  x = pmul(x, pmadd(neg_half, pmul(x, x), p16f_one_point_five));\n\n  // Multiply the original _x by it's reciprocal square root to extract the\n  // square root.\n  return pmul(_x, x);\n}\n\ntemplate <>\nEIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d\npsqrt<Packet8d>(const Packet8d& _x) {\n  _EIGEN_DECLARE_CONST_Packet8d(one_point_five, 1.5);\n  _EIGEN_DECLARE_CONST_Packet8d(minus_half, -0.5);\n  _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(dbl_min, 0x0010000000000000LL);\n\n  Packet8d neg_half = pmul(_x, p8d_minus_half);\n\n  // select only the inverse sqrt of positive normal inputs (denormals are\n  // flushed to zero and cause infs as well).\n  __mmask8 non_zero_mask = _mm512_cmp_pd_mask(_x, p8d_dbl_min, _CMP_GE_OQ);\n  Packet8d x = _mm512_mask_blend_pd(non_zero_mask, _mm512_rsqrt14_pd(_x),\n                                    _mm512_setzero_pd());\n\n  // Do a first step of Newton's iteration.\n  x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));\n\n  // Do a second step of Newton's 
iteration.\n  x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));\n\n  // Multiply the original _x by it's reciprocal square root to extract the\n  // square root.\n  return pmul(_x, x);\n}\n#else\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f psqrt<Packet16f>(const Packet16f& x) {\n  return _mm512_sqrt_ps(x);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d psqrt<Packet8d>(const Packet8d& x) {\n  return _mm512_sqrt_pd(x);\n}\n#endif\n\n// Functions for rsqrt.\n// Almost identical to the sqrt routine, just leave out the last multiplication\n// and fill in NaN/Inf where needed. Note that this function only exists as an\n// iterative version for doubles since there is no instruction for diretly\n// computing the reciprocal square root in AVX-512.\n#ifdef EIGEN_FAST_MATH\ntemplate <>\nEIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet16f\nprsqrt<Packet16f>(const Packet16f& _x) {\n  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(inf, 0x7f800000);\n  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(nan, 0x7fc00000);\n  _EIGEN_DECLARE_CONST_Packet16f(one_point_five, 1.5f);\n  _EIGEN_DECLARE_CONST_Packet16f(minus_half, -0.5f);\n  _EIGEN_DECLARE_CONST_Packet16f_FROM_INT(flt_min, 0x00800000);\n\n  Packet16f neg_half = pmul(_x, p16f_minus_half);\n\n  // select only the inverse sqrt of positive normal inputs (denormals are\n  // flushed to zero and cause infs as well).\n  __mmask16 le_zero_mask = _mm512_cmp_ps_mask(_x, p16f_flt_min, _CMP_LT_OQ);\n  Packet16f x = _mm512_mask_blend_ps(le_zero_mask, _mm512_setzero_ps(),\n                                     _mm512_rsqrt14_ps(_x));\n\n  // Fill in NaNs and Infs for the negative/zero entries.\n  __mmask16 neg_mask = _mm512_cmp_ps_mask(_x, _mm512_setzero_ps(), _CMP_LT_OQ);\n  Packet16f infs_and_nans = _mm512_mask_blend_ps(\n      neg_mask, p16f_nan,\n      _mm512_mask_blend_ps(le_zero_mask, p16f_inf, _mm512_setzero_ps()));\n\n  // Do a single step of Newton's iteration.\n  x = pmul(x, pmadd(neg_half, pmul(x, x), 
p16f_one_point_five));\n\n  // Insert NaNs and Infs in all the right places.\n  return _mm512_mask_blend_ps(le_zero_mask, infs_and_nans, x);\n}\n\ntemplate <>\nEIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet8d\nprsqrt<Packet8d>(const Packet8d& _x) {\n  _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(inf, 0x7ff0000000000000LL);\n  _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(nan, 0x7ff1000000000000LL);\n  _EIGEN_DECLARE_CONST_Packet8d(one_point_five, 1.5);\n  _EIGEN_DECLARE_CONST_Packet8d(minus_half, -0.5);\n  _EIGEN_DECLARE_CONST_Packet8d_FROM_INT64(dbl_min, 0x0010000000000000LL);\n\n  Packet8d neg_half = pmul(_x, p8d_minus_half);\n\n  // select only the inverse sqrt of positive normal inputs (denormals are\n  // flushed to zero and cause infs as well).\n  __mmask8 le_zero_mask = _mm512_cmp_pd_mask(_x, p8d_dbl_min, _CMP_LT_OQ);\n  Packet8d x = _mm512_mask_blend_pd(le_zero_mask, _mm512_setzero_pd(),\n                                    _mm512_rsqrt14_pd(_x));\n\n  // Fill in NaNs and Infs for the negative/zero entries.\n  __mmask8 neg_mask = _mm512_cmp_pd_mask(_x, _mm512_setzero_pd(), _CMP_LT_OQ);\n  Packet8d infs_and_nans = _mm512_mask_blend_pd(\n      neg_mask, p8d_nan,\n      _mm512_mask_blend_pd(le_zero_mask, p8d_inf, _mm512_setzero_pd()));\n\n  // Do a first step of Newton's iteration.\n  x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));\n\n  // Do a second step of Newton's iteration.\n  x = pmul(x, pmadd(neg_half, pmul(x, x), p8d_one_point_five));\n\n  // Insert NaNs and Infs in all the right places.\n  return _mm512_mask_blend_pd(le_zero_mask, infs_and_nans, x);\n}\n#else\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f prsqrt<Packet16f>(const Packet16f& x) {\n  return _mm512_rsqrt28_ps(x);\n}\n#endif\n#endif\n\n}  // end namespace internal\n\n}  // end namespace Eigen\n\n#endif  // THIRD_PARTY_EIGEN3_EIGEN_SRC_CORE_ARCH_AVX512_MATHFUNCTIONS_H_\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/AVX512/PacketMath.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2016 Benoit Steiner (benoit.steiner.goog@gmail.com)\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PACKET_MATH_AVX512_H\n#define EIGEN_PACKET_MATH_AVX512_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD\n#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8\n#endif\n\n#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS\n#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))\n#endif\n\n#ifdef __FMA__\n#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD\n#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD\n#endif\n#endif\n\ntypedef __m512 Packet16f;\ntypedef __m512i Packet16i;\ntypedef __m512d Packet8d;\n\ntemplate <>\nstruct is_arithmetic<__m512> {\n  enum { value = true };\n};\ntemplate <>\nstruct is_arithmetic<__m512i> {\n  enum { value = true };\n};\ntemplate <>\nstruct is_arithmetic<__m512d> {\n  enum { value = true };\n};\n\ntemplate<> struct packet_traits<float>  : default_packet_traits\n{\n  typedef Packet16f type;\n  typedef Packet8f half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 16,\n    HasHalfPacket = 1,\n#if EIGEN_GNUC_AT_LEAST(5, 3)\n#ifdef EIGEN_VECTORIZE_AVX512DQ\n    HasLog = 1,\n#endif\n    HasExp = 1,\n    HasSqrt = EIGEN_FAST_MATH,\n    HasRsqrt = EIGEN_FAST_MATH,\n#endif\n    HasDiv = 1\n  };\n };\ntemplate<> struct packet_traits<double> : default_packet_traits\n{\n  typedef Packet8d type;\n  typedef Packet4d half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 8,\n    HasHalfPacket = 1,\n#if EIGEN_GNUC_AT_LEAST(5, 3)\n    HasSqrt = EIGEN_FAST_MATH,\n    HasRsqrt = EIGEN_FAST_MATH,\n#endif\n    HasDiv = 1\n  };\n};\n\n/* TODO Implement AVX512 for integers\ntemplate<> 
struct packet_traits<int>    : default_packet_traits\n{\n  typedef Packet16i type;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=8\n  };\n};\n*/\n\ntemplate <>\nstruct unpacket_traits<Packet16f> {\n  typedef float type;\n  typedef Packet8f half;\n  enum { size = 16, alignment=Aligned64 };\n};\ntemplate <>\nstruct unpacket_traits<Packet8d> {\n  typedef double type;\n  typedef Packet4d half;\n  enum { size = 8, alignment=Aligned64 };\n};\ntemplate <>\nstruct unpacket_traits<Packet16i> {\n  typedef int type;\n  typedef Packet8i half;\n  enum { size = 16, alignment=Aligned64 };\n};\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f pset1<Packet16f>(const float& from) {\n  return _mm512_set1_ps(from);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pset1<Packet8d>(const double& from) {\n  return _mm512_set1_pd(from);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet16i pset1<Packet16i>(const int& from) {\n  return _mm512_set1_epi32(from);\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f pload1<Packet16f>(const float* from) {\n  return _mm512_broadcastss_ps(_mm_load_ps1(from));\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pload1<Packet8d>(const double* from) {\n  return _mm512_broadcastsd_pd(_mm_load_pd1(from));\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f plset<Packet16f>(const float& a) {\n  return _mm512_add_ps(\n      _mm512_set1_ps(a),\n      _mm512_set_ps(15.0f, 14.0f, 13.0f, 12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f,\n                    4.0f, 3.0f, 2.0f, 1.0f, 0.0f));\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d plset<Packet8d>(const double& a) {\n  return _mm512_add_pd(_mm512_set1_pd(a),\n                       _mm512_set_pd(7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0));\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f padd<Packet16f>(const Packet16f& a,\n                                              const Packet16f& b) {\n  return _mm512_add_ps(a, b);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d padd<Packet8d>(const Packet8d& a,\n            
                                const Packet8d& b) {\n  return _mm512_add_pd(a, b);\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f psub<Packet16f>(const Packet16f& a,\n                                              const Packet16f& b) {\n  return _mm512_sub_ps(a, b);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d psub<Packet8d>(const Packet8d& a,\n                                            const Packet8d& b) {\n  return _mm512_sub_pd(a, b);\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f pnegate(const Packet16f& a) {\n  return _mm512_sub_ps(_mm512_set1_ps(0.0), a);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pnegate(const Packet8d& a) {\n  return _mm512_sub_pd(_mm512_set1_pd(0.0), a);\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f pconj(const Packet16f& a) {\n  return a;\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pconj(const Packet8d& a) {\n  return a;\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet16i pconj(const Packet16i& a) {\n  return a;\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f pmul<Packet16f>(const Packet16f& a,\n                                              const Packet16f& b) {\n  return _mm512_mul_ps(a, b);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pmul<Packet8d>(const Packet8d& a,\n                                            const Packet8d& b) {\n  return _mm512_mul_pd(a, b);\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f pdiv<Packet16f>(const Packet16f& a,\n                                              const Packet16f& b) {\n  return _mm512_div_ps(a, b);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pdiv<Packet8d>(const Packet8d& a,\n                                            const Packet8d& b) {\n  return _mm512_div_pd(a, b);\n}\n\n#ifdef __FMA__\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f pmadd(const Packet16f& a, const Packet16f& b,\n                                    const Packet16f& c) {\n  return _mm512_fmadd_ps(a, b, c);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pmadd(const Packet8d& a, const Packet8d& b,\n                 
                  const Packet8d& c) {\n  return _mm512_fmadd_pd(a, b, c);\n}\n#endif\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f pmin<Packet16f>(const Packet16f& a,\n                                              const Packet16f& b) {\n  // Arguments are reversed to match NaN propagation behavior of std::min.\n  return _mm512_min_ps(b, a);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pmin<Packet8d>(const Packet8d& a,\n                                            const Packet8d& b) {\n  // Arguments are reversed to match NaN propagation behavior of std::min.\n  return _mm512_min_pd(b, a);\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f pmax<Packet16f>(const Packet16f& a,\n                                              const Packet16f& b) {\n  // Arguments are reversed to match NaN propagation behavior of std::max.\n  return _mm512_max_ps(b, a);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pmax<Packet8d>(const Packet8d& a,\n                                            const Packet8d& b) {\n  // Arguments are reversed to match NaN propagation behavior of std::max.\n  return _mm512_max_pd(b, a);\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f pand<Packet16f>(const Packet16f& a,\n                                              const Packet16f& b) {\n#ifdef EIGEN_VECTORIZE_AVX512DQ\n  return _mm512_and_ps(a, b);\n#else\n  Packet16f res = _mm512_undefined_ps();\n  Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);\n  Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);\n  res = _mm512_insertf32x4(res, _mm_and_ps(lane0_a, lane0_b), 0);\n\n  Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);\n  Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);\n  res = _mm512_insertf32x4(res, _mm_and_ps(lane1_a, lane1_b), 1);\n\n  Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);\n  Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);\n  res = _mm512_insertf32x4(res, _mm_and_ps(lane2_a, lane2_b), 2);\n\n  Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);\n  Packet4f lane3_b = _mm512_extractf32x4_ps(b, 
3);\n  res = _mm512_insertf32x4(res, _mm_and_ps(lane3_a, lane3_b), 3);\n\n  return res;\n#endif\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pand<Packet8d>(const Packet8d& a,\n                                            const Packet8d& b) {\n#ifdef EIGEN_VECTORIZE_AVX512DQ\n  return _mm512_and_pd(a, b);\n#else\n  Packet8d res = _mm512_undefined_pd();\n  Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);\n  Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);\n  res = _mm512_insertf64x4(res, _mm256_and_pd(lane0_a, lane0_b), 0);\n\n  Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);\n  Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);\n  res = _mm512_insertf64x4(res, _mm256_and_pd(lane1_a, lane1_b), 1);\n\n  return res;\n#endif\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f por<Packet16f>(const Packet16f& a,\n                                             const Packet16f& b) {\n#ifdef EIGEN_VECTORIZE_AVX512DQ\n  return _mm512_or_ps(a, b);\n#else\n  Packet16f res = _mm512_undefined_ps();\n  Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);\n  Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);\n  res = _mm512_insertf32x4(res, _mm_or_ps(lane0_a, lane0_b), 0);\n\n  Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);\n  Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);\n  res = _mm512_insertf32x4(res, _mm_or_ps(lane1_a, lane1_b), 1);\n\n  Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);\n  Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);\n  res = _mm512_insertf32x4(res, _mm_or_ps(lane2_a, lane2_b), 2);\n\n  Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);\n  Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);\n  res = _mm512_insertf32x4(res, _mm_or_ps(lane3_a, lane3_b), 3);\n\n  return res;\n#endif\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d por<Packet8d>(const Packet8d& a,\n                                           const Packet8d& b) {\n#ifdef EIGEN_VECTORIZE_AVX512DQ\n  return _mm512_or_pd(a, b);\n#else\n  Packet8d res = _mm512_undefined_pd();\n  Packet4d lane0_a = 
_mm512_extractf64x4_pd(a, 0);\n  Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);\n  res = _mm512_insertf64x4(res, _mm256_or_pd(lane0_a, lane0_b), 0);\n\n  Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);\n  Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);\n  res = _mm512_insertf64x4(res, _mm256_or_pd(lane1_a, lane1_b), 1);\n\n  return res;\n#endif\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f pxor<Packet16f>(const Packet16f& a,\n                                              const Packet16f& b) {\n#ifdef EIGEN_VECTORIZE_AVX512DQ\n  return _mm512_xor_ps(a, b);\n#else\n  Packet16f res = _mm512_undefined_ps();\n  Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);\n  Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);\n  res = _mm512_insertf32x4(res, _mm_xor_ps(lane0_a, lane0_b), 0);\n\n  Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);\n  Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);\n  res = _mm512_insertf32x4(res, _mm_xor_ps(lane1_a, lane1_b), 1);\n\n  Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);\n  Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);\n  res = _mm512_insertf32x4(res, _mm_xor_ps(lane2_a, lane2_b), 2);\n\n  Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);\n  Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);\n  res = _mm512_insertf32x4(res, _mm_xor_ps(lane3_a, lane3_b), 3);\n\n  return res;\n#endif\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pxor<Packet8d>(const Packet8d& a,\n                                            const Packet8d& b) {\n#ifdef EIGEN_VECTORIZE_AVX512DQ\n  return _mm512_xor_pd(a, b);\n#else\n  Packet8d res = _mm512_undefined_pd();\n  Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);\n  Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);\n  res = _mm512_insertf64x4(res, _mm256_xor_pd(lane0_a, lane0_b), 0);\n\n  Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);\n  Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);\n  res = _mm512_insertf64x4(res, _mm256_xor_pd(lane1_a, lane1_b), 1);\n\n  return res;\n#endif\n}\n\ntemplate 
<>\nEIGEN_STRONG_INLINE Packet16f pandnot<Packet16f>(const Packet16f& a,\n                                                 const Packet16f& b) {\n#ifdef EIGEN_VECTORIZE_AVX512DQ\n  return _mm512_andnot_ps(a, b);\n#else\n  Packet16f res = _mm512_undefined_ps();\n  Packet4f lane0_a = _mm512_extractf32x4_ps(a, 0);\n  Packet4f lane0_b = _mm512_extractf32x4_ps(b, 0);\n  res = _mm512_insertf32x4(res, _mm_andnot_ps(lane0_a, lane0_b), 0);\n\n  Packet4f lane1_a = _mm512_extractf32x4_ps(a, 1);\n  Packet4f lane1_b = _mm512_extractf32x4_ps(b, 1);\n  res = _mm512_insertf32x4(res, _mm_andnot_ps(lane1_a, lane1_b), 1);\n\n  Packet4f lane2_a = _mm512_extractf32x4_ps(a, 2);\n  Packet4f lane2_b = _mm512_extractf32x4_ps(b, 2);\n  res = _mm512_insertf32x4(res, _mm_andnot_ps(lane2_a, lane2_b), 2);\n\n  Packet4f lane3_a = _mm512_extractf32x4_ps(a, 3);\n  Packet4f lane3_b = _mm512_extractf32x4_ps(b, 3);\n  res = _mm512_insertf32x4(res, _mm_andnot_ps(lane3_a, lane3_b), 3);\n\n  return res;\n#endif\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pandnot<Packet8d>(const Packet8d& a,\n                                               const Packet8d& b) {\n#ifdef EIGEN_VECTORIZE_AVX512DQ\n  return _mm512_andnot_pd(a, b);\n#else\n  Packet8d res = _mm512_undefined_pd();\n  Packet4d lane0_a = _mm512_extractf64x4_pd(a, 0);\n  Packet4d lane0_b = _mm512_extractf64x4_pd(b, 0);\n  res = _mm512_insertf64x4(res, _mm256_andnot_pd(lane0_a, lane0_b), 0);\n\n  Packet4d lane1_a = _mm512_extractf64x4_pd(a, 1);\n  Packet4d lane1_b = _mm512_extractf64x4_pd(b, 1);\n  res = _mm512_insertf64x4(res, _mm256_andnot_pd(lane1_a, lane1_b), 1);\n\n  return res;\n#endif\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f pload<Packet16f>(const float* from) {\n  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_ps(from);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pload<Packet8d>(const double* from) {\n  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_pd(from);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet16i pload<Packet16i>(const int* 
from) {\n  EIGEN_DEBUG_ALIGNED_LOAD return _mm512_load_si512(\n      reinterpret_cast<const __m512i*>(from));\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f ploadu<Packet16f>(const float* from) {\n  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_ps(from);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d ploadu<Packet8d>(const double* from) {\n  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_pd(from);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet16i ploadu<Packet16i>(const int* from) {\n  EIGEN_DEBUG_UNALIGNED_LOAD return _mm512_loadu_si512(\n      reinterpret_cast<const __m512i*>(from));\n}\n\n// Loads 8 floats from memory a returns the packet\n// {a0, a0  a1, a1, a2, a2, a3, a3, a4, a4, a5, a5, a6, a6, a7, a7}\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f ploaddup<Packet16f>(const float* from) {\n  __m256i low_half = _mm256_load_si256(reinterpret_cast<const __m256i*>(from));\n  __m512 even_elements = _mm512_castsi512_ps(_mm512_cvtepu32_epi64(low_half));\n  __m512 pairs = _mm512_permute_ps(even_elements, _MM_SHUFFLE(2, 2, 0, 0));\n  return pairs;\n}\n// Loads 4 doubles from memory a returns the packet {a0, a0  a1, a1, a2, a2, a3,\n// a3}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d ploaddup<Packet8d>(const double* from) {\n __m512d x = _mm512_setzero_pd();\n  x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[0]), 0);\n  x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[1]), 1);\n  x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[2]), 2);\n  x = _mm512_insertf64x2(x, _mm_loaddup_pd(&from[3]), 3);\n  return x;\n}\n\n// Loads 4 floats from memory a returns the packet\n// {a0, a0  a0, a0, a1, a1, a1, a1, a2, a2, a2, a2, a3, a3, a3, a3}\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f ploadquad<Packet16f>(const float* from) {\n  Packet16f tmp = _mm512_undefined_ps();\n  tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from), 0);\n  tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 1), 1);\n  tmp = _mm512_insertf32x4(tmp, _mm_load_ps1(from + 2), 2);\n  tmp = _mm512_insertf32x4(tmp, 
_mm_load_ps1(from + 3), 3);\n  return tmp;\n}\n// Loads 2 doubles from memory a returns the packet\n// {a0, a0  a0, a0, a1, a1, a1, a1}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d ploadquad<Packet8d>(const double* from) {\n  __m128d tmp0 = _mm_load_pd1(from);\n  __m256d lane0 = _mm256_broadcastsd_pd(tmp0);\n  __m128d tmp1 = _mm_load_pd1(from + 1);\n  __m256d lane1 = _mm256_broadcastsd_pd(tmp1);\n  __m512d tmp = _mm512_undefined_pd();\n  tmp = _mm512_insertf64x4(tmp, lane0, 0);\n  return _mm512_insertf64x4(tmp, lane1, 1);\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet16f& from) {\n  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_ps(to, from);\n}\ntemplate <>\nEIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet8d& from) {\n  EIGEN_DEBUG_ALIGNED_STORE _mm512_store_pd(to, from);\n}\ntemplate <>\nEIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet16i& from) {\n  EIGEN_DEBUG_ALIGNED_STORE _mm512_storeu_si512(reinterpret_cast<__m512i*>(to),\n                                                from);\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet16f& from) {\n  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_ps(to, from);\n}\ntemplate <>\nEIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet8d& from) {\n  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_pd(to, from);\n}\ntemplate <>\nEIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet16i& from) {\n  EIGEN_DEBUG_UNALIGNED_STORE _mm512_storeu_si512(\n      reinterpret_cast<__m512i*>(to), from);\n}\n\ntemplate <>\nEIGEN_DEVICE_FUNC inline Packet16f pgather<float, Packet16f>(const float* from,\n                                                             Index stride) {\n  Packet16i stride_vector = _mm512_set1_epi32(stride);\n  Packet16i stride_multiplier =\n      _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);\n  Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);\n\n  return 
_mm512_i32gather_ps(indices, from, 4);\n}\ntemplate <>\nEIGEN_DEVICE_FUNC inline Packet8d pgather<double, Packet8d>(const double* from,\n                                                            Index stride) {\n  Packet8i stride_vector = _mm256_set1_epi32(stride);\n  Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);\n  Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);\n\n  return _mm512_i32gather_pd(indices, from, 8);\n}\n\ntemplate <>\nEIGEN_DEVICE_FUNC inline void pscatter<float, Packet16f>(float* to,\n                                                         const Packet16f& from,\n                                                         Index stride) {\n  Packet16i stride_vector = _mm512_set1_epi32(stride);\n  Packet16i stride_multiplier =\n      _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);\n  Packet16i indices = _mm512_mullo_epi32(stride_vector, stride_multiplier);\n  _mm512_i32scatter_ps(to, indices, from, 4);\n}\ntemplate <>\nEIGEN_DEVICE_FUNC inline void pscatter<double, Packet8d>(double* to,\n                                                         const Packet8d& from,\n                                                         Index stride) {\n  Packet8i stride_vector = _mm256_set1_epi32(stride);\n  Packet8i stride_multiplier = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);\n  Packet8i indices = _mm256_mullo_epi32(stride_vector, stride_multiplier);\n  _mm512_i32scatter_pd(to, indices, from, 8);\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE void pstore1<Packet16f>(float* to, const float& a) {\n  Packet16f pa = pset1<Packet16f>(a);\n  pstore(to, pa);\n}\ntemplate <>\nEIGEN_STRONG_INLINE void pstore1<Packet8d>(double* to, const double& a) {\n  Packet8d pa = pset1<Packet8d>(a);\n  pstore(to, pa);\n}\ntemplate <>\nEIGEN_STRONG_INLINE void pstore1<Packet16i>(int* to, const int& a) {\n  Packet16i pa = pset1<Packet16i>(a);\n  pstore(to, pa);\n}\n\ntemplate<> EIGEN_STRONG_INLINE void prefetch<float>(const 
float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }\ntemplate<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }\ntemplate<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }\n\ntemplate <>\nEIGEN_STRONG_INLINE float pfirst<Packet16f>(const Packet16f& a) {\n  return _mm_cvtss_f32(_mm512_extractf32x4_ps(a, 0));\n}\ntemplate <>\nEIGEN_STRONG_INLINE double pfirst<Packet8d>(const Packet8d& a) {\n  return _mm_cvtsd_f64(_mm256_extractf128_pd(_mm512_extractf64x4_pd(a, 0), 0));\n}\ntemplate <>\nEIGEN_STRONG_INLINE int pfirst<Packet16i>(const Packet16i& a) {\n  return _mm_extract_epi32(_mm512_extracti32x4_epi32(a, 0), 0);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet16f preverse(const Packet16f& a)\n{\n  return _mm512_permutexvar_ps(_mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), a);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet8d preverse(const Packet8d& a)\n{\n  return _mm512_permutexvar_pd(_mm512_set_epi32(0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7), a);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet16f pabs(const Packet16f& a)\n{\n  // _mm512_abs_ps intrinsic not found, so hack around it\n  return (__m512)_mm512_and_si512((__m512i)a, _mm512_set1_epi32(0x7fffffff));\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pabs(const Packet8d& a) {\n  // _mm512_abs_ps intrinsic not found, so hack around it\n  return (__m512d)_mm512_and_si512((__m512i)a,\n                                   _mm512_set1_epi64(0x7fffffffffffffff));\n}\n\n#ifdef EIGEN_VECTORIZE_AVX512DQ\n// AVX512F does not define _mm512_extractf32x8_ps to extract _m256 from _m512\n#define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT)                           \\\n  __m256 OUTPUT##_0 = _mm512_extractf32x8_ps(INPUT, 0);                    \\\n  __m256 OUTPUT##_1 = _mm512_extractf32x8_ps(INPUT, 1)\n#else\n#define EIGEN_EXTRACT_8f_FROM_16f(INPUT, OUTPUT)              
  \\\n  __m256 OUTPUT##_0 = _mm256_insertf128_ps(                     \\\n      _mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 0)), \\\n      _mm512_extractf32x4_ps(INPUT, 1), 1);                     \\\n  __m256 OUTPUT##_1 = _mm256_insertf128_ps(                     \\\n      _mm256_castps128_ps256(_mm512_extractf32x4_ps(INPUT, 2)), \\\n      _mm512_extractf32x4_ps(INPUT, 3), 1);\n#endif\n\n#ifdef EIGEN_VECTORIZE_AVX512DQ\n#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB) \\\n  OUTPUT = _mm512_insertf32x8(OUTPUT, INPUTA, 0);        \\\n  OUTPUT = _mm512_insertf32x8(OUTPUT, INPUTB, 1);\n#else\n#define EIGEN_INSERT_8f_INTO_16f(OUTPUT, INPUTA, INPUTB)                    \\\n  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 0), 0); \\\n  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTA, 1), 1); \\\n  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 0), 2); \\\n  OUTPUT = _mm512_insertf32x4(OUTPUT, _mm256_extractf128_ps(INPUTB, 1), 3);\n#endif\ntemplate<> EIGEN_STRONG_INLINE Packet16f preduxp<Packet16f>(const Packet16f*\nvecs)\n{\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[0], vecs0);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[1], vecs1);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[2], vecs2);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[3], vecs3);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[4], vecs4);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[5], vecs5);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[6], vecs6);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[7], vecs7);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[8], vecs8);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[9], vecs9);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[10], vecs10);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[11], vecs11);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[12], vecs12);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[13], vecs13);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[14], vecs14);\n  EIGEN_EXTRACT_8f_FROM_16f(vecs[15], vecs15);\n\n  __m256 hsum1 = _mm256_hadd_ps(vecs0_0, vecs1_0);\n  __m256 hsum2 = _mm256_hadd_ps(vecs2_0, vecs3_0);\n  __m256 hsum3 = 
_mm256_hadd_ps(vecs4_0, vecs5_0);\n  __m256 hsum4 = _mm256_hadd_ps(vecs6_0, vecs7_0);\n\n  __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);\n  __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);\n  __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);\n  __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);\n\n  __m256 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);\n  __m256 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);\n  __m256 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);\n  __m256 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);\n\n  __m256 sum1 = _mm256_add_ps(perm1, hsum5);\n  __m256 sum2 = _mm256_add_ps(perm2, hsum6);\n  __m256 sum3 = _mm256_add_ps(perm3, hsum7);\n  __m256 sum4 = _mm256_add_ps(perm4, hsum8);\n\n  __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);\n  __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);\n\n  __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);\n\n  hsum1 = _mm256_hadd_ps(vecs0_1, vecs1_1);\n  hsum2 = _mm256_hadd_ps(vecs2_1, vecs3_1);\n  hsum3 = _mm256_hadd_ps(vecs4_1, vecs5_1);\n  hsum4 = _mm256_hadd_ps(vecs6_1, vecs7_1);\n\n  hsum5 = _mm256_hadd_ps(hsum1, hsum1);\n  hsum6 = _mm256_hadd_ps(hsum2, hsum2);\n  hsum7 = _mm256_hadd_ps(hsum3, hsum3);\n  hsum8 = _mm256_hadd_ps(hsum4, hsum4);\n\n  perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);\n  perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);\n  perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);\n  perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);\n\n  sum1 = _mm256_add_ps(perm1, hsum5);\n  sum2 = _mm256_add_ps(perm2, hsum6);\n  sum3 = _mm256_add_ps(perm3, hsum7);\n  sum4 = _mm256_add_ps(perm4, hsum8);\n\n  blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);\n  blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);\n\n  final = _mm256_add_ps(final, _mm256_blend_ps(blend1, blend2, 0xf0));\n\n  hsum1 = _mm256_hadd_ps(vecs8_0, vecs9_0);\n  hsum2 = _mm256_hadd_ps(vecs10_0, vecs11_0);\n  hsum3 = _mm256_hadd_ps(vecs12_0, vecs13_0);\n  hsum4 = _mm256_hadd_ps(vecs14_0, vecs15_0);\n\n  hsum5 = 
_mm256_hadd_ps(hsum1, hsum1);\n  hsum6 = _mm256_hadd_ps(hsum2, hsum2);\n  hsum7 = _mm256_hadd_ps(hsum3, hsum3);\n  hsum8 = _mm256_hadd_ps(hsum4, hsum4);\n\n  perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);\n  perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);\n  perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);\n  perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);\n\n  sum1 = _mm256_add_ps(perm1, hsum5);\n  sum2 = _mm256_add_ps(perm2, hsum6);\n  sum3 = _mm256_add_ps(perm3, hsum7);\n  sum4 = _mm256_add_ps(perm4, hsum8);\n\n  blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);\n  blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);\n\n  __m256 final_1 = _mm256_blend_ps(blend1, blend2, 0xf0);\n\n  hsum1 = _mm256_hadd_ps(vecs8_1, vecs9_1);\n  hsum2 = _mm256_hadd_ps(vecs10_1, vecs11_1);\n  hsum3 = _mm256_hadd_ps(vecs12_1, vecs13_1);\n  hsum4 = _mm256_hadd_ps(vecs14_1, vecs15_1);\n\n  hsum5 = _mm256_hadd_ps(hsum1, hsum1);\n  hsum6 = _mm256_hadd_ps(hsum2, hsum2);\n  hsum7 = _mm256_hadd_ps(hsum3, hsum3);\n  hsum8 = _mm256_hadd_ps(hsum4, hsum4);\n\n  perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);\n  perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);\n  perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);\n  perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);\n\n  sum1 = _mm256_add_ps(perm1, hsum5);\n  sum2 = _mm256_add_ps(perm2, hsum6);\n  sum3 = _mm256_add_ps(perm3, hsum7);\n  sum4 = _mm256_add_ps(perm4, hsum8);\n\n  blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);\n  blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);\n\n  final_1 = _mm256_add_ps(final_1, _mm256_blend_ps(blend1, blend2, 0xf0));\n\n  __m512 final_output;\n\n  EIGEN_INSERT_8f_INTO_16f(final_output, final, final_1);\n  return final_output;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet8d preduxp<Packet8d>(const Packet8d* vecs)\n{\n  Packet4d vecs0_0 = _mm512_extractf64x4_pd(vecs[0], 0);\n  Packet4d vecs0_1 = _mm512_extractf64x4_pd(vecs[0], 1);\n\n  Packet4d vecs1_0 = _mm512_extractf64x4_pd(vecs[1], 0);\n  Packet4d vecs1_1 = 
_mm512_extractf64x4_pd(vecs[1], 1);\n\n  Packet4d vecs2_0 = _mm512_extractf64x4_pd(vecs[2], 0);\n  Packet4d vecs2_1 = _mm512_extractf64x4_pd(vecs[2], 1);\n\n  Packet4d vecs3_0 = _mm512_extractf64x4_pd(vecs[3], 0);\n  Packet4d vecs3_1 = _mm512_extractf64x4_pd(vecs[3], 1);\n\n  Packet4d vecs4_0 = _mm512_extractf64x4_pd(vecs[4], 0);\n  Packet4d vecs4_1 = _mm512_extractf64x4_pd(vecs[4], 1);\n\n  Packet4d vecs5_0 = _mm512_extractf64x4_pd(vecs[5], 0);\n  Packet4d vecs5_1 = _mm512_extractf64x4_pd(vecs[5], 1);\n\n  Packet4d vecs6_0 = _mm512_extractf64x4_pd(vecs[6], 0);\n  Packet4d vecs6_1 = _mm512_extractf64x4_pd(vecs[6], 1);\n\n  Packet4d vecs7_0 = _mm512_extractf64x4_pd(vecs[7], 0);\n  Packet4d vecs7_1 = _mm512_extractf64x4_pd(vecs[7], 1);\n\n  Packet4d tmp0, tmp1;\n\n  tmp0 = _mm256_hadd_pd(vecs0_0, vecs1_0);\n  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));\n\n  tmp1 = _mm256_hadd_pd(vecs2_0, vecs3_0);\n  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));\n\n  __m256d final_0 = _mm256_blend_pd(tmp0, tmp1, 0xC);\n\n  tmp0 = _mm256_hadd_pd(vecs0_1, vecs1_1);\n  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));\n\n  tmp1 = _mm256_hadd_pd(vecs2_1, vecs3_1);\n  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));\n\n  final_0 = _mm256_add_pd(final_0, _mm256_blend_pd(tmp0, tmp1, 0xC));\n\n  tmp0 = _mm256_hadd_pd(vecs4_0, vecs5_0);\n  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));\n\n  tmp1 = _mm256_hadd_pd(vecs6_0, vecs7_0);\n  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));\n\n  __m256d final_1 = _mm256_blend_pd(tmp0, tmp1, 0xC);\n\n  tmp0 = _mm256_hadd_pd(vecs4_1, vecs5_1);\n  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));\n\n  tmp1 = _mm256_hadd_pd(vecs6_1, vecs7_1);\n  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));\n\n  final_1 = _mm256_add_pd(final_1, _mm256_blend_pd(tmp0, tmp1, 0xC));\n\n  __m512d final_output = 
_mm512_insertf64x4(final_output, final_0, 0);\n\n  return _mm512_insertf64x4(final_output, final_1, 1);\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE float predux<Packet16f>(const Packet16f& a) {\n#ifdef EIGEN_VECTORIZE_AVX512DQ\n  __m256 lane0 = _mm512_extractf32x8_ps(a, 0);\n  __m256 lane1 = _mm512_extractf32x8_ps(a, 1);\n  Packet8f x = _mm256_add_ps(lane0, lane1);\n  return predux<Packet8f>(x);\n#else\n  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);\n  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);\n  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);\n  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);\n  __m128 sum = _mm_add_ps(_mm_add_ps(lane0, lane1), _mm_add_ps(lane2, lane3));\n  sum = _mm_hadd_ps(sum, sum);\n  sum = _mm_hadd_ps(sum, _mm_permute_ps(sum, 1));\n  return _mm_cvtss_f32(sum);\n#endif\n}\ntemplate <>\nEIGEN_STRONG_INLINE double predux<Packet8d>(const Packet8d& a) {\n  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);\n  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);\n  __m256d sum = _mm256_add_pd(lane0, lane1);\n  __m256d tmp0 = _mm256_hadd_pd(sum, _mm256_permute2f128_pd(sum, sum, 1));\n  return _mm_cvtsd_f64(_mm256_castpd256_pd128(_mm256_hadd_pd(tmp0, tmp0)));\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE Packet8f predux_downto4<Packet16f>(const Packet16f& a) {\n#ifdef EIGEN_VECTORIZE_AVX512DQ\n  __m256 lane0 = _mm512_extractf32x8_ps(a, 0);\n  __m256 lane1 = _mm512_extractf32x8_ps(a, 1);\n  return _mm256_add_ps(lane0, lane1);\n#else\n  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);\n  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);\n  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);\n  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);\n  __m128 sum0 = _mm_add_ps(lane0, lane2);\n  __m128 sum1 = _mm_add_ps(lane1, lane3);\n  return _mm256_insertf128_ps(_mm256_castps128_ps256(sum0), sum1, 1);\n#endif\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet4d predux_downto4<Packet8d>(const Packet8d& a) {\n  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);\n  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);\n  
__m256d res = _mm256_add_pd(lane0, lane1);\n  return res;\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE float predux_mul<Packet16f>(const Packet16f& a) {\n//#ifdef EIGEN_VECTORIZE_AVX512DQ\n#if 0\n  Packet8f lane0 = _mm512_extractf32x8_ps(a, 0);\n  Packet8f lane1 = _mm512_extractf32x8_ps(a, 1);\n  Packet8f res = pmul(lane0, lane1);\n  res = pmul(res, _mm256_permute2f128_ps(res, res, 1));\n  res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));\n  return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));\n#else\n  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);\n  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);\n  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);\n  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);\n  __m128 res = pmul(pmul(lane0, lane1), pmul(lane2, lane3));\n  res = pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));\n  return pfirst(pmul(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));\n#endif\n}\ntemplate <>\nEIGEN_STRONG_INLINE double predux_mul<Packet8d>(const Packet8d& a) {\n  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);\n  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);\n  __m256d res = pmul(lane0, lane1);\n  res = pmul(res, _mm256_permute2f128_pd(res, res, 1));\n  return pfirst(pmul(res, _mm256_shuffle_pd(res, res, 1)));\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE float predux_min<Packet16f>(const Packet16f& a) {\n  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);\n  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);\n  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);\n  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);\n  __m128 res = _mm_min_ps(_mm_min_ps(lane0, lane1), _mm_min_ps(lane2, lane3));\n  res = _mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));\n  return pfirst(_mm_min_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));\n}\ntemplate <>\nEIGEN_STRONG_INLINE double predux_min<Packet8d>(const Packet8d& a) {\n  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);\n  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);\n  __m256d res = 
_mm256_min_pd(lane0, lane1);\n  res = _mm256_min_pd(res, _mm256_permute2f128_pd(res, res, 1));\n  return pfirst(_mm256_min_pd(res, _mm256_shuffle_pd(res, res, 1)));\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE float predux_max<Packet16f>(const Packet16f& a) {\n  __m128 lane0 = _mm512_extractf32x4_ps(a, 0);\n  __m128 lane1 = _mm512_extractf32x4_ps(a, 1);\n  __m128 lane2 = _mm512_extractf32x4_ps(a, 2);\n  __m128 lane3 = _mm512_extractf32x4_ps(a, 3);\n  __m128 res = _mm_max_ps(_mm_max_ps(lane0, lane1), _mm_max_ps(lane2, lane3));\n  res = _mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 3, 2)));\n  return pfirst(_mm_max_ps(res, _mm_permute_ps(res, _MM_SHUFFLE(0, 0, 0, 1))));\n}\n\ntemplate <>\nEIGEN_STRONG_INLINE double predux_max<Packet8d>(const Packet8d& a) {\n  __m256d lane0 = _mm512_extractf64x4_pd(a, 0);\n  __m256d lane1 = _mm512_extractf64x4_pd(a, 1);\n  __m256d res = _mm256_max_pd(lane0, lane1);\n  res = _mm256_max_pd(res, _mm256_permute2f128_pd(res, res, 1));\n  return pfirst(_mm256_max_pd(res, _mm256_shuffle_pd(res, res, 1)));\n}\n\ntemplate <int Offset>\nstruct palign_impl<Offset, Packet16f> {\n  static EIGEN_STRONG_INLINE void run(Packet16f& first,\n                                      const Packet16f& second) {\n    if (Offset != 0) {\n      __m512i first_idx = _mm512_set_epi32(\n          Offset + 15, Offset + 14, Offset + 13, Offset + 12, Offset + 11,\n          Offset + 10, Offset + 9, Offset + 8, Offset + 7, Offset + 6,\n          Offset + 5, Offset + 4, Offset + 3, Offset + 2, Offset + 1, Offset);\n\n      __m512i second_idx =\n          _mm512_set_epi32(Offset - 1, Offset - 2, Offset - 3, Offset - 4,\n                           Offset - 5, Offset - 6, Offset - 7, Offset - 8,\n                           Offset - 9, Offset - 10, Offset - 11, Offset - 12,\n                           Offset - 13, Offset - 14, Offset - 15, Offset - 16);\n\n      unsigned short mask = 0xFFFF;\n      mask <<= (16 - Offset);\n\n      first = _mm512_permutexvar_ps(first_idx, 
first);\n      Packet16f tmp = _mm512_permutexvar_ps(second_idx, second);\n      first = _mm512_mask_blend_ps(mask, first, tmp);\n    }\n  }\n};\ntemplate <int Offset>\nstruct palign_impl<Offset, Packet8d> {\n  static EIGEN_STRONG_INLINE void run(Packet8d& first, const Packet8d& second) {\n    if (Offset != 0) {\n      __m512i first_idx = _mm512_set_epi32(\n          0, Offset + 7, 0, Offset + 6, 0, Offset + 5, 0, Offset + 4, 0,\n          Offset + 3, 0, Offset + 2, 0, Offset + 1, 0, Offset);\n\n      __m512i second_idx = _mm512_set_epi32(\n          0, Offset - 1, 0, Offset - 2, 0, Offset - 3, 0, Offset - 4, 0,\n          Offset - 5, 0, Offset - 6, 0, Offset - 7, 0, Offset - 8);\n\n      unsigned char mask = 0xFF;\n      mask <<= (8 - Offset);\n\n      first = _mm512_permutexvar_pd(first_idx, first);\n      Packet8d tmp = _mm512_permutexvar_pd(second_idx, second);\n      first = _mm512_mask_blend_pd(mask, first, tmp);\n    }\n  }\n};\n\n\n#define PACK_OUTPUT(OUTPUT, INPUT, INDEX, STRIDE) \\\n  EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[INDEX], INPUT[INDEX + STRIDE]);\n\nEIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 16>& kernel) {\n  __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);\n  __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);\n  __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);\n  __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);\n  __m512 T4 = _mm512_unpacklo_ps(kernel.packet[4], kernel.packet[5]);\n  __m512 T5 = _mm512_unpackhi_ps(kernel.packet[4], kernel.packet[5]);\n  __m512 T6 = _mm512_unpacklo_ps(kernel.packet[6], kernel.packet[7]);\n  __m512 T7 = _mm512_unpackhi_ps(kernel.packet[6], kernel.packet[7]);\n  __m512 T8 = _mm512_unpacklo_ps(kernel.packet[8], kernel.packet[9]);\n  __m512 T9 = _mm512_unpackhi_ps(kernel.packet[8], kernel.packet[9]);\n  __m512 T10 = _mm512_unpacklo_ps(kernel.packet[10], kernel.packet[11]);\n  __m512 T11 = 
_mm512_unpackhi_ps(kernel.packet[10], kernel.packet[11]);\n  __m512 T12 = _mm512_unpacklo_ps(kernel.packet[12], kernel.packet[13]);\n  __m512 T13 = _mm512_unpackhi_ps(kernel.packet[12], kernel.packet[13]);\n  __m512 T14 = _mm512_unpacklo_ps(kernel.packet[14], kernel.packet[15]);\n  __m512 T15 = _mm512_unpackhi_ps(kernel.packet[14], kernel.packet[15]);\n  __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));\n  __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));\n  __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));\n  __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));\n  __m512 S4 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(1, 0, 1, 0));\n  __m512 S5 = _mm512_shuffle_ps(T4, T6, _MM_SHUFFLE(3, 2, 3, 2));\n  __m512 S6 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(1, 0, 1, 0));\n  __m512 S7 = _mm512_shuffle_ps(T5, T7, _MM_SHUFFLE(3, 2, 3, 2));\n  __m512 S8 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(1, 0, 1, 0));\n  __m512 S9 = _mm512_shuffle_ps(T8, T10, _MM_SHUFFLE(3, 2, 3, 2));\n  __m512 S10 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(1, 0, 1, 0));\n  __m512 S11 = _mm512_shuffle_ps(T9, T11, _MM_SHUFFLE(3, 2, 3, 2));\n  __m512 S12 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(1, 0, 1, 0));\n  __m512 S13 = _mm512_shuffle_ps(T12, T14, _MM_SHUFFLE(3, 2, 3, 2));\n  __m512 S14 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(1, 0, 1, 0));\n  __m512 S15 = _mm512_shuffle_ps(T13, T15, _MM_SHUFFLE(3, 2, 3, 2));\n\n  EIGEN_EXTRACT_8f_FROM_16f(S0, S0);\n  EIGEN_EXTRACT_8f_FROM_16f(S1, S1);\n  EIGEN_EXTRACT_8f_FROM_16f(S2, S2);\n  EIGEN_EXTRACT_8f_FROM_16f(S3, S3);\n  EIGEN_EXTRACT_8f_FROM_16f(S4, S4);\n  EIGEN_EXTRACT_8f_FROM_16f(S5, S5);\n  EIGEN_EXTRACT_8f_FROM_16f(S6, S6);\n  EIGEN_EXTRACT_8f_FROM_16f(S7, S7);\n  EIGEN_EXTRACT_8f_FROM_16f(S8, S8);\n  EIGEN_EXTRACT_8f_FROM_16f(S9, S9);\n  EIGEN_EXTRACT_8f_FROM_16f(S10, S10);\n  EIGEN_EXTRACT_8f_FROM_16f(S11, S11);\n  EIGEN_EXTRACT_8f_FROM_16f(S12, S12);\n  EIGEN_EXTRACT_8f_FROM_16f(S13, S13);\n  
EIGEN_EXTRACT_8f_FROM_16f(S14, S14);\n  EIGEN_EXTRACT_8f_FROM_16f(S15, S15);\n\n  PacketBlock<Packet8f, 32> tmp;\n\n  tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S4_0, 0x20);\n  tmp.packet[1] = _mm256_permute2f128_ps(S1_0, S5_0, 0x20);\n  tmp.packet[2] = _mm256_permute2f128_ps(S2_0, S6_0, 0x20);\n  tmp.packet[3] = _mm256_permute2f128_ps(S3_0, S7_0, 0x20);\n  tmp.packet[4] = _mm256_permute2f128_ps(S0_0, S4_0, 0x31);\n  tmp.packet[5] = _mm256_permute2f128_ps(S1_0, S5_0, 0x31);\n  tmp.packet[6] = _mm256_permute2f128_ps(S2_0, S6_0, 0x31);\n  tmp.packet[7] = _mm256_permute2f128_ps(S3_0, S7_0, 0x31);\n\n  tmp.packet[8] = _mm256_permute2f128_ps(S0_1, S4_1, 0x20);\n  tmp.packet[9] = _mm256_permute2f128_ps(S1_1, S5_1, 0x20);\n  tmp.packet[10] = _mm256_permute2f128_ps(S2_1, S6_1, 0x20);\n  tmp.packet[11] = _mm256_permute2f128_ps(S3_1, S7_1, 0x20);\n  tmp.packet[12] = _mm256_permute2f128_ps(S0_1, S4_1, 0x31);\n  tmp.packet[13] = _mm256_permute2f128_ps(S1_1, S5_1, 0x31);\n  tmp.packet[14] = _mm256_permute2f128_ps(S2_1, S6_1, 0x31);\n  tmp.packet[15] = _mm256_permute2f128_ps(S3_1, S7_1, 0x31);\n\n  // Second set of _m256 outputs\n  tmp.packet[16] = _mm256_permute2f128_ps(S8_0, S12_0, 0x20);\n  tmp.packet[17] = _mm256_permute2f128_ps(S9_0, S13_0, 0x20);\n  tmp.packet[18] = _mm256_permute2f128_ps(S10_0, S14_0, 0x20);\n  tmp.packet[19] = _mm256_permute2f128_ps(S11_0, S15_0, 0x20);\n  tmp.packet[20] = _mm256_permute2f128_ps(S8_0, S12_0, 0x31);\n  tmp.packet[21] = _mm256_permute2f128_ps(S9_0, S13_0, 0x31);\n  tmp.packet[22] = _mm256_permute2f128_ps(S10_0, S14_0, 0x31);\n  tmp.packet[23] = _mm256_permute2f128_ps(S11_0, S15_0, 0x31);\n\n  tmp.packet[24] = _mm256_permute2f128_ps(S8_1, S12_1, 0x20);\n  tmp.packet[25] = _mm256_permute2f128_ps(S9_1, S13_1, 0x20);\n  tmp.packet[26] = _mm256_permute2f128_ps(S10_1, S14_1, 0x20);\n  tmp.packet[27] = _mm256_permute2f128_ps(S11_1, S15_1, 0x20);\n  tmp.packet[28] = _mm256_permute2f128_ps(S8_1, S12_1, 0x31);\n  tmp.packet[29] = 
_mm256_permute2f128_ps(S9_1, S13_1, 0x31);\n  tmp.packet[30] = _mm256_permute2f128_ps(S10_1, S14_1, 0x31);\n  tmp.packet[31] = _mm256_permute2f128_ps(S11_1, S15_1, 0x31);\n\n  // Pack them into the output\n  PACK_OUTPUT(kernel.packet, tmp.packet, 0, 16);\n  PACK_OUTPUT(kernel.packet, tmp.packet, 1, 16);\n  PACK_OUTPUT(kernel.packet, tmp.packet, 2, 16);\n  PACK_OUTPUT(kernel.packet, tmp.packet, 3, 16);\n\n  PACK_OUTPUT(kernel.packet, tmp.packet, 4, 16);\n  PACK_OUTPUT(kernel.packet, tmp.packet, 5, 16);\n  PACK_OUTPUT(kernel.packet, tmp.packet, 6, 16);\n  PACK_OUTPUT(kernel.packet, tmp.packet, 7, 16);\n\n  PACK_OUTPUT(kernel.packet, tmp.packet, 8, 16);\n  PACK_OUTPUT(kernel.packet, tmp.packet, 9, 16);\n  PACK_OUTPUT(kernel.packet, tmp.packet, 10, 16);\n  PACK_OUTPUT(kernel.packet, tmp.packet, 11, 16);\n\n  PACK_OUTPUT(kernel.packet, tmp.packet, 12, 16);\n  PACK_OUTPUT(kernel.packet, tmp.packet, 13, 16);\n  PACK_OUTPUT(kernel.packet, tmp.packet, 14, 16);\n  PACK_OUTPUT(kernel.packet, tmp.packet, 15, 16);\n}\n#define PACK_OUTPUT_2(OUTPUT, INPUT, INDEX, STRIDE)         \\\n  EIGEN_INSERT_8f_INTO_16f(OUTPUT[INDEX], INPUT[2 * INDEX], \\\n                           INPUT[2 * INDEX + STRIDE]);\n\nEIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet16f, 4>& kernel) {\n  __m512 T0 = _mm512_unpacklo_ps(kernel.packet[0], kernel.packet[1]);\n  __m512 T1 = _mm512_unpackhi_ps(kernel.packet[0], kernel.packet[1]);\n  __m512 T2 = _mm512_unpacklo_ps(kernel.packet[2], kernel.packet[3]);\n  __m512 T3 = _mm512_unpackhi_ps(kernel.packet[2], kernel.packet[3]);\n\n  __m512 S0 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(1, 0, 1, 0));\n  __m512 S1 = _mm512_shuffle_ps(T0, T2, _MM_SHUFFLE(3, 2, 3, 2));\n  __m512 S2 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(1, 0, 1, 0));\n  __m512 S3 = _mm512_shuffle_ps(T1, T3, _MM_SHUFFLE(3, 2, 3, 2));\n\n  EIGEN_EXTRACT_8f_FROM_16f(S0, S0);\n  EIGEN_EXTRACT_8f_FROM_16f(S1, S1);\n  EIGEN_EXTRACT_8f_FROM_16f(S2, S2);\n  EIGEN_EXTRACT_8f_FROM_16f(S3, S3);\n\n  
PacketBlock<Packet8f, 8> tmp;\n\n  tmp.packet[0] = _mm256_permute2f128_ps(S0_0, S1_0, 0x20);\n  tmp.packet[1] = _mm256_permute2f128_ps(S2_0, S3_0, 0x20);\n  tmp.packet[2] = _mm256_permute2f128_ps(S0_0, S1_0, 0x31);\n  tmp.packet[3] = _mm256_permute2f128_ps(S2_0, S3_0, 0x31);\n\n  tmp.packet[4] = _mm256_permute2f128_ps(S0_1, S1_1, 0x20);\n  tmp.packet[5] = _mm256_permute2f128_ps(S2_1, S3_1, 0x20);\n  tmp.packet[6] = _mm256_permute2f128_ps(S0_1, S1_1, 0x31);\n  tmp.packet[7] = _mm256_permute2f128_ps(S2_1, S3_1, 0x31);\n\n  PACK_OUTPUT_2(kernel.packet, tmp.packet, 0, 1);\n  PACK_OUTPUT_2(kernel.packet, tmp.packet, 1, 1);\n  PACK_OUTPUT_2(kernel.packet, tmp.packet, 2, 1);\n  PACK_OUTPUT_2(kernel.packet, tmp.packet, 3, 1);\n}\n\n#define PACK_OUTPUT_SQ_D(OUTPUT, INPUT, INDEX, STRIDE)                \\\n  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX], 0); \\\n  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[INDEX + STRIDE], 1);\n\n#define PACK_OUTPUT_D(OUTPUT, INPUT, INDEX, STRIDE)                         \\\n  OUTPUT[INDEX] = _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX)], 0); \\\n  OUTPUT[INDEX] =                                                           \\\n      _mm512_insertf64x4(OUTPUT[INDEX], INPUT[(2 * INDEX) + STRIDE], 1);\n\nEIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8d, 4>& kernel) {\n  __m512d T0 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);\n  __m512d T1 = _mm512_shuffle_pd(kernel.packet[0], kernel.packet[1], 0xff);\n  __m512d T2 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);\n  __m512d T3 = _mm512_shuffle_pd(kernel.packet[2], kernel.packet[3], 0xff);\n\n  PacketBlock<Packet4d, 8> tmp;\n\n  tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),\n                                         _mm512_extractf64x4_pd(T2, 0), 0x20);\n  tmp.packet[1] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),\n                                         _mm512_extractf64x4_pd(T3, 0), 
0x20);\n  tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),\n                                         _mm512_extractf64x4_pd(T2, 0), 0x31);\n  tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),\n                                         _mm512_extractf64x4_pd(T3, 0), 0x31);\n\n  tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),\n                                         _mm512_extractf64x4_pd(T2, 1), 0x20);\n  tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),\n                                         _mm512_extractf64x4_pd(T3, 1), 0x20);\n  tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),\n                                         _mm512_extractf64x4_pd(T2, 1), 0x31);\n  tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),\n                                         _mm512_extractf64x4_pd(T3, 1), 0x31);\n\n  PACK_OUTPUT_D(kernel.packet, tmp.packet, 0, 1);\n  PACK_OUTPUT_D(kernel.packet, tmp.packet, 1, 1);\n  PACK_OUTPUT_D(kernel.packet, tmp.packet, 2, 1);\n  PACK_OUTPUT_D(kernel.packet, tmp.packet, 3, 1);\n}\n\nEIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet8d, 8>& kernel) {\n  __m512d T0 = _mm512_unpacklo_pd(kernel.packet[0], kernel.packet[1]);\n  __m512d T1 = _mm512_unpackhi_pd(kernel.packet[0], kernel.packet[1]);\n  __m512d T2 = _mm512_unpacklo_pd(kernel.packet[2], kernel.packet[3]);\n  __m512d T3 = _mm512_unpackhi_pd(kernel.packet[2], kernel.packet[3]);\n  __m512d T4 = _mm512_unpacklo_pd(kernel.packet[4], kernel.packet[5]);\n  __m512d T5 = _mm512_unpackhi_pd(kernel.packet[4], kernel.packet[5]);\n  __m512d T6 = _mm512_unpacklo_pd(kernel.packet[6], kernel.packet[7]);\n  __m512d T7 = _mm512_unpackhi_pd(kernel.packet[6], kernel.packet[7]);\n\n  PacketBlock<Packet4d, 16> tmp;\n\n  tmp.packet[0] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),\n                                         _mm512_extractf64x4_pd(T2, 0), 0x20);\n  tmp.packet[1] 
= _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),\n                                         _mm512_extractf64x4_pd(T3, 0), 0x20);\n  tmp.packet[2] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 0),\n                                         _mm512_extractf64x4_pd(T2, 0), 0x31);\n  tmp.packet[3] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 0),\n                                         _mm512_extractf64x4_pd(T3, 0), 0x31);\n\n  tmp.packet[4] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),\n                                         _mm512_extractf64x4_pd(T2, 1), 0x20);\n  tmp.packet[5] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),\n                                         _mm512_extractf64x4_pd(T3, 1), 0x20);\n  tmp.packet[6] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T0, 1),\n                                         _mm512_extractf64x4_pd(T2, 1), 0x31);\n  tmp.packet[7] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T1, 1),\n                                         _mm512_extractf64x4_pd(T3, 1), 0x31);\n\n  tmp.packet[8] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0),\n                                         _mm512_extractf64x4_pd(T6, 0), 0x20);\n  tmp.packet[9] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0),\n                                         _mm512_extractf64x4_pd(T7, 0), 0x20);\n  tmp.packet[10] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 0),\n                                          _mm512_extractf64x4_pd(T6, 0), 0x31);\n  tmp.packet[11] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 0),\n                                          _mm512_extractf64x4_pd(T7, 0), 0x31);\n\n  tmp.packet[12] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1),\n                                          _mm512_extractf64x4_pd(T6, 1), 0x20);\n  tmp.packet[13] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1),\n                                          _mm512_extractf64x4_pd(T7, 1), 0x20);\n  
tmp.packet[14] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T4, 1),\n                                          _mm512_extractf64x4_pd(T6, 1), 0x31);\n  tmp.packet[15] = _mm256_permute2f128_pd(_mm512_extractf64x4_pd(T5, 1),\n                                          _mm512_extractf64x4_pd(T7, 1), 0x31);\n\n  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 0, 8);\n  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 1, 8);\n  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 2, 8);\n  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 3, 8);\n\n  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 4, 8);\n  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 5, 8);\n  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 6, 8);\n  PACK_OUTPUT_SQ_D(kernel.packet, tmp.packet, 7, 8);\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet16f pblend(const Selector<16>& /*ifPacket*/,\n                                     const Packet16f& /*thenPacket*/,\n                                     const Packet16f& /*elsePacket*/) {\n  assert(false && \"To be implemented\");\n  return Packet16f();\n}\ntemplate <>\nEIGEN_STRONG_INLINE Packet8d pblend(const Selector<8>& /*ifPacket*/,\n                                    const Packet8d& /*thenPacket*/,\n                                    const Packet8d& /*elsePacket*/) {\n  assert(false && \"To be implemented\");\n  return Packet8d();\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_PACKET_MATH_AVX512_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/AltiVec/Complex.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010-2016 Konstantinos Margaritis <markos@freevec.org>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_COMPLEX32_ALTIVEC_H\n#define EIGEN_COMPLEX32_ALTIVEC_H\n\nnamespace Eigen {\n\nnamespace internal {\n\nstatic Packet4ui  p4ui_CONJ_XOR = vec_mergeh((Packet4ui)p4i_ZERO, (Packet4ui)p4f_MZERO);//{ 0x00000000, 0x80000000, 0x00000000, 0x80000000 };\n#ifdef __VSX__\n#if defined(_BIG_ENDIAN)\nstatic Packet2ul  p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2d_MZERO, (Packet4ui) p2l_ZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };\nstatic Packet2ul  p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO,  (Packet4ui) p2d_MZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };\n#else\nstatic Packet2ul  p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO,  (Packet4ui) p2d_MZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };\nstatic Packet2ul  p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2d_MZERO, (Packet4ui) p2l_ZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };\n#endif\n#endif\n\n//---------- float ----------\nstruct Packet2cf\n{\n  EIGEN_STRONG_INLINE explicit Packet2cf() : v(p4f_ZERO) {}\n  EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}\n  Packet4f  v;\n};\n\ntemplate<> struct packet_traits<std::complex<float> >  : default_packet_traits\n{\n  typedef Packet2cf type;\n  typedef Packet2cf half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 2,\n    HasHalfPacket = 0,\n\n    HasAdd    = 1,\n    HasSub    = 1,\n    HasMul    = 1,\n    HasDiv    = 1,\n    HasNegate = 1,\n    HasAbs    = 0,\n    HasAbs2   = 0,\n    HasMin    = 0,\n    
HasMax    = 0,\n#ifdef __VSX__\n    HasBlend  = 1,\n#endif\n    HasSetLinear = 0\n  };\n};\n\ntemplate<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>&  from)\n{\n  Packet2cf res;\n  if((std::ptrdiff_t(&from) % 16) == 0)\n    res.v = pload<Packet4f>((const float *)&from);\n  else\n    res.v = ploadu<Packet4f>((const float *)&from);\n  res.v = vec_perm(res.v, res.v, p16uc_PSET64_HI);\n  return res;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>*        from) { return Packet2cf(pload<Packet4f>((const float *) from)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>*       from) { return Packet2cf(ploadu<Packet4f>((const float*) from)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>*     from) { return pset1<Packet2cf>(*from); }\n\ntemplate<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> *   to, const Packet2cf& from) { pstore((float*)to, from.v); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> *   to, const Packet2cf& from) { pstoreu((float*)to, from.v); }\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)\n{\n  std::complex<float> EIGEN_ALIGN16 af[2];\n  af[0] = from[0*stride];\n  af[1] = from[1*stride];\n  return pload<Packet2cf>(af);\n}\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)\n{\n  std::complex<float> EIGEN_ALIGN16 af[2];\n  pstore<std::complex<float> >((std::complex<float> *) af, from);\n  to[0*stride] = af[0];\n  to[1*stride] = af[1];\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& 
a, const Packet2cf& b) { return Packet2cf(a.v + b.v); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(a.v - b.v); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(a.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a) { return Packet2cf(pxor<Packet4f>(a.v, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR))); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)\n{\n  Packet4f v1, v2;\n\n  // Permute and multiply the real parts of a and b\n  v1 = vec_perm(a.v, a.v, p16uc_PSET32_WODD);\n  // Get the imaginary parts of a\n  v2 = vec_perm(a.v, a.v, p16uc_PSET32_WEVEN);\n  // multiply a_re * b \n  v1 = vec_madd(v1, b.v, p4f_ZERO);\n  // multiply a_im * b and get the conjugate result\n  v2 = vec_madd(v2, b.v, p4f_ZERO);\n  v2 = reinterpret_cast<Packet4f>(pxor(v2, reinterpret_cast<Packet4f>(p4ui_CONJ_XOR)));\n  // permute back to a proper order\n  v2 = vec_perm(v2, v2, p16uc_COMPLEX32_REV);\n  \n  return Packet2cf(padd<Packet4f>(v1, v2));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pand   <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pand<Packet4f>(a.v, b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf por    <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(por<Packet4f>(a.v, b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pxor   <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pxor<Packet4f>(a.v, b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pandnot<Packet4f>(a.v, b.v)); }\n\ntemplate<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> * addr)    { EIGEN_PPC_PREFETCH(addr); }\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<float>  pfirst<Packet2cf>(const Packet2cf& a)\n{\n  std::complex<float> 
EIGEN_ALIGN16 res[2];\n  pstore((float *)&res, a.v);\n\n  return res[0];\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)\n{\n  Packet4f rev_a;\n  rev_a = vec_perm(a.v, a.v, p16uc_COMPLEX32_REV2);\n  return Packet2cf(rev_a);\n}\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)\n{\n  Packet4f b;\n  b = vec_sld(a.v, a.v, 8);\n  b = padd<Packet4f>(a.v, b);\n  return pfirst<Packet2cf>(Packet2cf(b));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)\n{\n  Packet4f b1, b2;\n#ifdef _BIG_ENDIAN  \n  b1 = vec_sld(vecs[0].v, vecs[1].v, 8);\n  b2 = vec_sld(vecs[1].v, vecs[0].v, 8);\n#else\n  b1 = vec_sld(vecs[1].v, vecs[0].v, 8);\n  b2 = vec_sld(vecs[0].v, vecs[1].v, 8);\n#endif\n  b2 = vec_sld(b2, b2, 8);\n  b2 = padd<Packet4f>(b1, b2);\n\n  return Packet2cf(b2);\n}\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)\n{\n  Packet4f b;\n  Packet2cf prod;\n  b = vec_sld(a.v, a.v, 8);\n  prod = pmul<Packet2cf>(a, Packet2cf(b));\n\n  return pfirst<Packet2cf>(prod);\n}\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet2cf>\n{\n  static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second)\n  {\n    if (Offset==1)\n    {\n#ifdef _BIG_ENDIAN\n      first.v = vec_sld(first.v, second.v, 8);\n#else\n      first.v = vec_sld(second.v, first.v, 8);\n#endif\n    }\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cf, Packet2cf, false,true>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const\n  {\n    return internal::pmul(a, pconj(b));\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cf, Packet2cf, true,false>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const\n  { return 
padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const\n  {\n    return internal::pmul(pconj(a), b);\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cf, Packet2cf, true,true>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const\n  {\n    return pconj(internal::pmul(a, b));\n  }\n};\n\ntemplate<> struct conj_helper<Packet4f, Packet2cf, false,false>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet4f& x, const Packet2cf& y, const Packet2cf& c) const\n  { return padd(c, pmul(x,y)); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet4f& x, const Packet2cf& y) const\n  { return Packet2cf(internal::pmul<Packet4f>(x, y.v)); }\n};\n\ntemplate<> struct conj_helper<Packet2cf, Packet4f, false,false>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet4f& y, const Packet2cf& c) const\n  { return padd(c, pmul(x,y)); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& x, const Packet4f& y) const\n  { return Packet2cf(internal::pmul<Packet4f>(x.v, y)); }\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)\n{\n  // TODO optimize it for AltiVec\n  Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a, b);\n  Packet4f s = pmul<Packet4f>(b.v, b.v);\n  return Packet2cf(pdiv(res.v, padd<Packet4f>(s, vec_perm(s, s, p16uc_COMPLEX32_REV))));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& x)\n{\n  return Packet2cf(vec_perm(x.v, x.v, p16uc_COMPLEX32_REV));\n}\n\nEIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel)\n{\n  Packet4f tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);\n  kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);\n  
kernel.packet[0].v = tmp;\n}\n\n#ifdef __VSX__\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {\n  Packet2cf result;\n  result.v = reinterpret_cast<Packet4f>(pblend<Packet2d>(ifPacket, reinterpret_cast<Packet2d>(thenPacket.v), reinterpret_cast<Packet2d>(elsePacket.v)));\n  return result;\n}\n#endif\n\n//---------- double ----------\n#ifdef __VSX__\nstruct Packet1cd\n{\n  EIGEN_STRONG_INLINE Packet1cd() {}\n  EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {}\n  Packet2d v;\n};\n\ntemplate<> struct packet_traits<std::complex<double> >  : default_packet_traits\n{\n  typedef Packet1cd type;\n  typedef Packet1cd half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 0,\n    size = 1,\n    HasHalfPacket = 0,\n\n    HasAdd    = 1,\n    HasSub    = 1,\n    HasMul    = 1,\n    HasDiv    = 1,\n    HasNegate = 1,\n    HasAbs    = 0,\n    HasAbs2   = 0,\n    HasMin    = 0,\n    HasMax    = 0,\n    HasSetLinear = 0\n  };\n};\n\ntemplate<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from) { return Packet1cd(pload<Packet2d>((const double*)from)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { return Packet1cd(ploadu<Packet2d>((const double*)from)); }\ntemplate<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> *   to, const Packet1cd& from) { pstore((double*)to, from.v); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> *   to, const Packet1cd& from) { pstoreu((double*)to, from.v); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>&  from)\n{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); 
}\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride)\n{\n  std::complex<double> EIGEN_ALIGN16 af[2];\n  af[0] = from[0*stride];\n  af[1] = from[1*stride];\n  return pload<Packet1cd>(af);\n}\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride)\n{\n  std::complex<double> EIGEN_ALIGN16 af[2];\n  pstore<std::complex<double> >(af, from);\n  to[0*stride] = af[0];\n  to[1*stride] = af[1];\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v + b.v); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v - b.v); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(Packet2d(a.v))); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd(pxor(a.v, reinterpret_cast<Packet2d>(p2ul_CONJ_XOR2))); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)\n{\n  Packet2d a_re, a_im, v1, v2;\n\n  // Permute and multiply the real parts of a and b\n  a_re = vec_perm(a.v, a.v, p16uc_PSET64_HI);\n  // Get the imaginary parts of a\n  a_im = vec_perm(a.v, a.v, p16uc_PSET64_LO);\n  // multiply a_re * b\n  v1 = vec_madd(a_re, b.v, p2d_ZERO);\n  // multiply a_im * b and get the conjugate result\n  v2 = vec_madd(a_im, b.v, p2d_ZERO);\n  v2 = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(v2), reinterpret_cast<Packet4ui>(v2), 8));\n  v2 = pxor(v2, reinterpret_cast<Packet2d>(p2ul_CONJ_XOR1));\n\n  return Packet1cd(padd<Packet2d>(v1, v2));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pand   <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(pand(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd por    
<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(por(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pxor   <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(pxor(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(pandnot(a.v, b.v)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>*     from)  { return pset1<Packet1cd>(*from); }\n\ntemplate<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> * addr)    { EIGEN_PPC_PREFETCH(addr); }\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double>  pfirst<Packet1cd>(const Packet1cd& a)\n{\n  std::complex<double> EIGEN_ALIGN16 res[2];\n  pstore<std::complex<double> >(res, a);\n\n  return res[0];\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a) { return pfirst(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs)        { return vecs[0]; }\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a) { return pfirst(a); }\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet1cd>\n{\n  static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/)\n  {\n    // FIXME is it sure we never have to align a Packet1cd?\n    // Even though a std::complex<double> has 16 bytes, it is not necessarily aligned on a 16 bytes boundary...\n  }\n};\n\ntemplate<> struct conj_helper<Packet1cd, Packet1cd, false,true>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const\n  {\n    return internal::pmul(a, pconj(b));\n  }\n};\n\ntemplate<> struct 
conj_helper<Packet1cd, Packet1cd, true,false>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const\n  {\n    return internal::pmul(pconj(a), b);\n  }\n};\n\ntemplate<> struct conj_helper<Packet1cd, Packet1cd, true,true>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const\n  {\n    return pconj(internal::pmul(a, b));\n  }\n};\ntemplate<> struct conj_helper<Packet2d, Packet1cd, false,false>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet2d& x, const Packet1cd& y, const Packet1cd& c) const\n  { return padd(c, pmul(x,y)); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet2d& x, const Packet1cd& y) const\n  { return Packet1cd(internal::pmul<Packet2d>(x, y.v)); }\n};\n\ntemplate<> struct conj_helper<Packet1cd, Packet2d, false,false>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet2d& y, const Packet1cd& c) const\n  { return padd(c, pmul(x,y)); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& x, const Packet2d& y) const\n  { return Packet1cd(internal::pmul<Packet2d>(x.v, y)); }\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)\n{\n  // TODO optimize it for AltiVec\n  Packet1cd res = conj_helper<Packet1cd,Packet1cd,false,true>().pmul(a,b);\n  Packet2d s = pmul<Packet2d>(b.v, b.v);\n  return Packet1cd(pdiv(res.v, padd<Packet2d>(s, vec_perm(s, s, p16uc_REVERSE64))));\n}\n\nEIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)\n{\n  return Packet1cd(preverse(Packet2d(x.v)));\n}\n\nEIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)\n{\n  Packet2d tmp = vec_perm(kernel.packet[0].v, 
kernel.packet[1].v, p16uc_TRANSPOSE64_HI);\n  kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);\n  kernel.packet[0].v = tmp;\n}\n#endif // __VSX__\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_COMPLEX32_ALTIVEC_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/AltiVec/MathFunctions.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2007 Julien Pommier\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2016 Konstantinos Margaritis <markos@freevec.org>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n/* The sin, cos, exp, and log functions of this file come from\n * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/\n */\n\n#ifndef EIGEN_MATH_FUNCTIONS_ALTIVEC_H\n#define EIGEN_MATH_FUNCTIONS_ALTIVEC_H\n\nnamespace Eigen {\n\nnamespace internal {\n\nstatic _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);\nstatic _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);\nstatic _EIGEN_DECLARE_CONST_Packet4i(23, 23);\n\nstatic _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);\n\n/* the smallest non denormalized float number */\nstatic _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos,  0x00800000);\nstatic _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf,     0xff800000); // -1.f/0.f\nstatic _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_nan,     0xffffffff);\n  \n/* natural logarithm computed for 4 simultaneous float\n  return NaN for x <= 0\n*/\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 
2.0000714765E-1f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);\n\nstatic _EIGEN_DECLARE_CONST_Packet4f(exp_hi,  88.3762626647950f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);\n\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);\n\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);\nstatic _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);\n\n#ifdef __VSX__\nstatic _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);\nstatic _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);\nstatic _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);\n\nstatic _EIGEN_DECLARE_CONST_Packet2d(exp_hi,  709.437);\nstatic _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);\n\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);\n\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);\n\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);\nstatic 
_EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);\n\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);\n\n#ifdef __POWER8_VECTOR__\nstatic Packet2l p2l_1023 = { 1023, 1023 };\nstatic Packet2ul p2ul_52 = { 52, 52 };\n#endif\n\n#endif\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f plog<Packet4f>(const Packet4f& _x)\n{\n  Packet4f x = _x;\n\n  Packet4i emm0;\n\n  /* isvalid_mask is 0 if x < 0 or x is NaN. */\n  Packet4ui isvalid_mask = reinterpret_cast<Packet4ui>(vec_cmpge(x, p4f_ZERO));\n  Packet4ui iszero_mask = reinterpret_cast<Packet4ui>(vec_cmpeq(x, p4f_ZERO));\n\n  x = pmax(x, p4f_min_norm_pos);  /* cut off denormalized stuff */\n  emm0 = vec_sr(reinterpret_cast<Packet4i>(x),\n                reinterpret_cast<Packet4ui>(p4i_23));\n\n  /* keep only the fractional part */\n  x = pand(x, p4f_inv_mant_mask);\n  x = por(x, p4f_half);\n\n  emm0 = psub(emm0, p4i_0x7f);\n  Packet4f e = padd(vec_ctf(emm0, 0), p4f_1);\n\n  /* part2:\n     if( x < SQRTHF ) {\n       e -= 1;\n       x = x + x - 1.0;\n     } else { x = x - 1.0; }\n  */\n  Packet4f mask = reinterpret_cast<Packet4f>(vec_cmplt(x, p4f_cephes_SQRTHF));\n  Packet4f tmp = pand(x, mask);\n  x = psub(x, p4f_1);\n  e = psub(e, pand(p4f_1, mask));\n  x = padd(x, tmp);\n\n  Packet4f x2 = pmul(x,x);\n  Packet4f x3 = pmul(x2,x);\n\n  Packet4f y, y1, y2;\n  y  = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);\n  y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);\n  y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);\n  y  = pmadd(y , x, p4f_cephes_log_p2);\n  y1 = pmadd(y1, x, p4f_cephes_log_p5);\n  y2 = pmadd(y2, x, p4f_cephes_log_p8);\n  y = pmadd(y, x3, y1);\n  y = pmadd(y, x3, y2);\n  y = pmul(y, x3);\n\n  y1 = pmul(e, p4f_cephes_log_q1);\n  tmp = pmul(x2, p4f_half);\n  y = padd(y, y1);\n  x = psub(x, tmp);\n  y2 = pmul(e, p4f_cephes_log_q2);\n  x = 
padd(x, y);\n  x = padd(x, y2);\n  // negative arg will be NAN, 0 will be -INF\n  x = vec_sel(x, p4f_minus_inf, iszero_mask);\n  x = vec_sel(p4f_minus_nan, x, isvalid_mask);\n  return x;\n}\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f pexp<Packet4f>(const Packet4f& _x)\n{\n  Packet4f x = _x;\n\n  Packet4f tmp, fx;\n  Packet4i emm0;\n\n  // clamp x\n  x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);\n\n  // express exp(x) as exp(g + n*log(2))\n  fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);\n\n  fx = pfloor(fx);\n\n  tmp = pmul(fx, p4f_cephes_exp_C1);\n  Packet4f z = pmul(fx, p4f_cephes_exp_C2);\n  x = psub(x, tmp);\n  x = psub(x, z);\n\n  z = pmul(x,x);\n\n  Packet4f y = p4f_cephes_exp_p0;\n  y = pmadd(y, x, p4f_cephes_exp_p1);\n  y = pmadd(y, x, p4f_cephes_exp_p2);\n  y = pmadd(y, x, p4f_cephes_exp_p3);\n  y = pmadd(y, x, p4f_cephes_exp_p4);\n  y = pmadd(y, x, p4f_cephes_exp_p5);\n  y = pmadd(y, z, x);\n  y = padd(y, p4f_1);\n\n  // build 2^n\n  emm0 = vec_cts(fx, 0);\n  emm0 = vec_add(emm0, p4i_0x7f);\n  emm0 = vec_sl(emm0, reinterpret_cast<Packet4ui>(p4i_23));\n\n  // Altivec's max & min operators just drop silent NaNs. 
Check NaNs in \n  // inputs and return them unmodified.\n  Packet4ui isnumber_mask = reinterpret_cast<Packet4ui>(vec_cmpeq(_x, _x));\n  return vec_sel(_x, pmax(pmul(y, reinterpret_cast<Packet4f>(emm0)), _x),\n                 isnumber_mask);\n}\n\n#ifndef EIGEN_COMP_CLANG\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f prsqrt<Packet4f>(const Packet4f& x)\n{\n  return  vec_rsqrt(x);\n}\n#endif\n\n#ifdef __VSX__\n#ifndef EIGEN_COMP_CLANG\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket2d prsqrt<Packet2d>(const Packet2d& x)\n{\n  return  vec_rsqrt(x);\n}\n#endif\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f psqrt<Packet4f>(const Packet4f& x)\n{\n  return  vec_sqrt(x);\n}\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket2d psqrt<Packet2d>(const Packet2d& x)\n{\n  return  vec_sqrt(x);\n}\n\n// VSX support varies between different compilers and even different\n// versions of the same compiler.  For gcc version >= 4.9.3, we can use\n// vec_cts to efficiently convert Packet2d to Packet2l.  Otherwise, use\n// a slow version that works with older compilers. 
\n// Update: apparently vec_cts/vec_ctf intrinsics for 64-bit doubles\n// are buggy, https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70963\nstatic inline Packet2l ConvertToPacket2l(const Packet2d& x) {\n#if EIGEN_GNUC_AT_LEAST(5, 4) || \\\n    (EIGEN_GNUC_AT(6, 1) && __GNUC_PATCHLEVEL__ >= 1)\n  return vec_cts(x, 0);    // TODO: check clang version.\n#else\n  double tmp[2];\n  memcpy(tmp, &x, sizeof(tmp));\n  Packet2l l = { static_cast<long long>(tmp[0]),\n                 static_cast<long long>(tmp[1]) };\n  return l;\n#endif\n}\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket2d pexp<Packet2d>(const Packet2d& _x)\n{\n  Packet2d x = _x;\n\n  Packet2d tmp, fx;\n  Packet2l emm0;\n\n  // clamp x\n  x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);\n\n  /* express exp(x) as exp(g + n*log(2)) */\n  fx = pmadd(x, p2d_cephes_LOG2EF, p2d_half);\n\n  fx = pfloor(fx);\n\n  tmp = pmul(fx, p2d_cephes_exp_C1);\n  Packet2d z = pmul(fx, p2d_cephes_exp_C2);\n  x = psub(x, tmp);\n  x = psub(x, z);\n\n  Packet2d x2 = pmul(x,x);\n\n  Packet2d px = p2d_cephes_exp_p0;\n  px = pmadd(px, x2, p2d_cephes_exp_p1);\n  px = pmadd(px, x2, p2d_cephes_exp_p2);\n  px = pmul (px, x);\n\n  Packet2d qx = p2d_cephes_exp_q0;\n  qx = pmadd(qx, x2, p2d_cephes_exp_q1);\n  qx = pmadd(qx, x2, p2d_cephes_exp_q2);\n  qx = pmadd(qx, x2, p2d_cephes_exp_q3);\n\n  x = pdiv(px,psub(qx,px));\n  x = pmadd(p2d_2,x,p2d_1);\n\n  // build 2^n\n  emm0 = ConvertToPacket2l(fx);\n\n#ifdef __POWER8_VECTOR__ \n  emm0 = vec_add(emm0, p2l_1023);\n  emm0 = vec_sl(emm0, p2ul_52);\n#else\n  // Code is a bit complex for POWER7.  
There is actually a\n  // vec_xxsldi intrinsic but it is not supported by some gcc versions.\n  // So we shift (52-32) bits and do a word swap with zeros.\n  _EIGEN_DECLARE_CONST_Packet4i(1023, 1023);\n  _EIGEN_DECLARE_CONST_Packet4i(20, 20);    // 52 - 32\n\n  Packet4i emm04i = reinterpret_cast<Packet4i>(emm0);\n  emm04i = vec_add(emm04i, p4i_1023);\n  emm04i = vec_sl(emm04i, reinterpret_cast<Packet4ui>(p4i_20));\n  static const Packet16uc perm = {\n    0x14, 0x15, 0x16, 0x17, 0x00, 0x01, 0x02, 0x03, \n    0x1c, 0x1d, 0x1e, 0x1f, 0x08, 0x09, 0x0a, 0x0b };\n#ifdef  _BIG_ENDIAN\n  emm0 = reinterpret_cast<Packet2l>(vec_perm(p4i_ZERO, emm04i, perm));\n#else\n  emm0 = reinterpret_cast<Packet2l>(vec_perm(emm04i, p4i_ZERO, perm));\n#endif\n\n#endif\n\n  // Altivec's max & min operators just drop silent NaNs. Check NaNs in \n  // inputs and return them unmodified.\n  Packet2ul isnumber_mask = reinterpret_cast<Packet2ul>(vec_cmpeq(_x, _x));\n  return vec_sel(_x, pmax(pmul(x, reinterpret_cast<Packet2d>(emm0)), _x),\n                 isnumber_mask);\n}\n#endif\n\n}  // end namespace internal\n\n}  // end namespace Eigen\n\n#endif  // EIGEN_MATH_FUNCTIONS_ALTIVEC_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/AltiVec/PacketMath.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2016 Konstantinos Margaritis <markos@freevec.org>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PACKET_MATH_ALTIVEC_H\n#define EIGEN_PACKET_MATH_ALTIVEC_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD\n#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4\n#endif\n\n#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD\n#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD\n#endif\n\n#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD\n#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD\n#endif\n\n// NOTE Altivec has 32 registers, but Eigen only accepts a value of 8 or 16\n#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS\n#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS  32\n#endif\n\ntypedef __vector float          Packet4f;\ntypedef __vector int            Packet4i;\ntypedef __vector unsigned int   Packet4ui;\ntypedef __vector __bool int     Packet4bi;\ntypedef __vector short int      Packet8i;\ntypedef __vector unsigned char  Packet16uc;\n\n// We don't want to write the same code all the time, but we need to reuse the constants\n// and it doesn't really work to declare them global, so we define macros instead\n\n#define _EIGEN_DECLARE_CONST_FAST_Packet4f(NAME,X) \\\n  Packet4f p4f_##NAME = reinterpret_cast<Packet4f>(vec_splat_s32(X))\n\n#define _EIGEN_DECLARE_CONST_FAST_Packet4i(NAME,X) \\\n  Packet4i p4i_##NAME = vec_splat_s32(X)\n\n#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \\\n  Packet4f p4f_##NAME = pset1<Packet4f>(X)\n\n#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \\\n  Packet4i p4i_##NAME = pset1<Packet4i>(X)\n\n#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \\\n  Packet2d p2d_##NAME = pset1<Packet2d>(X)\n\n#define 
_EIGEN_DECLARE_CONST_Packet2l(NAME,X) \\\n  Packet2l p2l_##NAME = pset1<Packet2l>(X)\n\n#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \\\n  const Packet4f p4f_##NAME = reinterpret_cast<Packet4f>(pset1<Packet4i>(X))\n\n#define DST_CHAN 1\n#define DST_CTRL(size, count, stride) (((size) << 24) | ((count) << 16) | (stride))\n\n\n// These constants are endian-agnostic\nstatic _EIGEN_DECLARE_CONST_FAST_Packet4f(ZERO, 0); //{ 0.0, 0.0, 0.0, 0.0}\nstatic _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0); //{ 0, 0, 0, 0,}\nstatic _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE,1); //{ 1, 1, 1, 1}\nstatic _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS16,-16); //{ -16, -16, -16, -16}\nstatic _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS1,-1); //{ -1, -1, -1, -1}\nstatic Packet4f p4f_MZERO = (Packet4f) vec_sl((Packet4ui)p4i_MINUS1, (Packet4ui)p4i_MINUS1); //{ 0x80000000, 0x80000000, 0x80000000, 0x80000000}\n#ifndef __VSX__\nstatic Packet4f p4f_ONE = vec_ctf(p4i_ONE, 0); //{ 1.0, 1.0, 1.0, 1.0}\n#endif\n\nstatic Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 };\nstatic Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 };\n\nstatic Packet16uc p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 };\nstatic Packet16uc p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 };\n\n// Mask alignment\n#ifdef __PPC64__\n#define _EIGEN_MASK_ALIGNMENT\t0xfffffffffffffff0\n#else\n#define _EIGEN_MASK_ALIGNMENT\t0xfffffff0\n#endif\n\n#define _EIGEN_ALIGNED_PTR(x)\t((std::ptrdiff_t)(x) & _EIGEN_MASK_ALIGNMENT)\n\n// Handle endianness properly while loading constants\n// Define global static constants:\n#ifdef _BIG_ENDIAN\nstatic Packet16uc p16uc_FORWARD = vec_lvsl(0, (float*)0);\n#ifdef __VSX__\nstatic Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };\n#endif\nstatic Packet16uc p16uc_PSET32_WODD   = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 };\nstatic Packet16uc 
p16uc_PSET32_WEVEN  = vec_sld(p16uc_DUPLICATE32_HI, (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 };\nstatic Packet16uc p16uc_HALF64_0_16 = vec_sld((Packet16uc)p4i_ZERO, vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 3), 8);      //{ 0,0,0,0, 0,0,0,0, 16,16,16,16, 16,16,16,16};\n#else\nstatic Packet16uc p16uc_FORWARD = p16uc_REVERSE32; \nstatic Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };\nstatic Packet16uc p16uc_PSET32_WODD = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 1), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 };\nstatic Packet16uc p16uc_PSET32_WEVEN = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 };\nstatic Packet16uc p16uc_HALF64_0_16 = vec_sld(vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 0), (Packet16uc)p4i_ZERO, 8);      //{ 0,0,0,0, 0,0,0,0, 16,16,16,16, 16,16,16,16};\n#endif // _BIG_ENDIAN\n\nstatic Packet16uc p16uc_PSET64_HI = (Packet16uc) vec_mergeh((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN);     //{ 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 };\nstatic Packet16uc p16uc_PSET64_LO = (Packet16uc) vec_mergel((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN);     //{ 8,9,10,11, 12,13,14,15, 8,9,10,11, 12,13,14,15 };\nstatic Packet16uc p16uc_TRANSPOSE64_HI = p16uc_PSET64_HI + p16uc_HALF64_0_16;                                         //{ 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23};\nstatic Packet16uc p16uc_TRANSPOSE64_LO = p16uc_PSET64_LO + p16uc_HALF64_0_16;                                         //{ 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31};\n\nstatic Packet16uc p16uc_COMPLEX32_REV = vec_sld(p16uc_REVERSE32, p16uc_REVERSE32, 8);                                         //{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 };\n\n#ifdef _BIG_ENDIAN\nstatic Packet16uc 
p16uc_COMPLEX32_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8);                                            //{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };\n#else\nstatic Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_PSET64_HI, p16uc_PSET64_LO, 8);                                            //{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };\n#endif // _BIG_ENDIAN\n\n#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC\n  #define EIGEN_PPC_PREFETCH(ADDR) __builtin_prefetch(ADDR);\n#else\n  #define EIGEN_PPC_PREFETCH(ADDR) asm( \"   dcbt [%[addr]]\\n\" :: [addr] \"r\" (ADDR) : \"cc\" );\n#endif\n\ntemplate<> struct packet_traits<float>  : default_packet_traits\n{\n  typedef Packet4f type;\n  typedef Packet4f half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=4,\n    HasHalfPacket = 1,\n\n    HasAdd  = 1,\n    HasSub  = 1,\n    HasMul  = 1,\n    HasDiv  = 1,\n    HasMin  = 1,\n    HasMax  = 1,\n    HasAbs  = 1,\n    HasSin  = 0,\n    HasCos  = 0,\n    HasLog  = 0,\n    HasExp  = 1,\n#ifdef __VSX__\n    HasSqrt = 1,\n#if !EIGEN_COMP_CLANG\n    HasRsqrt = 1,\n#else\n    HasRsqrt = 0,\n#endif\n#else\n    HasSqrt = 0,\n    HasRsqrt = 0,\n#endif\n    HasRound = 1,\n    HasFloor = 1,\n    HasCeil = 1,\n    HasNegate = 1,\n    HasBlend = 1\n  };\n};\ntemplate<> struct packet_traits<int>    : default_packet_traits\n{\n  typedef Packet4i type;\n  typedef Packet4i half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 4,\n    HasHalfPacket = 0,\n\n    HasAdd  = 1,\n    HasSub  = 1,\n    HasMul  = 1,\n    HasDiv  = 0,\n    HasBlend = 1\n  };\n};\n\n\ntemplate<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };\ntemplate<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };\n\ninline std::ostream & operator <<(std::ostream & s, const Packet16uc & v)\n{\n  union {\n    Packet16uc   v;\n    
unsigned char n[16];\n  } vt;\n  vt.v = v;\n  for (int i=0; i< 16; i++)\n    s << (int)vt.n[i] << \", \";\n  return s;\n}\n\ninline std::ostream & operator <<(std::ostream & s, const Packet4f & v)\n{\n  union {\n    Packet4f   v;\n    float n[4];\n  } vt;\n  vt.v = v;\n  s << vt.n[0] << \", \" << vt.n[1] << \", \" << vt.n[2] << \", \" << vt.n[3];\n  return s;\n}\n\ninline std::ostream & operator <<(std::ostream & s, const Packet4i & v)\n{\n  union {\n    Packet4i   v;\n    int n[4];\n  } vt;\n  vt.v = v;\n  s << vt.n[0] << \", \" << vt.n[1] << \", \" << vt.n[2] << \", \" << vt.n[3];\n  return s;\n}\n\ninline std::ostream & operator <<(std::ostream & s, const Packet4ui & v)\n{\n  union {\n    Packet4ui   v;\n    unsigned int n[4];\n  } vt;\n  vt.v = v;\n  s << vt.n[0] << \", \" << vt.n[1] << \", \" << vt.n[2] << \", \" << vt.n[3];\n  return s;\n}\n\n// Need to define them first or we get specialization after instantiation errors\ntemplate<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)\n{\n  EIGEN_DEBUG_ALIGNED_LOAD\n#ifdef __VSX__\n  return vec_vsx_ld(0, from);\n#else\n  return vec_ld(0, from);\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*     from)\n{\n  EIGEN_DEBUG_ALIGNED_LOAD\n#ifdef __VSX__\n  return vec_vsx_ld(0, from);\n#else\n  return vec_ld(0, from);\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from)\n{\n  EIGEN_DEBUG_ALIGNED_STORE\n#ifdef __VSX__\n  vec_vsx_st(from, 0, to);\n#else\n  vec_st(from, 0, to);\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from)\n{\n  EIGEN_DEBUG_ALIGNED_STORE\n#ifdef __VSX__\n  vec_vsx_st(from, 0, to);\n#else\n  vec_st(from, 0, to);\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) {\n  Packet4f v = {from, from, from, from};\n  return v;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from)   {\n  Packet4i v = 
{from, from, from, from};\n  return v;\n}\ntemplate<> EIGEN_STRONG_INLINE void\npbroadcast4<Packet4f>(const float *a,\n                      Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)\n{\n  a3 = pload<Packet4f>(a);\n  a0 = vec_splat(a3, 0);\n  a1 = vec_splat(a3, 1);\n  a2 = vec_splat(a3, 2);\n  a3 = vec_splat(a3, 3);\n}\ntemplate<> EIGEN_STRONG_INLINE void\npbroadcast4<Packet4i>(const int *a,\n                      Packet4i& a0, Packet4i& a1, Packet4i& a2, Packet4i& a3)\n{\n  a3 = pload<Packet4i>(a);\n  a0 = vec_splat(a3, 0);\n  a1 = vec_splat(a3, 1);\n  a2 = vec_splat(a3, 2);\n  a3 = vec_splat(a3, 3);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)\n{\n  float EIGEN_ALIGN16 af[4];\n  af[0] = from[0*stride];\n  af[1] = from[1*stride];\n  af[2] = from[2*stride];\n  af[3] = from[3*stride];\n return pload<Packet4f>(af);\n}\ntemplate<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)\n{\n  int EIGEN_ALIGN16 ai[4];\n  ai[0] = from[0*stride];\n  ai[1] = from[1*stride];\n  ai[2] = from[2*stride];\n  ai[3] = from[3*stride];\n return pload<Packet4i>(ai);\n}\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)\n{\n  float EIGEN_ALIGN16 af[4];\n  pstore<float>(af, from);\n  to[0*stride] = af[0];\n  to[1*stride] = af[1];\n  to[2*stride] = af[2];\n  to[3*stride] = af[3];\n}\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)\n{\n  int EIGEN_ALIGN16 ai[4];\n  pstore<int>((int *)ai, from);\n  to[0*stride] = ai[0];\n  to[1*stride] = ai[1];\n  to[2*stride] = ai[2];\n  to[3*stride] = ai[3];\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) { return pset1<Packet4f>(a) + p4f_COUNTDOWN; }\ntemplate<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a)   { return pset1<Packet4i>(a) + p4i_COUNTDOWN; }\n\ntemplate<> 
EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return a + b; }\ntemplate<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return a + b; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return a - b; }\ntemplate<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return a - b; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return p4f_ZERO - a; }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return p4i_ZERO - a; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_madd(a,b, p4f_MZERO); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return a * b; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n#ifndef __VSX__  // VSX actually provides a div instruction\n  Packet4f t, y_0, y_1;\n\n  // Altivec does not offer a divide instruction, we have to do a reciprocal approximation\n  y_0 = vec_re(b);\n\n  // Do one Newton-Raphson iteration to get the needed accuracy\n  t   = vec_nmsub(y_0, b, p4f_ONE);\n  y_1 = vec_madd(y_0, t, y_0);\n\n  return vec_madd(a, y_1, p4f_MZERO);\n#else\n  return vec_div(a, b);\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)\n{ eigen_assert(false && \"packet integer division are not supported by AltiVec\");\n  return pset1<Packet4i>(0);\n}\n\n// for some weird raisons, it has to be overloaded for packet of integers\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vec_madd(a,b,c); }\ntemplate<> 
EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return a*b + c; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_min(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_max(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_or(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_xor(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, vec_nor(b, b)); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, vec_nor(b, b)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) { return vec_round(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const  Packet4f& a) { return vec_ceil(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) { return vec_floor(a); }\n\n#ifdef _BIG_ENDIAN\ntemplate<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* 
from)\n{\n  EIGEN_DEBUG_ALIGNED_LOAD\n  Packet16uc MSQ, LSQ;\n  Packet16uc mask;\n  MSQ = vec_ld(0, (unsigned char *)from);          // most significant quadword\n  LSQ = vec_ld(15, (unsigned char *)from);         // least significant quadword\n  mask = vec_lvsl(0, from);                        // create the permute mask\n  return (Packet4f) vec_perm(MSQ, LSQ, mask);           // align the data\n\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)\n{\n  EIGEN_DEBUG_ALIGNED_LOAD\n  // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html\n  Packet16uc MSQ, LSQ;\n  Packet16uc mask;\n  MSQ = vec_ld(0, (unsigned char *)from);          // most significant quadword\n  LSQ = vec_ld(15, (unsigned char *)from);         // least significant quadword\n  mask = vec_lvsl(0, from);                        // create the permute mask\n  return (Packet4i) vec_perm(MSQ, LSQ, mask);    // align the data\n}\n#else\n// We also need ot redefine little endian loading of Packet4i/Packet4f using VSX\ntemplate<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)\n{\n  EIGEN_DEBUG_UNALIGNED_LOAD\n  return (Packet4i) vec_vsx_ld((long)from & 15, (const int*) _EIGEN_ALIGNED_PTR(from));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)\n{\n  EIGEN_DEBUG_UNALIGNED_LOAD\n  return (Packet4f) vec_vsx_ld((long)from & 15, (const float*) _EIGEN_ALIGNED_PTR(from));\n}\n#endif\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*   from)\n{\n  Packet4f p;\n  if((std::ptrdiff_t(from) % 16) == 0)  p = pload<Packet4f>(from);\n  else                                  p = ploadu<Packet4f>(from);\n  return vec_perm(p, p, p16uc_DUPLICATE32_HI);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)\n{\n  Packet4i p;\n  if((std::ptrdiff_t(from) % 16) == 0)  p = pload<Packet4i>(from);\n  else                                  p = ploadu<Packet4i>(from);\n  return vec_perm(p, p, 
p16uc_DUPLICATE32_HI);\n}\n\n#ifdef _BIG_ENDIAN\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<float>(float*  to, const Packet4f& from)\n{\n  EIGEN_DEBUG_UNALIGNED_STORE\n  // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html\n  // Warning: not thread safe!\n  Packet16uc MSQ, LSQ, edges;\n  Packet16uc edgeAlign, align;\n\n  MSQ = vec_ld(0, (unsigned char *)to);                     // most significant quadword\n  LSQ = vec_ld(15, (unsigned char *)to);                    // least significant quadword\n  edgeAlign = vec_lvsl(0, to);                              // permute map to extract edges\n  edges=vec_perm(LSQ,MSQ,edgeAlign);                        // extract the edges\n  align = vec_lvsr( 0, to );                                // permute map to misalign data\n  MSQ = vec_perm(edges,(Packet16uc)from,align);             // misalign the data (MSQ)\n  LSQ = vec_perm((Packet16uc)from,edges,align);             // misalign the data (LSQ)\n  vec_st( LSQ, 15, (unsigned char *)to );                   // Store the LSQ part first\n  vec_st( MSQ, 0, (unsigned char *)to );                    // Store the MSQ part\n}\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<int>(int*      to, const Packet4i& from)\n{\n  EIGEN_DEBUG_UNALIGNED_STORE\n  // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html\n  // Warning: not thread safe!\n  Packet16uc MSQ, LSQ, edges;\n  Packet16uc edgeAlign, align;\n\n  MSQ = vec_ld(0, (unsigned char *)to);                     // most significant quadword\n  LSQ = vec_ld(15, (unsigned char *)to);                    // least significant quadword\n  edgeAlign = vec_lvsl(0, to);                              // permute map to extract edges\n  edges=vec_perm(LSQ, MSQ, edgeAlign);                      // extract the edges\n  align = vec_lvsr( 0, to );                                // permute map to misalign data\n  MSQ = vec_perm(edges, (Packet16uc) from, align);          // misalign the data (MSQ)\n  LSQ = 
vec_perm((Packet16uc) from, edges, align);          // misalign the data (LSQ)\n  vec_st( LSQ, 15, (unsigned char *)to );                   // Store the LSQ part first\n  vec_st( MSQ, 0, (unsigned char *)to );                    // Store the MSQ part\n}\n#else\n// We also need ot redefine little endian loading of Packet4i/Packet4f using VSX\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<int>(int*       to, const Packet4i& from)\n{\n  EIGEN_DEBUG_ALIGNED_STORE\n  vec_vsx_st(from, (long)to & 15, (int*) _EIGEN_ALIGNED_PTR(to));\n}\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<float>(float*   to, const Packet4f& from)\n{\n  EIGEN_DEBUG_ALIGNED_STORE\n  vec_vsx_st(from, (long)to & 15, (float*) _EIGEN_ALIGNED_PTR(to));\n}\n#endif\n\ntemplate<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr)    { EIGEN_PPC_PREFETCH(addr); }\ntemplate<> EIGEN_STRONG_INLINE void prefetch<int>(const int*     addr)    { EIGEN_PPC_PREFETCH(addr); }\n\ntemplate<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x; vec_ste(a, 0, &x); return x; }\ntemplate<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int   EIGEN_ALIGN16 x; vec_ste(a, 0, &x); return x; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)\n{\n  return reinterpret_cast<Packet4f>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)\n{\n  return reinterpret_cast<Packet4i>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vec_abs(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vec_abs(a); }\n\ntemplate<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)\n{\n  Packet4f b, sum;\n  b   = vec_sld(a, a, 8);\n  sum = a + b;\n  b   = vec_sld(sum, sum, 4);\n  sum += b;\n  return 
pfirst(sum);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)\n{\n  Packet4f v[4], sum[4];\n\n  // It's easier and faster to transpose then add as columns\n  // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation\n  // Do the transpose, first set of moves\n  v[0] = vec_mergeh(vecs[0], vecs[2]);\n  v[1] = vec_mergel(vecs[0], vecs[2]);\n  v[2] = vec_mergeh(vecs[1], vecs[3]);\n  v[3] = vec_mergel(vecs[1], vecs[3]);\n  // Get the resulting vectors\n  sum[0] = vec_mergeh(v[0], v[2]);\n  sum[1] = vec_mergel(v[0], v[2]);\n  sum[2] = vec_mergeh(v[1], v[3]);\n  sum[3] = vec_mergel(v[1], v[3]);\n\n  // Now do the summation:\n  // Lines 0+1\n  sum[0] = sum[0] + sum[1];\n  // Lines 2+3\n  sum[1] = sum[2] + sum[3];\n  // Add the results\n  sum[0] = sum[0] + sum[1];\n\n  return sum[0];\n}\n\ntemplate<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)\n{\n  Packet4i sum;\n  sum = vec_sums(a, p4i_ZERO);\n#ifdef _BIG_ENDIAN\n  sum = vec_sld(sum, p4i_ZERO, 12);\n#else\n  sum = vec_sld(p4i_ZERO, sum, 4);\n#endif\n  return pfirst(sum);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)\n{\n  Packet4i v[4], sum[4];\n\n  // It's easier and faster to transpose then add as columns\n  // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation\n  // Do the transpose, first set of moves\n  v[0] = vec_mergeh(vecs[0], vecs[2]);\n  v[1] = vec_mergel(vecs[0], vecs[2]);\n  v[2] = vec_mergeh(vecs[1], vecs[3]);\n  v[3] = vec_mergel(vecs[1], vecs[3]);\n  // Get the resulting vectors\n  sum[0] = vec_mergeh(v[0], v[2]);\n  sum[1] = vec_mergel(v[0], v[2]);\n  sum[2] = vec_mergeh(v[1], v[3]);\n  sum[3] = vec_mergel(v[1], v[3]);\n\n  // Now do the summation:\n  // Lines 0+1\n  sum[0] = sum[0] + sum[1];\n  // Lines 2+3\n  sum[1] = sum[2] + sum[3];\n  // Add the results\n  sum[0] = sum[0] + sum[1];\n\n  return sum[0];\n}\n\n// Other reduction functions:\n// 
mul\ntemplate<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)\n{\n  Packet4f prod;\n  prod = pmul(a, vec_sld(a, a, 8));\n  return pfirst(pmul(prod, vec_sld(prod, prod, 4)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)\n{\n  EIGEN_ALIGN16 int aux[4];\n  pstore(aux, a);\n  return aux[0] * aux[1] * aux[2] * aux[3];\n}\n\n// min\ntemplate<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)\n{\n  Packet4f b, res;\n  b = vec_min(a, vec_sld(a, a, 8));\n  res = vec_min(b, vec_sld(b, b, 4));\n  return pfirst(res);\n}\n\ntemplate<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)\n{\n  Packet4i b, res;\n  b = vec_min(a, vec_sld(a, a, 8));\n  res = vec_min(b, vec_sld(b, b, 4));\n  return pfirst(res);\n}\n\n// max\ntemplate<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)\n{\n  Packet4f b, res;\n  b = vec_max(a, vec_sld(a, a, 8));\n  res = vec_max(b, vec_sld(b, b, 4));\n  return pfirst(res);\n}\n\ntemplate<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)\n{\n  Packet4i b, res;\n  b = vec_max(a, vec_sld(a, a, 8));\n  res = vec_max(b, vec_sld(b, b, 4));\n  return pfirst(res);\n}\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet4f>\n{\n  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)\n  {\n#ifdef _BIG_ENDIAN\n    switch (Offset % 4) {\n    case 1:\n      first = vec_sld(first, second, 4); break;\n    case 2:\n      first = vec_sld(first, second, 8); break;\n    case 3:\n      first = vec_sld(first, second, 12); break;\n    }\n#else\n    switch (Offset % 4) {\n    case 1:\n      first = vec_sld(second, first, 12); break;\n    case 2:\n      first = vec_sld(second, first, 8); break;\n    case 3:\n      first = vec_sld(second, first, 4); break;\n    }\n#endif\n  }\n};\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet4i>\n{\n  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)\n  {\n#ifdef 
_BIG_ENDIAN\n    switch (Offset % 4) {\n    case 1:\n      first = vec_sld(first, second, 4); break;\n    case 2:\n      first = vec_sld(first, second, 8); break;\n    case 3:\n      first = vec_sld(first, second, 12); break;\n    }\n#else\n    switch (Offset % 4) {\n    case 1:\n      first = vec_sld(second, first, 12); break;\n    case 2:\n      first = vec_sld(second, first, 8); break;\n    case 3:\n      first = vec_sld(second, first, 4); break;\n    }\n#endif\n  }\n};\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet4f,4>& kernel) {\n  Packet4f t0, t1, t2, t3;\n  t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);\n  t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);\n  t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);\n  t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);\n  kernel.packet[0] = vec_mergeh(t0, t2);\n  kernel.packet[1] = vec_mergel(t0, t2);\n  kernel.packet[2] = vec_mergeh(t1, t3);\n  kernel.packet[3] = vec_mergel(t1, t3);\n}\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet4i,4>& kernel) {\n  Packet4i t0, t1, t2, t3;\n  t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);\n  t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);\n  t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);\n  t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);\n  kernel.packet[0] = vec_mergeh(t0, t2);\n  kernel.packet[1] = vec_mergel(t0, t2);\n  kernel.packet[2] = vec_mergeh(t1, t3);\n  kernel.packet[3] = vec_mergel(t1, t3);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {\n  Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };\n  Packet4ui mask = reinterpret_cast<Packet4ui>(vec_cmpeq(reinterpret_cast<Packet4ui>(select), reinterpret_cast<Packet4ui>(p4i_ONE)));\n  return vec_sel(elsePacket, thenPacket, mask);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const 
Packet4f& thenPacket, const Packet4f& elsePacket) {\n  Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };\n  Packet4ui mask = reinterpret_cast<Packet4ui>(vec_cmpeq(reinterpret_cast<Packet4ui>(select), reinterpret_cast<Packet4ui>(p4i_ONE)));\n  return vec_sel(elsePacket, thenPacket, mask);\n}\n\n\n//---------- double ----------\n#ifdef __VSX__\ntypedef __vector double              Packet2d;\ntypedef __vector unsigned long long  Packet2ul;\ntypedef __vector long long           Packet2l;\n#if EIGEN_COMP_CLANG\ntypedef Packet2ul                    Packet2bl;\n#else\ntypedef __vector __bool long         Packet2bl;\n#endif\n\nstatic Packet2l  p2l_ONE  = { 1, 1 };\nstatic Packet2l  p2l_ZERO = reinterpret_cast<Packet2l>(p4i_ZERO);\nstatic Packet2d  p2d_ONE  = { 1.0, 1.0 }; \nstatic Packet2d  p2d_ZERO = reinterpret_cast<Packet2d>(p4f_ZERO);\nstatic Packet2d  p2d_MZERO = { -0.0, -0.0 };\n\n#ifdef _BIG_ENDIAN\nstatic Packet2d p2d_COUNTDOWN = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(p2d_ZERO), reinterpret_cast<Packet4f>(p2d_ONE), 8));\n#else\nstatic Packet2d p2d_COUNTDOWN = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(p2d_ONE), reinterpret_cast<Packet4f>(p2d_ZERO), 8));\n#endif\n\ntemplate<int index> Packet2d vec_splat_dbl(Packet2d& a);\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d vec_splat_dbl<0>(Packet2d& a)\n{\n  return reinterpret_cast<Packet2d>(vec_perm(a, a, p16uc_PSET64_HI));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d vec_splat_dbl<1>(Packet2d& a)\n{\n  return reinterpret_cast<Packet2d>(vec_perm(a, a, p16uc_PSET64_LO));\n}\n\ntemplate<> struct packet_traits<double> : default_packet_traits\n{\n  typedef Packet2d type;\n  typedef Packet2d half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=2,\n    HasHalfPacket = 1,\n\n    HasAdd  = 1,\n    HasSub  = 1,\n    HasMul  = 1,\n    HasDiv  = 1,\n    HasMin  = 1,\n    HasMax  = 1,\n    HasAbs  = 1,\n    HasSin  = 
0,\n    HasCos  = 0,\n    HasLog  = 0,\n    HasExp  = 1,\n    HasSqrt = 1,\n    HasRsqrt = 1,\n    HasRound = 1,\n    HasFloor = 1,\n    HasCeil = 1,\n    HasNegate = 1,\n    HasBlend = 1\n  };\n};\n\ntemplate<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };\n\ninline std::ostream & operator <<(std::ostream & s, const Packet2l & v)\n{\n  union {\n    Packet2l   v;\n    int64_t n[2];\n  } vt;\n  vt.v = v;\n  s << vt.n[0] << \", \" << vt.n[1];\n  return s;\n}\n\ninline std::ostream & operator <<(std::ostream & s, const Packet2d & v)\n{\n  union {\n    Packet2d   v;\n    double n[2];\n  } vt;\n  vt.v = v;\n  s << vt.n[0] << \", \" << vt.n[1];\n  return s;\n}\n\n// Need to define them first or we get specialization after instantiation errors\ntemplate<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from)\n{\n  EIGEN_DEBUG_ALIGNED_LOAD\n#ifdef __VSX__\n  return vec_vsx_ld(0, from);\n#else\n  return vec_ld(0, from);\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstore<double>(double*   to, const Packet2d& from)\n{\n  EIGEN_DEBUG_ALIGNED_STORE\n#ifdef __VSX__\n  vec_vsx_st(from, 0, to);\n#else\n  vec_st(from, 0, to);\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double&  from) {\n  Packet2d v = {from, from};\n  return v;\n}\n\ntemplate<> EIGEN_STRONG_INLINE void\npbroadcast4<Packet2d>(const double *a,\n                      Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)\n{\n  a1 = pload<Packet2d>(a);\n  a0 = vec_splat_dbl<0>(a1);\n  a1 = vec_splat_dbl<1>(a1);\n  a3 = pload<Packet2d>(a+2);\n  a2 = vec_splat_dbl<0>(a3);\n  a3 = vec_splat_dbl<1>(a3);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)\n{\n  double EIGEN_ALIGN16 af[2];\n  af[0] = from[0*stride];\n  af[1] = from[1*stride];\n return pload<Packet2d>(af);\n}\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<double, 
Packet2d>(double* to, const Packet2d& from, Index stride)\n{\n  double EIGEN_ALIGN16 af[2];\n  pstore<double>(af, from);\n  to[0*stride] = af[0];\n  to[1*stride] = af[1];\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return pset1<Packet2d>(a) + p2d_COUNTDOWN; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return a + b; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return a - b; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return p2d_ZERO - a; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_madd(a,b,p2d_MZERO); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_div(a,b); }\n\n// for some weird raisons, it has to be overloaded for packet of integers\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vec_madd(a, b, c); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_min(a, b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_max(a, b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_or(a, b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_xor(a, b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, vec_nor(b, b)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return 
vec_round(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const  Packet2d& a) { return vec_ceil(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return vec_floor(a); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)\n{\n  EIGEN_DEBUG_ALIGNED_LOAD\n  return (Packet2d) vec_vsx_ld((long)from & 15, (const double*) _EIGEN_ALIGNED_PTR(from));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double*   from)\n{\n  Packet2d p;\n  if((std::ptrdiff_t(from) % 16) == 0)  p = pload<Packet2d>(from);\n  else                                  p = ploadu<Packet2d>(from);\n  return vec_splat_dbl<0>(p);\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<double>(double*  to, const Packet2d& from)\n{\n  EIGEN_DEBUG_ALIGNED_STORE\n  vec_vsx_st((Packet4f)from, (long)to & 15, (float*) _EIGEN_ALIGNED_PTR(to));\n}\n\ntemplate<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_PPC_PREFETCH(addr); }\n\ntemplate<> EIGEN_STRONG_INLINE double  pfirst<Packet2d>(const Packet2d& a) { double EIGEN_ALIGN16 x[2]; pstore<double>(x, a); return x[0]; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)\n{\n  return reinterpret_cast<Packet2d>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE64));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vec_abs(a); }\n\ntemplate<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)\n{\n  Packet2d b, sum;\n  b   = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(a), reinterpret_cast<Packet4f>(a), 8));\n  sum = a + b;\n  return pfirst<Packet2d>(sum);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)\n{\n  Packet2d v[2], sum;\n  v[0] = vecs[0] + reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(vecs[0]), reinterpret_cast<Packet4f>(vecs[0]), 8));\n  v[1] = vecs[1] + 
reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(vecs[1]), reinterpret_cast<Packet4f>(vecs[1]), 8));\n \n#ifdef _BIG_ENDIAN\n  sum = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(v[0]), reinterpret_cast<Packet4f>(v[1]), 8));\n#else\n  sum = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4f>(v[1]), reinterpret_cast<Packet4f>(v[0]), 8));\n#endif\n\n  return sum;\n}\n// Other reduction functions:\n// mul\ntemplate<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)\n{\n  return pfirst(pmul(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(a), reinterpret_cast<Packet4ui>(a), 8))));\n}\n\n// min\ntemplate<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)\n{\n  return pfirst(pmin(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(a), reinterpret_cast<Packet4ui>(a), 8))));\n}\n\n// max\ntemplate<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)\n{\n  return pfirst(pmax(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(a), reinterpret_cast<Packet4ui>(a), 8))));\n}\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet2d>\n{\n  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)\n  {\n    if (Offset == 1)\n#ifdef _BIG_ENDIAN\n      first = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(first), reinterpret_cast<Packet4ui>(second), 8));\n#else\n      first = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(second), reinterpret_cast<Packet4ui>(first), 8));\n#endif\n  }\n};\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet2d,2>& kernel) {\n  Packet2d t0, t1;\n  t0 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_HI);\n  t1 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_LO);\n  kernel.packet[0] = t0;\n  kernel.packet[1] = t1;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, 
const Packet2d& elsePacket) {\n  Packet2l select = { ifPacket.select[0], ifPacket.select[1] };\n  Packet2bl mask = vec_cmpeq(reinterpret_cast<Packet2d>(select), reinterpret_cast<Packet2d>(p2l_ONE));\n  return vec_sel(elsePacket, thenPacket, mask);\n}\n#endif // __VSX__\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_PACKET_MATH_ALTIVEC_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/CUDA/Complex.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_COMPLEX_CUDA_H\n#define EIGEN_COMPLEX_CUDA_H\n\n// clang-format off\n\nnamespace Eigen {\n\nnamespace internal {\n\n#if defined(__CUDACC__) && defined(EIGEN_USE_GPU)\n\n// Many std::complex methods such as operator+, operator-, operator* and\n// operator/ are not constexpr. Due to this, clang does not treat them as device\n// functions and thus Eigen functors making use of these operators fail to\n// compile. Here, we manually specialize these functors for complex types when\n// building for CUDA to avoid non-constexpr methods.\n\n// Sum\ntemplate<typename T> struct scalar_sum_op<const std::complex<T>, const std::complex<T> > : binary_op_base<const std::complex<T>, const std::complex<T> > {\n  typedef typename std::complex<T> result_type;\n\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex<T> operator() (const std::complex<T>& a, const std::complex<T>& b) const {\n    return std::complex<T>(numext::real(a) + numext::real(b),\n                           numext::imag(a) + numext::imag(b));\n  }\n};\n\ntemplate<typename T> struct scalar_sum_op<std::complex<T>, std::complex<T> > : scalar_sum_op<const std::complex<T>, const std::complex<T> > {};\n\n\n// Difference\ntemplate<typename T> struct scalar_difference_op<const std::complex<T>, const std::complex<T> >  : binary_op_base<const std::complex<T>, const std::complex<T> > {\n  typedef typename std::complex<T> result_type;\n\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex<T> operator() (const std::complex<T>& a, const 
std::complex<T>& b) const {\n    return std::complex<T>(numext::real(a) - numext::real(b),\n                           numext::imag(a) - numext::imag(b));\n  }\n};\n\ntemplate<typename T> struct scalar_difference_op<std::complex<T>, std::complex<T> > : scalar_difference_op<const std::complex<T>, const std::complex<T> > {};\n\n\n// Product\ntemplate<typename T> struct scalar_product_op<const std::complex<T>, const std::complex<T> >  : binary_op_base<const std::complex<T>, const std::complex<T> > {\n  enum {\n    Vectorizable = packet_traits<std::complex<T>>::HasMul\n  };\n  typedef typename std::complex<T> result_type;\n\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex<T> operator() (const std::complex<T>& a, const std::complex<T>& b) const {\n    const T a_real = numext::real(a);\n    const T a_imag = numext::imag(a);\n    const T b_real = numext::real(b);\n    const T b_imag = numext::imag(b);\n    return std::complex<T>(a_real * b_real - a_imag * b_imag,\n                           a_real * b_imag + a_imag * b_real);\n  }\n};\n\ntemplate<typename T> struct scalar_product_op<std::complex<T>, std::complex<T> > : scalar_product_op<const std::complex<T>, const std::complex<T> > {};\n\n\n// Quotient\ntemplate<typename T> struct scalar_quotient_op<const std::complex<T>, const std::complex<T> > : binary_op_base<const std::complex<T>, const std::complex<T> > {\n  enum {\n    Vectorizable = packet_traits<std::complex<T>>::HasDiv\n  };\n  typedef typename std::complex<T> result_type;\n\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex<T> operator() (const std::complex<T>& a, const std::complex<T>& b) const {\n    const T a_real = numext::real(a);\n    const T a_imag = numext::imag(a);\n    const T b_real = numext::real(b);\n    const T b_imag = numext::imag(b);\n    const T norm = T(1) / (b_real * b_real + b_imag * b_imag);\n    return std::complex<T>((a_real * b_real + 
a_imag * b_imag) * norm,\n                           (a_imag * b_real - a_real * b_imag) * norm);\n  }\n};\n\ntemplate<typename T> struct scalar_quotient_op<std::complex<T>, std::complex<T> > : scalar_quotient_op<const std::complex<T>, const std::complex<T> > {};\n\n#endif\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_COMPLEX_CUDA_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/CUDA/Half.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n//\n// The conversion routines are Copyright (c) Fabian Giesen, 2016.\n// The original license follows:\n//\n// Copyright (c) Fabian Giesen, 2016\n// All rights reserved.\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted.\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\n// Standard 16-bit float type, mostly useful for GPUs. Defines a new\n// type Eigen::half (inheriting from CUDA's __half struct) with\n// operator overloads such that it behaves basically as an arithmetic\n// type. 
It will be quite slow on CPUs (so it is recommended to stay\n// in fp32 for CPUs, except for simple parameter conversions, I/O\n// to disk and the likes), but fast on GPUs.\n\n\n#ifndef EIGEN_HALF_CUDA_H\n#define EIGEN_HALF_CUDA_H\n\n#if __cplusplus > 199711L\n#define EIGEN_EXPLICIT_CAST(tgt_type) explicit operator tgt_type()\n#else\n#define EIGEN_EXPLICIT_CAST(tgt_type) operator tgt_type()\n#endif\n\n\nnamespace Eigen {\n\nstruct half;\n\nnamespace half_impl {\n\n#if !defined(EIGEN_HAS_CUDA_FP16)\n\n// Make our own __half definition that is similar to CUDA's.\nstruct __half {\n  EIGEN_DEVICE_FUNC __half() : x(0) {}\n  explicit EIGEN_DEVICE_FUNC __half(unsigned short raw) : x(raw) {}\n  unsigned short x;\n};\n\n#endif\n\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half raw_uint16_to_half(unsigned short x);\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half float_to_half_rtne(float ff);\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half h);\n\nstruct half_base : public __half {\n  EIGEN_DEVICE_FUNC half_base() {}\n  EIGEN_DEVICE_FUNC half_base(const half_base& h) : __half(h) {}\n  EIGEN_DEVICE_FUNC half_base(const __half& h) : __half(h) {}\n};\n\n} // namespace half_impl\n\n// Class definition.\nstruct half : public half_impl::half_base {\n  #if !defined(EIGEN_HAS_CUDA_FP16)\n    typedef half_impl::__half __half;\n  #endif\n\n  EIGEN_DEVICE_FUNC half() {}\n\n  EIGEN_DEVICE_FUNC half(const __half& h) : half_impl::half_base(h) {}\n  EIGEN_DEVICE_FUNC half(const half& h) : half_impl::half_base(h) {}\n\n  explicit EIGEN_DEVICE_FUNC half(bool b)\n      : half_impl::half_base(half_impl::raw_uint16_to_half(b ? 
0x3c00 : 0)) {}\n  template<class T>\n  explicit EIGEN_DEVICE_FUNC half(const T& val)\n      : half_impl::half_base(half_impl::float_to_half_rtne(static_cast<float>(val))) {}\n  explicit EIGEN_DEVICE_FUNC half(float f)\n      : half_impl::half_base(half_impl::float_to_half_rtne(f)) {}\n\n  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(bool) const {\n    // +0.0 and -0.0 become false, everything else becomes true.\n    return (x & 0x7fff) != 0;\n  }\n  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(signed char) const {\n    return static_cast<signed char>(half_impl::half_to_float(*this));\n  }\n  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned char) const {\n    return static_cast<unsigned char>(half_impl::half_to_float(*this));\n  }\n  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(short) const {\n    return static_cast<short>(half_impl::half_to_float(*this));\n  }\n  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned short) const {\n    return static_cast<unsigned short>(half_impl::half_to_float(*this));\n  }\n  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(int) const {\n    return static_cast<int>(half_impl::half_to_float(*this));\n  }\n  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned int) const {\n    return static_cast<unsigned int>(half_impl::half_to_float(*this));\n  }\n  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(long) const {\n    return static_cast<long>(half_impl::half_to_float(*this));\n  }\n  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned long) const {\n    return static_cast<unsigned long>(half_impl::half_to_float(*this));\n  }\n  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(long long) const {\n    return static_cast<long long>(half_impl::half_to_float(*this));\n  }\n  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(unsigned long long) const {\n    return static_cast<unsigned long long>(half_to_float(*this));\n  }\n  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(float) const {\n    return half_impl::half_to_float(*this);\n  }\n  EIGEN_DEVICE_FUNC EIGEN_EXPLICIT_CAST(double) const {\n    return 
static_cast<double>(half_impl::half_to_float(*this));\n  }\n\n  EIGEN_DEVICE_FUNC half& operator=(const half& other) {\n    x = other.x;\n    return *this;\n  }\n};\n\nnamespace half_impl {\n\n#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530\n\n// Intrinsics for native fp16 support. Note that on current hardware,\n// these are no faster than fp32 arithmetic (you need to use the half2\n// versions to get the ALU speed increased), but you do save the\n// conversion steps back and forth.\n\n__device__ half operator + (const half& a, const half& b) {\n  return __hadd(a, b);\n}\n__device__ half operator * (const half& a, const half& b) {\n  return __hmul(a, b);\n}\n__device__ half operator - (const half& a, const half& b) {\n  return __hsub(a, b);\n}\n__device__ half operator / (const half& a, const half& b) {\n  float num = __half2float(a);\n  float denom = __half2float(b);\n  return __float2half(num / denom);\n}\n__device__ half operator - (const half& a) {\n  return __hneg(a);\n}\n__device__ half& operator += (half& a, const half& b) {\n  a = a + b;\n  return a;\n}\n__device__ half& operator *= (half& a, const half& b) {\n  a = a * b;\n  return a;\n}\n__device__ half& operator -= (half& a, const half& b) {\n  a = a - b;\n  return a;\n}\n__device__ half& operator /= (half& a, const half& b) {\n  a = a / b;\n  return a;\n}\n__device__ bool operator == (const half& a, const half& b) {\n  return __heq(a, b);\n}\n__device__ bool operator != (const half& a, const half& b) {\n  return __hne(a, b);\n}\n__device__ bool operator < (const half& a, const half& b) {\n  return __hlt(a, b);\n}\n__device__ bool operator <= (const half& a, const half& b) {\n  return __hle(a, b);\n}\n__device__ bool operator > (const half& a, const half& b) {\n  return __hgt(a, b);\n}\n__device__ bool operator >= (const half& a, const half& b) {\n  return __hge(a, b);\n}\n\n#else  // Emulate support for half floats\n\n// Definitions for CPUs and older CUDA, mostly 
working through conversion\n// to/from fp32.\n\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator + (const half& a, const half& b) {\n  return half(float(a) + float(b));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator * (const half& a, const half& b) {\n  return half(float(a) * float(b));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a, const half& b) {\n  return half(float(a) - float(b));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, const half& b) {\n  return half(float(a) / float(b));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator - (const half& a) {\n  half result;\n  result.x = a.x ^ 0x8000;\n  return result;\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator += (half& a, const half& b) {\n  a = half(float(a) + float(b));\n  return a;\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator *= (half& a, const half& b) {\n  a = half(float(a) * float(b));\n  return a;\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator -= (half& a, const half& b) {\n  a = half(float(a) - float(b));\n  return a;\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half& operator /= (half& a, const half& b) {\n  a = half(float(a) / float(b));\n  return a;\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator == (const half& a, const half& b) {\n  return float(a) == float(b);\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator != (const half& a, const half& b) {\n  return float(a) != float(b);\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator < (const half& a, const half& b) {\n  return float(a) < float(b);\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator <= (const half& a, const half& b) {\n  return float(a) <= float(b);\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator > (const half& a, const half& b) {\n  return float(a) > float(b);\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool operator >= (const half& a, const half& b) {\n  return float(a) >= float(b);\n}\n\n#endif  // Emulate 
support for half floats\n\n// Division by an index. Do it in full float precision to avoid accuracy\n// issues in converting the denominator to half.\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half operator / (const half& a, Index b) {\n  return half(static_cast<float>(a) / static_cast<float>(b));\n}\n\n// Conversion routines, including fallbacks for the host or older CUDA.\n// Note that newer Intel CPUs (Haswell or newer) have vectorized versions of\n// these in hardware. If we need more performance on older/other CPUs, they are\n// also possible to vectorize directly.\n\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half raw_uint16_to_half(unsigned short x) {\n  __half h;\n  h.x = x;\n  return h;\n}\n\nunion FP32 {\n  unsigned int u;\n  float f;\n};\n\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half float_to_half_rtne(float ff) {\n#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300\n  return __float2half(ff);\n\n#elif defined(EIGEN_HAS_FP16_C)\n  __half h;\n  h.x = _cvtss_sh(ff, 0);\n  return h;\n\n#else\n  FP32 f; f.f = ff;\n\n  const FP32 f32infty = { 255 << 23 };\n  const FP32 f16max = { (127 + 16) << 23 };\n  const FP32 denorm_magic = { ((127 - 15) + (23 - 10) + 1) << 23 };\n  unsigned int sign_mask = 0x80000000u;\n  __half o;\n  o.x = static_cast<unsigned short>(0x0u);\n\n  unsigned int sign = f.u & sign_mask;\n  f.u ^= sign;\n\n  // NOTE all the integer compares in this function can be safely\n  // compiled into signed compares since all operands are below\n  // 0x80000000. Important if you want fast straight SSE2 code\n  // (since there's no unsigned PCMPGTD).\n\n  if (f.u >= f16max.u) {  // result is Inf or NaN (all exponent bits set)\n    o.x = (f.u > f32infty.u) ? 0x7e00 : 0x7c00; // NaN->qNaN and Inf->Inf\n  } else {  // (De)normalized number or zero\n    if (f.u < (113 << 23)) {  // resulting FP16 is subnormal or zero\n      // use a magic value to align our 10 mantissa bits at the bottom of\n      // the float. 
as long as FP addition is round-to-nearest-even this\n      // just works.\n      f.f += denorm_magic.f;\n\n      // and one integer subtract of the bias later, we have our final float!\n      o.x = static_cast<unsigned short>(f.u - denorm_magic.u);\n    } else {\n      unsigned int mant_odd = (f.u >> 13) & 1; // resulting mantissa is odd\n\n      // update exponent, rounding bias part 1\n      f.u += ((unsigned int)(15 - 127) << 23) + 0xfff;\n      // rounding bias part 2\n      f.u += mant_odd;\n      // take the bits!\n      o.x = static_cast<unsigned short>(f.u >> 13);\n    }\n  }\n\n  o.x |= static_cast<unsigned short>(sign >> 16);\n  return o;\n#endif\n}\n\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC float half_to_float(__half h) {\n#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300\n  return __half2float(h);\n\n#elif defined(EIGEN_HAS_FP16_C)\n  return _cvtsh_ss(h.x);\n\n#else\n  const FP32 magic = { 113 << 23 };\n  const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift\n  FP32 o;\n\n  o.u = (h.x & 0x7fff) << 13;             // exponent/mantissa bits\n  unsigned int exp = shifted_exp & o.u;   // just the exponent\n  o.u += (127 - 15) << 23;                // exponent adjust\n\n  // handle exponent special cases\n  if (exp == shifted_exp) {     // Inf/NaN?\n    o.u += (128 - 16) << 23;    // extra exp adjust\n  } else if (exp == 0) {        // Zero/Denormal?\n    o.u += 1 << 23;             // extra exp adjust\n    o.f -= magic.f;             // renormalize\n  }\n\n  o.u |= (h.x & 0x8000) << 16;    // sign bit\n  return o.f;\n#endif\n}\n\n// --- standard functions ---\n\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isinf)(const half& a) {\n  return (a.x & 0x7fff) == 0x7c00;\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isnan)(const half& a) {\n#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530\n  return __hisnan(a);\n#else\n  return (a.x & 0x7fff) > 
0x7c00;\n#endif\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC bool (isfinite)(const half& a) {\n  return !(isinf EIGEN_NOT_A_MACRO (a)) && !(isnan EIGEN_NOT_A_MACRO (a));\n}\n\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half abs(const half& a) {\n  half result;\n  result.x = a.x & 0x7FFF;\n  return result;\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half exp(const half& a) {\n#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000 && defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 530\n  return half(hexp(a));\n#else\n   return half(::expf(float(a)));\n#endif\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half expm1(const half& a) {\n  return half(numext::expm1(float(a)));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log(const half& a) {\n#if defined(EIGEN_HAS_CUDA_FP16) && defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000 && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530\n  return half(::hlog(a));\n#else\n  return half(::logf(float(a)));\n#endif\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log1p(const half& a) {\n  return half(numext::log1p(float(a)));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half log10(const half& a) {\n  return half(::log10f(float(a)));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sqrt(const half& a) {\n#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000 && defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 530\n  return half(hsqrt(a));\n#else\n    return half(::sqrtf(float(a)));\n#endif\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half pow(const half& a, const half& b) {\n  return half(::powf(float(a), float(b)));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half sin(const half& a) {\n  return half(::sinf(float(a)));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half cos(const half& a) {\n  return half(::cosf(float(a)));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half tan(const half& a) {\n  return half(::tanf(float(a)));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half tanh(const half& a) {\n  return half(::tanhf(float(a)));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half 
floor(const half& a) {\n#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000 && defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300\n  return half(hfloor(a));\n#else\n  return half(::floorf(float(a)));\n#endif\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half ceil(const half& a) {\n#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000 && defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300\n  return half(hceil(a));\n#else\n  return half(::ceilf(float(a)));\n#endif\n}\n\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (min)(const half& a, const half& b) {\n#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530\n  return __hlt(b, a) ? b : a;\n#else\n  const float f1 = static_cast<float>(a);\n  const float f2 = static_cast<float>(b);\n  return f2 < f1 ? b : a;\n#endif\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC half (max)(const half& a, const half& b) {\n#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530\n  return __hlt(a, b) ? b : a;\n#else\n  const float f1 = static_cast<float>(a);\n  const float f2 = static_cast<float>(b);\n  return f1 < f2 ? 
b : a;\n#endif\n}\n\nEIGEN_ALWAYS_INLINE std::ostream& operator << (std::ostream& os, const half& v) {\n  os << static_cast<float>(v);\n  return os;\n}\n\n} // end namespace half_impl\n\n// import Eigen::half_impl::half into Eigen namespace\n// using half_impl::half;\n\nnamespace internal {\n\ntemplate<>\nstruct random_default_impl<half, false, false>\n{\n  static inline half run(const half& x, const half& y)\n  {\n    return x + (y-x) * half(float(std::rand()) / float(RAND_MAX));\n  }\n  static inline half run()\n  {\n    return run(half(-1.f), half(1.f));\n  }\n};\n\ntemplate<> struct is_arithmetic<half> { enum { value = true }; };\n\n} // end namespace internal\n\ntemplate<> struct NumTraits<Eigen::half>\n    : GenericNumTraits<Eigen::half>\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half epsilon() {\n    return half_impl::raw_uint16_to_half(0x0800);\n  }\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half dummy_precision() { return Eigen::half(1e-2f); }\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half highest() {\n    return half_impl::raw_uint16_to_half(0x7bff);\n  }\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half lowest() {\n    return half_impl::raw_uint16_to_half(0xfbff);\n  }\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half infinity() {\n    return half_impl::raw_uint16_to_half(0x7c00);\n  }\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Eigen::half quiet_NaN() {\n    return half_impl::raw_uint16_to_half(0x7c01);\n  }\n};\n\n} // end namespace Eigen\n\n// C-like standard mathematical functions and trancendentals.\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half fabsh(const Eigen::half& a) {\n  Eigen::half result;\n  result.x = a.x & 0x7FFF;\n  return result;\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half exph(const Eigen::half& a) {\n  return Eigen::half(::expf(float(a)));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half logh(const Eigen::half& a) {\n#if defined __CUDACC_VER__ && 
__CUDACC_VER__ >= 80000 && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530\n  return Eigen::half(::hlog(a));\n#else\n  return Eigen::half(::logf(float(a)));\n#endif\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half sqrth(const Eigen::half& a) {\n  return Eigen::half(::sqrtf(float(a)));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half powh(const Eigen::half& a, const Eigen::half& b) {\n  return Eigen::half(::powf(float(a), float(b)));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half floorh(const Eigen::half& a) {\n  return Eigen::half(::floorf(float(a)));\n}\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half ceilh(const Eigen::half& a) {\n  return Eigen::half(::ceilf(float(a)));\n}\n\nnamespace std {\n\n#if __cplusplus > 199711L\ntemplate <>\nstruct hash<Eigen::half> {\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::size_t operator()(const Eigen::half& a) const {\n    return static_cast<std::size_t>(a.x);\n  }\n};\n#endif\n\n} // end namespace std\n\n\n// Add the missing shfl_xor intrinsic\n#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300\n__device__ EIGEN_STRONG_INLINE Eigen::half __shfl_xor(Eigen::half var, int laneMask, int width=warpSize) {\n  return static_cast<Eigen::half>(__shfl_xor(static_cast<float>(var), laneMask, width));\n}\n#endif\n\n// ldg() has an overload for __half, but we also need one for Eigen::half.\n#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350\nEIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half __ldg(const Eigen::half* ptr) {\n  return Eigen::half_impl::raw_uint16_to_half(\n      __ldg(reinterpret_cast<const unsigned short*>(ptr)));\n}\n#endif\n\n\n#if defined(__CUDA_ARCH__)\nnamespace Eigen {\nnamespace numext {\n\ntemplate<>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nbool (isnan)(const Eigen::half& h) {\n  return (half_impl::isnan)(h);\n}\n\ntemplate<>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nbool (isinf)(const Eigen::half& h) {\n  return (half_impl::isinf)(h);\n}\n\ntemplate<>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nbool 
(isfinite)(const Eigen::half& h) {\n  return (half_impl::isfinite)(h);\n}\n\n} // namespace numext\n}  // namespace Eigen\n#endif\n\n#endif // EIGEN_HALF_CUDA_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/CUDA/MathFunctions.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_MATH_FUNCTIONS_CUDA_H\n#define EIGEN_MATH_FUNCTIONS_CUDA_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n// Make sure this is only available when targeting a GPU: we don't want to\n// introduce conflicts between these packet_traits definitions and the ones\n// we'll use on the host side (SSE, AVX, ...)\n#if defined(__CUDACC__) && defined(EIGEN_USE_GPU)\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nfloat4 plog<float4>(const float4& a)\n{\n  return make_float4(logf(a.x), logf(a.y), logf(a.z), logf(a.w));\n}\n\ntemplate<>  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\ndouble2 plog<double2>(const double2& a)\n{\n  using ::log;\n  return make_double2(log(a.x), log(a.y));\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nfloat4 plog1p<float4>(const float4& a)\n{\n  return make_float4(log1pf(a.x), log1pf(a.y), log1pf(a.z), log1pf(a.w));\n}\n\ntemplate<>  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\ndouble2 plog1p<double2>(const double2& a)\n{\n  return make_double2(log1p(a.x), log1p(a.y));\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nfloat4 pexp<float4>(const float4& a)\n{\n  return make_float4(expf(a.x), expf(a.y), expf(a.z), expf(a.w));\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\ndouble2 pexp<double2>(const double2& a)\n{\n  using ::exp;\n  return make_double2(exp(a.x), exp(a.y));\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nfloat4 pexpm1<float4>(const float4& a)\n{\n  return make_float4(expm1f(a.x), expm1f(a.y), expm1f(a.z), expm1f(a.w));\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\ndouble2 pexpm1<double2>(const double2& 
a)\n{\n  return make_double2(expm1(a.x), expm1(a.y));\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nfloat4 psqrt<float4>(const float4& a)\n{\n  return make_float4(sqrtf(a.x), sqrtf(a.y), sqrtf(a.z), sqrtf(a.w));\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\ndouble2 psqrt<double2>(const double2& a)\n{\n  using ::sqrt;\n  return make_double2(sqrt(a.x), sqrt(a.y));\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\nfloat4 prsqrt<float4>(const float4& a)\n{\n  return make_float4(rsqrtf(a.x), rsqrtf(a.y), rsqrtf(a.z), rsqrtf(a.w));\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\ndouble2 prsqrt<double2>(const double2& a)\n{\n  return make_double2(rsqrt(a.x), rsqrt(a.y));\n}\n\n\n#endif\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_MATH_FUNCTIONS_CUDA_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/CUDA/PacketMath.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PACKET_MATH_CUDA_H\n#define EIGEN_PACKET_MATH_CUDA_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n// Make sure this is only available when targeting a GPU: we don't want to\n// introduce conflicts between these packet_traits definitions and the ones\n// we'll use on the host side (SSE, AVX, ...)\n#if defined(__CUDACC__) && defined(EIGEN_USE_GPU)\ntemplate<> struct is_arithmetic<float4>  { enum { value = true }; };\ntemplate<> struct is_arithmetic<double2> { enum { value = true }; };\n\ntemplate<> struct packet_traits<float> : default_packet_traits\n{\n  typedef float4 type;\n  typedef float4 half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=4,\n    HasHalfPacket = 0,\n\n    HasDiv  = 1,\n    HasSin  = 0,\n    HasCos  = 0,\n    HasLog  = 1,\n    HasExp  = 1,\n    HasSqrt = 1,\n    HasRsqrt = 1,\n    HasLGamma = 1,\n    HasDiGamma = 1,\n    HasZeta = 1,\n    HasPolygamma = 1,\n    HasErf = 1,\n    HasErfc = 1,\n    HasIGamma = 1,\n    HasIGammac = 1,\n    HasBetaInc = 1,\n\n    HasBlend = 0,\n  };\n};\n\ntemplate<> struct packet_traits<double> : default_packet_traits\n{\n  typedef double2 type;\n  typedef double2 half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=2,\n    HasHalfPacket = 0,\n\n    HasDiv  = 1,\n    HasLog  = 1,\n    HasExp  = 1,\n    HasSqrt = 1,\n    HasRsqrt = 1,\n    HasLGamma = 1,\n    HasDiGamma = 1,\n    HasZeta = 1,\n    HasPolygamma = 1,\n    HasErf = 1,\n    HasErfc = 1,\n    HasIGamma = 1,\n    HasIGammac = 1,\n    HasBetaInc = 1,\n\n    HasBlend = 0,\n  };\n};\n\n\ntemplate<> struct 
unpacket_traits<float4>  { typedef float  type; enum {size=4, alignment=Aligned16}; typedef float4 half; };\ntemplate<> struct unpacket_traits<double2> { typedef double type; enum {size=2, alignment=Aligned16}; typedef double2 half; };\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pset1<float4>(const float&  from) {\n  return make_float4(from, from, from, from);\n}\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pset1<double2>(const double& from) {\n  return make_double2(from, from);\n}\n\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 plset<float4>(const float& a) {\n  return make_float4(a, a+1, a+2, a+3);\n}\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 plset<double2>(const double& a) {\n  return make_double2(a, a+1);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 padd<float4>(const float4& a, const float4& b) {\n  return make_float4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w);\n}\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 padd<double2>(const double2& a, const double2& b) {\n  return make_double2(a.x+b.x, a.y+b.y);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 psub<float4>(const float4& a, const float4& b) {\n  return make_float4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w);\n}\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 psub<double2>(const double2& a, const double2& b) {\n  return make_double2(a.x-b.x, a.y-b.y);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pnegate(const float4& a) {\n  return make_float4(-a.x, -a.y, -a.z, -a.w);\n}\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pnegate(const double2& a) {\n  return make_double2(-a.x, -a.y);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pconj(const float4& a) { return a; }\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pconj(const double2& a) { return a; }\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmul<float4>(const float4& a, const float4& b) {\n  return 
make_float4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w);\n}\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmul<double2>(const double2& a, const double2& b) {\n  return make_double2(a.x*b.x, a.y*b.y);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pdiv<float4>(const float4& a, const float4& b) {\n  return make_float4(a.x/b.x, a.y/b.y, a.z/b.z, a.w/b.w);\n}\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pdiv<double2>(const double2& a, const double2& b) {\n  return make_double2(a.x/b.x, a.y/b.y);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmin<float4>(const float4& a, const float4& b) {\n  return make_float4(fminf(a.x, b.x), fminf(a.y, b.y), fminf(a.z, b.z), fminf(a.w, b.w));\n}\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmin<double2>(const double2& a, const double2& b) {\n  return make_double2(fmin(a.x, b.x), fmin(a.y, b.y));\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pmax<float4>(const float4& a, const float4& b) {\n  return make_float4(fmaxf(a.x, b.x), fmaxf(a.y, b.y), fmaxf(a.z, b.z), fmaxf(a.w, b.w));\n}\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pmax<double2>(const double2& a, const double2& b) {\n  return make_double2(fmax(a.x, b.x), fmax(a.y, b.y));\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pload<float4>(const float* from) {\n  return *reinterpret_cast<const float4*>(from);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 pload<double2>(const double* from) {\n  return *reinterpret_cast<const double2*>(from);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ploadu<float4>(const float* from) {\n  return make_float4(from[0], from[1], from[2], from[3]);\n}\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploadu<double2>(const double* from) {\n  return make_double2(from[0], from[1]);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 ploaddup<float4>(const float*   from) {\n  return make_float4(from[0], 
from[0], from[1], from[1]);\n}\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 ploaddup<double2>(const double*  from) {\n  return make_double2(from[0], from[0]);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore<float>(float*   to, const float4& from) {\n  *reinterpret_cast<float4*>(to) = from;\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstore<double>(double* to, const double2& from) {\n  *reinterpret_cast<double2*>(to) = from;\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<float>(float*  to, const float4& from) {\n  to[0] = from.x;\n  to[1] = from.y;\n  to[2] = from.z;\n  to[3] = from.w;\n}\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const double2& from) {\n  to[0] = from.x;\n  to[1] = from.y;\n}\n\ntemplate<>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro<float4, Aligned>(const float* from) {\n#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350\n  return __ldg((const float4*)from);\n#else\n  return make_float4(from[0], from[1], from[2], from[3]);\n#endif\n}\ntemplate<>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro<double2, Aligned>(const double* from) {\n#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350\n  return __ldg((const double2*)from);\n#else\n  return make_double2(from[0], from[1]);\n#endif\n}\n\ntemplate<>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float4 ploadt_ro<float4, Unaligned>(const float* from) {\n#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350\n  return make_float4(__ldg(from+0), __ldg(from+1), __ldg(from+2), __ldg(from+3));\n#else\n  return make_float4(from[0], from[1], from[2], from[3]);\n#endif\n}\ntemplate<>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE double2 ploadt_ro<double2, Unaligned>(const double* from) {\n#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350\n  return make_double2(__ldg(from+0), __ldg(from+1));\n#else\n  return make_double2(from[0], from[1]);\n#endif\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline float4 
pgather<float, float4>(const float* from, Index stride) {\n  return make_float4(from[0*stride], from[1*stride], from[2*stride], from[3*stride]);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline double2 pgather<double, double2>(const double* from, Index stride) {\n  return make_double2(from[0*stride], from[1*stride]);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<float, float4>(float* to, const float4& from, Index stride) {\n  to[stride*0] = from.x;\n  to[stride*1] = from.y;\n  to[stride*2] = from.z;\n  to[stride*3] = from.w;\n}\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<double, double2>(double* to, const double2& from, Index stride) {\n  to[stride*0] = from.x;\n  to[stride*1] = from.y;\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline float  pfirst<float4>(const float4& a) {\n  return a.x;\n}\ntemplate<> EIGEN_DEVICE_FUNC inline double pfirst<double2>(const double2& a) {\n  return a.x;\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline float  predux<float4>(const float4& a) {\n  return a.x + a.y + a.z + a.w;\n}\ntemplate<> EIGEN_DEVICE_FUNC inline double predux<double2>(const double2& a) {\n  return a.x + a.y;\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline float  predux_max<float4>(const float4& a) {\n  return fmaxf(fmaxf(a.x, a.y), fmaxf(a.z, a.w));\n}\ntemplate<> EIGEN_DEVICE_FUNC inline double predux_max<double2>(const double2& a) {\n  return fmax(a.x, a.y);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline float  predux_min<float4>(const float4& a) {\n  return fminf(fminf(a.x, a.y), fminf(a.z, a.w));\n}\ntemplate<> EIGEN_DEVICE_FUNC inline double predux_min<double2>(const double2& a) {\n  return fmin(a.x, a.y);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline float  predux_mul<float4>(const float4& a) {\n  return a.x * a.y * a.z * a.w;\n}\ntemplate<> EIGEN_DEVICE_FUNC inline double predux_mul<double2>(const double2& a) {\n  return a.x * a.y;\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline float4  pabs<float4>(const float4& a) {\n  return make_float4(fabsf(a.x), fabsf(a.y), fabsf(a.z), 
fabsf(a.w));\n}\ntemplate<> EIGEN_DEVICE_FUNC inline double2 pabs<double2>(const double2& a) {\n  return make_double2(fabs(a.x), fabs(a.y));\n}\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<float4,4>& kernel) {\n  float tmp = kernel.packet[0].y;\n  kernel.packet[0].y = kernel.packet[1].x;\n  kernel.packet[1].x = tmp;\n\n  tmp = kernel.packet[0].z;\n  kernel.packet[0].z = kernel.packet[2].x;\n  kernel.packet[2].x = tmp;\n\n  tmp = kernel.packet[0].w;\n  kernel.packet[0].w = kernel.packet[3].x;\n  kernel.packet[3].x = tmp;\n\n  tmp = kernel.packet[1].z;\n  kernel.packet[1].z = kernel.packet[2].y;\n  kernel.packet[2].y = tmp;\n\n  tmp = kernel.packet[1].w;\n  kernel.packet[1].w = kernel.packet[3].y;\n  kernel.packet[3].y = tmp;\n\n  tmp = kernel.packet[2].w;\n  kernel.packet[2].w = kernel.packet[3].z;\n  kernel.packet[3].z = tmp;\n}\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<double2,2>& kernel) {\n  double tmp = kernel.packet[0].y;\n  kernel.packet[0].y = kernel.packet[1].x;\n  kernel.packet[1].x = tmp;\n}\n\n#endif\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n\n#endif // EIGEN_PACKET_MATH_CUDA_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/CUDA/PacketMathHalf.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PACKET_MATH_HALF_CUDA_H\n#define EIGEN_PACKET_MATH_HALF_CUDA_H\n\n\nnamespace Eigen {\nnamespace internal {\n\n// Most of the following operations require arch >= 3.0\n#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDACC__) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300\n\ntemplate<> struct is_arithmetic<half2> { enum { value = true }; };\n\ntemplate<> struct packet_traits<Eigen::half> : default_packet_traits\n{\n  typedef half2 type;\n  typedef half2 half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=2,\n    HasHalfPacket = 0,\n    HasAdd    = 1,\n    HasMul    = 1,\n    HasDiv    = 1,\n    HasSqrt   = 1,\n    HasRsqrt  = 1,\n    HasExp    = 1,\n    HasExpm1  = 1,\n    HasLog    = 1,\n    HasLog1p  = 1\n  };\n};\n\ntemplate<> struct unpacket_traits<half2> { typedef Eigen::half type; enum {size=2, alignment=Aligned16}; typedef half2 half; };\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 pset1<half2>(const Eigen::half& from) {\n  return __half2half2(from);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 pload<half2>(const Eigen::half* from) {\n  return *reinterpret_cast<const half2*>(from);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 ploadu<half2>(const Eigen::half* from) {\n  return __halves2half2(from[0], from[1]);\n}\n\ntemplate<> EIGEN_STRONG_INLINE half2 ploaddup<half2>(const Eigen::half*  from) {\n  return __halves2half2(from[0], from[0]);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const half2& from) {\n  *reinterpret_cast<half2*>(to) = from;\n}\n\ntemplate<> __device__ 
EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const half2& from) {\n  to[0] = __low2half(from);\n  to[1] = __high2half(from);\n}\n\ntemplate<>\n __device__ EIGEN_ALWAYS_INLINE half2 ploadt_ro<half2, Aligned>(const Eigen::half* from) {\n#if __CUDA_ARCH__ >= 350\n   return __ldg((const half2*)from);\n#else\n  return __halves2half2(*(from+0), *(from+1));\n#endif\n}\n\ntemplate<>\n__device__ EIGEN_ALWAYS_INLINE half2 ploadt_ro<half2, Unaligned>(const Eigen::half* from) {\n#if __CUDA_ARCH__ >= 350\n   return __halves2half2(__ldg(from+0), __ldg(from+1));\n#else\n  return __halves2half2(*(from+0), *(from+1));\n#endif\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 pgather<Eigen::half, half2>(const Eigen::half* from, Index stride) {\n  return __halves2half2(from[0*stride], from[1*stride]);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE void pscatter<Eigen::half, half2>(Eigen::half* to, const half2& from, Index stride) {\n  to[stride*0] = __low2half(from);\n  to[stride*1] = __high2half(from);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE Eigen::half pfirst<half2>(const half2& a) {\n  return __low2half(a);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 pabs<half2>(const half2& a) {\n  half2 result;\n  result.x = a.x & 0x7FFF7FFF;\n  return result;\n}\n\n\n__device__ EIGEN_STRONG_INLINE void\nptranspose(PacketBlock<half2,2>& kernel) {\n  __half a1 = __low2half(kernel.packet[0]);\n  __half a2 = __high2half(kernel.packet[0]);\n  __half b1 = __low2half(kernel.packet[1]);\n  __half b2 = __high2half(kernel.packet[1]);\n  kernel.packet[0] = __halves2half2(a1, b1);\n  kernel.packet[1] = __halves2half2(a2, b2);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 plset<half2>(const Eigen::half& a) {\n#if __CUDA_ARCH__ >= 530\n  return __halves2half2(a, __hadd(a, __float2half(1.0f)));\n#else\n  float f = __half2float(a) + 1.0f;\n  return __halves2half2(a, __float2half(f));\n#endif\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 padd<half2>(const 
half2& a, const half2& b) {\n#if __CUDA_ARCH__ >= 530\n  return __hadd2(a, b);\n#else\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  float b1 = __low2float(b);\n  float b2 = __high2float(b);\n  float r1 = a1 + b1;\n  float r2 = a2 + b2;\n  return __floats2half2_rn(r1, r2);\n#endif\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 psub<half2>(const half2& a, const half2& b) {\n#if __CUDA_ARCH__ >= 530\n  return __hsub2(a, b);\n#else\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  float b1 = __low2float(b);\n  float b2 = __high2float(b);\n  float r1 = a1 - b1;\n  float r2 = a2 - b2;\n  return __floats2half2_rn(r1, r2);\n#endif\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 pnegate(const half2& a) {\n#if __CUDA_ARCH__ >= 530\n  return __hneg2(a);\n#else\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  return __floats2half2_rn(-a1, -a2);\n#endif\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 pconj(const half2& a) { return a; }\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 pmul<half2>(const half2& a, const half2& b) {\n#if __CUDA_ARCH__ >= 530\n  return __hmul2(a, b);\n#else\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  float b1 = __low2float(b);\n  float b2 = __high2float(b);\n  float r1 = a1 * b1;\n  float r2 = a2 * b2;\n  return __floats2half2_rn(r1, r2);\n#endif\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 pmadd<half2>(const half2& a, const half2& b, const half2& c) {\n#if __CUDA_ARCH__ >= 530\n   return __hfma2(a, b, c);\n#else\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  float b1 = __low2float(b);\n  float b2 = __high2float(b);\n  float c1 = __low2float(c);\n  float c2 = __high2float(c);\n  float r1 = a1 * b1 + c1;\n  float r2 = a2 * b2 + c2;\n  return __floats2half2_rn(r1, r2);\n#endif\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 pdiv<half2>(const half2& a, const half2& b) {\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  
float b1 = __low2float(b);\n  float b2 = __high2float(b);\n  float r1 = a1 / b1;\n  float r2 = a2 / b2;\n  return __floats2half2_rn(r1, r2);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 pmin<half2>(const half2& a, const half2& b) {\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  float b1 = __low2float(b);\n  float b2 = __high2float(b);\n  __half r1 = a1 < b1 ? __low2half(a) : __low2half(b);\n  __half r2 = a2 < b2 ? __high2half(a) : __high2half(b);\n  return __halves2half2(r1, r2);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 pmax<half2>(const half2& a, const half2& b) {\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  float b1 = __low2float(b);\n  float b2 = __high2float(b);\n  __half r1 = a1 > b1 ? __low2half(a) : __low2half(b);\n  __half r2 = a2 > b2 ? __high2half(a) : __high2half(b);\n  return __halves2half2(r1, r2);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE Eigen::half predux<half2>(const half2& a) {\n#if __CUDA_ARCH__ >= 530\n  return __hadd(__low2half(a), __high2half(a));\n#else\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  return Eigen::half(half_impl::raw_uint16_to_half(__float2half_rn(a1 + a2)));\n#endif\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_max<half2>(const half2& a) {\n#if __CUDA_ARCH__ >= 530\n  __half first = __low2half(a);\n  __half second = __high2half(a);\n  return __hgt(first, second) ? first : second;\n#else\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  return a1 > a2 ? __low2half(a) : __high2half(a);\n#endif\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_min<half2>(const half2& a) {\n#if __CUDA_ARCH__ >= 530\n  __half first = __low2half(a);\n  __half second = __high2half(a);\n  return __hlt(first, second) ? first : second;\n#else\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  return a1 < a2 ? 
__low2half(a) : __high2half(a);\n#endif\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_mul<half2>(const half2& a) {\n#if __CUDA_ARCH__ >= 530\n  return __hmul(__low2half(a), __high2half(a));\n#else\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  return Eigen::half(half_impl::raw_uint16_to_half(__float2half_rn(a1 * a2)));\n#endif\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 plog1p<half2>(const half2& a) {\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  float r1 = log1pf(a1);\n  float r2 = log1pf(a2);\n  return __floats2half2_rn(r1, r2);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 pexpm1<half2>(const half2& a) {\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  float r1 = expm1f(a1);\n  float r2 = expm1f(a2);\n  return __floats2half2_rn(r1, r2);\n}\n\n#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000 && defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 530\n\ntemplate<>  __device__ EIGEN_STRONG_INLINE\nhalf2 plog<half2>(const half2& a) {\n  return h2log(a);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE\nhalf2 pexp<half2>(const half2& a) {\n  return h2exp(a);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE\nhalf2 psqrt<half2>(const half2& a) {\n  return h2sqrt(a);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE\nhalf2 prsqrt<half2>(const half2& a) {\n  return h2rsqrt(a);\n}\n\n#else\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 plog<half2>(const half2& a) {\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  float r1 = logf(a1);\n  float r2 = logf(a2);\n  return __floats2half2_rn(r1, r2);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 pexp<half2>(const half2& a) {\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  float r1 = expf(a1);\n  float r2 = expf(a2);\n  return __floats2half2_rn(r1, r2);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 psqrt<half2>(const half2& a) {\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  float r1 
= sqrtf(a1);\n  float r2 = sqrtf(a2);\n  return __floats2half2_rn(r1, r2);\n}\n\ntemplate<> __device__ EIGEN_STRONG_INLINE half2 prsqrt<half2>(const half2& a) {\n  float a1 = __low2float(a);\n  float a2 = __high2float(a);\n  float r1 = rsqrtf(a1);\n  float r2 = rsqrtf(a2);\n  return __floats2half2_rn(r1, r2);\n}\n\n#endif\n\n#elif defined EIGEN_VECTORIZE_AVX512\n\ntypedef struct {\n  __m256i x;\n} Packet16h;\n\n\ntemplate<> struct is_arithmetic<Packet16h> { enum { value = true }; };\n\ntemplate <>\nstruct packet_traits<half> : default_packet_traits {\n  typedef Packet16h type;\n  // There is no half-size packet for Packet16h.\n  typedef Packet16h half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 16,\n    HasHalfPacket = 0,\n    HasAdd    = 0,\n    HasSub    = 0,\n    HasMul    = 0,\n    HasNegate = 0,\n    HasAbs    = 0,\n    HasAbs2   = 0,\n    HasMin    = 0,\n    HasMax    = 0,\n    HasConj   = 0,\n    HasSetLinear = 0,\n    HasDiv = 0,\n    HasSqrt = 0,\n    HasRsqrt = 0,\n    HasExp = 0,\n    HasLog = 0,\n    HasBlend = 0\n  };\n};\n\n\ntemplate<> struct unpacket_traits<Packet16h> { typedef Eigen::half type; enum {size=16, alignment=Aligned32}; typedef Packet16h half; };\n\ntemplate<> EIGEN_STRONG_INLINE Packet16h pset1<Packet16h>(const Eigen::half& from) {\n  Packet16h result;\n  result.x = _mm256_set1_epi16(from.x);\n  return result;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet16h>(const Packet16h& from) {\n  return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm256_extract_epi16(from.x, 0)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet16h pload<Packet16h>(const Eigen::half* from) {\n  Packet16h result;\n  result.x = _mm256_load_si256(reinterpret_cast<const __m256i*>(from));\n  return result;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet16h ploadu<Packet16h>(const Eigen::half* from) {\n  Packet16h result;\n  result.x = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));\n  return 
result;\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstore<half>(Eigen::half* to, const Packet16h& from) {\n  _mm256_store_si256((__m256i*)to, from.x);\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<half>(Eigen::half* to, const Packet16h& from) {\n  _mm256_storeu_si256((__m256i*)to, from.x);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet16h\nploadquad(const Eigen::half* from) {\n  Packet16h result;\n  unsigned short a = from[0].x;\n  unsigned short b = from[1].x;\n  unsigned short c = from[2].x;\n  unsigned short d = from[3].x;\n  result.x = _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a);\n  return result;\n}\n\nEIGEN_STRONG_INLINE Packet16f half2float(const Packet16h& a) {\n#ifdef EIGEN_HAS_FP16_C\n  return _mm512_cvtph_ps(a.x);\n#else\n  EIGEN_ALIGN64 half aux[16];\n  pstore(aux, a);\n  float f0(aux[0]);\n  float f1(aux[1]);\n  float f2(aux[2]);\n  float f3(aux[3]);\n  float f4(aux[4]);\n  float f5(aux[5]);\n  float f6(aux[6]);\n  float f7(aux[7]);\n  float f8(aux[8]);\n  float f9(aux[9]);\n  float fa(aux[10]);\n  float fb(aux[11]);\n  float fc(aux[12]);\n  float fd(aux[13]);\n  float fe(aux[14]);\n  float ff(aux[15]);\n\n  return _mm512_set_ps(\n      ff, fe, fd, fc, fb, fa, f9, f8, f7, f6, f5, f4, f3, f2, f1, f0);\n#endif\n}\n\nEIGEN_STRONG_INLINE Packet16h float2half(const Packet16f& a) {\n#ifdef EIGEN_HAS_FP16_C\n  Packet16h result;\n  result.x = _mm512_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);\n  return result;\n#else\n  EIGEN_ALIGN64 float aux[16];\n  pstore(aux, a);\n  half h0(aux[0]);\n  half h1(aux[1]);\n  half h2(aux[2]);\n  half h3(aux[3]);\n  half h4(aux[4]);\n  half h5(aux[5]);\n  half h6(aux[6]);\n  half h7(aux[7]);\n  half h8(aux[8]);\n  half h9(aux[9]);\n  half ha(aux[10]);\n  half hb(aux[11]);\n  half hc(aux[12]);\n  half hd(aux[13]);\n  half he(aux[14]);\n  half hf(aux[15]);\n\n  Packet16h result;\n  result.x = _mm256_set_epi16(\n      hf.x, he.x, hd.x, hc.x, hb.x, ha.x, h9.x, h8.x,\n      h7.x, h6.x, h5.x, h4.x, 
h3.x, h2.x, h1.x, h0.x);\n  return result;\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet16h padd<Packet16h>(const Packet16h& a, const Packet16h& b) {\n  Packet16f af = half2float(a);\n  Packet16f bf = half2float(b);\n  Packet16f rf = padd(af, bf);\n  return float2half(rf);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet16h pmul<Packet16h>(const Packet16h& a, const Packet16h& b) {\n  Packet16f af = half2float(a);\n  Packet16f bf = half2float(b);\n  Packet16f rf = pmul(af, bf);\n  return float2half(rf);\n}\n\ntemplate<> EIGEN_STRONG_INLINE half predux<Packet16h>(const Packet16h& from) {\n  Packet16f from_float = half2float(from);\n  return half(predux(from_float));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet16h pgather<Eigen::half, Packet16h>(const Eigen::half* from, Index stride)\n{\n  Packet16h result;\n  result.x = _mm256_set_epi16(\n      from[15*stride].x, from[14*stride].x, from[13*stride].x, from[12*stride].x,\n      from[11*stride].x, from[10*stride].x, from[9*stride].x, from[8*stride].x,\n      from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x,\n      from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);\n  return result;\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pscatter<half, Packet16h>(half* to, const Packet16h& from, Index stride)\n{\n  EIGEN_ALIGN64 half aux[16];\n  pstore(aux, from);\n  to[stride*0].x = aux[0].x;\n  to[stride*1].x = aux[1].x;\n  to[stride*2].x = aux[2].x;\n  to[stride*3].x = aux[3].x;\n  to[stride*4].x = aux[4].x;\n  to[stride*5].x = aux[5].x;\n  to[stride*6].x = aux[6].x;\n  to[stride*7].x = aux[7].x;\n  to[stride*8].x = aux[8].x;\n  to[stride*9].x = aux[9].x;\n  to[stride*10].x = aux[10].x;\n  to[stride*11].x = aux[11].x;\n  to[stride*12].x = aux[12].x;\n  to[stride*13].x = aux[13].x;\n  to[stride*14].x = aux[14].x;\n  to[stride*15].x = aux[15].x;\n}\n\nEIGEN_STRONG_INLINE void\nptranspose(PacketBlock<Packet16h,16>& kernel) {\n  __m256i a = kernel.packet[0].x;\n  __m256i b = 
kernel.packet[1].x;\n  __m256i c = kernel.packet[2].x;\n  __m256i d = kernel.packet[3].x;\n  __m256i e = kernel.packet[4].x;\n  __m256i f = kernel.packet[5].x;\n  __m256i g = kernel.packet[6].x;\n  __m256i h = kernel.packet[7].x;\n  __m256i i = kernel.packet[8].x;\n  __m256i j = kernel.packet[9].x;\n  __m256i k = kernel.packet[10].x;\n  __m256i l = kernel.packet[11].x;\n  __m256i m = kernel.packet[12].x;\n  __m256i n = kernel.packet[13].x;\n  __m256i o = kernel.packet[14].x;\n  __m256i p = kernel.packet[15].x;\n\n  __m256i ab_07 = _mm256_unpacklo_epi16(a, b);\n  __m256i cd_07 = _mm256_unpacklo_epi16(c, d);\n  __m256i ef_07 = _mm256_unpacklo_epi16(e, f);\n  __m256i gh_07 = _mm256_unpacklo_epi16(g, h);\n  __m256i ij_07 = _mm256_unpacklo_epi16(i, j);\n  __m256i kl_07 = _mm256_unpacklo_epi16(k, l);\n  __m256i mn_07 = _mm256_unpacklo_epi16(m, n);\n  __m256i op_07 = _mm256_unpacklo_epi16(o, p);\n\n  __m256i ab_8f = _mm256_unpackhi_epi16(a, b);\n  __m256i cd_8f = _mm256_unpackhi_epi16(c, d);\n  __m256i ef_8f = _mm256_unpackhi_epi16(e, f);\n  __m256i gh_8f = _mm256_unpackhi_epi16(g, h);\n  __m256i ij_8f = _mm256_unpackhi_epi16(i, j);\n  __m256i kl_8f = _mm256_unpackhi_epi16(k, l);\n  __m256i mn_8f = _mm256_unpackhi_epi16(m, n);\n  __m256i op_8f = _mm256_unpackhi_epi16(o, p);\n\n  __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);\n  __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);\n  __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);\n  __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);\n  __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);\n  __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);\n  __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);\n  __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);\n\n  __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);\n  __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);\n  __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);\n  __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);\n  __m256i 
ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);\n  __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);\n  __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);\n  __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);\n\n  __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);\n  __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);\n  __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);\n  __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);\n  __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);\n  __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);\n  __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);\n  __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);\n  __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);\n  __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);\n  __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);\n  __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);\n  __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);\n  __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);\n  __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);\n  __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);\n\n  // NOTE: no unpacklo/hi instr in this case, so using permute instr.\n  __m256i a_p_0 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);\n  __m256i a_p_1 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);\n  __m256i a_p_2 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);\n  __m256i a_p_3 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);\n  __m256i a_p_4 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);\n  __m256i a_p_5 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);\n  __m256i a_p_6 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);\n  __m256i a_p_7 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);\n  __m256i a_p_8 = 
_mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);\n  __m256i a_p_9 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);\n  __m256i a_p_a = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);\n  __m256i a_p_b = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);\n  __m256i a_p_c = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);\n  __m256i a_p_d = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);\n  __m256i a_p_e = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);\n  __m256i a_p_f = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);\n\n  kernel.packet[0].x = a_p_0;\n  kernel.packet[1].x = a_p_1;\n  kernel.packet[2].x = a_p_2;\n  kernel.packet[3].x = a_p_3;\n  kernel.packet[4].x = a_p_4;\n  kernel.packet[5].x = a_p_5;\n  kernel.packet[6].x = a_p_6;\n  kernel.packet[7].x = a_p_7;\n  kernel.packet[8].x = a_p_8;\n  kernel.packet[9].x = a_p_9;\n  kernel.packet[10].x = a_p_a;\n  kernel.packet[11].x = a_p_b;\n  kernel.packet[12].x = a_p_c;\n  kernel.packet[13].x = a_p_d;\n  kernel.packet[14].x = a_p_e;\n  kernel.packet[15].x = a_p_f;\n}\n\nEIGEN_STRONG_INLINE void\nptranspose(PacketBlock<Packet16h,8>& kernel) {\n  EIGEN_ALIGN64 half in[8][16];\n  pstore<half>(in[0], kernel.packet[0]);\n  pstore<half>(in[1], kernel.packet[1]);\n  pstore<half>(in[2], kernel.packet[2]);\n  pstore<half>(in[3], kernel.packet[3]);\n  pstore<half>(in[4], kernel.packet[4]);\n  pstore<half>(in[5], kernel.packet[5]);\n  pstore<half>(in[6], kernel.packet[6]);\n  pstore<half>(in[7], kernel.packet[7]);\n\n  EIGEN_ALIGN64 half out[8][16];\n\n  for (int i = 0; i < 8; ++i) {\n    for (int j = 0; j < 8; ++j) {\n      out[i][j] = in[j][2*i];\n    }\n    for (int j = 0; j < 8; ++j) {\n      out[i][j+8] = in[j][2*i+1];\n    }\n  }\n\n  kernel.packet[0] = pload<Packet16h>(out[0]);\n  kernel.packet[1] = pload<Packet16h>(out[1]);\n  kernel.packet[2] = pload<Packet16h>(out[2]);\n  kernel.packet[3] = pload<Packet16h>(out[3]);\n  kernel.packet[4] = 
pload<Packet16h>(out[4]);\n  kernel.packet[5] = pload<Packet16h>(out[5]);\n  kernel.packet[6] = pload<Packet16h>(out[6]);\n  kernel.packet[7] = pload<Packet16h>(out[7]);\n}\n\nEIGEN_STRONG_INLINE void\nptranspose(PacketBlock<Packet16h,4>& kernel) {\n  EIGEN_ALIGN64 half in[4][16];\n  pstore<half>(in[0], kernel.packet[0]);\n  pstore<half>(in[1], kernel.packet[1]);\n  pstore<half>(in[2], kernel.packet[2]);\n  pstore<half>(in[3], kernel.packet[3]);\n\n  EIGEN_ALIGN64 half out[4][16];\n\n  for (int i = 0; i < 4; ++i) {\n    for (int j = 0; j < 4; ++j) {\n      out[i][j] = in[j][4*i];\n    }\n    for (int j = 0; j < 4; ++j) {\n      out[i][j+4] = in[j][4*i+1];\n    }\n    for (int j = 0; j < 4; ++j) {\n      out[i][j+8] = in[j][4*i+2];\n    }\n    for (int j = 0; j < 4; ++j) {\n      out[i][j+12] = in[j][4*i+3];\n    }\n  }\n\n  kernel.packet[0] = pload<Packet16h>(out[0]);\n  kernel.packet[1] = pload<Packet16h>(out[1]);\n  kernel.packet[2] = pload<Packet16h>(out[2]);\n  kernel.packet[3] = pload<Packet16h>(out[3]);\n}\n\n\n#elif defined EIGEN_VECTORIZE_AVX\n\ntypedef struct {\n  __m128i x;\n} Packet8h;\n\n\ntemplate<> struct is_arithmetic<Packet8h> { enum { value = true }; };\n\ntemplate <>\nstruct packet_traits<Eigen::half> : default_packet_traits {\n  typedef Packet8h type;\n  // There is no half-size packet for Packet8h.\n  typedef Packet8h half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 8,\n    HasHalfPacket = 0,\n    HasAdd    = 0,\n    HasSub    = 0,\n    HasMul    = 0,\n    HasNegate = 0,\n    HasAbs    = 0,\n    HasAbs2   = 0,\n    HasMin    = 0,\n    HasMax    = 0,\n    HasConj   = 0,\n    HasSetLinear = 0,\n    HasDiv = 0,\n    HasSqrt = 0,\n    HasRsqrt = 0,\n    HasExp = 0,\n    HasLog = 0,\n    HasBlend = 0\n  };\n};\n\n\ntemplate<> struct unpacket_traits<Packet8h> { typedef Eigen::half type; enum {size=8, alignment=Aligned16}; typedef Packet8h half; };\n\ntemplate<> EIGEN_STRONG_INLINE Packet8h pset1<Packet8h>(const Eigen::half& 
from) {\n  Packet8h result;\n  result.x = _mm_set1_epi16(from.x);\n  return result;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet8h>(const Packet8h& from) {\n  return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm_extract_epi16(from.x, 0)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet8h pload<Packet8h>(const Eigen::half* from) {\n  Packet8h result;\n  result.x = _mm_load_si128(reinterpret_cast<const __m128i*>(from));\n  return result;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet8h ploadu<Packet8h>(const Eigen::half* from) {\n  Packet8h result;\n  result.x = _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));\n  return result;\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet8h& from) {\n  _mm_store_si128(reinterpret_cast<__m128i*>(to), from.x);\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet8h& from) {\n  _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from.x);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet8h\nploadquad<Packet8h>(const Eigen::half* from) {\n  Packet8h result;\n  unsigned short a = from[0].x;\n  unsigned short b = from[1].x;\n  result.x = _mm_set_epi16(b, b, b, b, a, a, a, a);\n  return result;\n}\n\nEIGEN_STRONG_INLINE Packet8f half2float(const Packet8h& a) {\n#ifdef EIGEN_HAS_FP16_C\n  return _mm256_cvtph_ps(a.x);\n#else\n  EIGEN_ALIGN32 Eigen::half aux[8];\n  pstore(aux, a);\n  float f0(aux[0]);\n  float f1(aux[1]);\n  float f2(aux[2]);\n  float f3(aux[3]);\n  float f4(aux[4]);\n  float f5(aux[5]);\n  float f6(aux[6]);\n  float f7(aux[7]);\n\n  return _mm256_set_ps(f7, f6, f5, f4, f3, f2, f1, f0);\n#endif\n}\n\nEIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) {\n#ifdef EIGEN_HAS_FP16_C\n  Packet8h result;\n  result.x = _mm256_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);\n  return result;\n#else\n  EIGEN_ALIGN32 float aux[8];\n  pstore(aux, a);\n  Eigen::half h0(aux[0]);\n  Eigen::half h1(aux[1]);\n  
Eigen::half h2(aux[2]);\n  Eigen::half h3(aux[3]);\n  Eigen::half h4(aux[4]);\n  Eigen::half h5(aux[5]);\n  Eigen::half h6(aux[6]);\n  Eigen::half h7(aux[7]);\n\n  Packet8h result;\n  result.x = _mm_set_epi16(h7.x, h6.x, h5.x, h4.x, h3.x, h2.x, h1.x, h0.x);\n  return result;\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet8h pconj(const Packet8h& a) { return a; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(const Packet8h& a, const Packet8h& b) {\n  Packet8f af = half2float(a);\n  Packet8f bf = half2float(b);\n  Packet8f rf = padd(af, bf);\n  return float2half(rf);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(const Packet8h& a, const Packet8h& b) {\n  Packet8f af = half2float(a);\n  Packet8f bf = half2float(b);\n  Packet8f rf = pmul(af, bf);\n  return float2half(rf);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet8h pgather<Eigen::half, Packet8h>(const Eigen::half* from, Index stride)\n{\n  Packet8h result;\n  result.x = _mm_set_epi16(from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x, from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);\n  return result;\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet8h>(Eigen::half* to, const Packet8h& from, Index stride)\n{\n  EIGEN_ALIGN32 Eigen::half aux[8];\n  pstore(aux, from);\n  to[stride*0].x = aux[0].x;\n  to[stride*1].x = aux[1].x;\n  to[stride*2].x = aux[2].x;\n  to[stride*3].x = aux[3].x;\n  to[stride*4].x = aux[4].x;\n  to[stride*5].x = aux[5].x;\n  to[stride*6].x = aux[6].x;\n  to[stride*7].x = aux[7].x;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Eigen::half predux<Packet8h>(const Packet8h& a) {\n  Packet8f af = half2float(a);\n  float reduced = predux<Packet8f>(af);\n  return Eigen::half(reduced);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Eigen::half predux_max<Packet8h>(const Packet8h& a) {\n  Packet8f af = half2float(a);\n  float reduced = predux_max<Packet8f>(af);\n  return Eigen::half(reduced);\n}\n\ntemplate<> EIGEN_STRONG_INLINE 
Eigen::half predux_min<Packet8h>(const Packet8h& a) {\n  Packet8f af = half2float(a);\n  float reduced = predux_min<Packet8f>(af);\n  return Eigen::half(reduced);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet8h>(const Packet8h& a) {\n  Packet8f af = half2float(a);\n  float reduced = predux_mul<Packet8f>(af);\n  return Eigen::half(reduced);\n}\n\nEIGEN_STRONG_INLINE void\nptranspose(PacketBlock<Packet8h,8>& kernel) {\n  __m128i a = kernel.packet[0].x;\n  __m128i b = kernel.packet[1].x;\n  __m128i c = kernel.packet[2].x;\n  __m128i d = kernel.packet[3].x;\n  __m128i e = kernel.packet[4].x;\n  __m128i f = kernel.packet[5].x;\n  __m128i g = kernel.packet[6].x;\n  __m128i h = kernel.packet[7].x;\n\n  __m128i a03b03 = _mm_unpacklo_epi16(a, b);\n  __m128i c03d03 = _mm_unpacklo_epi16(c, d);\n  __m128i e03f03 = _mm_unpacklo_epi16(e, f);\n  __m128i g03h03 = _mm_unpacklo_epi16(g, h);\n  __m128i a47b47 = _mm_unpackhi_epi16(a, b);\n  __m128i c47d47 = _mm_unpackhi_epi16(c, d);\n  __m128i e47f47 = _mm_unpackhi_epi16(e, f);\n  __m128i g47h47 = _mm_unpackhi_epi16(g, h);\n\n  __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);\n  __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);\n  __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);\n  __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);\n  __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);\n  __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);\n  __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);\n  __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);\n\n  __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);\n  __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);\n  __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);\n  __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);\n  __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);\n  __m128i 
a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);\n  __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);\n  __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);\n\n  kernel.packet[0].x = a0b0c0d0e0f0g0h0;\n  kernel.packet[1].x = a1b1c1d1e1f1g1h1;\n  kernel.packet[2].x = a2b2c2d2e2f2g2h2;\n  kernel.packet[3].x = a3b3c3d3e3f3g3h3;\n  kernel.packet[4].x = a4b4c4d4e4f4g4h4;\n  kernel.packet[5].x = a5b5c5d5e5f5g5h5;\n  kernel.packet[6].x = a6b6c6d6e6f6g6h6;\n  kernel.packet[7].x = a7b7c7d7e7f7g7h7;\n}\n\nEIGEN_STRONG_INLINE void\nptranspose(PacketBlock<Packet8h,4>& kernel) {\n  EIGEN_ALIGN32 Eigen::half in[4][8];\n  pstore<Eigen::half>(in[0], kernel.packet[0]);\n  pstore<Eigen::half>(in[1], kernel.packet[1]);\n  pstore<Eigen::half>(in[2], kernel.packet[2]);\n  pstore<Eigen::half>(in[3], kernel.packet[3]);\n\n  EIGEN_ALIGN32 Eigen::half out[4][8];\n\n  for (int i = 0; i < 4; ++i) {\n    for (int j = 0; j < 4; ++j) {\n      out[i][j] = in[j][2*i];\n    }\n    for (int j = 0; j < 4; ++j) {\n      out[i][j+4] = in[j][2*i+1];\n    }\n  }\n\n  kernel.packet[0] = pload<Packet8h>(out[0]);\n  kernel.packet[1] = pload<Packet8h>(out[1]);\n  kernel.packet[2] = pload<Packet8h>(out[2]);\n  kernel.packet[3] = pload<Packet8h>(out[3]);\n}\n\n\n// Disable the following code since it's broken on too many platforms / compilers.\n//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)\n#elif 0\n\ntypedef struct {\n  __m64 x;\n} Packet4h;\n\n\ntemplate<> struct is_arithmetic<Packet4h> { enum { value = true }; };\n\ntemplate <>\nstruct packet_traits<Eigen::half> : default_packet_traits {\n  typedef Packet4h type;\n  // There is no half-size packet for Packet4h.\n  typedef Packet4h half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 4,\n    HasHalfPacket = 0,\n    HasAdd    = 0,\n    HasSub    = 0,\n    HasMul    = 0,\n    HasNegate = 0,\n    HasAbs    = 0,\n    HasAbs2   = 0,\n    
HasMin    = 0,\n    HasMax    = 0,\n    HasConj   = 0,\n    HasSetLinear = 0,\n    HasDiv = 0,\n    HasSqrt = 0,\n    HasRsqrt = 0,\n    HasExp = 0,\n    HasLog = 0,\n    HasBlend = 0\n  };\n};\n\n\ntemplate<> struct unpacket_traits<Packet4h> { typedef Eigen::half type; enum {size=4, alignment=Aligned16}; typedef Packet4h half; };\n\ntemplate<> EIGEN_STRONG_INLINE Packet4h pset1<Packet4h>(const Eigen::half& from) {\n  Packet4h result;\n  result.x = _mm_set1_pi16(from.x);\n  return result;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet4h>(const Packet4h& from) {\n  return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm_cvtsi64_si32(from.x)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4h pconj(const Packet4h& a) { return a; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4h padd<Packet4h>(const Packet4h& a, const Packet4h& b) {\n  __int64_t a64 = _mm_cvtm64_si64(a.x);\n  __int64_t b64 = _mm_cvtm64_si64(b.x);\n\n  Eigen::half h[4];\n\n  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));\n  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));\n  h[0] = ha + hb;\n  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));\n  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));\n  h[1] = ha + hb;\n  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));\n  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));\n  h[2] = ha + hb;\n  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));\n  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));\n  h[3] = ha + hb;\n  Packet4h result;\n  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);\n  return result;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4h pmul<Packet4h>(const Packet4h& a, const Packet4h& b) {\n  __int64_t a64 = _mm_cvtm64_si64(a.x);\n  __int64_t b64 = _mm_cvtm64_si64(b.x);\n\n  Eigen::half h[4];\n\n  
Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));\n  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));\n  h[0] = ha * hb;\n  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));\n  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));\n  h[1] = ha * hb;\n  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));\n  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));\n  h[2] = ha * hb;\n  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));\n  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));\n  h[3] = ha * hb;\n  Packet4h result;\n  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);\n  return result;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4h pload<Packet4h>(const Eigen::half* from) {\n  Packet4h result;\n  result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));\n  return result;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4h ploadu<Packet4h>(const Eigen::half* from) {\n  Packet4h result;\n  result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));\n  return result;\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet4h& from) {\n  __int64_t r = _mm_cvtm64_si64(from.x);\n  *(reinterpret_cast<__int64_t*>(to)) = r;\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet4h& from) {\n  __int64_t r = _mm_cvtm64_si64(from.x);\n  *(reinterpret_cast<__int64_t*>(to)) = r;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4h\nploadquad<Packet4h>(const Eigen::half* from) {\n  return pset1<Packet4h>(*from);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4h pgather<Eigen::half, Packet4h>(const Eigen::half* from, Index stride)\n{\n  Packet4h result;\n  result.x = _mm_set_pi16(from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);\n  return result;\n}\n\ntemplate<> 
EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet4h>(Eigen::half* to, const Packet4h& from, Index stride)\n{\n  __int64_t a = _mm_cvtm64_si64(from.x);\n  to[stride*0].x = static_cast<unsigned short>(a);\n  to[stride*1].x = static_cast<unsigned short>(a >> 16);\n  to[stride*2].x = static_cast<unsigned short>(a >> 32);\n  to[stride*3].x = static_cast<unsigned short>(a >> 48);\n}\n\nEIGEN_STRONG_INLINE void\nptranspose(PacketBlock<Packet4h,4>& kernel) {\n  __m64 T0 = _mm_unpacklo_pi16(kernel.packet[0].x, kernel.packet[1].x);\n  __m64 T1 = _mm_unpacklo_pi16(kernel.packet[2].x, kernel.packet[3].x);\n  __m64 T2 = _mm_unpackhi_pi16(kernel.packet[0].x, kernel.packet[1].x);\n  __m64 T3 = _mm_unpackhi_pi16(kernel.packet[2].x, kernel.packet[3].x);\n\n  kernel.packet[0].x = _mm_unpacklo_pi32(T0, T1);\n  kernel.packet[1].x = _mm_unpackhi_pi32(T0, T1);\n  kernel.packet[2].x = _mm_unpacklo_pi32(T2, T3);\n  kernel.packet[3].x = _mm_unpackhi_pi32(T2, T3);\n}\n\n#endif\n\n}\n}\n\n#endif // EIGEN_PACKET_MATH_HALF_CUDA_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/CUDA/TypeCasting.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2016 Benoit Steiner <benoit.steiner.goog@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_TYPE_CASTING_CUDA_H\n#define EIGEN_TYPE_CASTING_CUDA_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<>\nstruct scalar_cast_op<float, Eigen::half> {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)\n  typedef Eigen::half result_type;\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const float& a) const {\n    #if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300\n      return __float2half(a);\n    #else\n      return Eigen::half(a);\n    #endif\n  }\n};\n\ntemplate<>\nstruct functor_traits<scalar_cast_op<float, Eigen::half> >\n{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };\n\n\ntemplate<>\nstruct scalar_cast_op<int, Eigen::half> {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)\n  typedef Eigen::half result_type;\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Eigen::half operator() (const int& a) const {\n    #if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300\n      return __float2half(static_cast<float>(a));\n    #else\n      return Eigen::half(static_cast<float>(a));\n    #endif\n  }\n};\n\ntemplate<>\nstruct functor_traits<scalar_cast_op<int, Eigen::half> >\n{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };\n\n\ntemplate<>\nstruct scalar_cast_op<Eigen::half, float> {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)\n  typedef float result_type;\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float operator() (const Eigen::half& a) const {\n    #if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300\n      return __half2float(a);\n    
#else\n      return static_cast<float>(a);\n    #endif\n  }\n};\n\ntemplate<>\nstruct functor_traits<scalar_cast_op<Eigen::half, float> >\n{ enum { Cost = NumTraits<float>::AddCost, PacketAccess = false }; };\n\n\n\n#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300\n\ntemplate <>\nstruct type_casting_traits<Eigen::half, float> {\n  enum {\n    VectorizedCast = 1,\n    SrcCoeffRatio = 2,\n    TgtCoeffRatio = 1\n  };\n};\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pcast<half2, float4>(const half2& a, const half2& b) {\n  float2 r1 = __half22float2(a);\n  float2 r2 = __half22float2(b);\n  return make_float4(r1.x, r1.y, r2.x, r2.y);\n}\n\ntemplate <>\nstruct type_casting_traits<float, Eigen::half> {\n  enum {\n    VectorizedCast = 1,\n    SrcCoeffRatio = 1,\n    TgtCoeffRatio = 2\n  };\n};\n\ntemplate<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE half2 pcast<float4, half2>(const float4& a) {\n  // Simply discard the second half of the input\n  return __floats2half2_rn(a.x, a.y);\n}\n\n#elif defined EIGEN_VECTORIZE_AVX512\ntemplate <>\nstruct type_casting_traits<half, float> {\n  enum {\n    VectorizedCast = 1,\n    SrcCoeffRatio = 1,\n    TgtCoeffRatio = 1\n  };\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet16f pcast<Packet16h, Packet16f>(const Packet16h& a) {\n  return half2float(a);\n}\n\ntemplate <>\nstruct type_casting_traits<float, half> {\n  enum {\n    VectorizedCast = 1,\n    SrcCoeffRatio = 1,\n    TgtCoeffRatio = 1\n  };\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet16h pcast<Packet16f, Packet16h>(const Packet16f& a) {\n  return float2half(a);\n}\n\n#elif defined EIGEN_VECTORIZE_AVX\n\ntemplate <>\nstruct type_casting_traits<Eigen::half, float> {\n  enum {\n    VectorizedCast = 1,\n    SrcCoeffRatio = 1,\n    TgtCoeffRatio = 1\n  };\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet8f pcast<Packet8h, Packet8f>(const Packet8h& a) {\n  return half2float(a);\n}\n\ntemplate <>\nstruct type_casting_traits<float, Eigen::half> {\n  
enum {\n    VectorizedCast = 1,\n    SrcCoeffRatio = 1,\n    TgtCoeffRatio = 1\n  };\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet8h pcast<Packet8f, Packet8h>(const Packet8f& a) {\n  return float2half(a);\n}\n\n// Disable the following code since it's broken on too many platforms / compilers.\n//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)\n#elif 0\n\ntemplate <>\nstruct type_casting_traits<Eigen::half, float> {\n  enum {\n    VectorizedCast = 1,\n    SrcCoeffRatio = 1,\n    TgtCoeffRatio = 1\n  };\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4h, Packet4f>(const Packet4h& a) {\n  __int64_t a64 = _mm_cvtm64_si64(a.x);\n  Eigen::half h = raw_uint16_to_half(static_cast<unsigned short>(a64));\n  float f1 = static_cast<float>(h);\n  h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));\n  float f2 = static_cast<float>(h);\n  h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));\n  float f3 = static_cast<float>(h);\n  h = raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));\n  float f4 = static_cast<float>(h);\n  return _mm_set_ps(f4, f3, f2, f1);\n}\n\ntemplate <>\nstruct type_casting_traits<float, Eigen::half> {\n  enum {\n    VectorizedCast = 1,\n    SrcCoeffRatio = 1,\n    TgtCoeffRatio = 1\n  };\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet4h pcast<Packet4f, Packet4h>(const Packet4f& a) {\n  EIGEN_ALIGN16 float aux[4];\n  pstore(aux, a);\n  Eigen::half h0(aux[0]);\n  Eigen::half h1(aux[1]);\n  Eigen::half h2(aux[2]);\n  Eigen::half h3(aux[3]);\n\n  Packet4h result;\n  result.x = _mm_set_pi16(h3.x, h2.x, h1.x, h0.x);\n  return result;\n}\n\n#endif\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_TYPE_CASTING_CUDA_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/Default/Settings.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\n/* All the parameters defined in this file can be specialized in the\n * architecture specific files, and/or by the user.\n * More to come... */\n\n#ifndef EIGEN_DEFAULT_SETTINGS_H\n#define EIGEN_DEFAULT_SETTINGS_H\n\n/** Defines the maximal loop size to enable meta unrolling of loops.\n  * Note that the value here is expressed in Eigen's own notion of \"number of FLOPS\",\n  * it does not correspond to the number of iterations or the number of instructions\n  */\n#ifndef EIGEN_UNROLLING_LIMIT\n#define EIGEN_UNROLLING_LIMIT 100\n#endif\n\n/** Defines the threshold between a \"small\" and a \"large\" matrix.\n  * This threshold is mainly used to select the proper product implementation.\n  */\n#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD\n#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8\n#endif\n\n/** Defines the maximal width of the blocks used in the triangular product and solver\n  * for vectors (level 2 blas xTRMV and xTRSV). The default is 8.\n  */\n#ifndef EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH\n#define EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH 8\n#endif\n\n\n/** Defines the default number of registers available for that architecture.\n  * Currently it must be 8 or 16. Other values will fail.\n  */\n#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS\n#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 8\n#endif\n\n#endif // EIGEN_DEFAULT_SETTINGS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/NEON/Complex.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010 Konstantinos Margaritis <markos@freevec.org>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_COMPLEX_NEON_H\n#define EIGEN_COMPLEX_NEON_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ninline uint32x4_t p4ui_CONJ_XOR() {\n// See bug 1325, clang fails to call vld1q_u64.\n#if EIGEN_COMP_CLANG\n  uint32x4_t ret = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };\n  return ret;\n#else\n  static const uint32_t conj_XOR_DATA[] = { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };\n  return vld1q_u32( conj_XOR_DATA );\n#endif\n}\n\ninline uint32x2_t p2ui_CONJ_XOR() {\n  static const uint32_t conj_XOR_DATA[] = { 0x00000000, 0x80000000 };\n  return vld1_u32( conj_XOR_DATA );\n}\n\n//---------- float ----------\nstruct Packet2cf\n{\n  EIGEN_STRONG_INLINE Packet2cf() {}\n  EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}\n  Packet4f  v;\n};\n\ntemplate<> struct packet_traits<std::complex<float> >  : default_packet_traits\n{\n  typedef Packet2cf type;\n  typedef Packet2cf half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 2,\n    HasHalfPacket = 0,\n\n    HasAdd    = 1,\n    HasSub    = 1,\n    HasMul    = 1,\n    HasDiv    = 1,\n    HasNegate = 1,\n    HasAbs    = 0,\n    HasAbs2   = 0,\n    HasMin    = 0,\n    HasMax    = 0,\n    HasSetLinear = 0\n  };\n};\n\ntemplate<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>&  from)\n{\n  float32x2_t r64;\n  r64 = vld1_f32((float *)&from);\n\n  
return Packet2cf(vcombine_f32(r64, r64));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd<Packet4f>(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub<Packet4f>(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate<Packet4f>(a.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)\n{\n  Packet4ui b = vreinterpretq_u32_f32(a.v);\n  return Packet2cf(vreinterpretq_f32_u32(veorq_u32(b, p4ui_CONJ_XOR())));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)\n{\n  Packet4f v1, v2;\n\n  // Get the real values of a | a1_re | a1_re | a2_re | a2_re |\n  v1 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 0), vdup_lane_f32(vget_high_f32(a.v), 0));\n  // Get the imag values of a | a1_im | a1_im | a2_im | a2_im |\n  v2 = vcombine_f32(vdup_lane_f32(vget_low_f32(a.v), 1), vdup_lane_f32(vget_high_f32(a.v), 1));\n  // Multiply the real a with b\n  v1 = vmulq_f32(v1, b.v);\n  // Multiply the imag a with b\n  v2 = vmulq_f32(v2, b.v);\n  // Conjugate v2 \n  v2 = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(v2), p4ui_CONJ_XOR()));\n  // Swap real/imag elements in v2.\n  v2 = vrev64q_f32(v2);\n  // Add and return the result\n  return Packet2cf(vaddq_f32(v1, v2));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pand   <Packet2cf>(const Packet2cf& a, const Packet2cf& b)\n{\n  return Packet2cf(vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2cf por    <Packet2cf>(const Packet2cf& a, const Packet2cf& b)\n{\n  return Packet2cf(vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pxor   <Packet2cf>(const Packet2cf& a, const Packet2cf& 
b)\n{\n  return Packet2cf(vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b)\n{\n  return Packet2cf(vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a.v),vreinterpretq_u32_f32(b.v))));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pload<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }\n\ntemplate<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> *   to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> *   to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)\n{\n  Packet4f res = pset1<Packet4f>(0.f);\n  res = vsetq_lane_f32(std::real(from[0*stride]), res, 0);\n  res = vsetq_lane_f32(std::imag(from[0*stride]), res, 1);\n  res = vsetq_lane_f32(std::real(from[1*stride]), res, 2);\n  res = vsetq_lane_f32(std::imag(from[1*stride]), res, 3);\n  return Packet2cf(res);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)\n{\n  to[stride*0] = std::complex<float>(vgetq_lane_f32(from.v, 0), vgetq_lane_f32(from.v, 1));\n  to[stride*1] = std::complex<float>(vgetq_lane_f32(from.v, 2), vgetq_lane_f32(from.v, 3));\n}\n\ntemplate<> 
EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> *   addr) { EIGEN_ARM_PREFETCH((float *)addr); }\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<float>  pfirst<Packet2cf>(const Packet2cf& a)\n{\n  std::complex<float> EIGEN_ALIGN16 x[2];\n  vst1q_f32((float *)x, a.v);\n  return x[0];\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)\n{\n  float32x2_t a_lo, a_hi;\n  Packet4f a_r128;\n\n  a_lo = vget_low_f32(a.v);\n  a_hi = vget_high_f32(a.v);\n  a_r128 = vcombine_f32(a_hi, a_lo);\n\n  return Packet2cf(a_r128);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pcplxflip<Packet2cf>(const Packet2cf& a)\n{\n  return Packet2cf(vrev64q_f32(a.v));\n}\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)\n{\n  float32x2_t a1, a2;\n  std::complex<float> s;\n\n  a1 = vget_low_f32(a.v);\n  a2 = vget_high_f32(a.v);\n  a2 = vadd_f32(a1, a2);\n  vst1_f32((float *)&s, a2);\n\n  return s;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)\n{\n  Packet4f sum1, sum2, sum;\n\n  // Add the first two 64-bit float32x2_t of vecs[0]\n  sum1 = vcombine_f32(vget_low_f32(vecs[0].v), vget_low_f32(vecs[1].v));\n  sum2 = vcombine_f32(vget_high_f32(vecs[0].v), vget_high_f32(vecs[1].v));\n  sum = vaddq_f32(sum1, sum2);\n\n  return Packet2cf(sum);\n}\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)\n{\n  float32x2_t a1, a2, v1, v2, prod;\n  std::complex<float> s;\n\n  a1 = vget_low_f32(a.v);\n  a2 = vget_high_f32(a.v);\n   // Get the real values of a | a1_re | a1_re | a2_re | a2_re |\n  v1 = vdup_lane_f32(a1, 0);\n  // Get the real values of a | a1_im | a1_im | a2_im | a2_im |\n  v2 = vdup_lane_f32(a1, 1);\n  // Multiply the real a with b\n  v1 = vmul_f32(v1, a2);\n  // Multiply the imag a with b\n  v2 = vmul_f32(v2, a2);\n  // Conjugate v2 \n  v2 = vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(v2), 
p2ui_CONJ_XOR()));\n  // Swap real/imag elements in v2.\n  v2 = vrev64_f32(v2);\n  // Add v1, v2\n  prod = vadd_f32(v1, v2);\n\n  vst1_f32((float *)&s, prod);\n\n  return s;\n}\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet2cf>\n{\n  EIGEN_STRONG_INLINE static void run(Packet2cf& first, const Packet2cf& second)\n  {\n    if (Offset==1)\n    {\n      first.v = vextq_f32(first.v, second.v, 2);\n    }\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cf, Packet2cf, false,true>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const\n  {\n    return internal::pmul(a, pconj(b));\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cf, Packet2cf, true,false>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const\n  {\n    return internal::pmul(pconj(a), b);\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cf, Packet2cf, true,true>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const\n  {\n    return pconj(internal::pmul(a, b));\n  }\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)\n{\n  // TODO optimize it for NEON\n  Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);\n  Packet4f s, rev_s;\n\n  // this computes the norm\n  s = vmulq_f32(b.v, b.v);\n  rev_s = vrev64q_f32(s);\n\n  return Packet2cf(pdiv(res.v, vaddq_f32(s,rev_s)));\n}\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet2cf,2>& kernel) {\n  Packet4f tmp = vcombine_f32(vget_high_f32(kernel.packet[0].v), 
vget_high_f32(kernel.packet[1].v));\n  kernel.packet[0].v = vcombine_f32(vget_low_f32(kernel.packet[0].v), vget_low_f32(kernel.packet[1].v));\n  kernel.packet[1].v = tmp;\n}\n\n//---------- double ----------\n#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG\n\n// See bug 1325, clang fails to call vld1q_u64.\n#if EIGEN_COMP_CLANG\n  static uint64x2_t p2ul_CONJ_XOR = {0x0, 0x8000000000000000};\n#else\n  const uint64_t  p2ul_conj_XOR_DATA[] = { 0x0, 0x8000000000000000 };\n  static uint64x2_t p2ul_CONJ_XOR = vld1q_u64( p2ul_conj_XOR_DATA );\n#endif\n\nstruct Packet1cd\n{\n  EIGEN_STRONG_INLINE Packet1cd() {}\n  EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {}\n  Packet2d v;\n};\n\ntemplate<> struct packet_traits<std::complex<double> >  : default_packet_traits\n{\n  typedef Packet1cd type;\n  typedef Packet1cd half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 0,\n    size = 1,\n    HasHalfPacket = 0,\n\n    HasAdd    = 1,\n    HasSub    = 1,\n    HasMul    = 1,\n    HasDiv    = 1,\n    HasNegate = 1,\n    HasAbs    = 0,\n    HasAbs2   = 0,\n    HasMin    = 0,\n    HasMax    = 0,\n    HasSetLinear = 0\n  };\n};\n\ntemplate<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pload<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>&  from)\n{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(padd<Packet2d>(a.v,b.v)); 
}\ntemplate<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(psub<Packet2d>(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate<Packet2d>(a.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v), p2ul_CONJ_XOR))); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)\n{\n  Packet2d v1, v2;\n\n  // Get the real values of a \n  v1 = vdupq_lane_f64(vget_low_f64(a.v), 0);\n  // Get the imag values of a\n  v2 = vdupq_lane_f64(vget_high_f64(a.v), 0);\n  // Multiply the real a with b\n  v1 = vmulq_f64(v1, b.v);\n  // Multiply the imag a with b\n  v2 = vmulq_f64(v2, b.v);\n  // Conjugate v2 \n  v2 = vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(v2), p2ul_CONJ_XOR));\n  // Swap real/imag elements in v2.\n  v2 = preverse<Packet2d>(v2);\n  // Add and return the result\n  return Packet1cd(vaddq_f64(v1, v2));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pand   <Packet1cd>(const Packet1cd& a, const Packet1cd& b)\n{\n  return Packet1cd(vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet1cd por    <Packet1cd>(const Packet1cd& a, const Packet1cd& b)\n{\n  return Packet1cd(vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pxor   <Packet1cd>(const Packet1cd& a, const Packet1cd& b)\n{\n  return Packet1cd(vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b)\n{\n  return Packet1cd(vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a.v),vreinterpretq_u64_f64(b.v))));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd 
ploaddup<Packet1cd>(const std::complex<double>* from) { return pset1<Packet1cd>(*from); }\n\ntemplate<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> *   to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> *   to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }\n\ntemplate<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> *   addr) { EIGEN_ARM_PREFETCH((double *)addr); }\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride)\n{\n  Packet2d res = pset1<Packet2d>(0.0);\n  res = vsetq_lane_f64(std::real(from[0*stride]), res, 0);\n  res = vsetq_lane_f64(std::imag(from[0*stride]), res, 1);\n  return Packet1cd(res);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride)\n{\n  to[stride*0] = std::complex<double>(vgetq_lane_f64(from.v, 0), vgetq_lane_f64(from.v, 1));\n}\n\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double>  pfirst<Packet1cd>(const Packet1cd& a)\n{\n  std::complex<double> EIGEN_ALIGN16 res;\n  pstore<std::complex<double> >(&res, a);\n\n  return res;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a) { return pfirst(a); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs) { return vecs[0]; }\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a) { return pfirst(a); }\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet1cd>\n{\n  static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/)\n  {\n    // FIXME is it sure we 
never have to align a Packet1cd?\n    // Even though a std::complex<double> has 16 bytes, it is not necessarily aligned on a 16 bytes boundary...\n  }\n};\n\ntemplate<> struct conj_helper<Packet1cd, Packet1cd, false,true>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const\n  {\n    return internal::pmul(a, pconj(b));\n  }\n};\n\ntemplate<> struct conj_helper<Packet1cd, Packet1cd, true,false>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const\n  {\n    return internal::pmul(pconj(a), b);\n  }\n};\n\ntemplate<> struct conj_helper<Packet1cd, Packet1cd, true,true>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const\n  {\n    return pconj(internal::pmul(a, b));\n  }\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)\n{\n  // TODO optimize it for NEON\n  Packet1cd res = conj_helper<Packet1cd,Packet1cd,false,true>().pmul(a,b);\n  Packet2d s = pmul<Packet2d>(b.v, b.v);\n  Packet2d rev_s = preverse<Packet2d>(s);\n\n  return Packet1cd(pdiv(res.v, padd<Packet2d>(s,rev_s)));\n}\n\nEIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)\n{\n  return Packet1cd(preverse(Packet2d(x.v)));\n}\n\nEIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)\n{\n  Packet2d tmp = vcombine_f64(vget_high_f64(kernel.packet[0].v), vget_high_f64(kernel.packet[1].v));\n  kernel.packet[0].v = vcombine_f64(vget_low_f64(kernel.packet[0].v), vget_low_f64(kernel.packet[1].v));\n  kernel.packet[1].v = 
tmp;\n}\n#endif // EIGEN_ARCH_ARM64\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_COMPLEX_NEON_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/NEON/MathFunctions.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n/* The sin, cos, exp, and log functions of this file come from\n * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/\n */\n\n#ifndef EIGEN_MATH_FUNCTIONS_NEON_H\n#define EIGEN_MATH_FUNCTIONS_NEON_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f pexp<Packet4f>(const Packet4f& _x)\n{\n  Packet4f x = _x;\n  Packet4f tmp, fx;\n\n  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);\n  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);\n  _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);\n  _EIGEN_DECLARE_CONST_Packet4f(exp_hi,  88.3762626647950f);\n  _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);\n\n  x = vminq_f32(x, p4f_exp_hi);\n  x = vmaxq_f32(x, p4f_exp_lo);\n\n  /* express exp(x) as exp(g + n*log(2)) */\n  fx = vmlaq_f32(p4f_half, x, p4f_cephes_LOG2EF);\n\n  /* perform a floorf */\n  tmp = vcvtq_f32_s32(vcvtq_s32_f32(fx));\n\n  /* if greater, substract 1 */\n  Packet4ui mask = vcgtq_f32(tmp, fx);\n  mask = vandq_u32(mask, vreinterpretq_u32_f32(p4f_1));\n\n  fx = 
vsubq_f32(tmp, vreinterpretq_f32_u32(mask));\n\n  tmp = vmulq_f32(fx, p4f_cephes_exp_C1);\n  Packet4f z = vmulq_f32(fx, p4f_cephes_exp_C2);\n  x = vsubq_f32(x, tmp);\n  x = vsubq_f32(x, z);\n\n  Packet4f y = vmulq_f32(p4f_cephes_exp_p0, x);\n  z = vmulq_f32(x, x);\n  y = vaddq_f32(y, p4f_cephes_exp_p1);\n  y = vmulq_f32(y, x);\n  y = vaddq_f32(y, p4f_cephes_exp_p2);\n  y = vmulq_f32(y, x);\n  y = vaddq_f32(y, p4f_cephes_exp_p3);\n  y = vmulq_f32(y, x);\n  y = vaddq_f32(y, p4f_cephes_exp_p4);\n  y = vmulq_f32(y, x);\n  y = vaddq_f32(y, p4f_cephes_exp_p5);\n\n  y = vmulq_f32(y, z);\n  y = vaddq_f32(y, x);\n  y = vaddq_f32(y, p4f_1);\n\n  /* build 2^n */\n  int32x4_t mm;\n  mm = vcvtq_s32_f32(fx);\n  mm = vaddq_s32(mm, p4i_0x7f);\n  mm = vshlq_n_s32(mm, 23);\n  Packet4f pow2n = vreinterpretq_f32_s32(mm);\n\n  y = vmulq_f32(y, pow2n);\n  return y;\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_MATH_FUNCTIONS_NEON_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/NEON/PacketMath.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010 Konstantinos Margaritis <markos@freevec.org>\n// Heavily based on Gael's SSE version.\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PACKET_MATH_NEON_H\n#define EIGEN_PACKET_MATH_NEON_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD\n#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8\n#endif\n\n#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD\n#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD\n#endif\n\n#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD\n#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD\n#endif\n\n#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS\n#if EIGEN_ARCH_ARM64\n#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32\n#else\n#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16 \n#endif\n#endif\n\ntypedef float32x2_t Packet2f;\ntypedef float32x4_t Packet4f;\ntypedef int32x4_t   Packet4i;\ntypedef int32x2_t   Packet2i;\ntypedef uint32x4_t  Packet4ui;\n\n#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \\\n  const Packet4f p4f_##NAME = pset1<Packet4f>(X)\n\n#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \\\n  const Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1<int32_t>(X))\n\n#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \\\n  const Packet4i p4i_##NAME = pset1<Packet4i>(X)\n\n#if EIGEN_ARCH_ARM64\n  // __builtin_prefetch tends to do nothing on ARM64 compilers because the\n  // prefetch instructions there are too detailed for __builtin_prefetch to map\n  // meaningfully to them.\n  #define EIGEN_ARM_PREFETCH(ADDR)  __asm__ __volatile__(\"prfm pldl1keep, [%[addr]]\\n\" ::[addr] \"r\"(ADDR) : );\n#elif EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC\n 
 #define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);\n#elif defined __pld\n  #define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)\n#elif EIGEN_ARCH_ARM32\n  #define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ (\"pld [%[addr]]\\n\" :: [addr] \"r\" (ADDR) : );\n#else\n  // by default no explicit prefetching\n  #define EIGEN_ARM_PREFETCH(ADDR)\n#endif\n\ntemplate<> struct packet_traits<float>  : default_packet_traits\n{\n  typedef Packet4f type;\n  typedef Packet4f half; // Packet2f intrinsics not implemented yet\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 4,\n    HasHalfPacket=0, // Packet2f intrinsics not implemented yet\n   \n    HasDiv  = 1,\n    // FIXME check the Has*\n    HasSin  = 0,\n    HasCos  = 0,\n    HasLog  = 0,\n    HasExp  = 1,\n    HasSqrt = 0\n  };\n};\ntemplate<> struct packet_traits<int32_t>    : default_packet_traits\n{\n  typedef Packet4i type;\n  typedef Packet4i half; // Packet2i intrinsics not implemented yet\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=4,\n    HasHalfPacket=0 // Packet2i intrinsics not implemented yet\n    // FIXME check the Has*\n  };\n};\n\n#if EIGEN_GNUC_AT_MOST(4,4) && !EIGEN_COMP_LLVM\n// workaround gcc 4.2, 4.3 and 4.4 compilatin issue\nEIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }\nEIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); }\nEIGEN_STRONG_INLINE float32x2_t vld1_dup_f32 (const float* x) { return ::vld1_dup_f32 ((const float32_t*)x); }\nEIGEN_STRONG_INLINE void        vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); }\nEIGEN_STRONG_INLINE void        vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }\n#endif\n\ntemplate<> struct unpacket_traits<Packet4f> { typedef float   type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };\ntemplate<> struct unpacket_traits<Packet4i> { typedef int32_t 
type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return vdupq_n_f32(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int32_t&    from)   { return vdupq_n_s32(from); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)\n{\n  const float32_t f[] = {0, 1, 2, 3};\n  Packet4f countdown = vld1q_f32(f);\n  return vaddq_f32(pset1<Packet4f>(a), countdown);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int32_t& a)\n{\n  const int32_t i[] = {0, 1, 2, 3};\n  Packet4i countdown = vld1q_s32(i);\n  return vaddq_s32(pset1<Packet4i>(a), countdown);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n#if EIGEN_ARCH_ARM64\n  return vdivq_f32(a,b);\n#else\n  Packet4f inv, restep, div;\n\n  // NEON does not 
offer a divide instruction, we have to do a reciprocal approximation\n  // However NEON in contrast to other SIMD engines (AltiVec/SSE), offers\n  // a reciprocal estimate AND a reciprocal step -which saves a few instructions\n  // vrecpeq_f32() returns an estimate to 1/b, which we will finetune with\n  // Newton-Raphson and vrecpsq_f32()\n  inv = vrecpeq_f32(b);\n\n  // This returns a differential, by which we will have to multiply inv to get a better\n  // approximation of 1/b.\n  restep = vrecpsq_f32(b, inv);\n  inv = vmulq_f32(restep, inv);\n\n  // Finally, multiply a by 1/b and get the wanted result of the division.\n  div = vmulq_f32(a, inv);\n\n  return div;\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)\n{ eigen_assert(false && \"packet integer division are not supported by NEON\");\n  return pset1<Packet4i>(0);\n}\n\n// Clang/ARM wrongly advertises __ARM_FEATURE_FMA even when it's not available,\n// then implements a slow software scalar fallback calling fmaf()!\n// Filed LLVM bug:\n//     https://llvm.org/bugs/show_bug.cgi?id=27216\n#if (defined __ARM_FEATURE_FMA) && !(EIGEN_COMP_CLANG && EIGEN_ARCH_ARM)\n// See bug 936.\n// FMA is available on VFPv4 i.e. when compiling with -mfpu=neon-vfpv4.\n// FMA is a true fused multiply-add i.e. only 1 rounding at the end, no intermediate rounding.\n// MLA is not fused i.e. does 2 roundings.\n// In addition to giving better accuracy, FMA also gives better performance here on a Krait (Nexus 4):\n// MLA: 10 GFlop/s ; FMA: 12 GFlops/s.\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vfmaq_f32(c,a,b); }\n#else\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) {\n#if EIGEN_COMP_CLANG && EIGEN_ARCH_ARM\n  // Clang/ARM will replace VMLA by VMUL+VADD at least for some values of -mcpu,\n  // at least -mcpu=cortex-a8 and -mcpu=cortex-a7. 
Since the former is the default on\n  // -march=armv7-a, that is a very common case.\n  // See e.g. this thread:\n  //     http://lists.llvm.org/pipermail/llvm-dev/2013-December/068806.html\n  // Filed LLVM bug:\n  //     https://llvm.org/bugs/show_bug.cgi?id=27219\n  Packet4f r = c;\n  asm volatile(\n    \"vmla.f32 %q[r], %q[a], %q[b]\"\n    : [r] \"+w\" (r)\n    : [a] \"w\" (a),\n      [b] \"w\" (b)\n    : );\n  return r;\n#else\n  return vmlaq_f32(c,a,b);\n#endif\n}\n#endif\n\n// No FMA instruction for int, so use MLA unconditionally.\ntemplate<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }\n\n// Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics\ntemplate<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n  return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n  return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& 
b)\n{\n  return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n  return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*    from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int32_t*  from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float*   from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int32_t* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)\n{\n  float32x2_t lo, hi;\n  lo = vld1_dup_f32(from);\n  hi = vld1_dup_f32(from+1);\n  return vcombine_f32(lo, hi);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int32_t* from)\n{\n  int32x2_t lo, hi;\n  lo = vld1_dup_s32(from);\n  hi = vld1_dup_s32(from+1);\n  return vcombine_s32(lo, hi);\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstore<float>  (float*    to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to, from); }\ntemplate<> EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t*  to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to, from); }\n\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<float>  (float*   to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to, from); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet4i& from) { 
EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to, from); }\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)\n{\n  Packet4f res = pset1<Packet4f>(0.f);\n  res = vsetq_lane_f32(from[0*stride], res, 0);\n  res = vsetq_lane_f32(from[1*stride], res, 1);\n  res = vsetq_lane_f32(from[2*stride], res, 2);\n  res = vsetq_lane_f32(from[3*stride], res, 3);\n  return res;\n}\ntemplate<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int32_t, Packet4i>(const int32_t* from, Index stride)\n{\n  Packet4i res = pset1<Packet4i>(0);\n  res = vsetq_lane_s32(from[0*stride], res, 0);\n  res = vsetq_lane_s32(from[1*stride], res, 1);\n  res = vsetq_lane_s32(from[2*stride], res, 2);\n  res = vsetq_lane_s32(from[3*stride], res, 3);\n  return res;\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)\n{\n  to[stride*0] = vgetq_lane_f32(from, 0);\n  to[stride*1] = vgetq_lane_f32(from, 1);\n  to[stride*2] = vgetq_lane_f32(from, 2);\n  to[stride*3] = vgetq_lane_f32(from, 3);\n}\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<int32_t, Packet4i>(int32_t* to, const Packet4i& from, Index stride)\n{\n  to[stride*0] = vgetq_lane_s32(from, 0);\n  to[stride*1] = vgetq_lane_s32(from, 1);\n  to[stride*2] = vgetq_lane_s32(from, 2);\n  to[stride*3] = vgetq_lane_s32(from, 3);\n}\n\ntemplate<> EIGEN_STRONG_INLINE void prefetch<float>  (const float*    addr) { EIGEN_ARM_PREFETCH(addr); }\ntemplate<> EIGEN_STRONG_INLINE void prefetch<int32_t>(const int32_t*  addr) { EIGEN_ARM_PREFETCH(addr); }\n\n// FIXME only store the 2 first elements ?\ntemplate<> EIGEN_STRONG_INLINE float   pfirst<Packet4f>(const Packet4f& a) { float   EIGEN_ALIGN16 x[4]; vst1q_f32(x, a); return x[0]; }\ntemplate<> EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) { int32_t EIGEN_ALIGN16 x[4]; vst1q_s32(x, a); return x[0]; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) {\n  float32x2_t 
a_lo, a_hi;\n  Packet4f a_r64;\n\n  a_r64 = vrev64q_f32(a);\n  a_lo = vget_low_f32(a_r64);\n  a_hi = vget_high_f32(a_r64);\n  return vcombine_f32(a_hi, a_lo);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) {\n  int32x2_t a_lo, a_hi;\n  Packet4i a_r64;\n\n  a_r64 = vrev64q_s32(a);\n  a_lo = vget_low_s32(a_r64);\n  a_hi = vget_high_s32(a_r64);\n  return vcombine_s32(a_hi, a_lo);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }\n\ntemplate<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)\n{\n  float32x2_t a_lo, a_hi, sum;\n\n  a_lo = vget_low_f32(a);\n  a_hi = vget_high_f32(a);\n  sum = vpadd_f32(a_lo, a_hi);\n  sum = vpadd_f32(sum, sum);\n  return vget_lane_f32(sum, 0);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)\n{\n  float32x4x2_t vtrn1, vtrn2, res1, res2;\n  Packet4f sum1, sum2, sum;\n\n  // NEON zip performs interleaving of the supplied vectors.\n  // We perform two interleaves in a row to acquire the transposed vector\n  vtrn1 = vzipq_f32(vecs[0], vecs[2]);\n  vtrn2 = vzipq_f32(vecs[1], vecs[3]);\n  res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);\n  res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);\n\n  // Do the addition of the resulting vectors\n  sum1 = vaddq_f32(res1.val[0], res1.val[1]);\n  sum2 = vaddq_f32(res2.val[0], res2.val[1]);\n  sum = vaddq_f32(sum1, sum2);\n\n  return sum;\n}\n\ntemplate<> EIGEN_STRONG_INLINE int32_t predux<Packet4i>(const Packet4i& a)\n{\n  int32x2_t a_lo, a_hi, sum;\n\n  a_lo = vget_low_s32(a);\n  a_hi = vget_high_s32(a);\n  sum = vpadd_s32(a_lo, a_hi);\n  sum = vpadd_s32(sum, sum);\n  return vget_lane_s32(sum, 0);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)\n{\n  int32x4x2_t vtrn1, vtrn2, res1, res2;\n  Packet4i sum1, sum2, sum;\n\n  // NEON zip performs interleaving of the 
supplied vectors.\n  // We perform two interleaves in a row to acquire the transposed vector\n  vtrn1 = vzipq_s32(vecs[0], vecs[2]);\n  vtrn2 = vzipq_s32(vecs[1], vecs[3]);\n  res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);\n  res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);\n\n  // Do the addition of the resulting vectors\n  sum1 = vaddq_s32(res1.val[0], res1.val[1]);\n  sum2 = vaddq_s32(res2.val[0], res2.val[1]);\n  sum = vaddq_s32(sum1, sum2);\n\n  return sum;\n}\n\n// Other reduction functions:\n// mul\ntemplate<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)\n{\n  float32x2_t a_lo, a_hi, prod;\n\n  // Get a_lo = |a1|a2| and a_hi = |a3|a4|\n  a_lo = vget_low_f32(a);\n  a_hi = vget_high_f32(a);\n  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|\n  prod = vmul_f32(a_lo, a_hi);\n  // Multiply prod with its swapped value |a2*a4|a1*a3|\n  prod = vmul_f32(prod, vrev64_f32(prod));\n\n  return vget_lane_f32(prod, 0);\n}\ntemplate<> EIGEN_STRONG_INLINE int32_t predux_mul<Packet4i>(const Packet4i& a)\n{\n  int32x2_t a_lo, a_hi, prod;\n\n  // Get a_lo = |a1|a2| and a_hi = |a3|a4|\n  a_lo = vget_low_s32(a);\n  a_hi = vget_high_s32(a);\n  // Get the product of a_lo * a_hi -> |a1*a3|a2*a4|\n  prod = vmul_s32(a_lo, a_hi);\n  // Multiply prod with its swapped value |a2*a4|a1*a3|\n  prod = vmul_s32(prod, vrev64_s32(prod));\n\n  return vget_lane_s32(prod, 0);\n}\n\n// min\ntemplate<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)\n{\n  float32x2_t a_lo, a_hi, min;\n\n  a_lo = vget_low_f32(a);\n  a_hi = vget_high_f32(a);\n  min = vpmin_f32(a_lo, a_hi);\n  min = vpmin_f32(min, min);\n\n  return vget_lane_f32(min, 0);\n}\n\ntemplate<> EIGEN_STRONG_INLINE int32_t predux_min<Packet4i>(const Packet4i& a)\n{\n  int32x2_t a_lo, a_hi, min;\n\n  a_lo = vget_low_s32(a);\n  a_hi = vget_high_s32(a);\n  min = vpmin_s32(a_lo, a_hi);\n  min = vpmin_s32(min, min);\n  \n  return vget_lane_s32(min, 0);\n}\n\n// max\ntemplate<> EIGEN_STRONG_INLINE float 
predux_max<Packet4f>(const Packet4f& a)\n{\n  float32x2_t a_lo, a_hi, max;\n\n  a_lo = vget_low_f32(a);\n  a_hi = vget_high_f32(a);\n  max = vpmax_f32(a_lo, a_hi);\n  max = vpmax_f32(max, max);\n\n  return vget_lane_f32(max, 0);\n}\n\ntemplate<> EIGEN_STRONG_INLINE int32_t predux_max<Packet4i>(const Packet4i& a)\n{\n  int32x2_t a_lo, a_hi, max;\n\n  a_lo = vget_low_s32(a);\n  a_hi = vget_high_s32(a);\n  max = vpmax_s32(a_lo, a_hi);\n  max = vpmax_s32(max, max);\n\n  return vget_lane_s32(max, 0);\n}\n\n// this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing incorrect compilation errors,\n// see bug 347 and this LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=11074\n#define PALIGN_NEON(Offset,Type,Command) \\\ntemplate<>\\\nstruct palign_impl<Offset,Type>\\\n{\\\n    EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\\\n    {\\\n        if (Offset!=0)\\\n            first = Command(first, second, Offset);\\\n    }\\\n};\\\n\nPALIGN_NEON(0,Packet4f,vextq_f32)\nPALIGN_NEON(1,Packet4f,vextq_f32)\nPALIGN_NEON(2,Packet4f,vextq_f32)\nPALIGN_NEON(3,Packet4f,vextq_f32)\nPALIGN_NEON(0,Packet4i,vextq_s32)\nPALIGN_NEON(1,Packet4i,vextq_s32)\nPALIGN_NEON(2,Packet4i,vextq_s32)\nPALIGN_NEON(3,Packet4i,vextq_s32)\n\n#undef PALIGN_NEON\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet4f,4>& kernel) {\n  float32x4x2_t tmp1 = vzipq_f32(kernel.packet[0], kernel.packet[1]);\n  float32x4x2_t tmp2 = vzipq_f32(kernel.packet[2], kernel.packet[3]);\n\n  kernel.packet[0] = vcombine_f32(vget_low_f32(tmp1.val[0]), vget_low_f32(tmp2.val[0]));\n  kernel.packet[1] = vcombine_f32(vget_high_f32(tmp1.val[0]), vget_high_f32(tmp2.val[0]));\n  kernel.packet[2] = vcombine_f32(vget_low_f32(tmp1.val[1]), vget_low_f32(tmp2.val[1]));\n  kernel.packet[3] = vcombine_f32(vget_high_f32(tmp1.val[1]), vget_high_f32(tmp2.val[1]));\n}\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet4i,4>& kernel) {\n  int32x4x2_t tmp1 = vzipq_s32(kernel.packet[0], 
kernel.packet[1]);\n  int32x4x2_t tmp2 = vzipq_s32(kernel.packet[2], kernel.packet[3]);\n  kernel.packet[0] = vcombine_s32(vget_low_s32(tmp1.val[0]), vget_low_s32(tmp2.val[0]));\n  kernel.packet[1] = vcombine_s32(vget_high_s32(tmp1.val[0]), vget_high_s32(tmp2.val[0]));\n  kernel.packet[2] = vcombine_s32(vget_low_s32(tmp1.val[1]), vget_low_s32(tmp2.val[1]));\n  kernel.packet[3] = vcombine_s32(vget_high_s32(tmp1.val[1]), vget_high_s32(tmp2.val[1]));\n}\n\n//---------- double ----------\n\n// Clang 3.5 in the iOS toolchain has an ICE triggered by NEON intrisics for double.\n// Confirmed at least with __apple_build_version__ = 6000054.\n#ifdef __apple_build_version__\n// Let's hope that by the time __apple_build_version__ hits the 601* range, the bug will be fixed.\n// https://gist.github.com/yamaya/2924292 suggests that the 3 first digits are only updated with\n// major toolchain updates.\n#define EIGEN_APPLE_DOUBLE_NEON_BUG (__apple_build_version__ < 6010000)\n#else\n#define EIGEN_APPLE_DOUBLE_NEON_BUG 0\n#endif\n\n#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG\n\n// Bug 907: workaround missing declarations of the following two functions in the ADK\n// Defining these functions as templates ensures that if these intrinsics are\n// already defined in arm_neon.h, then our workaround doesn't cause a conflict\n// and has lower priority in overload resolution.\ntemplate <typename T>\nuint64x2_t vreinterpretq_u64_f64(T a)\n{\n  return (uint64x2_t) a;\n}\n\ntemplate <typename T>\nfloat64x2_t vreinterpretq_f64_u64(T a)\n{\n  return (float64x2_t) a;\n}\n\ntypedef float64x2_t Packet2d;\ntypedef float64x1_t Packet1d;\n\ntemplate<> struct packet_traits<double>  : default_packet_traits\n{\n  typedef Packet2d type;\n  typedef Packet2d half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 2,\n    HasHalfPacket=0,\n   \n    HasDiv  = 1,\n    // FIXME check the Has*\n    HasSin  = 0,\n    HasCos  = 0,\n    HasLog  = 0,\n    HasExp  = 0,\n    HasSqrt = 0\n  
};\n};\n\ntemplate<> struct unpacket_traits<Packet2d> { typedef double  type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double&  from) { return vdupq_n_f64(from); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a)\n{\n  const double countdown_raw[] = {0.0,1.0};\n  const Packet2d countdown = vld1q_f64(countdown_raw);\n  return vaddq_f64(pset1<Packet2d>(a), countdown);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return vaddq_f64(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return vsubq_f64(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return vnegq_f64(a); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return vmulq_f64(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return vdivq_f64(a,b); }\n\n#ifdef __ARM_FEATURE_FMA\n// See bug 936. 
See above comment about FMA for float.\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vfmaq_f64(c,a,b); }\n#else\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vmlaq_f64(c,a,b); }\n#endif\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vminq_f64(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vmaxq_f64(a,b); }\n\n// Logical Operations are not supported for float, so we have to reinterpret casts using NEON intrinsics\ntemplate<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b)\n{\n  return vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b)\n{\n  return vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b)\n{\n  return vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b)\n{\n  return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f64(from); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f64(from); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double*   from)\n{\n  return vld1q_dup_f64(from);\n}\ntemplate<> EIGEN_STRONG_INLINE void pstore<double>(double*   to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_f64(to, from); 
}\n\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<double>(double*  to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to, from); }\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)\n{\n  Packet2d res = pset1<Packet2d>(0.0);\n  res = vsetq_lane_f64(from[0*stride], res, 0);\n  res = vsetq_lane_f64(from[1*stride], res, 1);\n  return res;\n}\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)\n{\n  to[stride*0] = vgetq_lane_f64(from, 0);\n  to[stride*1] = vgetq_lane_f64(from, 1);\n}\ntemplate<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_ARM_PREFETCH(addr); }\n\n// FIXME only store the 2 first elements ?\ntemplate<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(a, 0); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return vcombine_f64(vget_high_f64(a), vget_low_f64(a)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vabsq_f64(a); }\n\n#if EIGEN_COMP_CLANG && defined(__apple_build_version__)\n// workaround ICE, see bug 907\ntemplate<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return (vget_low_f64(a) + vget_high_f64(a))[0]; }\n#else\ntemplate<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) + vget_high_f64(a), 0); }\n#endif\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)\n{\n  float64x2_t trn1, trn2;\n\n  // NEON zip performs interleaving of the supplied vectors.\n  // We perform two interleaves in a row to acquire the transposed vector\n  trn1 = vzip1q_f64(vecs[0], vecs[1]);\n  trn2 = vzip2q_f64(vecs[0], vecs[1]);\n\n  // Do the addition of the resulting vectors\n  return vaddq_f64(trn1, trn2);\n}\n// Other reduction functions:\n// mul\n#if EIGEN_COMP_CLANG && 
defined(__apple_build_version__)\ntemplate<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) { return (vget_low_f64(a) * vget_high_f64(a))[0]; }\n#else\ntemplate<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a) { return vget_lane_f64(vget_low_f64(a) * vget_high_f64(a), 0); }\n#endif\n\n// min\ntemplate<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(vpminq_f64(a, a), 0); }\n\n// max\ntemplate<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(vpmaxq_f64(a, a), 0); }\n\n// this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing incorrect compilation errors,\n// see bug 347 and this LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=11074\n#define PALIGN_NEON(Offset,Type,Command) \\\ntemplate<>\\\nstruct palign_impl<Offset,Type>\\\n{\\\n    EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\\\n    {\\\n        if (Offset!=0)\\\n            first = Command(first, second, Offset);\\\n    }\\\n};\\\n\nPALIGN_NEON(0,Packet2d,vextq_f64)\nPALIGN_NEON(1,Packet2d,vextq_f64)\n#undef PALIGN_NEON\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet2d,2>& kernel) {\n  float64x2_t trn1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]);\n  float64x2_t trn2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]);\n\n  kernel.packet[0] = trn1;\n  kernel.packet[1] = trn2;\n}\n#endif // EIGEN_ARCH_ARM64 \n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_PACKET_MATH_NEON_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/SSE/Complex.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_COMPLEX_SSE_H\n#define EIGEN_COMPLEX_SSE_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n//---------- float ----------\nstruct Packet2cf\n{\n  EIGEN_STRONG_INLINE Packet2cf() {}\n  EIGEN_STRONG_INLINE explicit Packet2cf(const __m128& a) : v(a) {}\n  __m128  v;\n};\n\n// Use the packet_traits defined in AVX/PacketMath.h instead if we're going\n// to leverage AVX instructions.\n#ifndef EIGEN_VECTORIZE_AVX\ntemplate<> struct packet_traits<std::complex<float> >  : default_packet_traits\n{\n  typedef Packet2cf type;\n  typedef Packet2cf half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 2,\n    HasHalfPacket = 0,\n\n    HasAdd    = 1,\n    HasSub    = 1,\n    HasMul    = 1,\n    HasDiv    = 1,\n    HasNegate = 1,\n    HasAbs    = 0,\n    HasAbs2   = 0,\n    HasMin    = 0,\n    HasMax    = 0,\n    HasSetLinear = 0,\n    HasBlend = 1\n  };\n};\n#endif\n\ntemplate<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_add_ps(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_sub_ps(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a)\n{\n  const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));\n  return Packet2cf(_mm_xor_ps(a.v,mask));\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pconj(const 
Packet2cf& a)\n{\n  const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));\n  return Packet2cf(_mm_xor_ps(a.v,mask));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)\n{\n  #ifdef EIGEN_VECTORIZE_SSE3\n  return Packet2cf(_mm_addsub_ps(_mm_mul_ps(_mm_moveldup_ps(a.v), b.v),\n                                 _mm_mul_ps(_mm_movehdup_ps(a.v),\n                                            vec4f_swizzle1(b.v, 1, 0, 3, 2))));\n//   return Packet2cf(_mm_addsub_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v),\n//                                  _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),\n//                                             vec4f_swizzle1(b.v, 1, 0, 3, 2))));\n  #else\n  const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x00000000,0x80000000,0x00000000));\n  return Packet2cf(_mm_add_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v),\n                              _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),\n                                                    vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask)));\n  #endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pand   <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_and_ps(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf por    <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_or_ps(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pxor   <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_xor_ps(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_andnot_ps(a.v,b.v)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>(&numext::real_ref(*from))); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const 
std::complex<float>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>(&numext::real_ref(*from))); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>&  from)\n{\n  Packet2cf res;\n#if EIGEN_GNUC_AT_MOST(4,2)\n  // Workaround annoying \"may be used uninitialized in this function\" warning with gcc 4.2\n  res.v = _mm_loadl_pi(_mm_set1_ps(0.0f), reinterpret_cast<const __m64*>(&from));\n#elif EIGEN_GNUC_AT_LEAST(4,6)\n  // Suppress annoying \"may be used uninitialized in this function\" warning with gcc >= 4.6\n  #pragma GCC diagnostic push\n  #pragma GCC diagnostic ignored \"-Wuninitialized\"\n  res.v = _mm_loadl_pi(res.v, (const __m64*)&from);\n  #pragma GCC diagnostic pop\n#else\n  res.v = _mm_loadl_pi(res.v, (const __m64*)&from);\n#endif\n  return Packet2cf(_mm_movelh_ps(res.v,res.v));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>* from) { return pset1<Packet2cf>(*from); }\n\ntemplate<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> *   to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore(&numext::real_ref(*to), Packet4f(from.v)); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> *   to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(&numext::real_ref(*to), Packet4f(from.v)); }\n\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)\n{\n  return Packet2cf(_mm_set_ps(std::imag(from[1*stride]), std::real(from[1*stride]),\n                              std::imag(from[0*stride]), std::real(from[0*stride])));\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)\n{\n  to[stride*0] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 0)),\n                                     
_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 1)));\n  to[stride*1] = std::complex<float>(_mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 2)),\n                                     _mm_cvtss_f32(_mm_shuffle_ps(from.v, from.v, 3)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> *   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<float>  pfirst<Packet2cf>(const Packet2cf& a)\n{\n  #if EIGEN_GNUC_AT_MOST(4,3)\n  // Workaround gcc 4.2 ICE - this is not performance wise ideal, but who cares...\n  // This workaround also fix invalid code generation with gcc 4.3\n  EIGEN_ALIGN16 std::complex<float> res[2];\n  _mm_store_ps((float*)res, a.v);\n  return res[0];\n  #else\n  std::complex<float> res;\n  _mm_storel_pi((__m64*)&res, a.v);\n  return res;\n  #endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a) { return Packet2cf(_mm_castpd_ps(preverse(Packet2d(_mm_castps_pd(a.v))))); }\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)\n{\n  return pfirst(Packet2cf(_mm_add_ps(a.v, _mm_movehl_ps(a.v,a.v))));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)\n{\n  return Packet2cf(_mm_add_ps(_mm_movelh_ps(vecs[0].v,vecs[1].v), _mm_movehl_ps(vecs[1].v,vecs[0].v)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)\n{\n  return pfirst(pmul(a, Packet2cf(_mm_movehl_ps(a.v,a.v))));\n}\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet2cf>\n{\n  static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second)\n  {\n    if (Offset==1)\n    {\n      first.v = _mm_movehl_ps(first.v, first.v);\n      first.v = _mm_movelh_ps(first.v, second.v);\n    }\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cf, Packet2cf, false,true>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const 
Packet2cf& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const\n  {\n    #ifdef EIGEN_VECTORIZE_SSE3\n    return internal::pmul(a, pconj(b));\n    #else\n    const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));\n    return Packet2cf(_mm_add_ps(_mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), mask),\n                                _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),\n                                           vec4f_swizzle1(b.v, 1, 0, 3, 2))));\n    #endif\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cf, Packet2cf, true,false>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const\n  {\n    #ifdef EIGEN_VECTORIZE_SSE3\n    return internal::pmul(pconj(a), b);\n    #else\n    const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));\n    return Packet2cf(_mm_add_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v),\n                                _mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),\n                                                      vec4f_swizzle1(b.v, 1, 0, 3, 2)), mask)));\n    #endif\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cf, Packet2cf, true,true>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const\n  {\n    #ifdef EIGEN_VECTORIZE_SSE3\n    return pconj(internal::pmul(a, b));\n    #else\n    const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0x00000000,0x80000000,0x00000000,0x80000000));\n    return Packet2cf(_mm_sub_ps(_mm_xor_ps(_mm_mul_ps(vec4f_swizzle1(a.v, 0, 0, 2, 2), b.v), mask),\n                     
           _mm_mul_ps(vec4f_swizzle1(a.v, 1, 1, 3, 3),\n                                           vec4f_swizzle1(b.v, 1, 0, 3, 2))));\n    #endif\n  }\n};\n\ntemplate<> struct conj_helper<Packet4f, Packet2cf, false,false>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet4f& x, const Packet2cf& y, const Packet2cf& c) const\n  { return padd(c, pmul(x,y)); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet4f& x, const Packet2cf& y) const\n  { return Packet2cf(Eigen::internal::pmul<Packet4f>(x, y.v)); }\n};\n\ntemplate<> struct conj_helper<Packet2cf, Packet4f, false,false>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet4f& y, const Packet2cf& c) const\n  { return padd(c, pmul(x,y)); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& x, const Packet4f& y) const\n  { return Packet2cf(Eigen::internal::pmul<Packet4f>(x.v, y)); }\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)\n{\n  // TODO optimize it for SSE3 and 4\n  Packet2cf res = conj_helper<Packet2cf,Packet2cf,false,true>().pmul(a,b);\n  __m128 s = _mm_mul_ps(b.v,b.v);\n  return Packet2cf(_mm_div_ps(res.v,_mm_add_ps(s,_mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(s), 0xb1)))));\n}\n\nEIGEN_STRONG_INLINE Packet2cf pcplxflip/* <Packet2cf> */(const Packet2cf& x)\n{\n  return Packet2cf(vec4f_swizzle1(x.v, 1, 0, 3, 2));\n}\n\n\n//---------- double ----------\nstruct Packet1cd\n{\n  EIGEN_STRONG_INLINE Packet1cd() {}\n  EIGEN_STRONG_INLINE explicit Packet1cd(const __m128d& a) : v(a) {}\n  __m128d  v;\n};\n\n// Use the packet_traits defined in AVX/PacketMath.h instead if we're going\n// to leverage AVX instructions.\n#ifndef EIGEN_VECTORIZE_AVX\ntemplate<> struct packet_traits<std::complex<double> >  : default_packet_traits\n{\n  typedef Packet1cd type;\n  typedef Packet1cd half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 0,\n    size = 1,\n    HasHalfPacket = 0,\n\n    HasAdd    = 1,\n    HasSub    = 1,\n 
   HasMul    = 1,\n    HasDiv    = 1,\n    HasNegate = 1,\n    HasAbs    = 0,\n    HasAbs2   = 0,\n    HasMin    = 0,\n    HasMax    = 0,\n    HasSetLinear = 0\n  };\n};\n#endif\n\ntemplate<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_add_pd(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_sub_pd(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { return Packet1cd(pnegate(Packet2d(a.v))); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a)\n{\n  const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));\n  return Packet1cd(_mm_xor_pd(a.v,mask));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)\n{\n  #ifdef EIGEN_VECTORIZE_SSE3\n  return Packet1cd(_mm_addsub_pd(_mm_mul_pd(_mm_movedup_pd(a.v), b.v),\n                                 _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),\n                                            vec2d_swizzle1(b.v, 1, 0))));\n  #else\n  const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));\n  return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v),\n                              _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),\n                                                    vec2d_swizzle1(b.v, 1, 0)), mask)));\n  #endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pand   <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_and_pd(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd por    <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_or_pd(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pxor   <Packet1cd>(const Packet1cd& a, 
const Packet1cd& b) { return Packet1cd(_mm_xor_pd(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_andnot_pd(a.v,b.v)); }\n\n// FIXME force unaligned load, this is a temporary fix\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from)\n{ EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from)\n{ EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>&  from)\n{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>* from) { return pset1<Packet1cd>(*from); }\n\n// FIXME force unaligned store, this is a temporary fix\ntemplate<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> *   to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, Packet2d(from.v)); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> *   to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, Packet2d(from.v)); }\n\ntemplate<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> *   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double>  pfirst<Packet1cd>(const Packet1cd& a)\n{\n  EIGEN_ALIGN16 double res[2];\n  _mm_store_pd(res, a.v);\n  return std::complex<double>(res[0],res[1]);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a)\n{\n  return pfirst(a);\n}\n\ntemplate<> EIGEN_STRONG_INLINE 
Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs)\n{\n  return vecs[0];\n}\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a)\n{\n  return pfirst(a);\n}\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet1cd>\n{\n  static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/)\n  {\n    // FIXME is it sure we never have to align a Packet1cd?\n    // Even though a std::complex<double> has 16 bytes, it is not necessarily aligned on a 16 bytes boundary...\n  }\n};\n\ntemplate<> struct conj_helper<Packet1cd, Packet1cd, false,true>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const\n  {\n    #ifdef EIGEN_VECTORIZE_SSE3\n    return internal::pmul(a, pconj(b));\n    #else\n    const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));\n    return Packet1cd(_mm_add_pd(_mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), mask),\n                                _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),\n                                           vec2d_swizzle1(b.v, 1, 0))));\n    #endif\n  }\n};\n\ntemplate<> struct conj_helper<Packet1cd, Packet1cd, true,false>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const\n  {\n    #ifdef EIGEN_VECTORIZE_SSE3\n    return internal::pmul(pconj(a), b);\n    #else\n    const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));\n    return Packet1cd(_mm_add_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v),\n                                _mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),\n                                                      vec2d_swizzle1(b.v, 1, 0)), mask)));\n    
#endif\n  }\n};\n\ntemplate<> struct conj_helper<Packet1cd, Packet1cd, true,true>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const\n  {\n    #ifdef EIGEN_VECTORIZE_SSE3\n    return pconj(internal::pmul(a, b));\n    #else\n    const __m128d mask = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));\n    return Packet1cd(_mm_sub_pd(_mm_xor_pd(_mm_mul_pd(vec2d_swizzle1(a.v, 0, 0), b.v), mask),\n                                _mm_mul_pd(vec2d_swizzle1(a.v, 1, 1),\n                                           vec2d_swizzle1(b.v, 1, 0))));\n    #endif\n  }\n};\n\ntemplate<> struct conj_helper<Packet2d, Packet1cd, false,false>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet2d& x, const Packet1cd& y, const Packet1cd& c) const\n  { return padd(c, pmul(x,y)); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet2d& x, const Packet1cd& y) const\n  { return Packet1cd(Eigen::internal::pmul<Packet2d>(x, y.v)); }\n};\n\ntemplate<> struct conj_helper<Packet1cd, Packet2d, false,false>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet2d& y, const Packet1cd& c) const\n  { return padd(c, pmul(x,y)); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& x, const Packet2d& y) const\n  { return Packet1cd(Eigen::internal::pmul<Packet2d>(x.v, y)); }\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)\n{\n  // TODO optimize it for SSE3 and 4\n  Packet1cd res = conj_helper<Packet1cd,Packet1cd,false,true>().pmul(a,b);\n  __m128d s = _mm_mul_pd(b.v,b.v);\n  return Packet1cd(_mm_div_pd(res.v, _mm_add_pd(s,_mm_shuffle_pd(s, s, 0x1))));\n}\n\nEIGEN_STRONG_INLINE Packet1cd pcplxflip/* <Packet1cd> */(const Packet1cd& x)\n{\n  return Packet1cd(preverse(Packet2d(x.v)));\n}\n\nEIGEN_DEVICE_FUNC inline 
void\nptranspose(PacketBlock<Packet2cf,2>& kernel) {\n  __m128d w1 = _mm_castps_pd(kernel.packet[0].v);\n  __m128d w2 = _mm_castps_pd(kernel.packet[1].v);\n\n  __m128 tmp = _mm_castpd_ps(_mm_unpackhi_pd(w1, w2));\n  kernel.packet[0].v = _mm_castpd_ps(_mm_unpacklo_pd(w1, w2));\n  kernel.packet[1].v = tmp;\n}\n\ntemplate<>  EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {\n  __m128d result = pblend<Packet2d>(ifPacket, _mm_castps_pd(thenPacket.v), _mm_castps_pd(elsePacket.v));\n  return Packet2cf(_mm_castpd_ps(result));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pinsertfirst(const Packet2cf& a, std::complex<float> b)\n{\n  return Packet2cf(_mm_loadl_pi(a.v, reinterpret_cast<const __m64*>(&b)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pinsertfirst(const Packet1cd&, std::complex<double> b)\n{\n  return pset1<Packet1cd>(b);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pinsertlast(const Packet2cf& a, std::complex<float> b)\n{\n  return Packet2cf(_mm_loadh_pi(a.v, reinterpret_cast<const __m64*>(&b)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pinsertlast(const Packet1cd&, std::complex<double> b)\n{\n  return pset1<Packet1cd>(b);\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_COMPLEX_SSE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/SSE/MathFunctions.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2007 Julien Pommier\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n/* The sin, cos, exp, and log functions of this file come from\n * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/\n */\n\n#ifndef EIGEN_MATH_FUNCTIONS_SSE_H\n#define EIGEN_MATH_FUNCTIONS_SSE_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f plog<Packet4f>(const Packet4f& _x)\n{\n  Packet4f x = _x;\n  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);\n  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);\n  _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);\n\n  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inv_mant_mask, ~0x7f800000);\n\n  /* the smallest non denormalized float number */\n  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(min_norm_pos,  0x00800000);\n  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(minus_inf,     0xff800000);//-1.f/0.f);\n\n  /* natural logarithm computed for 4 simultaneous float\n    return NaN for x <= 0\n  */\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_SQRTHF, 0.707106781186547524f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p0, 7.0376836292E-2f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p1, - 1.1514610310E-1f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p2, 1.1676998740E-1f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p3, - 1.2420140846E-1f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p4, + 1.4249322787E-1f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p5, - 1.6668057665E-1f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p6, + 2.0000714765E-1f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_p7, - 2.4999993993E-1f);\n  
_EIGEN_DECLARE_CONST_Packet4f(cephes_log_p8, + 3.3333331174E-1f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q1, -2.12194440e-4f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_log_q2, 0.693359375f);\n\n\n  Packet4i emm0;\n\n  Packet4f invalid_mask = _mm_cmpnge_ps(x, _mm_setzero_ps()); // not greater equal is true if x is NaN\n  Packet4f iszero_mask = _mm_cmpeq_ps(x, _mm_setzero_ps());\n\n  x = pmax(x, p4f_min_norm_pos);  /* cut off denormalized stuff */\n  emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);\n\n  /* keep only the fractional part */\n  x = _mm_and_ps(x, p4f_inv_mant_mask);\n  x = _mm_or_ps(x, p4f_half);\n\n  emm0 = _mm_sub_epi32(emm0, p4i_0x7f);\n  Packet4f e = padd(Packet4f(_mm_cvtepi32_ps(emm0)), p4f_1);\n\n  /* part2:\n     if( x < SQRTHF ) {\n       e -= 1;\n       x = x + x - 1.0;\n     } else { x = x - 1.0; }\n  */\n  Packet4f mask = _mm_cmplt_ps(x, p4f_cephes_SQRTHF);\n  Packet4f tmp = pand(x, mask);\n  x = psub(x, p4f_1);\n  e = psub(e, pand(p4f_1, mask));\n  x = padd(x, tmp);\n\n  Packet4f x2 = pmul(x,x);\n  Packet4f x3 = pmul(x2,x);\n\n  Packet4f y, y1, y2;\n  y  = pmadd(p4f_cephes_log_p0, x, p4f_cephes_log_p1);\n  y1 = pmadd(p4f_cephes_log_p3, x, p4f_cephes_log_p4);\n  y2 = pmadd(p4f_cephes_log_p6, x, p4f_cephes_log_p7);\n  y  = pmadd(y , x, p4f_cephes_log_p2);\n  y1 = pmadd(y1, x, p4f_cephes_log_p5);\n  y2 = pmadd(y2, x, p4f_cephes_log_p8);\n  y = pmadd(y, x3, y1);\n  y = pmadd(y, x3, y2);\n  y = pmul(y, x3);\n\n  y1 = pmul(e, p4f_cephes_log_q1);\n  tmp = pmul(x2, p4f_half);\n  y = padd(y, y1);\n  x = psub(x, tmp);\n  y2 = pmul(e, p4f_cephes_log_q2);\n  x = padd(x, y);\n  x = padd(x, y2);\n  // negative arg will be NAN, 0 will be -INF\n  return _mm_or_ps(_mm_andnot_ps(iszero_mask, _mm_or_ps(x, invalid_mask)),\n                   _mm_and_ps(iszero_mask, p4f_minus_inf));\n}\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f pexp<Packet4f>(const Packet4f& _x)\n{\n  Packet4f x = _x;\n  
_EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);\n  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);\n  _EIGEN_DECLARE_CONST_Packet4i(0x7f, 0x7f);\n\n\n  _EIGEN_DECLARE_CONST_Packet4f(exp_hi,  88.3762626647950f);\n  _EIGEN_DECLARE_CONST_Packet4f(exp_lo, -88.3762626647949f);\n\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_LOG2EF, 1.44269504088896341f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C1, 0.693359375f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_C2, -2.12194440e-4f);\n\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p0, 1.9875691500E-4f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p1, 1.3981999507E-3f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p2, 8.3334519073E-3f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p3, 4.1665795894E-2f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p4, 1.6666665459E-1f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_exp_p5, 5.0000001201E-1f);\n\n  Packet4f tmp, fx;\n  Packet4i emm0;\n\n  // clamp x\n  x = pmax(pmin(x, p4f_exp_hi), p4f_exp_lo);\n\n  /* express exp(x) as exp(g + n*log(2)) */\n  fx = pmadd(x, p4f_cephes_LOG2EF, p4f_half);\n\n#ifdef EIGEN_VECTORIZE_SSE4_1\n  fx = _mm_floor_ps(fx);\n#else\n  emm0 = _mm_cvttps_epi32(fx);\n  tmp  = _mm_cvtepi32_ps(emm0);\n  /* if greater, substract 1 */\n  Packet4f mask = _mm_cmpgt_ps(tmp, fx);\n  mask = _mm_and_ps(mask, p4f_1);\n  fx = psub(tmp, mask);\n#endif\n\n  tmp = pmul(fx, p4f_cephes_exp_C1);\n  Packet4f z = pmul(fx, p4f_cephes_exp_C2);\n  x = psub(x, tmp);\n  x = psub(x, z);\n\n  z = pmul(x,x);\n\n  Packet4f y = p4f_cephes_exp_p0;\n  y = pmadd(y, x, p4f_cephes_exp_p1);\n  y = pmadd(y, x, p4f_cephes_exp_p2);\n  y = pmadd(y, x, p4f_cephes_exp_p3);\n  y = pmadd(y, x, p4f_cephes_exp_p4);\n  y = pmadd(y, x, p4f_cephes_exp_p5);\n  y = pmadd(y, z, x);\n  y = padd(y, p4f_1);\n\n  // build 2^n\n  emm0 = _mm_cvttps_epi32(fx);\n  emm0 = _mm_add_epi32(emm0, p4i_0x7f);\n  emm0 = _mm_slli_epi32(emm0, 23);\n  return pmax(pmul(y, Packet4f(_mm_castsi128_ps(emm0))), _x);\n}\ntemplate<> 
EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket2d pexp<Packet2d>(const Packet2d& _x)\n{\n  Packet2d x = _x;\n\n  _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);\n  _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);\n  _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);\n\n  _EIGEN_DECLARE_CONST_Packet2d(exp_hi,  709.437);\n  _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);\n\n  _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);\n\n  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);\n  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);\n  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);\n\n  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);\n  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);\n  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);\n  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);\n\n  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);\n  _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);\n  static const __m128i p4i_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);\n\n  Packet2d tmp, fx;\n  Packet4i emm0;\n\n  // clamp x\n  x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);\n  /* express exp(x) as exp(g + n*log(2)) */\n  fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half);\n\n#ifdef EIGEN_VECTORIZE_SSE4_1\n  fx = _mm_floor_pd(fx);\n#else\n  emm0 = _mm_cvttpd_epi32(fx);\n  tmp  = _mm_cvtepi32_pd(emm0);\n  /* if greater, substract 1 */\n  Packet2d mask = _mm_cmpgt_pd(tmp, fx);\n  mask = _mm_and_pd(mask, p2d_1);\n  fx = psub(tmp, mask);\n#endif\n\n  tmp = pmul(fx, p2d_cephes_exp_C1);\n  Packet2d z = pmul(fx, p2d_cephes_exp_C2);\n  x = psub(x, tmp);\n  x = psub(x, z);\n\n  Packet2d x2 = pmul(x,x);\n\n  Packet2d px = p2d_cephes_exp_p0;\n  px = pmadd(px, x2, p2d_cephes_exp_p1);\n  px = pmadd(px, x2, p2d_cephes_exp_p2);\n  px = pmul (px, x);\n\n  Packet2d qx = 
p2d_cephes_exp_q0;\n  qx = pmadd(qx, x2, p2d_cephes_exp_q1);\n  qx = pmadd(qx, x2, p2d_cephes_exp_q2);\n  qx = pmadd(qx, x2, p2d_cephes_exp_q3);\n\n  x = pdiv(px,psub(qx,px));\n  x = pmadd(p2d_2,x,p2d_1);\n\n  // build 2^n\n  emm0 = _mm_cvttpd_epi32(fx);\n  emm0 = _mm_add_epi32(emm0, p4i_1023_0);\n  emm0 = _mm_slli_epi32(emm0, 20);\n  emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));\n  return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x);\n}\n\n/* evaluation of 4 sines at onces, using SSE2 intrinsics.\n\n   The code is the exact rewriting of the cephes sinf function.\n   Precision is excellent as long as x < 8192 (I did not bother to\n   take into account the special handling they have for greater values\n   -- it does not return garbage for arguments over 8192, though, but\n   the extra precision is missing).\n\n   Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the\n   surprising but correct result.\n*/\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f psin<Packet4f>(const Packet4f& _x)\n{\n  Packet4f x = _x;\n  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);\n  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);\n\n  _EIGEN_DECLARE_CONST_Packet4i(1, 1);\n  _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);\n  _EIGEN_DECLARE_CONST_Packet4i(2, 2);\n  _EIGEN_DECLARE_CONST_Packet4i(4, 4);\n\n  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000);\n\n  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);\n  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);\n  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);\n  _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);\n  _EIGEN_DECLARE_CONST_Packet4f(sincof_p1,  8.3321608736E-3f);\n  _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);\n  _EIGEN_DECLARE_CONST_Packet4f(coscof_p0,  2.443315711809948E-005f);\n  _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);\n  
_EIGEN_DECLARE_CONST_Packet4f(coscof_p2,  4.166664568298827E-002f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI\n\n  Packet4f xmm1, xmm2, xmm3, sign_bit, y;\n\n  Packet4i emm0, emm2;\n  sign_bit = x;\n  /* take the absolute value */\n  x = pabs(x);\n\n  /* take the modulo */\n\n  /* extract the sign bit (upper one) */\n  sign_bit = _mm_and_ps(sign_bit, p4f_sign_mask);\n\n  /* scale by 4/Pi */\n  y = pmul(x, p4f_cephes_FOPI);\n\n  /* store the integer part of y in mm0 */\n  emm2 = _mm_cvttps_epi32(y);\n  /* j=(j+1) & (~1) (see the cephes sources) */\n  emm2 = _mm_add_epi32(emm2, p4i_1);\n  emm2 = _mm_and_si128(emm2, p4i_not1);\n  y = _mm_cvtepi32_ps(emm2);\n  /* get the swap sign flag */\n  emm0 = _mm_and_si128(emm2, p4i_4);\n  emm0 = _mm_slli_epi32(emm0, 29);\n  /* get the polynom selection mask\n     there is one polynom for 0 <= x <= Pi/4\n     and another one for Pi/4<x<=Pi/2\n\n     Both branches will be computed.\n  */\n  emm2 = _mm_and_si128(emm2, p4i_2);\n  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());\n\n  Packet4f swap_sign_bit = _mm_castsi128_ps(emm0);\n  Packet4f poly_mask = _mm_castsi128_ps(emm2);\n  sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);\n\n  /* The magic pass: \"Extended precision modular arithmetic\"\n     x = ((x - y * DP1) - y * DP2) - y * DP3; */\n  xmm1 = pmul(y, p4f_minus_cephes_DP1);\n  xmm2 = pmul(y, p4f_minus_cephes_DP2);\n  xmm3 = pmul(y, p4f_minus_cephes_DP3);\n  x = padd(x, xmm1);\n  x = padd(x, xmm2);\n  x = padd(x, xmm3);\n\n  /* Evaluate the first polynom  (0 <= x <= Pi/4) */\n  y = p4f_coscof_p0;\n  Packet4f z = _mm_mul_ps(x,x);\n\n  y = pmadd(y, z, p4f_coscof_p1);\n  y = pmadd(y, z, p4f_coscof_p2);\n  y = pmul(y, z);\n  y = pmul(y, z);\n  Packet4f tmp = pmul(z, p4f_half);\n  y = psub(y, tmp);\n  y = padd(y, p4f_1);\n\n  /* Evaluate the second polynom  (Pi/4 <= x <= 0) */\n\n  Packet4f y2 = p4f_sincof_p0;\n  y2 = pmadd(y2, z, p4f_sincof_p1);\n  y2 = pmadd(y2, z, p4f_sincof_p2);\n  y2 = 
pmul(y2, z);\n  y2 = pmul(y2, x);\n  y2 = padd(y2, x);\n\n  /* select the correct result from the two polynoms */\n  y2 = _mm_and_ps(poly_mask, y2);\n  y = _mm_andnot_ps(poly_mask, y);\n  y = _mm_or_ps(y,y2);\n  /* update the sign */\n  return _mm_xor_ps(y, sign_bit);\n}\n\n/* almost the same as psin */\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f pcos<Packet4f>(const Packet4f& _x)\n{\n  Packet4f x = _x;\n  _EIGEN_DECLARE_CONST_Packet4f(1 , 1.0f);\n  _EIGEN_DECLARE_CONST_Packet4f(half, 0.5f);\n\n  _EIGEN_DECLARE_CONST_Packet4i(1, 1);\n  _EIGEN_DECLARE_CONST_Packet4i(not1, ~1);\n  _EIGEN_DECLARE_CONST_Packet4i(2, 2);\n  _EIGEN_DECLARE_CONST_Packet4i(4, 4);\n\n  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP1,-0.78515625f);\n  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP2, -2.4187564849853515625e-4f);\n  _EIGEN_DECLARE_CONST_Packet4f(minus_cephes_DP3, -3.77489497744594108e-8f);\n  _EIGEN_DECLARE_CONST_Packet4f(sincof_p0, -1.9515295891E-4f);\n  _EIGEN_DECLARE_CONST_Packet4f(sincof_p1,  8.3321608736E-3f);\n  _EIGEN_DECLARE_CONST_Packet4f(sincof_p2, -1.6666654611E-1f);\n  _EIGEN_DECLARE_CONST_Packet4f(coscof_p0,  2.443315711809948E-005f);\n  _EIGEN_DECLARE_CONST_Packet4f(coscof_p1, -1.388731625493765E-003f);\n  _EIGEN_DECLARE_CONST_Packet4f(coscof_p2,  4.166664568298827E-002f);\n  _EIGEN_DECLARE_CONST_Packet4f(cephes_FOPI, 1.27323954473516f); // 4 / M_PI\n\n  Packet4f xmm1, xmm2, xmm3, y;\n  Packet4i emm0, emm2;\n\n  x = pabs(x);\n\n  /* scale by 4/Pi */\n  y = pmul(x, p4f_cephes_FOPI);\n\n  /* get the integer part of y */\n  emm2 = _mm_cvttps_epi32(y);\n  /* j=(j+1) & (~1) (see the cephes sources) */\n  emm2 = _mm_add_epi32(emm2, p4i_1);\n  emm2 = _mm_and_si128(emm2, p4i_not1);\n  y = _mm_cvtepi32_ps(emm2);\n\n  emm2 = _mm_sub_epi32(emm2, p4i_2);\n\n  /* get the swap sign flag */\n  emm0 = _mm_andnot_si128(emm2, p4i_4);\n  emm0 = _mm_slli_epi32(emm0, 29);\n  /* get the polynom selection mask */\n  emm2 = _mm_and_si128(emm2, 
p4i_2);\n  emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());\n\n  Packet4f sign_bit = _mm_castsi128_ps(emm0);\n  Packet4f poly_mask = _mm_castsi128_ps(emm2);\n\n  /* The magic pass: \"Extended precision modular arithmetic\"\n     x = ((x - y * DP1) - y * DP2) - y * DP3; */\n  xmm1 = pmul(y, p4f_minus_cephes_DP1);\n  xmm2 = pmul(y, p4f_minus_cephes_DP2);\n  xmm3 = pmul(y, p4f_minus_cephes_DP3);\n  x = padd(x, xmm1);\n  x = padd(x, xmm2);\n  x = padd(x, xmm3);\n\n  /* Evaluate the first polynom  (0 <= x <= Pi/4) */\n  y = p4f_coscof_p0;\n  Packet4f z = pmul(x,x);\n\n  y = pmadd(y,z,p4f_coscof_p1);\n  y = pmadd(y,z,p4f_coscof_p2);\n  y = pmul(y, z);\n  y = pmul(y, z);\n  Packet4f tmp = _mm_mul_ps(z, p4f_half);\n  y = psub(y, tmp);\n  y = padd(y, p4f_1);\n\n  /* Evaluate the second polynom  (Pi/4 <= x <= 0) */\n  Packet4f y2 = p4f_sincof_p0;\n  y2 = pmadd(y2, z, p4f_sincof_p1);\n  y2 = pmadd(y2, z, p4f_sincof_p2);\n  y2 = pmul(y2, z);\n  y2 = pmadd(y2, x, x);\n\n  /* select the correct result from the two polynoms */\n  y2 = _mm_and_ps(poly_mask, y2);\n  y  = _mm_andnot_ps(poly_mask, y);\n  y  = _mm_or_ps(y,y2);\n\n  /* update the sign */\n  return _mm_xor_ps(y, sign_bit);\n}\n\n#if EIGEN_FAST_MATH\n\n// Functions for sqrt.\n// The EIGEN_FAST_MATH version uses the _mm_rsqrt_ps approximation and one step\n// of Newton's method, at a cost of 1-2 bits of precision as opposed to the\n// exact solution. It does not handle +inf, or denormalized numbers correctly.\n// The main advantage of this approach is not just speed, but also the fact that\n// it can be inlined and pipelined with other computations, further reducing its\n// effective latency. 
This is similar to Quake3's fast inverse square root.\n// For detail see here: http://www.beyond3d.com/content/articles/8/\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f psqrt<Packet4f>(const Packet4f& _x)\n{\n  Packet4f half = pmul(_x, pset1<Packet4f>(.5f));\n  Packet4f denormal_mask = _mm_and_ps(\n      _mm_cmpge_ps(_x, _mm_setzero_ps()),\n      _mm_cmplt_ps(_x, pset1<Packet4f>((std::numeric_limits<float>::min)())));\n\n  // Compute approximate reciprocal sqrt.\n  Packet4f x = _mm_rsqrt_ps(_x);\n  // Do a single step of Newton's iteration.\n  x = pmul(x, psub(pset1<Packet4f>(1.5f), pmul(half, pmul(x,x))));\n  // Flush results for denormals to zero.\n  return _mm_andnot_ps(denormal_mask, pmul(_x,x));\n}\n\n#else\n\ntemplate<>EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f psqrt<Packet4f>(const Packet4f& x) { return _mm_sqrt_ps(x); }\n\n#endif\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket2d psqrt<Packet2d>(const Packet2d& x) { return _mm_sqrt_pd(x); }\n\n#if EIGEN_FAST_MATH\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f prsqrt<Packet4f>(const Packet4f& _x) {\n  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(inf, 0x7f800000);\n  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(nan, 0x7fc00000);\n  _EIGEN_DECLARE_CONST_Packet4f(one_point_five, 1.5f);\n  _EIGEN_DECLARE_CONST_Packet4f(minus_half, -0.5f);\n  _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(flt_min, 0x00800000);\n\n  Packet4f neg_half = pmul(_x, p4f_minus_half);\n\n  // select only the inverse sqrt of positive normal inputs (denormals are\n  // flushed to zero and cause infs as well).\n  Packet4f le_zero_mask = _mm_cmple_ps(_x, p4f_flt_min);\n  Packet4f x = _mm_andnot_ps(le_zero_mask, _mm_rsqrt_ps(_x));\n\n  // Fill in NaNs and Infs for the negative/zero entries.\n  Packet4f neg_mask = _mm_cmplt_ps(_x, _mm_setzero_ps());\n  Packet4f zero_mask = _mm_andnot_ps(neg_mask, le_zero_mask);\n 
 Packet4f infs_and_nans = _mm_or_ps(_mm_and_ps(neg_mask, p4f_nan),\n                                     _mm_and_ps(zero_mask, p4f_inf));\n\n  // Do a single step of Newton's iteration.\n  x = pmul(x, pmadd(neg_half, pmul(x, x), p4f_one_point_five));\n\n  // Insert NaNs and Infs in all the right places.\n  return _mm_or_ps(x, infs_and_nans);\n}\n\n#else\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f prsqrt<Packet4f>(const Packet4f& x) {\n  // Unfortunately we can't use the much faster mm_rqsrt_ps since it only provides an approximation.\n  return _mm_div_ps(pset1<Packet4f>(1.0f), _mm_sqrt_ps(x));\n}\n\n#endif\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket2d prsqrt<Packet2d>(const Packet2d& x) {\n  // Unfortunately we can't use the much faster mm_rqsrt_pd since it only provides an approximation.\n  return _mm_div_pd(pset1<Packet2d>(1.0), _mm_sqrt_pd(x));\n}\n\n// Hyperbolic Tangent function.\ntemplate <>\nEIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED Packet4f\nptanh<Packet4f>(const Packet4f& x) {\n  return internal::generic_fast_tanh_float(x);\n}\n\n} // end namespace internal\n\nnamespace numext {\n\ntemplate<>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\nfloat sqrt(const float &x)\n{\n  return internal::pfirst(internal::Packet4f(_mm_sqrt_ss(_mm_set_ss(x))));\n}\n\ntemplate<>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE\ndouble sqrt(const double &x)\n{\n#if EIGEN_COMP_GNUC_STRICT\n  // This works around a GCC bug generating poor code for _mm_sqrt_pd\n  // See https://bitbucket.org/eigen/eigen/commits/14f468dba4d350d7c19c9b93072e19f7b3df563b\n  return internal::pfirst(internal::Packet2d(__builtin_ia32_sqrtsd(_mm_set_sd(x))));\n#else\n  return internal::pfirst(internal::Packet2d(_mm_sqrt_pd(_mm_set_sd(x))));\n#endif\n}\n\n} // end namespace numex\n\n} // end namespace Eigen\n\n#endif // EIGEN_MATH_FUNCTIONS_SSE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/SSE/PacketMath.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PACKET_MATH_SSE_H\n#define EIGEN_PACKET_MATH_SSE_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD\n#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8\n#endif\n\n#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS\n#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))\n#endif\n\n#ifdef __FMA__\n#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD\n#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD 1\n#endif\n#endif\n\n#if (defined EIGEN_VECTORIZE_AVX) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_MINGW) && (__GXX_ABI_VERSION < 1004)\n// With GCC's default ABI version, a __m128 or __m256 are the same types and therefore we cannot\n// have overloads for both types without linking error.\n// One solution is to increase ABI version using -fabi-version=4 (or greater).\n// Otherwise, we workaround this inconvenience by wrapping 128bit types into the following helper\n// structure:\ntemplate<typename T>\nstruct eigen_packet_wrapper\n{\n  EIGEN_ALWAYS_INLINE operator T&() { return m_val; }\n  EIGEN_ALWAYS_INLINE operator const T&() const { return m_val; }\n  EIGEN_ALWAYS_INLINE eigen_packet_wrapper() {}\n  EIGEN_ALWAYS_INLINE eigen_packet_wrapper(const T &v) : m_val(v) {}\n  EIGEN_ALWAYS_INLINE eigen_packet_wrapper& operator=(const T &v) {\n    m_val = v;\n    return *this;\n  }\n\n  T m_val;\n};\ntypedef eigen_packet_wrapper<__m128>  Packet4f;\ntypedef eigen_packet_wrapper<__m128i> Packet4i;\ntypedef eigen_packet_wrapper<__m128d> Packet2d;\n#else\ntypedef __m128  Packet4f;\ntypedef __m128i Packet4i;\ntypedef __m128d Packet2d;\n#endif\n\ntemplate<> 
struct is_arithmetic<__m128>  { enum { value = true }; };\ntemplate<> struct is_arithmetic<__m128i> { enum { value = true }; };\ntemplate<> struct is_arithmetic<__m128d> { enum { value = true }; };\n\n#define vec4f_swizzle1(v,p,q,r,s) \\\n  (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))\n\n#define vec4i_swizzle1(v,p,q,r,s) \\\n  (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))\n\n#define vec2d_swizzle1(v,p,q) \\\n  (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2)))))\n\n#define vec4f_swizzle2(a,b,p,q,r,s) \\\n  (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))\n\n#define vec4i_swizzle2(a,b,p,q,r,s) \\\n  (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))\n\n#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \\\n  const Packet4f p4f_##NAME = pset1<Packet4f>(X)\n\n#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \\\n  const Packet2d p2d_##NAME = pset1<Packet2d>(X)\n\n#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \\\n  const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))\n\n#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \\\n  const Packet4i p4i_##NAME = pset1<Packet4i>(X)\n\n\n// Use the packet_traits defined in AVX/PacketMath.h instead if we're going\n// to leverage AVX instructions.\n#ifndef EIGEN_VECTORIZE_AVX\ntemplate<> struct packet_traits<float>  : default_packet_traits\n{\n  typedef Packet4f type;\n  typedef Packet4f half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=4,\n    HasHalfPacket = 0,\n\n    HasDiv  = 1,\n    HasSin  = EIGEN_FAST_MATH,\n    HasCos  = EIGEN_FAST_MATH,\n    HasLog  = 1,\n    HasExp  = 1,\n    HasSqrt = 1,\n    HasRsqrt = 1,\n    HasTanh  = EIGEN_FAST_MATH,\n    HasBlend = 1\n\n#ifdef EIGEN_VECTORIZE_SSE4_1\n    ,\n    HasRound = 1,\n    HasFloor = 1,\n    HasCeil = 1\n#endif\n  };\n};\ntemplate<> struct packet_traits<double> : 
default_packet_traits\n{\n  typedef Packet2d type;\n  typedef Packet2d half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=2,\n    HasHalfPacket = 0,\n\n    HasDiv  = 1,\n    HasExp  = 1,\n    HasSqrt = 1,\n    HasRsqrt = 1,\n    HasBlend = 1\n\n#ifdef EIGEN_VECTORIZE_SSE4_1\n    ,\n    HasRound = 1,\n    HasFloor = 1,\n    HasCeil = 1\n#endif\n  };\n};\n#endif\ntemplate<> struct packet_traits<int>    : default_packet_traits\n{\n  typedef Packet4i type;\n  typedef Packet4i half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=4,\n\n    HasBlend = 1\n  };\n};\n\ntemplate<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };\ntemplate<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };\ntemplate<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };\n\n#ifndef EIGEN_VECTORIZE_AVX\ntemplate<> struct scalar_div_cost<float,true> { enum { value = 7 }; };\ntemplate<> struct scalar_div_cost<double,true> { enum { value = 8 }; };\n#endif\n\n#if EIGEN_COMP_MSVC==1500\n// Workaround MSVC 9 internal compiler error.\n// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32bits+SSE mode\n// TODO: let's check whether there does not exist a better fix, like adding a pset0() function. 
(it crashed on pset1(0)).\ntemplate<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps(from,from,from,from); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set_epi32(from,from,from,from); }\n#else\ntemplate<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps1(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set1_epi32(from); }\n#endif\n\n// GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction.\n// However, using inrinsics for pset1 makes gcc to generate crappy code in some cases (see bug 203)\n// Using inline assembly is also not an option because then gcc fails to reorder properly the instructions.\n// Therefore, we introduced the pload1 functions to be used in product kernels for which bug 203 does not apply.\n// Also note that with AVX, we want it to generate a vbroadcastss.\n#if EIGEN_COMP_GNUC_STRICT && (!defined __AVX__)\ntemplate<> EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float *from) {\n  return vec4f_swizzle1(_mm_load_ss(from),0,0,0,0);\n}\n#endif\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE 
Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)\n{\n  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));\n  return _mm_xor_ps(a,mask);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)\n{\n  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));\n  return _mm_xor_pd(a,mask);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)\n{\n  return psub(Packet4i(_mm_setr_epi32(0,0,0,0)), a);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)\n{\n#ifdef EIGEN_VECTORIZE_SSE4_1\n  return _mm_mullo_epi32(a,b);\n#else\n  // this version is slightly faster than 4 scalar products\n  return vec4i_swizzle1(\n            vec4i_swizzle2(\n              _mm_mul_epu32(a,b),\n              _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),\n                   
         vec4i_swizzle1(b,1,0,3,2)),\n              0,2,0,2),\n            0,2,1,3);\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }\n\n// for some weird raisons, it has to be overloaded for packet of integers\ntemplate<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }\n#ifdef __FMA__\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); }\n#endif\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) {\n#if EIGEN_COMP_GNUC\n  // There appears to be a bug in GCC, by which the optimizer may\n  // flip the argument order in calls to _mm_min_ps, so we have to\n  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,\n  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867\n  Packet4f res = b;\n  asm(\"minps %[a], %[res]\" : [res] \"+x\" (res) : [a] \"x\" (a));\n  return res;\n#else\n  // Arguments are reversed to match NaN propagation behavior of std::min.\n  return _mm_min_ps(b, a);\n#endif\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) {\n#if EIGEN_COMP_GNUC\n  // There appears to be a bug in GCC, by which the optimizer may\n  // flip the argument order in calls to _mm_min_pd, so we have to\n  // resort to inline ASM here. 
This is supposed to be fixed in gcc6.3,\n  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867\n  Packet2d res = b;\n  asm(\"minpd %[a], %[res]\" : [res] \"+x\" (res) : [a] \"x\" (a));\n  return res;\n#else\n  // Arguments are reversed to match NaN propagation behavior of std::min.\n  return _mm_min_pd(b, a);\n#endif\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)\n{\n#ifdef EIGEN_VECTORIZE_SSE4_1\n  return _mm_min_epi32(a,b);\n#else\n  // after some bench, this version *is* faster than a scalar implementation\n  Packet4i mask = _mm_cmplt_epi32(a,b);\n  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) {\n#if EIGEN_COMP_GNUC\n  // There appears to be a bug in GCC, by which the optimizer may\n  // flip the argument order in calls to _mm_max_ps, so we have to\n  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,\n  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867\n  Packet4f res = b;\n  asm(\"maxps %[a], %[res]\" : [res] \"+x\" (res) : [a] \"x\" (a));\n  return res;\n#else\n  // Arguments are reversed to match NaN propagation behavior of std::max.\n  return _mm_max_ps(b, a);\n#endif\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) {\n#if EIGEN_COMP_GNUC\n  // There appears to be a bug in GCC, by which the optimizer may\n  // flip the argument order in calls to _mm_max_pd, so we have to\n  // resort to inline ASM here. 
This is supposed to be fixed in gcc6.3,\n  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867\n  Packet2d res = b;\n  asm(\"maxpd %[a], %[res]\" : [res] \"+x\" (res) : [a] \"x\" (a));\n  return res;\n#else\n  // Arguments are reversed to match NaN propagation behavior of std::max.\n  return _mm_max_pd(b, a);\n#endif\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)\n{\n#ifdef EIGEN_VECTORIZE_SSE4_1\n  return _mm_max_epi32(a,b);\n#else\n  // after some bench, this version *is* faster than a scalar implementation\n  Packet4i mask = _mm_cmpgt_epi32(a,b);\n  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));\n#endif\n}\n\n#ifdef EIGEN_VECTORIZE_SSE4_1\ntemplate<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a) { return _mm_round_ps(a, 0); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return _mm_round_pd(a, 0); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) { return _mm_ceil_ps(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return _mm_ceil_pd(a); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) { return _mm_floor_ps(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return _mm_floor_pd(a); }\n#endif\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); 
}\ntemplate<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }\n\n#if EIGEN_COMP_MSVC\n  template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float*  from) {\n    EIGEN_DEBUG_UNALIGNED_LOAD\n    #if (EIGEN_COMP_MSVC==1600)\n    // NOTE Some version of MSVC10 generates bad code when using _mm_loadu_ps\n    // (i.e., it does not generate an unaligned load!!\n    __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));\n    res = _mm_loadh_pi(res, (const __m64*)(from+2));\n    return res;\n    #else\n    return _mm_loadu_ps(from);\n    #endif\n  }\n#else\n// NOTE: with the code below, MSVC's compiler crashes!\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)\n{\n  
EIGEN_DEBUG_UNALIGNED_LOAD\n  return _mm_loadu_ps(from);\n}\n#endif\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)\n{\n  EIGEN_DEBUG_UNALIGNED_LOAD\n  return _mm_loadu_pd(from);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)\n{\n  EIGEN_DEBUG_UNALIGNED_LOAD\n  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));\n}\n\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*   from)\n{\n  return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double*  from)\n{ return pset1<Packet2d>(from[0]); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)\n{\n  Packet4i tmp;\n  tmp = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(from));\n  return vec4i_swizzle1(tmp, 0, 0, 1, 1);\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }\ntemplate<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }\ntemplate<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }\n\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_pd(to, from); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_ps(to, from); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)\n{\n return _mm_set_ps(from[3*stride], from[2*stride], 
from[1*stride], from[0*stride]);\n}\ntemplate<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)\n{\n return _mm_set_pd(from[1*stride], from[0*stride]);\n}\ntemplate<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)\n{\n return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);\n }\n\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)\n{\n  to[stride*0] = _mm_cvtss_f32(from);\n  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 1));\n  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 2));\n  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 3));\n}\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)\n{\n  to[stride*0] = _mm_cvtsd_f64(from);\n  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1));\n}\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)\n{\n  to[stride*0] = _mm_cvtsi128_si32(from);\n  to[stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1));\n  to[stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2));\n  to[stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3));\n}\n\n// some compilers might be tempted to perform multiple moves instead of using a vector path.\ntemplate<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)\n{\n  Packet4f pa = _mm_set_ss(a);\n  pstore(to, Packet4f(vec4f_swizzle1(pa,0,0,0,0)));\n}\n// some compilers might be tempted to perform multiple moves instead of using a vector path.\ntemplate<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)\n{\n  Packet2d pa = _mm_set_sd(a);\n  pstore(to, Packet2d(vec2d_swizzle1(pa,0,0)));\n}\n\n#ifndef EIGEN_VECTORIZE_AVX\ntemplate<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const 
char*)(addr), _MM_HINT_T0); }\ntemplate<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }\ntemplate<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }\n#endif\n\n#if EIGEN_COMP_MSVC_STRICT && EIGEN_OS_WIN64\n// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010\n// Direct of the struct members fixed bug #62.\ntemplate<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }\ntemplate<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }\ntemplate<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }\n#elif EIGEN_COMP_MSVC_STRICT\n// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010\ntemplate<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }\ntemplate<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }\ntemplate<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }\n#else\ntemplate<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }\ntemplate<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }\ntemplate<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }\n#endif\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)\n{ return _mm_shuffle_ps(a,a,0x1B); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)\n{ return _mm_shuffle_pd(a,a,0x1); }\ntemplate<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)\n{ return _mm_shuffle_epi32(a,0x1B); }\n\ntemplate<> 
EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)\n{\n  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));\n  return _mm_and_ps(a,mask);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)\n{\n  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));\n  return _mm_and_pd(a,mask);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)\n{\n  #ifdef EIGEN_VECTORIZE_SSSE3\n  return _mm_abs_epi32(a);\n  #else\n  Packet4i aux = _mm_srai_epi32(a,31);\n  return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);\n  #endif\n}\n\n// with AVX, the default implementations based on pload1 are faster\n#ifndef __AVX__\ntemplate<> EIGEN_STRONG_INLINE void\npbroadcast4<Packet4f>(const float *a,\n                      Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)\n{\n  a3 = pload<Packet4f>(a);\n  a0 = vec4f_swizzle1(a3, 0,0,0,0);\n  a1 = vec4f_swizzle1(a3, 1,1,1,1);\n  a2 = vec4f_swizzle1(a3, 2,2,2,2);\n  a3 = vec4f_swizzle1(a3, 3,3,3,3);\n}\ntemplate<> EIGEN_STRONG_INLINE void\npbroadcast4<Packet2d>(const double *a,\n                      Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)\n{\n#ifdef EIGEN_VECTORIZE_SSE3\n  a0 = _mm_loaddup_pd(a+0);\n  a1 = _mm_loaddup_pd(a+1);\n  a2 = _mm_loaddup_pd(a+2);\n  a3 = _mm_loaddup_pd(a+3);\n#else\n  a1 = pload<Packet2d>(a);\n  a0 = vec2d_swizzle1(a1, 0,0);\n  a1 = vec2d_swizzle1(a1, 1,1);\n  a3 = pload<Packet2d>(a+2);\n  a2 = vec2d_swizzle1(a3, 0,0);\n  a3 = vec2d_swizzle1(a3, 1,1);\n#endif\n}\n#endif\n\nEIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)\n{\n  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));\n  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));\n  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));\n  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));\n}\n\n#ifdef 
EIGEN_VECTORIZE_SSE3\ntemplate<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)\n{\n  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)\n{\n  return _mm_hadd_pd(vecs[0], vecs[1]);\n}\n\n#else\ntemplate<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)\n{\n  Packet4f tmp0, tmp1, tmp2;\n  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);\n  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);\n  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);\n  tmp0 = _mm_add_ps(tmp0, tmp1);\n  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);\n  tmp1 = _mm_add_ps(tmp1, tmp2);\n  tmp2 = _mm_movehl_ps(tmp1, tmp0);\n  tmp0 = _mm_movelh_ps(tmp0, tmp1);\n  return _mm_add_ps(tmp0, tmp2);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)\n{\n  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));\n}\n#endif  // SSE3\n\ntemplate<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)\n{\n  // Disable SSE3 _mm_hadd_pd that is extremely slow on all existing Intel's architectures\n  // (from Nehalem to Haswell)\n// #ifdef EIGEN_VECTORIZE_SSE3\n//   Packet4f tmp = _mm_add_ps(a, vec4f_swizzle1(a,2,3,2,3));\n//   return pfirst<Packet4f>(_mm_hadd_ps(tmp, tmp));\n// #else\n  Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));\n  return pfirst<Packet4f>(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));\n// #endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)\n{\n  // Disable SSE3 _mm_hadd_pd that is extremely slow on all existing Intel's architectures\n  // (from Nehalem to Haswell)\n// #ifdef EIGEN_VECTORIZE_SSE3\n//   return pfirst<Packet2d>(_mm_hadd_pd(a, a));\n// #else\n  return pfirst<Packet2d>(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));\n// #endif\n}\n\n#ifdef EIGEN_VECTORIZE_SSSE3\ntemplate<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)\n{\n  
return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));\n}\ntemplate<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)\n{\n  Packet4i tmp0 = _mm_hadd_epi32(a,a);\n  return pfirst<Packet4i>(_mm_hadd_epi32(tmp0,tmp0));\n}\n#else\ntemplate<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)\n{\n  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));\n  return pfirst(tmp) + pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)\n{\n  Packet4i tmp0, tmp1, tmp2;\n  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);\n  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);\n  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);\n  tmp0 = _mm_add_epi32(tmp0, tmp1);\n  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);\n  tmp1 = _mm_add_epi32(tmp1, tmp2);\n  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);\n  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);\n  return _mm_add_epi32(tmp0, tmp2);\n}\n#endif\n// Other reduction functions:\n\n// mul\ntemplate<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)\n{\n  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));\n  return pfirst<Packet4f>(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));\n}\ntemplate<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)\n{\n  return pfirst<Packet2d>(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));\n}\ntemplate<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)\n{\n  // after some experiments, it is seems this is the fastest way to implement it\n  // for GCC (eg., reusing pmul is very slow !)\n  // TODO try to call _mm_mul_epu32 directly\n  EIGEN_ALIGN16 int aux[4];\n  pstore(aux, a);\n  return  (aux[0] * aux[1]) * (aux[2] * aux[3]);;\n}\n\n// min\ntemplate<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)\n{\n  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));\n  return pfirst<Packet4f>(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));\n}\ntemplate<> 
EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)\n{\n  return pfirst<Packet2d>(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));\n}\ntemplate<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)\n{\n#ifdef EIGEN_VECTORIZE_SSE4_1\n  Packet4i tmp = _mm_min_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));\n  return pfirst<Packet4i>(_mm_min_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));\n#else\n  // after some experiments, it is seems this is the fastest way to implement it\n  // for GCC (eg., it does not like using std::min after the pstore !!)\n  EIGEN_ALIGN16 int aux[4];\n  pstore(aux, a);\n  int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];\n  int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];\n  return aux0<aux2 ? aux0 : aux2;\n#endif // EIGEN_VECTORIZE_SSE4_1\n}\n\n// max\ntemplate<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)\n{\n  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));\n  return pfirst<Packet4f>(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));\n}\ntemplate<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)\n{\n  return pfirst<Packet2d>(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));\n}\ntemplate<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)\n{\n#ifdef EIGEN_VECTORIZE_SSE4_1\n  Packet4i tmp = _mm_max_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));\n  return pfirst<Packet4i>(_mm_max_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));\n#else\n  // after some experiments, it is seems this is the fastest way to implement it\n  // for GCC (eg., it does not like using std::min after the pstore !!)\n  EIGEN_ALIGN16 int aux[4];\n  pstore(aux, a);\n  int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];\n  int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];\n  return aux0>aux2 ? 
aux0 : aux2;\n#endif // EIGEN_VECTORIZE_SSE4_1\n}\n\n#if EIGEN_COMP_GNUC\n// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f&  a, const Packet4f&  b, const Packet4f&  c)\n// {\n//   Packet4f res = b;\n//   asm(\"mulps %[a], %[b] \\n\\taddps %[c], %[b]\" : [b] \"+x\" (res) : [a] \"x\" (a), [c] \"x\" (c));\n//   return res;\n// }\n// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i&  a, const Packet4i&  b, const int i)\n// {\n//   Packet4i res = a;\n//   asm(\"palignr %[i], %[a], %[b] \" : [b] \"+x\" (res) : [a] \"x\" (a), [i] \"i\" (i));\n//   return res;\n// }\n#endif\n\n#ifdef EIGEN_VECTORIZE_SSSE3\n// SSSE3 versions\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet4f>\n{\n  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)\n  {\n    if (Offset!=0)\n      first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));\n  }\n};\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet4i>\n{\n  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)\n  {\n    if (Offset!=0)\n      first = _mm_alignr_epi8(second,first, Offset*4);\n  }\n};\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet2d>\n{\n  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)\n  {\n    if (Offset==1)\n      first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));\n  }\n};\n#else\n// SSE2 versions\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet4f>\n{\n  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)\n  {\n    if (Offset==1)\n    {\n      first = _mm_move_ss(first,second);\n      first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));\n    }\n    else if (Offset==2)\n    {\n      first = _mm_movehl_ps(first,first);\n      first = _mm_movelh_ps(first,second);\n    }\n    else if (Offset==3)\n    {\n      first = _mm_move_ss(first,second);\n      first = 
_mm_shuffle_ps(first,second,0x93);\n    }\n  }\n};\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet4i>\n{\n  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)\n  {\n    if (Offset==1)\n    {\n      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));\n      first = _mm_shuffle_epi32(first,0x39);\n    }\n    else if (Offset==2)\n    {\n      first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));\n      first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));\n    }\n    else if (Offset==3)\n    {\n      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));\n      first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));\n    }\n  }\n};\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet2d>\n{\n  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)\n  {\n    if (Offset==1)\n    {\n      first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));\n      first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));\n    }\n  }\n};\n#endif\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet4f,4>& kernel) {\n  _MM_TRANSPOSE4_PS(kernel.packet[0], kernel.packet[1], kernel.packet[2], kernel.packet[3]);\n}\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet2d,2>& kernel) {\n  __m128d tmp = _mm_unpackhi_pd(kernel.packet[0], kernel.packet[1]);\n  kernel.packet[0] = _mm_unpacklo_pd(kernel.packet[0], kernel.packet[1]);\n  kernel.packet[1] = tmp;\n}\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet4i,4>& kernel) {\n  __m128i T0 = _mm_unpacklo_epi32(kernel.packet[0], kernel.packet[1]);\n  __m128i T1 = _mm_unpacklo_epi32(kernel.packet[2], kernel.packet[3]);\n  __m128i T2 = _mm_unpackhi_epi32(kernel.packet[0], kernel.packet[1]);\n  __m128i T3 = 
_mm_unpackhi_epi32(kernel.packet[2], kernel.packet[3]);\n\n  kernel.packet[0] = _mm_unpacklo_epi64(T0, T1);\n  kernel.packet[1] = _mm_unpackhi_epi64(T0, T1);\n  kernel.packet[2] = _mm_unpacklo_epi64(T2, T3);\n  kernel.packet[3] = _mm_unpackhi_epi64(T2, T3);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {\n  const __m128i zero = _mm_setzero_si128();\n  const __m128i select = _mm_set_epi32(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);\n  __m128i false_mask = _mm_cmpeq_epi32(select, zero);\n#ifdef EIGEN_VECTORIZE_SSE4_1\n  return _mm_blendv_epi8(thenPacket, elsePacket, false_mask);\n#else\n  return _mm_or_si128(_mm_andnot_si128(false_mask, thenPacket), _mm_and_si128(false_mask, elsePacket));\n#endif\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {\n  const __m128 zero = _mm_setzero_ps();\n  const __m128 select = _mm_set_ps(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);\n  __m128 false_mask = _mm_cmpeq_ps(select, zero);\n#ifdef EIGEN_VECTORIZE_SSE4_1\n  return _mm_blendv_ps(thenPacket, elsePacket, false_mask);\n#else\n  return _mm_or_ps(_mm_andnot_ps(false_mask, thenPacket), _mm_and_ps(false_mask, elsePacket));\n#endif\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {\n  const __m128d zero = _mm_setzero_pd();\n  const __m128d select = _mm_set_pd(ifPacket.select[1], ifPacket.select[0]);\n  __m128d false_mask = _mm_cmpeq_pd(select, zero);\n#ifdef EIGEN_VECTORIZE_SSE4_1\n  return _mm_blendv_pd(thenPacket, elsePacket, false_mask);\n#else\n  return _mm_or_pd(_mm_andnot_pd(false_mask, thenPacket), _mm_and_pd(false_mask, elsePacket));\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pinsertfirst(const Packet4f& a, float b)\n{\n#ifdef 
EIGEN_VECTORIZE_SSE4_1\n  return _mm_blend_ps(a,pset1<Packet4f>(b),1);\n#else\n  return _mm_move_ss(a, _mm_load_ss(&b));\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pinsertfirst(const Packet2d& a, double b)\n{\n#ifdef EIGEN_VECTORIZE_SSE4_1\n  return _mm_blend_pd(a,pset1<Packet2d>(b),1);\n#else\n  return _mm_move_sd(a, _mm_load_sd(&b));\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pinsertlast(const Packet4f& a, float b)\n{\n#ifdef EIGEN_VECTORIZE_SSE4_1\n  return _mm_blend_ps(a,pset1<Packet4f>(b),(1<<3));\n#else\n  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x0,0x0,0x0,0xFFFFFFFF));\n  return _mm_or_ps(_mm_andnot_ps(mask, a), _mm_and_ps(mask, pset1<Packet4f>(b)));\n#endif\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pinsertlast(const Packet2d& a, double b)\n{\n#ifdef EIGEN_VECTORIZE_SSE4_1\n  return _mm_blend_pd(a,pset1<Packet2d>(b),(1<<1));\n#else\n  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x0,0xFFFFFFFF,0xFFFFFFFF));\n  return _mm_or_pd(_mm_andnot_pd(mask, a), _mm_and_pd(mask, pset1<Packet2d>(b)));\n#endif\n}\n\n// Scalar path for pmadd with FMA to ensure consistency with vectorized path.\n#ifdef __FMA__\ntemplate<> EIGEN_STRONG_INLINE float pmadd(const float& a, const float& b, const float& c) {\n  return ::fmaf(a,b,c);\n}\ntemplate<> EIGEN_STRONG_INLINE double pmadd(const double& a, const double& b, const double& c) {\n  return ::fma(a,b,c);\n}\n#endif\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_PACKET_MATH_SSE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/SSE/TypeCasting.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2015 Benoit Steiner <benoit.steiner.goog@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_TYPE_CASTING_SSE_H\n#define EIGEN_TYPE_CASTING_SSE_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate <>\nstruct type_casting_traits<float, int> {\n  enum {\n    VectorizedCast = 1,\n    SrcCoeffRatio = 1,\n    TgtCoeffRatio = 1\n  };\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pcast<Packet4f, Packet4i>(const Packet4f& a) {\n  return _mm_cvttps_epi32(a);\n}\n\n\ntemplate <>\nstruct type_casting_traits<int, float> {\n  enum {\n    VectorizedCast = 1,\n    SrcCoeffRatio = 1,\n    TgtCoeffRatio = 1\n  };\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pcast<Packet4i, Packet4f>(const Packet4i& a) {\n  return _mm_cvtepi32_ps(a);\n}\n\n\ntemplate <>\nstruct type_casting_traits<double, float> {\n  enum {\n    VectorizedCast = 1,\n    SrcCoeffRatio = 2,\n    TgtCoeffRatio = 1\n  };\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pcast<Packet2d, Packet4f>(const Packet2d& a, const Packet2d& b) {\n  return _mm_shuffle_ps(_mm_cvtpd_ps(a), _mm_cvtpd_ps(b), (1 << 2) | (1 << 6));\n}\n\ntemplate <>\nstruct type_casting_traits<float, double> {\n  enum {\n    VectorizedCast = 1,\n    SrcCoeffRatio = 1,\n    TgtCoeffRatio = 2\n  };\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pcast<Packet4f, Packet2d>(const Packet4f& a) {\n  // Simply discard the second half of the input\n  return _mm_cvtps_pd(a);\n}\n\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_TYPE_CASTING_SSE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/ZVector/Complex.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2016 Konstantinos Margaritis <markos@freevec.org>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_COMPLEX32_ALTIVEC_H\n#define EIGEN_COMPLEX32_ALTIVEC_H\n\nnamespace Eigen {\n\nnamespace internal {\n\nstatic Packet2ul  p2ul_CONJ_XOR1 = (Packet2ul) vec_sld((Packet4ui) p2d_ZERO_, (Packet4ui) p2l_ZERO, 8);//{ 0x8000000000000000, 0x0000000000000000 };\nstatic Packet2ul  p2ul_CONJ_XOR2 = (Packet2ul) vec_sld((Packet4ui) p2l_ZERO,  (Packet4ui) p2d_ZERO_, 8);//{ 0x8000000000000000, 0x0000000000000000 };\n\nstruct Packet1cd\n{\n  EIGEN_STRONG_INLINE Packet1cd() {}\n  EIGEN_STRONG_INLINE explicit Packet1cd(const Packet2d& a) : v(a) {}\n  Packet2d v;\n};\n\nstruct Packet2cf\n{\n  EIGEN_STRONG_INLINE Packet2cf() {}\n  EIGEN_STRONG_INLINE explicit Packet2cf(const Packet4f& a) : v(a) {}\n  union {\n    Packet4f v;\n    Packet1cd cd[2];\n  };\n};\n\ntemplate<> struct packet_traits<std::complex<float> >  : default_packet_traits\n{\n  typedef Packet2cf type;\n  typedef Packet2cf half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 2,\n    HasHalfPacket = 0,\n\n    HasAdd    = 1,\n    HasSub    = 1,\n    HasMul    = 1,\n    HasDiv    = 1,\n    HasNegate = 1,\n    HasAbs    = 0,\n    HasAbs2   = 0,\n    HasMin    = 0,\n    HasMax    = 0,\n    HasBlend  = 1,\n    HasSetLinear = 0\n  };\n};\n\n\ntemplate<> struct packet_traits<std::complex<double> >  : default_packet_traits\n{\n  typedef Packet1cd type;\n  typedef Packet1cd half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 1,\n    HasHalfPacket = 0,\n\n    HasAdd    = 1,\n    HasSub    = 1,\n    HasMul    = 1,\n    HasDiv 
   = 1,\n    HasNegate = 1,\n    HasAbs    = 0,\n    HasAbs2   = 0,\n    HasMin    = 0,\n    HasMax    = 0,\n    HasSetLinear = 0\n  };\n};\n\ntemplate<> struct unpacket_traits<Packet2cf> { typedef std::complex<float>  type; enum {size=2, alignment=Aligned16}; typedef Packet2cf half; };\ntemplate<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1, alignment=Aligned16}; typedef Packet1cd half; };\n\n/* Forward declaration */\nEIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel);\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pload <Packet2cf>(const std::complex<float>* from)  { EIGEN_DEBUG_ALIGNED_LOAD return Packet2cf(pload<Packet4f>((const float*)from)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pload <Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_ALIGNED_LOAD return Packet1cd(pload<Packet2d>((const double*)from)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf ploadu<Packet2cf>(const std::complex<float>* from)  { EIGEN_DEBUG_UNALIGNED_LOAD return Packet2cf(ploadu<Packet4f>((const float*)from)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd ploadu<Packet1cd>(const std::complex<double>* from) { EIGEN_DEBUG_UNALIGNED_LOAD return Packet1cd(ploadu<Packet2d>((const double*)from)); }\ntemplate<> EIGEN_STRONG_INLINE void pstore <std::complex<float> >(std::complex<float> *     to, const Packet2cf& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((float*)to, from.v); }\ntemplate<> EIGEN_STRONG_INLINE void pstore <std::complex<double> >(std::complex<double> *   to, const Packet1cd& from) { EIGEN_DEBUG_ALIGNED_STORE pstore((double*)to, from.v); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<std::complex<float> >(std::complex<float> *     to, const Packet2cf& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((float*)to, from.v); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<std::complex<double> >(std::complex<double> *   to, const Packet1cd& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu((double*)to, from.v); }\n\ntemplate<> 
EIGEN_STRONG_INLINE Packet1cd pset1<Packet1cd>(const std::complex<double>&  from)\n{ /* here we really have to use unaligned loads :( */ return ploadu<Packet1cd>(&from); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>&  from)\n{\n  Packet2cf res;\n  res.cd[0] = Packet1cd(vec_ld2f((const float *)&from));\n  res.cd[1] = res.cd[0];\n  return res;\n}\ntemplate<> EIGEN_DEVICE_FUNC inline Packet2cf pgather<std::complex<float>, Packet2cf>(const std::complex<float>* from, Index stride)\n{\n  std::complex<float> EIGEN_ALIGN16 af[2];\n  af[0] = from[0*stride];\n  af[1] = from[1*stride];\n  return pload<Packet2cf>(af);\n}\ntemplate<> EIGEN_DEVICE_FUNC inline Packet1cd pgather<std::complex<double>, Packet1cd>(const std::complex<double>* from, Index stride EIGEN_UNUSED)\n{\n  return pload<Packet1cd>(from);\n}\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<float>, Packet2cf>(std::complex<float>* to, const Packet2cf& from, Index stride)\n{\n  std::complex<float> EIGEN_ALIGN16 af[2];\n  pstore<std::complex<float> >((std::complex<float> *) af, from);\n  to[0*stride] = af[0];\n  to[1*stride] = af[1];\n}\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<std::complex<double>, Packet1cd>(std::complex<double>* to, const Packet1cd& from, Index stride EIGEN_UNUSED)\n{\n  pstore<std::complex<double> >(to, from);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(padd<Packet4f>(a.v, b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v + b.v); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(psub<Packet4f>(a.v, b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(a.v - b.v); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pnegate(const Packet1cd& a) { 
return Packet1cd(pnegate(Packet2d(a.v))); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf& a) { return Packet2cf(pnegate(Packet4f(a.v))); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pconj(const Packet1cd& a) { return Packet1cd((Packet2d)vec_xor((Packet2d)a.v, (Packet2d)p2ul_CONJ_XOR2)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf& a)\n{\n  Packet2cf res;\n  res.v.v4f[0] = pconj(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[0]))).v;\n  res.v.v4f[1] = pconj(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[1]))).v;\n  return res;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pmul<Packet1cd>(const Packet1cd& a, const Packet1cd& b)\n{\n  Packet2d a_re, a_im, v1, v2;\n\n  // Permute and multiply the real parts of a and b\n  a_re = vec_perm(a.v, a.v, p16uc_PSET64_HI);\n  // Get the imaginary parts of a\n  a_im = vec_perm(a.v, a.v, p16uc_PSET64_LO);\n  // multiply a_re * b\n  v1 = vec_madd(a_re, b.v, p2d_ZERO);\n  // multiply a_im * b and get the conjugate result\n  v2 = vec_madd(a_im, b.v, p2d_ZERO);\n  v2 = (Packet2d) vec_sld((Packet4ui)v2, (Packet4ui)v2, 8);\n  v2 = (Packet2d) vec_xor((Packet2d)v2, (Packet2d) p2ul_CONJ_XOR1);\n\n  return Packet1cd(v1 + v2);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pmul<Packet2cf>(const Packet2cf& a, const Packet2cf& b)\n{\n  Packet2cf res;\n  res.v.v4f[0] = pmul(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[0])), Packet1cd(reinterpret_cast<Packet2d>(b.v.v4f[0]))).v;\n  res.v.v4f[1] = pmul(Packet1cd(reinterpret_cast<Packet2d>(a.v.v4f[1])), Packet1cd(reinterpret_cast<Packet2d>(b.v.v4f[1]))).v;\n  return res;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pand   <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pand   <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pand<Packet4f>(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd por    <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { 
return Packet1cd(vec_or(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf por    <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(por<Packet4f>(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pxor   <Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_xor(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pxor   <Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pxor<Packet4f>(a.v,b.v)); }\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pandnot<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(vec_and(a.v, vec_nor(b.v,b.v))); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pandnot<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(pandnot<Packet4f>(a.v,b.v)); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd ploaddup<Packet1cd>(const std::complex<double>*     from) {  return pset1<Packet1cd>(*from); }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf ploaddup<Packet2cf>(const std::complex<float>*      from) {  return pset1<Packet2cf>(*from); }\n\ntemplate<> EIGEN_STRONG_INLINE void prefetch<std::complex<float> >(const std::complex<float> *     addr) { EIGEN_ZVECTOR_PREFETCH(addr); }\ntemplate<> EIGEN_STRONG_INLINE void prefetch<std::complex<double> >(const std::complex<double> *   addr) { EIGEN_ZVECTOR_PREFETCH(addr); }\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double>  pfirst<Packet1cd>(const Packet1cd& a)\n{\n  std::complex<double> EIGEN_ALIGN16 res;\n  pstore<std::complex<double> >(&res, a);\n\n  return res;\n}\ntemplate<> EIGEN_STRONG_INLINE std::complex<float>  pfirst<Packet2cf>(const Packet2cf& a)\n{\n  std::complex<float> EIGEN_ALIGN16 res[2];\n  pstore<std::complex<float> >(res, a);\n\n  return res[0];\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd preverse(const Packet1cd& a) { return a; }\ntemplate<> EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf& a)\n{\n  Packet2cf res;\n  res.cd[0] = a.cd[1];\n  res.cd[1] = a.cd[0];\n  return 
res;\n}\n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double> predux<Packet1cd>(const Packet1cd& a)\n{\n  return pfirst(a);\n}\ntemplate<> EIGEN_STRONG_INLINE std::complex<float> predux<Packet2cf>(const Packet2cf& a)\n{\n  std::complex<float> res;\n  Packet1cd b = padd<Packet1cd>(a.cd[0], a.cd[1]);\n  vec_st2f(b.v, (float*)&res);\n  return res;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd preduxp<Packet1cd>(const Packet1cd* vecs)\n{\n  return vecs[0];\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2cf preduxp<Packet2cf>(const Packet2cf* vecs)\n{\n  PacketBlock<Packet2cf,2> transpose;\n  transpose.packet[0] = vecs[0];\n  transpose.packet[1] = vecs[1];\n  ptranspose(transpose);\n\n  return padd<Packet2cf>(transpose.packet[0], transpose.packet[1]);\n} \n\ntemplate<> EIGEN_STRONG_INLINE std::complex<double> predux_mul<Packet1cd>(const Packet1cd& a)\n{\n  return pfirst(a);\n}\ntemplate<> EIGEN_STRONG_INLINE std::complex<float> predux_mul<Packet2cf>(const Packet2cf& a)\n{\n  std::complex<float> res;\n  Packet1cd b = pmul<Packet1cd>(a.cd[0], a.cd[1]);\n  vec_st2f(b.v, (float*)&res);\n  return res;\n}\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet1cd>\n{\n  static EIGEN_STRONG_INLINE void run(Packet1cd& /*first*/, const Packet1cd& /*second*/)\n  {\n    // FIXME is it sure we never have to align a Packet1cd?\n    // Even though a std::complex<double> has 16 bytes, it is not necessarily aligned on a 16 bytes boundary...\n  }\n};\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet2cf>\n{\n  static EIGEN_STRONG_INLINE void run(Packet2cf& first, const Packet2cf& second)\n  {\n    if (Offset == 1) {\n      first.cd[0] = first.cd[1];\n      first.cd[1] = second.cd[0];\n    }\n  }\n};\n\ntemplate<> struct conj_helper<Packet1cd, Packet1cd, false,true>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) 
const\n  {\n    return internal::pmul(a, pconj(b));\n  }\n};\n\ntemplate<> struct conj_helper<Packet1cd, Packet1cd, true,false>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const\n  {\n    return internal::pmul(pconj(a), b);\n  }\n};\n\ntemplate<> struct conj_helper<Packet1cd, Packet1cd, true,true>\n{\n  EIGEN_STRONG_INLINE Packet1cd pmadd(const Packet1cd& x, const Packet1cd& y, const Packet1cd& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet1cd pmul(const Packet1cd& a, const Packet1cd& b) const\n  {\n    return pconj(internal::pmul(a, b));\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cf, Packet2cf, false,true>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const\n  {\n    return internal::pmul(a, pconj(b));\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cf, Packet2cf, true,false>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const\n  {\n    return internal::pmul(pconj(a), b);\n  }\n};\n\ntemplate<> struct conj_helper<Packet2cf, Packet2cf, true,true>\n{\n  EIGEN_STRONG_INLINE Packet2cf pmadd(const Packet2cf& x, const Packet2cf& y, const Packet2cf& c) const\n  { return padd(pmul(x,y),c); }\n\n  EIGEN_STRONG_INLINE Packet2cf pmul(const Packet2cf& a, const Packet2cf& b) const\n  {\n    return pconj(internal::pmul(a, b));\n  }\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet1cd pdiv<Packet1cd>(const Packet1cd& a, const Packet1cd& b)\n{\n  // TODO optimize it for AltiVec\n  Packet1cd res = 
conj_helper<Packet1cd,Packet1cd,false,true>().pmul(a,b);\n  Packet2d s = vec_madd(b.v, b.v, p2d_ZERO_);\n  return Packet1cd(pdiv(res.v, s + vec_perm(s, s, p16uc_REVERSE64)));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pdiv<Packet2cf>(const Packet2cf& a, const Packet2cf& b)\n{\n  // TODO optimize it for AltiVec\n  Packet2cf res;\n  res.cd[0] = pdiv<Packet1cd>(a.cd[0], b.cd[0]);\n  res.cd[1] = pdiv<Packet1cd>(a.cd[1], b.cd[1]);\n  return res;\n}\n\nEIGEN_STRONG_INLINE Packet1cd pcplxflip/*<Packet1cd>*/(const Packet1cd& x)\n{\n  return Packet1cd(preverse(Packet2d(x.v)));\n}\n\nEIGEN_STRONG_INLINE Packet2cf pcplxflip/*<Packet2cf>*/(const Packet2cf& x)\n{\n  Packet2cf res;\n  res.cd[0] = pcplxflip(x.cd[0]);\n  res.cd[1] = pcplxflip(x.cd[1]);\n  return res;\n}\n\nEIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet1cd,2>& kernel)\n{\n  Packet2d tmp = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_HI);\n  kernel.packet[1].v = vec_perm(kernel.packet[0].v, kernel.packet[1].v, p16uc_TRANSPOSE64_LO);\n  kernel.packet[0].v = tmp;\n}\n\nEIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet2cf,2>& kernel)\n{\n  Packet1cd tmp = kernel.packet[0].cd[1];\n  kernel.packet[0].cd[1] = kernel.packet[1].cd[0];\n  kernel.packet[1].cd[0] = tmp;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2cf pblend(const Selector<2>& ifPacket, const Packet2cf& thenPacket, const Packet2cf& elsePacket) {\n  Packet2cf result;\n  const Selector<4> ifPacket4 = { ifPacket.select[0], ifPacket.select[0], ifPacket.select[1], ifPacket.select[1] };\n  result.v = pblend<Packet4f>(ifPacket4, thenPacket.v, elsePacket.v);\n  return result;\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_COMPLEX32_ALTIVEC_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/ZVector/MathFunctions.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2007 Julien Pommier\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2016 Konstantinos Margaritis <markos@freevec.org>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n/* The sin, cos, exp, and log functions of this file come from\n * Julien Pommier's sse math library: http://gruntthepeon.free.fr/ssemath/\n */\n\n#ifndef EIGEN_MATH_FUNCTIONS_ALTIVEC_H\n#define EIGEN_MATH_FUNCTIONS_ALTIVEC_H\n\nnamespace Eigen {\n\nnamespace internal {\n\nstatic _EIGEN_DECLARE_CONST_Packet2d(1 , 1.0);\nstatic _EIGEN_DECLARE_CONST_Packet2d(2 , 2.0);\nstatic _EIGEN_DECLARE_CONST_Packet2d(half, 0.5);\n\nstatic _EIGEN_DECLARE_CONST_Packet2d(exp_hi,  709.437);\nstatic _EIGEN_DECLARE_CONST_Packet2d(exp_lo, -709.436139303);\n\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_LOG2EF, 1.4426950408889634073599);\n\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p0, 1.26177193074810590878e-4);\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p1, 3.02994407707441961300e-2);\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_p2, 9.99999999999999999910e-1);\n\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q0, 3.00198505138664455042e-6);\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q1, 2.52448340349684104192e-3);\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q2, 2.27265548208155028766e-1);\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_q3, 2.00000000000000000009e0);\n\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C1, 0.693145751953125);\nstatic _EIGEN_DECLARE_CONST_Packet2d(cephes_exp_C2, 1.42860682030941723212e-6);\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket2d pexp<Packet2d>(const Packet2d& _x)\n{\n  Packet2d x = _x;\n\n  
Packet2d tmp, fx;\n  Packet2l emm0;\n\n  // clamp x\n  x = pmax(pmin(x, p2d_exp_hi), p2d_exp_lo);\n  /* express exp(x) as exp(g + n*log(2)) */\n  fx = pmadd(p2d_cephes_LOG2EF, x, p2d_half);\n\n  fx = vec_floor(fx);\n\n  tmp = pmul(fx, p2d_cephes_exp_C1);\n  Packet2d z = pmul(fx, p2d_cephes_exp_C2);\n  x = psub(x, tmp);\n  x = psub(x, z);\n\n  Packet2d x2 = pmul(x,x);\n\n  Packet2d px = p2d_cephes_exp_p0;\n  px = pmadd(px, x2, p2d_cephes_exp_p1);\n  px = pmadd(px, x2, p2d_cephes_exp_p2);\n  px = pmul (px, x);\n\n  Packet2d qx = p2d_cephes_exp_q0;\n  qx = pmadd(qx, x2, p2d_cephes_exp_q1);\n  qx = pmadd(qx, x2, p2d_cephes_exp_q2);\n  qx = pmadd(qx, x2, p2d_cephes_exp_q3);\n\n  x = pdiv(px,psub(qx,px));\n  x = pmadd(p2d_2,x,p2d_1);\n\n  // build 2^n\n  emm0 = vec_ctsl(fx, 0);\n\n  static const Packet2l p2l_1023 = { 1023, 1023 };\n  static const Packet2ul p2ul_52 = { 52, 52 };\n\n  emm0 = emm0 + p2l_1023;\n  emm0 = emm0 << reinterpret_cast<Packet2l>(p2ul_52);\n\n  // Altivec's max & min operators just drop silent NaNs. 
Check NaNs in \n  // inputs and return them unmodified.\n  Packet2ul isnumber_mask = reinterpret_cast<Packet2ul>(vec_cmpeq(_x, _x));\n  return vec_sel(_x, pmax(pmul(x, reinterpret_cast<Packet2d>(emm0)), _x),\n                 isnumber_mask);\n}\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f pexp<Packet4f>(const Packet4f& x)\n{\n  Packet4f res;\n  res.v4f[0] = pexp<Packet2d>(x.v4f[0]);\n  res.v4f[1] = pexp<Packet2d>(x.v4f[1]);\n  return res;\n}\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket2d psqrt<Packet2d>(const Packet2d& x)\n{\n  return  __builtin_s390_vfsqdb(x);\n}\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f psqrt<Packet4f>(const Packet4f& x)\n{\n  Packet4f res;\n  res.v4f[0] = psqrt<Packet2d>(x.v4f[0]);\n  res.v4f[1] = psqrt<Packet2d>(x.v4f[1]);\n  return res;\n}\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket2d prsqrt<Packet2d>(const Packet2d& x) {\n  // Unfortunately we can't use the much faster mm_rqsrt_pd since it only provides an approximation.\n  return pset1<Packet2d>(1.0) / psqrt<Packet2d>(x);\n}\n\ntemplate<> EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_UNUSED\nPacket4f prsqrt<Packet4f>(const Packet4f& x) {\n  Packet4f res;\n  res.v4f[0] = prsqrt<Packet2d>(x.v4f[0]);\n  res.v4f[1] = prsqrt<Packet2d>(x.v4f[1]);\n  return res;\n}\n\n}  // end namespace internal\n\n}  // end namespace Eigen\n\n#endif  // EIGEN_MATH_FUNCTIONS_ALTIVEC_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/arch/ZVector/PacketMath.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2016 Konstantinos Margaritis <markos@freevec.org>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PACKET_MATH_ZVECTOR_H\n#define EIGEN_PACKET_MATH_ZVECTOR_H\n\n#include <stdint.h>\n\nnamespace Eigen {\n\nnamespace internal {\n\n#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD\n#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4\n#endif\n\n#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD\n#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD\n#endif\n\n#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD\n#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD\n#endif\n\n#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS\n#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS  16\n#endif\n\ntypedef __vector int                 Packet4i;\ntypedef __vector unsigned int        Packet4ui;\ntypedef __vector __bool int          Packet4bi;\ntypedef __vector short int           Packet8i;\ntypedef __vector unsigned char       Packet16uc;\ntypedef __vector double              Packet2d;\ntypedef __vector unsigned long long  Packet2ul;\ntypedef __vector long long           Packet2l;\n\ntypedef struct {\n\tPacket2d  v4f[2];\n} Packet4f;\n\ntypedef union {\n  int32_t   i[4];\n  uint32_t ui[4];\n  int64_t   l[2];\n  uint64_t ul[2];\n  double    d[2];\n  Packet4i  v4i;\n  Packet4ui v4ui;\n  Packet2l  v2l;\n  Packet2ul v2ul;\n  Packet2d  v2d;\n} Packet;\n\n// We don't want to write the same code all the time, but we need to reuse the constants\n// and it doesn't really work to declare them global, so we define macros instead\n\n#define _EIGEN_DECLARE_CONST_FAST_Packet4i(NAME,X) \\\n  Packet4i p4i_##NAME = reinterpret_cast<Packet4i>(vec_splat_s32(X))\n\n#define _EIGEN_DECLARE_CONST_FAST_Packet2d(NAME,X) \\\n  Packet2d p2d_##NAME = 
reinterpret_cast<Packet2d>(vec_splat_s64(X))\n\n#define _EIGEN_DECLARE_CONST_FAST_Packet2l(NAME,X) \\\n  Packet2l p2l_##NAME = reinterpret_cast<Packet2l>(vec_splat_s64(X))\n\n#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \\\n  Packet4i p4i_##NAME = pset1<Packet4i>(X)\n\n#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \\\n  Packet2d p2d_##NAME = pset1<Packet2d>(X)\n\n#define _EIGEN_DECLARE_CONST_Packet2l(NAME,X) \\\n  Packet2l p2l_##NAME = pset1<Packet2l>(X)\n\n// These constants are endian-agnostic\n//static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0); //{ 0, 0, 0, 0,}\nstatic _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE, 1); //{ 1, 1, 1, 1}\n\nstatic _EIGEN_DECLARE_CONST_FAST_Packet2d(ZERO, 0);\nstatic _EIGEN_DECLARE_CONST_FAST_Packet2l(ZERO, 0);\nstatic _EIGEN_DECLARE_CONST_FAST_Packet2l(ONE, 1);\n\nstatic Packet2d p2d_ONE = { 1.0, 1.0 }; \nstatic Packet2d p2d_ZERO_ = { -0.0, -0.0 };\n\nstatic Packet4i p4i_COUNTDOWN = { 0, 1, 2, 3 };\nstatic Packet4f p4f_COUNTDOWN = { 0.0, 1.0, 2.0, 3.0 };\nstatic Packet2d p2d_COUNTDOWN = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet16uc>(p2d_ZERO), reinterpret_cast<Packet16uc>(p2d_ONE), 8));\n\nstatic Packet16uc p16uc_PSET64_HI = { 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 };\nstatic Packet16uc p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 };\n\n// Mask alignment\n#define _EIGEN_MASK_ALIGNMENT\t0xfffffffffffffff0\n\n#define _EIGEN_ALIGNED_PTR(x)\t((std::ptrdiff_t)(x) & _EIGEN_MASK_ALIGNMENT)\n\n// Handle endianness properly while loading constants\n// Define global static constants:\n\nstatic Packet16uc p16uc_FORWARD =   { 0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15 };\nstatic Packet16uc p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 };\nstatic Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };\n\nstatic Packet16uc p16uc_PSET32_WODD   = vec_sld((Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 0), (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 2), 8);//{ 0,1,2,3, 0,1,2,3, 
8,9,10,11, 8,9,10,11 };\nstatic Packet16uc p16uc_PSET32_WEVEN  = vec_sld(p16uc_DUPLICATE32_HI, (Packet16uc) vec_splat((Packet4ui)p16uc_FORWARD, 3), 8);//{ 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 };\n/*static Packet16uc p16uc_HALF64_0_16 = vec_sld((Packet16uc)p4i_ZERO, vec_splat((Packet16uc) vec_abs(p4i_MINUS16), 3), 8);      //{ 0,0,0,0, 0,0,0,0, 16,16,16,16, 16,16,16,16};\n\nstatic Packet16uc p16uc_PSET64_HI = (Packet16uc) vec_mergeh((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN);     //{ 0,1,2,3, 4,5,6,7, 0,1,2,3, 4,5,6,7 };*/\nstatic Packet16uc p16uc_PSET64_LO = (Packet16uc) vec_mergel((Packet4ui)p16uc_PSET32_WODD, (Packet4ui)p16uc_PSET32_WEVEN);     //{ 8,9,10,11, 12,13,14,15, 8,9,10,11, 12,13,14,15 };\n/*static Packet16uc p16uc_TRANSPOSE64_HI = vec_add(p16uc_PSET64_HI, p16uc_HALF64_0_16);                                         //{ 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23};\nstatic Packet16uc p16uc_TRANSPOSE64_LO = vec_add(p16uc_PSET64_LO, p16uc_HALF64_0_16);                                         //{ 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31};*/\nstatic Packet16uc p16uc_TRANSPOSE64_HI = { 0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23};\nstatic Packet16uc p16uc_TRANSPOSE64_LO = { 8,9,10,11, 12,13,14,15, 24,25,26,27, 28,29,30,31};\n\n//static Packet16uc p16uc_COMPLEX32_REV = vec_sld(p16uc_REVERSE32, p16uc_REVERSE32, 8);                                         //{ 4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11 };\n\n//static Packet16uc p16uc_COMPLEX32_REV2 = vec_sld(p16uc_FORWARD, p16uc_FORWARD, 8);                                            //{ 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };\n\n\n#if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC\n  #define EIGEN_ZVECTOR_PREFETCH(ADDR) __builtin_prefetch(ADDR);\n#else\n  #define EIGEN_ZVECTOR_PREFETCH(ADDR) asm( \"   pfd [%[addr]]\\n\" :: [addr] \"r\" (ADDR) : \"cc\" );\n#endif\n\ntemplate<> struct packet_traits<int>    : default_packet_traits\n{\n  typedef Packet4i type;\n  typedef 
Packet4i half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size = 4,\n    HasHalfPacket = 0,\n\n    HasAdd  = 1,\n    HasSub  = 1,\n    HasMul  = 1,\n    HasDiv  = 1,\n    HasBlend = 1\n  };\n};\n\ntemplate<> struct packet_traits<float> : default_packet_traits\n{\n  typedef Packet4f type;\n  typedef Packet4f half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=4,\n    HasHalfPacket = 0,\n\n    HasAdd  = 1,\n    HasSub  = 1,\n    HasMul  = 1,\n    HasDiv  = 1,\n    HasMin  = 1,\n    HasMax  = 1,\n    HasAbs  = 1,\n    HasSin  = 0,\n    HasCos  = 0,\n    HasLog  = 0,\n    HasExp  = 1,\n    HasSqrt = 1,\n    HasRsqrt = 1,\n    HasRound = 1,\n    HasFloor = 1,\n    HasCeil = 1,\n    HasNegate = 1,\n    HasBlend = 1\n  };\n};\n\ntemplate<> struct packet_traits<double> : default_packet_traits\n{\n  typedef Packet2d type;\n  typedef Packet2d half;\n  enum {\n    Vectorizable = 1,\n    AlignedOnScalar = 1,\n    size=2,\n    HasHalfPacket = 1,\n\n    HasAdd  = 1,\n    HasSub  = 1,\n    HasMul  = 1,\n    HasDiv  = 1,\n    HasMin  = 1,\n    HasMax  = 1,\n    HasAbs  = 1,\n    HasSin  = 0,\n    HasCos  = 0,\n    HasLog  = 0,\n    HasExp  = 1,\n    HasSqrt = 1,\n    HasRsqrt = 1,\n    HasRound = 1,\n    HasFloor = 1,\n    HasCeil = 1,\n    HasNegate = 1,\n    HasBlend = 1\n  };\n};\n\ntemplate<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4, alignment=Aligned16}; typedef Packet4i half; };\ntemplate<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4, alignment=Aligned16}; typedef Packet4f half; };\ntemplate<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2, alignment=Aligned16}; typedef Packet2d half; };\n\n/* Forward declaration */\nEIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4f,4>& kernel);\n \ninline std::ostream & operator <<(std::ostream & s, const Packet4i & v)\n{\n  Packet vt;\n  vt.v4i = v;\n  s << vt.i[0] << \", \" << vt.i[1] << \", \" << vt.i[2] << 
\", \" << vt.i[3];\n  return s;\n}\n\ninline std::ostream & operator <<(std::ostream & s, const Packet4ui & v)\n{\n  Packet vt;\n  vt.v4ui = v;\n  s << vt.ui[0] << \", \" << vt.ui[1] << \", \" << vt.ui[2] << \", \" << vt.ui[3];\n  return s;\n}\n\ninline std::ostream & operator <<(std::ostream & s, const Packet2l & v)\n{\n  Packet vt;\n  vt.v2l = v;\n  s << vt.l[0] << \", \" << vt.l[1];\n  return s;\n}\n\ninline std::ostream & operator <<(std::ostream & s, const Packet2ul & v)\n{\n  Packet vt;\n  vt.v2ul = v;\n  s << vt.ul[0] << \", \" << vt.ul[1] ;\n  return s;\n}\n\ninline std::ostream & operator <<(std::ostream & s, const Packet2d & v)\n{\n  Packet vt;\n  vt.v2d = v;\n  s << vt.d[0] << \", \" << vt.d[1];\n  return s;\n}\n\n/* Helper function to simulate a vec_splat_packet4f\n */\ntemplate<int element> EIGEN_STRONG_INLINE Packet4f vec_splat_packet4f(const Packet4f&   from)\n{\n  Packet4f splat;\n  switch (element) {\n  case 0:\n    splat.v4f[0] = vec_splat(from.v4f[0], 0);\n    splat.v4f[1] = splat.v4f[0];\n    break;\n  case 1:\n    splat.v4f[0] = vec_splat(from.v4f[0], 1);\n    splat.v4f[1] = splat.v4f[0];\n    break;\n  case 2:\n    splat.v4f[0] = vec_splat(from.v4f[1], 0);\n    splat.v4f[1] = splat.v4f[0];\n    break;\n  case 3:\n    splat.v4f[0] = vec_splat(from.v4f[1], 1);\n    splat.v4f[1] = splat.v4f[0];\n    break;\n  }\n  return splat;\n}\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet4i>\n{\n  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)\n  {\n    switch (Offset % 4) {\n    case 1:\n      first = vec_sld(first, second, 4); break;\n    case 2:\n      first = vec_sld(first, second, 8); break;\n    case 3:\n      first = vec_sld(first, second, 12); break;\n    }\n  }\n};\n\n/* This is a tricky one, we have to translate float alignment to vector elements of sizeof double\n */\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet4f>\n{\n  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& 
second)\n  {\n    switch (Offset % 4) {\n    case 1:\n      first.v4f[0] = vec_sld(first.v4f[0], first.v4f[1], 8);\n      first.v4f[1] = vec_sld(first.v4f[1], second.v4f[0], 8);\n      break;\n    case 2:\n      first.v4f[0] = first.v4f[1];\n      first.v4f[1] = second.v4f[0];\n      break;\n    case 3:\n      first.v4f[0] = vec_sld(first.v4f[1],  second.v4f[0], 8);\n      first.v4f[1] = vec_sld(second.v4f[0], second.v4f[1], 8);\n      break;\n    }\n  }\n};\n\n\ntemplate<int Offset>\nstruct palign_impl<Offset,Packet2d>\n{\n  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)\n  {\n    if (Offset == 1)\n      first = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(first), reinterpret_cast<Packet4i>(second), 8));\n  }\n};\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*     from)\n{\n  // FIXME: No intrinsic yet\n  EIGEN_DEBUG_ALIGNED_LOAD\n  Packet *vfrom;\n  vfrom = (Packet *) from;\n  return vfrom->v4i;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*   from)\n{\n  // FIXME: No intrinsic yet\n  EIGEN_DEBUG_ALIGNED_LOAD\n  Packet4f vfrom;\n  vfrom.v4f[0] = vec_ld2f(&from[0]);\n  vfrom.v4f[1] = vec_ld2f(&from[2]);\n  return vfrom;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from)\n{\n  // FIXME: No intrinsic yet\n  EIGEN_DEBUG_ALIGNED_LOAD\n  Packet *vfrom;\n  vfrom = (Packet *) from;\n  return vfrom->v2d;\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from)\n{\n  // FIXME: No intrinsic yet\n  EIGEN_DEBUG_ALIGNED_STORE\n  Packet *vto;\n  vto = (Packet *) to;\n  vto->v4i = from;\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from)\n{\n  // FIXME: No intrinsic yet\n  EIGEN_DEBUG_ALIGNED_STORE\n  vec_st2f(from.v4f[0], &to[0]);\n  vec_st2f(from.v4f[1], &to[2]);\n}\n\n\ntemplate<> EIGEN_STRONG_INLINE void pstore<double>(double*   to, const Packet2d& from)\n{\n  // FIXME: No 
intrinsic yet\n  EIGEN_DEBUG_ALIGNED_STORE\n  Packet *vto;\n  vto = (Packet *) to;\n  vto->v2d = from;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from)\n{\n  return vec_splats(from);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) {\n  return vec_splats(from);\n}\ntemplate<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&    from)\n{\n  Packet4f to;\n  to.v4f[0] = pset1<Packet2d>(static_cast<const double&>(from));\n  to.v4f[1] = to.v4f[0];\n  return to;\n}\n\ntemplate<> EIGEN_STRONG_INLINE void\npbroadcast4<Packet4i>(const int *a,\n                      Packet4i& a0, Packet4i& a1, Packet4i& a2, Packet4i& a3)\n{\n  a3 = pload<Packet4i>(a);\n  a0 = vec_splat(a3, 0);\n  a1 = vec_splat(a3, 1);\n  a2 = vec_splat(a3, 2);\n  a3 = vec_splat(a3, 3);\n}\n\ntemplate<> EIGEN_STRONG_INLINE void\npbroadcast4<Packet4f>(const float *a,\n                      Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)\n{\n  a3 = pload<Packet4f>(a);\n  a0 = vec_splat_packet4f<0>(a3);\n  a1 = vec_splat_packet4f<1>(a3);\n  a2 = vec_splat_packet4f<2>(a3);\n  a3 = vec_splat_packet4f<3>(a3);\n}\n\ntemplate<> EIGEN_STRONG_INLINE void\npbroadcast4<Packet2d>(const double *a,\n                      Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)\n{\n  a1 = pload<Packet2d>(a);\n  a0 = vec_splat(a1, 0);\n  a1 = vec_splat(a1, 1);\n  a3 = pload<Packet2d>(a+2);\n  a2 = vec_splat(a3, 0);\n  a3 = vec_splat(a3, 1);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)\n{\n  int EIGEN_ALIGN16 ai[4];\n  ai[0] = from[0*stride];\n  ai[1] = from[1*stride];\n  ai[2] = from[2*stride];\n  ai[3] = from[3*stride];\n return pload<Packet4i>(ai);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)\n{\n  float EIGEN_ALIGN16 ai[4];\n  ai[0] = from[0*stride];\n  ai[1] = from[1*stride];\n  ai[2] = from[2*stride];\n  ai[3] = 
from[3*stride];\n return pload<Packet4f>(ai);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)\n{\n  double EIGEN_ALIGN16 af[2];\n  af[0] = from[0*stride];\n  af[1] = from[1*stride];\n return pload<Packet2d>(af);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)\n{\n  int EIGEN_ALIGN16 ai[4];\n  pstore<int>((int *)ai, from);\n  to[0*stride] = ai[0];\n  to[1*stride] = ai[1];\n  to[2*stride] = ai[2];\n  to[3*stride] = ai[3];\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)\n{\n  float EIGEN_ALIGN16 ai[4];\n  pstore<float>((float *)ai, from);\n  to[0*stride] = ai[0];\n  to[1*stride] = ai[1];\n  to[2*stride] = ai[2];\n  to[3*stride] = ai[3];\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)\n{\n  double EIGEN_ALIGN16 af[2];\n  pstore<double>(af, from);\n  to[0*stride] = af[0];\n  to[1*stride] = af[1];\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a + b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n  Packet4f c;\n  c.v4f[0] = a.v4f[0] + b.v4f[0];\n  c.v4f[1] = a.v4f[1] + b.v4f[1];\n  return c;\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a + b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a - b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n  Packet4f c;\n  c.v4f[0] = a.v4f[0] - b.v4f[0];\n  c.v4f[1] = a.v4f[1] - b.v4f[1];\n  return c;\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a - b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i 
pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a * b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n  Packet4f c;\n  c.v4f[0] = a.v4f[0] * b.v4f[0];\n  c.v4f[1] = a.v4f[1] * b.v4f[1];\n  return c;\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a * b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& a, const Packet4i& b) { return (a / b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n  Packet4f c;\n  c.v4f[0] = a.v4f[0] / b.v4f[0];\n  c.v4f[1] = a.v4f[1] / b.v4f[1];\n  return c;\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return (a / b); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return (-a); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)\n{\n  Packet4f c;\n  c.v4f[0] = -a.v4f[0];\n  c.v4f[1] = -a.v4f[1];\n  return c;\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return (-a); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd<Packet4i>(pmul<Packet4i>(a, b), c); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)\n{\n  Packet4f res;\n  res.v4f[0] = vec_madd(a.v4f[0], b.v4f[0], c.v4f[0]);\n  res.v4f[1] = vec_madd(a.v4f[1], b.v4f[1], c.v4f[1]);\n  return res;\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vec_madd(a, b, c); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a)   
 { return padd<Packet4i>(pset1<Packet4i>(a), p4i_COUNTDOWN); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)  { return padd<Packet4f>(pset1<Packet4f>(a), p4f_COUNTDOWN); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return padd<Packet2d>(pset1<Packet2d>(a), p2d_COUNTDOWN); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_min(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n  Packet4f res;\n  res.v4f[0] = pmin(a.v4f[0], b.v4f[0]);\n  res.v4f[1] = pmin(a.v4f[1], b.v4f[1]);\n  return res;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_max(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n  Packet4f res;\n  res.v4f[0] = pmax(a.v4f[0], b.v4f[0]);\n  res.v4f[1] = pmax(a.v4f[1], b.v4f[1]);\n  return res;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n  Packet4f res;\n  res.v4f[0] = pand(a.v4f[0], b.v4f[0]);\n  res.v4f[1] = pand(a.v4f[1], b.v4f[1]);\n  return res;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_or(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f 
por<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n  Packet4f res;\n  res.v4f[0] = por(a.v4f[0], b.v4f[0]);\n  res.v4f[1] = por(a.v4f[1], b.v4f[1]);\n  return res;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_xor(a, b); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n  Packet4f res;\n  res.v4f[0] = pxor(a.v4f[0], b.v4f[0]);\n  res.v4f[1] = pxor(a.v4f[1], b.v4f[1]);\n  return res;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return pand<Packet4i>(a, vec_nor(b, b)); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return vec_and(a, vec_nor(b, b)); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)\n{\n  Packet4f res;\n  res.v4f[0] = pandnot(a.v4f[0], b.v4f[0]);\n  res.v4f[1] = pandnot(a.v4f[1], b.v4f[1]);\n  return res;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a)\n{\n  Packet4f res;\n  res.v4f[0] = vec_round(a.v4f[0]);\n  res.v4f[1] = vec_round(a.v4f[1]);\n  return res;\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a) { return vec_round(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const  Packet4f& a)\n{\n  Packet4f res;\n  res.v4f[0] = vec_ceil(a.v4f[0]);\n  res.v4f[1] = vec_ceil(a.v4f[1]);\n  return res;\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const  Packet2d& a) { return vec_ceil(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)\n{\n  Packet4f res;\n  res.v4f[0] = vec_floor(a.v4f[0]);\n  res.v4f[1] = vec_floor(a.v4f[1]);\n  return res;\n}\ntemplate<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return 
vec_floor(a); }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int*       from) { return pload<Packet4i>(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float*     from) { return pload<Packet4f>(from); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double*    from) { return pload<Packet2d>(from); }\n\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)\n{\n  Packet4i p = pload<Packet4i>(from);\n  return vec_perm(p, p, p16uc_DUPLICATE32_HI);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*    from)\n{\n  Packet4f p = pload<Packet4f>(from);\n  p.v4f[1] = vec_splat(p.v4f[0], 1);\n  p.v4f[0] = vec_splat(p.v4f[0], 0);\n  return p;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double*   from)\n{\n  Packet2d p = pload<Packet2d>(from);\n  return vec_perm(p, p, p16uc_PSET64_HI);\n}\n\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<int>(int*        to, const Packet4i& from) { pstore<int>(to, from); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<float>(float*    to, const Packet4f& from) { pstore<float>(to, from); }\ntemplate<> EIGEN_STRONG_INLINE void pstoreu<double>(double*  to, const Packet2d& from) { pstore<double>(to, from); }\n\ntemplate<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { EIGEN_ZVECTOR_PREFETCH(addr); }\ntemplate<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { EIGEN_ZVECTOR_PREFETCH(addr); }\ntemplate<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_ZVECTOR_PREFETCH(addr); }\n\ntemplate<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int    EIGEN_ALIGN16 x[4]; pstore(x, a); return x[0]; }\ntemplate<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float  EIGEN_ALIGN16 x[2]; vec_st2f(a.v4f[0], &x[0]); return x[0]; }\ntemplate<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double EIGEN_ALIGN16 x[2]; 
pstore(x, a); return x[0]; }\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)\n{\n  return reinterpret_cast<Packet4i>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE32));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)\n{\n  return reinterpret_cast<Packet2d>(vec_perm(reinterpret_cast<Packet16uc>(a), reinterpret_cast<Packet16uc>(a), p16uc_REVERSE64));\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)\n{\n  Packet4f rev;\n  rev.v4f[0] = preverse<Packet2d>(a.v4f[1]);\n  rev.v4f[1] = preverse<Packet2d>(a.v4f[0]);\n  return rev;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pabs<Packet4i>(const Packet4i& a) { return vec_abs(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet2d pabs<Packet2d>(const Packet2d& a) { return vec_abs(a); }\ntemplate<> EIGEN_STRONG_INLINE Packet4f pabs<Packet4f>(const Packet4f& a)\n{\n  Packet4f res;\n  res.v4f[0] = pabs(a.v4f[0]);\n  res.v4f[1] = pabs(a.v4f[1]);\n  return res;\n}\n\ntemplate<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)\n{\n  Packet4i b, sum;\n  b   = vec_sld(a, a, 8);\n  sum = padd<Packet4i>(a, b);\n  b   = vec_sld(sum, sum, 4);\n  sum = padd<Packet4i>(sum, b);\n  return pfirst(sum);\n}\n\ntemplate<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)\n{\n  Packet2d b, sum;\n  b   = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8));\n  sum = padd<Packet2d>(a, b);\n  return pfirst(sum);\n}\ntemplate<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)\n{\n  Packet2d sum;\n  sum = padd<Packet2d>(a.v4f[0], a.v4f[1]);\n  double first = predux<Packet2d>(sum);\n  return static_cast<float>(first);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)\n{\n  Packet4i v[4], sum[4];\n\n  // It's easier and faster to transpose then add as columns\n  // Check: 
http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation\n  // Do the transpose, first set of moves\n  v[0] = vec_mergeh(vecs[0], vecs[2]);\n  v[1] = vec_mergel(vecs[0], vecs[2]);\n  v[2] = vec_mergeh(vecs[1], vecs[3]);\n  v[3] = vec_mergel(vecs[1], vecs[3]);\n  // Get the resulting vectors\n  sum[0] = vec_mergeh(v[0], v[2]);\n  sum[1] = vec_mergel(v[0], v[2]);\n  sum[2] = vec_mergeh(v[1], v[3]);\n  sum[3] = vec_mergel(v[1], v[3]);\n\n  // Now do the summation:\n  // Lines 0+1\n  sum[0] = padd<Packet4i>(sum[0], sum[1]);\n  // Lines 2+3\n  sum[1] = padd<Packet4i>(sum[2], sum[3]);\n  // Add the results\n  sum[0] = padd<Packet4i>(sum[0], sum[1]);\n\n  return sum[0];\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)\n{\n  Packet2d v[2], sum;\n  v[0] = padd<Packet2d>(vecs[0], reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(vecs[0]), reinterpret_cast<Packet4ui>(vecs[0]), 8)));\n  v[1] = padd<Packet2d>(vecs[1], reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(vecs[1]), reinterpret_cast<Packet4ui>(vecs[1]), 8)));\n \n  sum = reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4ui>(v[0]), reinterpret_cast<Packet4ui>(v[1]), 8));\n\n  return sum;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)\n{\n  PacketBlock<Packet4f,4> transpose;\n  transpose.packet[0] = vecs[0];\n  transpose.packet[1] = vecs[1];\n  transpose.packet[2] = vecs[2];\n  transpose.packet[3] = vecs[3];\n  ptranspose(transpose);\n\n  Packet4f sum = padd(transpose.packet[0], transpose.packet[1]);\n  sum = padd(sum, transpose.packet[2]);\n  sum = padd(sum, transpose.packet[3]);\n  return sum;\n}\n\n// Other reduction functions:\n// mul\ntemplate<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)\n{\n  EIGEN_ALIGN16 int aux[4];\n  pstore(aux, a);\n  return aux[0] * aux[1] * aux[2] * aux[3];\n}\n\ntemplate<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& 
a)\n{\n  return pfirst(pmul(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));\n}\n\ntemplate<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)\n{\n  // Return predux_mul<Packet2d> of the subvectors product\n  return static_cast<float>(pfirst(predux_mul(pmul(a.v4f[0], a.v4f[1]))));\n}\n\n// min\ntemplate<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)\n{\n  Packet4i b, res;\n  b   = pmin<Packet4i>(a, vec_sld(a, a, 8));\n  res = pmin<Packet4i>(b, vec_sld(b, b, 4));\n  return pfirst(res);\n}\n\ntemplate<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)\n{\n  return pfirst(pmin<Packet2d>(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));\n}\n\ntemplate<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)\n{\n  Packet2d b, res;\n  b   = pmin<Packet2d>(a.v4f[0], a.v4f[1]);\n  res = pmin<Packet2d>(b, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(b), reinterpret_cast<Packet4i>(b), 8)));\n  return static_cast<float>(pfirst(res));\n}\n\n// max\ntemplate<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)\n{\n  Packet4i b, res;\n  b = pmax<Packet4i>(a, vec_sld(a, a, 8));\n  res = pmax<Packet4i>(b, vec_sld(b, b, 4));\n  return pfirst(res);\n}\n\n// max\ntemplate<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)\n{\n  return pfirst(pmax<Packet2d>(a, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(a), reinterpret_cast<Packet4i>(a), 8))));\n}\n\ntemplate<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)\n{\n  Packet2d b, res;\n  b   = pmax<Packet2d>(a.v4f[0], a.v4f[1]);\n  res = pmax<Packet2d>(b, reinterpret_cast<Packet2d>(vec_sld(reinterpret_cast<Packet4i>(b), reinterpret_cast<Packet4i>(b), 8)));\n  return static_cast<float>(pfirst(res));\n}\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet4i,4>& kernel) 
{\n  Packet4i t0 = vec_mergeh(kernel.packet[0], kernel.packet[2]);\n  Packet4i t1 = vec_mergel(kernel.packet[0], kernel.packet[2]);\n  Packet4i t2 = vec_mergeh(kernel.packet[1], kernel.packet[3]);\n  Packet4i t3 = vec_mergel(kernel.packet[1], kernel.packet[3]);\n  kernel.packet[0] = vec_mergeh(t0, t2);\n  kernel.packet[1] = vec_mergel(t0, t2);\n  kernel.packet[2] = vec_mergeh(t1, t3);\n  kernel.packet[3] = vec_mergel(t1, t3);\n}\n\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet2d,2>& kernel) {\n  Packet2d t0 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_HI);\n  Packet2d t1 = vec_perm(kernel.packet[0], kernel.packet[1], p16uc_TRANSPOSE64_LO);\n  kernel.packet[0] = t0;\n  kernel.packet[1] = t1;\n}\n\n/* Split the Packet4f PacketBlock into 4 Packet2d PacketBlocks and transpose each one\n */\nEIGEN_DEVICE_FUNC inline void\nptranspose(PacketBlock<Packet4f,4>& kernel) {\n  PacketBlock<Packet2d,2> t0,t1,t2,t3;\n  // copy top-left 2x2 Packet2d block\n  t0.packet[0] = kernel.packet[0].v4f[0];\n  t0.packet[1] = kernel.packet[1].v4f[0];\n\n  // copy top-right 2x2 Packet2d block\n  t1.packet[0] = kernel.packet[0].v4f[1];\n  t1.packet[1] = kernel.packet[1].v4f[1];\n\n  // copy bottom-left 2x2 Packet2d block\n  t2.packet[0] = kernel.packet[2].v4f[0];\n  t2.packet[1] = kernel.packet[3].v4f[0];\n\n  // copy bottom-right 2x2 Packet2d block\n  t3.packet[0] = kernel.packet[2].v4f[1];\n  t3.packet[1] = kernel.packet[3].v4f[1];\n\n  // Transpose all 2x2 blocks\n  ptranspose(t0);\n  ptranspose(t1);\n  ptranspose(t2);\n  ptranspose(t3);\n\n  // Copy back transposed blocks, but exchange t1 and t2 due to transposition\n  kernel.packet[0].v4f[0] = t0.packet[0];\n  kernel.packet[0].v4f[1] = t2.packet[0];\n  kernel.packet[1].v4f[0] = t0.packet[1];\n  kernel.packet[1].v4f[1] = t2.packet[1];\n  kernel.packet[2].v4f[0] = t1.packet[0];\n  kernel.packet[2].v4f[1] = t3.packet[0];\n  kernel.packet[3].v4f[0] = t1.packet[1];\n  kernel.packet[3].v4f[1] = 
t3.packet[1];\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {\n  Packet4ui select = { ifPacket.select[0], ifPacket.select[1], ifPacket.select[2], ifPacket.select[3] };\n  Packet4ui mask = vec_cmpeq(select, reinterpret_cast<Packet4ui>(p4i_ONE));\n  return vec_sel(elsePacket, thenPacket, mask);\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {\n  Packet2ul select_hi = { ifPacket.select[0], ifPacket.select[1] };\n  Packet2ul select_lo = { ifPacket.select[2], ifPacket.select[3] };\n  Packet2ul mask_hi = vec_cmpeq(select_hi, reinterpret_cast<Packet2ul>(p2l_ONE));\n  Packet2ul mask_lo = vec_cmpeq(select_lo, reinterpret_cast<Packet2ul>(p2l_ONE));\n  Packet4f result;\n  result.v4f[0] = vec_sel(elsePacket.v4f[0], thenPacket.v4f[0], mask_hi);\n  result.v4f[1] = vec_sel(elsePacket.v4f[1], thenPacket.v4f[1], mask_lo);\n  return result;\n}\n\ntemplate<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {\n  Packet2ul select = { ifPacket.select[0], ifPacket.select[1] };\n  Packet2ul mask = vec_cmpeq(select, reinterpret_cast<Packet2ul>(p2l_ONE));\n  return vec_sel(elsePacket, thenPacket, mask);\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_PACKET_MATH_ZVECTOR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/functors/AssignmentFunctors.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ASSIGNMENT_FUNCTORS_H\n#define EIGEN_ASSIGNMENT_FUNCTORS_H\n\nnamespace Eigen {\n\nnamespace internal {\n  \n/** \\internal\n  * \\brief Template functor for scalar/packet assignment\n  *\n  */\ntemplate<typename DstScalar,typename SrcScalar> struct assign_op {\n\n  EIGEN_EMPTY_STRUCT_CTOR(assign_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a = b; }\n  \n  template<int Alignment, typename Packet>\n  EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const\n  { internal::pstoret<DstScalar,Packet,Alignment>(a,b); }\n};\n\n// Empty overload for void type (used by PermutationMatrix)\ntemplate<typename DstScalar> struct assign_op<DstScalar,void> {};\n\ntemplate<typename DstScalar,typename SrcScalar>\nstruct functor_traits<assign_op<DstScalar,SrcScalar> > {\n  enum {\n    Cost = NumTraits<DstScalar>::ReadCost,\n    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::Vectorizable && packet_traits<SrcScalar>::Vectorizable\n  };\n};\n\n/** \\internal\n  * \\brief Template functor for scalar/packet assignment with addition\n  *\n  */\ntemplate<typename DstScalar,typename SrcScalar> struct add_assign_op {\n\n  EIGEN_EMPTY_STRUCT_CTOR(add_assign_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a += b; }\n  \n  template<int Alignment, typename Packet>\n  EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const\n  { 
internal::pstoret<DstScalar,Packet,Alignment>(a,internal::padd(internal::ploadt<Packet,Alignment>(a),b)); }\n};\ntemplate<typename DstScalar,typename SrcScalar>\nstruct functor_traits<add_assign_op<DstScalar,SrcScalar> > {\n  enum {\n    Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::AddCost,\n    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasAdd\n  };\n};\n\n/** \\internal\n  * \\brief Template functor for scalar/packet assignment with subtraction\n  *\n  */\ntemplate<typename DstScalar,typename SrcScalar> struct sub_assign_op {\n\n  EIGEN_EMPTY_STRUCT_CTOR(sub_assign_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a -= b; }\n  \n  template<int Alignment, typename Packet>\n  EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const\n  { internal::pstoret<DstScalar,Packet,Alignment>(a,internal::psub(internal::ploadt<Packet,Alignment>(a),b)); }\n};\ntemplate<typename DstScalar,typename SrcScalar>\nstruct functor_traits<sub_assign_op<DstScalar,SrcScalar> > {\n  enum {\n    Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::AddCost,\n    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasSub\n  };\n};\n\n/** \\internal\n  * \\brief Template functor for scalar/packet assignment with multiplication\n  *\n  */\ntemplate<typename DstScalar, typename SrcScalar=DstScalar>\nstruct mul_assign_op {\n\n  EIGEN_EMPTY_STRUCT_CTOR(mul_assign_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a *= b; }\n  \n  template<int Alignment, typename Packet>\n  EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const\n  { internal::pstoret<DstScalar,Packet,Alignment>(a,internal::pmul(internal::ploadt<Packet,Alignment>(a),b)); }\n};\ntemplate<typename DstScalar, typename SrcScalar>\nstruct functor_traits<mul_assign_op<DstScalar,SrcScalar> > {\n  enum {\n    
Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::MulCost,\n    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasMul\n  };\n};\n\n/** \\internal\n  * \\brief Template functor for scalar/packet assignment with diviving\n  *\n  */\ntemplate<typename DstScalar, typename SrcScalar=DstScalar> struct div_assign_op {\n\n  EIGEN_EMPTY_STRUCT_CTOR(div_assign_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a /= b; }\n  \n  template<int Alignment, typename Packet>\n  EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const\n  { internal::pstoret<DstScalar,Packet,Alignment>(a,internal::pdiv(internal::ploadt<Packet,Alignment>(a),b)); }\n};\ntemplate<typename DstScalar, typename SrcScalar>\nstruct functor_traits<div_assign_op<DstScalar,SrcScalar> > {\n  enum {\n    Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::MulCost,\n    PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasDiv\n  };\n};\n\n/** \\internal\n  * \\brief Template functor for scalar/packet assignment with swapping\n  *\n  * It works as follow. For a non-vectorized evaluation loop, we have:\n  *   for(i) func(A.coeffRef(i), B.coeff(i));\n  * where B is a SwapWrapper expression. 
The trick is to make SwapWrapper::coeff behaves like a non-const coeffRef.\n  * Actually, SwapWrapper might not even be needed since even if B is a plain expression, since it has to be writable\n  * B.coeff already returns a const reference to the underlying scalar value.\n  * \n  * The case of a vectorized loop is more tricky:\n  *   for(i,j) func.assignPacket<A_Align>(&A.coeffRef(i,j), B.packet<B_Align>(i,j));\n  * Here, B must be a SwapWrapper whose packet function actually returns a proxy object holding a Scalar*,\n  * the actual alignment and Packet type.\n  *\n  */\ntemplate<typename Scalar> struct swap_assign_op {\n\n  EIGEN_EMPTY_STRUCT_CTOR(swap_assign_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Scalar& a, const Scalar& b) const\n  {\n#ifdef __CUDACC__\n    // FIXME is there some kind of cuda::swap?\n    Scalar t=b; const_cast<Scalar&>(b)=a; a=t;\n#else\n    using std::swap;\n    swap(a,const_cast<Scalar&>(b));\n#endif\n  }\n};\ntemplate<typename Scalar>\nstruct functor_traits<swap_assign_op<Scalar> > {\n  enum {\n    Cost = 3 * NumTraits<Scalar>::ReadCost,\n    PacketAccess = packet_traits<Scalar>::Vectorizable\n  };\n};\n\n} // namespace internal\n\n} // namespace Eigen\n\n#endif // EIGEN_ASSIGNMENT_FUNCTORS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/functors/BinaryFunctors.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_BINARY_FUNCTORS_H\n#define EIGEN_BINARY_FUNCTORS_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n//---------- associative binary functors ----------\n\ntemplate<typename Arg1, typename Arg2>\nstruct binary_op_base\n{\n  typedef Arg1 first_argument_type;\n  typedef Arg2 second_argument_type;\n};\n\n/** \\internal\n  * \\brief Template functor to compute the sum of two scalars\n  *\n  * \\sa class CwiseBinaryOp, MatrixBase::operator+, class VectorwiseOp, DenseBase::sum()\n  */\ntemplate<typename LhsScalar,typename RhsScalar>\nstruct scalar_sum_op : binary_op_base<LhsScalar,RhsScalar>\n{\n  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_sum_op>::ReturnType result_type;\n#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op)\n#else\n  scalar_sum_op() {\n    EIGEN_SCALAR_BINARY_OP_PLUGIN\n  }\n#endif\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a + b; }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const\n  { return internal::padd(a,b); }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const\n  { return internal::predux(a); }\n};\ntemplate<typename LhsScalar,typename RhsScalar>\nstruct functor_traits<scalar_sum_op<LhsScalar,RhsScalar> > {\n  enum {\n    Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2, // rough estimate!\n    PacketAccess = is_same<LhsScalar,RhsScalar>::value && 
packet_traits<LhsScalar>::HasAdd && packet_traits<RhsScalar>::HasAdd\n    // TODO vectorize mixed sum\n  };\n};\n\n/** \\internal\n  * \\brief Template specialization to deprecate the summation of boolean expressions.\n  * This is required to solve Bug 426.\n  * \\sa DenseBase::count(), DenseBase::any(), ArrayBase::cast(), MatrixBase::cast()\n  */\ntemplate<> struct scalar_sum_op<bool,bool> : scalar_sum_op<int,int> {\n  EIGEN_DEPRECATED\n  scalar_sum_op() {}\n};\n\n\n/** \\internal\n  * \\brief Template functor to compute the product of two scalars\n  *\n  * \\sa class CwiseBinaryOp, Cwise::operator*(), class VectorwiseOp, MatrixBase::redux()\n  */\ntemplate<typename LhsScalar,typename RhsScalar>\nstruct scalar_product_op  : binary_op_base<LhsScalar,RhsScalar>\n{\n  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_product_op>::ReturnType result_type;\n#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op)\n#else\n  scalar_product_op() {\n    EIGEN_SCALAR_BINARY_OP_PLUGIN\n  }\n#endif\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const\n  { return internal::pmul(a,b); }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const\n  { return internal::predux_mul(a); }\n};\ntemplate<typename LhsScalar,typename RhsScalar>\nstruct functor_traits<scalar_product_op<LhsScalar,RhsScalar> > {\n  enum {\n    Cost = (NumTraits<LhsScalar>::MulCost + NumTraits<RhsScalar>::MulCost)/2, // rough estimate!\n    PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasMul && packet_traits<RhsScalar>::HasMul\n    // TODO vectorize mixed product\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the conjugate product of two 
scalars\n  *\n  * This is a short cut for conj(x) * y which is needed for optimization purpose; in Eigen2 support mode, this becomes x * conj(y)\n  */\ntemplate<typename LhsScalar,typename RhsScalar>\nstruct scalar_conj_product_op  : binary_op_base<LhsScalar,RhsScalar>\n{\n\n  enum {\n    Conj = NumTraits<LhsScalar>::IsComplex\n  };\n  \n  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_conj_product_op>::ReturnType result_type;\n  \n  EIGEN_EMPTY_STRUCT_CTOR(scalar_conj_product_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const\n  { return conj_helper<LhsScalar,RhsScalar,Conj,false>().pmul(a,b); }\n  \n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const\n  { return conj_helper<Packet,Packet,Conj,false>().pmul(a,b); }\n};\ntemplate<typename LhsScalar,typename RhsScalar>\nstruct functor_traits<scalar_conj_product_op<LhsScalar,RhsScalar> > {\n  enum {\n    Cost = NumTraits<LhsScalar>::MulCost,\n    PacketAccess = internal::is_same<LhsScalar, RhsScalar>::value && packet_traits<LhsScalar>::HasMul\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the min of two scalars\n  *\n  * \\sa class CwiseBinaryOp, MatrixBase::cwiseMin, class VectorwiseOp, MatrixBase::minCoeff()\n  */\ntemplate<typename LhsScalar,typename RhsScalar>\nstruct scalar_min_op : binary_op_base<LhsScalar,RhsScalar>\n{\n  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_min_op>::ReturnType result_type;\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return numext::mini(a, b); }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const\n  { return internal::pmin(a,b); }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const\n  { return internal::predux_min(a); }\n};\ntemplate<typename LhsScalar,typename RhsScalar>\nstruct functor_traits<scalar_min_op<LhsScalar,RhsScalar> > {\n  enum {\n    Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2,\n    PacketAccess = internal::is_same<LhsScalar, RhsScalar>::value && packet_traits<LhsScalar>::HasMin\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the max of two scalars\n  *\n  * \\sa class CwiseBinaryOp, MatrixBase::cwiseMax, class VectorwiseOp, MatrixBase::maxCoeff()\n  */\ntemplate<typename LhsScalar,typename RhsScalar>\nstruct scalar_max_op  : binary_op_base<LhsScalar,RhsScalar>\n{\n  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_max_op>::ReturnType result_type;\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return numext::maxi(a, b); }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const\n  { return internal::pmax(a,b); }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const\n  { return internal::predux_max(a); }\n};\ntemplate<typename LhsScalar,typename RhsScalar>\nstruct functor_traits<scalar_max_op<LhsScalar,RhsScalar> > {\n  enum {\n    Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2,\n    PacketAccess = internal::is_same<LhsScalar, RhsScalar>::value && packet_traits<LhsScalar>::HasMax\n  };\n};\n\n/** \\internal\n  * \\brief Template functors for comparison of two scalars\n  * \\todo Implement packet-comparisons\n  */\ntemplate<typename LhsScalar, typename RhsScalar, ComparisonName cmp> struct scalar_cmp_op;\n\ntemplate<typename LhsScalar, typename RhsScalar, ComparisonName cmp>\nstruct 
functor_traits<scalar_cmp_op<LhsScalar,RhsScalar, cmp> > {\n  enum {\n    Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2,\n    PacketAccess = false\n  };\n};\n\ntemplate<ComparisonName Cmp, typename LhsScalar, typename RhsScalar>\nstruct result_of<scalar_cmp_op<LhsScalar, RhsScalar, Cmp>(LhsScalar,RhsScalar)> {\n  typedef bool type;\n};\n\n\ntemplate<typename LhsScalar, typename RhsScalar>\nstruct scalar_cmp_op<LhsScalar,RhsScalar, cmp_EQ> : binary_op_base<LhsScalar,RhsScalar>\n{\n  typedef bool result_type;\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a==b;}\n};\ntemplate<typename LhsScalar, typename RhsScalar>\nstruct scalar_cmp_op<LhsScalar,RhsScalar, cmp_LT> : binary_op_base<LhsScalar,RhsScalar>\n{\n  typedef bool result_type;\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a<b;}\n};\ntemplate<typename LhsScalar, typename RhsScalar>\nstruct scalar_cmp_op<LhsScalar,RhsScalar, cmp_LE> : binary_op_base<LhsScalar,RhsScalar>\n{\n  typedef bool result_type;\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a<=b;}\n};\ntemplate<typename LhsScalar, typename RhsScalar>\nstruct scalar_cmp_op<LhsScalar,RhsScalar, cmp_GT> : binary_op_base<LhsScalar,RhsScalar>\n{\n  typedef bool result_type;\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a>b;}\n};\ntemplate<typename LhsScalar, typename RhsScalar>\nstruct scalar_cmp_op<LhsScalar,RhsScalar, cmp_GE> : binary_op_base<LhsScalar,RhsScalar>\n{\n  typedef bool result_type;\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, 
const RhsScalar& b) const {return a>=b;}\n};\ntemplate<typename LhsScalar, typename RhsScalar>\nstruct scalar_cmp_op<LhsScalar,RhsScalar, cmp_UNORD> : binary_op_base<LhsScalar,RhsScalar>\n{\n  typedef bool result_type;\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return !(a<=b || b<=a);}\n};\ntemplate<typename LhsScalar, typename RhsScalar>\nstruct scalar_cmp_op<LhsScalar,RhsScalar, cmp_NEQ> : binary_op_base<LhsScalar,RhsScalar>\n{\n  typedef bool result_type;\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(const LhsScalar& a, const RhsScalar& b) const {return a!=b;}\n};\n\n\n/** \\internal\n  * \\brief Template functor to compute the hypot of two scalars\n  *\n  * \\sa MatrixBase::stableNorm(), class Redux\n  */\ntemplate<typename Scalar>\nstruct scalar_hypot_op<Scalar,Scalar> : binary_op_base<Scalar,Scalar>\n{\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_hypot_op)\n//   typedef typename NumTraits<Scalar>::Real result_type;\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const\n  {\n    EIGEN_USING_STD_MATH(sqrt)\n    Scalar p, qp;\n    if(_x>_y)\n    {\n      p = _x;\n      qp = _y / p;\n    }\n    else\n    {\n      p = _y;\n      qp = _x / p;\n    }\n    return p * sqrt(Scalar(1) + qp*qp);\n  }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_hypot_op<Scalar,Scalar> > {\n  enum\n  {\n    Cost = 3 * NumTraits<Scalar>::AddCost +\n           2 * NumTraits<Scalar>::MulCost +\n           2 * scalar_div_cost<Scalar,false>::value,\n    PacketAccess = false\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the pow of two scalars\n  */\ntemplate<typename Scalar, typename Exponent>\nstruct scalar_pow_op  : binary_op_base<Scalar,Exponent>\n{\n  typedef typename ScalarBinaryOpTraits<Scalar,Exponent,scalar_pow_op>::ReturnType result_type;\n#ifndef 
EIGEN_SCALAR_BINARY_OP_PLUGIN\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_pow_op)\n#else\n  scalar_pow_op() {\n    typedef Scalar LhsScalar;\n    typedef Exponent RhsScalar;\n    EIGEN_SCALAR_BINARY_OP_PLUGIN\n  }\n#endif\n  EIGEN_DEVICE_FUNC\n  inline result_type operator() (const Scalar& a, const Exponent& b) const { return numext::pow(a, b); }\n};\ntemplate<typename Scalar, typename Exponent>\nstruct functor_traits<scalar_pow_op<Scalar,Exponent> > {\n  enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false };\n};\n\n\n\n//---------- non associative binary functors ----------\n\n/** \\internal\n  * \\brief Template functor to compute the difference of two scalars\n  *\n  * \\sa class CwiseBinaryOp, MatrixBase::operator-\n  */\ntemplate<typename LhsScalar,typename RhsScalar>\nstruct scalar_difference_op : binary_op_base<LhsScalar,RhsScalar>\n{\n  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_difference_op>::ReturnType result_type;\n#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op)\n#else\n  scalar_difference_op() {\n    EIGEN_SCALAR_BINARY_OP_PLUGIN\n  }\n#endif\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a - b; }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const\n  { return internal::psub(a,b); }\n};\ntemplate<typename LhsScalar,typename RhsScalar>\nstruct functor_traits<scalar_difference_op<LhsScalar,RhsScalar> > {\n  enum {\n    Cost = (NumTraits<LhsScalar>::AddCost+NumTraits<RhsScalar>::AddCost)/2,\n    PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasSub && packet_traits<RhsScalar>::HasSub\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the quotient of two scalars\n  *\n  * \\sa class CwiseBinaryOp, Cwise::operator/()\n  */\ntemplate<typename LhsScalar,typename RhsScalar>\nstruct 
scalar_quotient_op  : binary_op_base<LhsScalar,RhsScalar>\n{\n  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar,scalar_quotient_op>::ReturnType result_type;\n#ifndef EIGEN_SCALAR_BINARY_OP_PLUGIN\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op)\n#else\n  scalar_quotient_op() {\n    EIGEN_SCALAR_BINARY_OP_PLUGIN\n  }\n#endif\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a / b; }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const\n  { return internal::pdiv(a,b); }\n};\ntemplate<typename LhsScalar,typename RhsScalar>\nstruct functor_traits<scalar_quotient_op<LhsScalar,RhsScalar> > {\n  typedef typename scalar_quotient_op<LhsScalar,RhsScalar>::result_type result_type;\n  enum {\n    PacketAccess = is_same<LhsScalar,RhsScalar>::value && packet_traits<LhsScalar>::HasDiv && packet_traits<RhsScalar>::HasDiv,\n    Cost = scalar_div_cost<result_type,PacketAccess>::value\n  };\n};\n\n\n\n/** \\internal\n  * \\brief Template functor to compute the and of two booleans\n  *\n  * \\sa class CwiseBinaryOp, ArrayBase::operator&&\n  */\nstruct scalar_boolean_and_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_and_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a && b; }\n};\ntemplate<> struct functor_traits<scalar_boolean_and_op> {\n  enum {\n    Cost = NumTraits<bool>::AddCost,\n    PacketAccess = false\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the or of two booleans\n  *\n  * \\sa class CwiseBinaryOp, ArrayBase::operator||\n  */\nstruct scalar_boolean_or_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_or_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a || b; }\n};\ntemplate<> struct functor_traits<scalar_boolean_or_op> {\n  enum {\n    Cost = NumTraits<bool>::AddCost,\n    
PacketAccess = false\n  };\n};\n\n/** \\internal\n * \\brief Template functor to compute the xor of two booleans\n *\n * \\sa class CwiseBinaryOp, ArrayBase::operator^\n */\nstruct scalar_boolean_xor_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_xor_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a ^ b; }\n};\ntemplate<> struct functor_traits<scalar_boolean_xor_op> {\n  enum {\n    Cost = NumTraits<bool>::AddCost,\n    PacketAccess = false\n  };\n};\n\n\n\n//---------- binary functors bound to a constant, thus appearing as a unary functor ----------\n\n// The following two classes permits to turn any binary functor into a unary one with one argument bound to a constant value.\n// They are analogues to std::binder1st/binder2nd but with the following differences:\n//  - they are compatible with packetOp\n//  - they are portable across C++ versions (the std::binder* are deprecated in C++11)\ntemplate<typename BinaryOp> struct bind1st_op : BinaryOp {\n\n  typedef typename BinaryOp::first_argument_type  first_argument_type;\n  typedef typename BinaryOp::second_argument_type second_argument_type;\n  typedef typename BinaryOp::result_type          result_type;\n\n  bind1st_op(const first_argument_type &val) : m_value(val) {}\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const second_argument_type& b) const { return BinaryOp::operator()(m_value,b); }\n\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& b) const\n  { return BinaryOp::packetOp(internal::pset1<Packet>(m_value), b); }\n\n  first_argument_type m_value;\n};\ntemplate<typename BinaryOp> struct functor_traits<bind1st_op<BinaryOp> > : functor_traits<BinaryOp> {};\n\n\ntemplate<typename BinaryOp> struct bind2nd_op : BinaryOp {\n\n  typedef typename BinaryOp::first_argument_type  first_argument_type;\n  typedef typename BinaryOp::second_argument_type second_argument_type;\n  
typedef typename BinaryOp::result_type          result_type;\n\n  bind2nd_op(const second_argument_type &val) : m_value(val) {}\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const first_argument_type& a) const { return BinaryOp::operator()(a,m_value); }\n\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const\n  { return BinaryOp::packetOp(a,internal::pset1<Packet>(m_value)); }\n\n  second_argument_type m_value;\n};\ntemplate<typename BinaryOp> struct functor_traits<bind2nd_op<BinaryOp> > : functor_traits<BinaryOp> {};\n\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_BINARY_FUNCTORS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/functors/NullaryFunctors.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2016 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_NULLARY_FUNCTORS_H\n#define EIGEN_NULLARY_FUNCTORS_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<typename Scalar>\nstruct scalar_constant_op {\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_constant_op(const scalar_constant_op& other) : m_other(other.m_other) { }\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE scalar_constant_op(const Scalar& other) : m_other(other) { }\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() () const { return m_other; }\n  template<typename PacketType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const PacketType packetOp() const { return internal::pset1<PacketType>(m_other); }\n  const Scalar m_other;\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_constant_op<Scalar> >\n{ enum { Cost = 0 /* as the constant value should be loaded in register only once for the whole expression */,\n         PacketAccess = packet_traits<Scalar>::Vectorizable, IsRepeatable = true }; };\n\ntemplate<typename Scalar> struct scalar_identity_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_identity_op)\n  template<typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType row, IndexType col) const { return row==col ? 
Scalar(1) : Scalar(0); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_identity_op<Scalar> >\n{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = false, IsRepeatable = true }; };\n\ntemplate <typename Scalar, typename Packet, bool IsInteger> struct linspaced_op_impl;\n\ntemplate <typename Scalar, typename Packet>\nstruct linspaced_op_impl<Scalar,Packet,/*IsInteger*/false>\n{\n  linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) :\n    m_low(low), m_high(high), m_size1(num_steps==1 ? 1 : num_steps-1), m_step(num_steps==1 ? Scalar() : (high-low)/Scalar(num_steps-1)),\n    m_interPacket(plset<Packet>(0)),\n    m_flip(numext::abs(high)<numext::abs(low))\n  {}\n\n  template<typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType i) const {\n    if(m_flip)\n      return (i==0)? m_low : (m_high - (m_size1-i)*m_step);\n    else\n      return (i==m_size1)? m_high : (m_low + i*m_step);\n  }\n\n  template<typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType i) const\n  {\n    // Principle:\n    // [low, ..., low] + ( [step, ..., step] * ( [i, ..., i] + [0, ..., size] ) )\n    if(m_flip)\n    {\n      Packet pi = padd(pset1<Packet>(Scalar(i-m_size1)),m_interPacket);\n      Packet res = padd(pset1<Packet>(m_high), pmul(pset1<Packet>(m_step), pi));\n      if(i==0)\n        res = pinsertfirst(res, m_low);\n      return res;\n    }\n    else\n    {\n      Packet pi = padd(pset1<Packet>(Scalar(i)),m_interPacket);\n      Packet res = padd(pset1<Packet>(m_low), pmul(pset1<Packet>(m_step), pi));\n      if(i==m_size1-unpacket_traits<Packet>::size+1)\n        res = pinsertlast(res, m_high);\n      return res;\n    }\n  }\n\n  const Scalar m_low;\n  const Scalar m_high;\n  const Index m_size1;\n  const Scalar m_step;\n  const Packet m_interPacket;\n  const bool m_flip;\n};\n\ntemplate <typename Scalar, typename Packet>\nstruct 
linspaced_op_impl<Scalar,Packet,/*IsInteger*/true>\n{\n  linspaced_op_impl(const Scalar& low, const Scalar& high, Index num_steps) :\n    m_low(low),\n    m_multiplier((high-low)/convert_index<Scalar>(num_steps<=1 ? 1 : num_steps-1)),\n    m_divisor(convert_index<Scalar>((high>=low?num_steps:-num_steps)+(high-low))/((numext::abs(high-low)+1)==0?1:(numext::abs(high-low)+1))),\n    m_use_divisor(num_steps>1 && (numext::abs(high-low)+1)<num_steps)\n  {}\n\n  template<typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE\n  const Scalar operator() (IndexType i) const\n  {\n    if(m_use_divisor) return m_low + convert_index<Scalar>(i)/m_divisor;\n    else              return m_low + convert_index<Scalar>(i)*m_multiplier;\n  }\n\n  const Scalar m_low;\n  const Scalar m_multiplier;\n  const Scalar m_divisor;\n  const bool m_use_divisor;\n};\n\n// ----- Linspace functor ----------------------------------------------------------------\n\n// Forward declaration (we default to random access which does not really give\n// us a speed gain when using packet access but it allows to use the functor in\n// nested expressions).\ntemplate <typename Scalar, typename PacketType> struct linspaced_op;\ntemplate <typename Scalar, typename PacketType> struct functor_traits< linspaced_op<Scalar,PacketType> >\n{\n  enum\n  {\n    Cost = 1,\n    PacketAccess =   (!NumTraits<Scalar>::IsInteger) && packet_traits<Scalar>::HasSetLinear && packet_traits<Scalar>::HasBlend,\n                  /*&& ((!NumTraits<Scalar>::IsInteger) || packet_traits<Scalar>::HasDiv),*/ // <- vectorization for integer is currently disabled\n    IsRepeatable = true\n  };\n};\ntemplate <typename Scalar, typename PacketType> struct linspaced_op\n{\n  linspaced_op(const Scalar& low, const Scalar& high, Index num_steps)\n    : impl((num_steps==1 ? 
high : low),high,num_steps)\n  {}\n\n  template<typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (IndexType i) const { return impl(i); }\n\n  template<typename Packet,typename IndexType>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(IndexType i) const { return impl.packetOp(i); }\n\n  // This proxy object handles the actual required temporaries and the different\n  // implementations (integer vs. floating point).\n  const linspaced_op_impl<Scalar,PacketType,NumTraits<Scalar>::IsInteger> impl;\n};\n\n// Linear access is automatically determined from the operator() prototypes available for the given functor.\n// If it exposes an operator()(i,j), then we assume the i and j coefficients are required independently\n// and linear access is not possible. In all other cases, linear access is enabled.\n// Users should not have to deal with this structure.\ntemplate<typename Functor> struct functor_has_linear_access { enum { ret = !has_binary_operator<Functor>::value }; };\n\n// For unreliable compilers, let's specialize the has_*ary_operator\n// helpers so that at least built-in nullary functors work fine.\n#if !( (EIGEN_COMP_MSVC>1600) || (EIGEN_GNUC_AT_LEAST(4,8)) || (EIGEN_COMP_ICC>=1600))\ntemplate<typename Scalar,typename IndexType>\nstruct has_nullary_operator<scalar_constant_op<Scalar>,IndexType> { enum { value = 1}; };\ntemplate<typename Scalar,typename IndexType>\nstruct has_unary_operator<scalar_constant_op<Scalar>,IndexType> { enum { value = 0}; };\ntemplate<typename Scalar,typename IndexType>\nstruct has_binary_operator<scalar_constant_op<Scalar>,IndexType> { enum { value = 0}; };\n\ntemplate<typename Scalar,typename IndexType>\nstruct has_nullary_operator<scalar_identity_op<Scalar>,IndexType> { enum { value = 0}; };\ntemplate<typename Scalar,typename IndexType>\nstruct has_unary_operator<scalar_identity_op<Scalar>,IndexType> { enum { value = 0}; };\ntemplate<typename Scalar,typename IndexType>\nstruct 
has_binary_operator<scalar_identity_op<Scalar>,IndexType> { enum { value = 1}; };\n\ntemplate<typename Scalar, typename PacketType,typename IndexType>\nstruct has_nullary_operator<linspaced_op<Scalar,PacketType>,IndexType> { enum { value = 0}; };\ntemplate<typename Scalar, typename PacketType,typename IndexType>\nstruct has_unary_operator<linspaced_op<Scalar,PacketType>,IndexType> { enum { value = 1}; };\ntemplate<typename Scalar, typename PacketType,typename IndexType>\nstruct has_binary_operator<linspaced_op<Scalar,PacketType>,IndexType> { enum { value = 0}; };\n\ntemplate<typename Scalar,typename IndexType>\nstruct has_nullary_operator<scalar_random_op<Scalar>,IndexType> { enum { value = 1}; };\ntemplate<typename Scalar,typename IndexType>\nstruct has_unary_operator<scalar_random_op<Scalar>,IndexType> { enum { value = 0}; };\ntemplate<typename Scalar,typename IndexType>\nstruct has_binary_operator<scalar_random_op<Scalar>,IndexType> { enum { value = 0}; };\n#endif\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_NULLARY_FUNCTORS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/functors/StlFunctors.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_STL_FUNCTORS_H\n#define EIGEN_STL_FUNCTORS_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n// default functor traits for STL functors:\n\ntemplate<typename T>\nstruct functor_traits<std::multiplies<T> >\n{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };\n\ntemplate<typename T>\nstruct functor_traits<std::divides<T> >\n{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };\n\ntemplate<typename T>\nstruct functor_traits<std::plus<T> >\n{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };\n\ntemplate<typename T>\nstruct functor_traits<std::minus<T> >\n{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };\n\ntemplate<typename T>\nstruct functor_traits<std::negate<T> >\n{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };\n\ntemplate<typename T>\nstruct functor_traits<std::logical_or<T> >\n{ enum { Cost = 1, PacketAccess = false }; };\n\ntemplate<typename T>\nstruct functor_traits<std::logical_and<T> >\n{ enum { Cost = 1, PacketAccess = false }; };\n\ntemplate<typename T>\nstruct functor_traits<std::logical_not<T> >\n{ enum { Cost = 1, PacketAccess = false }; };\n\ntemplate<typename T>\nstruct functor_traits<std::greater<T> >\n{ enum { Cost = 1, PacketAccess = false }; };\n\ntemplate<typename T>\nstruct functor_traits<std::less<T> >\n{ enum { Cost = 1, PacketAccess = false }; };\n\ntemplate<typename T>\nstruct functor_traits<std::greater_equal<T> >\n{ enum { Cost = 1, PacketAccess = false }; };\n\ntemplate<typename T>\nstruct functor_traits<std::less_equal<T> >\n{ enum { Cost = 1, PacketAccess = false }; 
};\n\ntemplate<typename T>\nstruct functor_traits<std::equal_to<T> >\n{ enum { Cost = 1, PacketAccess = false }; };\n\ntemplate<typename T>\nstruct functor_traits<std::not_equal_to<T> >\n{ enum { Cost = 1, PacketAccess = false }; };\n\n#if (__cplusplus < 201103L) && (EIGEN_COMP_MSVC <= 1900)\n// std::binder* are deprecated since c++11 and will be removed in c++17\ntemplate<typename T>\nstruct functor_traits<std::binder2nd<T> >\n{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };\n\ntemplate<typename T>\nstruct functor_traits<std::binder1st<T> >\n{ enum { Cost = functor_traits<T>::Cost, PacketAccess = false }; };\n#endif\n\ntemplate<typename T>\nstruct functor_traits<std::unary_negate<T> >\n{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };\n\ntemplate<typename T>\nstruct functor_traits<std::binary_negate<T> >\n{ enum { Cost = 1 + functor_traits<T>::Cost, PacketAccess = false }; };\n\n#ifdef EIGEN_STDEXT_SUPPORT\n\ntemplate<typename T0,typename T1>\nstruct functor_traits<std::project1st<T0,T1> >\n{ enum { Cost = 0, PacketAccess = false }; };\n\ntemplate<typename T0,typename T1>\nstruct functor_traits<std::project2nd<T0,T1> >\n{ enum { Cost = 0, PacketAccess = false }; };\n\ntemplate<typename T0,typename T1>\nstruct functor_traits<std::select2nd<std::pair<T0,T1> > >\n{ enum { Cost = 0, PacketAccess = false }; };\n\ntemplate<typename T0,typename T1>\nstruct functor_traits<std::select1st<std::pair<T0,T1> > >\n{ enum { Cost = 0, PacketAccess = false }; };\n\ntemplate<typename T0,typename T1>\nstruct functor_traits<std::unary_compose<T0,T1> >\n{ enum { Cost = functor_traits<T0>::Cost + functor_traits<T1>::Cost, PacketAccess = false }; };\n\ntemplate<typename T0,typename T1,typename T2>\nstruct functor_traits<std::binary_compose<T0,T1,T2> >\n{ enum { Cost = functor_traits<T0>::Cost + functor_traits<T1>::Cost + functor_traits<T2>::Cost, PacketAccess = false }; };\n\n#endif // EIGEN_STDEXT_SUPPORT\n\n// allow to add new functors and 
specializations of functor_traits from outside Eigen.\n// this macro is really needed because functor_traits must be specialized after it is declared but before it is used...\n#ifdef EIGEN_FUNCTORS_PLUGIN\n#include EIGEN_FUNCTORS_PLUGIN\n#endif\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_STL_FUNCTORS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/functors/TernaryFunctors.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2016 Eugene Brevdo <ebrevdo@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_TERNARY_FUNCTORS_H\n#define EIGEN_TERNARY_FUNCTORS_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n//---------- associative ternary functors ----------\n\n\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_TERNARY_FUNCTORS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/functors/UnaryFunctors.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2016 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_UNARY_FUNCTORS_H\n#define EIGEN_UNARY_FUNCTORS_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n/** \\internal\n  * \\brief Template functor to compute the opposite of a scalar\n  *\n  * \\sa class CwiseUnaryOp, MatrixBase::operator-\n  */\ntemplate<typename Scalar> struct scalar_opposite_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_opposite_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return -a; }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const\n  { return internal::pnegate(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_opposite_op<Scalar> >\n{ enum {\n    Cost = NumTraits<Scalar>::AddCost,\n    PacketAccess = packet_traits<Scalar>::HasNegate };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the absolute value of a scalar\n  *\n  * \\sa class CwiseUnaryOp, Cwise::abs\n  */\ntemplate<typename Scalar> struct scalar_abs_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_abs_op)\n  typedef typename NumTraits<Scalar>::Real result_type;\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return numext::abs(a); }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const\n  { return internal::pabs(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_abs_op<Scalar> >\n{\n  enum {\n    Cost = NumTraits<Scalar>::AddCost,\n    PacketAccess = packet_traits<Scalar>::HasAbs\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to 
compute the score of a scalar, to chose a pivot\n  *\n  * \\sa class CwiseUnaryOp\n  */\ntemplate<typename Scalar> struct scalar_score_coeff_op : scalar_abs_op<Scalar>\n{\n  typedef void Score_is_abs;\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_score_coeff_op<Scalar> > : functor_traits<scalar_abs_op<Scalar> > {};\n\n/* Avoid recomputing abs when we know the score and they are the same. Not a true Eigen functor.  */\ntemplate<typename Scalar, typename=void> struct abs_knowing_score\n{\n  EIGEN_EMPTY_STRUCT_CTOR(abs_knowing_score)\n  typedef typename NumTraits<Scalar>::Real result_type;\n  template<typename Score>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a, const Score&) const { return numext::abs(a); }\n};\ntemplate<typename Scalar> struct abs_knowing_score<Scalar, typename scalar_score_coeff_op<Scalar>::Score_is_abs>\n{\n  EIGEN_EMPTY_STRUCT_CTOR(abs_knowing_score)\n  typedef typename NumTraits<Scalar>::Real result_type;\n  template<typename Scal>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scal&, const result_type& a) const { return a; }\n};\n\n/** \\internal\n  * \\brief Template functor to compute the squared absolute value of a scalar\n  *\n  * \\sa class CwiseUnaryOp, Cwise::abs2\n  */\ntemplate<typename Scalar> struct scalar_abs2_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_abs2_op)\n  typedef typename NumTraits<Scalar>::Real result_type;\n  EIGEN_DEVICE_FUNC\n  EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return numext::abs2(a); }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const\n  { return internal::pmul(a,a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_abs2_op<Scalar> >\n{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasAbs2 }; };\n\n/** \\internal\n  * \\brief Template functor to compute the conjugate of a complex value\n 
 *\n  * \\sa class CwiseUnaryOp, MatrixBase::conjugate()\n  */\ntemplate<typename Scalar> struct scalar_conjugate_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op)\n  EIGEN_DEVICE_FUNC\n  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { using numext::conj; return conj(a); }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_conjugate_op<Scalar> >\n{\n  enum {\n    Cost = NumTraits<Scalar>::IsComplex ? NumTraits<Scalar>::AddCost : 0,\n    PacketAccess = packet_traits<Scalar>::HasConj\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the phase angle of a complex\n  *\n  * \\sa class CwiseUnaryOp, Cwise::arg\n  */\ntemplate<typename Scalar> struct scalar_arg_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_arg_op)\n  typedef typename NumTraits<Scalar>::Real result_type;\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { using numext::arg; return arg(a); }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const\n  { return internal::parg(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_arg_op<Scalar> >\n{\n  enum {\n    Cost = NumTraits<Scalar>::IsComplex ? 
5 * NumTraits<Scalar>::MulCost : NumTraits<Scalar>::AddCost,\n    PacketAccess = packet_traits<Scalar>::HasArg\n  };\n};\n/** \\internal\n  * \\brief Template functor to cast a scalar to another type\n  *\n  * \\sa class CwiseUnaryOp, MatrixBase::cast()\n  */\ntemplate<typename Scalar, typename NewType>\nstruct scalar_cast_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op)\n  typedef NewType result_type;\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return cast<Scalar, NewType>(a); }\n};\ntemplate<typename Scalar, typename NewType>\nstruct functor_traits<scalar_cast_op<Scalar,NewType> >\n{ enum { Cost = is_same<Scalar, NewType>::value ? 0 : NumTraits<NewType>::AddCost, PacketAccess = false }; };\n\n/** \\internal\n  * \\brief Template functor to extract the real part of a complex\n  *\n  * \\sa class CwiseUnaryOp, MatrixBase::real()\n  */\ntemplate<typename Scalar>\nstruct scalar_real_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_real_op)\n  typedef typename NumTraits<Scalar>::Real result_type;\n  EIGEN_DEVICE_FUNC\n  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::real(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_real_op<Scalar> >\n{ enum { Cost = 0, PacketAccess = false }; };\n\n/** \\internal\n  * \\brief Template functor to extract the imaginary part of a complex\n  *\n  * \\sa class CwiseUnaryOp, MatrixBase::imag()\n  */\ntemplate<typename Scalar>\nstruct scalar_imag_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_op)\n  typedef typename NumTraits<Scalar>::Real result_type;\n  EIGEN_DEVICE_FUNC\n  EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return numext::imag(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_imag_op<Scalar> >\n{ enum { Cost = 0, PacketAccess = false }; };\n\n/** \\internal\n  * \\brief Template functor to extract the real part of a complex as a reference\n  *\n  * \\sa class CwiseUnaryOp, MatrixBase::real()\n  
*/\ntemplate<typename Scalar>\nstruct scalar_real_ref_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_real_ref_op)\n  typedef typename NumTraits<Scalar>::Real result_type;\n  EIGEN_DEVICE_FUNC\n  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::real_ref(*const_cast<Scalar*>(&a)); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_real_ref_op<Scalar> >\n{ enum { Cost = 0, PacketAccess = false }; };\n\n/** \\internal\n  * \\brief Template functor to extract the imaginary part of a complex as a reference\n  *\n  * \\sa class CwiseUnaryOp, MatrixBase::imag()\n  */\ntemplate<typename Scalar>\nstruct scalar_imag_ref_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_ref_op)\n  typedef typename NumTraits<Scalar>::Real result_type;\n  EIGEN_DEVICE_FUNC\n  EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return numext::imag_ref(*const_cast<Scalar*>(&a)); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_imag_ref_op<Scalar> >\n{ enum { Cost = 0, PacketAccess = false }; };\n\n/** \\internal\n  *\n  * \\brief Template functor to compute the exponential of a scalar\n  *\n  * \\sa class CwiseUnaryOp, Cwise::exp()\n  */\ntemplate<typename Scalar> struct scalar_exp_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_exp_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::exp(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pexp(a); }\n};\ntemplate <typename Scalar>\nstruct functor_traits<scalar_exp_op<Scalar> > {\n  enum {\n    PacketAccess = packet_traits<Scalar>::HasExp,\n    // The following numbers are based on the AVX implementation.\n#ifdef EIGEN_VECTORIZE_FMA\n    // Haswell can issue 2 add/mul/madd per cycle.\n    Cost =\n    (sizeof(Scalar) == 4\n     // float: 8 pmadd, 4 pmul, 2 padd/psub, 6 other\n     ? 
(8 * NumTraits<Scalar>::AddCost + 6 * NumTraits<Scalar>::MulCost)\n     // double: 7 pmadd, 5 pmul, 3 padd/psub, 1 div,  13 other\n     : (14 * NumTraits<Scalar>::AddCost +\n        6 * NumTraits<Scalar>::MulCost +\n        scalar_div_cost<Scalar,packet_traits<Scalar>::HasDiv>::value))\n#else\n    Cost =\n    (sizeof(Scalar) == 4\n     // float: 7 pmadd, 6 pmul, 4 padd/psub, 10 other\n     ? (21 * NumTraits<Scalar>::AddCost + 13 * NumTraits<Scalar>::MulCost)\n     // double: 7 pmadd, 5 pmul, 3 padd/psub, 1 div,  13 other\n     : (23 * NumTraits<Scalar>::AddCost +\n        12 * NumTraits<Scalar>::MulCost +\n        scalar_div_cost<Scalar,packet_traits<Scalar>::HasDiv>::value))\n#endif\n  };\n};\n\n/** \\internal\n  *\n  * \\brief Template functor to compute the exponential of a scalar - 1.\n  *\n  * \\sa class CwiseUnaryOp, ArrayBase::expm1()\n  */\ntemplate<typename Scalar> struct scalar_expm1_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_expm1_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::expm1(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pexpm1(a); }\n};\ntemplate <typename Scalar>\nstruct functor_traits<scalar_expm1_op<Scalar> > {\n  enum {\n    PacketAccess = packet_traits<Scalar>::HasExpm1,\n    Cost = functor_traits<scalar_exp_op<Scalar> >::Cost // TODO measure cost of expm1\n  };\n};\n\n/** \\internal\n  *\n  * \\brief Template functor to compute the logarithm of a scalar\n  *\n  * \\sa class CwiseUnaryOp, ArrayBase::log()\n  */\ntemplate<typename Scalar> struct scalar_log_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_log_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::log(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plog(a); }\n};\ntemplate <typename Scalar>\nstruct functor_traits<scalar_log_op<Scalar> > {\n  enum {\n    
PacketAccess = packet_traits<Scalar>::HasLog,\n    Cost =\n    (PacketAccess\n     // The following numbers are based on the AVX implementation.\n#ifdef EIGEN_VECTORIZE_FMA\n     // 8 pmadd, 6 pmul, 8 padd/psub, 16 other, can issue 2 add/mul/madd per cycle.\n     ? (20 * NumTraits<Scalar>::AddCost + 7 * NumTraits<Scalar>::MulCost)\n#else\n     // 8 pmadd, 6 pmul, 8 padd/psub, 20 other\n     ? (36 * NumTraits<Scalar>::AddCost + 14 * NumTraits<Scalar>::MulCost)\n#endif\n     // Measured cost of std::log.\n     : sizeof(Scalar)==4 ? 40 : 85)\n  };\n};\n\n/** \\internal\n  *\n  * \\brief Template functor to compute the logarithm of 1 plus a scalar value\n  *\n  * \\sa class CwiseUnaryOp, ArrayBase::log1p()\n  */\ntemplate<typename Scalar> struct scalar_log1p_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_log1p_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::log1p(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plog1p(a); }\n};\ntemplate <typename Scalar>\nstruct functor_traits<scalar_log1p_op<Scalar> > {\n  enum {\n    PacketAccess = packet_traits<Scalar>::HasLog1p,\n    Cost = functor_traits<scalar_log_op<Scalar> >::Cost // TODO measure cost of log1p\n  };\n};\n\n/** \\internal\n  *\n  * \\brief Template functor to compute the base-10 logarithm of a scalar\n  *\n  * \\sa class CwiseUnaryOp, Cwise::log10()\n  */\ntemplate<typename Scalar> struct scalar_log10_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_log10_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { EIGEN_USING_STD_MATH(log10) return log10(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::plog10(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_log10_op<Scalar> >\n{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasLog10 }; };\n\n/** \\internal\n  * 
\\brief Template functor to compute the square root of a scalar\n  * \\sa class CwiseUnaryOp, Cwise::sqrt()\n  */\ntemplate<typename Scalar> struct scalar_sqrt_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::sqrt(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); }\n};\ntemplate <typename Scalar>\nstruct functor_traits<scalar_sqrt_op<Scalar> > {\n  enum {\n#if EIGEN_FAST_MATH\n    // The following numbers are based on the AVX implementation.\n    Cost = (sizeof(Scalar) == 8 ? 28\n                                // 4 pmul, 1 pmadd, 3 other\n                                : (3 * NumTraits<Scalar>::AddCost +\n                                   5 * NumTraits<Scalar>::MulCost)),\n#else\n    // The following numbers are based on min VSQRT throughput on Haswell.\n    Cost = (sizeof(Scalar) == 8 ? 28 : 14),\n#endif\n    PacketAccess = packet_traits<Scalar>::HasSqrt\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the reciprocal square root of a scalar\n  * \\sa class CwiseUnaryOp, Cwise::rsqrt()\n  */\ntemplate<typename Scalar> struct scalar_rsqrt_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_rsqrt_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return Scalar(1)/numext::sqrt(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::prsqrt(a); }\n};\n\ntemplate<typename Scalar>\nstruct functor_traits<scalar_rsqrt_op<Scalar> >\n{ enum {\n    Cost = 5 * NumTraits<Scalar>::MulCost,\n    PacketAccess = packet_traits<Scalar>::HasRsqrt\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the cosine of a scalar\n  * \\sa class CwiseUnaryOp, ArrayBase::cos()\n  */\ntemplate<typename Scalar> struct scalar_cos_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op)\n  EIGEN_DEVICE_FUNC inline 
Scalar operator() (const Scalar& a) const { return numext::cos(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pcos(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_cos_op<Scalar> >\n{\n  enum {\n    Cost = 5 * NumTraits<Scalar>::MulCost,\n    PacketAccess = packet_traits<Scalar>::HasCos\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the sine of a scalar\n  * \\sa class CwiseUnaryOp, ArrayBase::sin()\n  */\ntemplate<typename Scalar> struct scalar_sin_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::sin(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psin(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_sin_op<Scalar> >\n{\n  enum {\n    Cost = 5 * NumTraits<Scalar>::MulCost,\n    PacketAccess = packet_traits<Scalar>::HasSin\n  };\n};\n\n\n/** \\internal\n  * \\brief Template functor to compute the tan of a scalar\n  * \\sa class CwiseUnaryOp, ArrayBase::tan()\n  */\ntemplate<typename Scalar> struct scalar_tan_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::tan(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::ptan(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_tan_op<Scalar> >\n{\n  enum {\n    Cost = 5 * NumTraits<Scalar>::MulCost,\n    PacketAccess = packet_traits<Scalar>::HasTan\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the arc cosine of a scalar\n  * \\sa class CwiseUnaryOp, ArrayBase::acos()\n  */\ntemplate<typename Scalar> struct scalar_acos_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) 
const { return numext::acos(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pacos(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_acos_op<Scalar> >\n{\n  enum {\n    Cost = 5 * NumTraits<Scalar>::MulCost,\n    PacketAccess = packet_traits<Scalar>::HasACos\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the arc sine of a scalar\n  * \\sa class CwiseUnaryOp, ArrayBase::asin()\n  */\ntemplate<typename Scalar> struct scalar_asin_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::asin(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pasin(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_asin_op<Scalar> >\n{\n  enum {\n    Cost = 5 * NumTraits<Scalar>::MulCost,\n    PacketAccess = packet_traits<Scalar>::HasASin\n  };\n};\n\n\n/** \\internal\n  * \\brief Template functor to compute the atan of a scalar\n  * \\sa class CwiseUnaryOp, ArrayBase::atan()\n  */\ntemplate<typename Scalar> struct scalar_atan_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_atan_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::atan(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::patan(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_atan_op<Scalar> >\n{\n  enum {\n    Cost = 5 * NumTraits<Scalar>::MulCost,\n    PacketAccess = packet_traits<Scalar>::HasATan\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the tanh of a scalar\n  * \\sa class CwiseUnaryOp, ArrayBase::tanh()\n  */\ntemplate <typename Scalar>\nstruct scalar_tanh_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_tanh_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator()(const Scalar& a) const { return 
numext::tanh(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& x) const { return ptanh(x); }\n};\n\ntemplate <typename Scalar>\nstruct functor_traits<scalar_tanh_op<Scalar> > {\n  enum {\n    PacketAccess = packet_traits<Scalar>::HasTanh,\n    Cost = ( (EIGEN_FAST_MATH && is_same<Scalar,float>::value)\n// The following numbers are based on the AVX implementation,\n#ifdef EIGEN_VECTORIZE_FMA\n                // Haswell can issue 2 add/mul/madd per cycle.\n                // 9 pmadd, 2 pmul, 1 div, 2 other\n                ? (2 * NumTraits<Scalar>::AddCost +\n                   6 * NumTraits<Scalar>::MulCost +\n                   scalar_div_cost<Scalar,packet_traits<Scalar>::HasDiv>::value)\n#else\n                ? (11 * NumTraits<Scalar>::AddCost +\n                   11 * NumTraits<Scalar>::MulCost +\n                   scalar_div_cost<Scalar,packet_traits<Scalar>::HasDiv>::value)\n#endif\n                // This number assumes a naive implementation of tanh\n                : (6 * NumTraits<Scalar>::AddCost +\n                   3 * NumTraits<Scalar>::MulCost +\n                   2 * scalar_div_cost<Scalar,packet_traits<Scalar>::HasDiv>::value +\n                   functor_traits<scalar_exp_op<Scalar> >::Cost))\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the sinh of a scalar\n  * \\sa class CwiseUnaryOp, ArrayBase::sinh()\n  */\ntemplate<typename Scalar> struct scalar_sinh_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_sinh_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::sinh(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psinh(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_sinh_op<Scalar> >\n{\n  enum {\n    Cost = 5 * NumTraits<Scalar>::MulCost,\n    PacketAccess = packet_traits<Scalar>::HasSinh\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to 
compute the cosh of a scalar\n  * \\sa class CwiseUnaryOp, ArrayBase::cosh()\n  */\ntemplate<typename Scalar> struct scalar_cosh_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_cosh_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const { return numext::cosh(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pcosh(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_cosh_op<Scalar> >\n{\n  enum {\n    Cost = 5 * NumTraits<Scalar>::MulCost,\n    PacketAccess = packet_traits<Scalar>::HasCosh\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the inverse of a scalar\n  * \\sa class CwiseUnaryOp, Cwise::inverse()\n  */\ntemplate<typename Scalar>\nstruct scalar_inverse_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_inverse_op)\n  EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return Scalar(1)/a; }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const\n  { return internal::pdiv(pset1<Packet>(Scalar(1)),a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_inverse_op<Scalar> >\n{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasDiv }; };\n\n/** \\internal\n  * \\brief Template functor to compute the square of a scalar\n  * \\sa class CwiseUnaryOp, Cwise::square()\n  */\ntemplate<typename Scalar>\nstruct scalar_square_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_square_op)\n  EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return a*a; }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const\n  { return internal::pmul(a,a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_square_op<Scalar> >\n{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };\n\n/** \\internal\n  * \\brief Template functor to compute the cube of a scalar\n  * \\sa 
class CwiseUnaryOp, Cwise::cube()\n  */\ntemplate<typename Scalar>\nstruct scalar_cube_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_cube_op)\n  EIGEN_DEVICE_FUNC inline Scalar operator() (const Scalar& a) const { return a*a*a; }\n  template<typename Packet>\n  EIGEN_DEVICE_FUNC inline const Packet packetOp(const Packet& a) const\n  { return internal::pmul(a,pmul(a,a)); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_cube_op<Scalar> >\n{ enum { Cost = 2*NumTraits<Scalar>::MulCost, PacketAccess = packet_traits<Scalar>::HasMul }; };\n\n/** \\internal\n  * \\brief Template functor to compute the rounded value of a scalar\n  * \\sa class CwiseUnaryOp, ArrayBase::round()\n  */\ntemplate<typename Scalar> struct scalar_round_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_round_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return numext::round(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pround(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_round_op<Scalar> >\n{\n  enum {\n    Cost = NumTraits<Scalar>::MulCost,\n    PacketAccess = packet_traits<Scalar>::HasRound\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the floor of a scalar\n  * \\sa class CwiseUnaryOp, ArrayBase::floor()\n  */\ntemplate<typename Scalar> struct scalar_floor_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_floor_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return numext::floor(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pfloor(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_floor_op<Scalar> >\n{\n  enum {\n    Cost = NumTraits<Scalar>::MulCost,\n    PacketAccess = packet_traits<Scalar>::HasFloor\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the ceil of a scalar\n  * \\sa class 
CwiseUnaryOp, ArrayBase::ceil()\n  */\ntemplate<typename Scalar> struct scalar_ceil_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_ceil_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return numext::ceil(a); }\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::pceil(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_ceil_op<Scalar> >\n{\n  enum {\n    Cost = NumTraits<Scalar>::MulCost,\n    PacketAccess = packet_traits<Scalar>::HasCeil\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute whether a scalar is NaN\n  * \\sa class CwiseUnaryOp, ArrayBase::isnan()\n  */\ntemplate<typename Scalar> struct scalar_isnan_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_isnan_op)\n  typedef bool result_type;\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const {\n#if defined(__SYCL_DEVICE_ONLY__)\n    return numext::isnan(a);\n#else  \n    return (numext::isnan)(a);\n#endif\n  }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_isnan_op<Scalar> >\n{\n  enum {\n    Cost = NumTraits<Scalar>::MulCost,\n    PacketAccess = false\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to check whether a scalar is +/-inf\n  * \\sa class CwiseUnaryOp, ArrayBase::isinf()\n  */\ntemplate<typename Scalar> struct scalar_isinf_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_isinf_op)\n  typedef bool result_type;\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const {\n#if defined(__SYCL_DEVICE_ONLY__)\n    return numext::isinf(a);\n#else\n    return (numext::isinf)(a);\n#endif\n  }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_isinf_op<Scalar> >\n{\n  enum {\n    Cost = NumTraits<Scalar>::MulCost,\n    PacketAccess = false\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to check whether a scalar has a finite value\n  * \\sa class CwiseUnaryOp, ArrayBase::isfinite()\n  
*/\ntemplate<typename Scalar> struct scalar_isfinite_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_isfinite_op)\n  typedef bool result_type;\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const {\n#if defined(__SYCL_DEVICE_ONLY__)\n    return numext::isfinite(a);\n#else\n    return (numext::isfinite)(a);\n#endif\n  }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_isfinite_op<Scalar> >\n{\n  enum {\n    Cost = NumTraits<Scalar>::MulCost,\n    PacketAccess = false\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the logical not of a boolean\n  *\n  * \\sa class CwiseUnaryOp, ArrayBase::operator!\n  */\ntemplate<typename Scalar> struct scalar_boolean_not_op {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_not_op)\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator() (const bool& a) const { return !a; }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_boolean_not_op<Scalar> > {\n  enum {\n    Cost = NumTraits<bool>::AddCost,\n    PacketAccess = false\n  };\n};\n\n/** \\internal\n  * \\brief Template functor to compute the signum of a scalar\n  * \\sa class CwiseUnaryOp, Cwise::sign()\n  */\ntemplate<typename Scalar,bool iscpx=(NumTraits<Scalar>::IsComplex!=0) > struct scalar_sign_op;\ntemplate<typename Scalar>\nstruct scalar_sign_op<Scalar,false> {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_sign_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const\n  {\n      return Scalar( (a>Scalar(0)) - (a<Scalar(0)) );\n  }\n  //TODO\n  //template <typename Packet>\n  //EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psign(a); }\n};\ntemplate<typename Scalar>\nstruct scalar_sign_op<Scalar,true> {\n  EIGEN_EMPTY_STRUCT_CTOR(scalar_sign_op)\n  EIGEN_DEVICE_FUNC inline const Scalar operator() (const Scalar& a) const\n  {\n    typedef typename NumTraits<Scalar>::Real real_type;\n    real_type aa = numext::abs(a);\n    if (aa==real_type(0))\n      return 
Scalar(0);\n    aa = real_type(1)/aa;\n    return Scalar(real(a)*aa, imag(a)*aa );\n  }\n  //TODO\n  //template <typename Packet>\n  //EIGEN_DEVICE_FUNC inline Packet packetOp(const Packet& a) const { return internal::psign(a); }\n};\ntemplate<typename Scalar>\nstruct functor_traits<scalar_sign_op<Scalar> >\n{ enum {\n    Cost = \n        NumTraits<Scalar>::IsComplex\n        ? ( 8*NumTraits<Scalar>::MulCost  ) // roughly\n        : ( 3*NumTraits<Scalar>::AddCost),\n    PacketAccess = packet_traits<Scalar>::HasSign\n  };\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_FUNCTORS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/GeneralBlockPanelKernel.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_GENERAL_BLOCK_PANEL_H\n#define EIGEN_GENERAL_BLOCK_PANEL_H\n\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>\nclass gebp_traits;\n\n\n/** \\internal \\returns b if a<=0, and returns a otherwise. */\ninline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff_t b)\n{\n  return a<=0 ? b : a;\n}\n\n#if EIGEN_ARCH_i386_OR_x86_64\nconst std::ptrdiff_t defaultL1CacheSize = 32*1024;\nconst std::ptrdiff_t defaultL2CacheSize = 256*1024;\nconst std::ptrdiff_t defaultL3CacheSize = 2*1024*1024;\n#else\nconst std::ptrdiff_t defaultL1CacheSize = 16*1024;\nconst std::ptrdiff_t defaultL2CacheSize = 512*1024;\nconst std::ptrdiff_t defaultL3CacheSize = 512*1024;\n#endif\n\n/** \\internal */\nstruct CacheSizes {\n  CacheSizes(): m_l1(-1),m_l2(-1),m_l3(-1) {\n    int l1CacheSize, l2CacheSize, l3CacheSize;\n    queryCacheSizes(l1CacheSize, l2CacheSize, l3CacheSize);\n    m_l1 = manage_caching_sizes_helper(l1CacheSize, defaultL1CacheSize);\n    m_l2 = manage_caching_sizes_helper(l2CacheSize, defaultL2CacheSize);\n    m_l3 = manage_caching_sizes_helper(l3CacheSize, defaultL3CacheSize);\n  }\n\n  std::ptrdiff_t m_l1;\n  std::ptrdiff_t m_l2;\n  std::ptrdiff_t m_l3;\n};\n\n\n/** \\internal */\ninline void manage_caching_sizes(Action action, std::ptrdiff_t* l1, std::ptrdiff_t* l2, std::ptrdiff_t* l3)\n{\n  static CacheSizes m_cacheSizes;\n\n  if(action==SetAction)\n  {\n    // set the cpu cache size and cache all block sizes from a global cache size in byte\n    
eigen_internal_assert(l1!=0 && l2!=0);\n    m_cacheSizes.m_l1 = *l1;\n    m_cacheSizes.m_l2 = *l2;\n    m_cacheSizes.m_l3 = *l3;\n  }\n  else if(action==GetAction)\n  {\n    eigen_internal_assert(l1!=0 && l2!=0);\n    *l1 = m_cacheSizes.m_l1;\n    *l2 = m_cacheSizes.m_l2;\n    *l3 = m_cacheSizes.m_l3;\n  }\n  else\n  {\n    eigen_internal_assert(false);\n  }\n}\n\n/* Helper for computeProductBlockingSizes.\n *\n * Given a m x k times k x n matrix product of scalar types \\c LhsScalar and \\c RhsScalar,\n * this function computes the blocking size parameters along the respective dimensions\n * for matrix products and related algorithms. The blocking sizes depends on various\n * parameters:\n * - the L1 and L2 cache sizes,\n * - the register level blocking sizes defined by gebp_traits,\n * - the number of scalars that fit into a packet (when vectorization is enabled).\n *\n * \\sa setCpuCacheSizes */\n\ntemplate<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index>\nvoid evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index num_threads = 1)\n{\n  typedef gebp_traits<LhsScalar,RhsScalar> Traits;\n\n  // Explanations:\n  // Let's recall that the product algorithms form mc x kc vertical panels A' on the lhs and\n  // kc x nc blocks B' on the rhs. B' has to fit into L2/L3 cache. Moreover, A' is processed\n  // per mr x kc horizontal small panels where mr is the blocking size along the m dimension\n  // at the register level. 
This small horizontal panel has to stay within L1 cache.\n  std::ptrdiff_t l1, l2, l3;\n  manage_caching_sizes(GetAction, &l1, &l2, &l3);\n\n  if (num_threads > 1) {\n    typedef typename Traits::ResScalar ResScalar;\n    enum {\n      kdiv = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),\n      ksub = Traits::mr * Traits::nr * sizeof(ResScalar),\n      kr = 8,\n      mr = Traits::mr,\n      nr = Traits::nr\n    };\n    // Increasing k gives us more time to prefetch the content of the \"C\"\n    // registers. However once the latency is hidden there is no point in\n    // increasing the value of k, so we'll cap it at 320 (value determined\n    // experimentally).\n    const Index k_cache = (numext::mini<Index>)((l1-ksub)/kdiv, 320);\n    if (k_cache < k) {\n      k = k_cache - (k_cache % kr);\n      eigen_internal_assert(k > 0);\n    }\n\n    const Index n_cache = (l2-l1) / (nr * sizeof(RhsScalar) * k);\n    const Index n_per_thread = numext::div_ceil(n, num_threads);\n    if (n_cache <= n_per_thread) {\n      // Don't exceed the capacity of the l2 cache.\n      eigen_internal_assert(n_cache >= static_cast<Index>(nr));\n      n = n_cache - (n_cache % nr);\n      eigen_internal_assert(n > 0);\n    } else {\n      n = (numext::mini<Index>)(n, (n_per_thread + nr - 1) - ((n_per_thread + nr - 1) % nr));\n    }\n\n    if (l3 > l2) {\n      // l3 is shared between all cores, so we'll give each thread its own chunk of l3.\n      const Index m_cache = (l3-l2) / (sizeof(LhsScalar) * k * num_threads);\n      const Index m_per_thread = numext::div_ceil(m, num_threads);\n      if(m_cache < m_per_thread && m_cache >= static_cast<Index>(mr)) {\n        m = m_cache - (m_cache % mr);\n        eigen_internal_assert(m > 0);\n      } else {\n        m = (numext::mini<Index>)(m, (m_per_thread + mr - 1) - ((m_per_thread + mr - 1) % mr));\n      }\n    }\n  }\n  else {\n    // In unit tests we do not want to use extra large matrices,\n    // so we reduce 
the cache size to check the blocking strategy is not flawed\n#ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS\n    l1 = 9*1024;\n    l2 = 32*1024;\n    l3 = 512*1024;\n#endif\n\n    // Early return for small problems because the computation below are time consuming for small problems.\n    // Perhaps it would make more sense to consider k*n*m??\n    // Note that for very tiny problem, this function should be bypassed anyway\n    // because we use the coefficient-based implementation for them.\n    if((numext::maxi)(k,(numext::maxi)(m,n))<48)\n      return;\n\n    typedef typename Traits::ResScalar ResScalar;\n    enum {\n      k_peeling = 8,\n      k_div = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),\n      k_sub = Traits::mr * Traits::nr * sizeof(ResScalar)\n    };\n\n    // ---- 1st level of blocking on L1, yields kc ----\n\n    // Blocking on the third dimension (i.e., k) is chosen so that an horizontal panel\n    // of size mr x kc of the lhs plus a vertical panel of kc x nr of the rhs both fits within L1 cache.\n    // We also include a register-level block of the result (mx x nr).\n    // (In an ideal world only the lhs panel would stay in L1)\n    // Moreover, kc has to be a multiple of 8 to be compatible with loop peeling, leading to a maximum blocking size of:\n    const Index max_kc = numext::maxi<Index>(((l1-k_sub)/k_div) & (~(k_peeling-1)),1);\n    const Index old_k = k;\n    if(k>max_kc)\n    {\n      // We are really blocking on the third dimension:\n      // -> reduce blocking size to make sure the last block is as large as possible\n      //    while keeping the same number of sweeps over the result.\n      k = (k%max_kc)==0 ? 
max_kc\n                        : max_kc - k_peeling * ((max_kc-1-(k%max_kc))/(k_peeling*(k/max_kc+1)));\n\n      eigen_internal_assert(((old_k/k) == (old_k/max_kc)) && \"the number of sweeps has to remain the same\");\n    }\n\n    // ---- 2nd level of blocking on max(L2,L3), yields nc ----\n\n    // TODO find a reliable way to get the actual amount of cache per core to use for 2nd level blocking, that is:\n    //      actual_l2 = max(l2, l3/nb_core_sharing_l3)\n    // The number below is quite conservative: it is better to underestimate the cache size rather than overestimating it)\n    // For instance, it corresponds to 6MB of L3 shared among 4 cores.\n    #ifdef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS\n    const Index actual_l2 = l3;\n    #else\n    const Index actual_l2 = 1572864; // == 1.5 MB\n    #endif\n\n    // Here, nc is chosen such that a block of kc x nc of the rhs fit within half of L2.\n    // The second half is implicitly reserved to access the result and lhs coefficients.\n    // When k<max_kc, then nc can arbitrarily growth. 
In practice, it seems to be fruitful\n    // to limit this growth: we bound nc to growth by a factor x1.5.\n    // However, if the entire lhs block fit within L1, then we are not going to block on the rows at all,\n    // and it becomes fruitful to keep the packed rhs blocks in L1 if there is enough remaining space.\n    Index max_nc;\n    const Index lhs_bytes = m * k * sizeof(LhsScalar);\n    const Index remaining_l1 = l1- k_sub - lhs_bytes;\n    if(remaining_l1 >= Index(Traits::nr*sizeof(RhsScalar))*k)\n    {\n      // L1 blocking\n      max_nc = remaining_l1 / (k*sizeof(RhsScalar));\n    }\n    else\n    {\n      // L2 blocking\n      max_nc = (3*actual_l2)/(2*2*max_kc*sizeof(RhsScalar));\n    }\n    // WARNING Below, we assume that Traits::nr is a power of two.\n    Index nc = numext::mini<Index>(actual_l2/(2*k*sizeof(RhsScalar)), max_nc) & (~(Traits::nr-1));\n    if(n>nc)\n    {\n      // We are really blocking over the columns:\n      // -> reduce blocking size to make sure the last block is as large as possible\n      //    while keeping the same number of sweeps over the packed lhs.\n      //    Here we allow one more sweep if this gives us a perfect match, thus the commented \"-1\"\n      n = (n%nc)==0 ? 
nc\n                    : (nc - Traits::nr * ((nc/*-1*/-(n%nc))/(Traits::nr*(n/nc+1))));\n    }\n    else if(old_k==k)\n    {\n      // So far, no blocking at all, i.e., kc==k, and nc==n.\n      // In this case, let's perform a blocking over the rows such that the packed lhs data is kept in cache L1/L2\n      // TODO: part of this blocking strategy is now implemented within the kernel itself, so the L1-based heuristic here should be obsolete.\n      Index problem_size = k*n*sizeof(LhsScalar);\n      Index actual_lm = actual_l2;\n      Index max_mc = m;\n      if(problem_size<=1024)\n      {\n        // problem is small enough to keep in L1\n        // Let's choose m such that lhs's block fit in 1/3 of L1\n        actual_lm = l1;\n      }\n      else if(l3!=0 && problem_size<=32768)\n      {\n        // we have both L2 and L3, and problem is small enough to be kept in L2\n        // Let's choose m such that lhs's block fit in 1/3 of L2\n        actual_lm = l2;\n        max_mc = (numext::mini<Index>)(576,max_mc);\n      }\n      Index mc = (numext::mini<Index>)(actual_lm/(3*k*sizeof(LhsScalar)), max_mc);\n      if (mc > Traits::mr) mc -= mc % Traits::mr;\n      else if (mc==0) return;\n      m = (m%mc)==0 ? 
mc\n                    : (mc - Traits::mr * ((mc/*-1*/-(m%mc))/(Traits::mr*(m/mc+1))));\n    }\n  }\n}\n\ntemplate <typename Index>\ninline bool useSpecificBlockingSizes(Index& k, Index& m, Index& n)\n{\n#ifdef EIGEN_TEST_SPECIFIC_BLOCKING_SIZES\n  if (EIGEN_TEST_SPECIFIC_BLOCKING_SIZES) {\n    k = numext::mini<Index>(k, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K);\n    m = numext::mini<Index>(m, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M);\n    n = numext::mini<Index>(n, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N);\n    return true;\n  }\n#else\n  EIGEN_UNUSED_VARIABLE(k)\n  EIGEN_UNUSED_VARIABLE(m)\n  EIGEN_UNUSED_VARIABLE(n)\n#endif\n  return false;\n}\n\n/** \\brief Computes the blocking parameters for a m x k times k x n matrix product\n  *\n  * \\param[in,out] k Input: the third dimension of the product. Output: the blocking size along the same dimension.\n  * \\param[in,out] m Input: the number of rows of the left hand side. Output: the blocking size along the same dimension.\n  * \\param[in,out] n Input: the number of columns of the right hand side. 
Output: the blocking size along the same dimension.
  *
  * Given a m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar,
  * this function computes the blocking size parameters along the respective dimensions
  * for matrix products and related algorithms.
  *
  * The blocking size parameters may be evaluated:
  *   - either by a heuristic based on cache sizes;
  *   - or using fixed prescribed values (for testing purposes).
  *
  * \sa setCpuCacheSizes */

template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index>
void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  // Prescribed blocking sizes (testing) take precedence over the cache-size heuristic.
  if (!useSpecificBlockingSizes(k, m, n)) {
    evaluateProductBlockingSizesHeuristic<LhsScalar, RhsScalar, KcFactor, Index>(k, m, n, num_threads);
  }
}

// Convenience overload: same as above with KcFactor fixed to 1.
template<typename LhsScalar, typename RhsScalar, typename Index>
inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  computeProductBlockingSizes<LhsScalar,RhsScalar,1,Index>(k, m, n, num_threads);
}

#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
  #define CJMADD(CJ,A,B,C,T)  C = CJ.pmadd(A,B,C);
#else

  // FIXME (a bit overkill maybe ?)

  // Generic fallback: defer to the conjugation helper's fused pmadd.
  template<typename CJ, typename A, typename B, typename C, typename T> struct gebp_madd_selector {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
    {
      c = cj.pmadd(a,b,c);
    }
  };

  // All-same-type case: split into pmul + padd, using t as explicit scratch
  // so the compiler does not allocate a fresh register for the product.
  template<typename CJ, typename T> struct gebp_madd_selector<CJ,T,T,T,T> {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
    {
      t = b; t = cj.pmul(a,t); c = padd(c,t);
    }
  };

  template<typename CJ, typename A, typename B, typename C, typename T>
  EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
  {
    gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
  }

  #define CJMADD(CJ,A,B,C,T)  gebp_madd(CJ,A,B,C,T);
//   #define CJMADD(CJ,A,B,C,T)  T = B; T = CJ.pmul(A,T); C = padd(C,T);
#endif

/* Vectorization logic
 *  real*real: unpack rhs to constant packets, ...
 *
 *  cd*cd : unpack rhs to (b_r,b_r), (b_i,b_i), mul to get (a_r b_r,a_i b_r) (a_r b_i,a_i b_i),
 *          storing each res packet into two packets (2x2),
 *          at the end combine them: swap the second and addsub them
 *  cf*cf : same but with 2x4 blocks
 *  cplx*real : unpack rhs to constant packets, ...
 *  real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual
 */
// Generic traits: defines the packet types, the register blocking sizes (mr x nr)
// and the elementary packet operations (load / madd / acc) consumed by gebp_kernel.
template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits
{
public:
  typedef _LhsScalar LhsScalar;
  typedef _RhsScalar RhsScalar;
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    // Vectorize only if both scalar types have vectorized packet support.
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,

    // register block size along the N direction must be 1 or 4
    nr = 4,

    // register block size along the M direction (currently, this one cannot be modified)
    default_mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
    // we assume 16 registers
    // See bug 992, if the scalar type is not vectorizable but that EIGEN_HAS_SINGLE_INSTRUCTION_MADD is defined,
    // then using 3*LhsPacketSize triggers non-implemented paths in syrk.
    mr = Vectorizable ? 3*LhsPacketSize : default_mr,
#else
    mr = default_mr,
#endif

    LhsProgress = LhsPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  // Fall back to plain scalars when vectorization is disabled.
  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  // Zero-initialize an accumulator register.
  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  // Broadcast four consecutive rhs coefficients into four packets.
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

//   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
//   {
//     pbroadcast2(b, b0, b1);
//   }

  template<typename RhsPacketType>
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
  {
    dest = pset1<RhsPacketType>(*b);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = ploadquad<RhsPacket>(b);
  }

  // Aligned load of a lhs packet (packed lhs blocks are aligned).
  template<typename LhsPacketType>
  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacketType& dest) const
  {
    dest = pload<LhsPacketType>(a);
  }

  template<typename LhsPacketType>
  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
  {
    dest = ploadu<LhsPacketType>(a);
  }

  // c += a*b, honoring the ConjLhs/ConjRhs flags; tmp is scratch for the non-FMA path.
  template<typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
  EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, AccPacketType& tmp) const
  {
    conj_helper<LhsPacketType,RhsPacketType,ConjLhs,ConjRhs> cj;
    // It would be a lot cleaner to call pmadd all the time. Unfortunately if we
    // let gcc allocate the register in which to store the result of the pmul
    // (in the case where there is no FMA) gcc fails to figure out how to avoid
    // spilling register.
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c = cj.pmadd(a,b,c);
#else
    tmp = b; tmp = cj.pmul(a,tmp); c = padd(c,tmp);
#endif
  }

  // r += alpha * c : scale the accumulator and fold it into the result packet.
  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = pmadd(c,alpha,r);
  }

  template<typename ResPacketHalf>
  EIGEN_STRONG_INLINE void acc(const ResPacketHalf& c, const ResPacketHalf& alpha, ResPacketHalf& r) const
  {
    r = pmadd(c,alpha,r);
  }

};

// Specialization for complex lhs * real rhs products.
template<typename RealScalar, bool _ConjLhs>
class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
{
public:
  typedef std::complex<RealScalar> LhsScalar;
  typedef RealScalar RhsScalar;
  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = false,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    nr = 4,
#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
    // we assume 16 registers
    mr = 3*LhsPacketSize,
#else
    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
#endif

    LhsProgress = LhsPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  // NOTE(review): unlike the generic traits, this broadcasts a single rhs
  // coefficient rather than calling ploadquad.
  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploadu<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

//   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
//   {
//     pbroadcast2(b, b0, b1);
//   }

  // Dispatch to the vectorized or scalar implementation at compile time.
  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  // Vectorized path: operate on the underlying real packet (.v) of the complex lhs.
  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c.v = pmadd(a.v,b,c.v);
#else
    tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp);
#endif
  }

  // Scalar fallback.
  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  // r += alpha * c, applying the lhs conjugation here (deferred from madd).
  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(c,alpha,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
};

// Pair of real packets used as the accumulator of complex*complex products below:
// in that specialization 'first' receives broadcast real parts and 'second'
// broadcast imaginary parts (see loadRhs).
template<typename Packet>
struct DoublePacket
{
  Packet first;
  Packet second;
};

// Component-wise addition of two DoublePackets.
template<typename Packet>
DoublePacket<Packet> padd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
{
  DoublePacket<Packet> res;
  res.first  = padd(a.first, b.first);
  res.second = padd(a.second,b.second);
  return res;
}

// Identity reduction: a DoublePacket is already its own "downto4" form.
template<typename Packet>
const DoublePacket<Packet>& predux_downto4(const DoublePacket<Packet> &a)
{
  return a;
}

template<typename Packet> struct unpacket_traits<DoublePacket<Packet> > { typedef DoublePacket<Packet> half; };
// template<typename Packet>
// DoublePacket<Packet> pmadd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
// {
//   DoublePacket<Packet> res;
//   res.first  = padd(a.first, b.first);
//   res.second = padd(a.second,b.second);
//   return res;
// }

// Specialization for complex * complex products: accumulates the real- and
// imaginary-coefficient partial products separately (DoublePacket) and
// recombines them in acc().
template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
{
public:
  typedef std::complex<RealScalar>  Scalar;
  typedef std::complex<RealScalar>  LhsScalar;
  typedef std::complex<RealScalar>  RhsScalar;
  typedef std::complex<RealScalar>  ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    RealPacketSize  = Vectorizable ? packet_traits<RealScalar>::size : 1,
    ResPacketSize   = Vectorizable ? packet_traits<ResScalar>::size : 1,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,

    // FIXME: should depend on NumberOfRegisters
    nr = 4,
    mr = ResPacketSize,

    LhsProgress = ResPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<RealScalar>::type RealPacket;
  typedef typename packet_traits<Scalar>::type     ScalarPacket;
  typedef DoublePacket<RealPacket> DoublePacketType;

  typedef typename conditional<Vectorizable,RealPacket,  Scalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type ResPacket;
  typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type AccPacket;

  EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }

  EIGEN_STRONG_INLINE void initAcc(DoublePacketType& p)
  {
    p.first   = pset1<RealPacket>(RealScalar(0));
    p.second  = pset1<RealPacket>(RealScalar(0));
  }

  // Scalar path
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const
  {
    dest = pset1<ResPacket>(*b);
  }

  // Vectorized path: broadcast real and imaginary parts into separate packets.
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacketType& dest) const
  {
    dest.first  = pset1<RealPacket>(real(*b));
    dest.second = pset1<RealPacket>(imag(*b));
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const
  {
    loadRhs(b,dest);
  }
  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const
  {
    eigen_internal_assert(unpacket_traits<ScalarPacket>::size<=4);
    loadRhs(b,dest);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    // FIXME not sure that's the best way to implement it!
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
    loadRhs(b+2, b2);
    loadRhs(b+3, b3);
  }

  // Vectorized path
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, DoublePacketType& b0, DoublePacketType& b1)
  {
    // FIXME not sure that's the best way to implement it!
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
  }

  // Scalar path
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsScalar& b0, RhsScalar& b1)
  {
    // FIXME not sure that's the best way to implement it!
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
  }

  // nothing special here
  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploadu<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
  }

  // Vectorized madd: accumulate a*real(b) and a*imag(b) separately.
  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacketType& c, RhsPacket& /*tmp*/) const
  {
    c.first   = padd(pmul(a,b.first), c.first);
    c.second  = padd(pmul(a,b.second),c.second);
  }

  // Scalar madd with conjugation handled by the helper.
  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const
  {
    c = cj.pmadd(a,b,c);
  }

  EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; }

  // Recombine the two half-accumulators into a complex packet and fold into r,
  // applying the conjugation flags via flip/conj/sub as appropriate.
  EIGEN_STRONG_INLINE void acc(const DoublePacketType& c, const ResPacket& alpha, ResPacket& r) const
  {
    // assemble c
    ResPacket tmp;
    if((!ConjLhs)&&(!ConjRhs))
    {
      tmp = pcplxflip(pconj(ResPacket(c.second)));
      tmp = padd(ResPacket(c.first),tmp);
    }
    else if((!ConjLhs)&&(ConjRhs))
    {
      tmp = pconj(pcplxflip(ResPacket(c.second)));
      tmp = padd(ResPacket(c.first),tmp);
    }
    else if((ConjLhs)&&(!ConjRhs))
    {
      tmp = pcplxflip(ResPacket(c.second));
      tmp = padd(pconj(ResPacket(c.first)),tmp);
    }
    else if((ConjLhs)&&(ConjRhs))
    {
      tmp = pcplxflip(ResPacket(c.second));
      tmp = psub(pconj(ResPacket(c.first)),tmp);
    }

    r = pmadd(tmp,alpha,r);
  }

protected:
  conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
};

// Specialization for real lhs * complex rhs products: the lhs is loaded with
// duplication (ploaddup) so it lines up with the interleaved complex rhs.
template<typename RealScalar, bool _ConjRhs>
class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs >
{
public:
  typedef std::complex<RealScalar>  Scalar;
  typedef RealScalar  LhsScalar;
  typedef Scalar      RhsScalar;
  typedef Scalar      ResScalar;

  enum {
    ConjLhs = false,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    // FIXME: should depend on NumberOfRegisters
    nr = 4,
    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*ResPacketSize,

    LhsProgress = ResPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

//   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
//   {
//     // FIXME not sure that's the best way to implement it!
//     b0 = pload1<RhsPacket>(b+0);
//     b1 = pload1<RhsPacket>(b+1);
//   }

  // Load the real lhs duplicated: (a0,a0,a1,a1,...), matching the complex rhs layout.
  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploaddup<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    eigen_internal_assert(unpacket_traits<RhsPacket>::size<=4);
    loadRhs(b,dest);
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploaddup<LhsPacket>(a);
  }

  // Dispatch to the vectorized or scalar implementation at compile time.
  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  // Vectorized path: multiply the real lhs against the underlying real packet (.v) of the rhs.
  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c.v = pmadd(a,b.v,c.v);
#else
    tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp);
#endif

  }

  // Scalar fallback.
  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  // r += alpha * c, applying the rhs conjugation here (deferred from madd).
  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(alpha,c,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,false,ConjRhs> cj;
};

/* optimized GEneral packed Block * packed Panel product kernel
 *
 * Mixing type logic: C += A * B
 *  |  A  |  B  | comments
 *  |real |cplx | no vectorization yet, would require to pack A with duplication
 *  |cplx |real | easy vectorization
 */
template<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
struct gebp_kernel
{
  typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
  typedef typename Traits::ResScalar ResScalar;
  typedef typename Traits::LhsPacket LhsPacket;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::ResPacket ResPacket;
  typedef typename Traits::AccPacket AccPacket;

  // Traits with lhs/rhs roles swapped, used for the transposed fallback paths.
  typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs> SwappedTraits;
  typedef typename SwappedTraits::ResScalar SResScalar;
  typedef typename SwappedTraits::LhsPacket SLhsPacket;
  typedef typename SwappedTraits::RhsPacket SRhsPacket;
  typedef typename SwappedTraits::ResPacket SResPacket;
  typedef typename SwappedTraits::AccPacket SAccPacket;

  typedef typename DataMapper::LinearMapper LinearMapper;

  enum {
Vectorizable  = Traits::Vectorizable,\n    LhsProgress   = Traits::LhsProgress,\n    RhsProgress   = Traits::RhsProgress,\n    ResPacketSize = Traits::ResPacketSize\n  };\n\n  EIGEN_DONT_INLINE\n  void operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB,\n                  Index rows, Index depth, Index cols, ResScalar alpha,\n                  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0);\n};\n\ntemplate<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>\nEIGEN_DONT_INLINE\nvoid gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,ConjugateRhs>\n  ::operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB,\n               Index rows, Index depth, Index cols, ResScalar alpha,\n               Index strideA, Index strideB, Index offsetA, Index offsetB)\n  {\n    Traits traits;\n    SwappedTraits straits;\n    \n    if(strideA==-1) strideA = depth;\n    if(strideB==-1) strideB = depth;\n    conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;\n    Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;\n    const Index peeled_mc3 = mr>=3*Traits::LhsProgress ? (rows/(3*LhsProgress))*(3*LhsProgress) : 0;\n    const Index peeled_mc2 = mr>=2*Traits::LhsProgress ? peeled_mc3+((rows-peeled_mc3)/(2*LhsProgress))*(2*LhsProgress) : 0;\n    const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? 
(rows/(1*LhsProgress))*(1*LhsProgress) : 0;\n    enum { pk = 8 }; // NOTE Such a large peeling factor is important for large matrices (~ +5% when >1000 on Haswell)\n    const Index peeled_kc  = depth & ~(pk-1);\n    const Index prefetch_res_offset = 32/sizeof(ResScalar);    \n//     const Index depth2     = depth & ~1;\n\n    //---------- Process 3 * LhsProgress rows at once ----------\n    // This corresponds to 3*LhsProgress x nr register blocks.\n    // Usually, make sense only with FMA\n    if(mr>=3*Traits::LhsProgress)\n    {\n      // Here, the general idea is to loop on each largest micro horizontal panel of the lhs (3*Traits::LhsProgress x depth)\n      // and on each largest micro vertical panel of the rhs (depth * nr).\n      // Blocking sizes, i.e., 'depth' has been computed so that the micro horizontal panel of the lhs fit in L1.\n      // However, if depth is too small, we can extend the number of rows of these horizontal panels.\n      // This actual number of rows is computed as follow:\n      const Index l1 = defaultL1CacheSize; // in Bytes, TODO, l1 should be passed to this function.\n      // The max(1, ...) here is needed because we may be using blocking params larger than what our known l1 cache size\n      // suggests we should be using: either because our known l1 cache size is inaccurate (e.g. 
on Android, we can only guess),\n      // or because we are testing specific blocking sizes.\n      const Index actual_panel_rows = (3*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 3*LhsProgress) ));\n      for(Index i1=0; i1<peeled_mc3; i1+=actual_panel_rows)\n      {\n        const Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc3);\n        for(Index j2=0; j2<packet_cols4; j2+=nr)\n        {\n          for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress)\n          {\n          \n          // We selected a 3*Traits::LhsProgress x nr micro block of res which is entirely\n          // stored into 3 x nr registers.\n          \n          const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*LhsProgress)];\n          prefetch(&blA[0]);\n\n          // gets res block as register\n          AccPacket C0, C1, C2,  C3,\n                    C4, C5, C6,  C7,\n                    C8, C9, C10, C11;\n          traits.initAcc(C0);  traits.initAcc(C1);  traits.initAcc(C2);  traits.initAcc(C3);\n          traits.initAcc(C4);  traits.initAcc(C5);  traits.initAcc(C6);  traits.initAcc(C7);\n          traits.initAcc(C8);  traits.initAcc(C9);  traits.initAcc(C10); traits.initAcc(C11);\n\n          LinearMapper r0 = res.getLinearMapper(i, j2 + 0);\n          LinearMapper r1 = res.getLinearMapper(i, j2 + 1);\n          LinearMapper r2 = res.getLinearMapper(i, j2 + 2);\n          LinearMapper r3 = res.getLinearMapper(i, j2 + 3);\n\n          r0.prefetch(0);\n          r1.prefetch(0);\n          r2.prefetch(0);\n          r3.prefetch(0);\n\n          // performs \"inner\" products\n          const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];\n          prefetch(&blB[0]);\n          LhsPacket A0, A1;\n\n          for(Index k=0; k<peeled_kc; k+=pk)\n          {\n            EIGEN_ASM_COMMENT(\"begin gebp micro kernel 3pX4\");\n            RhsPacket B_0, T0;\n            LhsPacket 
A2;\n\n#define EIGEN_GEBP_ONESTEP(K) \\\n            do { \\\n              EIGEN_ASM_COMMENT(\"begin step of gebp micro kernel 3pX4\"); \\\n              EIGEN_ASM_COMMENT(\"Note: these asm comments work around bug 935!\"); \\\n              internal::prefetch(blA+(3*K+16)*LhsProgress); \\\n              if (EIGEN_ARCH_ARM) { internal::prefetch(blB+(4*K+16)*RhsProgress); } /* Bug 953 */ \\\n              traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0);  \\\n              traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1);  \\\n              traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2);  \\\n              traits.loadRhs(blB + (0+4*K)*Traits::RhsProgress, B_0); \\\n              traits.madd(A0, B_0, C0, T0); \\\n              traits.madd(A1, B_0, C4, T0); \\\n              traits.madd(A2, B_0, C8, B_0); \\\n              traits.loadRhs(blB + (1+4*K)*Traits::RhsProgress, B_0); \\\n              traits.madd(A0, B_0, C1, T0); \\\n              traits.madd(A1, B_0, C5, T0); \\\n              traits.madd(A2, B_0, C9, B_0); \\\n              traits.loadRhs(blB + (2+4*K)*Traits::RhsProgress, B_0); \\\n              traits.madd(A0, B_0, C2,  T0); \\\n              traits.madd(A1, B_0, C6,  T0); \\\n              traits.madd(A2, B_0, C10, B_0); \\\n              traits.loadRhs(blB + (3+4*K)*Traits::RhsProgress, B_0); \\\n              traits.madd(A0, B_0, C3 , T0); \\\n              traits.madd(A1, B_0, C7,  T0); \\\n              traits.madd(A2, B_0, C11, B_0); \\\n              EIGEN_ASM_COMMENT(\"end step of gebp micro kernel 3pX4\"); \\\n            } while(false)\n\n            internal::prefetch(blB);\n            EIGEN_GEBP_ONESTEP(0);\n            EIGEN_GEBP_ONESTEP(1);\n            EIGEN_GEBP_ONESTEP(2);\n            EIGEN_GEBP_ONESTEP(3);\n            EIGEN_GEBP_ONESTEP(4);\n            EIGEN_GEBP_ONESTEP(5);\n            EIGEN_GEBP_ONESTEP(6);\n            EIGEN_GEBP_ONESTEP(7);\n\n            blB += pk*4*RhsProgress;\n            blA += pk*3*Traits::LhsProgress;\n\n    
        EIGEN_ASM_COMMENT(\"end gebp micro kernel 3pX4\");\n          }\n          // process remaining peeled loop\n          for(Index k=peeled_kc; k<depth; k++)\n          {\n            RhsPacket B_0, T0;\n            LhsPacket A2;\n            EIGEN_GEBP_ONESTEP(0);\n            blB += 4*RhsProgress;\n            blA += 3*Traits::LhsProgress;\n          }\n\n#undef EIGEN_GEBP_ONESTEP\n\n          ResPacket R0, R1, R2;\n          ResPacket alphav = pset1<ResPacket>(alpha);\n\n          R0 = r0.loadPacket(0 * Traits::ResPacketSize);\n          R1 = r0.loadPacket(1 * Traits::ResPacketSize);\n          R2 = r0.loadPacket(2 * Traits::ResPacketSize);\n          traits.acc(C0, alphav, R0);\n          traits.acc(C4, alphav, R1);\n          traits.acc(C8, alphav, R2);\n          r0.storePacket(0 * Traits::ResPacketSize, R0);\n          r0.storePacket(1 * Traits::ResPacketSize, R1);\n          r0.storePacket(2 * Traits::ResPacketSize, R2);\n\n          R0 = r1.loadPacket(0 * Traits::ResPacketSize);\n          R1 = r1.loadPacket(1 * Traits::ResPacketSize);\n          R2 = r1.loadPacket(2 * Traits::ResPacketSize);\n          traits.acc(C1, alphav, R0);\n          traits.acc(C5, alphav, R1);\n          traits.acc(C9, alphav, R2);\n          r1.storePacket(0 * Traits::ResPacketSize, R0);\n          r1.storePacket(1 * Traits::ResPacketSize, R1);\n          r1.storePacket(2 * Traits::ResPacketSize, R2);\n\n          R0 = r2.loadPacket(0 * Traits::ResPacketSize);\n          R1 = r2.loadPacket(1 * Traits::ResPacketSize);\n          R2 = r2.loadPacket(2 * Traits::ResPacketSize);\n          traits.acc(C2, alphav, R0);\n          traits.acc(C6, alphav, R1);\n          traits.acc(C10, alphav, R2);\n          r2.storePacket(0 * Traits::ResPacketSize, R0);\n          r2.storePacket(1 * Traits::ResPacketSize, R1);\n          r2.storePacket(2 * Traits::ResPacketSize, R2);\n\n          R0 = r3.loadPacket(0 * Traits::ResPacketSize);\n          R1 = r3.loadPacket(1 * 
Traits::ResPacketSize);\n          R2 = r3.loadPacket(2 * Traits::ResPacketSize);\n          traits.acc(C3, alphav, R0);\n          traits.acc(C7, alphav, R1);\n          traits.acc(C11, alphav, R2);\n          r3.storePacket(0 * Traits::ResPacketSize, R0);\n          r3.storePacket(1 * Traits::ResPacketSize, R1);\n          r3.storePacket(2 * Traits::ResPacketSize, R2);          \n          }\n        }\n\n        // Deal with remaining columns of the rhs\n        for(Index j2=packet_cols4; j2<cols; j2++)\n        {\n          for(Index i=i1; i<actual_panel_end; i+=3*LhsProgress)\n          {\n          // One column at a time\n          const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*Traits::LhsProgress)];\n          prefetch(&blA[0]);\n\n          // gets res block as register\n          AccPacket C0, C4, C8;\n          traits.initAcc(C0);\n          traits.initAcc(C4);\n          traits.initAcc(C8);\n\n          LinearMapper r0 = res.getLinearMapper(i, j2);\n          r0.prefetch(0);\n\n          // performs \"inner\" products\n          const RhsScalar* blB = &blockB[j2*strideB+offsetB];\n          LhsPacket A0, A1, A2;\n          \n          for(Index k=0; k<peeled_kc; k+=pk)\n          {\n            EIGEN_ASM_COMMENT(\"begin gebp micro kernel 3pX1\");\n            RhsPacket B_0;\n#define EIGEN_GEBGP_ONESTEP(K) \\\n            do { \\\n              EIGEN_ASM_COMMENT(\"begin step of gebp micro kernel 3pX1\"); \\\n              EIGEN_ASM_COMMENT(\"Note: these asm comments work around bug 935!\"); \\\n              traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0);  \\\n              traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1);  \\\n              traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2);  \\\n              traits.loadRhs(&blB[(0+K)*RhsProgress], B_0);   \\\n              traits.madd(A0, B_0, C0, B_0); \\\n              traits.madd(A1, B_0, C4, B_0); \\\n              traits.madd(A2, B_0, C8, B_0); \\\n              EIGEN_ASM_COMMENT(\"end step of gebp 
micro kernel 3pX1\"); \\\n            } while(false)\n        \n            EIGEN_GEBGP_ONESTEP(0);\n            EIGEN_GEBGP_ONESTEP(1);\n            EIGEN_GEBGP_ONESTEP(2);\n            EIGEN_GEBGP_ONESTEP(3);\n            EIGEN_GEBGP_ONESTEP(4);\n            EIGEN_GEBGP_ONESTEP(5);\n            EIGEN_GEBGP_ONESTEP(6);\n            EIGEN_GEBGP_ONESTEP(7);\n\n            blB += pk*RhsProgress;\n            blA += pk*3*Traits::LhsProgress;\n\n            EIGEN_ASM_COMMENT(\"end gebp micro kernel 3pX1\");\n          }\n\n          // process remaining peeled loop\n          for(Index k=peeled_kc; k<depth; k++)\n          {\n            RhsPacket B_0;\n            EIGEN_GEBGP_ONESTEP(0);\n            blB += RhsProgress;\n            blA += 3*Traits::LhsProgress;\n          }\n#undef EIGEN_GEBGP_ONESTEP\n          ResPacket R0, R1, R2;\n          ResPacket alphav = pset1<ResPacket>(alpha);\n\n          R0 = r0.loadPacket(0 * Traits::ResPacketSize);\n          R1 = r0.loadPacket(1 * Traits::ResPacketSize);\n          R2 = r0.loadPacket(2 * Traits::ResPacketSize);\n          traits.acc(C0, alphav, R0);\n          traits.acc(C4, alphav, R1);\n          traits.acc(C8, alphav, R2);\n          r0.storePacket(0 * Traits::ResPacketSize, R0);\n          r0.storePacket(1 * Traits::ResPacketSize, R1);\n          r0.storePacket(2 * Traits::ResPacketSize, R2);          \n          }\n        }\n      }\n    }\n\n    //---------- Process 2 * LhsProgress rows at once ----------\n    if(mr>=2*Traits::LhsProgress)\n    {\n      const Index l1 = defaultL1CacheSize; // in Bytes, TODO, l1 should be passed to this function.\n      // The max(1, ...) here is needed because we may be using blocking params larger than what our known l1 cache size\n      // suggests we should be using: either because our known l1 cache size is inaccurate (e.g. 
on Android, we can only guess),\n      // or because we are testing specific blocking sizes.\n      Index actual_panel_rows = (2*LhsProgress) * std::max<Index>(1,( (l1 - sizeof(ResScalar)*mr*nr - depth*nr*sizeof(RhsScalar)) / (depth * sizeof(LhsScalar) * 2*LhsProgress) ));\n\n      for(Index i1=peeled_mc3; i1<peeled_mc2; i1+=actual_panel_rows)\n      {\n        Index actual_panel_end = (std::min)(i1+actual_panel_rows, peeled_mc2);\n        for(Index j2=0; j2<packet_cols4; j2+=nr)\n        {\n          for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress)\n          {\n          \n          // We selected a 2*Traits::LhsProgress x nr micro block of res which is entirely\n          // stored into 2 x nr registers.\n          \n          const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];\n          prefetch(&blA[0]);\n\n          // gets res block as register\n          AccPacket C0, C1, C2, C3,\n                    C4, C5, C6, C7;\n          traits.initAcc(C0); traits.initAcc(C1); traits.initAcc(C2); traits.initAcc(C3);\n          traits.initAcc(C4); traits.initAcc(C5); traits.initAcc(C6); traits.initAcc(C7);\n\n          LinearMapper r0 = res.getLinearMapper(i, j2 + 0);\n          LinearMapper r1 = res.getLinearMapper(i, j2 + 1);\n          LinearMapper r2 = res.getLinearMapper(i, j2 + 2);\n          LinearMapper r3 = res.getLinearMapper(i, j2 + 3);\n\n          r0.prefetch(prefetch_res_offset);\n          r1.prefetch(prefetch_res_offset);\n          r2.prefetch(prefetch_res_offset);\n          r3.prefetch(prefetch_res_offset);\n\n          // performs \"inner\" products\n          const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];\n          prefetch(&blB[0]);\n          LhsPacket A0, A1;\n\n          for(Index k=0; k<peeled_kc; k+=pk)\n          {\n            EIGEN_ASM_COMMENT(\"begin gebp micro kernel 2pX4\");\n            RhsPacket B_0, B1, B2, B3, T0;\n\n   #define EIGEN_GEBGP_ONESTEP(K) \\\n            do {                          
                                      \\\n              EIGEN_ASM_COMMENT(\"begin step of gebp micro kernel 2pX4\");        \\\n              EIGEN_ASM_COMMENT(\"Note: these asm comments work around bug 935!\"); \\\n              traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0);                    \\\n              traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1);                    \\\n              traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3);  \\\n              traits.madd(A0, B_0, C0, T0);                                     \\\n              traits.madd(A1, B_0, C4, B_0);                                    \\\n              traits.madd(A0, B1,  C1, T0);                                     \\\n              traits.madd(A1, B1,  C5, B1);                                     \\\n              traits.madd(A0, B2,  C2, T0);                                     \\\n              traits.madd(A1, B2,  C6, B2);                                     \\\n              traits.madd(A0, B3,  C3, T0);                                     \\\n              traits.madd(A1, B3,  C7, B3);                                     \\\n              EIGEN_ASM_COMMENT(\"end step of gebp micro kernel 2pX4\");          \\\n            } while(false)\n            \n            internal::prefetch(blB+(48+0));\n            EIGEN_GEBGP_ONESTEP(0);\n            EIGEN_GEBGP_ONESTEP(1);\n            EIGEN_GEBGP_ONESTEP(2);\n            EIGEN_GEBGP_ONESTEP(3);\n            internal::prefetch(blB+(48+16));\n            EIGEN_GEBGP_ONESTEP(4);\n            EIGEN_GEBGP_ONESTEP(5);\n            EIGEN_GEBGP_ONESTEP(6);\n            EIGEN_GEBGP_ONESTEP(7);\n\n            blB += pk*4*RhsProgress;\n            blA += pk*(2*Traits::LhsProgress);\n\n            EIGEN_ASM_COMMENT(\"end gebp micro kernel 2pX4\");\n          }\n          // process remaining peeled loop\n          for(Index k=peeled_kc; k<depth; k++)\n          {\n            RhsPacket B_0, B1, B2, B3, T0;\n            
EIGEN_GEBGP_ONESTEP(0);\n            blB += 4*RhsProgress;\n            blA += 2*Traits::LhsProgress;\n          }\n#undef EIGEN_GEBGP_ONESTEP\n\n          ResPacket R0, R1, R2, R3;\n          ResPacket alphav = pset1<ResPacket>(alpha);\n\n          R0 = r0.loadPacket(0 * Traits::ResPacketSize);\n          R1 = r0.loadPacket(1 * Traits::ResPacketSize);\n          R2 = r1.loadPacket(0 * Traits::ResPacketSize);\n          R3 = r1.loadPacket(1 * Traits::ResPacketSize);\n          traits.acc(C0, alphav, R0);\n          traits.acc(C4, alphav, R1);\n          traits.acc(C1, alphav, R2);\n          traits.acc(C5, alphav, R3);\n          r0.storePacket(0 * Traits::ResPacketSize, R0);\n          r0.storePacket(1 * Traits::ResPacketSize, R1);\n          r1.storePacket(0 * Traits::ResPacketSize, R2);\n          r1.storePacket(1 * Traits::ResPacketSize, R3);\n\n          R0 = r2.loadPacket(0 * Traits::ResPacketSize);\n          R1 = r2.loadPacket(1 * Traits::ResPacketSize);\n          R2 = r3.loadPacket(0 * Traits::ResPacketSize);\n          R3 = r3.loadPacket(1 * Traits::ResPacketSize);\n          traits.acc(C2,  alphav, R0);\n          traits.acc(C6,  alphav, R1);\n          traits.acc(C3,  alphav, R2);\n          traits.acc(C7,  alphav, R3);\n          r2.storePacket(0 * Traits::ResPacketSize, R0);\n          r2.storePacket(1 * Traits::ResPacketSize, R1);\n          r3.storePacket(0 * Traits::ResPacketSize, R2);\n          r3.storePacket(1 * Traits::ResPacketSize, R3);\n          }\n        }\n      \n        // Deal with remaining columns of the rhs\n        for(Index j2=packet_cols4; j2<cols; j2++)\n        {\n          for(Index i=i1; i<actual_panel_end; i+=2*LhsProgress)\n          {\n          // One column at a time\n          const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];\n          prefetch(&blA[0]);\n\n          // gets res block as register\n          AccPacket C0, C4;\n          traits.initAcc(C0);\n          traits.initAcc(C4);\n\n    
      LinearMapper r0 = res.getLinearMapper(i, j2);\n          r0.prefetch(prefetch_res_offset);\n\n          // performs \"inner\" products\n          const RhsScalar* blB = &blockB[j2*strideB+offsetB];\n          LhsPacket A0, A1;\n\n          for(Index k=0; k<peeled_kc; k+=pk)\n          {\n            EIGEN_ASM_COMMENT(\"begin gebp micro kernel 2pX1\");\n            RhsPacket B_0, B1;\n        \n#define EIGEN_GEBGP_ONESTEP(K) \\\n            do {                                                                  \\\n              EIGEN_ASM_COMMENT(\"begin step of gebp micro kernel 2pX1\");          \\\n              EIGEN_ASM_COMMENT(\"Note: these asm comments work around bug 935!\"); \\\n              traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0);                      \\\n              traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1);                      \\\n              traits.loadRhs(&blB[(0+K)*RhsProgress], B_0);                       \\\n              traits.madd(A0, B_0, C0, B1);                                       \\\n              traits.madd(A1, B_0, C4, B_0);                                      \\\n              EIGEN_ASM_COMMENT(\"end step of gebp micro kernel 2pX1\");            \\\n            } while(false)\n        \n            EIGEN_GEBGP_ONESTEP(0);\n            EIGEN_GEBGP_ONESTEP(1);\n            EIGEN_GEBGP_ONESTEP(2);\n            EIGEN_GEBGP_ONESTEP(3);\n            EIGEN_GEBGP_ONESTEP(4);\n            EIGEN_GEBGP_ONESTEP(5);\n            EIGEN_GEBGP_ONESTEP(6);\n            EIGEN_GEBGP_ONESTEP(7);\n\n            blB += pk*RhsProgress;\n            blA += pk*2*Traits::LhsProgress;\n\n            EIGEN_ASM_COMMENT(\"end gebp micro kernel 2pX1\");\n          }\n\n          // process remaining peeled loop\n          for(Index k=peeled_kc; k<depth; k++)\n          {\n            RhsPacket B_0, B1;\n            EIGEN_GEBGP_ONESTEP(0);\n            blB += RhsProgress;\n            blA += 2*Traits::LhsProgress;\n          }\n#undef 
EIGEN_GEBGP_ONESTEP\n          ResPacket R0, R1;\n          ResPacket alphav = pset1<ResPacket>(alpha);\n\n          R0 = r0.loadPacket(0 * Traits::ResPacketSize);\n          R1 = r0.loadPacket(1 * Traits::ResPacketSize);\n          traits.acc(C0, alphav, R0);\n          traits.acc(C4, alphav, R1);\n          r0.storePacket(0 * Traits::ResPacketSize, R0);\n          r0.storePacket(1 * Traits::ResPacketSize, R1);\n          }\n        }\n      }\n    }\n    //---------- Process 1 * LhsProgress rows at once ----------\n    if(mr>=1*Traits::LhsProgress)\n    {\n      // loops on each largest micro horizontal panel of lhs (1*LhsProgress x depth)\n      for(Index i=peeled_mc2; i<peeled_mc1; i+=1*LhsProgress)\n      {\n        // loops on each largest micro vertical panel of rhs (depth * nr)\n        for(Index j2=0; j2<packet_cols4; j2+=nr)\n        {\n          // We select a 1*Traits::LhsProgress x nr micro block of res which is entirely\n          // stored into 1 x nr registers.\n          \n          const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];\n          prefetch(&blA[0]);\n\n          // gets res block as register\n          AccPacket C0, C1, C2, C3;\n          traits.initAcc(C0);\n          traits.initAcc(C1);\n          traits.initAcc(C2);\n          traits.initAcc(C3);\n\n          LinearMapper r0 = res.getLinearMapper(i, j2 + 0);\n          LinearMapper r1 = res.getLinearMapper(i, j2 + 1);\n          LinearMapper r2 = res.getLinearMapper(i, j2 + 2);\n          LinearMapper r3 = res.getLinearMapper(i, j2 + 3);\n\n          r0.prefetch(prefetch_res_offset);\n          r1.prefetch(prefetch_res_offset);\n          r2.prefetch(prefetch_res_offset);\n          r3.prefetch(prefetch_res_offset);\n\n          // performs \"inner\" products\n          const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];\n          prefetch(&blB[0]);\n          LhsPacket A0;\n\n          for(Index k=0; k<peeled_kc; k+=pk)\n          {\n            
EIGEN_ASM_COMMENT(\"begin gebp micro kernel 1pX4\");\n            RhsPacket B_0, B1, B2, B3;\n               \n#define EIGEN_GEBGP_ONESTEP(K) \\\n            do {                                                                \\\n              EIGEN_ASM_COMMENT(\"begin step of gebp micro kernel 1pX4\");        \\\n              EIGEN_ASM_COMMENT(\"Note: these asm comments work around bug 935!\"); \\\n              traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0);                    \\\n              traits.broadcastRhs(&blB[(0+4*K)*RhsProgress], B_0, B1, B2, B3);  \\\n              traits.madd(A0, B_0, C0, B_0);                                    \\\n              traits.madd(A0, B1,  C1, B1);                                     \\\n              traits.madd(A0, B2,  C2, B2);                                     \\\n              traits.madd(A0, B3,  C3, B3);                                     \\\n              EIGEN_ASM_COMMENT(\"end step of gebp micro kernel 1pX4\");          \\\n            } while(false)\n            \n            internal::prefetch(blB+(48+0));\n            EIGEN_GEBGP_ONESTEP(0);\n            EIGEN_GEBGP_ONESTEP(1);\n            EIGEN_GEBGP_ONESTEP(2);\n            EIGEN_GEBGP_ONESTEP(3);\n            internal::prefetch(blB+(48+16));\n            EIGEN_GEBGP_ONESTEP(4);\n            EIGEN_GEBGP_ONESTEP(5);\n            EIGEN_GEBGP_ONESTEP(6);\n            EIGEN_GEBGP_ONESTEP(7);\n\n            blB += pk*4*RhsProgress;\n            blA += pk*1*LhsProgress;\n\n            EIGEN_ASM_COMMENT(\"end gebp micro kernel 1pX4\");\n          }\n          // process remaining peeled loop\n          for(Index k=peeled_kc; k<depth; k++)\n          {\n            RhsPacket B_0, B1, B2, B3;\n            EIGEN_GEBGP_ONESTEP(0);\n            blB += 4*RhsProgress;\n            blA += 1*LhsProgress;\n          }\n#undef EIGEN_GEBGP_ONESTEP\n\n          ResPacket R0, R1;\n          ResPacket alphav = pset1<ResPacket>(alpha);\n\n          R0 = r0.loadPacket(0 * 
Traits::ResPacketSize);\n          R1 = r1.loadPacket(0 * Traits::ResPacketSize);\n          traits.acc(C0, alphav, R0);\n          traits.acc(C1,  alphav, R1);\n          r0.storePacket(0 * Traits::ResPacketSize, R0);\n          r1.storePacket(0 * Traits::ResPacketSize, R1);\n\n          R0 = r2.loadPacket(0 * Traits::ResPacketSize);\n          R1 = r3.loadPacket(0 * Traits::ResPacketSize);\n          traits.acc(C2,  alphav, R0);\n          traits.acc(C3,  alphav, R1);\n          r2.storePacket(0 * Traits::ResPacketSize, R0);\n          r3.storePacket(0 * Traits::ResPacketSize, R1);\n        }\n\n        // Deal with remaining columns of the rhs\n        for(Index j2=packet_cols4; j2<cols; j2++)\n        {\n          // One column at a time\n          const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];\n          prefetch(&blA[0]);\n\n          // gets res block as register\n          AccPacket C0;\n          traits.initAcc(C0);\n\n          LinearMapper r0 = res.getLinearMapper(i, j2);\n\n          // performs \"inner\" products\n          const RhsScalar* blB = &blockB[j2*strideB+offsetB];\n          LhsPacket A0;\n\n          for(Index k=0; k<peeled_kc; k+=pk)\n          {\n            EIGEN_ASM_COMMENT(\"begin gebp micro kernel 1pX1\");\n            RhsPacket B_0;\n        \n#define EIGEN_GEBGP_ONESTEP(K) \\\n            do {                                                                \\\n              EIGEN_ASM_COMMENT(\"begin step of gebp micro kernel 1pX1\");        \\\n              EIGEN_ASM_COMMENT(\"Note: these asm comments work around bug 935!\"); \\\n              traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0);                    \\\n              traits.loadRhs(&blB[(0+K)*RhsProgress], B_0);                     \\\n              traits.madd(A0, B_0, C0, B_0);                                    \\\n              EIGEN_ASM_COMMENT(\"end step of gebp micro kernel 1pX1\");          \\\n            } while(false);\n\n            
EIGEN_GEBGP_ONESTEP(0);\n            EIGEN_GEBGP_ONESTEP(1);\n            EIGEN_GEBGP_ONESTEP(2);\n            EIGEN_GEBGP_ONESTEP(3);\n            EIGEN_GEBGP_ONESTEP(4);\n            EIGEN_GEBGP_ONESTEP(5);\n            EIGEN_GEBGP_ONESTEP(6);\n            EIGEN_GEBGP_ONESTEP(7);\n\n            blB += pk*RhsProgress;\n            blA += pk*1*Traits::LhsProgress;\n\n            EIGEN_ASM_COMMENT(\"end gebp micro kernel 1pX1\");\n          }\n\n          // process remaining peeled loop\n          for(Index k=peeled_kc; k<depth; k++)\n          {\n            RhsPacket B_0;\n            EIGEN_GEBGP_ONESTEP(0);\n            blB += RhsProgress;\n            blA += 1*Traits::LhsProgress;\n          }\n#undef EIGEN_GEBGP_ONESTEP\n          ResPacket R0;\n          ResPacket alphav = pset1<ResPacket>(alpha);\n          R0 = r0.loadPacket(0 * Traits::ResPacketSize);\n          traits.acc(C0, alphav, R0);\n          r0.storePacket(0 * Traits::ResPacketSize, R0);\n        }\n      }\n    }\n    //---------- Process remaining rows, 1 at once ----------\n    if(peeled_mc1<rows)\n    {\n      // loop on each panel of the rhs\n      for(Index j2=0; j2<packet_cols4; j2+=nr)\n      {\n        // loop on each row of the lhs (1*LhsProgress x depth)\n        for(Index i=peeled_mc1; i<rows; i+=1)\n        {\n          const LhsScalar* blA = &blockA[i*strideA+offsetA];\n          prefetch(&blA[0]);\n          const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];\n\n          // The following piece of code wont work for 512 bit registers\n          // Moreover, if LhsProgress==8 it assumes that there is a half packet of the same size\n          // as nr (which is currently 4) for the return type.\n          typedef typename unpacket_traits<SResPacket>::half SResPacketHalf;\n          if ((SwappedTraits::LhsProgress % 4) == 0 &&\n              (SwappedTraits::LhsProgress <= 8) &&\n              (SwappedTraits::LhsProgress!=8 || unpacket_traits<SResPacketHalf>::size==nr))\n          
{\n            SAccPacket C0, C1, C2, C3;\n            straits.initAcc(C0);\n            straits.initAcc(C1);\n            straits.initAcc(C2);\n            straits.initAcc(C3);\n\n            const Index spk   = (std::max)(1,SwappedTraits::LhsProgress/4);\n            const Index endk  = (depth/spk)*spk;\n            const Index endk4 = (depth/(spk*4))*(spk*4);\n\n            Index k=0;\n            for(; k<endk4; k+=4*spk)\n            {\n              SLhsPacket A0,A1;\n              SRhsPacket B_0,B_1;\n\n              straits.loadLhsUnaligned(blB+0*SwappedTraits::LhsProgress, A0);\n              straits.loadLhsUnaligned(blB+1*SwappedTraits::LhsProgress, A1);\n\n              straits.loadRhsQuad(blA+0*spk, B_0);\n              straits.loadRhsQuad(blA+1*spk, B_1);\n              straits.madd(A0,B_0,C0,B_0);\n              straits.madd(A1,B_1,C1,B_1);\n\n              straits.loadLhsUnaligned(blB+2*SwappedTraits::LhsProgress, A0);\n              straits.loadLhsUnaligned(blB+3*SwappedTraits::LhsProgress, A1);\n              straits.loadRhsQuad(blA+2*spk, B_0);\n              straits.loadRhsQuad(blA+3*spk, B_1);\n              straits.madd(A0,B_0,C2,B_0);\n              straits.madd(A1,B_1,C3,B_1);\n\n              blB += 4*SwappedTraits::LhsProgress;\n              blA += 4*spk;\n            }\n            C0 = padd(padd(C0,C1),padd(C2,C3));\n            for(; k<endk; k+=spk)\n            {\n              SLhsPacket A0;\n              SRhsPacket B_0;\n\n              straits.loadLhsUnaligned(blB, A0);\n              straits.loadRhsQuad(blA, B_0);\n              straits.madd(A0,B_0,C0,B_0);\n\n              blB += SwappedTraits::LhsProgress;\n              blA += spk;\n            }\n            if(SwappedTraits::LhsProgress==8)\n            {\n              // Special case where we have to first reduce the accumulation register C0\n              typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SResPacket>::half,SResPacket>::type 
SResPacketHalf;\n              typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SLhsPacket>::half,SLhsPacket>::type SLhsPacketHalf;\n              typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SLhsPacket>::half,SRhsPacket>::type SRhsPacketHalf;\n              typedef typename conditional<SwappedTraits::LhsProgress>=8,typename unpacket_traits<SAccPacket>::half,SAccPacket>::type SAccPacketHalf;\n\n              SResPacketHalf R = res.template gatherPacket<SResPacketHalf>(i, j2);\n              SResPacketHalf alphav = pset1<SResPacketHalf>(alpha);\n\n              if(depth-endk>0)\n              {\n                // We have to handle the last row of the rhs which corresponds to a half-packet\n                SLhsPacketHalf a0;\n                SRhsPacketHalf b0;\n                straits.loadLhsUnaligned(blB, a0);\n                straits.loadRhs(blA, b0);\n                SAccPacketHalf c0 = predux_downto4(C0);\n                straits.madd(a0,b0,c0,b0);\n                straits.acc(c0, alphav, R);\n              }\n              else\n              {\n                straits.acc(predux_downto4(C0), alphav, R);\n              }\n              res.scatterPacket(i, j2, R);\n            }\n            else\n            {\n              SResPacket R = res.template gatherPacket<SResPacket>(i, j2);\n              SResPacket alphav = pset1<SResPacket>(alpha);\n              straits.acc(C0, alphav, R);\n              res.scatterPacket(i, j2, R);\n            }\n          }\n          else // scalar path\n          {\n            // get a 1 x 4 res block as registers\n            ResScalar C0(0), C1(0), C2(0), C3(0);\n\n            for(Index k=0; k<depth; k++)\n            {\n              LhsScalar A0;\n              RhsScalar B_0, B_1;\n\n              A0 = blA[k];\n\n              B_0 = blB[0];\n              B_1 = blB[1];\n              CJMADD(cj,A0,B_0,C0,  B_0);\n              CJMADD(cj,A0,B_1,C1,  
B_1);\n              \n              B_0 = blB[2];\n              B_1 = blB[3];\n              CJMADD(cj,A0,B_0,C2,  B_0);\n              CJMADD(cj,A0,B_1,C3,  B_1);\n              \n              blB += 4;\n            }\n            res(i, j2 + 0) += alpha * C0;\n            res(i, j2 + 1) += alpha * C1;\n            res(i, j2 + 2) += alpha * C2;\n            res(i, j2 + 3) += alpha * C3;\n          }\n        }\n      }\n      // remaining columns\n      for(Index j2=packet_cols4; j2<cols; j2++)\n      {\n        // loop on each row of the lhs (1*LhsProgress x depth)\n        for(Index i=peeled_mc1; i<rows; i+=1)\n        {\n          const LhsScalar* blA = &blockA[i*strideA+offsetA];\n          prefetch(&blA[0]);\n          // gets a 1 x 1 res block as registers\n          ResScalar C0(0);\n          const RhsScalar* blB = &blockB[j2*strideB+offsetB];\n          for(Index k=0; k<depth; k++)\n          {\n            LhsScalar A0 = blA[k];\n            RhsScalar B_0 = blB[k];\n            CJMADD(cj, A0, B_0, C0, B_0);\n          }\n          res(i, j2) += alpha * C0;\n        }\n      }\n    }\n  }\n\n\n#undef CJMADD\n\n// pack a block of the lhs\n// The traversal is as follow (mr==4):\n//   0  4  8 12 ...\n//   1  5  9 13 ...\n//   2  6 10 14 ...\n//   3  7 11 15 ...\n//\n//  16 20 24 28 ...\n//  17 21 25 29 ...\n//  18 22 26 30 ...\n//  19 23 27 31 ...\n//\n//  32 33 34 35 ...\n//  36 36 38 39 ...\ntemplate<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>\nstruct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>\n{\n  typedef typename DataMapper::LinearMapper LinearMapper;\n  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);\n};\n\ntemplate<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>\nEIGEN_DONT_INLINE void 
gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, ColMajor, Conjugate, PanelMode>\n  ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)\n{\n  typedef typename packet_traits<Scalar>::type Packet;\n  enum { PacketSize = packet_traits<Scalar>::size };\n\n  EIGEN_ASM_COMMENT(\"EIGEN PRODUCT PACK LHS\");\n  EIGEN_UNUSED_VARIABLE(stride);\n  EIGEN_UNUSED_VARIABLE(offset);\n  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));\n  eigen_assert( ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) );\n  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;\n  Index count = 0;\n\n  const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;\n  const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;\n  const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;\n  const Index peeled_mc0 = Pack2>=1*PacketSize ? peeled_mc1\n                         : Pack2>1             ? 
(rows/Pack2)*Pack2 : 0;\n\n  Index i=0;\n\n  // Pack 3 packets\n  if(Pack1>=3*PacketSize)\n  {\n    for(; i<peeled_mc3; i+=3*PacketSize)\n    {\n      if(PanelMode) count += (3*PacketSize) * offset;\n\n      for(Index k=0; k<depth; k++)\n      {\n        Packet A, B, C;\n        A = lhs.loadPacket(i+0*PacketSize, k);\n        B = lhs.loadPacket(i+1*PacketSize, k);\n        C = lhs.loadPacket(i+2*PacketSize, k);\n        pstore(blockA+count, cj.pconj(A)); count+=PacketSize;\n        pstore(blockA+count, cj.pconj(B)); count+=PacketSize;\n        pstore(blockA+count, cj.pconj(C)); count+=PacketSize;\n      }\n      if(PanelMode) count += (3*PacketSize) * (stride-offset-depth);\n    }\n  }\n  // Pack 2 packets\n  if(Pack1>=2*PacketSize)\n  {\n    for(; i<peeled_mc2; i+=2*PacketSize)\n    {\n      if(PanelMode) count += (2*PacketSize) * offset;\n\n      for(Index k=0; k<depth; k++)\n      {\n        Packet A, B;\n        A = lhs.loadPacket(i+0*PacketSize, k);\n        B = lhs.loadPacket(i+1*PacketSize, k);\n        pstore(blockA+count, cj.pconj(A)); count+=PacketSize;\n        pstore(blockA+count, cj.pconj(B)); count+=PacketSize;\n      }\n      if(PanelMode) count += (2*PacketSize) * (stride-offset-depth);\n    }\n  }\n  // Pack 1 packets\n  if(Pack1>=1*PacketSize)\n  {\n    for(; i<peeled_mc1; i+=1*PacketSize)\n    {\n      if(PanelMode) count += (1*PacketSize) * offset;\n\n      for(Index k=0; k<depth; k++)\n      {\n        Packet A;\n        A = lhs.loadPacket(i+0*PacketSize, k);\n        pstore(blockA+count, cj.pconj(A));\n        count+=PacketSize;\n      }\n      if(PanelMode) count += (1*PacketSize) * (stride-offset-depth);\n    }\n  }\n  // Pack scalars\n  if(Pack2<PacketSize && Pack2>1)\n  {\n    for(; i<peeled_mc0; i+=Pack2)\n    {\n      if(PanelMode) count += Pack2 * offset;\n\n      for(Index k=0; k<depth; k++)\n        for(Index w=0; w<Pack2; w++)\n          blockA[count++] = cj(lhs(i+w, k));\n\n      if(PanelMode) count += Pack2 * 
(stride-offset-depth);\n    }\n  }\n  for(; i<rows; i++)\n  {\n    if(PanelMode) count += offset;\n    for(Index k=0; k<depth; k++)\n      blockA[count++] = cj(lhs(i, k));\n    if(PanelMode) count += (stride-offset-depth);\n  }\n}\n\ntemplate<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>\nstruct gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>\n{\n  typedef typename DataMapper::LinearMapper LinearMapper;\n  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0);\n};\n\ntemplate<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, bool Conjugate, bool PanelMode>\nEIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, DataMapper, Pack1, Pack2, RowMajor, Conjugate, PanelMode>\n  ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset)\n{\n  typedef typename packet_traits<Scalar>::type Packet;\n  enum { PacketSize = packet_traits<Scalar>::size };\n\n  EIGEN_ASM_COMMENT(\"EIGEN PRODUCT PACK LHS\");\n  EIGEN_UNUSED_VARIABLE(stride);\n  EIGEN_UNUSED_VARIABLE(offset);\n  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));\n  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;\n  Index count = 0;\n\n//   const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;\n//   const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;\n//   const Index peeled_mc1 = Pack1>=1*PacketSize ? 
(rows/(1*PacketSize))*(1*PacketSize) : 0;\n\n  int pack = Pack1;\n  Index i = 0;\n  while(pack>0)\n  {\n    Index remaining_rows = rows-i;\n    Index peeled_mc = i+(remaining_rows/pack)*pack;\n    for(; i<peeled_mc; i+=pack)\n    {\n      if(PanelMode) count += pack * offset;\n\n      const Index peeled_k = (depth/PacketSize)*PacketSize;\n      Index k=0;\n      if(pack>=PacketSize)\n      {\n        for(; k<peeled_k; k+=PacketSize)\n        {\n          for (Index m = 0; m < pack; m += PacketSize)\n          {\n            PacketBlock<Packet> kernel;\n            for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = lhs.loadPacket(i+p+m, k);\n            ptranspose(kernel);\n            for (int p = 0; p < PacketSize; ++p) pstore(blockA+count+m+(pack)*p, cj.pconj(kernel.packet[p]));\n          }\n          count += PacketSize*pack;\n        }\n      }\n      for(; k<depth; k++)\n      {\n        Index w=0;\n        for(; w<pack-3; w+=4)\n        {\n          Scalar a(cj(lhs(i+w+0, k))),\n                 b(cj(lhs(i+w+1, k))),\n                 c(cj(lhs(i+w+2, k))),\n                 d(cj(lhs(i+w+3, k)));\n          blockA[count++] = a;\n          blockA[count++] = b;\n          blockA[count++] = c;\n          blockA[count++] = d;\n        }\n        if(pack%4)\n          for(;w<pack;++w)\n            blockA[count++] = cj(lhs(i+w, k));\n      }\n\n      if(PanelMode) count += pack * (stride-offset-depth);\n    }\n\n    pack -= PacketSize;\n    if(pack<Pack2 && (pack+PacketSize)!=Pack2)\n      pack = Pack2;\n  }\n\n  for(; i<rows; i++)\n  {\n    if(PanelMode) count += offset;\n    for(Index k=0; k<depth; k++)\n      blockA[count++] = cj(lhs(i, k));\n    if(PanelMode) count += (stride-offset-depth);\n  }\n}\n\n// copy a complete panel of the rhs\n// this version is optimized for column major matrices\n// The traversal order is as follow: (nr==4):\n//  0  1  2  3   12 13 14 15   24 27\n//  4  5  6  7   16 17 18 19   25 28\n//  8  9 10 11   20 21 22 23   26 29\n//  . 
 .  .  .    .  .  .  .    .  .\ntemplate<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>\nstruct gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>\n{\n  typedef typename packet_traits<Scalar>::type Packet;\n  typedef typename DataMapper::LinearMapper LinearMapper;\n  enum { PacketSize = packet_traits<Scalar>::size };\n  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);\n};\n\ntemplate<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>\nEIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode>\n  ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)\n{\n  EIGEN_ASM_COMMENT(\"EIGEN PRODUCT PACK RHS COLMAJOR\");\n  EIGEN_UNUSED_VARIABLE(stride);\n  EIGEN_UNUSED_VARIABLE(offset);\n  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));\n  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;\n  Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;\n  Index packet_cols4 = nr>=4 ? 
(cols/4) * 4 : 0;\n  Index count = 0;\n  const Index peeled_k = (depth/PacketSize)*PacketSize;\n//   if(nr>=8)\n//   {\n//     for(Index j2=0; j2<packet_cols8; j2+=8)\n//     {\n//       // skip what we have before\n//       if(PanelMode) count += 8 * offset;\n//       const Scalar* b0 = &rhs[(j2+0)*rhsStride];\n//       const Scalar* b1 = &rhs[(j2+1)*rhsStride];\n//       const Scalar* b2 = &rhs[(j2+2)*rhsStride];\n//       const Scalar* b3 = &rhs[(j2+3)*rhsStride];\n//       const Scalar* b4 = &rhs[(j2+4)*rhsStride];\n//       const Scalar* b5 = &rhs[(j2+5)*rhsStride];\n//       const Scalar* b6 = &rhs[(j2+6)*rhsStride];\n//       const Scalar* b7 = &rhs[(j2+7)*rhsStride];\n//       Index k=0;\n//       if(PacketSize==8) // TODO enbale vectorized transposition for PacketSize==4\n//       {\n//         for(; k<peeled_k; k+=PacketSize) {\n//           PacketBlock<Packet> kernel;\n//           for (int p = 0; p < PacketSize; ++p) {\n//             kernel.packet[p] = ploadu<Packet>(&rhs[(j2+p)*rhsStride+k]);\n//           }\n//           ptranspose(kernel);\n//           for (int p = 0; p < PacketSize; ++p) {\n//             pstoreu(blockB+count, cj.pconj(kernel.packet[p]));\n//             count+=PacketSize;\n//           }\n//         }\n//       }\n//       for(; k<depth; k++)\n//       {\n//         blockB[count+0] = cj(b0[k]);\n//         blockB[count+1] = cj(b1[k]);\n//         blockB[count+2] = cj(b2[k]);\n//         blockB[count+3] = cj(b3[k]);\n//         blockB[count+4] = cj(b4[k]);\n//         blockB[count+5] = cj(b5[k]);\n//         blockB[count+6] = cj(b6[k]);\n//         blockB[count+7] = cj(b7[k]);\n//         count += 8;\n//       }\n//       // skip what we have after\n//       if(PanelMode) count += 8 * (stride-offset-depth);\n//     }\n//   }\n\n  if(nr>=4)\n  {\n    for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)\n    {\n      // skip what we have before\n      if(PanelMode) count += 4 * offset;\n      const LinearMapper dm0 = 
rhs.getLinearMapper(0, j2 + 0);\n      const LinearMapper dm1 = rhs.getLinearMapper(0, j2 + 1);\n      const LinearMapper dm2 = rhs.getLinearMapper(0, j2 + 2);\n      const LinearMapper dm3 = rhs.getLinearMapper(0, j2 + 3);\n\n      Index k=0;\n      if((PacketSize%4)==0) // TODO enable vectorized transposition for PacketSize==2 ??\n      {\n        for(; k<peeled_k; k+=PacketSize) {\n          PacketBlock<Packet,(PacketSize%4)==0?4:PacketSize> kernel;\n          kernel.packet[0] = dm0.loadPacket(k);\n          kernel.packet[1%PacketSize] = dm1.loadPacket(k);\n          kernel.packet[2%PacketSize] = dm2.loadPacket(k);\n          kernel.packet[3%PacketSize] = dm3.loadPacket(k);\n          ptranspose(kernel);\n          pstoreu(blockB+count+0*PacketSize, cj.pconj(kernel.packet[0]));\n          pstoreu(blockB+count+1*PacketSize, cj.pconj(kernel.packet[1%PacketSize]));\n          pstoreu(blockB+count+2*PacketSize, cj.pconj(kernel.packet[2%PacketSize]));\n          pstoreu(blockB+count+3*PacketSize, cj.pconj(kernel.packet[3%PacketSize]));\n          count+=4*PacketSize;\n        }\n      }\n      for(; k<depth; k++)\n      {\n        blockB[count+0] = cj(dm0(k));\n        blockB[count+1] = cj(dm1(k));\n        blockB[count+2] = cj(dm2(k));\n        blockB[count+3] = cj(dm3(k));\n        count += 4;\n      }\n      // skip what we have after\n      if(PanelMode) count += 4 * (stride-offset-depth);\n    }\n  }\n\n  // copy the remaining columns one at a time (nr==1)\n  for(Index j2=packet_cols4; j2<cols; ++j2)\n  {\n    if(PanelMode) count += offset;\n    const LinearMapper dm0 = rhs.getLinearMapper(0, j2);\n    for(Index k=0; k<depth; k++)\n    {\n      blockB[count] = cj(dm0(k));\n      count += 1;\n    }\n    if(PanelMode) count += (stride-offset-depth);\n  }\n}\n\n// this version is optimized for row major matrices\ntemplate<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>\nstruct gemm_pack_rhs<Scalar, Index, DataMapper, 
nr, RowMajor, Conjugate, PanelMode>\n{\n  typedef typename packet_traits<Scalar>::type Packet;\n  typedef typename DataMapper::LinearMapper LinearMapper;\n  enum { PacketSize = packet_traits<Scalar>::size };\n  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0);\n};\n\ntemplate<typename Scalar, typename Index, typename DataMapper, int nr, bool Conjugate, bool PanelMode>\nEIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode>\n  ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset)\n{\n  EIGEN_ASM_COMMENT(\"EIGEN PRODUCT PACK RHS ROWMAJOR\");\n  EIGEN_UNUSED_VARIABLE(stride);\n  EIGEN_UNUSED_VARIABLE(offset);\n  eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));\n  conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;\n  Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;\n  Index packet_cols4 = nr>=4 ? 
(cols/4) * 4 : 0;\n  Index count = 0;\n\n//   if(nr>=8)\n//   {\n//     for(Index j2=0; j2<packet_cols8; j2+=8)\n//     {\n//       // skip what we have before\n//       if(PanelMode) count += 8 * offset;\n//       for(Index k=0; k<depth; k++)\n//       {\n//         if (PacketSize==8) {\n//           Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);\n//           pstoreu(blockB+count, cj.pconj(A));\n//         } else if (PacketSize==4) {\n//           Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);\n//           Packet B = ploadu<Packet>(&rhs[k*rhsStride + j2 + PacketSize]);\n//           pstoreu(blockB+count, cj.pconj(A));\n//           pstoreu(blockB+count+PacketSize, cj.pconj(B));\n//         } else {\n//           const Scalar* b0 = &rhs[k*rhsStride + j2];\n//           blockB[count+0] = cj(b0[0]);\n//           blockB[count+1] = cj(b0[1]);\n//           blockB[count+2] = cj(b0[2]);\n//           blockB[count+3] = cj(b0[3]);\n//           blockB[count+4] = cj(b0[4]);\n//           blockB[count+5] = cj(b0[5]);\n//           blockB[count+6] = cj(b0[6]);\n//           blockB[count+7] = cj(b0[7]);\n//         }\n//         count += 8;\n//       }\n//       // skip what we have after\n//       if(PanelMode) count += 8 * (stride-offset-depth);\n//     }\n//   }\n  if(nr>=4)\n  {\n    for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)\n    {\n      // skip what we have before\n      if(PanelMode) count += 4 * offset;\n      for(Index k=0; k<depth; k++)\n      {\n        if (PacketSize==4) {\n          Packet A = rhs.loadPacket(k, j2);\n          pstoreu(blockB+count, cj.pconj(A));\n          count += PacketSize;\n        } else {\n          const LinearMapper dm0 = rhs.getLinearMapper(k, j2);\n          blockB[count+0] = cj(dm0(0));\n          blockB[count+1] = cj(dm0(1));\n          blockB[count+2] = cj(dm0(2));\n          blockB[count+3] = cj(dm0(3));\n          count += 4;\n        }\n      }\n      // skip what we have after\n      if(PanelMode) count += 4 * 
(stride-offset-depth);\n    }\n  }\n  // copy the remaining columns one at a time (nr==1)\n  for(Index j2=packet_cols4; j2<cols; ++j2)\n  {\n    if(PanelMode) count += offset;\n    for(Index k=0; k<depth; k++)\n    {\n      blockB[count] = cj(rhs(k, j2));\n      count += 1;\n    }\n    if(PanelMode) count += stride-offset-depth;\n  }\n}\n\n} // end namespace internal\n\n/** \\returns the currently set level 1 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.\n  * \\sa setCpuCacheSize */\ninline std::ptrdiff_t l1CacheSize()\n{\n  std::ptrdiff_t l1, l2, l3;\n  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);\n  return l1;\n}\n\n/** \\returns the currently set level 2 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.\n  * \\sa setCpuCacheSize */\ninline std::ptrdiff_t l2CacheSize()\n{\n  std::ptrdiff_t l1, l2, l3;\n  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);\n  return l2;\n}\n\n/** \\returns the currently set level 3 cpu cache size (in bytes) used to estimate the ideal blocking size paramete\\\nrs.                                                                                                                \n* \\sa setCpuCacheSize */\ninline std::ptrdiff_t l3CacheSize()\n{\n  std::ptrdiff_t l1, l2, l3;\n  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);\n  return l3;\n}\n\n/** Set the cpu L1 and L2 cache sizes (in bytes).\n  * These values are use to adjust the size of the blocks\n  * for the algorithms working per blocks.\n  *\n  * \\sa computeProductBlockingSizes */\ninline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2, std::ptrdiff_t l3)\n{\n  internal::manage_caching_sizes(SetAction, &l1, &l2, &l3);\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_GENERAL_BLOCK_PANEL_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/GeneralMatrixMatrix.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H\n#define EIGEN_GENERAL_MATRIX_MATRIX_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<typename _LhsScalar, typename _RhsScalar> class level3_blocking;\n\n/* Specialization for a row-major destination matrix => simple transposition of the product */\ntemplate<\n  typename Index,\n  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,\n  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>\nstruct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>\n{\n  typedef gebp_traits<RhsScalar,LhsScalar> Traits;\n\n  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;\n  static EIGEN_STRONG_INLINE void run(\n    Index rows, Index cols, Index depth,\n    const LhsScalar* lhs, Index lhsStride,\n    const RhsScalar* rhs, Index rhsStride,\n    ResScalar* res, Index resStride,\n    ResScalar alpha,\n    level3_blocking<RhsScalar,LhsScalar>& blocking,\n    GemmParallelInfo<Index>* info = 0)\n  {\n    // transpose the product such that the result is column major\n    general_matrix_matrix_product<Index,\n      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,\n      LhsScalar, LhsStorageOrder==RowMajor ? 
ColMajor : RowMajor, ConjugateLhs,\n      ColMajor>\n    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);\n  }\n};\n\n/*  Specialization for a col-major destination matrix\n *    => Blocking algorithm following Goto's paper */\ntemplate<\n  typename Index,\n  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,\n  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>\nstruct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>\n{\n\ntypedef gebp_traits<LhsScalar,RhsScalar> Traits;\n\ntypedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;\nstatic void run(Index rows, Index cols, Index depth,\n  const LhsScalar* _lhs, Index lhsStride,\n  const RhsScalar* _rhs, Index rhsStride,\n  ResScalar* _res, Index resStride,\n  ResScalar alpha,\n  level3_blocking<LhsScalar,RhsScalar>& blocking,\n  GemmParallelInfo<Index>* info = 0)\n{\n  typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;\n  typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;\n  typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;\n  LhsMapper lhs(_lhs,lhsStride);\n  RhsMapper rhs(_rhs,rhsStride);\n  ResMapper res(_res, resStride);\n\n  Index kc = blocking.kc();                   // cache block size along the K direction\n  Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction\n  Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction\n\n  gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;\n  gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;\n  gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;\n\n#ifdef EIGEN_HAS_OPENMP\n  if(info)\n  {\n    // this is the parallel version!\n    int tid 
= omp_get_thread_num();\n    int threads = omp_get_num_threads();\n\n    LhsScalar* blockA = blocking.blockA();\n    eigen_internal_assert(blockA!=0);\n\n    std::size_t sizeB = kc*nc;\n    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);\n\n    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...\n    for(Index k=0; k<depth; k+=kc)\n    {\n      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'\n\n      // In order to reduce the chance that a thread has to wait for the other,\n      // let's start by packing B'.\n      pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);\n\n      // Pack A_k to A' in a parallel fashion:\n      // each thread packs the sub block A_k,i to A'_i where i is the thread id.\n\n      // However, before copying to A'_i, we have to make sure that no other thread is still using it,\n      // i.e., we test that info[tid].users equals 0.\n      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.\n      while(info[tid].users!=0) {}\n      info[tid].users += threads;\n\n      pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);\n\n      // Notify the other threads that the part A'_i is ready to go.\n      info[tid].sync = k;\n\n      // Computes C_i += A' * B' per A'_i\n      for(int shift=0; shift<threads; ++shift)\n      {\n        int i = (tid+shift)%threads;\n\n        // At this point we have to make sure that A'_i has been updated by the thread i,\n        // we use testAndSetOrdered to mimic a volatile access.\n        // However, no need to wait for the B' part which has been updated by the current thread!\n        if (shift>0) {\n          while(info[i].sync!=k) {\n          }\n        }\n\n        gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, 
actual_kc, nc, alpha);\n      }\n\n      // Then keep going as usual with the remaining B'\n      for(Index j=nc; j<cols; j+=nc)\n      {\n        const Index actual_nc = (std::min)(j+nc,cols)-j;\n\n        // pack B_k,j to B'\n        pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);\n\n        // C_j += A' * B'\n        gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);\n      }\n\n      // Release all the sub blocks A'_i of A' for the current thread,\n      // i.e., we simply decrement the number of users by 1\n      for(Index i=0; i<threads; ++i)\n        #pragma omp atomic\n        info[i].users -= 1;\n    }\n  }\n  else\n#endif // EIGEN_HAS_OPENMP\n  {\n    EIGEN_UNUSED_VARIABLE(info);\n\n    // this is the sequential version!\n    std::size_t sizeA = kc*mc;\n    std::size_t sizeB = kc*nc;\n\n    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());\n    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());\n\n    const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;\n\n    // For each horizontal panel of the rhs, and corresponding panel of the lhs...\n    for(Index i2=0; i2<rows; i2+=mc)\n    {\n      const Index actual_mc = (std::min)(i2+mc,rows)-i2;\n\n      for(Index k2=0; k2<depth; k2+=kc)\n      {\n        const Index actual_kc = (std::min)(k2+kc,depth)-k2;\n\n        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.\n        // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)\n        // Note that this panel will be read as many times as the number of blocks in the rhs's\n        // horizontal panel which is, in practice, a very low number.\n        pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);\n\n        // For each kc x nc block of the rhs's horizontal panel...\n        for(Index j2=0; j2<cols; j2+=nc)\n        {\n          const Index actual_nc = 
(std::min)(j2+nc,cols)-j2;\n\n          // We pack the rhs's block into a sequential chunk of memory (L2 caching)\n          // Note that this block will be read a very high number of times, which is equal to the number of\n          // micro horizontal panel of the large rhs's panel (e.g., rows/12 times).\n          if((!pack_rhs_once) || i2==0)\n            pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);\n\n          // Everything is packed, we can now call the panel * block kernel:\n          gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);\n        }\n      }\n    }\n  }\n}\n\n};\n\n/*********************************************************************************\n*  Specialization of generic_product_impl for \"large\" GEMM, i.e.,\n*  implementation of the high level wrapper to general_matrix_matrix_product\n**********************************************************************************/\n\ntemplate<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>\nstruct gemm_functor\n{\n  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)\n    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)\n  {}\n\n  void initParallelSession(Index num_threads) const\n  {\n    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);\n    m_blocking.allocateA();\n  }\n\n  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const\n  {\n    if(cols==-1)\n      cols = m_rhs.cols();\n\n    Gemm::run(rows, cols, m_lhs.cols(),\n              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),\n              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),\n              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),\n              m_actualAlpha, m_blocking, info);\n  }\n\n  typedef typename Gemm::Traits 
Traits;\n\n  protected:\n    const Lhs& m_lhs;\n    const Rhs& m_rhs;\n    Dest& m_dest;\n    Scalar m_actualAlpha;\n    BlockingType& m_blocking;\n};\n\ntemplate<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,\nbool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;\n\ntemplate<typename _LhsScalar, typename _RhsScalar>\nclass level3_blocking\n{\n    typedef _LhsScalar LhsScalar;\n    typedef _RhsScalar RhsScalar;\n\n  protected:\n    LhsScalar* m_blockA;\n    RhsScalar* m_blockB;\n\n    Index m_mc;\n    Index m_nc;\n    Index m_kc;\n\n  public:\n\n    level3_blocking()\n      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)\n    {}\n\n    inline Index mc() const { return m_mc; }\n    inline Index nc() const { return m_nc; }\n    inline Index kc() const { return m_kc; }\n\n    inline LhsScalar* blockA() { return m_blockA; }\n    inline RhsScalar* blockB() { return m_blockB; }\n};\n\ntemplate<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>\nclass gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>\n  : public level3_blocking<\n      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,\n      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>\n{\n    enum {\n      Transpose = StorageOrder==RowMajor,\n      ActualRows = Transpose ? MaxCols : MaxRows,\n      ActualCols = Transpose ? 
MaxRows : MaxCols\n    };\n    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;\n    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;\n    typedef gebp_traits<LhsScalar,RhsScalar> Traits;\n    enum {\n      SizeA = ActualRows * MaxDepth,\n      SizeB = ActualCols * MaxDepth\n    };\n\n#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES\n    EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];\n    EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];\n#else\n    EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];\n    EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];\n#endif\n\n  public:\n\n    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)\n    {\n      this->m_mc = ActualRows;\n      this->m_nc = ActualCols;\n      this->m_kc = MaxDepth;\n#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES\n      this->m_blockA = m_staticA;\n      this->m_blockB = m_staticB;\n#else\n      this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));\n      this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));\n#endif\n    }\n\n    void initParallel(Index, Index, Index, Index)\n    {}\n\n    inline void allocateA() {}\n    inline void allocateB() {}\n    inline void allocateAll() {}\n};\n\ntemplate<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>\nclass gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>\n  : public level3_blocking<\n      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,\n      typename 
conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>\n{\n    enum {\n      Transpose = StorageOrder==RowMajor\n    };\n    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;\n    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;\n    typedef gebp_traits<LhsScalar,RhsScalar> Traits;\n\n    Index m_sizeA;\n    Index m_sizeB;\n\n  public:\n\n    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)\n    {\n      this->m_mc = Transpose ? cols : rows;\n      this->m_nc = Transpose ? rows : cols;\n      this->m_kc = depth;\n\n      if(l3_blocking)\n      {\n        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);\n      }\n      else  // no l3 blocking\n      {\n        Index n = this->m_nc;\n        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);\n      }\n\n      m_sizeA = this->m_mc * this->m_kc;\n      m_sizeB = this->m_kc * this->m_nc;\n    }\n\n    void initParallel(Index rows, Index cols, Index depth, Index num_threads)\n    {\n      this->m_mc = Transpose ? cols : rows;\n      this->m_nc = Transpose ? 
rows : cols;\n      this->m_kc = depth;\n\n      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);\n      Index m = this->m_mc;\n      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);\n      m_sizeA = this->m_mc * this->m_kc;\n      m_sizeB = this->m_kc * this->m_nc;\n    }\n\n    void allocateA()\n    {\n      if(this->m_blockA==0)\n        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);\n    }\n\n    void allocateB()\n    {\n      if(this->m_blockB==0)\n        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);\n    }\n\n    void allocateAll()\n    {\n      allocateA();\n      allocateB();\n    }\n\n    ~gemm_blocking_space()\n    {\n      aligned_delete(this->m_blockA, m_sizeA);\n      aligned_delete(this->m_blockB, m_sizeB);\n    }\n};\n\n} // end namespace internal\n\nnamespace internal {\n\ntemplate<typename Lhs, typename Rhs>\nstruct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>\n  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >\n{\n  typedef typename Product<Lhs,Rhs>::Scalar Scalar;\n  typedef typename Lhs::Scalar LhsScalar;\n  typedef typename Rhs::Scalar RhsScalar;\n\n  typedef internal::blas_traits<Lhs> LhsBlasTraits;\n  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;\n  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;\n\n  typedef internal::blas_traits<Rhs> RhsBlasTraits;\n  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;\n  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;\n\n  enum {\n    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)\n  };\n\n  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;\n\n  template<typename Dst>\n  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    
if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)\n      lazyproduct::evalTo(dst, lhs, rhs);\n    else\n    {\n      dst.setZero();\n      scaleAndAddTo(dst, lhs, rhs, Scalar(1));\n    }\n  }\n\n  template<typename Dst>\n  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)\n      lazyproduct::addTo(dst, lhs, rhs);\n    else\n      scaleAndAddTo(dst,lhs, rhs, Scalar(1));\n  }\n\n  template<typename Dst>\n  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)\n      lazyproduct::subTo(dst, lhs, rhs);\n    else\n      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));\n  }\n\n  template<typename Dest>\n  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)\n  {\n    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());\n    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)\n      return;\n\n    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);\n    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);\n\n    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)\n                               * RhsBlasTraits::extractScalarFactor(a_rhs);\n\n    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,\n            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;\n\n    typedef internal::gemm_functor<\n      Scalar, Index,\n      internal::general_matrix_matrix_product<\n        Index,\n        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),\n        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? 
RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),\n        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,\n      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;\n\n    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);\n    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>\n        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_GENERAL_MATRIX_MATRIX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/GeneralMatrixMatrixTriangular.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H\n#define EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H\n\nnamespace Eigen { \n\ntemplate<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjLhs, bool ConjRhs>\nstruct selfadjoint_rank1_update;\n\nnamespace internal {\n\n/**********************************************************************\n* This file implements a general A * B product while\n* evaluating only one triangular part of the product.\n* This is a more general version of self adjoint product (C += A A^T)\n* as the level 3 SYRK Blas routine.\n**********************************************************************/\n\n// forward declarations (defined at the end of this file)\ntemplate<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjLhs, bool ConjRhs, int UpLo>\nstruct tribb_kernel;\n  \n/* Optimized matrix-matrix product evaluating only one triangular half */\ntemplate <typename Index,\n          typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,\n          typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,\n                              int ResStorageOrder, int  UpLo, int Version = Specialized>\nstruct general_matrix_matrix_triangular_product;\n\n// as usual if the result is row major => we transpose the product\ntemplate <typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,\n                          typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int  UpLo, int Version>\nstruct 
general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,UpLo,Version>\n{\n  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;\n  static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* lhs, Index lhsStride,\n                                      const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride,\n                                      const ResScalar& alpha, level3_blocking<RhsScalar,LhsScalar>& blocking)\n  {\n    general_matrix_matrix_triangular_product<Index,\n        RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,\n        LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,\n        ColMajor, UpLo==Lower?Upper:Lower>\n      ::run(size,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking);\n  }\n};\n\ntemplate <typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,\n                          typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int  UpLo, int Version>\nstruct general_matrix_matrix_triangular_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,UpLo,Version>\n{\n  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;\n  static EIGEN_STRONG_INLINE void run(Index size, Index depth,const LhsScalar* _lhs, Index lhsStride,\n                                      const RhsScalar* _rhs, Index rhsStride, ResScalar* _res, Index resStride,\n                                      const ResScalar& alpha, level3_blocking<LhsScalar,RhsScalar>& blocking)\n  {\n    typedef gebp_traits<LhsScalar,RhsScalar> Traits;\n\n    typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;\n    typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;\n    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> 
ResMapper;\n    LhsMapper lhs(_lhs,lhsStride);\n    RhsMapper rhs(_rhs,rhsStride);\n    ResMapper res(_res, resStride);\n\n    Index kc = blocking.kc();\n    Index mc = (std::min)(size,blocking.mc());\n\n    // !!! mc must be a multiple of nr:\n    if(mc > Traits::nr)\n      mc = (mc/Traits::nr)*Traits::nr;\n\n    std::size_t sizeA = kc*mc;\n    std::size_t sizeB = kc*size;\n\n    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());\n    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());\n\n    gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;\n    gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;\n    gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;\n    tribb_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs, UpLo> sybb;\n\n    for(Index k2=0; k2<depth; k2+=kc)\n    {\n      const Index actual_kc = (std::min)(k2+kc,depth)-k2;\n\n      // note that the actual rhs is the transpose/adjoint of mat\n      pack_rhs(blockB, rhs.getSubMapper(k2,0), actual_kc, size);\n\n      for(Index i2=0; i2<size; i2+=mc)\n      {\n        const Index actual_mc = (std::min)(i2+mc,size)-i2;\n\n        pack_lhs(blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);\n\n        // the selected actual_mc * size panel of res is split into three different part:\n        //  1 - before the diagonal => processed with gebp or skipped\n        //  2 - the actual_mc x actual_mc symmetric block => processed with a special kernel\n        //  3 - after the diagonal => processed with gebp or skipped\n        if (UpLo==Lower)\n          gebp(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc,\n               (std::min)(size,i2), alpha, -1, -1, 0, 0);\n\n\n        sybb(_res+resStride*i2 + i2, resStride, blockA, blockB + 
actual_kc*i2, actual_mc, actual_kc, alpha);\n\n        if (UpLo==Upper)\n        {\n          Index j2 = i2+actual_mc;\n          gebp(res.getSubMapper(i2, j2), blockA, blockB+actual_kc*j2, actual_mc,\n               actual_kc, (std::max)(Index(0), size-j2), alpha, -1, -1, 0, 0);\n        }\n      }\n    }\n  }\n};\n\n// Optimized packed Block * packed Block product kernel evaluating only one given triangular part\n// This kernel is built on top of the gebp kernel:\n// - the current destination block is processed per panel of actual_mc x BlockSize\n//   where BlockSize is set to the minimal value allowing gebp to be as fast as possible\n// - then, as usual, each panel is split into three parts along the diagonal,\n//   the sub blocks above and below the diagonal are processed as usual,\n//   while the triangular block overlapping the diagonal is evaluated into a\n//   small temporary buffer which is then accumulated into the result using a\n//   triangular traversal.\ntemplate<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjLhs, bool ConjRhs, int UpLo>\nstruct tribb_kernel\n{\n  typedef gebp_traits<LhsScalar,RhsScalar,ConjLhs,ConjRhs> Traits;\n  typedef typename Traits::ResScalar ResScalar;\n\n  enum {\n    BlockSize  = meta_least_common_multiple<EIGEN_PLAIN_ENUM_MAX(mr,nr),EIGEN_PLAIN_ENUM_MIN(mr,nr)>::ret\n  };\n  void operator()(ResScalar* _res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB, Index size, Index depth, const ResScalar& alpha)\n  {\n    typedef blas_data_mapper<ResScalar, Index, ColMajor> ResMapper;\n    ResMapper res(_res, resStride);\n    gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, mr, nr, ConjLhs, ConjRhs> gebp_kernel;\n\n    Matrix<ResScalar,BlockSize,BlockSize,ColMajor> buffer((internal::constructor_without_unaligned_array_assert()));\n\n    // let's process the block per panel of actual_mc x BlockSize,\n    // again, each is split into three parts, etc.\n    for (Index j=0; j<size; 
j+=BlockSize)\n    {\n      Index actualBlockSize = std::min<Index>(BlockSize,size - j);\n      const RhsScalar* actual_b = blockB+j*depth;\n\n      if(UpLo==Upper)\n        gebp_kernel(res.getSubMapper(0, j), blockA, actual_b, j, depth, actualBlockSize, alpha,\n                    -1, -1, 0, 0);\n\n      // selfadjoint micro block\n      {\n        Index i = j;\n        buffer.setZero();\n        // 1 - apply the kernel on the temporary buffer\n        gebp_kernel(ResMapper(buffer.data(), BlockSize), blockA+depth*i, actual_b, actualBlockSize, depth, actualBlockSize, alpha,\n                    -1, -1, 0, 0);\n        // 2 - triangular accumulation\n        for(Index j1=0; j1<actualBlockSize; ++j1)\n        {\n          ResScalar* r = &res(i, j + j1);\n          for(Index i1=UpLo==Lower ? j1 : 0;\n              UpLo==Lower ? i1<actualBlockSize : i1<=j1; ++i1)\n            r[i1] += buffer(i1,j1);\n        }\n      }\n\n      if(UpLo==Lower)\n      {\n        Index i = j+actualBlockSize;\n        gebp_kernel(res.getSubMapper(i, j), blockA+depth*i, actual_b, size-i, \n                    depth, actualBlockSize, alpha, -1, -1, 0, 0);\n      }\n    }\n  }\n};\n\n} // end namespace internal\n\n// high level API\n\ntemplate<typename MatrixType, typename ProductType, int UpLo, bool IsOuterProduct>\nstruct general_product_to_triangular_selector;\n\n\ntemplate<typename MatrixType, typename ProductType, int UpLo>\nstruct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,true>\n{\n  static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha, bool beta)\n  {\n    typedef typename MatrixType::Scalar Scalar;\n    \n    typedef typename internal::remove_all<typename ProductType::LhsNested>::type Lhs;\n    typedef internal::blas_traits<Lhs> LhsBlasTraits;\n    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs;\n    typedef typename internal::remove_all<ActualLhs>::type _ActualLhs;\n    typename 
internal::add_const_on_value_type<ActualLhs>::type actualLhs = LhsBlasTraits::extract(prod.lhs());\n    \n    typedef typename internal::remove_all<typename ProductType::RhsNested>::type Rhs;\n    typedef internal::blas_traits<Rhs> RhsBlasTraits;\n    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs;\n    typedef typename internal::remove_all<ActualRhs>::type _ActualRhs;\n    typename internal::add_const_on_value_type<ActualRhs>::type actualRhs = RhsBlasTraits::extract(prod.rhs());\n\n    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived());\n\n    if(!beta)\n      mat.template triangularView<UpLo>().setZero();\n\n    enum {\n      StorageOrder = (internal::traits<MatrixType>::Flags&RowMajorBit) ? RowMajor : ColMajor,\n      UseLhsDirectly = _ActualLhs::InnerStrideAtCompileTime==1,\n      UseRhsDirectly = _ActualRhs::InnerStrideAtCompileTime==1\n    };\n    \n    internal::gemv_static_vector_if<Scalar,Lhs::SizeAtCompileTime,Lhs::MaxSizeAtCompileTime,!UseLhsDirectly> static_lhs;\n    ei_declare_aligned_stack_constructed_variable(Scalar, actualLhsPtr, actualLhs.size(),\n      (UseLhsDirectly ? const_cast<Scalar*>(actualLhs.data()) : static_lhs.data()));\n    if(!UseLhsDirectly) Map<typename _ActualLhs::PlainObject>(actualLhsPtr, actualLhs.size()) = actualLhs;\n    \n    internal::gemv_static_vector_if<Scalar,Rhs::SizeAtCompileTime,Rhs::MaxSizeAtCompileTime,!UseRhsDirectly> static_rhs;\n    ei_declare_aligned_stack_constructed_variable(Scalar, actualRhsPtr, actualRhs.size(),\n      (UseRhsDirectly ? 
const_cast<Scalar*>(actualRhs.data()) : static_rhs.data()));\n    if(!UseRhsDirectly) Map<typename _ActualRhs::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;\n    \n    \n    selfadjoint_rank1_update<Scalar,Index,StorageOrder,UpLo,\n                              LhsBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex,\n                              RhsBlasTraits::NeedToConjugate && NumTraits<Scalar>::IsComplex>\n          ::run(actualLhs.size(), mat.data(), mat.outerStride(), actualLhsPtr, actualRhsPtr, actualAlpha);\n  }\n};\n\ntemplate<typename MatrixType, typename ProductType, int UpLo>\nstruct general_product_to_triangular_selector<MatrixType,ProductType,UpLo,false>\n{\n  static void run(MatrixType& mat, const ProductType& prod, const typename MatrixType::Scalar& alpha, bool beta)\n  {\n    typedef typename internal::remove_all<typename ProductType::LhsNested>::type Lhs;\n    typedef internal::blas_traits<Lhs> LhsBlasTraits;\n    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhs;\n    typedef typename internal::remove_all<ActualLhs>::type _ActualLhs;\n    typename internal::add_const_on_value_type<ActualLhs>::type actualLhs = LhsBlasTraits::extract(prod.lhs());\n    \n    typedef typename internal::remove_all<typename ProductType::RhsNested>::type Rhs;\n    typedef internal::blas_traits<Rhs> RhsBlasTraits;\n    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhs;\n    typedef typename internal::remove_all<ActualRhs>::type _ActualRhs;\n    typename internal::add_const_on_value_type<ActualRhs>::type actualRhs = RhsBlasTraits::extract(prod.rhs());\n\n    typename ProductType::Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs().derived()) * RhsBlasTraits::extractScalarFactor(prod.rhs().derived());\n\n    if(!beta)\n      mat.template triangularView<UpLo>().setZero();\n\n    enum {\n      IsRowMajor = (internal::traits<MatrixType>::Flags&RowMajorBit) ? 
1 : 0,\n      LhsIsRowMajor = _ActualLhs::Flags&RowMajorBit ? 1 : 0,\n      RhsIsRowMajor = _ActualRhs::Flags&RowMajorBit ? 1 : 0\n    };\n\n    Index size = mat.cols();\n    Index depth = actualLhs.cols();\n\n    typedef internal::gemm_blocking_space<IsRowMajor ? RowMajor : ColMajor,typename Lhs::Scalar,typename Rhs::Scalar,\n          MatrixType::MaxColsAtCompileTime, MatrixType::MaxColsAtCompileTime, _ActualRhs::MaxColsAtCompileTime> BlockingType;\n\n    BlockingType blocking(size, size, depth, 1, false);\n\n    internal::general_matrix_matrix_triangular_product<Index,\n      typename Lhs::Scalar, LhsIsRowMajor ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,\n      typename Rhs::Scalar, RhsIsRowMajor ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,\n      IsRowMajor ? RowMajor : ColMajor, UpLo>\n      ::run(size, depth,\n            &actualLhs.coeffRef(0,0), actualLhs.outerStride(), &actualRhs.coeffRef(0,0), actualRhs.outerStride(),\n            mat.data(), mat.outerStride(), actualAlpha, blocking);\n  }\n};\n\ntemplate<typename MatrixType, unsigned int UpLo>\ntemplate<typename ProductType>\nEIGEN_DEVICE_FUNC TriangularView<MatrixType,UpLo>& TriangularViewImpl<MatrixType,UpLo,Dense>::_assignProduct(const ProductType& prod, const Scalar& alpha, bool beta)\n{\n  eigen_assert(derived().nestedExpression().rows() == prod.rows() && derived().cols() == prod.cols());\n\n  general_product_to_triangular_selector<MatrixType, ProductType, UpLo, internal::traits<ProductType>::InnerSize==1>::run(derived().nestedExpression().const_cast_derived(), prod, alpha, beta);\n\n  return derived();\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/GeneralMatrixMatrixTriangular_BLAS.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to BLAS F77\n *   Level 3 BLAS SYRK/HERK implementation.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_BLAS_H\n#define EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_BLAS_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate <typename Index, typename Scalar, int AStorageOrder, bool ConjugateA, int ResStorageOrder, int  UpLo>\nstruct general_matrix_matrix_rankupdate :\n       general_matrix_matrix_triangular_product<\n         Index,Scalar,AStorageOrder,ConjugateA,Scalar,AStorageOrder,ConjugateA,ResStorageOrder,UpLo,BuiltIn> {};\n\n\n// try to go to BLAS specialization\n#define EIGEN_BLAS_RANKUPDATE_SPECIALIZE(Scalar) \\\ntemplate <typename Index, int LhsStorageOrder, bool ConjugateLhs, \\\n                          int RhsStorageOrder, bool ConjugateRhs, int  UpLo> \\\nstruct general_matrix_matrix_triangular_product<Index,Scalar,LhsStorageOrder,ConjugateLhs, \\\n               Scalar,RhsStorageOrder,ConjugateRhs,ColMajor,UpLo,Specialized> { \\\n  static EIGEN_STRONG_INLINE void run(Index size, Index depth,const Scalar* lhs, Index lhsStride, \\\n                          const Scalar* rhs, Index rhsStride, Scalar* res, Index resStride, Scalar alpha, level3_blocking<Scalar, Scalar>& blocking) \\\n  { \\\n    if (lhs==rhs) { \\\n      
general_matrix_matrix_rankupdate<Index,Scalar,LhsStorageOrder,ConjugateLhs,ColMajor,UpLo> \\\n      ::run(size,depth,lhs,lhsStride,rhs,rhsStride,res,resStride,alpha,blocking); \\\n    } else { \\\n      general_matrix_matrix_triangular_product<Index, \\\n        Scalar, LhsStorageOrder, ConjugateLhs, \\\n        Scalar, RhsStorageOrder, ConjugateRhs, \\\n        ColMajor, UpLo, BuiltIn> \\\n      ::run(size,depth,lhs,lhsStride,rhs,rhsStride,res,resStride,alpha,blocking); \\\n    } \\\n  } \\\n};\n\nEIGEN_BLAS_RANKUPDATE_SPECIALIZE(double)\nEIGEN_BLAS_RANKUPDATE_SPECIALIZE(float)\n// TODO handle complex cases\n// EIGEN_BLAS_RANKUPDATE_SPECIALIZE(dcomplex)\n// EIGEN_BLAS_RANKUPDATE_SPECIALIZE(scomplex)\n\n// SYRK for float/double\n#define EIGEN_BLAS_RANKUPDATE_R(EIGTYPE, BLASTYPE, BLASFUNC) \\\ntemplate <typename Index, int AStorageOrder, bool ConjugateA, int  UpLo> \\\nstruct general_matrix_matrix_rankupdate<Index,EIGTYPE,AStorageOrder,ConjugateA,ColMajor,UpLo> { \\\n  enum { \\\n    IsLower = (UpLo&Lower) == Lower, \\\n    LowUp = IsLower ? Lower : Upper, \\\n    conjA = ((AStorageOrder==ColMajor) && ConjugateA) ? 1 : 0 \\\n  }; \\\n  static EIGEN_STRONG_INLINE void run(Index size, Index depth,const EIGTYPE* lhs, Index lhsStride, \\\n                          const EIGTYPE* /*rhs*/, Index /*rhsStride*/, EIGTYPE* res, Index resStride, EIGTYPE alpha, level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/) \\\n  { \\\n  /* typedef Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> MatrixRhs;*/ \\\n\\\n   BlasIndex lda=convert_index<BlasIndex>(lhsStride), ldc=convert_index<BlasIndex>(resStride), n=convert_index<BlasIndex>(size), k=convert_index<BlasIndex>(depth); \\\n   char uplo=((IsLower) ? 'L' : 'U'), trans=((AStorageOrder==RowMajor) ? 
'T':'N'); \\\n   EIGTYPE beta(1); \\\n   BLASFUNC(&uplo, &trans, &n, &k, &numext::real_ref(alpha), lhs, &lda, &numext::real_ref(beta), res, &ldc); \\\n  } \\\n};\n\n// HERK for complex data\n#define EIGEN_BLAS_RANKUPDATE_C(EIGTYPE, BLASTYPE, RTYPE, BLASFUNC) \\\ntemplate <typename Index, int AStorageOrder, bool ConjugateA, int  UpLo> \\\nstruct general_matrix_matrix_rankupdate<Index,EIGTYPE,AStorageOrder,ConjugateA,ColMajor,UpLo> { \\\n  enum { \\\n    IsLower = (UpLo&Lower) == Lower, \\\n    LowUp = IsLower ? Lower : Upper, \\\n    conjA = (((AStorageOrder==ColMajor) && ConjugateA) || ((AStorageOrder==RowMajor) && !ConjugateA)) ? 1 : 0 \\\n  }; \\\n  static EIGEN_STRONG_INLINE void run(Index size, Index depth,const EIGTYPE* lhs, Index lhsStride, \\\n                          const EIGTYPE* /*rhs*/, Index /*rhsStride*/, EIGTYPE* res, Index resStride, EIGTYPE alpha, level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/) \\\n  { \\\n   typedef Matrix<EIGTYPE, Dynamic, Dynamic, AStorageOrder> MatrixType; \\\n\\\n   BlasIndex lda=convert_index<BlasIndex>(lhsStride), ldc=convert_index<BlasIndex>(resStride), n=convert_index<BlasIndex>(size), k=convert_index<BlasIndex>(depth); \\\n   char uplo=((IsLower) ? 'L' : 'U'), trans=((AStorageOrder==RowMajor) ? 
'C':'N'); \\\n   RTYPE alpha_, beta_; \\\n   const EIGTYPE* a_ptr; \\\n\\\n   alpha_ = alpha.real(); \\\n   beta_ = 1.0; \\\n/* Copy with conjugation in some cases*/ \\\n   MatrixType a; \\\n   if (conjA) { \\\n     Map<const MatrixType, 0, OuterStride<> > mapA(lhs,n,k,OuterStride<>(lhsStride)); \\\n     a = mapA.conjugate(); \\\n     lda = a.outerStride(); \\\n     a_ptr = a.data(); \\\n   } else a_ptr=lhs; \\\n   BLASFUNC(&uplo, &trans, &n, &k, &alpha_, (BLASTYPE*)a_ptr, &lda, &beta_, (BLASTYPE*)res, &ldc); \\\n  } \\\n};\n\n\nEIGEN_BLAS_RANKUPDATE_R(double, double, dsyrk_)\nEIGEN_BLAS_RANKUPDATE_R(float,  float,  ssyrk_)\n\n// TODO hanlde complex cases\n// EIGEN_BLAS_RANKUPDATE_C(dcomplex, double, double, zherk_)\n// EIGEN_BLAS_RANKUPDATE_C(scomplex, float,  float, cherk_)\n\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_GENERAL_MATRIX_MATRIX_TRIANGULAR_BLAS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/GeneralMatrixMatrix_BLAS.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to BLAS F77\n *   General matrix-matrix product functionality based on ?GEMM.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_GENERAL_MATRIX_MATRIX_BLAS_H\n#define EIGEN_GENERAL_MATRIX_MATRIX_BLAS_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/**********************************************************************\n* This file implements general matrix-matrix multiplication using BLAS\n* gemm function via partial specialization of\n* general_matrix_matrix_product::run(..) 
method for float, double,\n* std::complex<float> and std::complex<double> types\n**********************************************************************/\n\n// gemm specialization\n\n#define GEMM_SPECIALIZATION(EIGTYPE, EIGPREFIX, BLASTYPE, BLASPREFIX) \\\ntemplate< \\\n  typename Index, \\\n  int LhsStorageOrder, bool ConjugateLhs, \\\n  int RhsStorageOrder, bool ConjugateRhs> \\\nstruct general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor> \\\n{ \\\ntypedef gebp_traits<EIGTYPE,EIGTYPE> Traits; \\\n\\\nstatic void run(Index rows, Index cols, Index depth, \\\n  const EIGTYPE* _lhs, Index lhsStride, \\\n  const EIGTYPE* _rhs, Index rhsStride, \\\n  EIGTYPE* res, Index resStride, \\\n  EIGTYPE alpha, \\\n  level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/, \\\n  GemmParallelInfo<Index>* /*info = 0*/) \\\n{ \\\n  using std::conj; \\\n\\\n  char transa, transb; \\\n  BlasIndex m, n, k, lda, ldb, ldc; \\\n  const EIGTYPE *a, *b; \\\n  EIGTYPE beta(1); \\\n  MatrixX##EIGPREFIX a_tmp, b_tmp; \\\n\\\n/* Set transpose options */ \\\n  transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \\\n  transb = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 
'C' : 'T') : 'N'; \\\n\\\n/* Set m, n, k */ \\\n  m = convert_index<BlasIndex>(rows);  \\\n  n = convert_index<BlasIndex>(cols);  \\\n  k = convert_index<BlasIndex>(depth); \\\n\\\n/* Set lda, ldb, ldc */ \\\n  lda = convert_index<BlasIndex>(lhsStride); \\\n  ldb = convert_index<BlasIndex>(rhsStride); \\\n  ldc = convert_index<BlasIndex>(resStride); \\\n\\\n/* Set a, b, c */ \\\n  if ((LhsStorageOrder==ColMajor) && (ConjugateLhs)) { \\\n    Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,m,k,OuterStride<>(lhsStride)); \\\n    a_tmp = lhs.conjugate(); \\\n    a = a_tmp.data(); \\\n    lda = convert_index<BlasIndex>(a_tmp.outerStride()); \\\n  } else a = _lhs; \\\n\\\n  if ((RhsStorageOrder==ColMajor) && (ConjugateRhs)) { \\\n    Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,k,n,OuterStride<>(rhsStride)); \\\n    b_tmp = rhs.conjugate(); \\\n    b = b_tmp.data(); \\\n    ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \\\n  } else b = _rhs; \\\n\\\n  BLASPREFIX##gemm_(&transa, &transb, &m, &n, &k, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, &numext::real_ref(beta), (BLASTYPE*)res, &ldc); \\\n}};\n\nGEMM_SPECIALIZATION(double,   d,  double, d)\nGEMM_SPECIALIZATION(float,    f,  float,  s)\nGEMM_SPECIALIZATION(dcomplex, cd, double, z)\nGEMM_SPECIALIZATION(scomplex, cf, float,  c)\n\n} // end namespase internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_GENERAL_MATRIX_MATRIX_BLAS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/GeneralMatrixVector.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2016 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_GENERAL_MATRIX_VECTOR_H\n#define EIGEN_GENERAL_MATRIX_VECTOR_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n/* Optimized col-major matrix * vector product:\n * This algorithm processes the matrix per vertical panels,\n * which are then processed horizontaly per chunck of 8*PacketSize x 1 vertical segments.\n *\n * Mixing type logic: C += alpha * A * B\n *  |  A  |  B  |alpha| comments\n *  |real |cplx |cplx | no vectorization\n *  |real |cplx |real | alpha is converted to a cplx when calling the run function, no vectorization\n *  |cplx |real |cplx | invalid, the caller has to do tmp: = A * B; C += alpha*tmp\n *  |cplx |real |real | optimal case, vectorization possible via real-cplx mul\n *\n * The same reasoning apply for the transposed case.\n */\ntemplate<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>\nstruct general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>\n{\n  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;\n\nenum {\n  Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable\n              && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),\n  LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,\n  RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,\n  ResPacketSize = Vectorizable ? 
packet_traits<ResScalar>::size : 1\n};\n\ntypedef typename packet_traits<LhsScalar>::type  _LhsPacket;\ntypedef typename packet_traits<RhsScalar>::type  _RhsPacket;\ntypedef typename packet_traits<ResScalar>::type  _ResPacket;\n\ntypedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;\ntypedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;\ntypedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;\n\nEIGEN_DONT_INLINE static void run(\n  Index rows, Index cols,\n  const LhsMapper& lhs,\n  const RhsMapper& rhs,\n        ResScalar* res, Index resIncr,\n  RhsScalar alpha);\n};\n\ntemplate<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>\nEIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(\n  Index rows, Index cols,\n  const LhsMapper& alhs,\n  const RhsMapper& rhs,\n        ResScalar* res, Index resIncr,\n  RhsScalar alpha)\n{\n  EIGEN_UNUSED_VARIABLE(resIncr);\n  eigen_internal_assert(resIncr==1);\n\n  // The following copy tells the compiler that lhs's attributes are not modified outside this function\n  // This helps GCC to generate propoer code.\n  LhsMapper lhs(alhs);\n\n  conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;\n  conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;\n  const Index lhsStride = lhs.stride();\n  // TODO: for padded aligned inputs, we could enable aligned reads\n  enum { LhsAlignment = Unaligned };\n\n  const Index n8 = rows-8*ResPacketSize+1;\n  const Index n4 = rows-4*ResPacketSize+1;\n  const Index n3 = rows-3*ResPacketSize+1;\n  const Index n2 = rows-2*ResPacketSize+1;\n  const Index n1 = rows-1*ResPacketSize+1;\n\n  // TODO: improve the following heuristic:\n  const Index block_cols = cols<128 ? 
cols : (lhsStride*sizeof(LhsScalar)<32000?16:4);\n  ResPacket palpha = pset1<ResPacket>(alpha);\n\n  for(Index j2=0; j2<cols; j2+=block_cols)\n  {\n    Index jend = numext::mini(j2+block_cols,cols);\n    Index i=0;\n    for(; i<n8; i+=ResPacketSize*8)\n    {\n      ResPacket c0 = pset1<ResPacket>(ResScalar(0)),\n                c1 = pset1<ResPacket>(ResScalar(0)),\n                c2 = pset1<ResPacket>(ResScalar(0)),\n                c3 = pset1<ResPacket>(ResScalar(0)),\n                c4 = pset1<ResPacket>(ResScalar(0)),\n                c5 = pset1<ResPacket>(ResScalar(0)),\n                c6 = pset1<ResPacket>(ResScalar(0)),\n                c7 = pset1<ResPacket>(ResScalar(0));\n\n      for(Index j=j2; j<jend; j+=1)\n      {\n        RhsPacket b0 = pset1<RhsPacket>(rhs(j,0));\n        c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*0,j),b0,c0);\n        c1 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*1,j),b0,c1);\n        c2 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*2,j),b0,c2);\n        c3 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*3,j),b0,c3);\n        c4 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*4,j),b0,c4);\n        c5 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*5,j),b0,c5);\n        c6 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*6,j),b0,c6);\n        c7 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*7,j),b0,c7);\n      }\n      pstoreu(res+i+ResPacketSize*0, pmadd(c0,palpha,ploadu<ResPacket>(res+i+ResPacketSize*0)));\n      pstoreu(res+i+ResPacketSize*1, pmadd(c1,palpha,ploadu<ResPacket>(res+i+ResPacketSize*1)));\n      pstoreu(res+i+ResPacketSize*2, pmadd(c2,palpha,ploadu<ResPacket>(res+i+ResPacketSize*2)));\n      pstoreu(res+i+ResPacketSize*3, pmadd(c3,palpha,ploadu<ResPacket>(res+i+ResPacketSize*3)));\n      pstoreu(res+i+ResPacketSize*4, 
pmadd(c4,palpha,ploadu<ResPacket>(res+i+ResPacketSize*4)));\n      pstoreu(res+i+ResPacketSize*5, pmadd(c5,palpha,ploadu<ResPacket>(res+i+ResPacketSize*5)));\n      pstoreu(res+i+ResPacketSize*6, pmadd(c6,palpha,ploadu<ResPacket>(res+i+ResPacketSize*6)));\n      pstoreu(res+i+ResPacketSize*7, pmadd(c7,palpha,ploadu<ResPacket>(res+i+ResPacketSize*7)));\n    }\n    if(i<n4)\n    {\n      ResPacket c0 = pset1<ResPacket>(ResScalar(0)),\n                c1 = pset1<ResPacket>(ResScalar(0)),\n                c2 = pset1<ResPacket>(ResScalar(0)),\n                c3 = pset1<ResPacket>(ResScalar(0));\n\n      for(Index j=j2; j<jend; j+=1)\n      {\n        RhsPacket b0 = pset1<RhsPacket>(rhs(j,0));\n        c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*0,j),b0,c0);\n        c1 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*1,j),b0,c1);\n        c2 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*2,j),b0,c2);\n        c3 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*3,j),b0,c3);\n      }\n      pstoreu(res+i+ResPacketSize*0, pmadd(c0,palpha,ploadu<ResPacket>(res+i+ResPacketSize*0)));\n      pstoreu(res+i+ResPacketSize*1, pmadd(c1,palpha,ploadu<ResPacket>(res+i+ResPacketSize*1)));\n      pstoreu(res+i+ResPacketSize*2, pmadd(c2,palpha,ploadu<ResPacket>(res+i+ResPacketSize*2)));\n      pstoreu(res+i+ResPacketSize*3, pmadd(c3,palpha,ploadu<ResPacket>(res+i+ResPacketSize*3)));\n\n      i+=ResPacketSize*4;\n    }\n    if(i<n3)\n    {\n      ResPacket c0 = pset1<ResPacket>(ResScalar(0)),\n                c1 = pset1<ResPacket>(ResScalar(0)),\n                c2 = pset1<ResPacket>(ResScalar(0));\n\n      for(Index j=j2; j<jend; j+=1)\n      {\n        RhsPacket b0 = pset1<RhsPacket>(rhs(j,0));\n        c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*0,j),b0,c0);\n        c1 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*1,j),b0,c1);\n        c2 
= pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*2,j),b0,c2);\n      }\n      pstoreu(res+i+ResPacketSize*0, pmadd(c0,palpha,ploadu<ResPacket>(res+i+ResPacketSize*0)));\n      pstoreu(res+i+ResPacketSize*1, pmadd(c1,palpha,ploadu<ResPacket>(res+i+ResPacketSize*1)));\n      pstoreu(res+i+ResPacketSize*2, pmadd(c2,palpha,ploadu<ResPacket>(res+i+ResPacketSize*2)));\n\n      i+=ResPacketSize*3;\n    }\n    if(i<n2)\n    {\n      ResPacket c0 = pset1<ResPacket>(ResScalar(0)),\n                c1 = pset1<ResPacket>(ResScalar(0));\n\n      for(Index j=j2; j<jend; j+=1)\n      {\n        RhsPacket b0 = pset1<RhsPacket>(rhs(j,0));\n        c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*0,j),b0,c0);\n        c1 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+LhsPacketSize*1,j),b0,c1);\n      }\n      pstoreu(res+i+ResPacketSize*0, pmadd(c0,palpha,ploadu<ResPacket>(res+i+ResPacketSize*0)));\n      pstoreu(res+i+ResPacketSize*1, pmadd(c1,palpha,ploadu<ResPacket>(res+i+ResPacketSize*1)));\n      i+=ResPacketSize*2;\n    }\n    if(i<n1)\n    {\n      ResPacket c0 = pset1<ResPacket>(ResScalar(0));\n      for(Index j=j2; j<jend; j+=1)\n      {\n        RhsPacket b0 = pset1<RhsPacket>(rhs(j,0));\n        c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+0,j),b0,c0);\n      }\n      pstoreu(res+i+ResPacketSize*0, pmadd(c0,palpha,ploadu<ResPacket>(res+i+ResPacketSize*0)));\n      i+=ResPacketSize;\n    }\n    for(;i<rows;++i)\n    {\n      ResScalar c0(0);\n      for(Index j=j2; j<jend; j+=1)\n        c0 += cj.pmul(lhs(i,j), rhs(j,0));\n      res[i] += alpha*c0;\n    }\n  }\n}\n\n/* Optimized row-major matrix * vector product:\n * This algorithm processes 4 rows at onces that allows to both reduce\n * the number of load/stores of the result by a factor 4 and to reduce\n * the instruction dependency. 
Moreover, we know that all bands have the\n * same alignment pattern.\n *\n * Mixing type logic:\n *  - alpha is always a complex (or converted to a complex)\n *  - no vectorization\n */\ntemplate<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>\nstruct general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>\n{\ntypedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;\n\nenum {\n  Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable\n              && int(packet_traits<LhsScalar>::size)==int(packet_traits<RhsScalar>::size),\n  LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,\n  RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,\n  ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1\n};\n\ntypedef typename packet_traits<LhsScalar>::type  _LhsPacket;\ntypedef typename packet_traits<RhsScalar>::type  _RhsPacket;\ntypedef typename packet_traits<ResScalar>::type  _ResPacket;\n\ntypedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;\ntypedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;\ntypedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;\n\nEIGEN_DONT_INLINE static void run(\n  Index rows, Index cols,\n  const LhsMapper& lhs,\n  const RhsMapper& rhs,\n        ResScalar* res, Index resIncr,\n  ResScalar alpha);\n};\n\ntemplate<typename Index, typename LhsScalar, typename LhsMapper, bool ConjugateLhs, typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version>\nEIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjugateLhs,RhsScalar,RhsMapper,ConjugateRhs,Version>::run(\n  Index rows, Index cols,\n  const LhsMapper& alhs,\n  const RhsMapper& rhs,\n  ResScalar* res, 
Index resIncr,\n  ResScalar alpha)\n{\n  // The following copy tells the compiler that lhs's attributes are not modified outside this function\n  // This helps GCC to generate propoer code.\n  LhsMapper lhs(alhs);\n\n  eigen_internal_assert(rhs.stride()==1);\n  conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;\n  conj_helper<LhsPacket,RhsPacket,ConjugateLhs,ConjugateRhs> pcj;\n\n  // TODO: fine tune the following heuristic. The rationale is that if the matrix is very large,\n  //       processing 8 rows at once might be counter productive wrt cache.\n  const Index n8 = lhs.stride()*sizeof(LhsScalar)>32000 ? 0 : rows-7;\n  const Index n4 = rows-3;\n  const Index n2 = rows-1;\n\n  // TODO: for padded aligned inputs, we could enable aligned reads\n  enum { LhsAlignment = Unaligned };\n\n  Index i=0;\n  for(; i<n8; i+=8)\n  {\n    ResPacket c0 = pset1<ResPacket>(ResScalar(0)),\n              c1 = pset1<ResPacket>(ResScalar(0)),\n              c2 = pset1<ResPacket>(ResScalar(0)),\n              c3 = pset1<ResPacket>(ResScalar(0)),\n              c4 = pset1<ResPacket>(ResScalar(0)),\n              c5 = pset1<ResPacket>(ResScalar(0)),\n              c6 = pset1<ResPacket>(ResScalar(0)),\n              c7 = pset1<ResPacket>(ResScalar(0));\n\n    Index j=0;\n    for(; j+LhsPacketSize<=cols; j+=LhsPacketSize)\n    {\n      RhsPacket b0 = rhs.template load<RhsPacket, Unaligned>(j,0);\n\n      c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+0,j),b0,c0);\n      c1 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+1,j),b0,c1);\n      c2 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+2,j),b0,c2);\n      c3 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+3,j),b0,c3);\n      c4 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+4,j),b0,c4);\n      c5 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+5,j),b0,c5);\n      c6 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+6,j),b0,c6);\n      c7 = pcj.pmadd(lhs.template 
load<LhsPacket,LhsAlignment>(i+7,j),b0,c7);\n    }\n    ResScalar cc0 = predux(c0);\n    ResScalar cc1 = predux(c1);\n    ResScalar cc2 = predux(c2);\n    ResScalar cc3 = predux(c3);\n    ResScalar cc4 = predux(c4);\n    ResScalar cc5 = predux(c5);\n    ResScalar cc6 = predux(c6);\n    ResScalar cc7 = predux(c7);\n    for(; j<cols; ++j)\n    {\n      RhsScalar b0 = rhs(j,0);\n\n      cc0 += cj.pmul(lhs(i+0,j), b0);\n      cc1 += cj.pmul(lhs(i+1,j), b0);\n      cc2 += cj.pmul(lhs(i+2,j), b0);\n      cc3 += cj.pmul(lhs(i+3,j), b0);\n      cc4 += cj.pmul(lhs(i+4,j), b0);\n      cc5 += cj.pmul(lhs(i+5,j), b0);\n      cc6 += cj.pmul(lhs(i+6,j), b0);\n      cc7 += cj.pmul(lhs(i+7,j), b0);\n    }\n    res[(i+0)*resIncr] += alpha*cc0;\n    res[(i+1)*resIncr] += alpha*cc1;\n    res[(i+2)*resIncr] += alpha*cc2;\n    res[(i+3)*resIncr] += alpha*cc3;\n    res[(i+4)*resIncr] += alpha*cc4;\n    res[(i+5)*resIncr] += alpha*cc5;\n    res[(i+6)*resIncr] += alpha*cc6;\n    res[(i+7)*resIncr] += alpha*cc7;\n  }\n  for(; i<n4; i+=4)\n  {\n    ResPacket c0 = pset1<ResPacket>(ResScalar(0)),\n              c1 = pset1<ResPacket>(ResScalar(0)),\n              c2 = pset1<ResPacket>(ResScalar(0)),\n              c3 = pset1<ResPacket>(ResScalar(0));\n\n    Index j=0;\n    for(; j+LhsPacketSize<=cols; j+=LhsPacketSize)\n    {\n      RhsPacket b0 = rhs.template load<RhsPacket, Unaligned>(j,0);\n\n      c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+0,j),b0,c0);\n      c1 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+1,j),b0,c1);\n      c2 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+2,j),b0,c2);\n      c3 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+3,j),b0,c3);\n    }\n    ResScalar cc0 = predux(c0);\n    ResScalar cc1 = predux(c1);\n    ResScalar cc2 = predux(c2);\n    ResScalar cc3 = predux(c3);\n    for(; j<cols; ++j)\n    {\n      RhsScalar b0 = rhs(j,0);\n\n      cc0 += cj.pmul(lhs(i+0,j), b0);\n      cc1 += cj.pmul(lhs(i+1,j), b0);\n      cc2 
+= cj.pmul(lhs(i+2,j), b0);\n      cc3 += cj.pmul(lhs(i+3,j), b0);\n    }\n    res[(i+0)*resIncr] += alpha*cc0;\n    res[(i+1)*resIncr] += alpha*cc1;\n    res[(i+2)*resIncr] += alpha*cc2;\n    res[(i+3)*resIncr] += alpha*cc3;\n  }\n  for(; i<n2; i+=2)\n  {\n    ResPacket c0 = pset1<ResPacket>(ResScalar(0)),\n              c1 = pset1<ResPacket>(ResScalar(0));\n\n    Index j=0;\n    for(; j+LhsPacketSize<=cols; j+=LhsPacketSize)\n    {\n      RhsPacket b0 = rhs.template load<RhsPacket, Unaligned>(j,0);\n\n      c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+0,j),b0,c0);\n      c1 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i+1,j),b0,c1);\n    }\n    ResScalar cc0 = predux(c0);\n    ResScalar cc1 = predux(c1);\n    for(; j<cols; ++j)\n    {\n      RhsScalar b0 = rhs(j,0);\n\n      cc0 += cj.pmul(lhs(i+0,j), b0);\n      cc1 += cj.pmul(lhs(i+1,j), b0);\n    }\n    res[(i+0)*resIncr] += alpha*cc0;\n    res[(i+1)*resIncr] += alpha*cc1;\n  }\n  for(; i<rows; ++i)\n  {\n    ResPacket c0 = pset1<ResPacket>(ResScalar(0));\n    Index j=0;\n    for(; j+LhsPacketSize<=cols; j+=LhsPacketSize)\n    {\n      RhsPacket b0 = rhs.template load<RhsPacket,Unaligned>(j,0);\n      c0 = pcj.pmadd(lhs.template load<LhsPacket,LhsAlignment>(i,j),b0,c0);\n    }\n    ResScalar cc0 = predux(c0);\n    for(; j<cols; ++j)\n    {\n      cc0 += cj.pmul(lhs(i,j), rhs(j,0));\n    }\n    res[i*resIncr] += alpha*cc0;\n  }\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_GENERAL_MATRIX_VECTOR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/GeneralMatrixVector_BLAS.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to BLAS F77\n *   General matrix-vector product functionality based on ?GEMV.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H\n#define EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/**********************************************************************\n* This file implements general matrix-vector multiplication using BLAS\n* gemv function via partial specialization of\n* general_matrix_vector_product::run(..) 
method for float, double,\n* std::complex<float> and std::complex<double> types\n**********************************************************************/\n\n// gemv specialization\n\ntemplate<typename Index, typename LhsScalar, int StorageOrder, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs>\nstruct general_matrix_vector_product_gemv;\n\n#define EIGEN_BLAS_GEMV_SPECIALIZE(Scalar) \\\ntemplate<typename Index, bool ConjugateLhs, bool ConjugateRhs> \\\nstruct general_matrix_vector_product<Index,Scalar,const_blas_data_mapper<Scalar,Index,ColMajor>,ColMajor,ConjugateLhs,Scalar,const_blas_data_mapper<Scalar,Index,RowMajor>,ConjugateRhs,Specialized> { \\\nstatic void run( \\\n  Index rows, Index cols, \\\n  const const_blas_data_mapper<Scalar,Index,ColMajor> &lhs, \\\n  const const_blas_data_mapper<Scalar,Index,RowMajor> &rhs, \\\n  Scalar* res, Index resIncr, Scalar alpha) \\\n{ \\\n  if (ConjugateLhs) { \\\n    general_matrix_vector_product<Index,Scalar,const_blas_data_mapper<Scalar,Index,ColMajor>,ColMajor,ConjugateLhs,Scalar,const_blas_data_mapper<Scalar,Index,RowMajor>,ConjugateRhs,BuiltIn>::run( \\\n      rows, cols, lhs, rhs, res, resIncr, alpha); \\\n  } else { \\\n    general_matrix_vector_product_gemv<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs>::run( \\\n      rows, cols, lhs.data(), lhs.stride(), rhs.data(), rhs.stride(), res, resIncr, alpha); \\\n  } \\\n} \\\n}; \\\ntemplate<typename Index, bool ConjugateLhs, bool ConjugateRhs> \\\nstruct general_matrix_vector_product<Index,Scalar,const_blas_data_mapper<Scalar,Index,RowMajor>,RowMajor,ConjugateLhs,Scalar,const_blas_data_mapper<Scalar,Index,ColMajor>,ConjugateRhs,Specialized> { \\\nstatic void run( \\\n  Index rows, Index cols, \\\n  const const_blas_data_mapper<Scalar,Index,RowMajor> &lhs, \\\n  const const_blas_data_mapper<Scalar,Index,ColMajor> &rhs, \\\n  Scalar* res, Index resIncr, Scalar alpha) \\\n{ \\\n    
general_matrix_vector_product_gemv<Index,Scalar,RowMajor,ConjugateLhs,Scalar,ConjugateRhs>::run( \\\n      rows, cols, lhs.data(), lhs.stride(), rhs.data(), rhs.stride(), res, resIncr, alpha); \\\n} \\\n}; \\\n\nEIGEN_BLAS_GEMV_SPECIALIZE(double)\nEIGEN_BLAS_GEMV_SPECIALIZE(float)\nEIGEN_BLAS_GEMV_SPECIALIZE(dcomplex)\nEIGEN_BLAS_GEMV_SPECIALIZE(scomplex)\n\n#define EIGEN_BLAS_GEMV_SPECIALIZATION(EIGTYPE,BLASTYPE,BLASPREFIX) \\\ntemplate<typename Index, int LhsStorageOrder, bool ConjugateLhs, bool ConjugateRhs> \\\nstruct general_matrix_vector_product_gemv<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,ConjugateRhs> \\\n{ \\\ntypedef Matrix<EIGTYPE,Dynamic,1,ColMajor> GEMVVector;\\\n\\\nstatic void run( \\\n  Index rows, Index cols, \\\n  const EIGTYPE* lhs, Index lhsStride, \\\n  const EIGTYPE* rhs, Index rhsIncr, \\\n  EIGTYPE* res, Index resIncr, EIGTYPE alpha) \\\n{ \\\n  BlasIndex m=convert_index<BlasIndex>(rows), n=convert_index<BlasIndex>(cols), \\\n            lda=convert_index<BlasIndex>(lhsStride), incx=convert_index<BlasIndex>(rhsIncr), incy=convert_index<BlasIndex>(resIncr); \\\n  const EIGTYPE beta(1); \\\n  const EIGTYPE *x_ptr; \\\n  char trans=(LhsStorageOrder==ColMajor) ? 'N' : (ConjugateLhs) ? 
'C' : 'T'; \\\n  if (LhsStorageOrder==RowMajor) { \\\n    m = convert_index<BlasIndex>(cols); \\\n    n = convert_index<BlasIndex>(rows); \\\n  }\\\n  GEMVVector x_tmp; \\\n  if (ConjugateRhs) { \\\n    Map<const GEMVVector, 0, InnerStride<> > map_x(rhs,cols,1,InnerStride<>(incx)); \\\n    x_tmp=map_x.conjugate(); \\\n    x_ptr=x_tmp.data(); \\\n    incx=1; \\\n  } else x_ptr=rhs; \\\n  BLASPREFIX##gemv_(&trans, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, &numext::real_ref(beta), (BLASTYPE*)res, &incy); \\\n}\\\n};\n\nEIGEN_BLAS_GEMV_SPECIALIZATION(double,   double, d)\nEIGEN_BLAS_GEMV_SPECIALIZATION(float,    float,  s)\nEIGEN_BLAS_GEMV_SPECIALIZATION(dcomplex, double, z)\nEIGEN_BLAS_GEMV_SPECIALIZATION(scomplex, float,  c)\n\n} // end namespase internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/Parallelizer.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PARALLELIZER_H\n#define EIGEN_PARALLELIZER_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n/** \\internal */\ninline void manage_multi_threading(Action action, int* v)\n{\n  static EIGEN_UNUSED int m_maxThreads = -1;\n\n  if(action==SetAction)\n  {\n    eigen_internal_assert(v!=0);\n    m_maxThreads = *v;\n  }\n  else if(action==GetAction)\n  {\n    eigen_internal_assert(v!=0);\n    #ifdef EIGEN_HAS_OPENMP\n    if(m_maxThreads>0)\n      *v = m_maxThreads;\n    else\n      *v = omp_get_max_threads();\n    #else\n    *v = 1;\n    #endif\n  }\n  else\n  {\n    eigen_internal_assert(false);\n  }\n}\n\n}\n\n/** Must be call first when calling Eigen from multiple threads */\ninline void initParallel()\n{\n  int nbt;\n  internal::manage_multi_threading(GetAction, &nbt);\n  std::ptrdiff_t l1, l2, l3;\n  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);\n}\n\n/** \\returns the max number of threads reserved for Eigen\n  * \\sa setNbThreads */\ninline int nbThreads()\n{\n  int ret;\n  internal::manage_multi_threading(GetAction, &ret);\n  return ret;\n}\n\n/** Sets the max number of threads reserved for Eigen\n  * \\sa nbThreads */\ninline void setNbThreads(int v)\n{\n  internal::manage_multi_threading(SetAction, &v);\n}\n\nnamespace internal {\n\ntemplate<typename Index> struct GemmParallelInfo\n{\n  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}\n\n  Index volatile sync;\n  int volatile users;\n\n  Index lhs_start;\n  Index lhs_length;\n};\n\ntemplate<bool Condition, typename Functor, typename Index>\nvoid parallelize_gemm(const Functor& func, 
Index rows, Index cols, Index depth, bool transpose)\n{\n  // TODO when EIGEN_USE_BLAS is defined,\n  // we should still enable OMP for other scalar types\n#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)\n  // FIXME the transpose variable is only needed to properly split\n  // the matrix product when multithreading is enabled. This is a temporary\n  // fix to support row-major destination matrices. This whole\n  // parallelizer mechanism has to be redisigned anyway.\n  EIGEN_UNUSED_VARIABLE(depth);\n  EIGEN_UNUSED_VARIABLE(transpose);\n  func(0,rows, 0,cols);\n#else\n\n  // Dynamically check whether we should enable or disable OpenMP.\n  // The conditions are:\n  // - the max number of threads we can create is greater than 1\n  // - we are not already in a parallel code\n  // - the sizes are large enough\n\n  // compute the maximal number of threads from the size of the product:\n  // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.\n  Index size = transpose ? 
rows : cols;\n  Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);\n\n  // compute the maximal number of threads from the total amount of work:\n  double work = static_cast<double>(rows) * static_cast<double>(cols) *\n      static_cast<double>(depth);\n  double kMinTaskSize = 50000;  // FIXME improve this heuristic.\n  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));\n\n  // compute the number of threads we are going to use\n  Index threads = std::min<Index>(nbThreads(), pb_max_threads);\n\n  // if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,\n  // then abort multi-threading\n  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?\n  if((!Condition) || (threads==1) || (omp_get_num_threads()>1))\n    return func(0,rows, 0,cols);\n\n  Eigen::initParallel();\n  func.initParallelSession(threads);\n\n  if(transpose)\n    std::swap(rows,cols);\n\n  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);\n\n  #pragma omp parallel num_threads(threads)\n  {\n    Index i = omp_get_thread_num();\n    // Note that the actual number of threads might be lower than the number of request ones.\n    Index actual_threads = omp_get_num_threads();\n\n    Index blockCols = (cols / actual_threads) & ~Index(0x3);\n    Index blockRows = (rows / actual_threads);\n    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;\n\n    Index r0 = i*blockRows;\n    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;\n\n    Index c0 = i*blockCols;\n    Index actualBlockCols = (i+1==actual_threads) ? 
cols-c0 : blockCols;\n\n    info[i].lhs_start = r0;\n    info[i].lhs_length = actualBlockRows;\n\n    if(transpose) func(c0, actualBlockCols, 0, rows, info);\n    else          func(0, rows, c0, actualBlockCols, info);\n  }\n#endif\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_PARALLELIZER_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/SelfadjointMatrixMatrix.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_H\n#define EIGEN_SELFADJOINT_MATRIX_MATRIX_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n// pack a selfadjoint block diagonal for use with the gebp_kernel\ntemplate<typename Scalar, typename Index, int Pack1, int Pack2_dummy, int StorageOrder>\nstruct symm_pack_lhs\n{\n  template<int BlockRows> inline\n  void pack(Scalar* blockA, const const_blas_data_mapper<Scalar,Index,StorageOrder>& lhs, Index cols, Index i, Index& count)\n  {\n    // normal copy\n    for(Index k=0; k<i; k++)\n      for(Index w=0; w<BlockRows; w++)\n        blockA[count++] = lhs(i+w,k);           // normal\n    // symmetric copy\n    Index h = 0;\n    for(Index k=i; k<i+BlockRows; k++)\n    {\n      for(Index w=0; w<h; w++)\n        blockA[count++] = numext::conj(lhs(k, i+w)); // transposed\n\n      blockA[count++] = numext::real(lhs(k,k));   // real (diagonal)\n\n      for(Index w=h+1; w<BlockRows; w++)\n        blockA[count++] = lhs(i+w, k);          // normal\n      ++h;\n    }\n    // transposed copy\n    for(Index k=i+BlockRows; k<cols; k++)\n      for(Index w=0; w<BlockRows; w++)\n        blockA[count++] = numext::conj(lhs(k, i+w)); // transposed\n  }\n  void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)\n  {\n    enum { PacketSize = packet_traits<Scalar>::size };\n    const_blas_data_mapper<Scalar,Index,StorageOrder> lhs(_lhs,lhsStride);\n    Index count = 0;\n    //Index peeled_mc3 = (rows/Pack1)*Pack1;\n    \n    const Index peeled_mc3 = Pack1>=3*PacketSize ? 
(rows/(3*PacketSize))*(3*PacketSize) : 0;\n    const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;\n    const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;\n    \n    if(Pack1>=3*PacketSize)\n      for(Index i=0; i<peeled_mc3; i+=3*PacketSize)\n        pack<3*PacketSize>(blockA, lhs, cols, i, count);\n    \n    if(Pack1>=2*PacketSize)\n      for(Index i=peeled_mc3; i<peeled_mc2; i+=2*PacketSize)\n        pack<2*PacketSize>(blockA, lhs, cols, i, count);\n    \n    if(Pack1>=1*PacketSize)\n      for(Index i=peeled_mc2; i<peeled_mc1; i+=1*PacketSize)\n        pack<1*PacketSize>(blockA, lhs, cols, i, count);\n\n    // do the same with mr==1\n    for(Index i=peeled_mc1; i<rows; i++)\n    {\n      for(Index k=0; k<i; k++)\n        blockA[count++] = lhs(i, k);                   // normal\n\n      blockA[count++] = numext::real(lhs(i, i));       // real (diagonal)\n\n      for(Index k=i+1; k<cols; k++)\n        blockA[count++] = numext::conj(lhs(k, i));     // transposed\n    }\n  }\n};\n\ntemplate<typename Scalar, typename Index, int nr, int StorageOrder>\nstruct symm_pack_rhs\n{\n  enum { PacketSize = packet_traits<Scalar>::size };\n  void operator()(Scalar* blockB, const Scalar* _rhs, Index rhsStride, Index rows, Index cols, Index k2)\n  {\n    Index end_k = k2 + rows;\n    Index count = 0;\n    const_blas_data_mapper<Scalar,Index,StorageOrder> rhs(_rhs,rhsStride);\n    Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;\n    Index packet_cols4 = nr>=4 ? 
(cols/4) * 4 : 0;\n\n    // first part: normal case\n    for(Index j2=0; j2<k2; j2+=nr)\n    {\n      for(Index k=k2; k<end_k; k++)\n      {\n        blockB[count+0] = rhs(k,j2+0);\n        blockB[count+1] = rhs(k,j2+1);\n        if (nr>=4)\n        {\n          blockB[count+2] = rhs(k,j2+2);\n          blockB[count+3] = rhs(k,j2+3);\n        }\n        if (nr>=8)\n        {\n          blockB[count+4] = rhs(k,j2+4);\n          blockB[count+5] = rhs(k,j2+5);\n          blockB[count+6] = rhs(k,j2+6);\n          blockB[count+7] = rhs(k,j2+7);\n        }\n        count += nr;\n      }\n    }\n\n    // second part: diagonal block\n    Index end8 = nr>=8 ? (std::min)(k2+rows,packet_cols8) : k2;\n    if(nr>=8)\n    {\n      for(Index j2=k2; j2<end8; j2+=8)\n      {\n        // again we can split vertically in three different parts (transpose, symmetric, normal)\n        // transpose\n        for(Index k=k2; k<j2; k++)\n        {\n          blockB[count+0] = numext::conj(rhs(j2+0,k));\n          blockB[count+1] = numext::conj(rhs(j2+1,k));\n          blockB[count+2] = numext::conj(rhs(j2+2,k));\n          blockB[count+3] = numext::conj(rhs(j2+3,k));\n          blockB[count+4] = numext::conj(rhs(j2+4,k));\n          blockB[count+5] = numext::conj(rhs(j2+5,k));\n          blockB[count+6] = numext::conj(rhs(j2+6,k));\n          blockB[count+7] = numext::conj(rhs(j2+7,k));\n          count += 8;\n        }\n        // symmetric\n        Index h = 0;\n        for(Index k=j2; k<j2+8; k++)\n        {\n          // normal\n          for (Index w=0 ; w<h; ++w)\n            blockB[count+w] = rhs(k,j2+w);\n\n          blockB[count+h] = numext::real(rhs(k,k));\n\n          // transpose\n          for (Index w=h+1 ; w<8; ++w)\n            blockB[count+w] = numext::conj(rhs(j2+w,k));\n          count += 8;\n          ++h;\n        }\n        // normal\n        for(Index k=j2+8; k<end_k; k++)\n        {\n          blockB[count+0] = rhs(k,j2+0);\n          blockB[count+1] = rhs(k,j2+1);\n 
         blockB[count+2] = rhs(k,j2+2);\n          blockB[count+3] = rhs(k,j2+3);\n          blockB[count+4] = rhs(k,j2+4);\n          blockB[count+5] = rhs(k,j2+5);\n          blockB[count+6] = rhs(k,j2+6);\n          blockB[count+7] = rhs(k,j2+7);\n          count += 8;\n        }\n      }\n    }\n    if(nr>=4)\n    {\n      for(Index j2=end8; j2<(std::min)(k2+rows,packet_cols4); j2+=4)\n      {\n        // again we can split vertically in three different parts (transpose, symmetric, normal)\n        // transpose\n        for(Index k=k2; k<j2; k++)\n        {\n          blockB[count+0] = numext::conj(rhs(j2+0,k));\n          blockB[count+1] = numext::conj(rhs(j2+1,k));\n          blockB[count+2] = numext::conj(rhs(j2+2,k));\n          blockB[count+3] = numext::conj(rhs(j2+3,k));\n          count += 4;\n        }\n        // symmetric\n        Index h = 0;\n        for(Index k=j2; k<j2+4; k++)\n        {\n          // normal\n          for (Index w=0 ; w<h; ++w)\n            blockB[count+w] = rhs(k,j2+w);\n\n          blockB[count+h] = numext::real(rhs(k,k));\n\n          // transpose\n          for (Index w=h+1 ; w<4; ++w)\n            blockB[count+w] = numext::conj(rhs(j2+w,k));\n          count += 4;\n          ++h;\n        }\n        // normal\n        for(Index k=j2+4; k<end_k; k++)\n        {\n          blockB[count+0] = rhs(k,j2+0);\n          blockB[count+1] = rhs(k,j2+1);\n          blockB[count+2] = rhs(k,j2+2);\n          blockB[count+3] = rhs(k,j2+3);\n          count += 4;\n        }\n      }\n    }\n\n    // third part: transposed\n    if(nr>=8)\n    {\n      for(Index j2=k2+rows; j2<packet_cols8; j2+=8)\n      {\n        for(Index k=k2; k<end_k; k++)\n        {\n          blockB[count+0] = numext::conj(rhs(j2+0,k));\n          blockB[count+1] = numext::conj(rhs(j2+1,k));\n          blockB[count+2] = numext::conj(rhs(j2+2,k));\n          blockB[count+3] = numext::conj(rhs(j2+3,k));\n          blockB[count+4] = numext::conj(rhs(j2+4,k));\n          
blockB[count+5] = numext::conj(rhs(j2+5,k));\n          blockB[count+6] = numext::conj(rhs(j2+6,k));\n          blockB[count+7] = numext::conj(rhs(j2+7,k));\n          count += 8;\n        }\n      }\n    }\n    if(nr>=4)\n    {\n      for(Index j2=(std::max)(packet_cols8,k2+rows); j2<packet_cols4; j2+=4)\n      {\n        for(Index k=k2; k<end_k; k++)\n        {\n          blockB[count+0] = numext::conj(rhs(j2+0,k));\n          blockB[count+1] = numext::conj(rhs(j2+1,k));\n          blockB[count+2] = numext::conj(rhs(j2+2,k));\n          blockB[count+3] = numext::conj(rhs(j2+3,k));\n          count += 4;\n        }\n      }\n    }\n\n    // copy the remaining columns one at a time (=> the same with nr==1)\n    for(Index j2=packet_cols4; j2<cols; ++j2)\n    {\n      // transpose\n      Index half = (std::min)(end_k,j2);\n      for(Index k=k2; k<half; k++)\n      {\n        blockB[count] = numext::conj(rhs(j2,k));\n        count += 1;\n      }\n\n      if(half==j2 && half<k2+rows)\n      {\n        blockB[count] = numext::real(rhs(j2,j2));\n        count += 1;\n      }\n      else\n        half--;\n\n      // normal\n      for(Index k=half+1; k<k2+rows; k++)\n      {\n        blockB[count] = rhs(k,j2);\n        count += 1;\n      }\n    }\n  }\n};\n\n/* Optimized selfadjoint matrix * matrix (_SYMM) product built on top of\n * the general matrix matrix product.\n */\ntemplate <typename Scalar, typename Index,\n          int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,\n          int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs,\n          int ResStorageOrder>\nstruct product_selfadjoint_matrix;\n\ntemplate <typename Scalar, typename Index,\n          int LhsStorageOrder, bool LhsSelfAdjoint, bool ConjugateLhs,\n          int RhsStorageOrder, bool RhsSelfAdjoint, bool ConjugateRhs>\nstruct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,LhsSelfAdjoint,ConjugateLhs, RhsStorageOrder,RhsSelfAdjoint,ConjugateRhs,RowMajor>\n{\n\n  static 
EIGEN_STRONG_INLINE void run(\n    Index rows, Index cols,\n    const Scalar* lhs, Index lhsStride,\n    const Scalar* rhs, Index rhsStride,\n    Scalar* res,       Index resStride,\n    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)\n  {\n    product_selfadjoint_matrix<Scalar, Index,\n      EIGEN_LOGICAL_XOR(RhsSelfAdjoint,RhsStorageOrder==RowMajor) ? ColMajor : RowMajor,\n      RhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsSelfAdjoint,ConjugateRhs),\n      EIGEN_LOGICAL_XOR(LhsSelfAdjoint,LhsStorageOrder==RowMajor) ? ColMajor : RowMajor,\n      LhsSelfAdjoint, NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsSelfAdjoint,ConjugateLhs),\n      ColMajor>\n      ::run(cols, rows,  rhs, rhsStride,  lhs, lhsStride,  res, resStride,  alpha, blocking);\n  }\n};\n\ntemplate <typename Scalar, typename Index,\n          int LhsStorageOrder, bool ConjugateLhs,\n          int RhsStorageOrder, bool ConjugateRhs>\nstruct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>\n{\n\n  static EIGEN_DONT_INLINE void run(\n    Index rows, Index cols,\n    const Scalar* _lhs, Index lhsStride,\n    const Scalar* _rhs, Index rhsStride,\n    Scalar* res,        Index resStride,\n    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);\n};\n\ntemplate <typename Scalar, typename Index,\n          int LhsStorageOrder, bool ConjugateLhs,\n          int RhsStorageOrder, bool ConjugateRhs>\nEIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,true,ConjugateLhs, RhsStorageOrder,false,ConjugateRhs,ColMajor>::run(\n    Index rows, Index cols,\n    const Scalar* _lhs, Index lhsStride,\n    const Scalar* _rhs, Index rhsStride,\n    Scalar* _res,        Index resStride,\n    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)\n  {\n    Index size = rows;\n\n    typedef gebp_traits<Scalar,Scalar> Traits;\n\n    typedef 
const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;\n    typedef const_blas_data_mapper<Scalar, Index, (LhsStorageOrder == RowMajor) ? ColMajor : RowMajor> LhsTransposeMapper;\n    typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;\n    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;\n    LhsMapper lhs(_lhs,lhsStride);\n    LhsTransposeMapper lhs_transpose(_lhs,lhsStride);\n    RhsMapper rhs(_rhs,rhsStride);\n    ResMapper res(_res, resStride);\n\n    Index kc = blocking.kc();                   // cache block size along the K direction\n    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction\n    // kc must be smaller than mc\n    kc = (std::min)(kc,mc);\n    std::size_t sizeA = kc*mc;\n    std::size_t sizeB = kc*cols;\n    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());\n    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());\n\n    gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;\n    symm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;\n    gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder> pack_rhs;\n    gemm_pack_lhs<Scalar, Index, LhsTransposeMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder==RowMajor?ColMajor:RowMajor, true> pack_lhs_transposed;\n\n    for(Index k2=0; k2<size; k2+=kc)\n    {\n      const Index actual_kc = (std::min)(k2+kc,size)-k2;\n\n      // we have selected one row panel of rhs and one column panel of lhs\n      // pack rhs's panel into a sequential chunk of memory\n      // and expand each coeff to a constant packet for further reuse\n      pack_rhs(blockB, rhs.getSubMapper(k2,0), actual_kc, cols);\n\n      // the select lhs's panel has to be split in three different parts:\n      //  1 - the transposed panel above the diagonal block => 
transposed packed copy\n      //  2 - the diagonal block => special packed copy\n      //  3 - the panel below the diagonal block => generic packed copy\n      for(Index i2=0; i2<k2; i2+=mc)\n      {\n        const Index actual_mc = (std::min)(i2+mc,k2)-i2;\n        // transposed packed copy\n        pack_lhs_transposed(blockA, lhs_transpose.getSubMapper(i2, k2), actual_kc, actual_mc);\n\n        gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);\n      }\n      // the block diagonal\n      {\n        const Index actual_mc = (std::min)(k2+kc,size)-k2;\n        // symmetric packed copy\n        pack_lhs(blockA, &lhs(k2,k2), lhsStride, actual_kc, actual_mc);\n\n        gebp_kernel(res.getSubMapper(k2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);\n      }\n\n      for(Index i2=k2+kc; i2<size; i2+=mc)\n      {\n        const Index actual_mc = (std::min)(i2+mc,size)-i2;\n        gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder,false>()\n          (blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);\n\n        gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);\n      }\n    }\n  }\n\n// matrix * selfadjoint product\ntemplate <typename Scalar, typename Index,\n          int LhsStorageOrder, bool ConjugateLhs,\n          int RhsStorageOrder, bool ConjugateRhs>\nstruct product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>\n{\n\n  static EIGEN_DONT_INLINE void run(\n    Index rows, Index cols,\n    const Scalar* _lhs, Index lhsStride,\n    const Scalar* _rhs, Index rhsStride,\n    Scalar* res,        Index resStride,\n    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);\n};\n\ntemplate <typename Scalar, typename Index,\n          int LhsStorageOrder, bool ConjugateLhs,\n          int RhsStorageOrder, bool ConjugateRhs>\nEIGEN_DONT_INLINE void 
product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,false,ConjugateLhs, RhsStorageOrder,true,ConjugateRhs,ColMajor>::run(\n    Index rows, Index cols,\n    const Scalar* _lhs, Index lhsStride,\n    const Scalar* _rhs, Index rhsStride,\n    Scalar* _res,        Index resStride,\n    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)\n  {\n    Index size = cols;\n\n    typedef gebp_traits<Scalar,Scalar> Traits;\n\n    typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;\n    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;\n    LhsMapper lhs(_lhs,lhsStride);\n    ResMapper res(_res,resStride);\n\n    Index kc = blocking.kc();                   // cache block size along the K direction\n    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction\n    std::size_t sizeA = kc*mc;\n    std::size_t sizeB = kc*cols;\n    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());\n    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());\n\n    gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;\n    gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;\n    symm_pack_rhs<Scalar, Index, Traits::nr,RhsStorageOrder> pack_rhs;\n\n    for(Index k2=0; k2<size; k2+=kc)\n    {\n      const Index actual_kc = (std::min)(k2+kc,size)-k2;\n\n      pack_rhs(blockB, _rhs, rhsStride, actual_kc, cols, k2);\n\n      // => GEPP\n      for(Index i2=0; i2<rows; i2+=mc)\n      {\n        const Index actual_mc = (std::min)(i2+mc,rows)-i2;\n        pack_lhs(blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc);\n\n        gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, alpha);\n      }\n    }\n  }\n\n} // end namespace 
internal\n\n/***************************************************************************\n* Wrapper to product_selfadjoint_matrix\n***************************************************************************/\n\nnamespace internal {\n  \ntemplate<typename Lhs, int LhsMode, typename Rhs, int RhsMode>\nstruct selfadjoint_product_impl<Lhs,LhsMode,false,Rhs,RhsMode,false>\n{\n  typedef typename Product<Lhs,Rhs>::Scalar Scalar;\n  \n  typedef internal::blas_traits<Lhs> LhsBlasTraits;\n  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;\n  typedef internal::blas_traits<Rhs> RhsBlasTraits;\n  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;\n  \n  enum {\n    LhsIsUpper = (LhsMode&(Upper|Lower))==Upper,\n    LhsIsSelfAdjoint = (LhsMode&SelfAdjoint)==SelfAdjoint,\n    RhsIsUpper = (RhsMode&(Upper|Lower))==Upper,\n    RhsIsSelfAdjoint = (RhsMode&SelfAdjoint)==SelfAdjoint\n  };\n  \n  template<typename Dest>\n  static void run(Dest &dst, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)\n  {\n    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());\n\n    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);\n    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);\n\n    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)\n                               * RhsBlasTraits::extractScalarFactor(a_rhs);\n\n    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,\n              Lhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxColsAtCompileTime,1> BlockingType;\n\n    BlockingType blocking(lhs.rows(), rhs.cols(), lhs.cols(), 1, false);\n\n    internal::product_selfadjoint_matrix<Scalar, Index,\n      EIGEN_LOGICAL_XOR(LhsIsUpper,internal::traits<Lhs>::Flags &RowMajorBit) ? 
RowMajor : ColMajor, LhsIsSelfAdjoint,\n      NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(LhsIsUpper,bool(LhsBlasTraits::NeedToConjugate)),\n      EIGEN_LOGICAL_XOR(RhsIsUpper,internal::traits<Rhs>::Flags &RowMajorBit) ? RowMajor : ColMajor, RhsIsSelfAdjoint,\n      NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(RhsIsUpper,bool(RhsBlasTraits::NeedToConjugate)),\n      internal::traits<Dest>::Flags&RowMajorBit  ? RowMajor : ColMajor>\n      ::run(\n        lhs.rows(), rhs.cols(),                 // sizes\n        &lhs.coeffRef(0,0), lhs.outerStride(),  // lhs info\n        &rhs.coeffRef(0,0), rhs.outerStride(),  // rhs info\n        &dst.coeffRef(0,0), dst.outerStride(),  // result info\n        actualAlpha, blocking                   // alpha\n      );\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/SelfadjointMatrixMatrix_BLAS.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n ********************************************************************************\n *   Content : Eigen bindings to BLAS F77\n *   Self adjoint matrix * matrix product functionality based on ?SYMM/?HEMM.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_SELFADJOINT_MATRIX_MATRIX_BLAS_H\n#define EIGEN_SELFADJOINT_MATRIX_MATRIX_BLAS_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n\n/* Optimized selfadjoint matrix * matrix (?SYMM/?HEMM) product */\n\n#define EIGEN_BLAS_SYMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \\\ntemplate <typename Index, \\\n          int LhsStorageOrder, bool ConjugateLhs, \\\n          int RhsStorageOrder, bool ConjugateRhs> \\\nstruct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor> \\\n{\\\n\\\n  static void run( \\\n    Index rows, Index cols, \\\n    const EIGTYPE* _lhs, Index lhsStride, \\\n    const EIGTYPE* _rhs, Index rhsStride, \\\n    EIGTYPE* res,        Index resStride, \\\n    EIGTYPE alpha, level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/) \\\n  { \\\n    char side='L', uplo='L'; \\\n    BlasIndex m, n, lda, ldb, ldc; \\\n    const EIGTYPE *a, *b; \\\n    EIGTYPE beta(1); \\\n    MatrixX##EIGPREFIX b_tmp; \\\n\\\n/* Set transpose options */ \\\n/* Set m, n, k */ \\\n    m = convert_index<BlasIndex>(rows);  \\\n    n = convert_index<BlasIndex>(cols);  \\\n\\\n/* Set lda, ldb, ldc */ 
\\\n    lda = convert_index<BlasIndex>(lhsStride); \\\n    ldb = convert_index<BlasIndex>(rhsStride); \\\n    ldc = convert_index<BlasIndex>(resStride); \\\n\\\n/* Set a, b, c */ \\\n    if (LhsStorageOrder==RowMajor) uplo='U'; \\\n    a = _lhs; \\\n\\\n    if (RhsStorageOrder==RowMajor) { \\\n      Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \\\n      b_tmp = rhs.adjoint(); \\\n      b = b_tmp.data(); \\\n      ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \\\n    } else b = _rhs; \\\n\\\n    BLASPREFIX##symm_(&side, &uplo, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, &numext::real_ref(beta), (BLASTYPE*)res, &ldc); \\\n\\\n  } \\\n};\n\n\n#define EIGEN_BLAS_HEMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \\\ntemplate <typename Index, \\\n          int LhsStorageOrder, bool ConjugateLhs, \\\n          int RhsStorageOrder, bool ConjugateRhs> \\\nstruct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,true,ConjugateLhs,RhsStorageOrder,false,ConjugateRhs,ColMajor> \\\n{\\\n  static void run( \\\n    Index rows, Index cols, \\\n    const EIGTYPE* _lhs, Index lhsStride, \\\n    const EIGTYPE* _rhs, Index rhsStride, \\\n    EIGTYPE* res,        Index resStride, \\\n    EIGTYPE alpha, level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/) \\\n  { \\\n    char side='L', uplo='L'; \\\n    BlasIndex m, n, lda, ldb, ldc; \\\n    const EIGTYPE *a, *b; \\\n    EIGTYPE beta(1); \\\n    MatrixX##EIGPREFIX b_tmp; \\\n    Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder> a_tmp; \\\n\\\n/* Set transpose options */ \\\n/* Set m, n, k */ \\\n    m = convert_index<BlasIndex>(rows); \\\n    n = convert_index<BlasIndex>(cols); \\\n\\\n/* Set lda, ldb, ldc */ \\\n    lda = convert_index<BlasIndex>(lhsStride); \\\n    ldb = convert_index<BlasIndex>(rhsStride); \\\n    ldc = convert_index<BlasIndex>(resStride); \\\n\\\n/* Set a, b, c */ \\\n    if (((LhsStorageOrder==ColMajor) && ConjugateLhs) 
|| ((LhsStorageOrder==RowMajor) && (!ConjugateLhs))) { \\\n      Map<const Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder>, 0, OuterStride<> > lhs(_lhs,m,m,OuterStride<>(lhsStride)); \\\n      a_tmp = lhs.conjugate(); \\\n      a = a_tmp.data(); \\\n      lda = convert_index<BlasIndex>(a_tmp.outerStride()); \\\n    } else a = _lhs; \\\n    if (LhsStorageOrder==RowMajor) uplo='U'; \\\n\\\n    if (RhsStorageOrder==ColMajor && (!ConjugateRhs)) { \\\n       b = _rhs; } \\\n    else { \\\n      if (RhsStorageOrder==ColMajor && ConjugateRhs) { \\\n        Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,m,n,OuterStride<>(rhsStride)); \\\n        b_tmp = rhs.conjugate(); \\\n      } else \\\n      if (ConjugateRhs) { \\\n        Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \\\n        b_tmp = rhs.adjoint(); \\\n      } else { \\\n        Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > rhs(_rhs,n,m,OuterStride<>(rhsStride)); \\\n        b_tmp = rhs.transpose(); \\\n      } \\\n      b = b_tmp.data(); \\\n      ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \\\n    } \\\n\\\n    BLASPREFIX##hemm_(&side, &uplo, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, &numext::real_ref(beta), (BLASTYPE*)res, &ldc); \\\n\\\n  } \\\n};\n\nEIGEN_BLAS_SYMM_L(double, double, d, d)\nEIGEN_BLAS_SYMM_L(float, float, f, s)\nEIGEN_BLAS_HEMM_L(dcomplex, double, cd, z)\nEIGEN_BLAS_HEMM_L(scomplex, float, cf, c)\n\n\n/* Optimized matrix * selfadjoint matrix (?SYMM/?HEMM) product */\n\n#define EIGEN_BLAS_SYMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \\\ntemplate <typename Index, \\\n          int LhsStorageOrder, bool ConjugateLhs, \\\n          int RhsStorageOrder, bool ConjugateRhs> \\\nstruct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor> \\\n{\\\n\\\n  static void run( \\\n    Index rows, Index cols, \\\n    const 
EIGTYPE* _lhs, Index lhsStride, \\\n    const EIGTYPE* _rhs, Index rhsStride, \\\n    EIGTYPE* res,        Index resStride, \\\n    EIGTYPE alpha, level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/) \\\n  { \\\n    char side='R', uplo='L'; \\\n    BlasIndex m, n, lda, ldb, ldc; \\\n    const EIGTYPE *a, *b; \\\n    EIGTYPE beta(1); \\\n    MatrixX##EIGPREFIX b_tmp; \\\n\\\n/* Set m, n, k */ \\\n    m = convert_index<BlasIndex>(rows);  \\\n    n = convert_index<BlasIndex>(cols);  \\\n\\\n/* Set lda, ldb, ldc */ \\\n    lda = convert_index<BlasIndex>(rhsStride); \\\n    ldb = convert_index<BlasIndex>(lhsStride); \\\n    ldc = convert_index<BlasIndex>(resStride); \\\n\\\n/* Set a, b, c */ \\\n    if (RhsStorageOrder==RowMajor) uplo='U'; \\\n    a = _rhs; \\\n\\\n    if (LhsStorageOrder==RowMajor) { \\\n      Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,n,m,OuterStride<>(rhsStride)); \\\n      b_tmp = lhs.adjoint(); \\\n      b = b_tmp.data(); \\\n      ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \\\n    } else b = _lhs; \\\n\\\n    BLASPREFIX##symm_(&side, &uplo, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, &numext::real_ref(beta), (BLASTYPE*)res, &ldc); \\\n\\\n  } \\\n};\n\n\n#define EIGEN_BLAS_HEMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \\\ntemplate <typename Index, \\\n          int LhsStorageOrder, bool ConjugateLhs, \\\n          int RhsStorageOrder, bool ConjugateRhs> \\\nstruct product_selfadjoint_matrix<EIGTYPE,Index,LhsStorageOrder,false,ConjugateLhs,RhsStorageOrder,true,ConjugateRhs,ColMajor> \\\n{\\\n  static void run( \\\n    Index rows, Index cols, \\\n    const EIGTYPE* _lhs, Index lhsStride, \\\n    const EIGTYPE* _rhs, Index rhsStride, \\\n    EIGTYPE* res,        Index resStride, \\\n    EIGTYPE alpha, level3_blocking<EIGTYPE, EIGTYPE>& /*blocking*/) \\\n  { \\\n    char side='R', uplo='L'; \\\n    BlasIndex m, n, lda, ldb, ldc; \\\n    const EIGTYPE *a, *b; \\\n    EIGTYPE beta(1); 
\\\n    MatrixX##EIGPREFIX b_tmp; \\\n    Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> a_tmp; \\\n\\\n/* Set m, n, k */ \\\n    m = convert_index<BlasIndex>(rows); \\\n    n = convert_index<BlasIndex>(cols); \\\n\\\n/* Set lda, ldb, ldc */ \\\n    lda = convert_index<BlasIndex>(rhsStride); \\\n    ldb = convert_index<BlasIndex>(lhsStride); \\\n    ldc = convert_index<BlasIndex>(resStride); \\\n\\\n/* Set a, b, c */ \\\n    if (((RhsStorageOrder==ColMajor) && ConjugateRhs) || ((RhsStorageOrder==RowMajor) && (!ConjugateRhs))) { \\\n      Map<const Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder>, 0, OuterStride<> > rhs(_rhs,n,n,OuterStride<>(rhsStride)); \\\n      a_tmp = rhs.conjugate(); \\\n      a = a_tmp.data(); \\\n      lda = convert_index<BlasIndex>(a_tmp.outerStride()); \\\n    } else a = _rhs; \\\n    if (RhsStorageOrder==RowMajor) uplo='U'; \\\n\\\n    if (LhsStorageOrder==ColMajor && (!ConjugateLhs)) { \\\n       b = _lhs; } \\\n    else { \\\n      if (LhsStorageOrder==ColMajor && ConjugateLhs) { \\\n        Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,m,n,OuterStride<>(lhsStride)); \\\n        b_tmp = lhs.conjugate(); \\\n      } else \\\n      if (ConjugateLhs) { \\\n        Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \\\n        b_tmp = lhs.adjoint(); \\\n      } else { \\\n        Map<const MatrixX##EIGPREFIX, 0, OuterStride<> > lhs(_lhs,n,m,OuterStride<>(lhsStride)); \\\n        b_tmp = lhs.transpose(); \\\n      } \\\n      b = b_tmp.data(); \\\n      ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \\\n    } \\\n\\\n    BLASPREFIX##hemm_(&side, &uplo, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)b, &ldb, &numext::real_ref(beta), (BLASTYPE*)res, &ldc); \\\n  } \\\n};\n\nEIGEN_BLAS_SYMM_R(double, double, d, d)\nEIGEN_BLAS_SYMM_R(float, float, f, s)\nEIGEN_BLAS_HEMM_R(dcomplex, double, cd, z)\nEIGEN_BLAS_HEMM_R(scomplex, float, cf, c)\n\n} // end 
namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SELFADJOINT_MATRIX_MATRIX_BLAS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/SelfadjointMatrixVector.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_H\n#define EIGEN_SELFADJOINT_MATRIX_VECTOR_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/* Optimized selfadjoint matrix * vector product:\n * This algorithm processes 2 columns at onces that allows to both reduce\n * the number of load/stores of the result by a factor 2 and to reduce\n * the instruction dependency.\n */\n\ntemplate<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version=Specialized>\nstruct selfadjoint_matrix_vector_product;\n\ntemplate<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>\nstruct selfadjoint_matrix_vector_product\n\n{\nstatic EIGEN_DONT_INLINE void run(\n  Index size,\n  const Scalar*  lhs, Index lhsStride,\n  const Scalar*  rhs,\n  Scalar* res,\n  Scalar alpha);\n};\n\ntemplate<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs, int Version>\nEIGEN_DONT_INLINE void selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Version>::run(\n  Index size,\n  const Scalar*  lhs, Index lhsStride,\n  const Scalar*  rhs,\n  Scalar* res,\n  Scalar alpha)\n{\n  typedef typename packet_traits<Scalar>::type Packet;\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  const Index PacketSize = sizeof(Packet)/sizeof(Scalar);\n\n  enum {\n    IsRowMajor = StorageOrder==RowMajor ? 1 : 0,\n    IsLower = UpLo == Lower ? 
1 : 0,\n    FirstTriangular = IsRowMajor == IsLower\n  };\n\n  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> cj0;\n  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1;\n  conj_helper<RealScalar,Scalar,false, ConjugateRhs> cjd;\n\n  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> pcj0;\n  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;\n\n  Scalar cjAlpha = ConjugateRhs ? numext::conj(alpha) : alpha;\n\n\n  Index bound = (std::max)(Index(0),size-8) & 0xfffffffe;\n  if (FirstTriangular)\n    bound = size - bound;\n\n  for (Index j=FirstTriangular ? bound : 0;\n       j<(FirstTriangular ? size : bound);j+=2)\n  {\n    const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;\n    const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;\n\n    Scalar t0 = cjAlpha * rhs[j];\n    Packet ptmp0 = pset1<Packet>(t0);\n    Scalar t1 = cjAlpha * rhs[j+1];\n    Packet ptmp1 = pset1<Packet>(t1);\n\n    Scalar t2(0);\n    Packet ptmp2 = pset1<Packet>(t2);\n    Scalar t3(0);\n    Packet ptmp3 = pset1<Packet>(t3);\n\n    Index starti = FirstTriangular ? 0 : j+2;\n    Index endi   = FirstTriangular ? 
j : size;\n    Index alignedStart = (starti) + internal::first_default_aligned(&res[starti], endi-starti);\n    Index alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize);\n\n    res[j]   += cjd.pmul(numext::real(A0[j]), t0);\n    res[j+1] += cjd.pmul(numext::real(A1[j+1]), t1);\n    if(FirstTriangular)\n    {\n      res[j]   += cj0.pmul(A1[j],   t1);\n      t3       += cj1.pmul(A1[j],   rhs[j]);\n    }\n    else\n    {\n      res[j+1] += cj0.pmul(A0[j+1],t0);\n      t2 += cj1.pmul(A0[j+1], rhs[j+1]);\n    }\n\n    for (Index i=starti; i<alignedStart; ++i)\n    {\n      res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i],t1);\n      t2 += cj1.pmul(A0[i], rhs[i]);\n      t3 += cj1.pmul(A1[i], rhs[i]);\n    }\n    // Yes this an optimization for gcc 4.3 and 4.4 (=> huge speed up)\n    // gcc 4.2 does this optimization automatically.\n    const Scalar* EIGEN_RESTRICT a0It  = A0  + alignedStart;\n    const Scalar* EIGEN_RESTRICT a1It  = A1  + alignedStart;\n    const Scalar* EIGEN_RESTRICT rhsIt = rhs + alignedStart;\n          Scalar* EIGEN_RESTRICT resIt = res + alignedStart;\n    for (Index i=alignedStart; i<alignedEnd; i+=PacketSize)\n    {\n      Packet A0i = ploadu<Packet>(a0It);  a0It  += PacketSize;\n      Packet A1i = ploadu<Packet>(a1It);  a1It  += PacketSize;\n      Packet Bi  = ploadu<Packet>(rhsIt); rhsIt += PacketSize; // FIXME should be aligned in most cases\n      Packet Xi  = pload <Packet>(resIt);\n\n      Xi    = pcj0.pmadd(A0i,ptmp0, pcj0.pmadd(A1i,ptmp1,Xi));\n      ptmp2 = pcj1.pmadd(A0i,  Bi, ptmp2);\n      ptmp3 = pcj1.pmadd(A1i,  Bi, ptmp3);\n      pstore(resIt,Xi); resIt += PacketSize;\n    }\n    for (Index i=alignedEnd; i<endi; i++)\n    {\n      res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i],t1);\n      t2 += cj1.pmul(A0[i], rhs[i]);\n      t3 += cj1.pmul(A1[i], rhs[i]);\n    }\n\n    res[j]   += alpha * (t2 + predux(ptmp2));\n    res[j+1] += alpha * (t3 + predux(ptmp3));\n  }\n  for (Index j=FirstTriangular ? 
0 : bound;j<(FirstTriangular ? bound : size);j++)\n  {\n    const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;\n\n    Scalar t1 = cjAlpha * rhs[j];\n    Scalar t2(0);\n    res[j] += cjd.pmul(numext::real(A0[j]), t1);\n    for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++)\n    {\n      res[i] += cj0.pmul(A0[i], t1);\n      t2 += cj1.pmul(A0[i], rhs[i]);\n    }\n    res[j] += alpha * t2;\n  }\n}\n\n} // end namespace internal \n\n/***************************************************************************\n* Wrapper to product_selfadjoint_vector\n***************************************************************************/\n\nnamespace internal {\n\ntemplate<typename Lhs, int LhsMode, typename Rhs>\nstruct selfadjoint_product_impl<Lhs,LhsMode,false,Rhs,0,true>\n{\n  typedef typename Product<Lhs,Rhs>::Scalar Scalar;\n  \n  typedef internal::blas_traits<Lhs> LhsBlasTraits;\n  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;\n  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;\n  \n  typedef internal::blas_traits<Rhs> RhsBlasTraits;\n  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;\n  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;\n\n  enum { LhsUpLo = LhsMode&(Upper|Lower) };\n\n  template<typename Dest>\n  static void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)\n  {\n    typedef typename Dest::Scalar ResScalar;\n    typedef typename Rhs::Scalar RhsScalar;\n    typedef Map<Matrix<ResScalar,Dynamic,1>, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits<ResScalar>::size)> MappedDest;\n    \n    eigen_assert(dest.rows()==a_lhs.rows() && dest.cols()==a_rhs.cols());\n\n    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);\n    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);\n\n    Scalar actualAlpha = 
alpha * LhsBlasTraits::extractScalarFactor(a_lhs)\n                               * RhsBlasTraits::extractScalarFactor(a_rhs);\n\n    enum {\n      EvalToDest = (Dest::InnerStrideAtCompileTime==1),\n      UseRhs = (ActualRhsTypeCleaned::InnerStrideAtCompileTime==1)\n    };\n    \n    internal::gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,!EvalToDest> static_dest;\n    internal::gemv_static_vector_if<RhsScalar,ActualRhsTypeCleaned::SizeAtCompileTime,ActualRhsTypeCleaned::MaxSizeAtCompileTime,!UseRhs> static_rhs;\n\n    ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),\n                                                  EvalToDest ? dest.data() : static_dest.data());\n                                                  \n    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,rhs.size(),\n        UseRhs ? const_cast<RhsScalar*>(rhs.data()) : static_rhs.data());\n    \n    if(!EvalToDest)\n    {\n      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN\n      Index size = dest.size();\n      EIGEN_DENSE_STORAGE_CTOR_PLUGIN\n      #endif\n      MappedDest(actualDestPtr, dest.size()) = dest;\n    }\n      \n    if(!UseRhs)\n    {\n      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN\n      Index size = rhs.size();\n      EIGEN_DENSE_STORAGE_CTOR_PLUGIN\n      #endif\n      Map<typename ActualRhsTypeCleaned::PlainObject>(actualRhsPtr, rhs.size()) = rhs;\n    }\n      \n      \n    internal::selfadjoint_matrix_vector_product<Scalar, Index, (internal::traits<ActualLhsTypeCleaned>::Flags&RowMajorBit) ? 
RowMajor : ColMajor,\n                                                int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>::run\n      (\n        lhs.rows(),                             // size\n        &lhs.coeffRef(0,0),  lhs.outerStride(), // lhs info\n        actualRhsPtr,                           // rhs info\n        actualDestPtr,                          // result info\n        actualAlpha                             // scale factor\n      );\n    \n    if(!EvalToDest)\n      dest = MappedDest(actualDestPtr, dest.size());\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, int RhsMode>\nstruct selfadjoint_product_impl<Lhs,0,true,Rhs,RhsMode,false>\n{\n  typedef typename Product<Lhs,Rhs>::Scalar Scalar;\n  enum { RhsUpLo = RhsMode&(Upper|Lower)  };\n\n  template<typename Dest>\n  static void run(Dest& dest, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar& alpha)\n  {\n    // let's simply transpose the product\n    Transpose<Dest> destT(dest);\n    selfadjoint_product_impl<Transpose<const Rhs>, int(RhsUpLo)==Upper ? Lower : Upper, false,\n                             Transpose<const Lhs>, 0, true>::run(destT, a_rhs.transpose(), a_lhs.transpose(), alpha);\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/SelfadjointMatrixVector_BLAS.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to BLAS F77\n *   Selfadjoint matrix-vector product functionality based on ?SYMV/HEMV.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_BLAS_H\n#define EIGEN_SELFADJOINT_MATRIX_VECTOR_BLAS_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/**********************************************************************\n* This file implements selfadjoint matrix-vector multiplication using BLAS\n**********************************************************************/\n\n// symv/hemv specialization\n\ntemplate<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs>\nstruct selfadjoint_matrix_vector_product_symv :\n  selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,BuiltIn> {};\n\n#define EIGEN_BLAS_SYMV_SPECIALIZE(Scalar) \\\ntemplate<typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs> \\\nstruct selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,Specialized> { \\\nstatic void run( \\\n  Index size, const Scalar*  lhs, Index lhsStride, \\\n  const Scalar* _rhs, Scalar* res, Scalar alpha) { \\\n    enum {\\\n      IsColMajor = StorageOrder==ColMajor \\\n    }; \\\n    if (IsColMajor == ConjugateLhs) {\\\n      
selfadjoint_matrix_vector_product<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs,BuiltIn>::run( \\\n        size, lhs, lhsStride, _rhs, res, alpha);  \\\n    } else {\\\n      selfadjoint_matrix_vector_product_symv<Scalar,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs>::run( \\\n        size, lhs, lhsStride, _rhs, res, alpha);  \\\n    }\\\n  } \\\n}; \\\n\nEIGEN_BLAS_SYMV_SPECIALIZE(double)\nEIGEN_BLAS_SYMV_SPECIALIZE(float)\nEIGEN_BLAS_SYMV_SPECIALIZE(dcomplex)\nEIGEN_BLAS_SYMV_SPECIALIZE(scomplex)\n\n#define EIGEN_BLAS_SYMV_SPECIALIZATION(EIGTYPE,BLASTYPE,BLASFUNC) \\\ntemplate<typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs> \\\nstruct selfadjoint_matrix_vector_product_symv<EIGTYPE,Index,StorageOrder,UpLo,ConjugateLhs,ConjugateRhs> \\\n{ \\\ntypedef Matrix<EIGTYPE,Dynamic,1,ColMajor> SYMVVector;\\\n\\\nstatic void run( \\\nIndex size, const EIGTYPE*  lhs, Index lhsStride, \\\nconst EIGTYPE* _rhs, EIGTYPE* res, EIGTYPE alpha) \\\n{ \\\n  enum {\\\n    IsRowMajor = StorageOrder==RowMajor ? 1 : 0, \\\n    IsLower = UpLo == Lower ? 1 : 0 \\\n  }; \\\n  BlasIndex n=convert_index<BlasIndex>(size), lda=convert_index<BlasIndex>(lhsStride), incx=1, incy=1; \\\n  EIGTYPE beta(1); \\\n  const EIGTYPE *x_ptr; \\\n  char uplo=(IsRowMajor) ? (IsLower ? 'U' : 'L') : (IsLower ? 
'L' : 'U'); \\\n  SYMVVector x_tmp; \\\n  if (ConjugateRhs) { \\\n    Map<const SYMVVector, 0 > map_x(_rhs,size,1); \\\n    x_tmp=map_x.conjugate(); \\\n    x_ptr=x_tmp.data(); \\\n  } else x_ptr=_rhs; \\\n  BLASFUNC(&uplo, &n, &numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, &numext::real_ref(beta), (BLASTYPE*)res, &incy); \\\n}\\\n};\n\nEIGEN_BLAS_SYMV_SPECIALIZATION(double,   double, dsymv_)\nEIGEN_BLAS_SYMV_SPECIALIZATION(float,    float,  ssymv_)\nEIGEN_BLAS_SYMV_SPECIALIZATION(dcomplex, double, zhemv_)\nEIGEN_BLAS_SYMV_SPECIALIZATION(scomplex, float,  chemv_)\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_BLAS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/SelfadjointProduct.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SELFADJOINT_PRODUCT_H\n#define EIGEN_SELFADJOINT_PRODUCT_H\n\n/**********************************************************************\n* This file implements a self adjoint product: C += A A^T updating only\n* half of the selfadjoint matrix C.\n* It corresponds to the level 3 SYRK and level 2 SYR Blas routines.\n**********************************************************************/\n\nnamespace Eigen { \n\n\ntemplate<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>\nstruct selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo,ConjLhs,ConjRhs>\n{\n  static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, const Scalar& alpha)\n  {\n    internal::conj_if<ConjRhs> cj;\n    typedef Map<const Matrix<Scalar,Dynamic,1> > OtherMap;\n    typedef typename internal::conditional<ConjLhs,typename OtherMap::ConjugateReturnType,const OtherMap&>::type ConjLhsType;\n    for (Index i=0; i<size; ++i)\n    {\n      Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+(UpLo==Lower ? i : 0), (UpLo==Lower ? size-i : (i+1)))\n          += (alpha * cj(vecY[i])) * ConjLhsType(OtherMap(vecX+(UpLo==Lower ? i : 0),UpLo==Lower ? 
size-i : (i+1)));\n    }\n  }\n};\n\ntemplate<typename Scalar, typename Index, int UpLo, bool ConjLhs, bool ConjRhs>\nstruct selfadjoint_rank1_update<Scalar,Index,RowMajor,UpLo,ConjLhs,ConjRhs>\n{\n  static void run(Index size, Scalar* mat, Index stride, const Scalar* vecX, const Scalar* vecY, const Scalar& alpha)\n  {\n    selfadjoint_rank1_update<Scalar,Index,ColMajor,UpLo==Lower?Upper:Lower,ConjRhs,ConjLhs>::run(size,mat,stride,vecY,vecX,alpha);\n  }\n};\n\ntemplate<typename MatrixType, typename OtherType, int UpLo, bool OtherIsVector = OtherType::IsVectorAtCompileTime>\nstruct selfadjoint_product_selector;\n\ntemplate<typename MatrixType, typename OtherType, int UpLo>\nstruct selfadjoint_product_selector<MatrixType,OtherType,UpLo,true>\n{\n  static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha)\n  {\n    typedef typename MatrixType::Scalar Scalar;\n    typedef internal::blas_traits<OtherType> OtherBlasTraits;\n    typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType;\n    typedef typename internal::remove_all<ActualOtherType>::type _ActualOtherType;\n    typename internal::add_const_on_value_type<ActualOtherType>::type actualOther = OtherBlasTraits::extract(other.derived());\n\n    Scalar actualAlpha = alpha * OtherBlasTraits::extractScalarFactor(other.derived());\n\n    enum {\n      StorageOrder = (internal::traits<MatrixType>::Flags&RowMajorBit) ? RowMajor : ColMajor,\n      UseOtherDirectly = _ActualOtherType::InnerStrideAtCompileTime==1\n    };\n    internal::gemv_static_vector_if<Scalar,OtherType::SizeAtCompileTime,OtherType::MaxSizeAtCompileTime,!UseOtherDirectly> static_other;\n\n    ei_declare_aligned_stack_constructed_variable(Scalar, actualOtherPtr, other.size(),\n      (UseOtherDirectly ? 
const_cast<Scalar*>(actualOther.data()) : static_other.data()));\n      \n    if(!UseOtherDirectly)\n      Map<typename _ActualOtherType::PlainObject>(actualOtherPtr, actualOther.size()) = actualOther;\n    \n    selfadjoint_rank1_update<Scalar,Index,StorageOrder,UpLo,\n                              OtherBlasTraits::NeedToConjugate  && NumTraits<Scalar>::IsComplex,\n                            (!OtherBlasTraits::NeedToConjugate) && NumTraits<Scalar>::IsComplex>\n          ::run(other.size(), mat.data(), mat.outerStride(), actualOtherPtr, actualOtherPtr, actualAlpha);\n  }\n};\n\ntemplate<typename MatrixType, typename OtherType, int UpLo>\nstruct selfadjoint_product_selector<MatrixType,OtherType,UpLo,false>\n{\n  static void run(MatrixType& mat, const OtherType& other, const typename MatrixType::Scalar& alpha)\n  {\n    typedef typename MatrixType::Scalar Scalar;\n    typedef internal::blas_traits<OtherType> OtherBlasTraits;\n    typedef typename OtherBlasTraits::DirectLinearAccessType ActualOtherType;\n    typedef typename internal::remove_all<ActualOtherType>::type _ActualOtherType;\n    typename internal::add_const_on_value_type<ActualOtherType>::type actualOther = OtherBlasTraits::extract(other.derived());\n\n    Scalar actualAlpha = alpha * OtherBlasTraits::extractScalarFactor(other.derived());\n\n    enum {\n      IsRowMajor = (internal::traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0,\n      OtherIsRowMajor = _ActualOtherType::Flags&RowMajorBit ? 1 : 0\n    };\n\n    Index size = mat.cols();\n    Index depth = actualOther.cols();\n\n    typedef internal::gemm_blocking_space<IsRowMajor ? RowMajor : ColMajor,Scalar,Scalar,\n              MatrixType::MaxColsAtCompileTime, MatrixType::MaxColsAtCompileTime, _ActualOtherType::MaxColsAtCompileTime> BlockingType;\n\n    BlockingType blocking(size, size, depth, 1, false);\n\n\n    internal::general_matrix_matrix_triangular_product<Index,\n      Scalar, OtherIsRowMajor ? 
RowMajor : ColMajor,   OtherBlasTraits::NeedToConjugate  && NumTraits<Scalar>::IsComplex,\n      Scalar, OtherIsRowMajor ? ColMajor : RowMajor, (!OtherBlasTraits::NeedToConjugate) && NumTraits<Scalar>::IsComplex,\n      IsRowMajor ? RowMajor : ColMajor, UpLo>\n      ::run(size, depth,\n            &actualOther.coeffRef(0,0), actualOther.outerStride(), &actualOther.coeffRef(0,0), actualOther.outerStride(),\n            mat.data(), mat.outerStride(), actualAlpha, blocking);\n  }\n};\n\n// high level API\n\ntemplate<typename MatrixType, unsigned int UpLo>\ntemplate<typename DerivedU>\nEIGEN_DEVICE_FUNC SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>\n::rankUpdate(const MatrixBase<DerivedU>& u, const Scalar& alpha)\n{\n  selfadjoint_product_selector<MatrixType,DerivedU,UpLo>::run(_expression().const_cast_derived(), u.derived(), alpha);\n\n  return *this;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SELFADJOINT_PRODUCT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/SelfadjointRank2Update.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SELFADJOINTRANK2UPTADE_H\n#define EIGEN_SELFADJOINTRANK2UPTADE_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/* Optimized selfadjoint matrix += alpha * uv' + conj(alpha)*vu'\n * It corresponds to the Level2 syr2 BLAS routine\n */\n\ntemplate<typename Scalar, typename Index, typename UType, typename VType, int UpLo>\nstruct selfadjoint_rank2_update_selector;\n\ntemplate<typename Scalar, typename Index, typename UType, typename VType>\nstruct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Lower>\n{\n  static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)\n  {\n    const Index size = u.size();\n    for (Index i=0; i<size; ++i)\n    {\n      Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i+i, size-i) +=\n                        (numext::conj(alpha) * numext::conj(u.coeff(i))) * v.tail(size-i)\n                      + (alpha * numext::conj(v.coeff(i))) * u.tail(size-i);\n    }\n  }\n};\n\ntemplate<typename Scalar, typename Index, typename UType, typename VType>\nstruct selfadjoint_rank2_update_selector<Scalar,Index,UType,VType,Upper>\n{\n  static void run(Scalar* mat, Index stride, const UType& u, const VType& v, const Scalar& alpha)\n  {\n    const Index size = u.size();\n    for (Index i=0; i<size; ++i)\n      Map<Matrix<Scalar,Dynamic,1> >(mat+stride*i, i+1) +=\n                        (numext::conj(alpha)  * numext::conj(u.coeff(i))) * v.head(i+1)\n                      + (alpha * numext::conj(v.coeff(i))) * u.head(i+1);\n  }\n};\n\ntemplate<bool Cond, typename T> struct conj_expr_if\n  : conditional<!Cond, const T&,\n  
    CwiseUnaryOp<scalar_conjugate_op<typename traits<T>::Scalar>,T> > {};\n\n} // end namespace internal\n\ntemplate<typename MatrixType, unsigned int UpLo>\ntemplate<typename DerivedU, typename DerivedV>\nEIGEN_DEVICE_FUNC SelfAdjointView<MatrixType,UpLo>& SelfAdjointView<MatrixType,UpLo>\n::rankUpdate(const MatrixBase<DerivedU>& u, const MatrixBase<DerivedV>& v, const Scalar& alpha)\n{\n  typedef internal::blas_traits<DerivedU> UBlasTraits;\n  typedef typename UBlasTraits::DirectLinearAccessType ActualUType;\n  typedef typename internal::remove_all<ActualUType>::type _ActualUType;\n  typename internal::add_const_on_value_type<ActualUType>::type actualU = UBlasTraits::extract(u.derived());\n\n  typedef internal::blas_traits<DerivedV> VBlasTraits;\n  typedef typename VBlasTraits::DirectLinearAccessType ActualVType;\n  typedef typename internal::remove_all<ActualVType>::type _ActualVType;\n  typename internal::add_const_on_value_type<ActualVType>::type actualV = VBlasTraits::extract(v.derived());\n\n  // If MatrixType is row major, then we use the routine for lower triangular in the upper triangular case and\n  // vice versa, and take the complex conjugate of all coefficients and vector entries.\n\n  enum { IsRowMajor = (internal::traits<MatrixType>::Flags&RowMajorBit) ? 1 : 0 };\n  Scalar actualAlpha = alpha * UBlasTraits::extractScalarFactor(u.derived())\n                             * numext::conj(VBlasTraits::extractScalarFactor(v.derived()));\n  if (IsRowMajor)\n    actualAlpha = numext::conj(actualAlpha);\n\n  typedef typename internal::remove_all<typename internal::conj_expr_if<IsRowMajor ^ UBlasTraits::NeedToConjugate,_ActualUType>::type>::type UType;\n  typedef typename internal::remove_all<typename internal::conj_expr_if<IsRowMajor ^ VBlasTraits::NeedToConjugate,_ActualVType>::type>::type VType;\n  internal::selfadjoint_rank2_update_selector<Scalar, Index, UType, VType,\n    (IsRowMajor ? int(UpLo==Upper ? 
Lower : Upper) : UpLo)>\n    ::run(_expression().const_cast_derived().data(),_expression().outerStride(),UType(actualU),VType(actualV),actualAlpha);\n\n  return *this;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SELFADJOINTRANK2UPTADE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/TriangularMatrixMatrix.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_H\n#define EIGEN_TRIANGULAR_MATRIX_MATRIX_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n// template<typename Scalar, int mr, int StorageOrder, bool Conjugate, int Mode>\n// struct gemm_pack_lhs_triangular\n// {\n//   Matrix<Scalar,mr,mr,\n//   void operator()(Scalar* blockA, const EIGEN_RESTRICT Scalar* _lhs, int lhsStride, int depth, int rows)\n//   {\n//     conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;\n//     const_blas_data_mapper<Scalar, StorageOrder> lhs(_lhs,lhsStride);\n//     int count = 0;\n//     const int peeled_mc = (rows/mr)*mr;\n//     for(int i=0; i<peeled_mc; i+=mr)\n//     {\n//       for(int k=0; k<depth; k++)\n//         for(int w=0; w<mr; w++)\n//           blockA[count++] = cj(lhs(i+w, k));\n//     }\n//     for(int i=peeled_mc; i<rows; i++)\n//     {\n//       for(int k=0; k<depth; k++)\n//         blockA[count++] = cj(lhs(i, k));\n//     }\n//   }\n// };\n\n/* Optimized triangular matrix * matrix (_TRMM++) product built on top of\n * the general matrix matrix product.\n */\ntemplate <typename Scalar, typename Index,\n          int Mode, bool LhsIsTriangular,\n          int LhsStorageOrder, bool ConjugateLhs,\n          int RhsStorageOrder, bool ConjugateRhs,\n          int ResStorageOrder, int Version = Specialized>\nstruct product_triangular_matrix_matrix;\n\ntemplate <typename Scalar, typename Index,\n          int Mode, bool LhsIsTriangular,\n          int LhsStorageOrder, bool ConjugateLhs,\n          int RhsStorageOrder, bool ConjugateRhs, int Version>\nstruct 
product_triangular_matrix_matrix<Scalar,Index,Mode,LhsIsTriangular,\n                                           LhsStorageOrder,ConjugateLhs,\n                                           RhsStorageOrder,ConjugateRhs,RowMajor,Version>\n{\n  static EIGEN_STRONG_INLINE void run(\n    Index rows, Index cols, Index depth,\n    const Scalar* lhs, Index lhsStride,\n    const Scalar* rhs, Index rhsStride,\n    Scalar* res,       Index resStride,\n    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)\n  {\n    product_triangular_matrix_matrix<Scalar, Index,\n      (Mode&(UnitDiag|ZeroDiag)) | ((Mode&Upper) ? Lower : Upper),\n      (!LhsIsTriangular),\n      RhsStorageOrder==RowMajor ? ColMajor : RowMajor,\n      ConjugateRhs,\n      LhsStorageOrder==RowMajor ? ColMajor : RowMajor,\n      ConjugateLhs,\n      ColMajor>\n      ::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resStride, alpha, blocking);\n  }\n};\n\n// implements col-major += alpha * op(triangular) * op(general)\ntemplate <typename Scalar, typename Index, int Mode,\n          int LhsStorageOrder, bool ConjugateLhs,\n          int RhsStorageOrder, bool ConjugateRhs, int Version>\nstruct product_triangular_matrix_matrix<Scalar,Index,Mode,true,\n                                           LhsStorageOrder,ConjugateLhs,\n                                           RhsStorageOrder,ConjugateRhs,ColMajor,Version>\n{\n  \n  typedef gebp_traits<Scalar,Scalar> Traits;\n  enum {\n    SmallPanelWidth   = 2 * EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),\n    IsLower = (Mode&Lower) == Lower,\n    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 
0 : 1\n  };\n\n  static EIGEN_DONT_INLINE void run(\n    Index _rows, Index _cols, Index _depth,\n    const Scalar* _lhs, Index lhsStride,\n    const Scalar* _rhs, Index rhsStride,\n    Scalar* res,        Index resStride,\n    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);\n};\n\ntemplate <typename Scalar, typename Index, int Mode,\n          int LhsStorageOrder, bool ConjugateLhs,\n          int RhsStorageOrder, bool ConjugateRhs, int Version>\nEIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,true,\n                                                        LhsStorageOrder,ConjugateLhs,\n                                                        RhsStorageOrder,ConjugateRhs,ColMajor,Version>::run(\n    Index _rows, Index _cols, Index _depth,\n    const Scalar* _lhs, Index lhsStride,\n    const Scalar* _rhs, Index rhsStride,\n    Scalar* _res,        Index resStride,\n    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)\n  {\n    // strip zeros\n    Index diagSize  = (std::min)(_rows,_depth);\n    Index rows      = IsLower ? _rows : diagSize;\n    Index depth     = IsLower ? 
diagSize : _depth;\n    Index cols      = _cols;\n    \n    typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;\n    typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;\n    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;\n    LhsMapper lhs(_lhs,lhsStride);\n    RhsMapper rhs(_rhs,rhsStride);\n    ResMapper res(_res, resStride);\n\n    Index kc = blocking.kc();                   // cache block size along the K direction\n    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction\n    // The small panel size must not be larger than blocking size.\n    // Usually this should never be the case because SmallPanelWidth^2 is very small\n    // compared to L2 cache size, but let's be safe:\n    Index panelWidth = (std::min)(Index(SmallPanelWidth),(std::min)(kc,mc));\n\n    std::size_t sizeA = kc*mc;\n    std::size_t sizeB = kc*cols;\n\n    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());\n    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());\n\n    Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,LhsStorageOrder> triangularBuffer((internal::constructor_without_unaligned_array_assert()));\n    triangularBuffer.setZero();\n    if((Mode&ZeroDiag)==ZeroDiag)\n      triangularBuffer.diagonal().setZero();\n    else\n      triangularBuffer.diagonal().setOnes();\n\n    gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;\n    gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;\n    gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder> pack_rhs;\n\n    for(Index k2=IsLower ? depth : 0;\n        IsLower ? k2>0 : k2<depth;\n        IsLower ? k2-=kc : k2+=kc)\n    {\n      Index actual_kc = (std::min)(IsLower ? k2 : depth-k2, kc);\n      Index actual_k2 = IsLower ? 
k2-actual_kc : k2;\n\n      // align blocks with the end of the triangular part for trapezoidal lhs\n      if((!IsLower)&&(k2<rows)&&(k2+actual_kc>rows))\n      {\n        actual_kc = rows-k2;\n        k2 = k2+actual_kc-kc;\n      }\n\n      pack_rhs(blockB, rhs.getSubMapper(actual_k2,0), actual_kc, cols);\n\n      // the selected lhs's panel has to be split in three different parts:\n      //  1 - the part which is zero => skip it\n      //  2 - the diagonal block => special kernel\n      //  3 - the dense panel below (lower case) or above (upper case) the diagonal block => GEPP\n\n      // the block diagonal, if any:\n      if(IsLower || actual_k2<rows)\n      {\n        // for each small vertical panels of lhs\n        for (Index k1=0; k1<actual_kc; k1+=panelWidth)\n        {\n          Index actualPanelWidth = std::min<Index>(actual_kc-k1, panelWidth);\n          Index lengthTarget = IsLower ? actual_kc-k1-actualPanelWidth : k1;\n          Index startBlock   = actual_k2+k1;\n          Index blockBOffset = k1;\n\n          // => GEBP with the micro triangular block\n          // The trick is to pack this micro block while filling the opposite triangular part with zeros.\n          // To this end we do an extra triangular copy to a small temporary buffer\n          for (Index k=0;k<actualPanelWidth;++k)\n          {\n            if (SetDiag)\n              triangularBuffer.coeffRef(k,k) = lhs(startBlock+k,startBlock+k);\n            for (Index i=IsLower ? k+1 : 0; IsLower ? 
i<actualPanelWidth : i<k; ++i)\n              triangularBuffer.coeffRef(i,k) = lhs(startBlock+i,startBlock+k);\n          }\n          pack_lhs(blockA, LhsMapper(triangularBuffer.data(), triangularBuffer.outerStride()), actualPanelWidth, actualPanelWidth);\n\n          gebp_kernel(res.getSubMapper(startBlock, 0), blockA, blockB,\n                      actualPanelWidth, actualPanelWidth, cols, alpha,\n                      actualPanelWidth, actual_kc, 0, blockBOffset);\n\n          // GEBP with remaining micro panel\n          if (lengthTarget>0)\n          {\n            Index startTarget  = IsLower ? actual_k2+k1+actualPanelWidth : actual_k2;\n\n            pack_lhs(blockA, lhs.getSubMapper(startTarget,startBlock), actualPanelWidth, lengthTarget);\n\n            gebp_kernel(res.getSubMapper(startTarget, 0), blockA, blockB,\n                        lengthTarget, actualPanelWidth, cols, alpha,\n                        actualPanelWidth, actual_kc, 0, blockBOffset);\n          }\n        }\n      }\n      // the part below (lower case) or above (upper case) the diagonal => GEPP\n      {\n        Index start = IsLower ? k2 : 0;\n        Index end   = IsLower ? 
rows : (std::min)(actual_k2,rows);\n        for(Index i2=start; i2<end; i2+=mc)\n        {\n          const Index actual_mc = (std::min)(i2+mc,end)-i2;\n          gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr,Traits::LhsProgress, LhsStorageOrder,false>()\n            (blockA, lhs.getSubMapper(i2, actual_k2), actual_kc, actual_mc);\n\n          gebp_kernel(res.getSubMapper(i2, 0), blockA, blockB, actual_mc,\n                      actual_kc, cols, alpha, -1, -1, 0, 0);\n        }\n      }\n    }\n  }\n\n// implements col-major += alpha * op(general) * op(triangular)\ntemplate <typename Scalar, typename Index, int Mode,\n          int LhsStorageOrder, bool ConjugateLhs,\n          int RhsStorageOrder, bool ConjugateRhs, int Version>\nstruct product_triangular_matrix_matrix<Scalar,Index,Mode,false,\n                                        LhsStorageOrder,ConjugateLhs,\n                                        RhsStorageOrder,ConjugateRhs,ColMajor,Version>\n{\n  typedef gebp_traits<Scalar,Scalar> Traits;\n  enum {\n    SmallPanelWidth   = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),\n    IsLower = (Mode&Lower) == Lower,\n    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 
0 : 1\n  };\n\n  static EIGEN_DONT_INLINE void run(\n    Index _rows, Index _cols, Index _depth,\n    const Scalar* _lhs, Index lhsStride,\n    const Scalar* _rhs, Index rhsStride,\n    Scalar* res,        Index resStride,\n    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking);\n};\n\ntemplate <typename Scalar, typename Index, int Mode,\n          int LhsStorageOrder, bool ConjugateLhs,\n          int RhsStorageOrder, bool ConjugateRhs, int Version>\nEIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,\n                                                        LhsStorageOrder,ConjugateLhs,\n                                                        RhsStorageOrder,ConjugateRhs,ColMajor,Version>::run(\n    Index _rows, Index _cols, Index _depth,\n    const Scalar* _lhs, Index lhsStride,\n    const Scalar* _rhs, Index rhsStride,\n    Scalar* _res,        Index resStride,\n    const Scalar& alpha, level3_blocking<Scalar,Scalar>& blocking)\n  {\n    const Index PacketBytes = packet_traits<Scalar>::size*sizeof(Scalar);\n    // strip zeros\n    Index diagSize  = (std::min)(_cols,_depth);\n    Index rows      = _rows;\n    Index depth     = IsLower ? _depth : diagSize;\n    Index cols      = IsLower ? 
diagSize : _cols;\n    \n    typedef const_blas_data_mapper<Scalar, Index, LhsStorageOrder> LhsMapper;\n    typedef const_blas_data_mapper<Scalar, Index, RhsStorageOrder> RhsMapper;\n    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;\n    LhsMapper lhs(_lhs,lhsStride);\n    RhsMapper rhs(_rhs,rhsStride);\n    ResMapper res(_res, resStride);\n\n    Index kc = blocking.kc();                   // cache block size along the K direction\n    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction\n\n    std::size_t sizeA = kc*mc;\n    std::size_t sizeB = kc*cols+EIGEN_MAX_ALIGN_BYTES/sizeof(Scalar);\n\n    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());\n    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());\n\n    Matrix<Scalar,SmallPanelWidth,SmallPanelWidth,RhsStorageOrder> triangularBuffer((internal::constructor_without_unaligned_array_assert()));\n    triangularBuffer.setZero();\n    if((Mode&ZeroDiag)==ZeroDiag)\n      triangularBuffer.diagonal().setZero();\n    else\n      triangularBuffer.diagonal().setOnes();\n\n    gebp_kernel<Scalar, Scalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;\n    gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;\n    gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder> pack_rhs;\n    gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr,RhsStorageOrder,false,true> pack_rhs_panel;\n\n    for(Index k2=IsLower ? 0 : depth;\n        IsLower ? k2<depth  : k2>0;\n        IsLower ? k2+=kc   : k2-=kc)\n    {\n      Index actual_kc = (std::min)(IsLower ? depth-k2 : k2, kc);\n      Index actual_k2 = IsLower ? 
k2 : k2-actual_kc;\n\n      // align blocks with the end of the triangular part for trapezoidal rhs\n      if(IsLower && (k2<cols) && (actual_k2+actual_kc>cols))\n      {\n        actual_kc = cols-k2;\n        k2 = actual_k2 + actual_kc - kc;\n      }\n\n      // remaining size\n      Index rs = IsLower ? (std::min)(cols,actual_k2) : cols - k2;\n      // size of the triangular part\n      Index ts = (IsLower && actual_k2>=cols) ? 0 : actual_kc;\n\n      Scalar* geb = blockB+ts*ts;\n      geb = geb + internal::first_aligned<PacketBytes>(geb,PacketBytes/sizeof(Scalar));\n\n      pack_rhs(geb, rhs.getSubMapper(actual_k2,IsLower ? 0 : k2), actual_kc, rs);\n\n      // pack the triangular part of the rhs padding the unrolled blocks with zeros\n      if(ts>0)\n      {\n        for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)\n        {\n          Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);\n          Index actual_j2 = actual_k2 + j2;\n          Index panelOffset = IsLower ? j2+actualPanelWidth : 0;\n          Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;\n          // general part\n          pack_rhs_panel(blockB+j2*actual_kc,\n                         rhs.getSubMapper(actual_k2+panelOffset, actual_j2),\n                         panelLength, actualPanelWidth,\n                         actual_kc, panelOffset);\n\n          // append the triangular part via a temporary buffer\n          for (Index j=0;j<actualPanelWidth;++j)\n          {\n            if (SetDiag)\n              triangularBuffer.coeffRef(j,j) = rhs(actual_j2+j,actual_j2+j);\n            for (Index k=IsLower ? j+1 : 0; IsLower ? 
k<actualPanelWidth : k<j; ++k)\n              triangularBuffer.coeffRef(k,j) = rhs(actual_j2+k,actual_j2+j);\n          }\n\n          pack_rhs_panel(blockB+j2*actual_kc,\n                         RhsMapper(triangularBuffer.data(), triangularBuffer.outerStride()),\n                         actualPanelWidth, actualPanelWidth,\n                         actual_kc, j2);\n        }\n      }\n\n      for (Index i2=0; i2<rows; i2+=mc)\n      {\n        const Index actual_mc = (std::min)(mc,rows-i2);\n        pack_lhs(blockA, lhs.getSubMapper(i2, actual_k2), actual_kc, actual_mc);\n\n        // triangular kernel\n        if(ts>0)\n        {\n          for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)\n          {\n            Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);\n            Index panelLength = IsLower ? actual_kc-j2 : j2+actualPanelWidth;\n            Index blockOffset = IsLower ? j2 : 0;\n\n            gebp_kernel(res.getSubMapper(i2, actual_k2 + j2),\n                        blockA, blockB+j2*actual_kc,\n                        actual_mc, panelLength, actualPanelWidth,\n                        alpha,\n                        actual_kc, actual_kc,  // strides\n                        blockOffset, blockOffset);// offsets\n          }\n        }\n        gebp_kernel(res.getSubMapper(i2, IsLower ? 
0 : k2),\n                    blockA, geb, actual_mc, actual_kc, rs,\n                    alpha,\n                    -1, -1, 0, 0);\n      }\n    }\n  }\n\n/***************************************************************************\n* Wrapper to product_triangular_matrix_matrix\n***************************************************************************/\n\n} // end namespace internal\n\nnamespace internal {\ntemplate<int Mode, bool LhsIsTriangular, typename Lhs, typename Rhs>\nstruct triangular_product_impl<Mode,LhsIsTriangular,Lhs,false,Rhs,false>\n{\n  template<typename Dest> static void run(Dest& dst, const Lhs &a_lhs, const Rhs &a_rhs, const typename Dest::Scalar& alpha)\n  {\n    typedef typename Dest::Scalar     Scalar;\n    \n    typedef internal::blas_traits<Lhs> LhsBlasTraits;\n    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;\n    typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;\n    typedef internal::blas_traits<Rhs> RhsBlasTraits;\n    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;\n    typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;\n    \n    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);\n    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);\n\n    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)\n                               * RhsBlasTraits::extractScalarFactor(a_rhs);\n\n    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,Scalar,Scalar,\n              Lhs::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime, Lhs::MaxColsAtCompileTime,4> BlockingType;\n\n    enum { IsLower = (Mode&Lower) == Lower };\n    Index stripedRows  = ((!LhsIsTriangular) || (IsLower))  ? lhs.rows() : (std::min)(lhs.rows(),lhs.cols());\n    Index stripedCols  = ((LhsIsTriangular)  || (!IsLower)) ? 
rhs.cols() : (std::min)(rhs.cols(),rhs.rows());\n    Index stripedDepth = LhsIsTriangular ? ((!IsLower) ? lhs.cols() : (std::min)(lhs.cols(),lhs.rows()))\n                                         : ((IsLower)  ? rhs.rows() : (std::min)(rhs.rows(),rhs.cols()));\n\n    BlockingType blocking(stripedRows, stripedCols, stripedDepth, 1, false);\n\n    internal::product_triangular_matrix_matrix<Scalar, Index,\n      Mode, LhsIsTriangular,\n      (internal::traits<ActualLhsTypeCleaned>::Flags&RowMajorBit) ? RowMajor : ColMajor, LhsBlasTraits::NeedToConjugate,\n      (internal::traits<ActualRhsTypeCleaned>::Flags&RowMajorBit) ? RowMajor : ColMajor, RhsBlasTraits::NeedToConjugate,\n      (internal::traits<Dest          >::Flags&RowMajorBit) ? RowMajor : ColMajor>\n      ::run(\n        stripedRows, stripedCols, stripedDepth,   // sizes\n        &lhs.coeffRef(0,0), lhs.outerStride(),    // lhs info\n        &rhs.coeffRef(0,0), rhs.outerStride(),    // rhs info\n        &dst.coeffRef(0,0), dst.outerStride(),    // result info\n        actualAlpha, blocking\n      );\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/TriangularMatrixMatrix_BLAS.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to BLAS F77\n *   Triangular matrix * matrix product functionality based on ?TRMM.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H\n#define EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n\ntemplate <typename Scalar, typename Index,\n          int Mode, bool LhsIsTriangular,\n          int LhsStorageOrder, bool ConjugateLhs,\n          int RhsStorageOrder, bool ConjugateRhs,\n          int ResStorageOrder>\nstruct product_triangular_matrix_matrix_trmm :\n       product_triangular_matrix_matrix<Scalar,Index,Mode,\n          LhsIsTriangular,LhsStorageOrder,ConjugateLhs,\n          RhsStorageOrder, ConjugateRhs, ResStorageOrder, BuiltIn> {};\n\n\n// try to go to BLAS specialization\n#define EIGEN_BLAS_TRMM_SPECIALIZE(Scalar, LhsIsTriangular) \\\ntemplate <typename Index, int Mode, \\\n          int LhsStorageOrder, bool ConjugateLhs, \\\n          int RhsStorageOrder, bool ConjugateRhs> \\\nstruct product_triangular_matrix_matrix<Scalar,Index, Mode, LhsIsTriangular, \\\n           LhsStorageOrder,ConjugateLhs, RhsStorageOrder,ConjugateRhs,ColMajor,Specialized> { \\\n  static inline void run(Index _rows, Index _cols, Index _depth, const Scalar* _lhs, Index lhsStride,\\\n    const Scalar* _rhs, Index rhsStride, 
Scalar* res, Index resStride, Scalar alpha, level3_blocking<Scalar,Scalar>& blocking) { \\\n      product_triangular_matrix_matrix_trmm<Scalar,Index,Mode, \\\n        LhsIsTriangular,LhsStorageOrder,ConjugateLhs, \\\n        RhsStorageOrder, ConjugateRhs, ColMajor>::run( \\\n        _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \\\n  } \\\n};\n\nEIGEN_BLAS_TRMM_SPECIALIZE(double, true)\nEIGEN_BLAS_TRMM_SPECIALIZE(double, false)\nEIGEN_BLAS_TRMM_SPECIALIZE(dcomplex, true)\nEIGEN_BLAS_TRMM_SPECIALIZE(dcomplex, false)\nEIGEN_BLAS_TRMM_SPECIALIZE(float, true)\nEIGEN_BLAS_TRMM_SPECIALIZE(float, false)\nEIGEN_BLAS_TRMM_SPECIALIZE(scomplex, true)\nEIGEN_BLAS_TRMM_SPECIALIZE(scomplex, false)\n\n// implements col-major += alpha * op(triangular) * op(general)\n#define EIGEN_BLAS_TRMM_L(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \\\ntemplate <typename Index, int Mode, \\\n          int LhsStorageOrder, bool ConjugateLhs, \\\n          int RhsStorageOrder, bool ConjugateRhs> \\\nstruct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,true, \\\n         LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor> \\\n{ \\\n  enum { \\\n    IsLower = (Mode&Lower) == Lower, \\\n    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \\\n    IsUnitDiag  = (Mode&UnitDiag) ? 1 : 0, \\\n    IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \\\n    LowUp = IsLower ? Lower : Upper, \\\n    conjA = ((LhsStorageOrder==ColMajor) && ConjugateLhs) ? 1 : 0 \\\n  }; \\\n\\\n  static void run( \\\n    Index _rows, Index _cols, Index _depth, \\\n    const EIGTYPE* _lhs, Index lhsStride, \\\n    const EIGTYPE* _rhs, Index rhsStride, \\\n    EIGTYPE* res,        Index resStride, \\\n    EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \\\n  { \\\n   Index diagSize  = (std::min)(_rows,_depth); \\\n   Index rows      = IsLower ? _rows : diagSize; \\\n   Index depth     = IsLower ? 
diagSize : _depth; \\\n   Index cols      = _cols; \\\n\\\n   typedef Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder> MatrixLhs; \\\n   typedef Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> MatrixRhs; \\\n\\\n/* Non-square case - doesn't fit to BLAS ?TRMM. Fall to default triangular product or call BLAS ?GEMM*/ \\\n   if (rows != depth) { \\\n\\\n     /* FIXME handle mkl_domain_get_max_threads */ \\\n     /*int nthr = mkl_domain_get_max_threads(EIGEN_BLAS_DOMAIN_BLAS);*/ int nthr = 1;\\\n\\\n     if (((nthr==1) && (((std::max)(rows,depth)-diagSize)/(double)diagSize < 0.5))) { \\\n     /* Most likely no benefit to call TRMM or GEMM from BLAS */ \\\n       product_triangular_matrix_matrix<EIGTYPE,Index,Mode,true, \\\n       LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \\\n           _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \\\n     /*std::cout << \"TRMM_L: A is not square! Go to Eigen TRMM implementation!\\n\";*/ \\\n     } else { \\\n     /* Make sense to call GEMM */ \\\n       Map<const MatrixLhs, 0, OuterStride<> > lhsMap(_lhs,rows,depth,OuterStride<>(lhsStride)); \\\n       MatrixLhs aa_tmp=lhsMap.template triangularView<Mode>(); \\\n       BlasIndex aStride = convert_index<BlasIndex>(aa_tmp.outerStride()); \\\n       gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> gemm_blocking(_rows,_cols,_depth, 1, true); \\\n       general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \\\n       rows, cols, depth, aa_tmp.data(), aStride, _rhs, rhsStride, res, resStride, alpha, gemm_blocking, 0); \\\n\\\n     /*std::cout << \"TRMM_L: A is not square! Go to BLAS GEMM implementation! 
\" << nthr<<\" \\n\";*/ \\\n     } \\\n     return; \\\n   } \\\n   char side = 'L', transa, uplo, diag = 'N'; \\\n   EIGTYPE *b; \\\n   const EIGTYPE *a; \\\n   BlasIndex m, n, lda, ldb; \\\n\\\n/* Set m, n */ \\\n   m = convert_index<BlasIndex>(diagSize); \\\n   n = convert_index<BlasIndex>(cols); \\\n\\\n/* Set trans */ \\\n   transa = (LhsStorageOrder==RowMajor) ? ((ConjugateLhs) ? 'C' : 'T') : 'N'; \\\n\\\n/* Set b, ldb */ \\\n   Map<const MatrixRhs, 0, OuterStride<> > rhs(_rhs,depth,cols,OuterStride<>(rhsStride)); \\\n   MatrixX##EIGPREFIX b_tmp; \\\n\\\n   if (ConjugateRhs) b_tmp = rhs.conjugate(); else b_tmp = rhs; \\\n   b = b_tmp.data(); \\\n   ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \\\n\\\n/* Set uplo */ \\\n   uplo = IsLower ? 'L' : 'U'; \\\n   if (LhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \\\n/* Set a, lda */ \\\n   Map<const MatrixLhs, 0, OuterStride<> > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \\\n   MatrixLhs a_tmp; \\\n\\\n   if ((conjA!=0) || (SetDiag==0)) { \\\n     if (conjA) a_tmp = lhs.conjugate(); else a_tmp = lhs; \\\n     if (IsZeroDiag) \\\n       a_tmp.diagonal().setZero(); \\\n     else if (IsUnitDiag) \\\n       a_tmp.diagonal().setOnes();\\\n     a = a_tmp.data(); \\\n     lda = convert_index<BlasIndex>(a_tmp.outerStride()); \\\n   } else { \\\n     a = _lhs; \\\n     lda = convert_index<BlasIndex>(lhsStride); \\\n   } \\\n   /*std::cout << \"TRMM_L: A is square! Go to BLAS TRMM implementation! 
\\n\";*/ \\\n/* call ?trmm*/ \\\n   BLASPREFIX##trmm_(&side, &uplo, &transa, &diag, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)b, &ldb); \\\n\\\n/* Add op(a_triangular)*b into res*/ \\\n   Map<MatrixX##EIGPREFIX, 0, OuterStride<> > res_tmp(res,rows,cols,OuterStride<>(resStride)); \\\n   res_tmp=res_tmp+b_tmp; \\\n  } \\\n};\n\nEIGEN_BLAS_TRMM_L(double, double, d, d)\nEIGEN_BLAS_TRMM_L(dcomplex, double, cd, z)\nEIGEN_BLAS_TRMM_L(float, float, f, s)\nEIGEN_BLAS_TRMM_L(scomplex, float, cf, c)\n\n// implements col-major += alpha * op(general) * op(triangular)\n#define EIGEN_BLAS_TRMM_R(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \\\ntemplate <typename Index, int Mode, \\\n          int LhsStorageOrder, bool ConjugateLhs, \\\n          int RhsStorageOrder, bool ConjugateRhs> \\\nstruct product_triangular_matrix_matrix_trmm<EIGTYPE,Index,Mode,false, \\\n         LhsStorageOrder,ConjugateLhs,RhsStorageOrder,ConjugateRhs,ColMajor> \\\n{ \\\n  enum { \\\n    IsLower = (Mode&Lower) == Lower, \\\n    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \\\n    IsUnitDiag  = (Mode&UnitDiag) ? 1 : 0, \\\n    IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \\\n    LowUp = IsLower ? Lower : Upper, \\\n    conjA = ((RhsStorageOrder==ColMajor) && ConjugateRhs) ? 1 : 0 \\\n  }; \\\n\\\n  static void run( \\\n    Index _rows, Index _cols, Index _depth, \\\n    const EIGTYPE* _lhs, Index lhsStride, \\\n    const EIGTYPE* _rhs, Index rhsStride, \\\n    EIGTYPE* res,        Index resStride, \\\n    EIGTYPE alpha, level3_blocking<EIGTYPE,EIGTYPE>& blocking) \\\n  { \\\n   Index diagSize  = (std::min)(_cols,_depth); \\\n   Index rows      = _rows; \\\n   Index depth     = IsLower ? _depth : diagSize; \\\n   Index cols      = IsLower ? diagSize : _cols; \\\n\\\n   typedef Matrix<EIGTYPE, Dynamic, Dynamic, LhsStorageOrder> MatrixLhs; \\\n   typedef Matrix<EIGTYPE, Dynamic, Dynamic, RhsStorageOrder> MatrixRhs; \\\n\\\n/* Non-square case - doesn't fit to BLAS ?TRMM. 
Fall to default triangular product or call BLAS ?GEMM*/ \\\n   if (cols != depth) { \\\n\\\n     int nthr = 1 /*mkl_domain_get_max_threads(EIGEN_BLAS_DOMAIN_BLAS)*/; \\\n\\\n     if ((nthr==1) && (((std::max)(cols,depth)-diagSize)/(double)diagSize < 0.5)) { \\\n     /* Most likely no benefit to call TRMM or GEMM from BLAS*/ \\\n       product_triangular_matrix_matrix<EIGTYPE,Index,Mode,false, \\\n       LhsStorageOrder,ConjugateLhs, RhsStorageOrder, ConjugateRhs, ColMajor, BuiltIn>::run( \\\n           _rows, _cols, _depth, _lhs, lhsStride, _rhs, rhsStride, res, resStride, alpha, blocking); \\\n       /*std::cout << \"TRMM_R: A is not square! Go to Eigen TRMM implementation!\\n\";*/ \\\n     } else { \\\n     /* Make sense to call GEMM */ \\\n       Map<const MatrixRhs, 0, OuterStride<> > rhsMap(_rhs,depth,cols, OuterStride<>(rhsStride)); \\\n       MatrixRhs aa_tmp=rhsMap.template triangularView<Mode>(); \\\n       BlasIndex aStride = convert_index<BlasIndex>(aa_tmp.outerStride()); \\\n       gemm_blocking_space<ColMajor,EIGTYPE,EIGTYPE,Dynamic,Dynamic,Dynamic> gemm_blocking(_rows,_cols,_depth, 1, true); \\\n       general_matrix_matrix_product<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,RhsStorageOrder,ConjugateRhs,ColMajor>::run( \\\n       rows, cols, depth, _lhs, lhsStride, aa_tmp.data(), aStride, res, resStride, alpha, gemm_blocking, 0); \\\n\\\n     /*std::cout << \"TRMM_R: A is not square! Go to BLAS GEMM implementation! \" << nthr<<\" \\n\";*/ \\\n     } \\\n     return; \\\n   } \\\n   char side = 'R', transa, uplo, diag = 'N'; \\\n   EIGTYPE *b; \\\n   const EIGTYPE *a; \\\n   BlasIndex m, n, lda, ldb; \\\n\\\n/* Set m, n */ \\\n   m = convert_index<BlasIndex>(rows); \\\n   n = convert_index<BlasIndex>(diagSize); \\\n\\\n/* Set trans */ \\\n   transa = (RhsStorageOrder==RowMajor) ? ((ConjugateRhs) ? 
'C' : 'T') : 'N'; \\\n\\\n/* Set b, ldb */ \\\n   Map<const MatrixLhs, 0, OuterStride<> > lhs(_lhs,rows,depth,OuterStride<>(lhsStride)); \\\n   MatrixX##EIGPREFIX b_tmp; \\\n\\\n   if (ConjugateLhs) b_tmp = lhs.conjugate(); else b_tmp = lhs; \\\n   b = b_tmp.data(); \\\n   ldb = convert_index<BlasIndex>(b_tmp.outerStride()); \\\n\\\n/* Set uplo */ \\\n   uplo = IsLower ? 'L' : 'U'; \\\n   if (RhsStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \\\n/* Set a, lda */ \\\n   Map<const MatrixRhs, 0, OuterStride<> > rhs(_rhs,depth,cols, OuterStride<>(rhsStride)); \\\n   MatrixRhs a_tmp; \\\n\\\n   if ((conjA!=0) || (SetDiag==0)) { \\\n     if (conjA) a_tmp = rhs.conjugate(); else a_tmp = rhs; \\\n     if (IsZeroDiag) \\\n       a_tmp.diagonal().setZero(); \\\n     else if (IsUnitDiag) \\\n       a_tmp.diagonal().setOnes();\\\n     a = a_tmp.data(); \\\n     lda = convert_index<BlasIndex>(a_tmp.outerStride()); \\\n   } else { \\\n     a = _rhs; \\\n     lda = convert_index<BlasIndex>(rhsStride); \\\n   } \\\n   /*std::cout << \"TRMM_R: A is square! Go to BLAS TRMM implementation! \\n\";*/ \\\n/* call ?trmm*/ \\\n   BLASPREFIX##trmm_(&side, &uplo, &transa, &diag, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)b, &ldb); \\\n\\\n/* Add op(a_triangular)*b into res*/ \\\n   Map<MatrixX##EIGPREFIX, 0, OuterStride<> > res_tmp(res,rows,cols,OuterStride<>(resStride)); \\\n   res_tmp=res_tmp+b_tmp; \\\n  } \\\n};\n\nEIGEN_BLAS_TRMM_R(double, double, d, d)\nEIGEN_BLAS_TRMM_R(dcomplex, double, cd, z)\nEIGEN_BLAS_TRMM_R(float, float, f, s)\nEIGEN_BLAS_TRMM_R(scomplex, float, cf, c)\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_TRIANGULAR_MATRIX_MATRIX_BLAS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/TriangularMatrixVector.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_TRIANGULARMATRIXVECTOR_H\n#define EIGEN_TRIANGULARMATRIXVECTOR_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder, int Version=Specialized>\nstruct triangular_matrix_vector_product;\n\ntemplate<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int Version>\nstruct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor,Version>\n{\n  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;\n  enum {\n    IsLower = ((Mode&Lower)==Lower),\n    HasUnitDiag = (Mode & UnitDiag)==UnitDiag,\n    HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag\n  };\n  static EIGEN_DONT_INLINE  void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,\n                                     const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const RhsScalar& alpha);\n};\n\ntemplate<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int Version>\nEIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,ColMajor,Version>\n  ::run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,\n        const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const RhsScalar& alpha)\n  {\n    static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;\n    Index size = (std::min)(_rows,_cols);\n    Index rows = IsLower ? 
_rows : (std::min)(_rows,_cols);\n    Index cols = IsLower ? (std::min)(_rows,_cols) : _cols;\n\n    typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> > LhsMap;\n    const LhsMap lhs(_lhs,rows,cols,OuterStride<>(lhsStride));\n    typename conj_expr_if<ConjLhs,LhsMap>::type cjLhs(lhs);\n\n    typedef Map<const Matrix<RhsScalar,Dynamic,1>, 0, InnerStride<> > RhsMap;\n    const RhsMap rhs(_rhs,cols,InnerStride<>(rhsIncr));\n    typename conj_expr_if<ConjRhs,RhsMap>::type cjRhs(rhs);\n\n    typedef Map<Matrix<ResScalar,Dynamic,1> > ResMap;\n    ResMap res(_res,rows);\n\n    typedef const_blas_data_mapper<LhsScalar,Index,ColMajor> LhsMapper;\n    typedef const_blas_data_mapper<RhsScalar,Index,RowMajor> RhsMapper;\n\n    for (Index pi=0; pi<size; pi+=PanelWidth)\n    {\n      Index actualPanelWidth = (std::min)(PanelWidth, size-pi);\n      for (Index k=0; k<actualPanelWidth; ++k)\n      {\n        Index i = pi + k;\n        Index s = IsLower ? ((HasUnitDiag||HasZeroDiag) ? i+1 : i ) : pi;\n        Index r = IsLower ? actualPanelWidth-k : k+1;\n        if ((!(HasUnitDiag||HasZeroDiag)) || (--r)>0)\n          res.segment(s,r) += (alpha * cjRhs.coeff(i)) * cjLhs.col(i).segment(s,r);\n        if (HasUnitDiag)\n          res.coeffRef(i) += alpha * cjRhs.coeff(i);\n      }\n      Index r = IsLower ? rows - pi - actualPanelWidth : pi;\n      if (r>0)\n      {\n        Index s = IsLower ? 
pi+actualPanelWidth : 0;\n        general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjLhs,RhsScalar,RhsMapper,ConjRhs,BuiltIn>::run(\n            r, actualPanelWidth,\n            LhsMapper(&lhs.coeffRef(s,pi), lhsStride),\n            RhsMapper(&rhs.coeffRef(pi), rhsIncr),\n            &res.coeffRef(s), resIncr, alpha);\n      }\n    }\n    if((!IsLower) && cols>size)\n    {\n      general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,ConjLhs,RhsScalar,RhsMapper,ConjRhs>::run(\n          rows, cols-size,\n          LhsMapper(&lhs.coeffRef(0,size), lhsStride),\n          RhsMapper(&rhs.coeffRef(size), rhsIncr),\n          _res, resIncr, alpha);\n    }\n  }\n\ntemplate<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs,int Version>\nstruct triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor,Version>\n{\n  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;\n  enum {\n    IsLower = ((Mode&Lower)==Lower),\n    HasUnitDiag = (Mode & UnitDiag)==UnitDiag,\n    HasZeroDiag = (Mode & ZeroDiag)==ZeroDiag\n  };\n  static EIGEN_DONT_INLINE void run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,\n                                    const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha);\n};\n\ntemplate<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs,int Version>\nEIGEN_DONT_INLINE void triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,RowMajor,Version>\n  ::run(Index _rows, Index _cols, const LhsScalar* _lhs, Index lhsStride,\n        const RhsScalar* _rhs, Index rhsIncr, ResScalar* _res, Index resIncr, const ResScalar& alpha)\n  {\n    static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;\n    Index diagSize = (std::min)(_rows,_cols);\n    Index rows = IsLower ? 
_rows : diagSize;\n    Index cols = IsLower ? diagSize : _cols;\n\n    typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,RowMajor>, 0, OuterStride<> > LhsMap;\n    const LhsMap lhs(_lhs,rows,cols,OuterStride<>(lhsStride));\n    typename conj_expr_if<ConjLhs,LhsMap>::type cjLhs(lhs);\n\n    typedef Map<const Matrix<RhsScalar,Dynamic,1> > RhsMap;\n    const RhsMap rhs(_rhs,cols);\n    typename conj_expr_if<ConjRhs,RhsMap>::type cjRhs(rhs);\n\n    typedef Map<Matrix<ResScalar,Dynamic,1>, 0, InnerStride<> > ResMap;\n    ResMap res(_res,rows,InnerStride<>(resIncr));\n\n    typedef const_blas_data_mapper<LhsScalar,Index,RowMajor> LhsMapper;\n    typedef const_blas_data_mapper<RhsScalar,Index,RowMajor> RhsMapper;\n\n    for (Index pi=0; pi<diagSize; pi+=PanelWidth)\n    {\n      Index actualPanelWidth = (std::min)(PanelWidth, diagSize-pi);\n      for (Index k=0; k<actualPanelWidth; ++k)\n      {\n        Index i = pi + k;\n        Index s = IsLower ? pi  : ((HasUnitDiag||HasZeroDiag) ? i+1 : i);\n        Index r = IsLower ? k+1 : actualPanelWidth-k;\n        if ((!(HasUnitDiag||HasZeroDiag)) || (--r)>0)\n          res.coeffRef(i) += alpha * (cjLhs.row(i).segment(s,r).cwiseProduct(cjRhs.segment(s,r).transpose())).sum();\n        if (HasUnitDiag)\n          res.coeffRef(i) += alpha * cjRhs.coeff(i);\n      }\n      Index r = IsLower ? pi : cols - pi - actualPanelWidth;\n      if (r>0)\n      {\n        Index s = IsLower ? 
0 : pi + actualPanelWidth;\n        general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjLhs,RhsScalar,RhsMapper,ConjRhs,BuiltIn>::run(\n            actualPanelWidth, r,\n            LhsMapper(&lhs.coeffRef(pi,s), lhsStride),\n            RhsMapper(&rhs.coeffRef(s), rhsIncr),\n            &res.coeffRef(pi), resIncr, alpha);\n      }\n    }\n    if(IsLower && rows>diagSize)\n    {\n      general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,ConjLhs,RhsScalar,RhsMapper,ConjRhs>::run(\n            rows-diagSize, cols,\n            LhsMapper(&lhs.coeffRef(diagSize,0), lhsStride),\n            RhsMapper(&rhs.coeffRef(0), rhsIncr),\n            &res.coeffRef(diagSize), resIncr, alpha);\n    }\n  }\n\n/***************************************************************************\n* Wrapper to product_triangular_vector\n***************************************************************************/\n\ntemplate<int Mode,int StorageOrder>\nstruct trmv_selector;\n\n} // end namespace internal\n\nnamespace internal {\n\ntemplate<int Mode, typename Lhs, typename Rhs>\nstruct triangular_product_impl<Mode,true,Lhs,false,Rhs,true>\n{\n  template<typename Dest> static void run(Dest& dst, const Lhs &lhs, const Rhs &rhs, const typename Dest::Scalar& alpha)\n  {\n    eigen_assert(dst.rows()==lhs.rows() && dst.cols()==rhs.cols());\n  \n    internal::trmv_selector<Mode,(int(internal::traits<Lhs>::Flags)&RowMajorBit) ? RowMajor : ColMajor>::run(lhs, rhs, dst, alpha);\n  }\n};\n\ntemplate<int Mode, typename Lhs, typename Rhs>\nstruct triangular_product_impl<Mode,false,Lhs,true,Rhs,false>\n{\n  template<typename Dest> static void run(Dest& dst, const Lhs &lhs, const Rhs &rhs, const typename Dest::Scalar& alpha)\n  {\n    eigen_assert(dst.rows()==lhs.rows() && dst.cols()==rhs.cols());\n\n    Transpose<Dest> dstT(dst);\n    internal::trmv_selector<(Mode & (UnitDiag|ZeroDiag)) | ((Mode & Lower) ? 
Upper : Lower),\n                            (int(internal::traits<Rhs>::Flags)&RowMajorBit) ? ColMajor : RowMajor>\n            ::run(rhs.transpose(),lhs.transpose(), dstT, alpha);\n  }\n};\n\n} // end namespace internal\n\nnamespace internal {\n\n// TODO: find a way to factorize this piece of code with gemv_selector since the logic is exactly the same.\n  \ntemplate<int Mode> struct trmv_selector<Mode,ColMajor>\n{\n  template<typename Lhs, typename Rhs, typename Dest>\n  static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)\n  {\n    typedef typename Lhs::Scalar      LhsScalar;\n    typedef typename Rhs::Scalar      RhsScalar;\n    typedef typename Dest::Scalar     ResScalar;\n    typedef typename Dest::RealScalar RealScalar;\n    \n    typedef internal::blas_traits<Lhs> LhsBlasTraits;\n    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;\n    typedef internal::blas_traits<Rhs> RhsBlasTraits;\n    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;\n    \n    typedef Map<Matrix<ResScalar,Dynamic,1>, EIGEN_PLAIN_ENUM_MIN(AlignedMax,internal::packet_traits<ResScalar>::size)> MappedDest;\n\n    typename internal::add_const_on_value_type<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(lhs);\n    typename internal::add_const_on_value_type<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(rhs);\n\n    ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)\n                                  * RhsBlasTraits::extractScalarFactor(rhs);\n\n    enum {\n      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1\n      // on, the other hand it is good for the cache to pack the vector anyways...\n      EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1,\n      ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),\n      MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || 
ComplexByReal\n    };\n\n    gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;\n\n    bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));\n    bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;\n\n    RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);\n\n    ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),\n                                                  evalToDest ? dest.data() : static_dest.data());\n\n    if(!evalToDest)\n    {\n      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN\n      Index size = dest.size();\n      EIGEN_DENSE_STORAGE_CTOR_PLUGIN\n      #endif\n      if(!alphaIsCompatible)\n      {\n        MappedDest(actualDestPtr, dest.size()).setZero();\n        compatibleAlpha = RhsScalar(1);\n      }\n      else\n        MappedDest(actualDestPtr, dest.size()) = dest;\n    }\n\n    internal::triangular_matrix_vector_product\n      <Index,Mode,\n       LhsScalar, LhsBlasTraits::NeedToConjugate,\n       RhsScalar, RhsBlasTraits::NeedToConjugate,\n       ColMajor>\n      ::run(actualLhs.rows(),actualLhs.cols(),\n            actualLhs.data(),actualLhs.outerStride(),\n            actualRhs.data(),actualRhs.innerStride(),\n            actualDestPtr,1,compatibleAlpha);\n\n    if (!evalToDest)\n    {\n      if(!alphaIsCompatible)\n        dest += actualAlpha * MappedDest(actualDestPtr, dest.size());\n      else\n        dest = MappedDest(actualDestPtr, dest.size());\n    }\n  }\n};\n\ntemplate<int Mode> struct trmv_selector<Mode,RowMajor>\n{\n  template<typename Lhs, typename Rhs, typename Dest>\n  static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)\n  {\n    typedef typename Lhs::Scalar      LhsScalar;\n    typedef typename Rhs::Scalar      RhsScalar;\n    typedef typename Dest::Scalar     ResScalar;\n    \n    typedef internal::blas_traits<Lhs> 
LhsBlasTraits;\n    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;\n    typedef internal::blas_traits<Rhs> RhsBlasTraits;\n    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;\n    typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;\n\n    typename add_const<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(lhs);\n    typename add_const<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(rhs);\n\n    ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)\n                                  * RhsBlasTraits::extractScalarFactor(rhs);\n\n    enum {\n      DirectlyUseRhs = ActualRhsTypeCleaned::InnerStrideAtCompileTime==1\n    };\n\n    gemv_static_vector_if<RhsScalar,ActualRhsTypeCleaned::SizeAtCompileTime,ActualRhsTypeCleaned::MaxSizeAtCompileTime,!DirectlyUseRhs> static_rhs;\n\n    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(),\n        DirectlyUseRhs ? const_cast<RhsScalar*>(actualRhs.data()) : static_rhs.data());\n\n    if(!DirectlyUseRhs)\n    {\n      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN\n      Index size = actualRhs.size();\n      EIGEN_DENSE_STORAGE_CTOR_PLUGIN\n      #endif\n      Map<typename ActualRhsTypeCleaned::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;\n    }\n\n    internal::triangular_matrix_vector_product\n      <Index,Mode,\n       LhsScalar, LhsBlasTraits::NeedToConjugate,\n       RhsScalar, RhsBlasTraits::NeedToConjugate,\n       RowMajor>\n      ::run(actualLhs.rows(),actualLhs.cols(),\n            actualLhs.data(),actualLhs.outerStride(),\n            actualRhsPtr,1,\n            dest.data(),dest.innerStride(),\n            actualAlpha);\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_TRIANGULARMATRIXVECTOR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/TriangularMatrixVector_BLAS.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to BLAS F77\n *   Triangular matrix-vector product functionality based on ?TRMV.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_TRIANGULAR_MATRIX_VECTOR_BLAS_H\n#define EIGEN_TRIANGULAR_MATRIX_VECTOR_BLAS_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/**********************************************************************\n* This file implements triangular matrix-vector multiplication using BLAS\n**********************************************************************/\n\n// trmv/hemv specialization\n\ntemplate<typename Index, int Mode, typename LhsScalar, bool ConjLhs, typename RhsScalar, bool ConjRhs, int StorageOrder>\nstruct triangular_matrix_vector_product_trmv :\n  triangular_matrix_vector_product<Index,Mode,LhsScalar,ConjLhs,RhsScalar,ConjRhs,StorageOrder,BuiltIn> {};\n\n#define EIGEN_BLAS_TRMV_SPECIALIZE(Scalar) \\\ntemplate<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \\\nstruct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,ColMajor,Specialized> { \\\n static void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \\\n                                     const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \\\n      
triangular_matrix_vector_product_trmv<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,ColMajor>::run( \\\n        _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \\\n  } \\\n}; \\\ntemplate<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \\\nstruct triangular_matrix_vector_product<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,RowMajor,Specialized> { \\\n static void run(Index _rows, Index _cols, const Scalar* _lhs, Index lhsStride, \\\n                                     const Scalar* _rhs, Index rhsIncr, Scalar* _res, Index resIncr, Scalar alpha) { \\\n      triangular_matrix_vector_product_trmv<Index,Mode,Scalar,ConjLhs,Scalar,ConjRhs,RowMajor>::run( \\\n        _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \\\n  } \\\n};\n\nEIGEN_BLAS_TRMV_SPECIALIZE(double)\nEIGEN_BLAS_TRMV_SPECIALIZE(float)\nEIGEN_BLAS_TRMV_SPECIALIZE(dcomplex)\nEIGEN_BLAS_TRMV_SPECIALIZE(scomplex)\n\n// implements col-major: res += alpha * op(triangular) * vector\n#define EIGEN_BLAS_TRMV_CM(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \\\ntemplate<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \\\nstruct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,ColMajor> { \\\n  enum { \\\n    IsLower = (Mode&Lower) == Lower, \\\n    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \\\n    IsUnitDiag  = (Mode&UnitDiag) ? 1 : 0, \\\n    IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \\\n    LowUp = IsLower ? Lower : Upper \\\n  }; \\\n static void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \\\n                 const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \\\n { \\\n   if (ConjLhs || IsZeroDiag) { \\\n     triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,ColMajor,BuiltIn>::run( \\\n       _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \\\n     return; \\\n   }\\\n   Index size = (std::min)(_rows,_cols); \\\n   Index rows = IsLower ? 
_rows : size; \\\n   Index cols = IsLower ? size : _cols; \\\n\\\n   typedef VectorX##EIGPREFIX VectorRhs; \\\n   EIGTYPE *x, *y;\\\n\\\n/* Set x*/ \\\n   Map<const VectorRhs, 0, InnerStride<> > rhs(_rhs,cols,InnerStride<>(rhsIncr)); \\\n   VectorRhs x_tmp; \\\n   if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \\\n   x = x_tmp.data(); \\\n\\\n/* Square part handling */\\\n\\\n   char trans, uplo, diag; \\\n   BlasIndex m, n, lda, incx, incy; \\\n   EIGTYPE const *a; \\\n   EIGTYPE beta(1); \\\n\\\n/* Set m, n */ \\\n   n = convert_index<BlasIndex>(size); \\\n   lda = convert_index<BlasIndex>(lhsStride); \\\n   incx = 1; \\\n   incy = convert_index<BlasIndex>(resIncr); \\\n\\\n/* Set uplo, trans and diag*/ \\\n   trans = 'N'; \\\n   uplo = IsLower ? 'L' : 'U'; \\\n   diag = IsUnitDiag ? 'U' : 'N'; \\\n\\\n/* call ?TRMV*/ \\\n   BLASPREFIX##trmv_(&uplo, &trans, &diag, &n, (const BLASTYPE*)_lhs, &lda, (BLASTYPE*)x, &incx); \\\n\\\n/* Add op(a_tr)rhs into res*/ \\\n   BLASPREFIX##axpy_(&n, &numext::real_ref(alpha),(const BLASTYPE*)x, &incx, (BLASTYPE*)_res, &incy); \\\n/* Non-square case - doesn't fit to BLAS ?TRMV. 
Fall to default triangular product*/ \\\n   if (size<(std::max)(rows,cols)) { \\\n     if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \\\n     x = x_tmp.data(); \\\n     if (size<rows) { \\\n       y = _res + size*resIncr; \\\n       a = _lhs + size; \\\n       m = convert_index<BlasIndex>(rows-size); \\\n       n = convert_index<BlasIndex>(size); \\\n     } \\\n     else { \\\n       x += size; \\\n       y = _res; \\\n       a = _lhs + size*lda; \\\n       m = convert_index<BlasIndex>(size); \\\n       n = convert_index<BlasIndex>(cols-size); \\\n     } \\\n     BLASPREFIX##gemv_(&trans, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)x, &incx, &numext::real_ref(beta), (BLASTYPE*)y, &incy); \\\n   } \\\n  } \\\n};\n\nEIGEN_BLAS_TRMV_CM(double,   double, d,  d)\nEIGEN_BLAS_TRMV_CM(dcomplex, double, cd, z)\nEIGEN_BLAS_TRMV_CM(float,    float,  f,  s)\nEIGEN_BLAS_TRMV_CM(scomplex, float,  cf, c)\n\n// implements row-major: res += alpha * op(triangular) * vector\n#define EIGEN_BLAS_TRMV_RM(EIGTYPE, BLASTYPE, EIGPREFIX, BLASPREFIX) \\\ntemplate<typename Index, int Mode, bool ConjLhs, bool ConjRhs> \\\nstruct triangular_matrix_vector_product_trmv<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,RowMajor> { \\\n  enum { \\\n    IsLower = (Mode&Lower) == Lower, \\\n    SetDiag = (Mode&(ZeroDiag|UnitDiag)) ? 0 : 1, \\\n    IsUnitDiag  = (Mode&UnitDiag) ? 1 : 0, \\\n    IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \\\n    LowUp = IsLower ? 
Lower : Upper \\\n  }; \\\n static void run(Index _rows, Index _cols, const EIGTYPE* _lhs, Index lhsStride, \\\n                 const EIGTYPE* _rhs, Index rhsIncr, EIGTYPE* _res, Index resIncr, EIGTYPE alpha) \\\n { \\\n   if (IsZeroDiag) { \\\n     triangular_matrix_vector_product<Index,Mode,EIGTYPE,ConjLhs,EIGTYPE,ConjRhs,RowMajor,BuiltIn>::run( \\\n       _rows, _cols, _lhs, lhsStride, _rhs, rhsIncr, _res, resIncr, alpha); \\\n     return; \\\n   }\\\n   Index size = (std::min)(_rows,_cols); \\\n   Index rows = IsLower ? _rows : size; \\\n   Index cols = IsLower ? size : _cols; \\\n\\\n   typedef VectorX##EIGPREFIX VectorRhs; \\\n   EIGTYPE *x, *y;\\\n\\\n/* Set x*/ \\\n   Map<const VectorRhs, 0, InnerStride<> > rhs(_rhs,cols,InnerStride<>(rhsIncr)); \\\n   VectorRhs x_tmp; \\\n   if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \\\n   x = x_tmp.data(); \\\n\\\n/* Square part handling */\\\n\\\n   char trans, uplo, diag; \\\n   BlasIndex m, n, lda, incx, incy; \\\n   EIGTYPE const *a; \\\n   EIGTYPE beta(1); \\\n\\\n/* Set m, n */ \\\n   n = convert_index<BlasIndex>(size); \\\n   lda = convert_index<BlasIndex>(lhsStride); \\\n   incx = 1; \\\n   incy = convert_index<BlasIndex>(resIncr); \\\n\\\n/* Set uplo, trans and diag*/ \\\n   trans = ConjLhs ? 'C' : 'T'; \\\n   uplo = IsLower ? 'U' : 'L'; \\\n   diag = IsUnitDiag ? 'U' : 'N'; \\\n\\\n/* call ?TRMV*/ \\\n   BLASPREFIX##trmv_(&uplo, &trans, &diag, &n, (const BLASTYPE*)_lhs, &lda, (BLASTYPE*)x, &incx); \\\n\\\n/* Add op(a_tr)rhs into res*/ \\\n   BLASPREFIX##axpy_(&n, &numext::real_ref(alpha),(const BLASTYPE*)x, &incx, (BLASTYPE*)_res, &incy); \\\n/* Non-square case - doesn't fit to BLAS ?TRMV. 
Fall to default triangular product*/ \\\n   if (size<(std::max)(rows,cols)) { \\\n     if (ConjRhs) x_tmp = rhs.conjugate(); else x_tmp = rhs; \\\n     x = x_tmp.data(); \\\n     if (size<rows) { \\\n       y = _res + size*resIncr; \\\n       a = _lhs + size*lda; \\\n       m = convert_index<BlasIndex>(rows-size); \\\n       n = convert_index<BlasIndex>(size); \\\n     } \\\n     else { \\\n       x += size; \\\n       y = _res; \\\n       a = _lhs + size; \\\n       m = convert_index<BlasIndex>(size); \\\n       n = convert_index<BlasIndex>(cols-size); \\\n     } \\\n     BLASPREFIX##gemv_(&trans, &n, &m, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (const BLASTYPE*)x, &incx, &numext::real_ref(beta), (BLASTYPE*)y, &incy); \\\n   } \\\n  } \\\n};\n\nEIGEN_BLAS_TRMV_RM(double,   double, d,  d)\nEIGEN_BLAS_TRMV_RM(dcomplex, double, cd, z)\nEIGEN_BLAS_TRMV_RM(float,    float,  f,  s)\nEIGEN_BLAS_TRMV_RM(scomplex, float,  cf, c)\n\n} // end namespase internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_TRIANGULAR_MATRIX_VECTOR_BLAS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/TriangularSolverMatrix.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_TRIANGULAR_SOLVER_MATRIX_H\n#define EIGEN_TRIANGULAR_SOLVER_MATRIX_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n// if the rhs is row major, let's transpose the product\ntemplate <typename Scalar, typename Index, int Side, int Mode, bool Conjugate, int TriStorageOrder>\nstruct triangular_solve_matrix<Scalar,Index,Side,Mode,Conjugate,TriStorageOrder,RowMajor>\n{\n  static void run(\n    Index size, Index cols,\n    const Scalar*  tri, Index triStride,\n    Scalar* _other, Index otherStride,\n    level3_blocking<Scalar,Scalar>& blocking)\n  {\n    triangular_solve_matrix<\n      Scalar, Index, Side==OnTheLeft?OnTheRight:OnTheLeft,\n      (Mode&UnitDiag) | ((Mode&Upper) ? Lower : Upper),\n      NumTraits<Scalar>::IsComplex && Conjugate,\n      TriStorageOrder==RowMajor ? 
ColMajor : RowMajor, ColMajor>\n      ::run(size, cols, tri, triStride, _other, otherStride, blocking);\n  }\n};\n\n/* Optimized triangular solver with multiple right hand side and the triangular matrix on the left\n */\ntemplate <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>\nstruct triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>\n{\n  static EIGEN_DONT_INLINE void run(\n    Index size, Index otherSize,\n    const Scalar* _tri, Index triStride,\n    Scalar* _other, Index otherStride,\n    level3_blocking<Scalar,Scalar>& blocking);\n};\ntemplate <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>\nEIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor>::run(\n    Index size, Index otherSize,\n    const Scalar* _tri, Index triStride,\n    Scalar* _other, Index otherStride,\n    level3_blocking<Scalar,Scalar>& blocking)\n  {\n    Index cols = otherSize;\n\n    typedef const_blas_data_mapper<Scalar, Index, TriStorageOrder> TriMapper;\n    typedef blas_data_mapper<Scalar, Index, ColMajor> OtherMapper;\n    TriMapper tri(_tri, triStride);\n    OtherMapper other(_other, otherStride);\n\n    typedef gebp_traits<Scalar,Scalar> Traits;\n\n    enum {\n      SmallPanelWidth   = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),\n      IsLower = (Mode&Lower) == Lower\n    };\n\n    Index kc = blocking.kc();                   // cache block size along the K direction\n    Index mc = (std::min)(size,blocking.mc());  // cache block size along the M direction\n\n    std::size_t sizeA = kc*mc;\n    std::size_t sizeB = kc*cols;\n\n    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());\n    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());\n\n    conj_if<Conjugate> conj;\n    gebp_kernel<Scalar, Scalar, Index, OtherMapper, Traits::mr, Traits::nr, Conjugate, 
false> gebp_kernel;\n    gemm_pack_lhs<Scalar, Index, TriMapper, Traits::mr, Traits::LhsProgress, TriStorageOrder> pack_lhs;\n    gemm_pack_rhs<Scalar, Index, OtherMapper, Traits::nr, ColMajor, false, true> pack_rhs;\n\n    // the goal here is to subdivise the Rhs panels such that we keep some cache\n    // coherence when accessing the rhs elements\n    std::ptrdiff_t l1, l2, l3;\n    manage_caching_sizes(GetAction, &l1, &l2, &l3);\n    Index subcols = cols>0 ? l2/(4 * sizeof(Scalar) * std::max<Index>(otherStride,size)) : 0;\n    subcols = std::max<Index>((subcols/Traits::nr)*Traits::nr, Traits::nr);\n\n    for(Index k2=IsLower ? 0 : size;\n        IsLower ? k2<size : k2>0;\n        IsLower ? k2+=kc : k2-=kc)\n    {\n      const Index actual_kc = (std::min)(IsLower ? size-k2 : k2, kc);\n\n      // We have selected and packed a big horizontal panel R1 of rhs. Let B be the packed copy of this panel,\n      // and R2 the remaining part of rhs. The corresponding vertical panel of lhs is split into\n      // A11 (the triangular part) and A21 the remaining rectangular part.\n      // Then the high level algorithm is:\n      //  - B = R1                    => general block copy (done during the next step)\n      //  - R1 = A11^-1 B             => tricky part\n      //  - update B from the new R1  => actually this has to be performed continuously during the above step\n      //  - R2 -= A21 * B             => GEPP\n\n      // The tricky part: compute R1 = A11^-1 B while updating B from R1\n      // The idea is to split A11 into multiple small vertical panels.\n      // Each panel can be split into a small triangular part T1k which is processed without optimization,\n      // and the remaining small part T2k which is processed using gebp with appropriate block strides\n      for(Index j2=0; j2<cols; j2+=subcols)\n      {\n        Index actual_cols = (std::min)(cols-j2,subcols);\n        // for each small vertical panels [T1k^T, T2k^T]^T of lhs\n        for (Index k1=0; 
k1<actual_kc; k1+=SmallPanelWidth)\n        {\n          Index actualPanelWidth = std::min<Index>(actual_kc-k1, SmallPanelWidth);\n          // tr solve\n          for (Index k=0; k<actualPanelWidth; ++k)\n          {\n            // TODO write a small kernel handling this (can be shared with trsv)\n            Index i  = IsLower ? k2+k1+k : k2-k1-k-1;\n            Index rs = actualPanelWidth - k - 1; // remaining size\n            Index s  = TriStorageOrder==RowMajor ? (IsLower ? k2+k1 : i+1)\n                                                 :  IsLower ? i+1 : i-rs;\n\n            Scalar a = (Mode & UnitDiag) ? Scalar(1) : Scalar(1)/conj(tri(i,i));\n            for (Index j=j2; j<j2+actual_cols; ++j)\n            {\n              if (TriStorageOrder==RowMajor)\n              {\n                Scalar b(0);\n                const Scalar* l = &tri(i,s);\n                Scalar* r = &other(s,j);\n                for (Index i3=0; i3<k; ++i3)\n                  b += conj(l[i3]) * r[i3];\n\n                other(i,j) = (other(i,j) - b)*a;\n              }\n              else\n              {\n                Scalar b = (other(i,j) *= a);\n                Scalar* r = &other(s,j);\n                const Scalar* l = &tri(s,i);\n                for (Index i3=0;i3<rs;++i3)\n                  r[i3] -= b * conj(l[i3]);\n              }\n            }\n          }\n\n          Index lengthTarget = actual_kc-k1-actualPanelWidth;\n          Index startBlock   = IsLower ? k2+k1 : k2-k1-actualPanelWidth;\n          Index blockBOffset = IsLower ? k1 : lengthTarget;\n\n          // update the respective rows of B from other\n          pack_rhs(blockB+actual_kc*j2, other.getSubMapper(startBlock,j2), actualPanelWidth, actual_cols, actual_kc, blockBOffset);\n\n          // GEBP\n          if (lengthTarget>0)\n          {\n            Index startTarget  = IsLower ? 
k2+k1+actualPanelWidth : k2-actual_kc;\n\n            pack_lhs(blockA, tri.getSubMapper(startTarget,startBlock), actualPanelWidth, lengthTarget);\n\n            gebp_kernel(other.getSubMapper(startTarget,j2), blockA, blockB+actual_kc*j2, lengthTarget, actualPanelWidth, actual_cols, Scalar(-1),\n                        actualPanelWidth, actual_kc, 0, blockBOffset);\n          }\n        }\n      }\n      \n      // R2 -= A21 * B => GEPP\n      {\n        Index start = IsLower ? k2+kc : 0;\n        Index end   = IsLower ? size : k2-kc;\n        for(Index i2=start; i2<end; i2+=mc)\n        {\n          const Index actual_mc = (std::min)(mc,end-i2);\n          if (actual_mc>0)\n          {\n            pack_lhs(blockA, tri.getSubMapper(i2, IsLower ? k2 : k2-kc), actual_kc, actual_mc);\n\n            gebp_kernel(other.getSubMapper(i2, 0), blockA, blockB, actual_mc, actual_kc, cols, Scalar(-1), -1, -1, 0, 0);\n          }\n        }\n      }\n    }\n  }\n\n/* Optimized triangular solver with multiple left hand sides and the triangular matrix on the right\n */\ntemplate <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>\nstruct triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>\n{\n  static EIGEN_DONT_INLINE void run(\n    Index size, Index otherSize,\n    const Scalar* _tri, Index triStride,\n    Scalar* _other, Index otherStride,\n    level3_blocking<Scalar,Scalar>& blocking);\n};\ntemplate <typename Scalar, typename Index, int Mode, bool Conjugate, int TriStorageOrder>\nEIGEN_DONT_INLINE void triangular_solve_matrix<Scalar,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor>::run(\n    Index size, Index otherSize,\n    const Scalar* _tri, Index triStride,\n    Scalar* _other, Index otherStride,\n    level3_blocking<Scalar,Scalar>& blocking)\n  {\n    Index rows = otherSize;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n\n    typedef blas_data_mapper<Scalar, Index, ColMajor> 
LhsMapper;\n    typedef const_blas_data_mapper<Scalar, Index, TriStorageOrder> RhsMapper;\n    LhsMapper lhs(_other, otherStride);\n    RhsMapper rhs(_tri, triStride);\n\n    typedef gebp_traits<Scalar,Scalar> Traits;\n    enum {\n      RhsStorageOrder   = TriStorageOrder,\n      SmallPanelWidth   = EIGEN_PLAIN_ENUM_MAX(Traits::mr,Traits::nr),\n      IsLower = (Mode&Lower) == Lower\n    };\n\n    Index kc = blocking.kc();                   // cache block size along the K direction\n    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction\n\n    std::size_t sizeA = kc*mc;\n    std::size_t sizeB = kc*size;\n\n    ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());\n    ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());\n\n    conj_if<Conjugate> conj;\n    gebp_kernel<Scalar, Scalar, Index, LhsMapper, Traits::mr, Traits::nr, false, Conjugate> gebp_kernel;\n    gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;\n    gemm_pack_rhs<Scalar, Index, RhsMapper, Traits::nr, RhsStorageOrder,false,true> pack_rhs_panel;\n    gemm_pack_lhs<Scalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, ColMajor, false, true> pack_lhs_panel;\n\n    for(Index k2=IsLower ? size : 0;\n        IsLower ? k2>0 : k2<size;\n        IsLower ? k2-=kc : k2+=kc)\n    {\n      const Index actual_kc = (std::min)(IsLower ? k2 : size-k2, kc);\n      Index actual_k2 = IsLower ? k2-actual_kc : k2 ;\n\n      Index startPanel = IsLower ? 0 : k2+actual_kc;\n      Index rs = IsLower ? 
actual_k2 : size - actual_k2 - actual_kc;\n      Scalar* geb = blockB+actual_kc*actual_kc;\n\n      if (rs>0) pack_rhs(geb, rhs.getSubMapper(actual_k2,startPanel), actual_kc, rs);\n\n      // triangular packing (we only pack the panels off the diagonal,\n      // neglecting the blocks overlapping the diagonal\n      {\n        for (Index j2=0; j2<actual_kc; j2+=SmallPanelWidth)\n        {\n          Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);\n          Index actual_j2 = actual_k2 + j2;\n          Index panelOffset = IsLower ? j2+actualPanelWidth : 0;\n          Index panelLength = IsLower ? actual_kc-j2-actualPanelWidth : j2;\n\n          if (panelLength>0)\n          pack_rhs_panel(blockB+j2*actual_kc,\n                         rhs.getSubMapper(actual_k2+panelOffset, actual_j2),\n                         panelLength, actualPanelWidth,\n                         actual_kc, panelOffset);\n        }\n      }\n\n      for(Index i2=0; i2<rows; i2+=mc)\n      {\n        const Index actual_mc = (std::min)(mc,rows-i2);\n\n        // triangular solver kernel\n        {\n          // for each small block of the diagonal (=> vertical panels of rhs)\n          for (Index j2 = IsLower\n                      ? (actual_kc - ((actual_kc%SmallPanelWidth) ? Index(actual_kc%SmallPanelWidth)\n                                                                  : Index(SmallPanelWidth)))\n                      : 0;\n               IsLower ? j2>=0 : j2<actual_kc;\n               IsLower ? j2-=SmallPanelWidth : j2+=SmallPanelWidth)\n          {\n            Index actualPanelWidth = std::min<Index>(actual_kc-j2, SmallPanelWidth);\n            Index absolute_j2 = actual_k2 + j2;\n            Index panelOffset = IsLower ? j2+actualPanelWidth : 0;\n            Index panelLength = IsLower ? 
actual_kc - j2 - actualPanelWidth : j2;\n\n            // GEBP\n            if(panelLength>0)\n            {\n              gebp_kernel(lhs.getSubMapper(i2,absolute_j2),\n                          blockA, blockB+j2*actual_kc,\n                          actual_mc, panelLength, actualPanelWidth,\n                          Scalar(-1),\n                          actual_kc, actual_kc, // strides\n                          panelOffset, panelOffset); // offsets\n            }\n\n            // unblocked triangular solve\n            for (Index k=0; k<actualPanelWidth; ++k)\n            {\n              Index j = IsLower ? absolute_j2+actualPanelWidth-k-1 : absolute_j2+k;\n\n              Scalar* r = &lhs(i2,j);\n              for (Index k3=0; k3<k; ++k3)\n              {\n                Scalar b = conj(rhs(IsLower ? j+1+k3 : absolute_j2+k3,j));\n                Scalar* a = &lhs(i2,IsLower ? j+1+k3 : absolute_j2+k3);\n                for (Index i=0; i<actual_mc; ++i)\n                  r[i] -= a[i] * b;\n              }\n              if((Mode & UnitDiag)==0)\n              {\n                Scalar inv_rjj = RealScalar(1)/conj(rhs(j,j));\n                for (Index i=0; i<actual_mc; ++i)\n                  r[i] *= inv_rjj;\n              }\n            }\n\n            // pack the just computed part of lhs to A\n            pack_lhs_panel(blockA, LhsMapper(_other+absolute_j2*otherStride+i2, otherStride),\n                           actualPanelWidth, actual_mc,\n                           actual_kc, j2);\n          }\n        }\n\n        if (rs>0)\n          gebp_kernel(lhs.getSubMapper(i2, startPanel), blockA, geb,\n                      actual_mc, actual_kc, rs, Scalar(-1),\n                      -1, -1, 0, 0);\n      }\n    }\n  }\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_TRIANGULAR_SOLVER_MATRIX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/TriangularSolverMatrix_BLAS.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to BLAS F77\n *   Triangular matrix * matrix product functionality based on ?TRMM.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_TRIANGULAR_SOLVER_MATRIX_BLAS_H\n#define EIGEN_TRIANGULAR_SOLVER_MATRIX_BLAS_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n// implements LeftSide op(triangular)^-1 * general\n#define EIGEN_BLAS_TRSM_L(EIGTYPE, BLASTYPE, BLASPREFIX) \\\ntemplate <typename Index, int Mode, bool Conjugate, int TriStorageOrder> \\\nstruct triangular_solve_matrix<EIGTYPE,Index,OnTheLeft,Mode,Conjugate,TriStorageOrder,ColMajor> \\\n{ \\\n  enum { \\\n    IsLower = (Mode&Lower) == Lower, \\\n    IsUnitDiag  = (Mode&UnitDiag) ? 1 : 0, \\\n    IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \\\n    conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 
1 : 0 \\\n  }; \\\n  static void run( \\\n      Index size, Index otherSize, \\\n      const EIGTYPE* _tri, Index triStride, \\\n      EIGTYPE* _other, Index otherStride, level3_blocking<EIGTYPE,EIGTYPE>& /*blocking*/) \\\n  { \\\n   BlasIndex m = convert_index<BlasIndex>(size), n = convert_index<BlasIndex>(otherSize), lda, ldb; \\\n   char side = 'L', uplo, diag='N', transa; \\\n   /* Set alpha_ */ \\\n   EIGTYPE alpha(1); \\\n   ldb = convert_index<BlasIndex>(otherStride);\\\n\\\n   const EIGTYPE *a; \\\n/* Set trans */ \\\n   transa = (TriStorageOrder==RowMajor) ? ((Conjugate) ? 'C' : 'T') : 'N'; \\\n/* Set uplo */ \\\n   uplo = IsLower ? 'L' : 'U'; \\\n   if (TriStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \\\n/* Set a, lda */ \\\n   typedef Matrix<EIGTYPE, Dynamic, Dynamic, TriStorageOrder> MatrixTri; \\\n   Map<const MatrixTri, 0, OuterStride<> > tri(_tri,size,size,OuterStride<>(triStride)); \\\n   MatrixTri a_tmp; \\\n\\\n   if (conjA) { \\\n     a_tmp = tri.conjugate(); \\\n     a = a_tmp.data(); \\\n     lda = convert_index<BlasIndex>(a_tmp.outerStride()); \\\n   } else { \\\n     a = _tri; \\\n     lda = convert_index<BlasIndex>(triStride); \\\n   } \\\n   if (IsUnitDiag) diag='U'; \\\n/* call ?trsm*/ \\\n   BLASPREFIX##trsm_(&side, &uplo, &transa, &diag, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)_other, &ldb); \\\n } \\\n};\n\nEIGEN_BLAS_TRSM_L(double,   double, d)\nEIGEN_BLAS_TRSM_L(dcomplex, double, z)\nEIGEN_BLAS_TRSM_L(float,    float,  s)\nEIGEN_BLAS_TRSM_L(scomplex, float,  c)\n\n\n// implements RightSide general * op(triangular)^-1\n#define EIGEN_BLAS_TRSM_R(EIGTYPE, BLASTYPE, BLASPREFIX) \\\ntemplate <typename Index, int Mode, bool Conjugate, int TriStorageOrder> \\\nstruct triangular_solve_matrix<EIGTYPE,Index,OnTheRight,Mode,Conjugate,TriStorageOrder,ColMajor> \\\n{ \\\n  enum { \\\n    IsLower = (Mode&Lower) == Lower, \\\n    IsUnitDiag  = (Mode&UnitDiag) ? 
1 : 0, \\\n    IsZeroDiag  = (Mode&ZeroDiag) ? 1 : 0, \\\n    conjA = ((TriStorageOrder==ColMajor) && Conjugate) ? 1 : 0 \\\n  }; \\\n  static void run( \\\n      Index size, Index otherSize, \\\n      const EIGTYPE* _tri, Index triStride, \\\n      EIGTYPE* _other, Index otherStride, level3_blocking<EIGTYPE,EIGTYPE>& /*blocking*/) \\\n  { \\\n   BlasIndex m = convert_index<BlasIndex>(otherSize), n = convert_index<BlasIndex>(size), lda, ldb; \\\n   char side = 'R', uplo, diag='N', transa; \\\n   /* Set alpha_ */ \\\n   EIGTYPE alpha(1); \\\n   ldb = convert_index<BlasIndex>(otherStride);\\\n\\\n   const EIGTYPE *a; \\\n/* Set trans */ \\\n   transa = (TriStorageOrder==RowMajor) ? ((Conjugate) ? 'C' : 'T') : 'N'; \\\n/* Set uplo */ \\\n   uplo = IsLower ? 'L' : 'U'; \\\n   if (TriStorageOrder==RowMajor) uplo = (uplo == 'L') ? 'U' : 'L'; \\\n/* Set a, lda */ \\\n   typedef Matrix<EIGTYPE, Dynamic, Dynamic, TriStorageOrder> MatrixTri; \\\n   Map<const MatrixTri, 0, OuterStride<> > tri(_tri,size,size,OuterStride<>(triStride)); \\\n   MatrixTri a_tmp; \\\n\\\n   if (conjA) { \\\n     a_tmp = tri.conjugate(); \\\n     a = a_tmp.data(); \\\n     lda = convert_index<BlasIndex>(a_tmp.outerStride()); \\\n   } else { \\\n     a = _tri; \\\n     lda = convert_index<BlasIndex>(triStride); \\\n   } \\\n   if (IsUnitDiag) diag='U'; \\\n/* call ?trsm*/ \\\n   BLASPREFIX##trsm_(&side, &uplo, &transa, &diag, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)a, &lda, (BLASTYPE*)_other, &ldb); \\\n   /*std::cout << \"TRMS_L specialization!\\n\";*/ \\\n } \\\n};\n\nEIGEN_BLAS_TRSM_R(double,   double, d)\nEIGEN_BLAS_TRSM_R(dcomplex, double, z)\nEIGEN_BLAS_TRSM_R(float,    float,  s)\nEIGEN_BLAS_TRSM_R(scomplex, float,  c)\n\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_TRIANGULAR_SOLVER_MATRIX_BLAS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/products/TriangularSolverVector.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_TRIANGULAR_SOLVER_VECTOR_H\n#define EIGEN_TRIANGULAR_SOLVER_VECTOR_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate, int StorageOrder>\nstruct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheRight, Mode, Conjugate, StorageOrder>\n{\n  static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs)\n  {\n    triangular_solve_vector<LhsScalar,RhsScalar,Index,OnTheLeft,\n        ((Mode&Upper)==Upper ? Lower : Upper) | (Mode&UnitDiag),\n        Conjugate,StorageOrder==RowMajor?ColMajor:RowMajor\n      >::run(size, _lhs, lhsStride, rhs);\n  }\n};\n\n// forward and backward substitution, row-major, rhs is a vector\ntemplate<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate>\nstruct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, RowMajor>\n{\n  enum {\n    IsLower = ((Mode&Lower)==Lower)\n  };\n  static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs)\n  {\n    typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,RowMajor>, 0, OuterStride<> > LhsMap;\n    const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride));\n\n    typedef const_blas_data_mapper<LhsScalar,Index,RowMajor> LhsMapper;\n    typedef const_blas_data_mapper<RhsScalar,Index,ColMajor> RhsMapper;\n\n    typename internal::conditional<\n                          Conjugate,\n                          const CwiseUnaryOp<typename internal::scalar_conjugate_op<LhsScalar>,LhsMap>,\n                          
const LhsMap&>\n                        ::type cjLhs(lhs);\n    static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;\n    for(Index pi=IsLower ? 0 : size;\n        IsLower ? pi<size : pi>0;\n        IsLower ? pi+=PanelWidth : pi-=PanelWidth)\n    {\n      Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth);\n\n      Index r = IsLower ? pi : size - pi; // remaining size\n      if (r > 0)\n      {\n        // let's directly call the low level product function because:\n        // 1 - it is faster to compile\n        // 2 - it is slighlty faster at runtime\n        Index startRow = IsLower ? pi : pi-actualPanelWidth;\n        Index startCol = IsLower ? 0 : pi;\n\n        general_matrix_vector_product<Index,LhsScalar,LhsMapper,RowMajor,Conjugate,RhsScalar,RhsMapper,false>::run(\n          actualPanelWidth, r,\n          LhsMapper(&lhs.coeffRef(startRow,startCol), lhsStride),\n          RhsMapper(rhs + startCol, 1),\n          rhs + startRow, 1,\n          RhsScalar(-1));\n      }\n\n      for(Index k=0; k<actualPanelWidth; ++k)\n      {\n        Index i = IsLower ? pi+k : pi-k-1;\n        Index s = IsLower ? 
pi   : i+1;\n        if (k>0)\n          rhs[i] -= (cjLhs.row(i).segment(s,k).transpose().cwiseProduct(Map<const Matrix<RhsScalar,Dynamic,1> >(rhs+s,k))).sum();\n\n        if(!(Mode & UnitDiag))\n          rhs[i] /= cjLhs(i,i);\n      }\n    }\n  }\n};\n\n// forward and backward substitution, column-major, rhs is a vector\ntemplate<typename LhsScalar, typename RhsScalar, typename Index, int Mode, bool Conjugate>\nstruct triangular_solve_vector<LhsScalar, RhsScalar, Index, OnTheLeft, Mode, Conjugate, ColMajor>\n{\n  enum {\n    IsLower = ((Mode&Lower)==Lower)\n  };\n  static void run(Index size, const LhsScalar* _lhs, Index lhsStride, RhsScalar* rhs)\n  {\n    typedef Map<const Matrix<LhsScalar,Dynamic,Dynamic,ColMajor>, 0, OuterStride<> > LhsMap;\n    const LhsMap lhs(_lhs,size,size,OuterStride<>(lhsStride));\n    typedef const_blas_data_mapper<LhsScalar,Index,ColMajor> LhsMapper;\n    typedef const_blas_data_mapper<RhsScalar,Index,ColMajor> RhsMapper;\n    typename internal::conditional<Conjugate,\n                                   const CwiseUnaryOp<typename internal::scalar_conjugate_op<LhsScalar>,LhsMap>,\n                                   const LhsMap&\n                                  >::type cjLhs(lhs);\n    static const Index PanelWidth = EIGEN_TUNE_TRIANGULAR_PANEL_WIDTH;\n\n    for(Index pi=IsLower ? 0 : size;\n        IsLower ? pi<size : pi>0;\n        IsLower ? pi+=PanelWidth : pi-=PanelWidth)\n    {\n      Index actualPanelWidth = (std::min)(IsLower ? size - pi : pi, PanelWidth);\n      Index startBlock = IsLower ? pi : pi-actualPanelWidth;\n      Index endBlock = IsLower ? pi + actualPanelWidth : 0;\n\n      for(Index k=0; k<actualPanelWidth; ++k)\n      {\n        Index i = IsLower ? pi+k : pi-k-1;\n        if(!(Mode & UnitDiag))\n          rhs[i] /= cjLhs.coeff(i,i);\n\n        Index r = actualPanelWidth - k - 1; // remaining size\n        Index s = IsLower ? 
i+1 : i-r;\n        if (r>0)\n          Map<Matrix<RhsScalar,Dynamic,1> >(rhs+s,r) -= rhs[i] * cjLhs.col(i).segment(s,r);\n      }\n      Index r = IsLower ? size - endBlock : startBlock; // remaining size\n      if (r > 0)\n      {\n        // let's directly call the low level product function because:\n        // 1 - it is faster to compile\n        // 2 - it is slighlty faster at runtime\n        general_matrix_vector_product<Index,LhsScalar,LhsMapper,ColMajor,Conjugate,RhsScalar,RhsMapper,false>::run(\n            r, actualPanelWidth,\n            LhsMapper(&lhs.coeffRef(endBlock,startBlock), lhsStride),\n            RhsMapper(rhs+startBlock, 1),\n            rhs+endBlock, 1, RhsScalar(-1));\n      }\n    }\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_TRIANGULAR_SOLVER_VECTOR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/BlasUtil.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_BLASUTIL_H\n#define EIGEN_BLASUTIL_H\n\n// This file contains many lightweight helper classes used to\n// implement and control fast level 2 and level 3 BLAS-like routines.\n\nnamespace Eigen {\n\nnamespace internal {\n\n// forward declarations\ntemplate<typename LhsScalar, typename RhsScalar, typename Index, typename DataMapper, int mr, int nr, bool ConjugateLhs=false, bool ConjugateRhs=false>\nstruct gebp_kernel;\n\ntemplate<typename Scalar, typename Index, typename DataMapper, int nr, int StorageOrder, bool Conjugate = false, bool PanelMode=false>\nstruct gemm_pack_rhs;\n\ntemplate<typename Scalar, typename Index, typename DataMapper, int Pack1, int Pack2, int StorageOrder, bool Conjugate = false, bool PanelMode = false>\nstruct gemm_pack_lhs;\n\ntemplate<\n  typename Index,\n  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,\n  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,\n  int ResStorageOrder>\nstruct general_matrix_matrix_product;\n\ntemplate<typename Index,\n         typename LhsScalar, typename LhsMapper, int LhsStorageOrder, bool ConjugateLhs,\n         typename RhsScalar, typename RhsMapper, bool ConjugateRhs, int Version=Specialized>\nstruct general_matrix_vector_product;\n\n\ntemplate<bool Conjugate> struct conj_if;\n\ntemplate<> struct conj_if<true> {\n  template<typename T>\n  inline T operator()(const T& x) const { return numext::conj(x); }\n  template<typename T>\n  inline T pconj(const T& x) const { return internal::pconj(x); }\n};\n\ntemplate<> struct conj_if<false> {\n  template<typename T>\n  inline const T& 
operator()(const T& x) const { return x; }\n  template<typename T>\n  inline const T& pconj(const T& x) const { return x; }\n};\n\n// Generic implementation for custom complex types.\ntemplate<typename LhsScalar, typename RhsScalar, bool ConjLhs, bool ConjRhs>\nstruct conj_helper\n{\n  typedef typename ScalarBinaryOpTraits<LhsScalar,RhsScalar>::ReturnType Scalar;\n\n  EIGEN_STRONG_INLINE Scalar pmadd(const LhsScalar& x, const RhsScalar& y, const Scalar& c) const\n  { return padd(c, pmul(x,y)); }\n\n  EIGEN_STRONG_INLINE Scalar pmul(const LhsScalar& x, const RhsScalar& y) const\n  { return conj_if<ConjLhs>()(x) *  conj_if<ConjRhs>()(y); }\n};\n\ntemplate<typename Scalar> struct conj_helper<Scalar,Scalar,false,false>\n{\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const { return internal::pmadd(x,y,c); }\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const { return internal::pmul(x,y); }\n};\n\ntemplate<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, false,true>\n{\n  typedef std::complex<RealScalar> Scalar;\n  EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const\n  { return c + pmul(x,y); }\n\n  EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const\n  { return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), numext::imag(x)*numext::real(y) - numext::real(x)*numext::imag(y)); }\n};\n\ntemplate<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,false>\n{\n  typedef std::complex<RealScalar> Scalar;\n  EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const\n  { return c + pmul(x,y); }\n\n  EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const\n  { return Scalar(numext::real(x)*numext::real(y) + numext::imag(x)*numext::imag(y), 
numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); }\n};\n\ntemplate<typename RealScalar> struct conj_helper<std::complex<RealScalar>, std::complex<RealScalar>, true,true>\n{\n  typedef std::complex<RealScalar> Scalar;\n  EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const Scalar& y, const Scalar& c) const\n  { return c + pmul(x,y); }\n\n  EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const Scalar& y) const\n  { return Scalar(numext::real(x)*numext::real(y) - numext::imag(x)*numext::imag(y), - numext::real(x)*numext::imag(y) - numext::imag(x)*numext::real(y)); }\n};\n\ntemplate<typename RealScalar,bool Conj> struct conj_helper<std::complex<RealScalar>, RealScalar, Conj,false>\n{\n  typedef std::complex<RealScalar> Scalar;\n  EIGEN_STRONG_INLINE Scalar pmadd(const Scalar& x, const RealScalar& y, const Scalar& c) const\n  { return padd(c, pmul(x,y)); }\n  EIGEN_STRONG_INLINE Scalar pmul(const Scalar& x, const RealScalar& y) const\n  { return conj_if<Conj>()(x)*y; }\n};\n\ntemplate<typename RealScalar,bool Conj> struct conj_helper<RealScalar, std::complex<RealScalar>, false,Conj>\n{\n  typedef std::complex<RealScalar> Scalar;\n  EIGEN_STRONG_INLINE Scalar pmadd(const RealScalar& x, const Scalar& y, const Scalar& c) const\n  { return padd(c, pmul(x,y)); }\n  EIGEN_STRONG_INLINE Scalar pmul(const RealScalar& x, const Scalar& y) const\n  { return x*conj_if<Conj>()(y); }\n};\n\ntemplate<typename From,typename To> struct get_factor {\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE To run(const From& x) { return To(x); }\n};\n\ntemplate<typename Scalar> struct get_factor<Scalar,typename NumTraits<Scalar>::Real> {\n  EIGEN_DEVICE_FUNC\n  static EIGEN_STRONG_INLINE typename NumTraits<Scalar>::Real run(const Scalar& x) { return numext::real(x); }\n};\n\n\ntemplate<typename Scalar, typename Index>\nclass BlasVectorMapper {\n  public:\n  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasVectorMapper(Scalar *data) : m_data(data) {}\n\n  EIGEN_DEVICE_FUNC 
EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const {\n    return m_data[i];\n  }\n  template <typename Packet, int AlignmentType>\n  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet load(Index i) const {\n    return ploadt<Packet, AlignmentType>(m_data + i);\n  }\n\n  template <typename Packet>\n  EIGEN_DEVICE_FUNC bool aligned(Index i) const {\n    return (UIntPtr(m_data+i)%sizeof(Packet))==0;\n  }\n\n  protected:\n  Scalar* m_data;\n};\n\ntemplate<typename Scalar, typename Index, int AlignmentType>\nclass BlasLinearMapper {\n  public:\n  typedef typename packet_traits<Scalar>::type Packet;\n  typedef typename packet_traits<Scalar>::half HalfPacket;\n\n  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasLinearMapper(Scalar *data) : m_data(data) {}\n\n  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void prefetch(int i) const {\n    internal::prefetch(&operator()(i));\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar& operator()(Index i) const {\n    return m_data[i];\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i) const {\n    return ploadt<Packet, AlignmentType>(m_data + i);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i) const {\n    return ploadt<HalfPacket, AlignmentType>(m_data + i);\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const Packet &p) const {\n    pstoret<Scalar, Packet, AlignmentType>(m_data + i, p);\n  }\n\n  protected:\n  Scalar *m_data;\n};\n\n// Lightweight helper class to access matrix coefficients.\ntemplate<typename Scalar, typename Index, int StorageOrder, int AlignmentType = Unaligned>\nclass blas_data_mapper {\n  public:\n  typedef typename packet_traits<Scalar>::type Packet;\n  typedef typename packet_traits<Scalar>::half HalfPacket;\n\n  typedef BlasLinearMapper<Scalar, Index, AlignmentType> LinearMapper;\n  typedef BlasVectorMapper<Scalar, Index> VectorMapper;\n\n  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper(Scalar* data, Index stride) : 
m_data(data), m_stride(stride) {}\n\n  EIGEN_DEVICE_FUNC  EIGEN_ALWAYS_INLINE blas_data_mapper<Scalar, Index, StorageOrder, AlignmentType>\n  getSubMapper(Index i, Index j) const {\n    return blas_data_mapper<Scalar, Index, StorageOrder, AlignmentType>(&operator()(i, j), m_stride);\n  }\n\n  EIGEN_DEVICE_FUNC  EIGEN_ALWAYS_INLINE LinearMapper getLinearMapper(Index i, Index j) const {\n    return LinearMapper(&operator()(i, j));\n  }\n\n  EIGEN_DEVICE_FUNC  EIGEN_ALWAYS_INLINE VectorMapper getVectorMapper(Index i, Index j) const {\n    return VectorMapper(&operator()(i, j));\n  }\n\n\n  EIGEN_DEVICE_FUNC\n  EIGEN_ALWAYS_INLINE Scalar& operator()(Index i, Index j) const {\n    return m_data[StorageOrder==RowMajor ? j + i*m_stride : i + j*m_stride];\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i, Index j) const {\n    return ploadt<Packet, AlignmentType>(&operator()(i, j));\n  }\n\n  template <typename PacketT, int AlignmentT>\n  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT load(Index i, Index j) const {\n    return ploadt<PacketT, AlignmentT>(&operator()(i, j));\n  }\n\n  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i, Index j) const {\n    return ploadt<HalfPacket, AlignmentType>(&operator()(i, j));\n  }\n\n  template<typename SubPacket>\n  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void scatterPacket(Index i, Index j, const SubPacket &p) const {\n    pscatter<Scalar, SubPacket>(&operator()(i, j), p, m_stride);\n  }\n\n  template<typename SubPacket>\n  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE SubPacket gatherPacket(Index i, Index j) const {\n    return pgather<Scalar, SubPacket>(&operator()(i, j), m_stride);\n  }\n\n  EIGEN_DEVICE_FUNC const Index stride() const { return m_stride; }\n  EIGEN_DEVICE_FUNC const Scalar* data() const { return m_data; }\n\n  EIGEN_DEVICE_FUNC Index firstAligned(Index size) const {\n    if (UIntPtr(m_data)%sizeof(Scalar)) {\n      return -1;\n    }\n    return 
internal::first_default_aligned(m_data, size);\n  }\n\n  protected:\n  Scalar* EIGEN_RESTRICT m_data;\n  const Index m_stride;\n};\n\n// lightweight helper class to access matrix coefficients (const version)\ntemplate<typename Scalar, typename Index, int StorageOrder>\nclass const_blas_data_mapper : public blas_data_mapper<const Scalar, Index, StorageOrder> {\n  public:\n  EIGEN_ALWAYS_INLINE const_blas_data_mapper(const Scalar *data, Index stride) : blas_data_mapper<const Scalar, Index, StorageOrder>(data, stride) {}\n\n  EIGEN_ALWAYS_INLINE const_blas_data_mapper<Scalar, Index, StorageOrder> getSubMapper(Index i, Index j) const {\n    return const_blas_data_mapper<Scalar, Index, StorageOrder>(&(this->operator()(i, j)), this->m_stride);\n  }\n};\n\n\n/* Helper class to analyze the factors of a Product expression.\n * In particular it allows to pop out operator-, scalar multiples,\n * and conjugate */\ntemplate<typename XprType> struct blas_traits\n{\n  typedef typename traits<XprType>::Scalar Scalar;\n  typedef const XprType& ExtractType;\n  typedef XprType _ExtractType;\n  enum {\n    IsComplex = NumTraits<Scalar>::IsComplex,\n    IsTransposed = false,\n    NeedToConjugate = false,\n    HasUsableDirectAccess = (    (int(XprType::Flags)&DirectAccessBit)\n                              && (   bool(XprType::IsVectorAtCompileTime)\n                                  || int(inner_stride_at_compile_time<XprType>::ret) == 1)\n                             ) ?  
1 : 0\n  };\n  typedef typename conditional<bool(HasUsableDirectAccess),\n    ExtractType,\n    typename _ExtractType::PlainObject\n    >::type DirectLinearAccessType;\n  static inline ExtractType extract(const XprType& x) { return x; }\n  static inline const Scalar extractScalarFactor(const XprType&) { return Scalar(1); }\n};\n\n// pop conjugate\ntemplate<typename Scalar, typename NestedXpr>\nstruct blas_traits<CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> >\n : blas_traits<NestedXpr>\n{\n  typedef blas_traits<NestedXpr> Base;\n  typedef CwiseUnaryOp<scalar_conjugate_op<Scalar>, NestedXpr> XprType;\n  typedef typename Base::ExtractType ExtractType;\n\n  enum {\n    IsComplex = NumTraits<Scalar>::IsComplex,\n    NeedToConjugate = Base::NeedToConjugate ? 0 : IsComplex\n  };\n  static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }\n  static inline Scalar extractScalarFactor(const XprType& x) { return conj(Base::extractScalarFactor(x.nestedExpression())); }\n};\n\n// pop scalar multiple\ntemplate<typename Scalar, typename NestedXpr, typename Plain>\nstruct blas_traits<CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain>, NestedXpr> >\n : blas_traits<NestedXpr>\n{\n  typedef blas_traits<NestedXpr> Base;\n  typedef CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain>, NestedXpr> XprType;\n  typedef typename Base::ExtractType ExtractType;\n  static inline ExtractType extract(const XprType& x) { return Base::extract(x.rhs()); }\n  static inline Scalar extractScalarFactor(const XprType& x)\n  { return x.lhs().functor().m_other * Base::extractScalarFactor(x.rhs()); }\n};\ntemplate<typename Scalar, typename NestedXpr, typename Plain>\nstruct blas_traits<CwiseBinaryOp<scalar_product_op<Scalar>, NestedXpr, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain> > >\n : blas_traits<NestedXpr>\n{\n  typedef blas_traits<NestedXpr> Base;\n  
typedef CwiseBinaryOp<scalar_product_op<Scalar>, NestedXpr, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain> > XprType;\n  typedef typename Base::ExtractType ExtractType;\n  static inline ExtractType extract(const XprType& x) { return Base::extract(x.lhs()); }\n  static inline Scalar extractScalarFactor(const XprType& x)\n  { return Base::extractScalarFactor(x.lhs()) * x.rhs().functor().m_other; }\n};\ntemplate<typename Scalar, typename Plain1, typename Plain2>\nstruct blas_traits<CwiseBinaryOp<scalar_product_op<Scalar>, const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain1>,\n                                                            const CwiseNullaryOp<scalar_constant_op<Scalar>,Plain2> > >\n : blas_traits<CwiseNullaryOp<scalar_constant_op<Scalar>,Plain1> >\n{};\n\n// pop opposite\ntemplate<typename Scalar, typename NestedXpr>\nstruct blas_traits<CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> >\n : blas_traits<NestedXpr>\n{\n  typedef blas_traits<NestedXpr> Base;\n  typedef CwiseUnaryOp<scalar_opposite_op<Scalar>, NestedXpr> XprType;\n  typedef typename Base::ExtractType ExtractType;\n  static inline ExtractType extract(const XprType& x) { return Base::extract(x.nestedExpression()); }\n  static inline Scalar extractScalarFactor(const XprType& x)\n  { return - Base::extractScalarFactor(x.nestedExpression()); }\n};\n\n// pop/push transpose\ntemplate<typename NestedXpr>\nstruct blas_traits<Transpose<NestedXpr> >\n : blas_traits<NestedXpr>\n{\n  typedef typename NestedXpr::Scalar Scalar;\n  typedef blas_traits<NestedXpr> Base;\n  typedef Transpose<NestedXpr> XprType;\n  typedef Transpose<const typename Base::_ExtractType>  ExtractType; // const to get rid of a compile error; anyway blas traits are only used on the RHS\n  typedef Transpose<const typename Base::_ExtractType> _ExtractType;\n  typedef typename conditional<bool(Base::HasUsableDirectAccess),\n    ExtractType,\n    typename ExtractType::PlainObject\n    >::type DirectLinearAccessType;\n  enum 
{\n    IsTransposed = Base::IsTransposed ? 0 : 1\n  };\n  static inline ExtractType extract(const XprType& x) { return ExtractType(Base::extract(x.nestedExpression())); }\n  static inline Scalar extractScalarFactor(const XprType& x) { return Base::extractScalarFactor(x.nestedExpression()); }\n};\n\ntemplate<typename T>\nstruct blas_traits<const T>\n     : blas_traits<T>\n{};\n\ntemplate<typename T, bool HasUsableDirectAccess=blas_traits<T>::HasUsableDirectAccess>\nstruct extract_data_selector {\n  static const typename T::Scalar* run(const T& m)\n  {\n    return blas_traits<T>::extract(m).data();\n  }\n};\n\ntemplate<typename T>\nstruct extract_data_selector<T,false> {\n  static typename T::Scalar* run(const T&) { return 0; }\n};\n\ntemplate<typename T> const typename T::Scalar* extract_data(const T& m)\n{\n  return extract_data_selector<T>::run(m);\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_BLASUTIL_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/Constants.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2007-2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_CONSTANTS_H\n#define EIGEN_CONSTANTS_H\n\nnamespace Eigen {\n\n/** This value means that a positive quantity (e.g., a size) is not known at compile-time, and that instead the value is\n  * stored in some runtime variable.\n  *\n  * Changing the value of Dynamic breaks the ABI, as Dynamic is often used as a template parameter for Matrix.\n  */\nconst int Dynamic = -1;\n\n/** This value means that a signed quantity (e.g., a signed index) is not known at compile-time, and that instead its value\n  * has to be specified at runtime.\n  */\nconst int DynamicIndex = 0xffffff;\n\n/** This value means that the increment to go from one value to another in a sequence is not constant for each step.\n  */\nconst int UndefinedIncr = 0xfffffe;\n\n/** This value means +Infinity; it is currently used only as the p parameter to MatrixBase::lpNorm<int>().\n  * The value Infinity there means the L-infinity norm.\n  */\nconst int Infinity = -1;\n\n/** This value means that the cost to evaluate an expression coefficient is either very expensive or\n  * cannot be known at compile time.\n  *\n  * This value has to be positive to (1) simplify cost computation, and (2) allow to distinguish between a very expensive and very very expensive expressions.\n  * It thus must also be large enough to make sure unrolling won't happen and that sub expressions will be evaluated, but not too large to avoid overflow.\n  */\nconst int HugeCost = 10000;\n\n/** \\defgroup flags Flags\n  * \\ingroup Core_Module\n  *\n  * These are the possible 
bits which can be OR'ed to constitute the flags of a matrix or\n  * expression.\n  *\n  * It is important to note that these flags are a purely compile-time notion. They are a compile-time property of\n  * an expression type, implemented as enum's. They are not stored in memory at runtime, and they do not incur any\n  * runtime overhead.\n  *\n  * \\sa MatrixBase::Flags\n  */\n\n/** \\ingroup flags\n  *\n  * for a matrix, this means that the storage order is row-major.\n  * If this bit is not set, the storage order is column-major.\n  * For an expression, this determines the storage order of\n  * the matrix created by evaluation of that expression.\n  * \\sa \\blank  \\ref TopicStorageOrders */\nconst unsigned int RowMajorBit = 0x1;\n\n/** \\ingroup flags\n  * means the expression should be evaluated by the calling expression */\nconst unsigned int EvalBeforeNestingBit = 0x2;\n\n/** \\ingroup flags\n  * \\deprecated\n  * means the expression should be evaluated before any assignment */\nEIGEN_DEPRECATED\nconst unsigned int EvalBeforeAssigningBit = 0x4; // FIXME deprecated\n\n/** \\ingroup flags\n  *\n  * Short version: means the expression might be vectorized\n  *\n  * Long version: means that the coefficients can be handled by packets\n  * and start at a memory location whose alignment meets the requirements\n  * of the present CPU architecture for optimized packet access. In the fixed-size\n  * case, there is the additional condition that it be possible to access all the\n  * coefficients by packets (this implies the requirement that the size be a multiple of 16 bytes,\n  * and that any nontrivial strides don't break the alignment). 
In the dynamic-size case,\n  * there is no such condition on the total size and strides, so it might not be possible to access\n  * all coeffs by packets.\n  *\n  * \\note This bit can be set regardless of whether vectorization is actually enabled.\n  *       To check for actual vectorizability, see \\a ActualPacketAccessBit.\n  */\nconst unsigned int PacketAccessBit = 0x8;\n\n#ifdef EIGEN_VECTORIZE\n/** \\ingroup flags\n  *\n  * If vectorization is enabled (EIGEN_VECTORIZE is defined) this constant\n  * is set to the value \\a PacketAccessBit.\n  *\n  * If vectorization is not enabled (EIGEN_VECTORIZE is not defined) this constant\n  * is set to the value 0.\n  */\nconst unsigned int ActualPacketAccessBit = PacketAccessBit;\n#else\nconst unsigned int ActualPacketAccessBit = 0x0;\n#endif\n\n/** \\ingroup flags\n  *\n  * Short version: means the expression can be seen as 1D vector.\n  *\n  * Long version: means that one can access the coefficients\n  * of this expression by coeff(int), and coeffRef(int) in the case of a lvalue expression. These\n  * index-based access methods are guaranteed\n  * to not have to do any runtime computation of a (row, col)-pair from the index, so that it\n  * is guaranteed that whenever it is available, index-based access is at least as fast as\n  * (row,col)-based access. Expressions for which that isn't possible don't have the LinearAccessBit.\n  *\n  * If both PacketAccessBit and LinearAccessBit are set, then the\n  * packets of this expression can be accessed by packet(int), and writePacket(int) in the case of a\n  * lvalue expression.\n  *\n  * Typically, all vector expressions have the LinearAccessBit, but there is one exception:\n  * Product expressions don't have it, because it would be troublesome for vectorization, even when the\n  * Product is a vector expression. 
Thus, vector Product expressions allow index-based coefficient access but\n  * not index-based packet access, so they don't have the LinearAccessBit.\n  */\nconst unsigned int LinearAccessBit = 0x10;\n\n/** \\ingroup flags\n  *\n  * Means the expression has a coeffRef() method, i.e. is writable as its individual coefficients are directly addressable.\n  * This rules out read-only expressions.\n  *\n  * Note that DirectAccessBit and LvalueBit are mutually orthogonal, as there are examples of expression having one but note\n  * the other:\n  *   \\li writable expressions that don't have a very simple memory layout as a strided array, have LvalueBit but not DirectAccessBit\n  *   \\li Map-to-const expressions, for example Map<const Matrix>, have DirectAccessBit but not LvalueBit\n  *\n  * Expressions having LvalueBit also have their coeff() method returning a const reference instead of returning a new value.\n  */\nconst unsigned int LvalueBit = 0x20;\n\n/** \\ingroup flags\n  *\n  * Means that the underlying array of coefficients can be directly accessed as a plain strided array. The memory layout\n  * of the array of coefficients must be exactly the natural one suggested by rows(), cols(),\n  * outerStride(), innerStride(), and the RowMajorBit. 
This rules out expressions such as Diagonal, whose coefficients,\n  * though referencable, do not have such a regular memory layout.\n  *\n  * See the comment on LvalueBit for an explanation of how LvalueBit and DirectAccessBit are mutually orthogonal.\n  */\nconst unsigned int DirectAccessBit = 0x40;\n\n/** \\deprecated \\ingroup flags\n  *\n  * means the first coefficient packet is guaranteed to be aligned.\n  * An expression cannot has the AlignedBit without the PacketAccessBit flag.\n  * In other words, this means we are allow to perform an aligned packet access to the first element regardless\n  * of the expression kind:\n  * \\code\n  * expression.packet<Aligned>(0);\n  * \\endcode\n  */\nEIGEN_DEPRECATED const unsigned int AlignedBit = 0x80;\n\nconst unsigned int NestByRefBit = 0x100;\n\n/** \\ingroup flags\n  *\n  * for an expression, this means that the storage order\n  * can be either row-major or column-major.\n  * The precise choice will be decided at evaluation time or when\n  * combined with other expressions.\n  * \\sa \\blank  \\ref RowMajorBit, \\ref TopicStorageOrders */\nconst unsigned int NoPreferredStorageOrderBit = 0x200;\n\n/** \\ingroup flags\n  *\n  * Means that the underlying coefficients can be accessed through pointers to the sparse (un)compressed storage format,\n  * that is, the expression provides:\n  * \\code\n    inline const Scalar* valuePtr() const;\n    inline const Index* innerIndexPtr() const;\n    inline const Index* outerIndexPtr() const;\n    inline const Index* innerNonZeroPtr() const;\n    \\endcode\n  */\nconst unsigned int CompressedAccessBit = 0x400;\n\n\n// list of flags that are inherited by default\nconst unsigned int HereditaryBits = RowMajorBit\n                                  | EvalBeforeNestingBit;\n\n/** \\defgroup enums Enumerations\n  * \\ingroup Core_Module\n  *\n  * Various enumerations used in %Eigen. 
Many of these are used as template parameters.\n  */\n\n/** \\ingroup enums\n  * Enum containing possible values for the \\c Mode or \\c UpLo parameter of\n  * MatrixBase::selfadjointView() and MatrixBase::triangularView(), and selfadjoint solvers. */\nenum UpLoType {\n  /** View matrix as a lower triangular matrix. */\n  Lower=0x1,                      \n  /** View matrix as an upper triangular matrix. */\n  Upper=0x2,                      \n  /** %Matrix has ones on the diagonal; to be used in combination with #Lower or #Upper. */\n  UnitDiag=0x4, \n  /** %Matrix has zeros on the diagonal; to be used in combination with #Lower or #Upper. */\n  ZeroDiag=0x8,\n  /** View matrix as a lower triangular matrix with ones on the diagonal. */\n  UnitLower=UnitDiag|Lower, \n  /** View matrix as an upper triangular matrix with ones on the diagonal. */\n  UnitUpper=UnitDiag|Upper,\n  /** View matrix as a lower triangular matrix with zeros on the diagonal. */\n  StrictlyLower=ZeroDiag|Lower, \n  /** View matrix as an upper triangular matrix with zeros on the diagonal. */\n  StrictlyUpper=ZeroDiag|Upper,\n  /** Used in BandMatrix and SelfAdjointView to indicate that the matrix is self-adjoint. */\n  SelfAdjoint=0x10,\n  /** Used to support symmetric, non-selfadjoint, complex matrices. */\n  Symmetric=0x20\n};\n\n/** \\ingroup enums\n  * Enum for indicating whether a buffer is aligned or not. */\nenum AlignmentType {\n  Unaligned=0,        /**< Data pointer has no specific alignment. */\n  Aligned8=8,         /**< Data pointer is aligned on a 8 bytes boundary. */\n  Aligned16=16,       /**< Data pointer is aligned on a 16 bytes boundary. */\n  Aligned32=32,       /**< Data pointer is aligned on a 32 bytes boundary. */\n  Aligned64=64,       /**< Data pointer is aligned on a 64 bytes boundary. */\n  Aligned128=128,     /**< Data pointer is aligned on a 128 bytes boundary. */\n  AlignedMask=255,\n  Aligned=16,         /**< \\deprecated Synonym for Aligned16. 
*/\n#if EIGEN_MAX_ALIGN_BYTES==128\n  AlignedMax = Aligned128\n#elif EIGEN_MAX_ALIGN_BYTES==64\n  AlignedMax = Aligned64\n#elif EIGEN_MAX_ALIGN_BYTES==32\n  AlignedMax = Aligned32\n#elif EIGEN_MAX_ALIGN_BYTES==16\n  AlignedMax = Aligned16\n#elif EIGEN_MAX_ALIGN_BYTES==8\n  AlignedMax = Aligned8\n#elif EIGEN_MAX_ALIGN_BYTES==0\n  AlignedMax = Unaligned\n#else\n#error Invalid value for EIGEN_MAX_ALIGN_BYTES\n#endif\n};\n\n/** \\ingroup enums\n * Enum used by DenseBase::corner() in Eigen2 compatibility mode. */\n// FIXME after the corner() API change, this was not needed anymore, except by AlignedBox\n// TODO: find out what to do with that. Adapt the AlignedBox API ?\nenum CornerType { TopLeft, TopRight, BottomLeft, BottomRight };\n\n/** \\ingroup enums\n  * Enum containing possible values for the \\p Direction parameter of\n  * Reverse, PartialReduxExpr and VectorwiseOp. */\nenum DirectionType { \n  /** For Reverse, all columns are reversed; \n    * for PartialReduxExpr and VectorwiseOp, act on columns. */\n  Vertical, \n  /** For Reverse, all rows are reversed; \n    * for PartialReduxExpr and VectorwiseOp, act on rows. */\n  Horizontal, \n  /** For Reverse, both rows and columns are reversed; \n    * not used for PartialReduxExpr and VectorwiseOp. */\n  BothDirections \n};\n\n/** \\internal \\ingroup enums\n  * Enum to specify how to traverse the entries of a matrix. 
*/\nenum TraversalType {\n  /** \\internal Default traversal, no vectorization, no index-based access */\n  DefaultTraversal,\n  /** \\internal No vectorization, use index-based access to have only one for loop instead of 2 nested loops */\n  LinearTraversal,\n  /** \\internal Equivalent to a slice vectorization for fixed-size matrices having good alignment\n    * and good size */\n  InnerVectorizedTraversal,\n  /** \\internal Vectorization path using a single loop plus scalar loops for the\n    * unaligned boundaries */\n  LinearVectorizedTraversal,\n  /** \\internal Generic vectorization path using one vectorized loop per row/column with some\n    * scalar loops to handle the unaligned boundaries */\n  SliceVectorizedTraversal,\n  /** \\internal Special case to properly handle incompatible scalar types or other defecting cases*/\n  InvalidTraversal,\n  /** \\internal Evaluate all entries at once */\n  AllAtOnceTraversal\n};\n\n/** \\internal \\ingroup enums\n  * Enum to specify whether to unroll loops when traversing over the entries of a matrix. */\nenum UnrollingType {\n  /** \\internal Do not unroll loops. */\n  NoUnrolling,\n  /** \\internal Unroll only the inner loop, but not the outer loop. */\n  InnerUnrolling,\n  /** \\internal Unroll both the inner and the outer loop. If there is only one loop, \n    * because linear traversal is used, then unroll that loop. */\n  CompleteUnrolling\n};\n\n/** \\internal \\ingroup enums\n  * Enum to specify whether to use the default (built-in) implementation or the specialization. */\nenum SpecializedType {\n  Specialized,\n  BuiltIn\n};\n\n/** \\ingroup enums\n  * Enum containing possible values for the \\p _Options template parameter of\n  * Matrix, Array and BandMatrix. */\nenum StorageOptions {\n  /** Storage order is column major (see \\ref TopicStorageOrders). */\n  ColMajor = 0,\n  /** Storage order is row major (see \\ref TopicStorageOrders). 
*/\n  RowMajor = 0x1,  // it is only a coincidence that this is equal to RowMajorBit -- don't rely on that\n  /** Align the matrix itself if it is vectorizable fixed-size */\n  AutoAlign = 0,\n  /** Don't require alignment for the matrix itself (the array of coefficients, if dynamically allocated, may still be requested to be aligned) */ // FIXME --- clarify the situation\n  DontAlign = 0x2\n};\n\n/** \\ingroup enums\n  * Enum for specifying whether to apply or solve on the left or right. */\nenum SideType {\n  /** Apply transformation on the left. */\n  OnTheLeft = 1,  \n  /** Apply transformation on the right. */\n  OnTheRight = 2  \n};\n\n/* the following used to be written as:\n *\n *   struct NoChange_t {};\n *   namespace {\n *     EIGEN_UNUSED NoChange_t NoChange;\n *   }\n *\n * on the ground that it feels dangerous to disambiguate overloaded functions on enum/integer types.  \n * However, this leads to \"variable declared but never referenced\" warnings on Intel Composer XE,\n * and we do not know how to get rid of them (bug 450).\n */\n\nenum NoChange_t   { NoChange };\nenum Sequential_t { Sequential };\nenum Default_t    { Default };\n\n/** \\internal \\ingroup enums\n  * Used in AmbiVector. */\nenum AmbiVectorMode {\n  IsDense         = 0,\n  IsSparse\n};\n\n/** \\ingroup enums\n  * Used as template parameter in DenseCoeffBase and MapBase to indicate \n  * which accessors should be provided. */\nenum AccessorLevels {\n  /** Read-only access via a member function. */\n  ReadOnlyAccessors, \n  /** Read/write access via member functions. */\n  WriteAccessors, \n  /** Direct read-only access to the coefficients. */\n  DirectAccessors, \n  /** Direct read/write access to the coefficients. */\n  DirectWriteAccessors\n};\n\n/** \\ingroup enums\n  * Enum with options to give to various decompositions. */\nenum DecompositionOptions {\n  /** \\internal Not used (meant for LDLT?). */\n  Pivoting            = 0x01, \n  /** \\internal Not used (meant for LDLT?). 
*/\n  NoPivoting          = 0x02, \n  /** Used in JacobiSVD to indicate that the square matrix U is to be computed. */\n  ComputeFullU        = 0x04,\n  /** Used in JacobiSVD to indicate that the thin matrix U is to be computed. */\n  ComputeThinU        = 0x08,\n  /** Used in JacobiSVD to indicate that the square matrix V is to be computed. */\n  ComputeFullV        = 0x10,\n  /** Used in JacobiSVD to indicate that the thin matrix V is to be computed. */\n  ComputeThinV        = 0x20,\n  /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify\n    * that only the eigenvalues are to be computed and not the eigenvectors. */\n  EigenvaluesOnly     = 0x40,\n  /** Used in SelfAdjointEigenSolver and GeneralizedSelfAdjointEigenSolver to specify\n    * that both the eigenvalues and the eigenvectors are to be computed. */\n  ComputeEigenvectors = 0x80,\n  /** \\internal */\n  EigVecMask = EigenvaluesOnly | ComputeEigenvectors,\n  /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should\n    * solve the generalized eigenproblem \\f$ Ax = \\lambda B x \\f$. */\n  Ax_lBx              = 0x100,\n  /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should\n    * solve the generalized eigenproblem \\f$ ABx = \\lambda x \\f$. */\n  ABx_lx              = 0x200,\n  /** Used in GeneralizedSelfAdjointEigenSolver to indicate that it should\n    * solve the generalized eigenproblem \\f$ BAx = \\lambda x \\f$. */\n  BAx_lx              = 0x400,\n  /** \\internal */\n  GenEigMask = Ax_lBx | ABx_lx | BAx_lx\n};\n\n/** \\ingroup enums\n  * Possible values for the \\p QRPreconditioner template parameter of JacobiSVD. */\nenum QRPreconditioners {\n  /** Do not specify what is to be done if the SVD of a non-square matrix is asked for. */\n  NoQRPreconditioner,\n  /** Use a QR decomposition without pivoting as the first step. */\n  HouseholderQRPreconditioner,\n  /** Use a QR decomposition with column pivoting as the first step. 
*/\n  ColPivHouseholderQRPreconditioner,\n  /** Use a QR decomposition with full pivoting as the first step. */\n  FullPivHouseholderQRPreconditioner\n};\n\n#ifdef Success\n#error The preprocessor symbol 'Success' is defined, possibly by the X11 header file X.h\n#endif\n\n/** \\ingroup enums\n  * Enum for reporting the status of a computation. */\nenum ComputationInfo {\n  /** Computation was successful. */\n  Success = 0,        \n  /** The provided data did not satisfy the prerequisites. */\n  NumericalIssue = 1, \n  /** Iterative procedure did not converge. */\n  NoConvergence = 2,\n  /** The inputs are invalid, or the algorithm has been improperly called.\n    * When assertions are enabled, such errors trigger an assert. */\n  InvalidInput = 3\n};\n\n/** \\ingroup enums\n  * Enum used to specify how a particular transformation is stored in a matrix.\n  * \\sa Transform, Hyperplane::transform(). */\nenum TransformTraits {\n  /** Transformation is an isometry. */\n  Isometry      = 0x1,\n  /** Transformation is an affine transformation stored as a (Dim+1)^2 matrix whose last row is \n    * assumed to be [0 ... 0 1]. */\n  Affine        = 0x2,\n  /** Transformation is an affine transformation stored as a (Dim) x (Dim+1) matrix. */\n  AffineCompact = 0x10 | Affine,\n  /** Transformation is a general projective transformation stored as a (Dim+1)^2 matrix. */\n  Projective    = 0x20\n};\n\n/** \\internal \\ingroup enums\n  * Enum used to choose between implementation depending on the computer architecture. 
*/\nnamespace Architecture\n{\n  enum Type {\n    Generic = 0x0,\n    SSE = 0x1,\n    AltiVec = 0x2,\n    VSX = 0x3,\n    NEON = 0x4,\n#if defined EIGEN_VECTORIZE_SSE\n    Target = SSE\n#elif defined EIGEN_VECTORIZE_ALTIVEC\n    Target = AltiVec\n#elif defined EIGEN_VECTORIZE_VSX\n    Target = VSX\n#elif defined EIGEN_VECTORIZE_NEON\n    Target = NEON\n#else\n    Target = Generic\n#endif\n  };\n}\n\n/** \\internal \\ingroup enums\n  * Enum used as template parameter in Product and product evaluators. */\nenum ProductImplType\n{ DefaultProduct=0, LazyProduct, AliasFreeProduct, CoeffBasedProductMode, LazyCoeffBasedProductMode, OuterProduct, InnerProduct, GemvProduct, GemmProduct };\n\n/** \\internal \\ingroup enums\n  * Enum used in experimental parallel implementation. */\nenum Action {GetAction, SetAction};\n\n/** The type used to identify a dense storage. */\nstruct Dense {};\n\n/** The type used to identify a general sparse storage. */\nstruct Sparse {};\n\n/** The type used to identify a general solver (factored) storage. */\nstruct SolverStorage {};\n\n/** The type used to identify a permutation storage. */\nstruct PermutationStorage {};\n\n/** The type used to identify a permutation storage. */\nstruct TranspositionsStorage {};\n\n/** The type used to identify a matrix expression */\nstruct MatrixXpr {};\n\n/** The type used to identify an array expression */\nstruct ArrayXpr {};\n\n// An evaluator must define its shape. 
By default, it can be one of the following:\nstruct DenseShape             { static std::string debugName() { return \"DenseShape\"; } };\nstruct SolverShape            { static std::string debugName() { return \"SolverShape\"; } };\nstruct HomogeneousShape       { static std::string debugName() { return \"HomogeneousShape\"; } };\nstruct DiagonalShape          { static std::string debugName() { return \"DiagonalShape\"; } };\nstruct BandShape              { static std::string debugName() { return \"BandShape\"; } };\nstruct TriangularShape        { static std::string debugName() { return \"TriangularShape\"; } };\nstruct SelfAdjointShape       { static std::string debugName() { return \"SelfAdjointShape\"; } };\nstruct PermutationShape       { static std::string debugName() { return \"PermutationShape\"; } };\nstruct TranspositionsShape    { static std::string debugName() { return \"TranspositionsShape\"; } };\nstruct SparseShape            { static std::string debugName() { return \"SparseShape\"; } };\n\nnamespace internal {\n\n  // random access iterators based on coeff*() accessors.\nstruct IndexBased {};\n\n// evaluator based on iterators to access coefficients. \nstruct IteratorBased {};\n\n/** \\internal\n * Constants for comparison functors\n */\nenum ComparisonName {\n  cmp_EQ = 0,\n  cmp_LT = 1,\n  cmp_LE = 2,\n  cmp_UNORD = 3,\n  cmp_NEQ = 4,\n  cmp_GT = 5,\n  cmp_GE = 6\n};\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_CONSTANTS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/DisableStupidWarnings.h",
    "content": "#ifndef EIGEN_WARNINGS_DISABLED\n#define EIGEN_WARNINGS_DISABLED\n\n#ifdef _MSC_VER\n  // 4100 - unreferenced formal parameter (occurred e.g. in aligned_allocator::destroy(pointer p))\n  // 4101 - unreferenced local variable\n  // 4181 - qualifier applied to reference type ignored\n  // 4211 - nonstandard extension used : redefined extern to static\n  // 4244 - 'argument' : conversion from 'type1' to 'type2', possible loss of data\n  // 4273 - QtAlignedMalloc, inconsistent DLL linkage\n  // 4324 - structure was padded due to declspec(align())\n  // 4503 - decorated name length exceeded, name was truncated\n  // 4512 - assignment operator could not be generated\n  // 4522 - 'class' : multiple assignment operators specified\n  // 4700 - uninitialized local variable 'xyz' used\n  // 4714 - function marked as __forceinline not inlined\n  // 4717 - 'function' : recursive on all control paths, function will cause runtime stack overflow\n  // 4800 - 'type' : forcing value to bool 'true' or 'false' (performance warning)\n  #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS\n    #pragma warning( push )\n  #endif\n  #pragma warning( disable : 4100 4101 4181 4211 4244 4273 4324 4503 4512 4522 4700 4714 4717 4800)\n\n#elif defined __INTEL_COMPILER\n  // 2196 - routine is both \"inline\" and \"noinline\" (\"noinline\" assumed)\n  //        ICC 12 generates this warning even without any inline keyword, when defining class methods 'inline' i.e. 
inside of class body\n  //        typedef that may be a reference type.\n  // 279  - controlling expression is constant\n  //        ICC 12 generates this warning on assert(constant_expression_depending_on_template_params) and frankly this is a legitimate use case.\n  // 1684 - conversion from pointer to same-sized integral type (potential portability problem)\n  // 2259 - non-pointer conversion from \"Eigen::Index={ptrdiff_t={long}}\" to \"int\" may lose significant bits\n  #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS\n    #pragma warning push\n  #endif\n  #pragma warning disable 2196 279 1684 2259\n\n#elif defined __clang__\n  // -Wconstant-logical-operand - warning: use of logical && with constant operand; switch to bitwise & or remove constant\n  //     this is really a stupid warning as it warns on compile-time expressions involving enums\n  #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS\n    #pragma clang diagnostic push\n  #endif\n  #pragma clang diagnostic ignored \"-Wconstant-logical-operand\"\n  #if __clang_major__ >= 3 && __clang_minor__ >= 5\n    #pragma clang diagnostic ignored \"-Wabsolute-value\"\n  #endif\n\n#elif defined __GNUC__ && __GNUC__>=6\n\n  #ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS\n    #pragma GCC diagnostic push\n  #endif\n  #pragma GCC diagnostic ignored \"-Wignored-attributes\"\n\n#endif\n\n#if defined __NVCC__\n  // Disable the \"statement is unreachable\" message\n  #pragma diag_suppress code_is_unreachable\n  // Disable the \"dynamic initialization in unreachable code\" message\n  #pragma diag_suppress initialization_not_reachable\n  // Disable the \"invalid error number\" message that we get with older versions of nvcc\n  #pragma diag_suppress 1222\n  // Disable the \"calling a __host__ function from a __host__ __device__ function is not allowed\" messages (yes, there are many of them and they seem to change with every version of the compiler)\n  #pragma diag_suppress 2527\n  #pragma diag_suppress 2529\n  #pragma 
diag_suppress 2651\n  #pragma diag_suppress 2653\n  #pragma diag_suppress 2668\n  #pragma diag_suppress 2669\n  #pragma diag_suppress 2670\n  #pragma diag_suppress 2671\n  #pragma diag_suppress 2735\n  #pragma diag_suppress 2737\n#endif\n\n#endif // not EIGEN_WARNINGS_DISABLED\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/ForwardDeclarations.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2007-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_FORWARDDECLARATIONS_H\n#define EIGEN_FORWARDDECLARATIONS_H\n\nnamespace Eigen {\nnamespace internal {\n\ntemplate<typename T> struct traits;\n\n// here we say once and for all that traits<const T> == traits<T>\n// When constness must affect traits, it has to be constness on template parameters on which T itself depends.\n// For example, traits<Map<const T> > != traits<Map<T> >, but\n//              traits<const Map<T> > == traits<Map<T> >\ntemplate<typename T> struct traits<const T> : traits<T> {};\n\ntemplate<typename Derived> struct has_direct_access\n{\n  enum { ret = (traits<Derived>::Flags & DirectAccessBit) ? 1 : 0 };\n};\n\ntemplate<typename Derived> struct accessors_level\n{\n  enum { has_direct_access = (traits<Derived>::Flags & DirectAccessBit) ? 1 : 0,\n         has_write_access = (traits<Derived>::Flags & LvalueBit) ? 1 : 0,\n         value = has_direct_access ? (has_write_access ? DirectWriteAccessors : DirectAccessors)\n                                   : (has_write_access ? 
WriteAccessors       : ReadOnlyAccessors)\n  };\n};\n\ntemplate<typename T> struct evaluator_traits;\n\ntemplate< typename T> struct evaluator;\n\n} // end namespace internal\n\ntemplate<typename T> struct NumTraits;\n\ntemplate<typename Derived> struct EigenBase;\ntemplate<typename Derived> class DenseBase;\ntemplate<typename Derived> class PlainObjectBase;\n\n\ntemplate<typename Derived,\n         int Level = internal::accessors_level<Derived>::value >\nclass DenseCoeffsBase;\n\ntemplate<typename _Scalar, int _Rows, int _Cols,\n         int _Options = AutoAlign |\n#if EIGEN_GNUC_AT(3,4)\n    // workaround a bug in at least gcc 3.4.6\n    // the innermost ?: ternary operator is misparsed. We write it slightly\n    // differently and this makes gcc 3.4.6 happy, but it's ugly.\n    // The error would only show up with EIGEN_DEFAULT_TO_ROW_MAJOR is defined\n    // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)\n                          ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor\n                          : !(_Cols==1 && _Rows!=1) ?  EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION\n                          : Eigen::ColMajor ),\n#else\n                          ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor\n                          : (_Cols==1 && _Rows!=1) ? 
Eigen::ColMajor\n                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),\n#endif\n         int _MaxRows = _Rows,\n         int _MaxCols = _Cols\n> class Matrix;\n\ntemplate<typename Derived> class MatrixBase;\ntemplate<typename Derived> class ArrayBase;\n\ntemplate<typename ExpressionType, unsigned int Added, unsigned int Removed> class Flagged;\ntemplate<typename ExpressionType, template <typename> class StorageBase > class NoAlias;\ntemplate<typename ExpressionType> class NestByValue;\ntemplate<typename ExpressionType> class ForceAlignedAccess;\ntemplate<typename ExpressionType> class SwapWrapper;\n\ntemplate<typename XprType, int BlockRows=Dynamic, int BlockCols=Dynamic, bool InnerPanel = false> class Block;\ntemplate<typename XprType, typename RowIndices, typename ColIndices> class IndexedView;\n\ntemplate<typename MatrixType, int Size=Dynamic> class VectorBlock;\ntemplate<typename MatrixType> class Transpose;\ntemplate<typename MatrixType> class Conjugate;\ntemplate<typename NullaryOp, typename MatrixType>         class CwiseNullaryOp;\ntemplate<typename UnaryOp,   typename MatrixType>         class CwiseUnaryOp;\ntemplate<typename ViewOp,    typename MatrixType>         class CwiseUnaryView;\ntemplate<typename BinaryOp,  typename Lhs, typename Rhs>  class CwiseBinaryOp;\ntemplate<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>  class CwiseTernaryOp;\ntemplate<typename Decomposition, typename Rhstype>        class Solve;\ntemplate<typename XprType>                                class Inverse;\n\ntemplate<typename Lhs, typename Rhs, int Option = DefaultProduct> class Product;\n\ntemplate<typename Derived> class DiagonalBase;\ntemplate<typename _DiagonalVectorType> class DiagonalWrapper;\ntemplate<typename _Scalar, int SizeAtCompileTime, int MaxSizeAtCompileTime=SizeAtCompileTime> class DiagonalMatrix;\ntemplate<typename MatrixType, typename DiagonalType, int ProductOrder> class DiagonalProduct;\ntemplate<typename MatrixType, 
int Index = 0> class Diagonal;\ntemplate<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime, typename IndexType=int> class PermutationMatrix;\ntemplate<int SizeAtCompileTime, int MaxSizeAtCompileTime = SizeAtCompileTime, typename IndexType=int> class Transpositions;\ntemplate<typename Derived> class PermutationBase;\ntemplate<typename Derived> class TranspositionsBase;\ntemplate<typename _IndicesType> class PermutationWrapper;\ntemplate<typename _IndicesType> class TranspositionsWrapper;\n\ntemplate<typename Derived,\n         int Level = internal::accessors_level<Derived>::has_write_access ? WriteAccessors : ReadOnlyAccessors\n> class MapBase;\ntemplate<int InnerStrideAtCompileTime, int OuterStrideAtCompileTime> class Stride;\ntemplate<int Value = Dynamic> class InnerStride;\ntemplate<int Value = Dynamic> class OuterStride;\ntemplate<typename MatrixType, int MapOptions=Unaligned, typename StrideType = Stride<0,0> > class Map;\ntemplate<typename Derived> class RefBase;\ntemplate<typename PlainObjectType, int Options = 0,\n         typename StrideType = typename internal::conditional<PlainObjectType::IsVectorAtCompileTime,InnerStride<1>,OuterStride<> >::type > class Ref;\n\ntemplate<typename Derived> class TriangularBase;\ntemplate<typename MatrixType, unsigned int Mode> class TriangularView;\ntemplate<typename MatrixType, unsigned int Mode> class SelfAdjointView;\ntemplate<typename MatrixType> class SparseView;\ntemplate<typename ExpressionType> class WithFormat;\ntemplate<typename MatrixType> struct CommaInitializer;\ntemplate<typename Derived> class ReturnByValue;\ntemplate<typename ExpressionType> class ArrayWrapper;\ntemplate<typename ExpressionType> class MatrixWrapper;\ntemplate<typename Derived> class SolverBase;\ntemplate<typename XprType> class InnerIterator;\n\nnamespace internal {\ntemplate<typename DecompositionType> struct kernel_retval_base;\ntemplate<typename DecompositionType> struct kernel_retval;\ntemplate<typename 
DecompositionType> struct image_retval_base;\ntemplate<typename DecompositionType> struct image_retval;\n} // end namespace internal\n\nnamespace internal {\ntemplate<typename _Scalar, int Rows=Dynamic, int Cols=Dynamic, int Supers=Dynamic, int Subs=Dynamic, int Options=0> class BandMatrix;\n}\n\nnamespace internal {\ntemplate<typename Lhs, typename Rhs> struct product_type;\n\ntemplate<bool> struct EnableIf;\n\n/** \\internal\n  * \\class product_evaluator\n  * Products need their own evaluator with more template arguments allowing for\n  * easier partial template specializations.\n  */\ntemplate< typename T,\n          int ProductTag = internal::product_type<typename T::Lhs,typename T::Rhs>::ret,\n          typename LhsShape = typename evaluator_traits<typename T::Lhs>::Shape,\n          typename RhsShape = typename evaluator_traits<typename T::Rhs>::Shape,\n          typename LhsScalar = typename traits<typename T::Lhs>::Scalar,\n          typename RhsScalar = typename traits<typename T::Rhs>::Scalar\n        > struct product_evaluator;\n}\n\ntemplate<typename Lhs, typename Rhs,\n         int ProductType = internal::product_type<Lhs,Rhs>::value>\nstruct ProductReturnType;\n\n// this is a workaround for sun CC\ntemplate<typename Lhs, typename Rhs> struct LazyProductReturnType;\n\nnamespace internal {\n\n// Provides scalar/packet-wise product and product with accumulation\n// with optional conjugation of the arguments.\ntemplate<typename LhsScalar, typename RhsScalar, bool ConjLhs=false, bool ConjRhs=false> struct conj_helper;\n\ntemplate<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_sum_op;\ntemplate<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_difference_op;\ntemplate<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_conj_product_op;\ntemplate<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_min_op;\ntemplate<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_max_op;\ntemplate<typename 
Scalar> struct scalar_opposite_op;\ntemplate<typename Scalar> struct scalar_conjugate_op;\ntemplate<typename Scalar> struct scalar_real_op;\ntemplate<typename Scalar> struct scalar_imag_op;\ntemplate<typename Scalar> struct scalar_abs_op;\ntemplate<typename Scalar> struct scalar_abs2_op;\ntemplate<typename Scalar> struct scalar_sqrt_op;\ntemplate<typename Scalar> struct scalar_rsqrt_op;\ntemplate<typename Scalar> struct scalar_exp_op;\ntemplate<typename Scalar> struct scalar_log_op;\ntemplate<typename Scalar> struct scalar_cos_op;\ntemplate<typename Scalar> struct scalar_sin_op;\ntemplate<typename Scalar> struct scalar_acos_op;\ntemplate<typename Scalar> struct scalar_asin_op;\ntemplate<typename Scalar> struct scalar_tan_op;\ntemplate<typename Scalar> struct scalar_inverse_op;\ntemplate<typename Scalar> struct scalar_square_op;\ntemplate<typename Scalar> struct scalar_cube_op;\ntemplate<typename Scalar, typename NewType> struct scalar_cast_op;\ntemplate<typename Scalar> struct scalar_random_op;\ntemplate<typename Scalar> struct scalar_constant_op;\ntemplate<typename Scalar> struct scalar_identity_op;\ntemplate<typename Scalar,bool iscpx> struct scalar_sign_op;\ntemplate<typename Scalar,typename ScalarExponent> struct scalar_pow_op;\ntemplate<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_hypot_op;\ntemplate<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_product_op;\ntemplate<typename LhsScalar,typename RhsScalar=LhsScalar> struct scalar_quotient_op;\n\n// SpecialFunctions module\ntemplate<typename Scalar> struct scalar_lgamma_op;\ntemplate<typename Scalar> struct scalar_digamma_op;\ntemplate<typename Scalar> struct scalar_erf_op;\ntemplate<typename Scalar> struct scalar_erfc_op;\ntemplate<typename Scalar> struct scalar_igamma_op;\ntemplate<typename Scalar> struct scalar_igammac_op;\ntemplate<typename Scalar> struct scalar_zeta_op;\ntemplate<typename Scalar> struct scalar_betainc_op;\n\n} // end namespace internal\n\nstruct 
IOFormat;\n\n// Array module\ntemplate<typename _Scalar, int _Rows, int _Cols,\n         int _Options = AutoAlign |\n#if EIGEN_GNUC_AT(3,4)\n    // workaround a bug in at least gcc 3.4.6\n    // the innermost ?: ternary operator is misparsed. We write it slightly\n    // differently and this makes gcc 3.4.6 happy, but it's ugly.\n    // The error would only show up with EIGEN_DEFAULT_TO_ROW_MAJOR is defined\n    // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)\n                          ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor\n                          : !(_Cols==1 && _Rows!=1) ?  EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION\n                          : Eigen::ColMajor ),\n#else\n                          ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor\n                          : (_Cols==1 && _Rows!=1) ? Eigen::ColMajor\n                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),\n#endif\n         int _MaxRows = _Rows, int _MaxCols = _Cols> class Array;\ntemplate<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType> class Select;\ntemplate<typename MatrixType, typename BinaryOp, int Direction> class PartialReduxExpr;\ntemplate<typename ExpressionType, int Direction> class VectorwiseOp;\ntemplate<typename MatrixType,int RowFactor,int ColFactor> class Replicate;\ntemplate<typename MatrixType, int Direction = BothDirections> class Reverse;\n\ntemplate<typename MatrixType> class FullPivLU;\ntemplate<typename MatrixType> class PartialPivLU;\nnamespace internal {\ntemplate<typename MatrixType> struct inverse_impl;\n}\ntemplate<typename MatrixType> class HouseholderQR;\ntemplate<typename MatrixType> class ColPivHouseholderQR;\ntemplate<typename MatrixType> class FullPivHouseholderQR;\ntemplate<typename MatrixType> class CompleteOrthogonalDecomposition;\ntemplate<typename MatrixType, int QRPreconditioner = ColPivHouseholderQRPreconditioner> class JacobiSVD;\ntemplate<typename MatrixType> class BDCSVD;\ntemplate<typename 
MatrixType, int UpLo = Lower> class LLT;\ntemplate<typename MatrixType, int UpLo = Lower> class LDLT;\ntemplate<typename VectorsType, typename CoeffsType, int Side=OnTheLeft> class HouseholderSequence;\ntemplate<typename Scalar>     class JacobiRotation;\n\n// Geometry module:\ntemplate<typename Derived, int _Dim> class RotationBase;\ntemplate<typename Lhs, typename Rhs> class Cross;\ntemplate<typename Derived> class QuaternionBase;\ntemplate<typename Scalar> class Rotation2D;\ntemplate<typename Scalar> class AngleAxis;\ntemplate<typename Scalar,int Dim> class Translation;\ntemplate<typename Scalar,int Dim> class AlignedBox;\ntemplate<typename Scalar, int Options = AutoAlign> class Quaternion;\ntemplate<typename Scalar,int Dim,int Mode,int _Options=AutoAlign> class Transform;\ntemplate <typename _Scalar, int _AmbientDim, int Options=AutoAlign> class ParametrizedLine;\ntemplate <typename _Scalar, int _AmbientDim, int Options=AutoAlign> class Hyperplane;\ntemplate<typename Scalar> class UniformScaling;\ntemplate<typename MatrixType,int Direction> class Homogeneous;\n\n// Sparse module:\ntemplate<typename Derived> class SparseMatrixBase;\n\n// MatrixFunctions module\ntemplate<typename Derived> struct MatrixExponentialReturnValue;\ntemplate<typename Derived> class MatrixFunctionReturnValue;\ntemplate<typename Derived> class MatrixSquareRootReturnValue;\ntemplate<typename Derived> class MatrixLogarithmReturnValue;\ntemplate<typename Derived> class MatrixPowerReturnValue;\ntemplate<typename Derived> class MatrixComplexPowerReturnValue;\n\nnamespace internal {\ntemplate <typename Scalar>\nstruct stem_function\n{\n  typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;\n  typedef ComplexScalar type(ComplexScalar, int);\n};\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_FORWARDDECLARATIONS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/IndexedViewHelper.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\n#ifndef EIGEN_INDEXED_VIEW_HELPER_H\n#define EIGEN_INDEXED_VIEW_HELPER_H\n\nnamespace Eigen {\n\n/** \\namespace Eigen::placeholders\n  * \\ingroup Core_Module\n  *\n  * Namespace containing symbolic placeholder and identifiers\n  */\nnamespace placeholders {\n\nnamespace internal {\nstruct symbolic_last_tag {};\n}\n\n/** \\var last\n  * \\ingroup Core_Module\n  *\n  * Can be used as a parameter to Eigen::seq and Eigen::seqN functions to symbolically reference the last element/row/columns\n  * of the underlying vector or matrix once passed to DenseBase::operator()(const RowIndices&, const ColIndices&).\n  *\n  * This symbolic placeholder support standard arithmetic operation.\n  *\n  * A typical usage example would be:\n  * \\code\n  * using namespace Eigen;\n  * using Eigen::placeholders::last;\n  * VectorXd v(n);\n  * v(seq(2,last-2)).setOnes();\n  * \\endcode\n  *\n  * \\sa end\n  */\nstatic const Symbolic::SymbolExpr<internal::symbolic_last_tag> last;\n\n/** \\var end\n  * \\ingroup Core_Module\n  *\n  * Can be used as a parameter to Eigen::seq and Eigen::seqN functions to symbolically reference the last+1 element/row/columns\n  * of the underlying vector or matrix once passed to DenseBase::operator()(const RowIndices&, const ColIndices&).\n  *\n  * This symbolic placeholder support standard arithmetic operation.\n  * It is essentially an alias to last+1\n  *\n  * \\sa last\n  */\n#ifdef EIGEN_PARSED_BY_DOXYGEN\nstatic const auto end = last+1;\n#else\n// Using a FixedExpr<1> expression is important here to make sure the compiler\n// can fully optimize the computation starting 
indices with zero overhead.\nstatic const Symbolic::AddExpr<Symbolic::SymbolExpr<internal::symbolic_last_tag>,Symbolic::ValueExpr<Eigen::internal::FixedInt<1> > > end(last+fix<1>());\n#endif\n\n} // end namespace placeholders\n\nnamespace internal {\n\n // Replace symbolic last/end \"keywords\" by their true runtime value\ninline Index eval_expr_given_size(Index x, Index /* size */)   { return x; }\n\ntemplate<int N>\nFixedInt<N> eval_expr_given_size(FixedInt<N> x, Index /*size*/)   { return x; }\n\ntemplate<typename Derived>\nIndex eval_expr_given_size(const Symbolic::BaseExpr<Derived> &x, Index size)\n{\n  return x.derived().eval(placeholders::last=size-1);\n}\n\n// Extract increment/step at compile time\ntemplate<typename T, typename EnableIf = void> struct get_compile_time_incr {\n  enum { value = UndefinedIncr };\n};\n\n// Analogue of std::get<0>(x), but tailored for our needs.\ntemplate<typename T>\nIndex first(const T& x) { return x.first(); }\n\n// IndexedViewCompatibleType/makeIndexedViewCompatible turn an arbitrary object of type T into something usable by MatrixSlice\n// The generic implementation is a no-op\ntemplate<typename T,int XprSize,typename EnableIf=void>\nstruct IndexedViewCompatibleType {\n  typedef T type;\n};\n\ntemplate<typename T,typename Q>\nconst T& makeIndexedViewCompatible(const T& x, Index /*size*/, Q) { return x; }\n\n//--------------------------------------------------------------------------------\n// Handling of a single Index\n//--------------------------------------------------------------------------------\n\nstruct SingleRange {\n  enum {\n    SizeAtCompileTime = 1\n  };\n  SingleRange(Index val) : m_value(val) {}\n  Index operator[](Index) const { return m_value; }\n  Index size() const { return 1; }\n  Index first() const { return m_value; }\n  Index m_value;\n};\n\ntemplate<> struct get_compile_time_incr<SingleRange> {\n  enum { value = 1 }; // 1 or 0 ??\n};\n\n// Turn a single index into something that looks like an array 
(i.e., that exposes a .size(), and operatro[](int) methods)\ntemplate<typename T, int XprSize>\nstruct IndexedViewCompatibleType<T,XprSize,typename internal::enable_if<internal::is_integral<T>::value>::type> {\n  // Here we could simply use Array, but maybe it's less work for the compiler to use\n  // a simpler wrapper as SingleRange\n  //typedef Eigen::Array<Index,1,1> type;\n  typedef SingleRange type;\n};\n\ntemplate<typename T, int XprSize>\nstruct IndexedViewCompatibleType<T, XprSize, typename enable_if<Symbolic::is_symbolic<T>::value>::type> {\n  typedef SingleRange type;\n};\n\n\ntemplate<typename T>\ntypename enable_if<Symbolic::is_symbolic<T>::value,SingleRange>::type\nmakeIndexedViewCompatible(const T& id, Index size, SpecializedType) {\n  return eval_expr_given_size(id,size);\n}\n\n//--------------------------------------------------------------------------------\n// Handling of all\n//--------------------------------------------------------------------------------\n\nstruct all_t { all_t() {} };\n\n// Convert a symbolic 'all' into a usable range type\ntemplate<int XprSize>\nstruct AllRange {\n  enum { SizeAtCompileTime = XprSize };\n  AllRange(Index size = XprSize) : m_size(size) {}\n  Index operator[](Index i) const { return i; }\n  Index size() const { return m_size.value(); }\n  Index first() const { return 0; }\n  variable_if_dynamic<Index,XprSize> m_size;\n};\n\ntemplate<int XprSize>\nstruct IndexedViewCompatibleType<all_t,XprSize> {\n  typedef AllRange<XprSize> type;\n};\n\ntemplate<typename XprSizeType>\ninline AllRange<get_fixed_value<XprSizeType>::value> makeIndexedViewCompatible(all_t , XprSizeType size, SpecializedType) {\n  return AllRange<get_fixed_value<XprSizeType>::value>(size);\n}\n\ntemplate<int Size> struct get_compile_time_incr<AllRange<Size> > {\n  enum { value = 1 };\n};\n\n} // end namespace internal\n\n\nnamespace placeholders {\n\n/** \\var all\n  * \\ingroup Core_Module\n  * Can be used as a parameter to 
DenseBase::operator()(const RowIndices&, const ColIndices&) to index all rows or columns\n  */\nstatic const Eigen::internal::all_t all;\n\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_INDEXED_VIEW_HELPER_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/IntegralConstant.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\n#ifndef EIGEN_INTEGRAL_CONSTANT_H\n#define EIGEN_INTEGRAL_CONSTANT_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<int N> class FixedInt;\ntemplate<int N> class VariableAndFixedInt;\n\n/** \\internal\n  * \\class FixedInt\n  *\n  * This class embeds a compile-time integer \\c N.\n  *\n  * It is similar to c++11 std::integral_constant<int,N> but with some additional features\n  * such as:\n  *  - implicit conversion to int\n  *  - arithmetic and some bitwise operators: -, +, *, /, %, &, |\n  *  - c++98/14 compatibility with fix<N> and fix<N>() syntax to define integral constants.\n  *\n  * It is strongly discouraged to directly deal with this class FixedInt. Instances are expcected to\n  * be created by the user using Eigen::fix<N> or Eigen::fix<N>(). 
In C++98-11, the former syntax does\n  * not create a FixedInt<N> instance but rather a point to function that needs to be \\em cleaned-up\n  * using the generic helper:\n  * \\code\n  * internal::cleanup_index_type<T>::type\n  * internal::cleanup_index_type<T,DynamicKey>::type\n  * \\endcode\n  * where T can a FixedInt<N>, a pointer to function FixedInt<N> (*)(), or numerous other integer-like representations.\n  * \\c DynamicKey is either Dynamic (default) or DynamicIndex and used to identify true compile-time values.\n  *\n  * For convenience, you can extract the compile-time value \\c N in a generic way using the following helper:\n  * \\code\n  * internal::get_fixed_value<T,DefaultVal>::value\n  * \\endcode\n  * that will give you \\c N if T equals FixedInt<N> or FixedInt<N> (*)(), and \\c DefaultVal if T does not embed any compile-time value (e.g., T==int).\n  *\n  * \\sa fix<N>, class VariableAndFixedInt\n  */\ntemplate<int N> class FixedInt\n{\npublic:\n  static const int value = N;\n  operator int() const { return value; }\n  FixedInt() {}\n  FixedInt( VariableAndFixedInt<N> other) {\n    EIGEN_ONLY_USED_FOR_DEBUG(other);\n    eigen_internal_assert(int(other)==N);\n  }\n\n  FixedInt<-N> operator-() const { return FixedInt<-N>(); }\n  template<int M>\n  FixedInt<N+M> operator+( FixedInt<M>) const { return FixedInt<N+M>(); }\n  template<int M>\n  FixedInt<N-M> operator-( FixedInt<M>) const { return FixedInt<N-M>(); }\n  template<int M>\n  FixedInt<N*M> operator*( FixedInt<M>) const { return FixedInt<N*M>(); }\n  template<int M>\n  FixedInt<N/M> operator/( FixedInt<M>) const { return FixedInt<N/M>(); }\n  template<int M>\n  FixedInt<N%M> operator%( FixedInt<M>) const { return FixedInt<N%M>(); }\n  template<int M>\n  FixedInt<N|M> operator|( FixedInt<M>) const { return FixedInt<N|M>(); }\n  template<int M>\n  FixedInt<N&M> operator&( FixedInt<M>) const { return FixedInt<N&M>(); }\n\n#if EIGEN_HAS_CXX14\n  // Needed in C++14 to allow fix<N>():\n  FixedInt 
operator() () const { return *this; }\n\n  VariableAndFixedInt<N> operator() (int val) const { return VariableAndFixedInt<N>(val); }\n#else\n  FixedInt ( FixedInt<N> (*)() ) {}\n#endif\n\n#if EIGEN_HAS_CXX11\n  FixedInt(std::integral_constant<int,N>) {}\n#endif\n};\n\n/** \\internal\n  * \\class VariableAndFixedInt\n  *\n  * This class embeds both a compile-time integer \\c N and a runtime integer.\n  * Both values are supposed to be equal unless the compile-time value \\c N has a special\n  * value meaning that the runtime-value should be used. Depending on the context, this special\n  * value can be either Eigen::Dynamic (for positive quantities) or Eigen::DynamicIndex (for\n  * quantities that can be negative).\n  *\n  * It is the return-type of the function Eigen::fix<N>(int), and most of the time this is the only\n  * way it is used. It is strongly discouraged to directly deal with instances of VariableAndFixedInt.\n  * Indeed, in order to write generic code, it is the responsibility of the callee to properly convert\n  * it to either a true compile-time quantity (i.e. 
a FixedInt<N>), or to a runtime quantity (e.g., an Index)\n  * using the following generic helper:\n  * \\code\n  * internal::cleanup_index_type<T>::type\n  * internal::cleanup_index_type<T,DynamicKey>::type\n  * \\endcode\n  * where T can be a template instantiation of VariableAndFixedInt or numerous other integer-like representations.\n  * \\c DynamicKey is either Dynamic (default) or DynamicIndex and used to identify true compile-time values.\n  *\n  * For convenience, you can also extract the compile-time value \\c N using the following helper:\n  * \\code\n  * internal::get_fixed_value<T,DefaultVal>::value\n  * \\endcode\n  * that will give you \\c N if T equals VariableAndFixedInt<N>, and \\c DefaultVal if T does not embed any compile-time value (e.g., T==int).\n  *\n  * \\sa fix<N>(int), class FixedInt\n  */\ntemplate<int N> class VariableAndFixedInt\n{\npublic:\n  static const int value = N;\n  operator int() const { return m_value; }\n  VariableAndFixedInt(int val) { m_value = val; }\nprotected:\n  int m_value;\n};\n\ntemplate<typename T, int Default=Dynamic> struct get_fixed_value {\n  static const int value = Default;\n};\n\ntemplate<int N,int Default> struct get_fixed_value<FixedInt<N>,Default> {\n  static const int value = N;\n};\n\n#if !EIGEN_HAS_CXX14\ntemplate<int N,int Default> struct get_fixed_value<FixedInt<N> (*)(),Default> {\n  static const int value = N;\n};\n#endif\n\ntemplate<int N,int Default> struct get_fixed_value<VariableAndFixedInt<N>,Default> {\n  static const int value = N ;\n};\n\ntemplate<typename T, int N, int Default>\nstruct get_fixed_value<variable_if_dynamic<T,N>,Default> {\n  static const int value = N;\n};\n\ntemplate<typename T> EIGEN_DEVICE_FUNC Index get_runtime_value(const T &x) { return x; }\n#if !EIGEN_HAS_CXX14\ntemplate<int N> EIGEN_DEVICE_FUNC Index get_runtime_value(FixedInt<N> (*)()) { return N; }\n#endif\n\n// Cleanup integer/FixedInt/VariableAndFixedInt/etc types:\n\n// By default, no cleanup:\ntemplate<typename 
T, int DynamicKey=Dynamic, typename EnableIf=void> struct cleanup_index_type { typedef T type; };\n\n// Convert any integral type (e.g., short, int, unsigned int, etc.) to Eigen::Index\ntemplate<typename T, int DynamicKey> struct cleanup_index_type<T,DynamicKey,typename internal::enable_if<internal::is_integral<T>::value>::type> { typedef Index type; };\n\n#if !EIGEN_HAS_CXX14\n// In c++98/c++11, fix<N> is a pointer to function that we better cleanup to a true FixedInt<N>:\ntemplate<int N, int DynamicKey> struct cleanup_index_type<FixedInt<N> (*)(), DynamicKey> { typedef FixedInt<N> type; };\n#endif\n\n// If VariableAndFixedInt does not match DynamicKey, then we turn it to a pure compile-time value:\ntemplate<int N, int DynamicKey> struct cleanup_index_type<VariableAndFixedInt<N>, DynamicKey> { typedef FixedInt<N> type; };\n// If VariableAndFixedInt matches DynamicKey, then we turn it to a pure runtime-value (aka Index):\ntemplate<int DynamicKey> struct cleanup_index_type<VariableAndFixedInt<DynamicKey>, DynamicKey> { typedef Index type; };\n\n#if EIGEN_HAS_CXX11\ntemplate<int N, int DynamicKey> struct cleanup_index_type<std::integral_constant<int,N>, DynamicKey> { typedef FixedInt<N> type; };\n#endif\n\n} // end namespace internal\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n\n#if EIGEN_HAS_CXX14\ntemplate<int N>\nstatic const internal::FixedInt<N> fix{};\n#else\ntemplate<int N>\ninline internal::FixedInt<N> fix() { return internal::FixedInt<N>(); }\n\n// The generic typename T is mandatory. 
Otherwise, a code like fix<N> could refer to either the function above or this next overload.\n// This way a code like fix<N> can only refer to the previous function.\ntemplate<int N,typename T>\ninline internal::VariableAndFixedInt<N> fix(T val) { return internal::VariableAndFixedInt<N>(val); }\n#endif\n\n#else // EIGEN_PARSED_BY_DOXYGEN\n\n/** \\var fix<N>()\n  * \\ingroup Core_Module\n  *\n  * This \\em identifier permits to construct an object embedding a compile-time integer \\c N.\n  *\n  * \\tparam N the compile-time integer value\n  *\n  * It is typically used in conjunction with the Eigen::seq and Eigen::seqN functions to pass compile-time values to them:\n  * \\code\n  * seqN(10,fix<4>,fix<-3>)   // <=> [10 7 4 1]\n  * \\endcode\n  *\n  * See also the function fix(int) to pass both a compile-time and runtime value.\n  *\n  * In c++14, it is implemented as:\n  * \\code\n  * template<int N> static const internal::FixedInt<N> fix{};\n  * \\endcode\n  * where internal::FixedInt<N> is an internal template class similar to\n  * <a href=\"http://en.cppreference.com/w/cpp/types/integral_constant\">\\c std::integral_constant </a><tt> <int,N> </tt>\n  * Here, \\c fix<N> is thus an object of type \\c internal::FixedInt<N>.\n  *\n  * In c++98/11, it is implemented as a function:\n  * \\code\n  * template<int N> inline internal::FixedInt<N> fix();\n  * \\endcode\n  * Here internal::FixedInt<N> is thus a pointer to function.\n  *\n  * If for some reason you want a true object in c++98 then you can write: \\code fix<N>() \\endcode which is also valid in c++14.\n  *\n  * \\sa fix<N>(int), seq, seqN\n  */\ntemplate<int N>\nstatic const auto fix();\n\n/** \\fn fix<N>(int)\n  * \\ingroup Core_Module\n  *\n  * This function returns an object embedding both a compile-time integer \\c N, and a fallback runtime value \\a val.\n  *\n  * \\tparam N the compile-time integer value\n  * \\param  val the fallback runtime integer value\n  *\n  * This function is a more general version 
of the \\ref fix identifier/function that can be used in template code\n  * where the compile-time value could turn out to actually mean \"undefined at compile-time\". For positive integers\n  * such as a size or a dimension, this case is identified by Eigen::Dynamic, whereas runtime signed integers\n  * (e.g., an increment/stride) are identified as Eigen::DynamicIndex. In such a case, the runtime value \\a val\n  * will be used as a fallback.\n  *\n  * A typical use case would be:\n  * \\code\n  * template<typename Derived> void foo(const MatrixBase<Derived> &mat) {\n  *   const int N = Derived::RowsAtCompileTime==Dynamic ? Dynamic : Derived::RowsAtCompileTime/2;\n  *   const int n = mat.rows()/2;\n  *   ... mat( seqN(0,fix<N>(n) ) ...;\n  * }\n  * \\endcode\n  * In this example, the function Eigen::seqN knows that the second argument is expected to be a size.\n  * If the passed compile-time value N equals Eigen::Dynamic, then the proxy object returned by fix will be dissmissed, and converted to an Eigen::Index of value \\c n.\n  * Otherwise, the runtime-value \\c n will be dissmissed, and the returned ArithmeticSequence will be of the exact same type as <tt> seqN(0,fix<N>) </tt>.\n  *\n  * \\sa fix, seqN, class ArithmeticSequence\n  */\ntemplate<int N>\nstatic const auto fix(int val);\n\n#endif // EIGEN_PARSED_BY_DOXYGEN\n\n} // end namespace Eigen\n\n#endif // EIGEN_INTEGRAL_CONSTANT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/MKL_support.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to Intel(R) MKL\n *   Include file with common MKL declarations\n ********************************************************************************\n*/\n\n#ifndef EIGEN_MKL_SUPPORT_H\n#define EIGEN_MKL_SUPPORT_H\n\n#ifdef EIGEN_USE_MKL_ALL\n  #ifndef EIGEN_USE_BLAS\n    #define EIGEN_USE_BLAS\n  #endif\n  #ifndef EIGEN_USE_LAPACKE\n    #define EIGEN_USE_LAPACKE\n  #endif\n  #ifndef EIGEN_USE_MKL_VML\n    #define EIGEN_USE_MKL_VML\n  #endif\n#endif\n\n#ifdef EIGEN_USE_LAPACKE_STRICT\n  #define EIGEN_USE_LAPACKE\n#endif\n\n#if defined(EIGEN_USE_MKL_VML)\n  #define EIGEN_USE_MKL\n#endif\n\n#if defined EIGEN_USE_MKL\n#   include <mkl.h> \n/*Check IMKL version for compatibility: < 10.3 is not usable with Eigen*/\n#   ifndef INTEL_MKL_VERSION\n#       undef EIGEN_USE_MKL /* INTEL_MKL_VERSION is not even defined on older versions */\n#   elif INTEL_MKL_VERSION < 100305    /* the intel-mkl-103-release-notes say this was when the lapacke.h interface was added*/\n#       undef EIGEN_USE_MKL\n#   endif\n#   ifndef EIGEN_USE_MKL\n    /*If the MKL version is too old, undef everything*/\n#       undef   EIGEN_USE_MKL_ALL\n#       undef   EIGEN_USE_LAPACKE\n#       undef   EIGEN_USE_MKL_VML\n#       undef   EIGEN_USE_LAPACKE_STRICT\n#       undef   EIGEN_USE_LAPACKE\n#   endif\n#endif\n\n#if defined EIGEN_USE_MKL\n\n#define EIGEN_MKL_VML_THRESHOLD 
128\n\n/* MKL_DOMAIN_BLAS, etc are defined only in 10.3 update 7 */\n/* MKL_BLAS, etc are not defined in 11.2 */\n#ifdef MKL_DOMAIN_ALL\n#define EIGEN_MKL_DOMAIN_ALL MKL_DOMAIN_ALL\n#else\n#define EIGEN_MKL_DOMAIN_ALL MKL_ALL\n#endif\n\n#ifdef MKL_DOMAIN_BLAS\n#define EIGEN_MKL_DOMAIN_BLAS MKL_DOMAIN_BLAS\n#else\n#define EIGEN_MKL_DOMAIN_BLAS MKL_BLAS\n#endif\n\n#ifdef MKL_DOMAIN_FFT\n#define EIGEN_MKL_DOMAIN_FFT MKL_DOMAIN_FFT\n#else\n#define EIGEN_MKL_DOMAIN_FFT MKL_FFT\n#endif\n\n#ifdef MKL_DOMAIN_VML\n#define EIGEN_MKL_DOMAIN_VML MKL_DOMAIN_VML\n#else\n#define EIGEN_MKL_DOMAIN_VML MKL_VML\n#endif\n\n#ifdef MKL_DOMAIN_PARDISO\n#define EIGEN_MKL_DOMAIN_PARDISO MKL_DOMAIN_PARDISO\n#else\n#define EIGEN_MKL_DOMAIN_PARDISO MKL_PARDISO\n#endif\n#endif\n\nnamespace Eigen {\n\ntypedef std::complex<double> dcomplex;\ntypedef std::complex<float>  scomplex;\n\n#if defined(EIGEN_USE_MKL)\ntypedef MKL_INT BlasIndex;\n#else\ntypedef int BlasIndex;\n#endif\n\n} // end namespace Eigen\n\n#if defined(EIGEN_USE_BLAS)\n#include \"../../misc/blas.h\"\n#endif\n\n#endif // EIGEN_MKL_SUPPORT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/Macros.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_MACROS_H\n#define EIGEN_MACROS_H\n\n#define EIGEN_WORLD_VERSION 3\n#define EIGEN_MAJOR_VERSION 3\n#define EIGEN_MINOR_VERSION 90\n\n#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \\\n                                      (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \\\n                                                                 EIGEN_MINOR_VERSION>=z))))\n\n// Compiler identification, EIGEN_COMP_*\n\n/// \\internal EIGEN_COMP_GNUC set to 1 for all compilers compatible with GCC\n#ifdef __GNUC__\n  #define EIGEN_COMP_GNUC (__GNUC__*10+__GNUC_MINOR__)\n#else\n  #define EIGEN_COMP_GNUC 0\n#endif\n\n/// \\internal EIGEN_COMP_CLANG set to major+minor version (e.g., 307 for clang 3.7) if the compiler is clang\n#if defined(__clang__)\n  #define EIGEN_COMP_CLANG (__clang_major__*100+__clang_minor__)\n#else\n  #define EIGEN_COMP_CLANG 0\n#endif\n\n\n/// \\internal EIGEN_COMP_LLVM set to 1 if the compiler backend is llvm\n#if defined(__llvm__)\n  #define EIGEN_COMP_LLVM 1\n#else\n  #define EIGEN_COMP_LLVM 0\n#endif\n\n/// \\internal EIGEN_COMP_ICC set to __INTEL_COMPILER if the compiler is Intel compiler, 0 otherwise\n#if defined(__INTEL_COMPILER)\n  #define EIGEN_COMP_ICC __INTEL_COMPILER\n#else\n  #define EIGEN_COMP_ICC 0\n#endif\n\n/// \\internal EIGEN_COMP_MINGW set to 1 if the compiler is mingw\n#if defined(__MINGW32__)\n  #define EIGEN_COMP_MINGW 1\n#else\n  #define EIGEN_COMP_MINGW 0\n#endif\n\n/// \\internal EIGEN_COMP_SUNCC set to 1 if the 
compiler is Solaris Studio\n#if defined(__SUNPRO_CC)\n  #define EIGEN_COMP_SUNCC 1\n#else\n  #define EIGEN_COMP_SUNCC 0\n#endif\n\n/// \\internal EIGEN_COMP_MSVC set to _MSC_VER if the compiler is Microsoft Visual C++, 0 otherwise.\n#if defined(_MSC_VER)\n  #define EIGEN_COMP_MSVC _MSC_VER\n#else\n  #define EIGEN_COMP_MSVC 0\n#endif\n\n// For the record, here is a table summarizing the possible values for EIGEN_COMP_MSVC:\n//  name  ver   MSC_VER\n//  2008    9      1500\n//  2010   10      1600\n//  2012   11      1700\n//  2013   12      1800\n//  2015   14      1900\n//  \"15\"   15      1900\n\n/// \\internal EIGEN_COMP_MSVC_STRICT set to 1 if the compiler is really Microsoft Visual C++ and not ,e.g., ICC or clang-cl\n#if EIGEN_COMP_MSVC && !(EIGEN_COMP_ICC || EIGEN_COMP_LLVM || EIGEN_COMP_CLANG)\n  #define EIGEN_COMP_MSVC_STRICT _MSC_VER\n#else\n  #define EIGEN_COMP_MSVC_STRICT 0\n#endif\n\n/// \\internal EIGEN_COMP_IBM set to 1 if the compiler is IBM XL C++\n#if defined(__IBMCPP__) || defined(__xlc__)\n  #define EIGEN_COMP_IBM 1\n#else\n  #define EIGEN_COMP_IBM 0\n#endif\n\n/// \\internal EIGEN_COMP_PGI set to 1 if the compiler is Portland Group Compiler\n#if defined(__PGI)\n  #define EIGEN_COMP_PGI 1\n#else\n  #define EIGEN_COMP_PGI 0\n#endif\n\n/// \\internal EIGEN_COMP_ARM set to 1 if the compiler is ARM Compiler\n#if defined(__CC_ARM) || defined(__ARMCC_VERSION)\n  #define EIGEN_COMP_ARM 1\n#else\n  #define EIGEN_COMP_ARM 0\n#endif\n\n/// \\internal EIGEN_COMP_ARM set to 1 if the compiler is ARM Compiler\n#if defined(__EMSCRIPTEN__)\n  #define EIGEN_COMP_EMSCRIPTEN 1\n#else\n  #define EIGEN_COMP_EMSCRIPTEN 0\n#endif\n\n\n/// \\internal EIGEN_GNUC_STRICT set to 1 if the compiler is really GCC and not a compatible compiler (e.g., ICC, clang, mingw, etc.)\n#if EIGEN_COMP_GNUC && !(EIGEN_COMP_CLANG || EIGEN_COMP_ICC || EIGEN_COMP_MINGW || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM || EIGEN_COMP_EMSCRIPTEN)\n  #define EIGEN_COMP_GNUC_STRICT 1\n#else\n  
#define EIGEN_COMP_GNUC_STRICT 0\n#endif\n\n\n#if EIGEN_COMP_GNUC\n  #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__==x && __GNUC_MINOR__>=y) || __GNUC__>x)\n  #define EIGEN_GNUC_AT_MOST(x,y)  ((__GNUC__==x && __GNUC_MINOR__<=y) || __GNUC__<x)\n  #define EIGEN_GNUC_AT(x,y)       ( __GNUC__==x && __GNUC_MINOR__==y )\n#else\n  #define EIGEN_GNUC_AT_LEAST(x,y) 0\n  #define EIGEN_GNUC_AT_MOST(x,y)  0\n  #define EIGEN_GNUC_AT(x,y)       0\n#endif\n\n// FIXME: could probably be removed as we do not support gcc 3.x anymore\n#if EIGEN_COMP_GNUC && (__GNUC__ <= 3)\n#define EIGEN_GCC3_OR_OLDER 1\n#else\n#define EIGEN_GCC3_OR_OLDER 0\n#endif\n\n\n// Architecture identification, EIGEN_ARCH_*\n\n#if defined(__x86_64__) || defined(_M_X64) || defined(__amd64)\n  #define EIGEN_ARCH_x86_64 1\n#else\n  #define EIGEN_ARCH_x86_64 0\n#endif\n\n#if defined(__i386__) || defined(_M_IX86) || defined(_X86_) || defined(__i386)\n  #define EIGEN_ARCH_i386 1\n#else\n  #define EIGEN_ARCH_i386 0\n#endif\n\n#if EIGEN_ARCH_x86_64 || EIGEN_ARCH_i386\n  #define EIGEN_ARCH_i386_OR_x86_64 1\n#else\n  #define EIGEN_ARCH_i386_OR_x86_64 0\n#endif\n\n/// \\internal EIGEN_ARCH_ARM set to 1 if the architecture is ARM\n#if defined(__arm__)\n  #define EIGEN_ARCH_ARM 1\n#else\n  #define EIGEN_ARCH_ARM 0\n#endif\n\n/// \\internal EIGEN_ARCH_ARM64 set to 1 if the architecture is ARM64\n#if defined(__aarch64__)\n  #define EIGEN_ARCH_ARM64 1\n#else\n  #define EIGEN_ARCH_ARM64 0\n#endif\n\n#if EIGEN_ARCH_ARM || EIGEN_ARCH_ARM64\n  #define EIGEN_ARCH_ARM_OR_ARM64 1\n#else\n  #define EIGEN_ARCH_ARM_OR_ARM64 0\n#endif\n\n/// \\internal EIGEN_ARCH_MIPS set to 1 if the architecture is MIPS\n#if defined(__mips__) || defined(__mips)\n  #define EIGEN_ARCH_MIPS 1\n#else\n  #define EIGEN_ARCH_MIPS 0\n#endif\n\n/// \\internal EIGEN_ARCH_SPARC set to 1 if the architecture is SPARC\n#if defined(__sparc__) || defined(__sparc)\n  #define EIGEN_ARCH_SPARC 1\n#else\n  #define EIGEN_ARCH_SPARC 0\n#endif\n\n/// \\internal EIGEN_ARCH_IA64 
set to 1 if the architecture is Intel Itanium\n#if defined(__ia64__)\n  #define EIGEN_ARCH_IA64 1\n#else\n  #define EIGEN_ARCH_IA64 0\n#endif\n\n/// \\internal EIGEN_ARCH_PPC set to 1 if the architecture is PowerPC\n#if defined(__powerpc__) || defined(__ppc__) || defined(_M_PPC)\n  #define EIGEN_ARCH_PPC 1\n#else\n  #define EIGEN_ARCH_PPC 0\n#endif\n\n\n\n// Operating system identification, EIGEN_OS_*\n\n/// \\internal EIGEN_OS_UNIX set to 1 if the OS is a unix variant\n#if defined(__unix__) || defined(__unix)\n  #define EIGEN_OS_UNIX 1\n#else\n  #define EIGEN_OS_UNIX 0\n#endif\n\n/// \\internal EIGEN_OS_LINUX set to 1 if the OS is based on Linux kernel\n#if defined(__linux__)\n  #define EIGEN_OS_LINUX 1\n#else\n  #define EIGEN_OS_LINUX 0\n#endif\n\n/// \\internal EIGEN_OS_ANDROID set to 1 if the OS is Android\n// note: ANDROID is defined when using ndk_build, __ANDROID__ is defined when using a standalone toolchain.\n#if defined(__ANDROID__) || defined(ANDROID)\n  #define EIGEN_OS_ANDROID 1\n#else\n  #define EIGEN_OS_ANDROID 0\n#endif\n\n/// \\internal EIGEN_OS_GNULINUX set to 1 if the OS is GNU Linux and not Linux-based OS (e.g., not android)\n#if defined(__gnu_linux__) && !(EIGEN_OS_ANDROID)\n  #define EIGEN_OS_GNULINUX 1\n#else\n  #define EIGEN_OS_GNULINUX 0\n#endif\n\n/// \\internal EIGEN_OS_BSD set to 1 if the OS is a BSD variant\n#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__bsdi__) || defined(__DragonFly__)\n  #define EIGEN_OS_BSD 1\n#else\n  #define EIGEN_OS_BSD 0\n#endif\n\n/// \\internal EIGEN_OS_MAC set to 1 if the OS is MacOS\n#if defined(__APPLE__)\n  #define EIGEN_OS_MAC 1\n#else\n  #define EIGEN_OS_MAC 0\n#endif\n\n/// \\internal EIGEN_OS_QNX set to 1 if the OS is QNX\n#if defined(__QNX__)\n  #define EIGEN_OS_QNX 1\n#else\n  #define EIGEN_OS_QNX 0\n#endif\n\n/// \\internal EIGEN_OS_WIN set to 1 if the OS is Windows based\n#if defined(_WIN32)\n  #define EIGEN_OS_WIN 1\n#else\n  #define EIGEN_OS_WIN 
0\n#endif\n\n/// \\internal EIGEN_OS_WIN64 set to 1 if the OS is Windows 64bits\n#if defined(_WIN64)\n  #define EIGEN_OS_WIN64 1\n#else\n  #define EIGEN_OS_WIN64 0\n#endif\n\n/// \\internal EIGEN_OS_WINCE set to 1 if the OS is Windows CE\n#if defined(_WIN32_WCE)\n  #define EIGEN_OS_WINCE 1\n#else\n  #define EIGEN_OS_WINCE 0\n#endif\n\n/// \\internal EIGEN_OS_CYGWIN set to 1 if the OS is Windows/Cygwin\n#if defined(__CYGWIN__)\n  #define EIGEN_OS_CYGWIN 1\n#else\n  #define EIGEN_OS_CYGWIN 0\n#endif\n\n/// \\internal EIGEN_OS_WIN_STRICT set to 1 if the OS is really Windows and not some variants\n#if EIGEN_OS_WIN && !( EIGEN_OS_WINCE || EIGEN_OS_CYGWIN )\n  #define EIGEN_OS_WIN_STRICT 1\n#else\n  #define EIGEN_OS_WIN_STRICT 0\n#endif\n\n/// \\internal EIGEN_OS_SUN set to 1 if the OS is SUN\n#if (defined(sun) || defined(__sun)) && !(defined(__SVR4) || defined(__svr4__))\n  #define EIGEN_OS_SUN 1\n#else\n  #define EIGEN_OS_SUN 0\n#endif\n\n/// \\internal EIGEN_OS_SOLARIS set to 1 if the OS is Solaris\n#if (defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))\n  #define EIGEN_OS_SOLARIS 1\n#else\n  #define EIGEN_OS_SOLARIS 0\n#endif\n\n\n\n#if EIGEN_GNUC_AT_MOST(4,3) && !EIGEN_COMP_CLANG\n  // see bug 89\n  #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 0\n#else\n  #define EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO 1\n#endif\n\n// This macro can be used to prevent from macro expansion, e.g.:\n//   std::max EIGEN_NOT_A_MACRO(a,b)\n#define EIGEN_NOT_A_MACRO\n\n#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR\n#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::RowMajor\n#else\n#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION Eigen::ColMajor\n#endif\n\n#ifndef EIGEN_DEFAULT_DENSE_INDEX_TYPE\n#define EIGEN_DEFAULT_DENSE_INDEX_TYPE std::ptrdiff_t\n#endif\n\n// Cross compiler wrapper around LLVM's __has_builtin\n#ifdef __has_builtin\n#  define EIGEN_HAS_BUILTIN(x) __has_builtin(x)\n#else\n#  define EIGEN_HAS_BUILTIN(x) 0\n#endif\n\n// A Clang feature extension to 
determine compiler features.\n// We use it to determine 'cxx_rvalue_references'\n#ifndef __has_feature\n# define __has_feature(x) 0\n#endif\n\n// Some old compilers do not support template specializations like:\n// template<typename T,int N> void foo(const T x[N]);\n#if !( EIGEN_COMP_CLANG && ((EIGEN_COMP_CLANG<309) || defined(__apple_build_version__)) || EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<49)\n#define EIGEN_HAS_STATIC_ARRAY_TEMPLATE 1\n#else\n#define EIGEN_HAS_STATIC_ARRAY_TEMPLATE 0\n#endif\n\n// Upperbound on the C++ version to use.\n// Expected values are 03, 11, 14, 17, etc.\n// By default, let's use an arbitrarily large C++ version.\n#ifndef EIGEN_MAX_CPP_VER\n#define EIGEN_MAX_CPP_VER 99\n#endif\n\n#if EIGEN_MAX_CPP_VER>=11 && (defined(__cplusplus) && (__cplusplus >= 201103L) || EIGEN_COMP_MSVC >= 1900)\n#define EIGEN_HAS_CXX11 1\n#else\n#define EIGEN_HAS_CXX11 0\n#endif\n\n#if EIGEN_MAX_CPP_VER>=14 && (defined(__cplusplus) && (__cplusplus > 201103L) || EIGEN_COMP_MSVC >= 1910)\n#define EIGEN_HAS_CXX14 1\n#else\n#define EIGEN_HAS_CXX14 0\n#endif\n\n// Do we support r-value references?\n#ifndef EIGEN_HAS_RVALUE_REFERENCES\n#if EIGEN_MAX_CPP_VER>=11 && \\\n    (__has_feature(cxx_rvalue_references) || \\\n    (defined(__cplusplus) && __cplusplus >= 201103L) || \\\n    (EIGEN_COMP_MSVC >= 1600))\n  #define EIGEN_HAS_RVALUE_REFERENCES 1\n#else\n  #define EIGEN_HAS_RVALUE_REFERENCES 0\n#endif\n#endif\n\n// Does the compiler support C99?\n#ifndef EIGEN_HAS_C99_MATH\n#if EIGEN_MAX_CPP_VER>=11 && \\\n    ((defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901))       \\\n  || (defined(__GNUC__) && defined(_GLIBCXX_USE_C99)) \\\n  || (defined(_LIBCPP_VERSION) && !defined(_MSC_VER)) \\\n  || (EIGEN_COMP_MSVC >= 1900) || defined(__SYCL_DEVICE_ONLY__))\n  #define EIGEN_HAS_C99_MATH 1\n#else\n  #define EIGEN_HAS_C99_MATH 0\n#endif\n#endif\n\n// Does the compiler support result_of?\n#ifndef EIGEN_HAS_STD_RESULT_OF\n#if EIGEN_MAX_CPP_VER>=11 && 
((__has_feature(cxx_lambdas) || (defined(__cplusplus) && __cplusplus >= 201103L)))\n#define EIGEN_HAS_STD_RESULT_OF 1\n#else\n#define EIGEN_HAS_STD_RESULT_OF 0\n#endif\n#endif\n\n// Does the compiler support variadic templates?\n#ifndef EIGEN_HAS_VARIADIC_TEMPLATES\n#if EIGEN_MAX_CPP_VER>=11 && (__cplusplus > 199711L || EIGEN_COMP_MSVC >= 1900) \\\n  && (!defined(__NVCC__) || !EIGEN_ARCH_ARM_OR_ARM64 || (defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000) )\n    // ^^ Disable the use of variadic templates when compiling with versions of nvcc older than 8.0 on ARM devices:\n    //    this prevents nvcc from crashing when compiling Eigen on Tegra X1\n#define EIGEN_HAS_VARIADIC_TEMPLATES 1\n#elif  EIGEN_MAX_CPP_VER>=11 && (__cplusplus > 199711L || EIGEN_COMP_MSVC >= 1900) && defined(__SYCL_DEVICE_ONLY__)\n#define EIGEN_HAS_VARIADIC_TEMPLATES 1\n#else\n#define EIGEN_HAS_VARIADIC_TEMPLATES 0\n#endif\n#endif\n\n// Does the compiler fully support const expressions? (as in c++14)\n#ifndef EIGEN_HAS_CONSTEXPR\n\n#if defined(__CUDACC__)\n// Const expressions are supported provided that c++11 is enabled and we're using either clang or nvcc 7.5 or above\n#if EIGEN_MAX_CPP_VER>=14 && (__cplusplus > 199711L && defined(__CUDACC_VER__) && (EIGEN_COMP_CLANG || __CUDACC_VER__ >= 70500))\n  #define EIGEN_HAS_CONSTEXPR 1\n#endif\n#elif EIGEN_MAX_CPP_VER>=14 && (__has_feature(cxx_relaxed_constexpr) || (defined(__cplusplus) && __cplusplus >= 201402L) || \\\n  (EIGEN_GNUC_AT_LEAST(4,8) && (__cplusplus > 199711L)) || \\\n  (EIGEN_COMP_CLANG >= 306 && (__cplusplus > 199711L)))\n#define EIGEN_HAS_CONSTEXPR 1\n#endif\n\n#ifndef EIGEN_HAS_CONSTEXPR\n#define EIGEN_HAS_CONSTEXPR 0\n#endif\n\n#endif\n\n// Does the compiler support C++11 math?\n// Let's be conservative and enable the default C++11 implementation only if we are sure it exists\n#ifndef EIGEN_HAS_CXX11_MATH\n  #if EIGEN_MAX_CPP_VER>=11 && ((__cplusplus > 201103L) || (__cplusplus >= 201103L) && (EIGEN_COMP_GNUC_STRICT || 
EIGEN_COMP_CLANG || EIGEN_COMP_MSVC || EIGEN_COMP_ICC)  \\\n      && (EIGEN_ARCH_i386_OR_x86_64) && (EIGEN_OS_GNULINUX || EIGEN_OS_WIN_STRICT || EIGEN_OS_MAC))\n    #define EIGEN_HAS_CXX11_MATH 1\n  #else\n    #define EIGEN_HAS_CXX11_MATH 0\n  #endif\n#endif\n\n// Does the compiler support proper C++11 containers?\n#ifndef EIGEN_HAS_CXX11_CONTAINERS\n  #if    EIGEN_MAX_CPP_VER>=11 && \\\n         ((__cplusplus > 201103L) \\\n      || ((__cplusplus >= 201103L) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_ICC>=1400)) \\\n      || EIGEN_COMP_MSVC >= 1900)\n    #define EIGEN_HAS_CXX11_CONTAINERS 1\n  #else\n    #define EIGEN_HAS_CXX11_CONTAINERS 0\n  #endif\n#endif\n\n// Does the compiler support C++11 noexcept?\n#ifndef EIGEN_HAS_CXX11_NOEXCEPT\n  #if    EIGEN_MAX_CPP_VER>=11 && \\\n         (__has_feature(cxx_noexcept) \\\n      || (__cplusplus > 201103L) \\\n      || ((__cplusplus >= 201103L) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_CLANG || EIGEN_COMP_ICC>=1400)) \\\n      || EIGEN_COMP_MSVC >= 1900)\n    #define EIGEN_HAS_CXX11_NOEXCEPT 1\n  #else\n    #define EIGEN_HAS_CXX11_NOEXCEPT 0\n  #endif\n#endif\n\n/** Allows to disable some optimizations which might affect the accuracy of the result.\n  * Such optimization are enabled by default, and set EIGEN_FAST_MATH to 0 to disable them.\n  * They currently include:\n  *   - single precision ArrayBase::sin() and ArrayBase::cos() for SSE and AVX vectorization.\n  */\n#ifndef EIGEN_FAST_MATH\n#define EIGEN_FAST_MATH 1\n#endif\n\n#define EIGEN_DEBUG_VAR(x) std::cerr << #x << \" = \" << x << std::endl;\n\n// concatenate two tokens\n#define EIGEN_CAT2(a,b) a ## b\n#define EIGEN_CAT(a,b) EIGEN_CAT2(a,b)\n\n#define EIGEN_COMMA ,\n\n// convert a token to a string\n#define EIGEN_MAKESTRING2(a) #a\n#define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a)\n\n// EIGEN_STRONG_INLINE is a stronger version of the inline, using __forceinline on MSVC,\n// but it still doesn't use GCC's always_inline. 
This is useful in (common) situations where MSVC needs forceinline\n// but GCC is still doing fine with just inline.\n#if EIGEN_COMP_MSVC || EIGEN_COMP_ICC\n#define EIGEN_STRONG_INLINE __forceinline\n#else\n#define EIGEN_STRONG_INLINE inline\n#endif\n\n// EIGEN_ALWAYS_INLINE is the stronget, it has the effect of making the function inline and adding every possible\n// attribute to maximize inlining. This should only be used when really necessary: in particular,\n// it uses __attribute__((always_inline)) on GCC, which most of the time is useless and can severely harm compile times.\n// FIXME with the always_inline attribute,\n// gcc 3.4.x and 4.1 reports the following compilation error:\n//   Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'\n//    : function body not available\n//   See also bug 1367\n#if EIGEN_GNUC_AT_LEAST(4,2)\n#define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline\n#else\n#define EIGEN_ALWAYS_INLINE EIGEN_STRONG_INLINE\n#endif\n\n#if EIGEN_COMP_GNUC\n#define EIGEN_DONT_INLINE __attribute__((noinline))\n#elif EIGEN_COMP_MSVC\n#define EIGEN_DONT_INLINE __declspec(noinline)\n#else\n#define EIGEN_DONT_INLINE\n#endif\n\n#if EIGEN_COMP_GNUC\n#define EIGEN_PERMISSIVE_EXPR __extension__\n#else\n#define EIGEN_PERMISSIVE_EXPR\n#endif\n\n// this macro allows to get rid of linking errors about multiply defined functions.\n//  - static is not very good because it prevents definitions from different object files to be merged.\n//           So static causes the resulting linked executable to be bloated with multiple copies of the same function.\n//  - inline is not perfect either as it unwantedly hints the compiler toward inlining the function.\n#define EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_DEVICE_FUNC\n#define EIGEN_DEFINE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS EIGEN_DEVICE_FUNC inline\n\n#ifdef NDEBUG\n# ifndef EIGEN_NO_DEBUG\n#  define 
EIGEN_NO_DEBUG\n# endif\n#endif\n\n// eigen_plain_assert is where we implement the workaround for the assert() bug in GCC <= 4.3, see bug 89\n#ifdef EIGEN_NO_DEBUG\n  #define eigen_plain_assert(x)\n#else\n  #if EIGEN_SAFE_TO_USE_STANDARD_ASSERT_MACRO\n    namespace Eigen {\n    namespace internal {\n    inline bool copy_bool(bool b) { return b; }\n    }\n    }\n    #define eigen_plain_assert(x) assert(x)\n  #else\n    // work around bug 89\n    #include <cstdlib>   // for abort\n    #include <iostream>  // for std::cerr\n\n    namespace Eigen {\n    namespace internal {\n    // trivial function copying a bool. Must be EIGEN_DONT_INLINE, so we implement it after including Eigen headers.\n    // see bug 89.\n    namespace {\n    EIGEN_DONT_INLINE bool copy_bool(bool b) { return b; }\n    }\n    inline void assert_fail(const char *condition, const char *function, const char *file, int line)\n    {\n      std::cerr << \"assertion failed: \" << condition << \" in function \" << function << \" at \" << file << \":\" << line << std::endl;\n      abort();\n    }\n    }\n    }\n    #define eigen_plain_assert(x) \\\n      do { \\\n        if(!Eigen::internal::copy_bool(x)) \\\n          Eigen::internal::assert_fail(EIGEN_MAKESTRING(x), __PRETTY_FUNCTION__, __FILE__, __LINE__); \\\n      } while(false)\n  #endif\n#endif\n\n// eigen_assert can be overridden\n#ifndef eigen_assert\n#define eigen_assert(x) eigen_plain_assert(x)\n#endif\n\n#ifdef EIGEN_INTERNAL_DEBUGGING\n#define eigen_internal_assert(x) eigen_assert(x)\n#else\n#define eigen_internal_assert(x)\n#endif\n\n#ifdef EIGEN_NO_DEBUG\n#define EIGEN_ONLY_USED_FOR_DEBUG(x) EIGEN_UNUSED_VARIABLE(x)\n#else\n#define EIGEN_ONLY_USED_FOR_DEBUG(x)\n#endif\n\n#ifndef EIGEN_NO_DEPRECATED_WARNING\n  #if EIGEN_COMP_GNUC\n    #define EIGEN_DEPRECATED __attribute__((deprecated))\n  #elif EIGEN_COMP_MSVC\n    #define EIGEN_DEPRECATED __declspec(deprecated)\n  #else\n    #define EIGEN_DEPRECATED\n  #endif\n#else\n  #define 
EIGEN_DEPRECATED\n#endif\n\n#if EIGEN_COMP_GNUC\n#define EIGEN_UNUSED __attribute__((unused))\n#else\n#define EIGEN_UNUSED\n#endif\n\n// Suppresses 'unused variable' warnings.\nnamespace Eigen {\n  namespace internal {\n    template<typename T> EIGEN_DEVICE_FUNC void ignore_unused_variable(const T&) {}\n  }\n}\n#define EIGEN_UNUSED_VARIABLE(var) Eigen::internal::ignore_unused_variable(var);\n\n#if !defined(EIGEN_ASM_COMMENT)\n  #if EIGEN_COMP_GNUC && (EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64)\n    #define EIGEN_ASM_COMMENT(X)  __asm__(\"#\" X)\n  #else\n    #define EIGEN_ASM_COMMENT(X)\n  #endif\n#endif\n\n\n#if EIGEN_COMP_MSVC\n  // NOTE MSVC often gives C4127 warnings with compiletime if statements. See bug 1362.\n  // This workaround is ugly, but it does the job.\n#  define EIGEN_CONST_CONDITIONAL(cond)  (void)0, cond\n#else\n#  define EIGEN_CONST_CONDITIONAL(cond)  cond\n#endif\n\n//------------------------------------------------------------------------------------------\n// Static and dynamic alignment control\n//\n// The main purpose of this section is to define EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES\n// as the maximal boundary in bytes on which dynamically and statically allocated data may be alignment respectively.\n// The values of EIGEN_MAX_ALIGN_BYTES and EIGEN_MAX_STATIC_ALIGN_BYTES can be specified by the user. If not,\n// a default value is automatically computed based on architecture, compiler, and OS.\n//\n// This section also defines macros EIGEN_ALIGN_TO_BOUNDARY(N) and the shortcuts EIGEN_ALIGN{8,16,32,_MAX}\n// to be used to declare statically aligned buffers.\n//------------------------------------------------------------------------------------------\n\n\n/* EIGEN_ALIGN_TO_BOUNDARY(n) forces data to be n-byte aligned. 
This is used to satisfy SIMD requirements.\n * However, we do that EVEN if vectorization (EIGEN_VECTORIZE) is disabled,\n * so that vectorization doesn't affect binary compatibility.\n *\n * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link\n * vectorized and non-vectorized code.\n */\n#if (defined __CUDACC__)\n  #define EIGEN_ALIGN_TO_BOUNDARY(n) __align__(n)\n#elif EIGEN_COMP_GNUC || EIGEN_COMP_PGI || EIGEN_COMP_IBM || EIGEN_COMP_ARM\n  #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))\n#elif EIGEN_COMP_MSVC\n  #define EIGEN_ALIGN_TO_BOUNDARY(n) __declspec(align(n))\n#elif EIGEN_COMP_SUNCC\n  // FIXME not sure about this one:\n  #define EIGEN_ALIGN_TO_BOUNDARY(n) __attribute__((aligned(n)))\n#else\n  #error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler\n#endif\n\n// If the user explicitly disable vectorization, then we also disable alignment\n#if defined(EIGEN_DONT_VECTORIZE)\n  #define EIGEN_IDEAL_MAX_ALIGN_BYTES 0\n#elif defined(EIGEN_VECTORIZE_AVX512)\n  // 64 bytes static alignmeent is preferred only if really required\n  #define EIGEN_IDEAL_MAX_ALIGN_BYTES 64\n#elif defined(__AVX__)\n  // 32 bytes static alignmeent is preferred only if really required\n  #define EIGEN_IDEAL_MAX_ALIGN_BYTES 32\n#else\n  #define EIGEN_IDEAL_MAX_ALIGN_BYTES 16\n#endif\n\n\n// EIGEN_MIN_ALIGN_BYTES defines the minimal value for which the notion of explicit alignment makes sense\n#define EIGEN_MIN_ALIGN_BYTES 16\n\n// Defined the boundary (in bytes) on which the data needs to be aligned. 
Note\n// that unless EIGEN_ALIGN is defined and not equal to 0, the data may not be\n// aligned at all regardless of the value of this #define.\n\n#if (defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN))  && defined(EIGEN_MAX_STATIC_ALIGN_BYTES) && EIGEN_MAX_STATIC_ALIGN_BYTES>0\n#error EIGEN_MAX_STATIC_ALIGN_BYTES and EIGEN_DONT_ALIGN[_STATICALLY] are both defined with EIGEN_MAX_STATIC_ALIGN_BYTES!=0. Use EIGEN_MAX_STATIC_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN_STATICALLY.\n#endif\n\n// EIGEN_DONT_ALIGN_STATICALLY and EIGEN_DONT_ALIGN are deprectated\n// They imply EIGEN_MAX_STATIC_ALIGN_BYTES=0\n#if defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)\n  #ifdef EIGEN_MAX_STATIC_ALIGN_BYTES\n    #undef EIGEN_MAX_STATIC_ALIGN_BYTES\n  #endif\n  #define EIGEN_MAX_STATIC_ALIGN_BYTES 0\n#endif\n\n#ifndef EIGEN_MAX_STATIC_ALIGN_BYTES\n\n  // Try to automatically guess what is the best default value for EIGEN_MAX_STATIC_ALIGN_BYTES\n\n  // 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable\n  // 16 byte alignment on all platforms where vectorization might be enabled. 
In theory we could always\n  // enable alignment, but it can be a cause of problems on some platforms, so we just disable it in\n  // certain common platform (compiler+architecture combinations) to avoid these problems.\n  // Only static alignment is really problematic (relies on nonstandard compiler extensions),\n  // try to keep heap alignment even when we have to disable static alignment.\n  #if EIGEN_COMP_GNUC && !(EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM_OR_ARM64 || EIGEN_ARCH_PPC || EIGEN_ARCH_IA64)\n  #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1\n  #elif EIGEN_ARCH_ARM_OR_ARM64 && EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_MOST(4, 6)\n  // Old versions of GCC on ARM, at least 4.4, were once seen to have buggy static alignment support.\n  // Not sure which version fixed it, hopefully it doesn't affect 4.7, which is still somewhat in use.\n  // 4.8 and newer seem definitely unaffected.\n  #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 1\n  #else\n  #define EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT 0\n  #endif\n\n  // static alignment is completely disabled with GCC 3, Sun Studio, and QCC/QNX\n  #if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_STACK_ALIGNMENT \\\n  && !EIGEN_GCC3_OR_OLDER \\\n  && !EIGEN_COMP_SUNCC \\\n  && !EIGEN_OS_QNX\n    #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 1\n  #else\n    #define EIGEN_ARCH_WANTS_STACK_ALIGNMENT 0\n  #endif\n\n  #if EIGEN_ARCH_WANTS_STACK_ALIGNMENT\n    #define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES\n  #else\n    #define EIGEN_MAX_STATIC_ALIGN_BYTES 0\n  #endif\n\n#endif\n\n// If EIGEN_MAX_ALIGN_BYTES is defined, then it is considered as an upper bound for EIGEN_MAX_ALIGN_BYTES\n#if defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES<EIGEN_MAX_STATIC_ALIGN_BYTES\n#undef EIGEN_MAX_STATIC_ALIGN_BYTES\n#define EIGEN_MAX_STATIC_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES\n#endif\n\n#if EIGEN_MAX_STATIC_ALIGN_BYTES==0 && !defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)\n  #define 
EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT\n#endif\n\n// At this stage, EIGEN_MAX_STATIC_ALIGN_BYTES>0 is the true test whether we want to align arrays on the stack or not.\n// It takes into account both the user choice to explicitly enable/disable alignment (by settting EIGEN_MAX_STATIC_ALIGN_BYTES)\n// and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT).\n// Henceforth, only EIGEN_MAX_STATIC_ALIGN_BYTES should be used.\n\n\n// Shortcuts to EIGEN_ALIGN_TO_BOUNDARY\n#define EIGEN_ALIGN8  EIGEN_ALIGN_TO_BOUNDARY(8)\n#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)\n#define EIGEN_ALIGN32 EIGEN_ALIGN_TO_BOUNDARY(32)\n#define EIGEN_ALIGN64 EIGEN_ALIGN_TO_BOUNDARY(64)\n#if EIGEN_MAX_STATIC_ALIGN_BYTES>0\n#define EIGEN_ALIGN_MAX EIGEN_ALIGN_TO_BOUNDARY(EIGEN_MAX_STATIC_ALIGN_BYTES)\n#else\n#define EIGEN_ALIGN_MAX\n#endif\n\n\n// Dynamic alignment control\n\n#if defined(EIGEN_DONT_ALIGN) && defined(EIGEN_MAX_ALIGN_BYTES) && EIGEN_MAX_ALIGN_BYTES>0\n#error EIGEN_MAX_ALIGN_BYTES and EIGEN_DONT_ALIGN are both defined with EIGEN_MAX_ALIGN_BYTES!=0. 
Use EIGEN_MAX_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN.\n#endif\n\n#ifdef EIGEN_DONT_ALIGN\n  #ifdef EIGEN_MAX_ALIGN_BYTES\n    #undef EIGEN_MAX_ALIGN_BYTES\n  #endif\n  #define EIGEN_MAX_ALIGN_BYTES 0\n#elif !defined(EIGEN_MAX_ALIGN_BYTES)\n  #define EIGEN_MAX_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES\n#endif\n\n#if EIGEN_IDEAL_MAX_ALIGN_BYTES > EIGEN_MAX_ALIGN_BYTES\n#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_IDEAL_MAX_ALIGN_BYTES\n#else\n#define EIGEN_DEFAULT_ALIGN_BYTES EIGEN_MAX_ALIGN_BYTES\n#endif\n\n\n#ifndef EIGEN_UNALIGNED_VECTORIZE\n#define EIGEN_UNALIGNED_VECTORIZE 1\n#endif\n\n//----------------------------------------------------------------------\n\n\n#ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD\n  #define EIGEN_RESTRICT\n#endif\n#ifndef EIGEN_RESTRICT\n  #define EIGEN_RESTRICT __restrict\n#endif\n\n#ifndef EIGEN_STACK_ALLOCATION_LIMIT\n// 131072 == 128 KB\n#define EIGEN_STACK_ALLOCATION_LIMIT 131072\n#endif\n\n#ifndef EIGEN_DEFAULT_IO_FORMAT\n#ifdef EIGEN_MAKING_DOCS\n// format used in Eigen's documentation\n// needed to define it here as escaping characters in CMake add_definition's argument seems very problematic.\n#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat(3, 0, \" \", \"\\n\", \"\", \"\")\n#else\n#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat()\n#endif\n#endif\n\n// just an empty macro !\n#define EIGEN_EMPTY\n\n#if EIGEN_COMP_MSVC_STRICT && (EIGEN_COMP_MSVC < 1900 ||  defined(__CUDACC_VER__)) // for older MSVC versions, as well as 1900 && CUDA 8, using the base operator is sufficient (cf Bugs 1000, 1324)\n  #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \\\n    using Base::operator =;\n#elif EIGEN_COMP_CLANG // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)\n  #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \\\n    using Base::operator =; \\\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \\\n    template <typename 
OtherDerived> \\\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other) { Base::operator=(other.derived()); return *this; }\n#else\n  #define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \\\n    using Base::operator =; \\\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) \\\n    { \\\n      Base::operator=(other); \\\n      return *this; \\\n    }\n#endif\n\n\n/** \\internal\n * \\brief Macro to manually inherit assignment operators.\n * This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined.\n */\n#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)\n\n/**\n* Just a side note. Commenting within defines works only by documenting\n* behind the object (via '!<'). Comments cannot be multi-line and thus\n* we have these extra long lines. What is confusing doxygen over here is\n* that we use '\\' and basically have a bunch of typedefs with their\n* documentation in a single line.\n**/\n\n#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \\\n  typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; /*!< \\brief Numeric type, e.g. float, double, int or std::complex<float>. */ \\\n  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; /*!< \\brief The underlying numeric type for composed scalar types. \\details In cases where Scalar is e.g. std::complex<T>, T were corresponding to RealScalar. */ \\\n  typedef typename Base::CoeffReturnType CoeffReturnType; /*!< \\brief The return type for coefficient access. \\details Depending on whether the object allows direct coefficient access (e.g. for a MatrixXd), this type is either 'const Scalar&' or simply 'Scalar' for objects that do not allow direct coefficient access. 
*/ \\\n  typedef typename Eigen::internal::ref_selector<Derived>::type Nested; \\\n  typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \\\n  typedef typename Eigen::internal::traits<Derived>::StorageIndex StorageIndex; \\\n  enum CompileTimeTraits \\\n      { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \\\n        ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \\\n        Flags = Eigen::internal::traits<Derived>::Flags, \\\n        SizeAtCompileTime = Base::SizeAtCompileTime, \\\n        MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \\\n        IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \\\n  using Base::derived; \\\n  using Base::const_cast_derived;\n\n\n// FIXME Maybe the EIGEN_DENSE_PUBLIC_INTERFACE could be removed as importing PacketScalar is rarely needed\n#define EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \\\n  EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \\\n  typedef typename Base::PacketScalar PacketScalar;\n\n\n#define EIGEN_PLAIN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b)\n#define EIGEN_PLAIN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b)\n\n// EIGEN_SIZE_MIN_PREFER_DYNAMIC gives the min between compile-time sizes. 0 has absolute priority, followed by 1,\n// followed by Dynamic, followed by other finite values. The reason for giving Dynamic the priority over\n// finite values is that min(3, Dynamic) should be Dynamic, since that could be anything between 0 and 3.\n#define EIGEN_SIZE_MIN_PREFER_DYNAMIC(a,b) (((int)a == 0 || (int)b == 0) ? 0 \\\n                           : ((int)a == 1 || (int)b == 1) ? 1 \\\n                           : ((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \\\n                           : ((int)a <= (int)b) ? (int)a : (int)b)\n\n// EIGEN_SIZE_MIN_PREFER_FIXED is a variant of EIGEN_SIZE_MIN_PREFER_DYNAMIC comparing MaxSizes. 
The difference is that finite values\n// now have priority over Dynamic, so that min(3, Dynamic) gives 3. Indeed, whatever the actual value is\n// (between 0 and 3), it is not more than 3.\n#define EIGEN_SIZE_MIN_PREFER_FIXED(a,b)  (((int)a == 0 || (int)b == 0) ? 0 \\\n                           : ((int)a == 1 || (int)b == 1) ? 1 \\\n                           : ((int)a == Dynamic && (int)b == Dynamic) ? Dynamic \\\n                           : ((int)a == Dynamic) ? (int)b \\\n                           : ((int)b == Dynamic) ? (int)a \\\n                           : ((int)a <= (int)b) ? (int)a : (int)b)\n\n// see EIGEN_SIZE_MIN_PREFER_DYNAMIC. No need for a separate variant for MaxSizes here.\n#define EIGEN_SIZE_MAX(a,b) (((int)a == Dynamic || (int)b == Dynamic) ? Dynamic \\\n                           : ((int)a >= (int)b) ? (int)a : (int)b)\n\n#define EIGEN_LOGICAL_XOR(a,b) (((a) || (b)) && !((a) && (b)))\n\n#define EIGEN_IMPLIES(a,b) (!(a) || (b))\n\n// the expression type of a standard coefficient wise binary operation\n#define EIGEN_CWISE_BINARY_RETURN_TYPE(LHS,RHS,OPNAME) \\\n    CwiseBinaryOp< \\\n      EIGEN_CAT(EIGEN_CAT(internal::scalar_,OPNAME),_op)< \\\n          typename internal::traits<LHS>::Scalar, \\\n          typename internal::traits<RHS>::Scalar \\\n      >, \\\n      const LHS, \\\n      const RHS \\\n    >\n\n#define EIGEN_MAKE_CWISE_BINARY_OP(METHOD,OPNAME) \\\n  template<typename OtherDerived> \\\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,OPNAME) \\\n  (METHOD)(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \\\n  { \\\n    return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,OPNAME)(derived(), other.derived()); \\\n  }\n\n#define EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,TYPEA,TYPEB) \\\n  (Eigen::internal::has_ReturnType<Eigen::ScalarBinaryOpTraits<TYPEA,TYPEB,EIGEN_CAT(EIGEN_CAT(Eigen::internal::scalar_,OPNAME),_op)<TYPEA,TYPEB>  > >::value)\n\n#define 
EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(EXPR,SCALAR,OPNAME) \\\n  CwiseBinaryOp<EIGEN_CAT(EIGEN_CAT(internal::scalar_,OPNAME),_op)<typename internal::traits<EXPR>::Scalar,SCALAR>, const EXPR, \\\n                const typename internal::plain_constant_type<EXPR,SCALAR>::type>\n\n#define EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(SCALAR,EXPR,OPNAME) \\\n  CwiseBinaryOp<EIGEN_CAT(EIGEN_CAT(internal::scalar_,OPNAME),_op)<SCALAR,typename internal::traits<EXPR>::Scalar>, \\\n                const typename internal::plain_constant_type<EXPR,SCALAR>::type, const EXPR>\n\n// Workaround for MSVC 2010 (see ML thread \"patch with compile for for MSVC 2010\")\n#if EIGEN_COMP_MSVC_STRICT<=1600\n#define EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(X) typename internal::enable_if<true,X>::type\n#else\n#define EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(X) X\n#endif\n\n#define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(METHOD,OPNAME) \\\n  template <typename T> EIGEN_DEVICE_FUNC inline \\\n  EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,Scalar,T)>::type,OPNAME))\\\n  (METHOD)(const T& scalar) const { \\\n    typedef typename internal::promote_scalar_arg<Scalar,T,EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,Scalar,T)>::type PromotedT; \\\n    return EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,PromotedT,OPNAME)(derived(), \\\n           typename internal::plain_constant_type<Derived,PromotedT>::type(derived().rows(), derived().cols(), internal::scalar_constant_op<PromotedT>(scalar))); \\\n  }\n\n#define EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(METHOD,OPNAME) \\\n  template <typename T> EIGEN_DEVICE_FUNC inline friend \\\n  EIGEN_MSVC10_WORKAROUND_BINARYOP_RETURN_TYPE(const EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(typename internal::promote_scalar_arg<Scalar EIGEN_COMMA T EIGEN_COMMA 
EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,T,Scalar)>::type,Derived,OPNAME)) \\\n  (METHOD)(const T& scalar, const StorageBaseType& matrix) { \\\n    typedef typename internal::promote_scalar_arg<Scalar,T,EIGEN_SCALAR_BINARY_SUPPORTED(OPNAME,T,Scalar)>::type PromotedT; \\\n    return EIGEN_SCALAR_BINARYOP_EXPR_RETURN_TYPE(PromotedT,Derived,OPNAME)( \\\n           typename internal::plain_constant_type<Derived,PromotedT>::type(matrix.derived().rows(), matrix.derived().cols(), internal::scalar_constant_op<PromotedT>(scalar)), matrix.derived()); \\\n  }\n\n#define EIGEN_MAKE_SCALAR_BINARY_OP(METHOD,OPNAME) \\\n  EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(METHOD,OPNAME) \\\n  EIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(METHOD,OPNAME)\n\n\n#ifdef EIGEN_EXCEPTIONS\n#  define EIGEN_THROW_X(X) throw X\n#  define EIGEN_THROW throw\n#  define EIGEN_TRY try\n#  define EIGEN_CATCH(X) catch (X)\n#else\n#  ifdef __CUDA_ARCH__\n#    define EIGEN_THROW_X(X) asm(\"trap;\")\n#    define EIGEN_THROW asm(\"trap;\")\n#  else\n#    define EIGEN_THROW_X(X) std::abort()\n#    define EIGEN_THROW std::abort()\n#  endif\n#  define EIGEN_TRY if (true)\n#  define EIGEN_CATCH(X) else\n#endif\n\n\n#if EIGEN_HAS_CXX11_NOEXCEPT\n#   define EIGEN_INCLUDE_TYPE_TRAITS\n#   define EIGEN_NOEXCEPT noexcept\n#   define EIGEN_NOEXCEPT_IF(x) noexcept(x)\n#   define EIGEN_NO_THROW noexcept(true)\n#   define EIGEN_EXCEPTION_SPEC(X) noexcept(false)\n#else\n#   define EIGEN_NOEXCEPT\n#   define EIGEN_NOEXCEPT_IF(x)\n#   define EIGEN_NO_THROW throw()\n#   define EIGEN_EXCEPTION_SPEC(X) throw(X)\n#endif\n\n#endif // EIGEN_MACROS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/Memory.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2008-2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2009 Kenneth Riddile <kfriddile@yahoo.com>\n// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>\n// Copyright (C) 2010 Thomas Capricelli <orzel@freehackers.org>\n// Copyright (C) 2013 Pavel Holoborodko <pavel@holoborodko.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\n/*****************************************************************************\n*** Platform checks for aligned malloc functions                           ***\n*****************************************************************************/\n\n#ifndef EIGEN_MEMORY_H\n#define EIGEN_MEMORY_H\n\n#ifndef EIGEN_MALLOC_ALREADY_ALIGNED\n\n// Try to determine automatically if malloc is already aligned.\n\n// On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see:\n//   http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html\n// This is true at least since glibc 2.8.\n// This leaves the question how to detect 64-bit. According to this document,\n//   http://gcc.fyxm.net/summit/2003/Porting%20to%2064%20bit.pdf\n// page 114, \"[The] LP64 model [...] is used by all 64-bit UNIX ports\" so it's indeed\n// quite safe, at least within the context of glibc, to equate 64-bit with LP64.\n#if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \\\n && defined(__LP64__) && ! 
defined( __SANITIZE_ADDRESS__ ) && (EIGEN_DEFAULT_ALIGN_BYTES == 16)\n  #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1\n#else\n  #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0\n#endif\n\n// FreeBSD 6 seems to have 16-byte aligned malloc\n//   See http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c?view=markup\n// FreeBSD 7 seems to have 16-byte aligned malloc except on ARM and MIPS architectures\n//   See http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c?view=markup\n#if defined(__FreeBSD__) && !(EIGEN_ARCH_ARM || EIGEN_ARCH_MIPS) && (EIGEN_DEFAULT_ALIGN_BYTES == 16)\n  #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1\n#else\n  #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0\n#endif\n\n#if (EIGEN_OS_MAC && (EIGEN_DEFAULT_ALIGN_BYTES == 16))     \\\n || (EIGEN_OS_WIN64 && (EIGEN_DEFAULT_ALIGN_BYTES == 16))   \\\n || EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED              \\\n || EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED\n  #define EIGEN_MALLOC_ALREADY_ALIGNED 1\n#else\n  #define EIGEN_MALLOC_ALREADY_ALIGNED 0\n#endif\n\n#endif\n\nnamespace Eigen {\n\nnamespace internal {\n\nEIGEN_DEVICE_FUNC\ninline void throw_std_bad_alloc()\n{\n  #ifdef EIGEN_EXCEPTIONS\n    throw std::bad_alloc();\n  #else\n    std::size_t huge = static_cast<std::size_t>(-1);\n    new int[huge];\n  #endif\n}\n\n/*****************************************************************************\n*** Implementation of handmade aligned functions                           ***\n*****************************************************************************/\n\n/* ----- Hand made implementations of aligned malloc/free and realloc ----- */\n\n/** \\internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned.\n  * Fast, but wastes 16 additional bytes of memory. 
Does not throw any exception.\n  */\ninline void* handmade_aligned_malloc(std::size_t size)\n{\n  void *original = std::malloc(size+EIGEN_DEFAULT_ALIGN_BYTES);\n  if (original == 0) return 0;\n  void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES);\n  *(reinterpret_cast<void**>(aligned) - 1) = original;\n  return aligned;\n}\n\n/** \\internal Frees memory allocated with handmade_aligned_malloc */\ninline void handmade_aligned_free(void *ptr)\n{\n  if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));\n}\n\n/** \\internal\n  * \\brief Reallocates aligned memory.\n  * Since we know that our handmade version is based on std::malloc\n  * we can use std::realloc to implement efficient reallocation.\n  */\ninline void* handmade_aligned_realloc(void* ptr, std::size_t size, std::size_t = 0)\n{\n  if (ptr == 0) return handmade_aligned_malloc(size);\n  void *original = *(reinterpret_cast<void**>(ptr) - 1);\n  std::ptrdiff_t previous_offset = static_cast<char *>(ptr)-static_cast<char *>(original);\n  original = std::realloc(original,size+EIGEN_DEFAULT_ALIGN_BYTES);\n  if (original == 0) return 0;\n  void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES);\n  void *previous_aligned = static_cast<char *>(original)+previous_offset;\n  if(aligned!=previous_aligned)\n    std::memmove(aligned, previous_aligned, size);\n\n  *(reinterpret_cast<void**>(aligned) - 1) = original;\n  return aligned;\n}\n\n/*****************************************************************************\n*** Implementation of portable aligned versions of malloc/free/realloc     ***\n*****************************************************************************/\n\n#ifdef EIGEN_NO_MALLOC\nEIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()\n{\n  eigen_assert(false && \"heap allocation is 
forbidden (EIGEN_NO_MALLOC is defined)\");\n}\n#elif defined EIGEN_RUNTIME_NO_MALLOC\nEIGEN_DEVICE_FUNC inline bool is_malloc_allowed_impl(bool update, bool new_value = false)\n{\n  static bool value = true;\n  if (update == 1)\n    value = new_value;\n  return value;\n}\nEIGEN_DEVICE_FUNC inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); }\nEIGEN_DEVICE_FUNC inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); }\nEIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()\n{\n  eigen_assert(is_malloc_allowed() && \"heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)\");\n}\n#else\nEIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()\n{}\n#endif\n\n/** \\internal Allocates \\a size bytes. The returned pointer is guaranteed to have 16 or 32 bytes alignment depending on the requirements.\n  * On allocation error, the returned pointer is null, and std::bad_alloc is thrown.\n  */\nEIGEN_DEVICE_FUNC inline void* aligned_malloc(std::size_t size)\n{\n  check_that_malloc_is_allowed();\n\n  void *result;\n  #if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED\n    result = std::malloc(size);\n    #if EIGEN_DEFAULT_ALIGN_BYTES==16\n    eigen_assert((size<16 || (std::size_t(result)%16)==0) && \"System's malloc returned an unaligned pointer. Compile with EIGEN_MALLOC_ALREADY_ALIGNED=0 to fallback to handmade alignd memory allocator.\");\n    #endif\n  #else\n    result = handmade_aligned_malloc(size);\n  #endif\n\n  if(!result && size)\n    throw_std_bad_alloc();\n\n  return result;\n}\n\n/** \\internal Frees memory allocated with aligned_malloc. 
*/\nEIGEN_DEVICE_FUNC inline void aligned_free(void *ptr)\n{\n  #if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED\n    std::free(ptr);\n  #else\n    handmade_aligned_free(ptr);\n  #endif\n}\n\n/**\n  * \\internal\n  * \\brief Reallocates an aligned block of memory.\n  * \\throws std::bad_alloc on allocation failure\n  */\ninline void* aligned_realloc(void *ptr, std::size_t new_size, std::size_t old_size)\n{\n  EIGEN_UNUSED_VARIABLE(old_size);\n\n  void *result;\n#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED\n  result = std::realloc(ptr,new_size);\n#else\n  result = handmade_aligned_realloc(ptr,new_size,old_size);\n#endif\n\n  if (!result && new_size)\n    throw_std_bad_alloc();\n\n  return result;\n}\n\n/*****************************************************************************\n*** Implementation of conditionally aligned functions                      ***\n*****************************************************************************/\n\n/** \\internal Allocates \\a size bytes. 
If Align is true, then the returned ptr is 16-byte-aligned.\n  * On allocation error, the returned pointer is null, and a std::bad_alloc is thrown.\n  */\ntemplate<bool Align> EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc(std::size_t size)\n{\n  return aligned_malloc(size);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc<false>(std::size_t size)\n{\n  check_that_malloc_is_allowed();\n\n  void *result = std::malloc(size);\n  if(!result && size)\n    throw_std_bad_alloc();\n  return result;\n}\n\n/** \\internal Frees memory allocated with conditional_aligned_malloc */\ntemplate<bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_free(void *ptr)\n{\n  aligned_free(ptr);\n}\n\ntemplate<> EIGEN_DEVICE_FUNC inline void conditional_aligned_free<false>(void *ptr)\n{\n  std::free(ptr);\n}\n\ntemplate<bool Align> inline void* conditional_aligned_realloc(void* ptr, std::size_t new_size, std::size_t old_size)\n{\n  return aligned_realloc(ptr, new_size, old_size);\n}\n\ntemplate<> inline void* conditional_aligned_realloc<false>(void* ptr, std::size_t new_size, std::size_t)\n{\n  return std::realloc(ptr, new_size);\n}\n\n/*****************************************************************************\n*** Construction/destruction of array elements                             ***\n*****************************************************************************/\n\n/** \\internal Destructs the elements of an array.\n  * The \\a size parameters tells on how many objects to call the destructor of T.\n  */\ntemplate<typename T> EIGEN_DEVICE_FUNC inline void destruct_elements_of_array(T *ptr, std::size_t size)\n{\n  // always destruct an array starting from the end.\n  if(ptr)\n    while(size) ptr[--size].~T();\n}\n\n/** \\internal Constructs the elements of an array.\n  * The \\a size parameter tells on how many objects to call the constructor of T.\n  */\ntemplate<typename T> EIGEN_DEVICE_FUNC inline T* construct_elements_of_array(T *ptr, 
std::size_t size)\n{\n  std::size_t i;\n  EIGEN_TRY\n  {\n      for (i = 0; i < size; ++i) ::new (ptr + i) T;\n      return ptr;\n  }\n  EIGEN_CATCH(...)\n  {\n    destruct_elements_of_array(ptr, i);\n    EIGEN_THROW;\n  }\n  return NULL;\n}\n\n/*****************************************************************************\n*** Implementation of aligned new/delete-like functions                    ***\n*****************************************************************************/\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void check_size_for_overflow(std::size_t size)\n{\n  if(size > std::size_t(-1) / sizeof(T))\n    throw_std_bad_alloc();\n}\n\n/** \\internal Allocates \\a size objects of type T. The returned pointer is guaranteed to have 16 bytes alignment.\n  * On allocation error, the returned pointer is undefined, but a std::bad_alloc is thrown.\n  * The default constructor of T is called.\n  */\ntemplate<typename T> EIGEN_DEVICE_FUNC inline T* aligned_new(std::size_t size)\n{\n  check_size_for_overflow<T>(size);\n  T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));\n  EIGEN_TRY\n  {\n    return construct_elements_of_array(result, size);\n  }\n  EIGEN_CATCH(...)\n  {\n    aligned_free(result);\n    EIGEN_THROW;\n  }\n  return result;\n}\n\ntemplate<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_new(std::size_t size)\n{\n  check_size_for_overflow<T>(size);\n  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));\n  EIGEN_TRY\n  {\n    return construct_elements_of_array(result, size);\n  }\n  EIGEN_CATCH(...)\n  {\n    conditional_aligned_free<Align>(result);\n    EIGEN_THROW;\n  }\n  return result;\n}\n\n/** \\internal Deletes objects constructed with aligned_new\n  * The \\a size parameters tells on how many objects to call the destructor of T.\n  */\ntemplate<typename T> EIGEN_DEVICE_FUNC inline void aligned_delete(T *ptr, std::size_t size)\n{\n  
destruct_elements_of_array<T>(ptr, size);\n  aligned_free(ptr);\n}\n\n/** \\internal Deletes objects constructed with conditional_aligned_new\n  * The \\a size parameters tells on how many objects to call the destructor of T.\n  */\ntemplate<typename T, bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_delete(T *ptr, std::size_t size)\n{\n  destruct_elements_of_array<T>(ptr, size);\n  conditional_aligned_free<Align>(ptr);\n}\n\ntemplate<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_realloc_new(T* pts, std::size_t new_size, std::size_t old_size)\n{\n  check_size_for_overflow<T>(new_size);\n  check_size_for_overflow<T>(old_size);\n  if(new_size < old_size)\n    destruct_elements_of_array(pts+new_size, old_size-new_size);\n  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));\n  if(new_size > old_size)\n  {\n    EIGEN_TRY\n    {\n      construct_elements_of_array(result+old_size, new_size-old_size);\n    }\n    EIGEN_CATCH(...)\n    {\n      conditional_aligned_free<Align>(result);\n      EIGEN_THROW;\n    }\n  }\n  return result;\n}\n\n\ntemplate<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_new_auto(std::size_t size)\n{\n  if(size==0)\n    return 0; // short-cut. 
Also fixes Bug 884\n  check_size_for_overflow<T>(size);\n  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));\n  if(NumTraits<T>::RequireInitialization)\n  {\n    EIGEN_TRY\n    {\n      construct_elements_of_array(result, size);\n    }\n    EIGEN_CATCH(...)\n    {\n      conditional_aligned_free<Align>(result);\n      EIGEN_THROW;\n    }\n  }\n  return result;\n}\n\ntemplate<typename T, bool Align> inline T* conditional_aligned_realloc_new_auto(T* pts, std::size_t new_size, std::size_t old_size)\n{\n  check_size_for_overflow<T>(new_size);\n  check_size_for_overflow<T>(old_size);\n  if(NumTraits<T>::RequireInitialization && (new_size < old_size))\n    destruct_elements_of_array(pts+new_size, old_size-new_size);\n  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));\n  if(NumTraits<T>::RequireInitialization && (new_size > old_size))\n  {\n    EIGEN_TRY\n    {\n      construct_elements_of_array(result+old_size, new_size-old_size);\n    }\n    EIGEN_CATCH(...)\n    {\n      conditional_aligned_free<Align>(result);\n      EIGEN_THROW;\n    }\n  }\n  return result;\n}\n\ntemplate<typename T, bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_delete_auto(T *ptr, std::size_t size)\n{\n  if(NumTraits<T>::RequireInitialization)\n    destruct_elements_of_array<T>(ptr, size);\n  conditional_aligned_free<Align>(ptr);\n}\n\n/****************************************************************************/\n\n/** \\internal Returns the index of the first element of the array that is well aligned with respect to the requested \\a Alignment.\n  *\n  * \\tparam Alignment requested alignment in Bytes.\n  * \\param array the address of the start of the array\n  * \\param size the size of the array\n  *\n  * \\note If no element of the array is well aligned or the requested alignment is not a multiple of a scalar,\n  * the size of the array is returned. 
For example with SSE, the requested alignment is typically 16-bytes. If\n  * packet size for the given scalar type is 1, then everything is considered well-aligned.\n  *\n  * \\note Otherwise, if the Alignment is larger that the scalar size, we rely on the assumptions that sizeof(Scalar) is a\n  * power of 2. On the other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for\n  * example with Scalar=double on certain 32-bit platforms, see bug #79.\n  *\n  * There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.\n  * \\sa first_default_aligned()\n  */\ntemplate<int Alignment, typename Scalar, typename Index>\nEIGEN_DEVICE_FUNC inline Index first_aligned(const Scalar* array, Index size)\n{\n  const Index ScalarSize = sizeof(Scalar);\n  const Index AlignmentSize = Alignment / ScalarSize;\n  const Index AlignmentMask = AlignmentSize-1;\n\n  if(AlignmentSize<=1)\n  {\n    // Either the requested alignment if smaller than a scalar, or it exactly match a 1 scalar\n    // so that all elements of the array have the same alignment.\n    return 0;\n  }\n  else if( (UIntPtr(array) & (sizeof(Scalar)-1)) || (Alignment%ScalarSize)!=0)\n  {\n    // The array is not aligned to the size of a single scalar, or the requested alignment is not a multiple of the scalar size.\n    // Consequently, no element of the array is well aligned.\n    return size;\n  }\n  else\n  {\n    Index first = (AlignmentSize - (Index((UIntPtr(array)/sizeof(Scalar))) & AlignmentMask)) & AlignmentMask;\n    return (first < size) ? 
first : size;\n  }\n}\n\n/** \\internal Returns the index of the first element of the array that is well aligned with respect the largest packet requirement.\n   * \\sa first_aligned(Scalar*,Index) and first_default_aligned(DenseBase<Derived>) */\ntemplate<typename Scalar, typename Index>\nEIGEN_DEVICE_FUNC inline Index first_default_aligned(const Scalar* array, Index size)\n{\n  typedef typename packet_traits<Scalar>::type DefaultPacketType;\n  return first_aligned<unpacket_traits<DefaultPacketType>::alignment>(array, size);\n}\n\n/** \\internal Returns the smallest integer multiple of \\a base and greater or equal to \\a size\n  */\ntemplate<typename Index>\ninline Index first_multiple(Index size, Index base)\n{\n  return ((size+base-1)/base)*base;\n}\n\n// std::copy is much slower than memcpy, so let's introduce a smart_copy which\n// use memcpy on trivial types, i.e., on types that does not require an initialization ctor.\ntemplate<typename T, bool UseMemcpy> struct smart_copy_helper;\n\ntemplate<typename T> EIGEN_DEVICE_FUNC void smart_copy(const T* start, const T* end, T* target)\n{\n  smart_copy_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);\n}\n\ntemplate<typename T> struct smart_copy_helper<T,true> {\n  EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target)\n  {\n    IntPtr size = IntPtr(end)-IntPtr(start);\n    if(size==0) return;\n    eigen_internal_assert(start!=0 && end!=0 && target!=0);\n    memcpy(target, start, size);\n  }\n};\n\ntemplate<typename T> struct smart_copy_helper<T,false> {\n  EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target)\n  { std::copy(start, end, target); }\n};\n\n// intelligent memmove. 
falls back to std::memmove for POD types, uses std::copy otherwise.\ntemplate<typename T, bool UseMemmove> struct smart_memmove_helper;\n\ntemplate<typename T> void smart_memmove(const T* start, const T* end, T* target)\n{\n  smart_memmove_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);\n}\n\ntemplate<typename T> struct smart_memmove_helper<T,true> {\n  static inline void run(const T* start, const T* end, T* target)\n  {\n    IntPtr size = IntPtr(end)-IntPtr(start);\n    if(size==0) return;\n    eigen_internal_assert(start!=0 && end!=0 && target!=0);\n    std::memmove(target, start, size);\n  }\n};\n\ntemplate<typename T> struct smart_memmove_helper<T,false> {\n  static inline void run(const T* start, const T* end, T* target)\n  {\n    if (UIntPtr(target) < UIntPtr(start))\n    {\n      std::copy(start, end, target);\n    }\n    else\n    {\n      std::ptrdiff_t count = (std::ptrdiff_t(end)-std::ptrdiff_t(start)) / sizeof(T);\n      std::copy_backward(start, end, target + count);\n    }\n  }\n};\n\n\n/*****************************************************************************\n*** Implementation of runtime stack allocation (falling back to malloc)    ***\n*****************************************************************************/\n\n// you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA\n// to the appropriate stack allocation function\n#ifndef EIGEN_ALLOCA\n  #if EIGEN_OS_LINUX || EIGEN_OS_MAC || (defined alloca)\n    #define EIGEN_ALLOCA alloca\n  #elif EIGEN_COMP_MSVC\n    #define EIGEN_ALLOCA _alloca\n  #endif\n#endif\n\n// This helper class construct the allocated memory, and takes care of destructing and freeing the handled data\n// at destruction time. 
In practice this helper class is mainly useful to avoid memory leak in case of exceptions.\ntemplate<typename T> class aligned_stack_memory_handler : noncopyable\n{\n  public:\n    /* Creates a stack_memory_handler responsible for the buffer \\a ptr of size \\a size.\n     * Note that \\a ptr can be 0 regardless of the other parameters.\n     * This constructor takes care of constructing/initializing the elements of the buffer if required by the scalar type T (see NumTraits<T>::RequireInitialization).\n     * In this case, the buffer elements will also be destructed when this handler will be destructed.\n     * Finally, if \\a dealloc is true, then the pointer \\a ptr is freed.\n     **/\n    aligned_stack_memory_handler(T* ptr, std::size_t size, bool dealloc)\n      : m_ptr(ptr), m_size(size), m_deallocate(dealloc)\n    {\n      if(NumTraits<T>::RequireInitialization && m_ptr)\n        Eigen::internal::construct_elements_of_array(m_ptr, size);\n    }\n    ~aligned_stack_memory_handler()\n    {\n      if(NumTraits<T>::RequireInitialization && m_ptr)\n        Eigen::internal::destruct_elements_of_array<T>(m_ptr, m_size);\n      if(m_deallocate)\n        Eigen::internal::aligned_free(m_ptr);\n    }\n  protected:\n    T* m_ptr;\n    std::size_t m_size;\n    bool m_deallocate;\n};\n\ntemplate<typename T> class scoped_array : noncopyable\n{\n  T* m_ptr;\npublic:\n  explicit scoped_array(std::ptrdiff_t size)\n  {\n    m_ptr = new T[size];\n  }\n  ~scoped_array()\n  {\n    delete[] m_ptr;\n  }\n  T& operator[](std::ptrdiff_t i) { return m_ptr[i]; }\n  const T& operator[](std::ptrdiff_t i) const { return m_ptr[i]; }\n  T* &ptr() { return m_ptr; }\n  const T* ptr() const { return m_ptr; }\n  operator const T*() const { return m_ptr; }\n};\n\ntemplate<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)\n{\n  std::swap(a.ptr(),b.ptr());\n}\n\n} // end namespace internal\n\n/** \\internal\n  * Declares, allocates and construct an aligned buffer named NAME of SIZE 
elements of type TYPE on the stack\n  * if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform\n  * (currently, this is Linux and Visual Studio only). Otherwise the memory is allocated on the heap.\n  * The allocated buffer is automatically deleted when exiting the scope of this declaration.\n  * If BUFFER is non null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs.\n  * Here is an example:\n  * \\code\n  * {\n  *   ei_declare_aligned_stack_constructed_variable(float,data,size,0);\n  *   // use data[0] to data[size-1]\n  * }\n  * \\endcode\n  * The underlying stack allocation function can controlled with the EIGEN_ALLOCA preprocessor token.\n  */\n#ifdef EIGEN_ALLOCA\n\n  #if EIGEN_DEFAULT_ALIGN_BYTES>0\n    // We always manually re-align the result of EIGEN_ALLOCA.\n    // If alloca is already aligned, the compiler should be smart enough to optimize away the re-alignment.\n    #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((internal::UIntPtr(EIGEN_ALLOCA(SIZE+EIGEN_DEFAULT_ALIGN_BYTES-1)) + EIGEN_DEFAULT_ALIGN_BYTES-1) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)))\n  #else\n    #define EIGEN_ALIGNED_ALLOCA(SIZE) EIGEN_ALLOCA(SIZE)\n  #endif\n\n  #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \\\n    Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \\\n    TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \\\n               : reinterpret_cast<TYPE*>( \\\n                      (sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \\\n                    : Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) );  \\\n    Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? 
NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT)\n\n#else\n\n  #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \\\n    Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \\\n    TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE));    \\\n    Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true)\n\n#endif\n\n\n/*****************************************************************************\n*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF]                ***\n*****************************************************************************/\n\n#if EIGEN_MAX_ALIGN_BYTES!=0\n  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \\\n      void* operator new(std::size_t size, const std::nothrow_t&) EIGEN_NO_THROW { \\\n        EIGEN_TRY { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \\\n        EIGEN_CATCH (...) { return 0; } \\\n      }\n  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \\\n      void *operator new(std::size_t size) { \\\n        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \\\n      } \\\n      void *operator new[](std::size_t size) { \\\n        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \\\n      } \\\n      void operator delete(void * ptr) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \\\n      void operator delete[](void * ptr) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \\\n      void operator delete(void * ptr, std::size_t /* sz */) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \\\n      void operator delete[](void * ptr, std::size_t /* sz */) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \\\n      /* in-place new and delete. 
since (at least afaik) there is no actual   */ \\\n      /* memory allocated we can safely let the default implementation handle */ \\\n      /* this particular case. */ \\\n      static void *operator new(std::size_t size, void *ptr) { return ::operator new(size,ptr); } \\\n      static void *operator new[](std::size_t size, void* ptr) { return ::operator new[](size,ptr); } \\\n      void operator delete(void * memory, void *ptr) EIGEN_NO_THROW { return ::operator delete(memory,ptr); } \\\n      void operator delete[](void * memory, void *ptr) EIGEN_NO_THROW { return ::operator delete[](memory,ptr); } \\\n      /* nothrow-new (returns zero instead of std::bad_alloc) */ \\\n      EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \\\n      void operator delete(void *ptr, const std::nothrow_t&) EIGEN_NO_THROW { \\\n        Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); \\\n      } \\\n      typedef void eigen_aligned_operator_new_marker_type;\n#else\n  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)\n#endif\n\n#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)\n#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \\\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%EIGEN_MAX_ALIGN_BYTES==0)))\n\n/****************************************************************************/\n\n/** \\class aligned_allocator\n* \\ingroup Core_Module\n*\n* \\brief STL compatible allocator to use with with 16 byte aligned types\n*\n* Example:\n* \\code\n* // Matrix4f requires 16 bytes alignment:\n* std::map< int, Matrix4f, std::less<int>,\n*           aligned_allocator<std::pair<const int, Matrix4f> > > my_map_mat4;\n* // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator:\n* std::map< int, Vector3f > my_map_vec3;\n* \\endcode\n*\n* \\sa \\blank \\ref TopicStlContainers.\n*/\ntemplate<class T>\nclass aligned_allocator : public 
std::allocator<T>\n{\npublic:\n  typedef std::size_t     size_type;\n  typedef std::ptrdiff_t  difference_type;\n  typedef T*              pointer;\n  typedef const T*        const_pointer;\n  typedef T&              reference;\n  typedef const T&        const_reference;\n  typedef T               value_type;\n\n  template<class U>\n  struct rebind\n  {\n    typedef aligned_allocator<U> other;\n  };\n\n  aligned_allocator() : std::allocator<T>() {}\n\n  aligned_allocator(const aligned_allocator& other) : std::allocator<T>(other) {}\n\n  template<class U>\n  aligned_allocator(const aligned_allocator<U>& other) : std::allocator<T>(other) {}\n\n  ~aligned_allocator() {}\n\n  pointer allocate(size_type num, const void* /*hint*/ = 0)\n  {\n    internal::check_size_for_overflow<T>(num);\n    return static_cast<pointer>( internal::aligned_malloc(num * sizeof(T)) );\n  }\n\n  void deallocate(pointer p, size_type /*num*/)\n  {\n    internal::aligned_free(p);\n  }\n};\n\n//---------- Cache sizes ----------\n\n#if !defined(EIGEN_NO_CPUID)\n#  if EIGEN_COMP_GNUC && EIGEN_ARCH_i386_OR_x86_64\n#    if defined(__PIC__) && EIGEN_ARCH_i386\n       // Case for x86 with PIC\n#      define EIGEN_CPUID(abcd,func,id) \\\n         __asm__ __volatile__ (\"xchgl %%ebx, %k1;cpuid; xchgl %%ebx,%k1\": \"=a\" (abcd[0]), \"=&r\" (abcd[1]), \"=c\" (abcd[2]), \"=d\" (abcd[3]) : \"a\" (func), \"c\" (id));\n#    elif defined(__PIC__) && EIGEN_ARCH_x86_64\n       // Case for x64 with PIC. 
In theory this is only a problem with recent gcc and with medium or large code model, not with the default small code model.\n       // However, we cannot detect which code model is used, and the xchg overhead is negligible anyway.\n#      define EIGEN_CPUID(abcd,func,id) \\\n        __asm__ __volatile__ (\"xchg{q}\\t{%%}rbx, %q1; cpuid; xchg{q}\\t{%%}rbx, %q1\": \"=a\" (abcd[0]), \"=&r\" (abcd[1]), \"=c\" (abcd[2]), \"=d\" (abcd[3]) : \"0\" (func), \"2\" (id));\n#    else\n       // Case for x86_64 or x86 w/o PIC\n#      define EIGEN_CPUID(abcd,func,id) \\\n         __asm__ __volatile__ (\"cpuid\": \"=a\" (abcd[0]), \"=b\" (abcd[1]), \"=c\" (abcd[2]), \"=d\" (abcd[3]) : \"0\" (func), \"2\" (id) );\n#    endif\n#  elif EIGEN_COMP_MSVC\n#    if (EIGEN_COMP_MSVC > 1500) && EIGEN_ARCH_i386_OR_x86_64\n#      define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)\n#    endif\n#  endif\n#endif\n\nnamespace internal {\n\n#ifdef EIGEN_CPUID\n\ninline bool cpuid_is_vendor(int abcd[4], const int vendor[3])\n{\n  return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2];\n}\n\ninline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)\n{\n  int abcd[4];\n  l1 = l2 = l3 = 0;\n  int cache_id = 0;\n  int cache_type = 0;\n  do {\n    abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;\n    EIGEN_CPUID(abcd,0x4,cache_id);\n    cache_type  = (abcd[0] & 0x0F) >> 0;\n    if(cache_type==1||cache_type==3) // data or unified cache\n    {\n      int cache_level = (abcd[0] & 0xE0) >> 5;  // A[7:5]\n      int ways        = (abcd[1] & 0xFFC00000) >> 22; // B[31:22]\n      int partitions  = (abcd[1] & 0x003FF000) >> 12; // B[21:12]\n      int line_size   = (abcd[1] & 0x00000FFF) >>  0; // B[11:0]\n      int sets        = (abcd[2]);                    // C[31:0]\n\n      int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1);\n\n      switch(cache_level)\n      {\n        case 1: l1 = cache_size; break;\n        case 2: l2 = cache_size; break;\n        case 
3: l3 = cache_size; break;\n        default: break;\n      }\n    }\n    cache_id++;\n  } while(cache_type>0 && cache_id<16);\n}\n\ninline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3)\n{\n  int abcd[4];\n  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;\n  l1 = l2 = l3 = 0;\n  EIGEN_CPUID(abcd,0x00000002,0);\n  unsigned char * bytes = reinterpret_cast<unsigned char *>(abcd)+2;\n  bool check_for_p2_core2 = false;\n  for(int i=0; i<14; ++i)\n  {\n    switch(bytes[i])\n    {\n      case 0x0A: l1 = 8; break;   // 0Ah   data L1 cache, 8 KB, 2 ways, 32 byte lines\n      case 0x0C: l1 = 16; break;  // 0Ch   data L1 cache, 16 KB, 4 ways, 32 byte lines\n      case 0x0E: l1 = 24; break;  // 0Eh   data L1 cache, 24 KB, 6 ways, 64 byte lines\n      case 0x10: l1 = 16; break;  // 10h   data L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)\n      case 0x15: l1 = 16; break;  // 15h   code L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)\n      case 0x2C: l1 = 32; break;  // 2Ch   data L1 cache, 32 KB, 8 ways, 64 byte lines\n      case 0x30: l1 = 32; break;  // 30h   code L1 cache, 32 KB, 8 ways, 64 byte lines\n      case 0x60: l1 = 16; break;  // 60h   data L1 cache, 16 KB, 8 ways, 64 byte lines, sectored\n      case 0x66: l1 = 8; break;   // 66h   data L1 cache, 8 KB, 4 ways, 64 byte lines, sectored\n      case 0x67: l1 = 16; break;  // 67h   data L1 cache, 16 KB, 4 ways, 64 byte lines, sectored\n      case 0x68: l1 = 32; break;  // 68h   data L1 cache, 32 KB, 4 ways, 64 byte lines, sectored\n      case 0x1A: l2 = 96; break;   // code and data L2 cache, 96 KB, 6 ways, 64 byte lines (IA-64)\n      case 0x22: l3 = 512; break;   // code and data L3 cache, 512 KB, 4 ways (!), 64 byte lines, dual-sectored\n      case 0x23: l3 = 1024; break;   // code and data L3 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored\n      case 0x25: l3 = 2048; break;   // code and data L3 cache, 2048 KB, 8 ways, 64 byte lines, dual-sectored\n      case 0x29: l3 = 4096; break;   // code and data 
L3 cache, 4096 KB, 8 ways, 64 byte lines, dual-sectored\n      case 0x39: l2 = 128; break;   // code and data L2 cache, 128 KB, 4 ways, 64 byte lines, sectored\n      case 0x3A: l2 = 192; break;   // code and data L2 cache, 192 KB, 6 ways, 64 byte lines, sectored\n      case 0x3B: l2 = 128; break;   // code and data L2 cache, 128 KB, 2 ways, 64 byte lines, sectored\n      case 0x3C: l2 = 256; break;   // code and data L2 cache, 256 KB, 4 ways, 64 byte lines, sectored\n      case 0x3D: l2 = 384; break;   // code and data L2 cache, 384 KB, 6 ways, 64 byte lines, sectored\n      case 0x3E: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 64 byte lines, sectored\n      case 0x40: l2 = 0; break;   // no integrated L2 cache (P6 core) or L3 cache (P4 core)\n      case 0x41: l2 = 128; break;   // code and data L2 cache, 128 KB, 4 ways, 32 byte lines\n      case 0x42: l2 = 256; break;   // code and data L2 cache, 256 KB, 4 ways, 32 byte lines\n      case 0x43: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 32 byte lines\n      case 0x44: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 4 ways, 32 byte lines\n      case 0x45: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 4 ways, 32 byte lines\n      case 0x46: l3 = 4096; break;   // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines\n      case 0x47: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 8 ways, 64 byte lines\n      case 0x48: l2 = 3072; break;   // code and data L2 cache, 3072 KB, 12 ways, 64 byte lines\n      case 0x49: if(l2!=0) l3 = 4096; else {check_for_p2_core2=true; l3 = l2 = 4096;} break;// code and data L3 cache, 4096 KB, 16 ways, 64 byte lines (P4) or L2 for core2\n      case 0x4A: l3 = 6144; break;   // code and data L3 cache, 6144 KB, 12 ways, 64 byte lines\n      case 0x4B: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 16 ways, 64 byte lines\n      case 0x4C: l3 = 12288; break;   // code and data L3 cache, 12288 KB, 12 ways, 64 byte 
lines\n      case 0x4D: l3 = 16384; break;   // code and data L3 cache, 16384 KB, 16 ways, 64 byte lines\n      case 0x4E: l2 = 6144; break;   // code and data L2 cache, 6144 KB, 24 ways, 64 byte lines\n      case 0x78: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 4 ways, 64 byte lines\n      case 0x79: l2 = 128; break;   // code and data L2 cache, 128 KB, 8 ways, 64 byte lines, dual-sectored\n      case 0x7A: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 64 byte lines, dual-sectored\n      case 0x7B: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 64 byte lines, dual-sectored\n      case 0x7C: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored\n      case 0x7D: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 8 ways, 64 byte lines\n      case 0x7E: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 128 byte lines, sect. (IA-64)\n      case 0x7F: l2 = 512; break;   // code and data L2 cache, 512 KB, 2 ways, 64 byte lines\n      case 0x80: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 64 byte lines\n      case 0x81: l2 = 128; break;   // code and data L2 cache, 128 KB, 8 ways, 32 byte lines\n      case 0x82: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 32 byte lines\n      case 0x83: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 32 byte lines\n      case 0x84: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 32 byte lines\n      case 0x85: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 8 ways, 32 byte lines\n      case 0x86: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 64 byte lines\n      case 0x87: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines\n      case 0x88: l3 = 2048; break;   // code and data L3 cache, 2048 KB, 4 ways, 64 byte lines (IA-64)\n      case 0x89: l3 = 4096; break;   // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines 
(IA-64)\n      case 0x8A: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 4 ways, 64 byte lines (IA-64)\n      case 0x8D: l3 = 3072; break;   // code and data L3 cache, 3072 KB, 12 ways, 128 byte lines (IA-64)\n\n      default: break;\n    }\n  }\n  if(check_for_p2_core2 && l2 == l3)\n    l3 = 0;\n  l1 *= 1024;\n  l2 *= 1024;\n  l3 *= 1024;\n}\n\ninline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs)\n{\n  if(max_std_funcs>=4)\n    queryCacheSizes_intel_direct(l1,l2,l3);\n  else\n    queryCacheSizes_intel_codes(l1,l2,l3);\n}\n\ninline void queryCacheSizes_amd(int& l1, int& l2, int& l3)\n{\n  int abcd[4];\n  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;\n  EIGEN_CPUID(abcd,0x80000005,0);\n  l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB\n  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;\n  EIGEN_CPUID(abcd,0x80000006,0);\n  l2 = (abcd[2] >> 16) * 1024; // C[31;16] = l2 cache size in KB\n  l3 = ((abcd[3] & 0xFFFC000) >> 18) * 512 * 1024; // D[31;18] = l3 cache size in 512KB\n}\n#endif\n\n/** \\internal\n * Queries and returns the cache sizes in Bytes of the L1, L2, and L3 data caches respectively */\ninline void queryCacheSizes(int& l1, int& l2, int& l3)\n{\n  #ifdef EIGEN_CPUID\n  int abcd[4];\n  const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e};\n  const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163};\n  const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // \"AMDisbetter!\"\n\n  // identify the CPU vendor\n  EIGEN_CPUID(abcd,0x0,0);\n  int max_std_funcs = abcd[1];\n  if(cpuid_is_vendor(abcd,GenuineIntel))\n    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);\n  else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_))\n    queryCacheSizes_amd(l1,l2,l3);\n  else\n    // by default let's use Intel's API\n    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);\n\n  // here is the list of other vendors:\n//   ||cpuid_is_vendor(abcd,\"VIA VIA VIA \")\n//   
||cpuid_is_vendor(abcd,\"CyrixInstead\")\n//   ||cpuid_is_vendor(abcd,\"CentaurHauls\")\n//   ||cpuid_is_vendor(abcd,\"GenuineTMx86\")\n//   ||cpuid_is_vendor(abcd,\"TransmetaCPU\")\n//   ||cpuid_is_vendor(abcd,\"RiseRiseRise\")\n//   ||cpuid_is_vendor(abcd,\"Geode by NSC\")\n//   ||cpuid_is_vendor(abcd,\"SiS SiS SiS \")\n//   ||cpuid_is_vendor(abcd,\"UMC UMC UMC \")\n//   ||cpuid_is_vendor(abcd,\"NexGenDriven\")\n  #else\n  l1 = l2 = l3 = -1;\n  #endif\n}\n\n/** \\internal\n * \\returns the size in Bytes of the L1 data cache */\ninline int queryL1CacheSize()\n{\n  int l1(-1), l2, l3;\n  queryCacheSizes(l1,l2,l3);\n  return l1;\n}\n\n/** \\internal\n * \\returns the size in Bytes of the L2 or L3 cache if this later is present */\ninline int queryTopLevelCacheSize()\n{\n  int l1, l2(-1), l3(-1);\n  queryCacheSizes(l1,l2,l3);\n  return (std::max)(l2,l3);\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_MEMORY_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/Meta.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_META_H\n#define EIGEN_META_H\n\n#if defined(__CUDA_ARCH__)\n#include <cfloat>\n#include <math_constants.h>\n#endif\n\n#if EIGEN_COMP_ICC>=1600 &&  __cplusplus >= 201103L\n#include <cstdint>\n#endif\n\nnamespace Eigen {\n\ntypedef EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex;\n\n/**\n * \\brief The Index type as used for the API.\n * \\details To change this, \\c \\#define the preprocessor symbol \\c EIGEN_DEFAULT_DENSE_INDEX_TYPE.\n * \\sa \\blank \\ref TopicPreprocessorDirectives, StorageIndex.\n */\n\ntypedef EIGEN_DEFAULT_DENSE_INDEX_TYPE Index;\n\nnamespace internal {\n\n/** \\internal\n  * \\file Meta.h\n  * This file contains generic metaprogramming classes which are not specifically related to Eigen.\n  * \\note In case you wonder, yes we're aware that Boost already provides all these features,\n  * we however don't want to add a dependency to Boost.\n  */\n\n// Only recent versions of ICC complain about using ptrdiff_t to hold pointers,\n// and older versions do not provide *intptr_t types.\n#if EIGEN_COMP_ICC>=1600 &&  __cplusplus >= 201103L\ntypedef std::intptr_t  IntPtr;\ntypedef std::uintptr_t UIntPtr;\n#else\ntypedef std::ptrdiff_t IntPtr;\ntypedef std::size_t UIntPtr;\n#endif\n\nstruct true_type {  enum { value = 1 }; };\nstruct false_type { enum { value = 0 }; };\n\ntemplate<bool Condition, typename Then, typename Else>\nstruct conditional { typedef Then type; };\n\ntemplate<typename Then, typename Else>\nstruct conditional <false, Then, Else> { typedef Else type; };\n\ntemplate<typename 
T, typename U> struct is_same { enum { value = 0 }; };\ntemplate<typename T> struct is_same<T,T> { enum { value = 1 }; };\n\ntemplate<typename T> struct remove_reference { typedef T type; };\ntemplate<typename T> struct remove_reference<T&> { typedef T type; };\n\ntemplate<typename T> struct remove_pointer { typedef T type; };\ntemplate<typename T> struct remove_pointer<T*> { typedef T type; };\ntemplate<typename T> struct remove_pointer<T*const> { typedef T type; };\n\ntemplate <class T> struct remove_const { typedef T type; };\ntemplate <class T> struct remove_const<const T> { typedef T type; };\ntemplate <class T> struct remove_const<const T[]> { typedef T type[]; };\ntemplate <class T, unsigned int Size> struct remove_const<const T[Size]> { typedef T type[Size]; };\n\ntemplate<typename T> struct remove_all { typedef T type; };\ntemplate<typename T> struct remove_all<const T>   { typedef typename remove_all<T>::type type; };\ntemplate<typename T> struct remove_all<T const&>  { typedef typename remove_all<T>::type type; };\ntemplate<typename T> struct remove_all<T&>        { typedef typename remove_all<T>::type type; };\ntemplate<typename T> struct remove_all<T const*>  { typedef typename remove_all<T>::type type; };\ntemplate<typename T> struct remove_all<T*>        { typedef typename remove_all<T>::type type; };\n\ntemplate<typename T> struct is_arithmetic      { enum { value = false }; };\ntemplate<> struct is_arithmetic<float>         { enum { value = true }; };\ntemplate<> struct is_arithmetic<double>        { enum { value = true }; };\ntemplate<> struct is_arithmetic<long double>   { enum { value = true }; };\ntemplate<> struct is_arithmetic<bool>          { enum { value = true }; };\ntemplate<> struct is_arithmetic<char>          { enum { value = true }; };\ntemplate<> struct is_arithmetic<signed char>   { enum { value = true }; };\ntemplate<> struct is_arithmetic<unsigned char> { enum { value = true }; };\ntemplate<> struct is_arithmetic<signed short>  { 
enum { value = true }; };\ntemplate<> struct is_arithmetic<unsigned short>{ enum { value = true }; };\ntemplate<> struct is_arithmetic<signed int>    { enum { value = true }; };\ntemplate<> struct is_arithmetic<unsigned int>  { enum { value = true }; };\ntemplate<> struct is_arithmetic<signed long>   { enum { value = true }; };\ntemplate<> struct is_arithmetic<unsigned long> { enum { value = true }; };\n\n#if EIGEN_HAS_CXX11\nusing std::is_integral;\n#else\ntemplate<typename T> struct is_integral               { enum { value = false }; };\ntemplate<> struct is_integral<bool>                   { enum { value = true }; };\ntemplate<> struct is_integral<char>                   { enum { value = true }; };\ntemplate<> struct is_integral<signed char>            { enum { value = true }; };\ntemplate<> struct is_integral<unsigned char>          { enum { value = true }; };\ntemplate<> struct is_integral<signed short>           { enum { value = true }; };\ntemplate<> struct is_integral<unsigned short>         { enum { value = true }; };\ntemplate<> struct is_integral<signed int>             { enum { value = true }; };\ntemplate<> struct is_integral<unsigned int>           { enum { value = true }; };\ntemplate<> struct is_integral<signed long>            { enum { value = true }; };\ntemplate<> struct is_integral<unsigned long>          { enum { value = true }; };\n#endif\n\n\ntemplate <typename T> struct add_const { typedef const T type; };\ntemplate <typename T> struct add_const<T&> { typedef T& type; };\n\ntemplate <typename T> struct is_const { enum { value = 0 }; };\ntemplate <typename T> struct is_const<T const> { enum { value = 1 }; };\n\ntemplate<typename T> struct add_const_on_value_type            { typedef const T type;  };\ntemplate<typename T> struct add_const_on_value_type<T&>        { typedef T const& type; };\ntemplate<typename T> struct add_const_on_value_type<T*>        { typedef T const* type; };\ntemplate<typename T> struct add_const_on_value_type<T* const> 
 { typedef T const* const type; };\ntemplate<typename T> struct add_const_on_value_type<T const* const>  { typedef T const* const type; };\n\n\ntemplate<typename From, typename To>\nstruct is_convertible_impl\n{\nprivate:\n  struct any_conversion\n  {\n    template <typename T> any_conversion(const volatile T&);\n    template <typename T> any_conversion(T&);\n  };\n  struct yes {int a[1];};\n  struct no  {int a[2];};\n\n  static yes test(const To&, int);\n  static no  test(any_conversion, ...);\n\npublic:\n  static From ms_from;\n#ifdef __INTEL_COMPILER\n  #pragma warning push\n  #pragma warning ( disable : 2259 )\n#endif\n  enum { value = sizeof(test(ms_from, 0))==sizeof(yes) };\n#ifdef __INTEL_COMPILER\n  #pragma warning pop\n#endif\n};\n\ntemplate<typename From, typename To>\nstruct is_convertible\n{\n  enum { value = is_convertible_impl<typename remove_all<From>::type,\n                                     typename remove_all<To  >::type>::value };\n};\n\n/** \\internal Allows to enable/disable an overload\n  * according to a compile time condition.\n  */\ntemplate<bool Condition, typename T=void> struct enable_if;\n\ntemplate<typename T> struct enable_if<true,T>\n{ typedef T type; };\n\n#if defined(__CUDA_ARCH__)\n#if !defined(__FLT_EPSILON__)\n#define __FLT_EPSILON__ FLT_EPSILON\n#define __DBL_EPSILON__ DBL_EPSILON\n#endif\n\nnamespace device {\n\ntemplate<typename T> struct numeric_limits\n{\n  EIGEN_DEVICE_FUNC\n  static T epsilon() { return 0; }\n  static T (max)() { assert(false && \"Highest not supported for this type\"); }\n  static T (min)() { assert(false && \"Lowest not supported for this type\"); }\n  static T infinity() { assert(false && \"Infinity not supported for this type\"); }\n  static T quiet_NaN() { assert(false && \"quiet_NaN not supported for this type\"); }\n};\ntemplate<> struct numeric_limits<float>\n{\n  EIGEN_DEVICE_FUNC\n  static float epsilon() { return __FLT_EPSILON__; }\n  EIGEN_DEVICE_FUNC\n  static float (max)() { return 
CUDART_MAX_NORMAL_F; }\n  EIGEN_DEVICE_FUNC\n  static float (min)() { return FLT_MIN; }\n  EIGEN_DEVICE_FUNC\n  static float infinity() { return CUDART_INF_F; }\n  EIGEN_DEVICE_FUNC\n  static float quiet_NaN() { return CUDART_NAN_F; }\n};\ntemplate<> struct numeric_limits<double>\n{\n  EIGEN_DEVICE_FUNC\n  static double epsilon() { return __DBL_EPSILON__; }\n  EIGEN_DEVICE_FUNC\n  static double (max)() { return DBL_MAX; }\n  EIGEN_DEVICE_FUNC\n  static double (min)() { return DBL_MIN; }\n  EIGEN_DEVICE_FUNC\n  static double infinity() { return CUDART_INF; }\n  EIGEN_DEVICE_FUNC\n  static double quiet_NaN() { return CUDART_NAN; }\n};\ntemplate<> struct numeric_limits<int>\n{\n  EIGEN_DEVICE_FUNC\n  static int epsilon() { return 0; }\n  EIGEN_DEVICE_FUNC\n  static int (max)() { return INT_MAX; }\n  EIGEN_DEVICE_FUNC\n  static int (min)() { return INT_MIN; }\n};\ntemplate<> struct numeric_limits<unsigned int>\n{\n  EIGEN_DEVICE_FUNC\n  static unsigned int epsilon() { return 0; }\n  EIGEN_DEVICE_FUNC\n  static unsigned int (max)() { return UINT_MAX; }\n  EIGEN_DEVICE_FUNC\n  static unsigned int (min)() { return 0; }\n};\ntemplate<> struct numeric_limits<long>\n{\n  EIGEN_DEVICE_FUNC\n  static long epsilon() { return 0; }\n  EIGEN_DEVICE_FUNC\n  static long (max)() { return LONG_MAX; }\n  EIGEN_DEVICE_FUNC\n  static long (min)() { return LONG_MIN; }\n};\ntemplate<> struct numeric_limits<unsigned long>\n{\n  EIGEN_DEVICE_FUNC\n  static unsigned long epsilon() { return 0; }\n  EIGEN_DEVICE_FUNC\n  static unsigned long (max)() { return ULONG_MAX; }\n  EIGEN_DEVICE_FUNC\n  static unsigned long (min)() { return 0; }\n};\ntemplate<> struct numeric_limits<long long>\n{\n  EIGEN_DEVICE_FUNC\n  static long long epsilon() { return 0; }\n  EIGEN_DEVICE_FUNC\n  static long long (max)() { return LLONG_MAX; }\n  EIGEN_DEVICE_FUNC\n  static long long (min)() { return LLONG_MIN; }\n};\ntemplate<> struct numeric_limits<unsigned long long>\n{\n  EIGEN_DEVICE_FUNC\n  static unsigned long 
long epsilon() { return 0; }\n  EIGEN_DEVICE_FUNC\n  static unsigned long long (max)() { return ULLONG_MAX; }\n  EIGEN_DEVICE_FUNC\n  static unsigned long long (min)() { return 0; }\n};\n\n}\n\n#endif\n\n/** \\internal\n  * A base class do disable default copy ctor and copy assignement operator.\n  */\nclass noncopyable\n{\n  EIGEN_DEVICE_FUNC noncopyable(const noncopyable&);\n  EIGEN_DEVICE_FUNC const noncopyable& operator=(const noncopyable&);\nprotected:\n  EIGEN_DEVICE_FUNC noncopyable() {}\n  EIGEN_DEVICE_FUNC ~noncopyable() {}\n};\n\n/** \\internal\n  * Provides access to the number of elements in the object of as a compile-time constant expression.\n  * It \"returns\" Eigen::Dynamic if the size cannot be resolved at compile-time (default).\n  *\n  * Similar to std::tuple_size, but more general.\n  *\n  * It currently supports:\n  *  - any types T defining T::SizeAtCompileTime\n  *  - plain C arrays as T[N]\n  *  - std::array (c++11)\n  *  - some internal types such as SingleRange and AllRange\n  *\n  * The second template parameter eases SFINAE-based specializations.\n  */\ntemplate<typename T, typename EnableIf = void> struct array_size {\n  enum { value = Dynamic };\n};\n\ntemplate<typename T> struct array_size<T,typename internal::enable_if<((T::SizeAtCompileTime&0)==0)>::type> {\n  enum { value = T::SizeAtCompileTime };\n};\n\ntemplate<typename T, int N> struct array_size<const T (&)[N]> {\n  enum { value = N };\n};\ntemplate<typename T, int N> struct array_size<T (&)[N]> {\n  enum { value = N };\n};\n\n#if EIGEN_HAS_CXX11\ntemplate<typename T, std::size_t N> struct array_size<const std::array<T,N> > {\n  enum { value = N };\n};\ntemplate<typename T, std::size_t N> struct array_size<std::array<T,N> > {\n  enum { value = N };\n};\n#endif\n\n/** \\internal\n  * Analogue of the std::size free function.\n  * It returns the size of the container or view \\a x of type \\c T\n  *\n  * It currently supports:\n  *  - any types T defining a member T::size() 
const\n  *  - plain C arrays as T[N]\n  *\n  */\ntemplate<typename T>\nIndex size(const T& x) { return x.size(); }\n\ntemplate<typename T,std::size_t N>\nIndex size(const T (&) [N]) { return N; }\n\n/** \\internal\n  * Convenient struct to get the result type of a unary or binary functor.\n  *\n  * It supports both the current STL mechanism (using the result_type member) as well as\n  * upcoming next STL generation (using a templated result member).\n  * If none of these members is provided, then the type of the first argument is returned. FIXME, that behavior is a pretty bad hack.\n  */\n#if EIGEN_HAS_STD_RESULT_OF\ntemplate<typename T> struct result_of {\n  typedef typename std::result_of<T>::type type1;\n  typedef typename remove_all<type1>::type type;\n};\n#else\ntemplate<typename T> struct result_of { };\n\nstruct has_none {int a[1];};\nstruct has_std_result_type {int a[2];};\nstruct has_tr1_result {int a[3];};\n\ntemplate<typename Func, typename ArgType, int SizeOf=sizeof(has_none)>\nstruct unary_result_of_select {typedef typename internal::remove_all<ArgType>::type type;};\n\ntemplate<typename Func, typename ArgType>\nstruct unary_result_of_select<Func, ArgType, sizeof(has_std_result_type)> {typedef typename Func::result_type type;};\n\ntemplate<typename Func, typename ArgType>\nstruct unary_result_of_select<Func, ArgType, sizeof(has_tr1_result)> {typedef typename Func::template result<Func(ArgType)>::type type;};\n\ntemplate<typename Func, typename ArgType>\nstruct result_of<Func(ArgType)> {\n    template<typename T>\n    static has_std_result_type    testFunctor(T const *, typename T::result_type const * = 0);\n    template<typename T>\n    static has_tr1_result         testFunctor(T const *, typename T::template result<T(ArgType)>::type const * = 0);\n    static has_none               testFunctor(...);\n\n    // note that the following indirection is needed for gcc-3.3\n    enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};\n    typedef 
typename unary_result_of_select<Func, ArgType, FunctorType>::type type;\n};\n\ntemplate<typename Func, typename ArgType0, typename ArgType1, int SizeOf=sizeof(has_none)>\nstruct binary_result_of_select {typedef typename internal::remove_all<ArgType0>::type type;};\n\ntemplate<typename Func, typename ArgType0, typename ArgType1>\nstruct binary_result_of_select<Func, ArgType0, ArgType1, sizeof(has_std_result_type)>\n{typedef typename Func::result_type type;};\n\ntemplate<typename Func, typename ArgType0, typename ArgType1>\nstruct binary_result_of_select<Func, ArgType0, ArgType1, sizeof(has_tr1_result)>\n{typedef typename Func::template result<Func(ArgType0,ArgType1)>::type type;};\n\ntemplate<typename Func, typename ArgType0, typename ArgType1>\nstruct result_of<Func(ArgType0,ArgType1)> {\n    template<typename T>\n    static has_std_result_type    testFunctor(T const *, typename T::result_type const * = 0);\n    template<typename T>\n    static has_tr1_result         testFunctor(T const *, typename T::template result<T(ArgType0,ArgType1)>::type const * = 0);\n    static has_none               testFunctor(...);\n\n    // note that the following indirection is needed for gcc-3.3\n    enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};\n    typedef typename binary_result_of_select<Func, ArgType0, ArgType1, FunctorType>::type type;\n};\n\ntemplate<typename Func, typename ArgType0, typename ArgType1, typename ArgType2, int SizeOf=sizeof(has_none)>\nstruct ternary_result_of_select {typedef typename internal::remove_all<ArgType0>::type type;};\n\ntemplate<typename Func, typename ArgType0, typename ArgType1, typename ArgType2>\nstruct ternary_result_of_select<Func, ArgType0, ArgType1, ArgType2, sizeof(has_std_result_type)>\n{typedef typename Func::result_type type;};\n\ntemplate<typename Func, typename ArgType0, typename ArgType1, typename ArgType2>\nstruct ternary_result_of_select<Func, ArgType0, ArgType1, ArgType2, sizeof(has_tr1_result)>\n{typedef typename 
Func::template result<Func(ArgType0,ArgType1,ArgType2)>::type type;};\n\ntemplate<typename Func, typename ArgType0, typename ArgType1, typename ArgType2>\nstruct result_of<Func(ArgType0,ArgType1,ArgType2)> {\n    template<typename T>\n    static has_std_result_type    testFunctor(T const *, typename T::result_type const * = 0);\n    template<typename T>\n    static has_tr1_result         testFunctor(T const *, typename T::template result<T(ArgType0,ArgType1,ArgType2)>::type const * = 0);\n    static has_none               testFunctor(...);\n\n    // note that the following indirection is needed for gcc-3.3\n    enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};\n    typedef typename ternary_result_of_select<Func, ArgType0, ArgType1, ArgType2, FunctorType>::type type;\n};\n#endif\n\nstruct meta_yes { char a[1]; };\nstruct meta_no  { char a[2]; };\n\n// Check whether T::ReturnType does exist\ntemplate <typename T>\nstruct has_ReturnType\n{\n  template <typename C> static meta_yes testFunctor(C const *, typename C::ReturnType const * = 0);\n  template <typename C> static meta_no  testFunctor(...);\n\n  enum { value = sizeof(testFunctor<T>(static_cast<T*>(0))) == sizeof(meta_yes) };\n};\n\ntemplate<typename T> const T* return_ptr();\n\ntemplate <typename T, typename IndexType=Index>\nstruct has_nullary_operator\n{\n  template <typename C> static meta_yes testFunctor(C const *,typename enable_if<(sizeof(return_ptr<C>()->operator()())>0)>::type * = 0);\n  static meta_no testFunctor(...);\n\n  enum { value = sizeof(testFunctor(static_cast<T*>(0))) == sizeof(meta_yes) };\n};\n\ntemplate <typename T, typename IndexType=Index>\nstruct has_unary_operator\n{\n  template <typename C> static meta_yes testFunctor(C const *,typename enable_if<(sizeof(return_ptr<C>()->operator()(IndexType(0)))>0)>::type * = 0);\n  static meta_no testFunctor(...);\n\n  enum { value = sizeof(testFunctor(static_cast<T*>(0))) == sizeof(meta_yes) };\n};\n\ntemplate <typename T, typename 
IndexType=Index>\nstruct has_binary_operator\n{\n  template <typename C> static meta_yes testFunctor(C const *,typename enable_if<(sizeof(return_ptr<C>()->operator()(IndexType(0),IndexType(0)))>0)>::type * = 0);\n  static meta_no testFunctor(...);\n\n  enum { value = sizeof(testFunctor(static_cast<T*>(0))) == sizeof(meta_yes) };\n};\n\n/** \\internal In short, it computes int(sqrt(\\a Y)) with \\a Y an integer.\n  * Usage example: \\code meta_sqrt<1023>::ret \\endcode\n  */\ntemplate<int Y,\n         int InfX = 0,\n         int SupX = ((Y==1) ? 1 : Y/2),\n         bool Done = ((SupX-InfX)<=1 ? true : ((SupX*SupX <= Y) && ((SupX+1)*(SupX+1) > Y))) >\n                                // use ?: instead of || just to shut up a stupid gcc 4.3 warning\nclass meta_sqrt\n{\n    enum {\n      MidX = (InfX+SupX)/2,\n      TakeInf = MidX*MidX > Y ? 1 : 0,\n      NewInf = int(TakeInf) ? InfX : int(MidX),\n      NewSup = int(TakeInf) ? int(MidX) : SupX\n    };\n  public:\n    enum { ret = meta_sqrt<Y,NewInf,NewSup>::ret };\n};\n\ntemplate<int Y, int InfX, int SupX>\nclass meta_sqrt<Y, InfX, SupX, true> { public:  enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; };\n\n\n/** \\internal Computes the least common multiple of two positive integer A and B\n  * at compile-time. 
It implements a naive algorithm testing all multiples of A.\n  * It thus works better if A>=B.\n  */\ntemplate<int A, int B, int K=1, bool Done = ((A*K)%B)==0>\nstruct meta_least_common_multiple\n{\n  enum { ret = meta_least_common_multiple<A,B,K+1>::ret };\n};\ntemplate<int A, int B, int K>\nstruct meta_least_common_multiple<A,B,K,true>\n{\n  enum { ret = A*K };\n};\n\n/** \\internal determines whether the product of two numeric types is allowed and what the return type is */\ntemplate<typename T, typename U> struct scalar_product_traits\n{\n  enum { Defined = 0 };\n};\n\n// FIXME quick workaround around current limitation of result_of\n// template<typename Scalar, typename ArgType0, typename ArgType1>\n// struct result_of<scalar_product_op<Scalar>(ArgType0,ArgType1)> {\n// typedef typename scalar_product_traits<typename remove_all<ArgType0>::type, typename remove_all<ArgType1>::type>::ReturnType type;\n// };\n\n} // end namespace internal\n\nnamespace numext {\n  \n#if defined(__CUDA_ARCH__)\ntemplate<typename T> EIGEN_DEVICE_FUNC   void swap(T &a, T &b) { T tmp = b; b = a; a = tmp; }\n#else\ntemplate<typename T> EIGEN_STRONG_INLINE void swap(T &a, T &b) { std::swap(a,b); }\n#endif\n\n#if defined(__CUDA_ARCH__)\nusing internal::device::numeric_limits;\n#else\nusing std::numeric_limits;\n#endif\n\n// Integer division with rounding up.\n// T is assumed to be an integer type with a>=0, and b>0\ntemplate<typename T>\nT div_ceil(const T &a, const T &b)\n{\n  return (a+b-1) / b;\n}\n\n} // end namespace numext\n\n} // end namespace Eigen\n\n#endif // EIGEN_META_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/NonMPL2.h",
    "content": "#ifdef EIGEN_MPL2_ONLY\n#error Including non-MPL2 code in EIGEN_MPL2_ONLY mode\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/ReenableStupidWarnings.h",
    "content": "#ifdef EIGEN_WARNINGS_DISABLED\n#undef EIGEN_WARNINGS_DISABLED\n\n#ifndef EIGEN_PERMANENTLY_DISABLE_STUPID_WARNINGS\n  #ifdef _MSC_VER\n    #pragma warning( pop )\n  #elif defined __INTEL_COMPILER\n    #pragma warning pop\n  #elif defined __clang__\n    #pragma clang diagnostic pop\n  #elif defined __GNUC__ && __GNUC__>=6\n    #pragma GCC diagnostic pop\n  #endif\n\n  #if defined __NVCC__\n//    Don't reenable the diagnostic messages, as it turns out these messages need\n//    to be disabled at the point of the template instantiation (i.e the user code)\n//    otherwise they'll be triggered by nvcc.\n//    #pragma diag_default code_is_unreachable\n//    #pragma diag_default initialization_not_reachable\n//    #pragma diag_default 2651\n//    #pragma diag_default 2653\n  #endif\n\n#endif\n\n#endif // EIGEN_WARNINGS_DISABLED\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/StaticAssert.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_STATIC_ASSERT_H\n#define EIGEN_STATIC_ASSERT_H\n\n/* Some notes on Eigen's static assertion mechanism:\n *\n *  - in EIGEN_STATIC_ASSERT(CONDITION,MSG) the parameter CONDITION must be a compile time boolean\n *    expression, and MSG an enum listed in struct internal::static_assertion<true>\n *\n *  - define EIGEN_NO_STATIC_ASSERT to disable them (and save compilation time)\n *    in that case, the static assertion is converted to the following runtime assert:\n *      eigen_assert(CONDITION && \"MSG\")\n *\n *  - currently EIGEN_STATIC_ASSERT can only be used in function scope\n *\n */\n\n#ifndef EIGEN_NO_STATIC_ASSERT\n\n  #if EIGEN_MAX_CPP_VER>=11 && (__has_feature(cxx_static_assert) || (defined(__cplusplus) && __cplusplus >= 201103L) || (EIGEN_COMP_MSVC >= 1600))\n\n    // if native static_assert is enabled, let's use it\n    #define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG);\n\n  #else // not CXX0X\n\n    namespace Eigen {\n\n    namespace internal {\n\n    template<bool condition>\n    struct static_assertion {};\n\n    template<>\n    struct static_assertion<true>\n    {\n      enum {\n        YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX,\n        YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES,\n        YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES,\n        THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE,\n        THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE,\n        THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE,\n        OUT_OF_RANGE_ACCESS,\n        YOU_MADE_A_PROGRAMMING_MISTAKE,\n        
EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT,\n        EIGEN_INTERNAL_COMPILATION_ERROR_OR_YOU_MADE_A_PROGRAMMING_MISTAKE,\n        YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR,\n        YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR,\n        UNALIGNED_LOAD_AND_STORE_OPERATIONS_UNIMPLEMENTED_ON_ALTIVEC,\n        THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES,\n        FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED,\n        NUMERIC_TYPE_MUST_BE_REAL,\n        COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED,\n        WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED,\n        THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE,\n        INVALID_MATRIX_PRODUCT,\n        INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS,\n        INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION,\n        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY,\n        THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES,\n        THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES,\n        INVALID_MATRIX_TEMPLATE_PARAMETERS,\n        INVALID_MATRIXBASE_TEMPLATE_PARAMETERS,\n        BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER,\n        THIS_METHOD_IS_ONLY_FOR_DIAGONAL_MATRIX,\n        THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE,\n        THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES,\n        YOU_ALREADY_SPECIFIED_THIS_STRIDE,\n        INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION,\n        THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD,\n        PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1,\n        THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS,\n        YOU_CANNOT_MIX_ARRAYS_AND_MATRICES,\n        YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION,\n        
THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY,\n        YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT,\n        THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS,\n        THIS_METHOD_IS_ONLY_FOR_INNER_OR_LAZY_PRODUCTS,\n        THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL,\n        THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES,\n        YOU_PASSED_A_ROW_VECTOR_BUT_A_COLUMN_VECTOR_WAS_EXPECTED,\n        YOU_PASSED_A_COLUMN_VECTOR_BUT_A_ROW_VECTOR_WAS_EXPECTED,\n        THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE,\n        THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH,\n        OBJECT_ALLOCATED_ON_STACK_IS_TOO_BIG,\n        IMPLICIT_CONVERSION_TO_SCALAR_IS_FOR_INNER_PRODUCT_ONLY,\n        STORAGE_LAYOUT_DOES_NOT_MATCH,\n        EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT__INVALID_COST_VALUE,\n        THIS_COEFFICIENT_ACCESSOR_TAKING_ONE_ACCESS_IS_ONLY_FOR_EXPRESSIONS_ALLOWING_LINEAR_ACCESS,\n        MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY,\n        THIS_TYPE_IS_NOT_SUPPORTED,\n        STORAGE_KIND_MUST_MATCH,\n        STORAGE_INDEX_MUST_MATCH,\n        CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY\n      };\n    };\n\n    } // end namespace internal\n\n    } // end namespace Eigen\n\n    // Specialized implementation for MSVC to avoid \"conditional\n    // expression is constant\" warnings.  
This implementation doesn't\n    // appear to work under GCC, hence the multiple implementations.\n    #if EIGEN_COMP_MSVC\n\n      #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \\\n        {Eigen::internal::static_assertion<bool(CONDITION)>::MSG;}\n\n    #else\n      // In some cases clang interprets bool(CONDITION) as function declaration\n      #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \\\n        if (Eigen::internal::static_assertion<static_cast<bool>(CONDITION)>::MSG) {}\n\n    #endif\n\n  #endif // not CXX0X\n\n#else // EIGEN_NO_STATIC_ASSERT\n\n  #define EIGEN_STATIC_ASSERT(CONDITION,MSG) eigen_assert((CONDITION) && #MSG);\n\n#endif // EIGEN_NO_STATIC_ASSERT\n\n\n// static assertion failing if the type \\a TYPE is not a vector type\n#define EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE) \\\n  EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime, \\\n                      YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX)\n\n// static assertion failing if the type \\a TYPE is not fixed-size\n#define EIGEN_STATIC_ASSERT_FIXED_SIZE(TYPE) \\\n  EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime!=Eigen::Dynamic, \\\n                      YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR)\n\n// static assertion failing if the type \\a TYPE is not dynamic-size\n#define EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(TYPE) \\\n  EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime==Eigen::Dynamic, \\\n                      YOU_CALLED_A_DYNAMIC_SIZE_METHOD_ON_A_FIXED_SIZE_MATRIX_OR_VECTOR)\n\n// static assertion failing if the type \\a TYPE is not a vector type of the given size\n#define EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(TYPE, SIZE) \\\n  EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime && TYPE::SizeAtCompileTime==SIZE, \\\n                      THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE)\n\n// static assertion failing if the type \\a TYPE is not a vector type of the given size\n#define EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(TYPE, ROWS, COLS) \\\n  
EIGEN_STATIC_ASSERT(TYPE::RowsAtCompileTime==ROWS && TYPE::ColsAtCompileTime==COLS, \\\n                      THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE)\n\n// static assertion failing if the two vector expression types are not compatible (same fixed-size or dynamic size)\n#define EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(TYPE0,TYPE1) \\\n  EIGEN_STATIC_ASSERT( \\\n      (int(TYPE0::SizeAtCompileTime)==Eigen::Dynamic \\\n    || int(TYPE1::SizeAtCompileTime)==Eigen::Dynamic \\\n    || int(TYPE0::SizeAtCompileTime)==int(TYPE1::SizeAtCompileTime)),\\\n    YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES)\n\n#define EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1) \\\n     ( \\\n        (int(Eigen::internal::size_of_xpr_at_compile_time<TYPE0>::ret)==0 && int(Eigen::internal::size_of_xpr_at_compile_time<TYPE1>::ret)==0) \\\n    || (\\\n          (int(TYPE0::RowsAtCompileTime)==Eigen::Dynamic \\\n        || int(TYPE1::RowsAtCompileTime)==Eigen::Dynamic \\\n        || int(TYPE0::RowsAtCompileTime)==int(TYPE1::RowsAtCompileTime)) \\\n      &&  (int(TYPE0::ColsAtCompileTime)==Eigen::Dynamic \\\n        || int(TYPE1::ColsAtCompileTime)==Eigen::Dynamic \\\n        || int(TYPE0::ColsAtCompileTime)==int(TYPE1::ColsAtCompileTime))\\\n       ) \\\n     )\n\n#define EIGEN_STATIC_ASSERT_NON_INTEGER(TYPE) \\\n    EIGEN_STATIC_ASSERT(!NumTraits<TYPE>::IsInteger, THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)\n\n\n// static assertion failing if it is guaranteed at compile-time that the two matrix expression types have different sizes\n#define EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0,TYPE1) \\\n  EIGEN_STATIC_ASSERT( \\\n     EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1),\\\n    YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES)\n\n#define EIGEN_STATIC_ASSERT_SIZE_1x1(TYPE) \\\n      EIGEN_STATIC_ASSERT((TYPE::RowsAtCompileTime == 1 || TYPE::RowsAtCompileTime == Dynamic) && \\\n                          (TYPE::ColsAtCompileTime == 1 || TYPE::ColsAtCompileTime == Dynamic), \\\n                          
THIS_METHOD_IS_ONLY_FOR_1x1_EXPRESSIONS)\n\n#define EIGEN_STATIC_ASSERT_LVALUE(Derived) \\\n      EIGEN_STATIC_ASSERT(Eigen::internal::is_lvalue<Derived>::value, \\\n                          THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY)\n\n#define EIGEN_STATIC_ASSERT_ARRAYXPR(Derived) \\\n      EIGEN_STATIC_ASSERT((Eigen::internal::is_same<typename Eigen::internal::traits<Derived>::XprKind, ArrayXpr>::value), \\\n                          THIS_METHOD_IS_ONLY_FOR_ARRAYS_NOT_MATRICES)\n\n#define EIGEN_STATIC_ASSERT_SAME_XPR_KIND(Derived1, Derived2) \\\n      EIGEN_STATIC_ASSERT((Eigen::internal::is_same<typename Eigen::internal::traits<Derived1>::XprKind, \\\n                                             typename Eigen::internal::traits<Derived2>::XprKind \\\n                                            >::value), \\\n                          YOU_CANNOT_MIX_ARRAYS_AND_MATRICES)\n\n// Check that a cost value is positive, and that is stay within a reasonable range\n// TODO this check could be enabled for internal debugging only\n#define EIGEN_INTERNAL_CHECK_COST_VALUE(C) \\\n      EIGEN_STATIC_ASSERT((C)>=0 && (C)<=HugeCost*HugeCost, EIGEN_INTERNAL_ERROR_PLEASE_FILE_A_BUG_REPORT__INVALID_COST_VALUE);\n\n#endif // EIGEN_STATIC_ASSERT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/SymbolicIndex.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SYMBOLIC_INDEX_H\n#define EIGEN_SYMBOLIC_INDEX_H\n\nnamespace Eigen {\n\n/** \\namespace Eigen::Symbolic\n  * \\ingroup Core_Module\n  *\n  * This namespace defines a set of classes and functions to build and evaluate symbolic expressions of scalar type Index.\n  * Here is a simple example:\n  *\n  * \\code\n  * // First step, defines symbols:\n  * struct x_tag {};  static const Symbolic::SymbolExpr<x_tag> x;\n  * struct y_tag {};  static const Symbolic::SymbolExpr<y_tag> y;\n  * struct z_tag {};  static const Symbolic::SymbolExpr<z_tag> z;\n  *\n  * // Defines an expression:\n  * auto expr = (x+3)/y+z;\n  *\n  * // And evaluate it: (c++14)\n  * std::cout << expr.eval(x=6,y=3,z=-13) << \"\\n\";\n  *\n  * // In c++98/11, only one symbol per expression is supported for now:\n  * auto expr98 = (3-x)/2;\n  * std::cout << expr98.eval(x=6) << \"\\n\";\n  * \\endcode\n  *\n  * It is currently only used internally to define and minipulate the placeholders::last and placeholders::end symbols in Eigen::seq and Eigen::seqN.\n  *\n  */\nnamespace Symbolic {\n\ntemplate<typename Tag> class Symbol;\ntemplate<typename Arg0> class NegateExpr;\ntemplate<typename Arg1,typename Arg2> class AddExpr;\ntemplate<typename Arg1,typename Arg2> class ProductExpr;\ntemplate<typename Arg1,typename Arg2> class QuotientExpr;\n\n// A simple wrapper around an integral value to provide the eval method.\n// We could also use a free-function symbolic_eval...\ntemplate<typename IndexType=Index>\nclass ValueExpr {\npublic:\n  ValueExpr(IndexType val) : m_value(val) {}\n  template<typename T>\n  IndexType 
eval_impl(const T&) const { return m_value; }\nprotected:\n  IndexType m_value;\n};\n\n// Specialization for compile-time value,\n// It is similar to ValueExpr(N) but this version helps the compiler to generate better code.\ntemplate<int N>\nclass ValueExpr<internal::FixedInt<N> > {\npublic:\n  ValueExpr() {}\n  template<typename T>\n  Index eval_impl(const T&) const { return N; }\n};\n\n\n/** \\class BaseExpr\n  * \\ingroup Core_Module\n  * Common base class of any symbolic expressions\n  */\ntemplate<typename Derived>\nclass BaseExpr\n{\npublic:\n  const Derived& derived() const { return *static_cast<const Derived*>(this); }\n\n  /** Evaluate the expression given the \\a values of the symbols.\n    *\n    * \\param values defines the values of the symbols, it can either be a SymbolValue or a std::tuple of SymbolValue\n    *               as constructed by SymbolExpr::operator= operator.\n    *\n    */\n  template<typename T>\n  Index eval(const T& values) const { return derived().eval_impl(values); }\n\n#if EIGEN_HAS_CXX14\n  template<typename... Types>\n  Index eval(Types&&... 
values) const { return derived().eval_impl(std::make_tuple(values...)); }\n#endif\n\n  NegateExpr<Derived> operator-() const { return NegateExpr<Derived>(derived()); }\n\n  AddExpr<Derived,ValueExpr<> > operator+(Index b) const\n  { return AddExpr<Derived,ValueExpr<> >(derived(),  b); }\n  AddExpr<Derived,ValueExpr<> > operator-(Index a) const\n  { return AddExpr<Derived,ValueExpr<> >(derived(), -a); }\n  ProductExpr<Derived,ValueExpr<> > operator*(Index a) const\n  { return ProductExpr<Derived,ValueExpr<> >(derived(),a); }\n  QuotientExpr<Derived,ValueExpr<> > operator/(Index a) const\n  { return QuotientExpr<Derived,ValueExpr<> >(derived(),a); }\n\n  friend AddExpr<Derived,ValueExpr<> > operator+(Index a, const BaseExpr& b)\n  { return AddExpr<Derived,ValueExpr<> >(b.derived(), a); }\n  friend AddExpr<NegateExpr<Derived>,ValueExpr<> > operator-(Index a, const BaseExpr& b)\n  { return AddExpr<NegateExpr<Derived>,ValueExpr<> >(-b.derived(), a); }\n  friend ProductExpr<ValueExpr<>,Derived> operator*(Index a, const BaseExpr& b)\n  { return ProductExpr<ValueExpr<>,Derived>(a,b.derived()); }\n  friend QuotientExpr<ValueExpr<>,Derived> operator/(Index a, const BaseExpr& b)\n  { return QuotientExpr<ValueExpr<>,Derived>(a,b.derived()); }\n\n  template<int N>\n  AddExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator+(internal::FixedInt<N>) const\n  { return AddExpr<Derived,ValueExpr<internal::FixedInt<N> > >(derived(), ValueExpr<internal::FixedInt<N> >()); }\n  template<int N>\n  AddExpr<Derived,ValueExpr<internal::FixedInt<-N> > > operator-(internal::FixedInt<N>) const\n  { return AddExpr<Derived,ValueExpr<internal::FixedInt<-N> > >(derived(), ValueExpr<internal::FixedInt<-N> >()); }\n  template<int N>\n  ProductExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator*(internal::FixedInt<N>) const\n  { return ProductExpr<Derived,ValueExpr<internal::FixedInt<N> > >(derived(),ValueExpr<internal::FixedInt<N> >()); }\n  template<int N>\n  
QuotientExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator/(internal::FixedInt<N>) const\n  { return QuotientExpr<Derived,ValueExpr<internal::FixedInt<N> > >(derived(),ValueExpr<internal::FixedInt<N> >()); }\n\n  template<int N>\n  friend AddExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator+(internal::FixedInt<N>, const BaseExpr& b)\n  { return AddExpr<Derived,ValueExpr<internal::FixedInt<N> > >(b.derived(), ValueExpr<internal::FixedInt<N> >()); }\n  template<int N>\n  friend AddExpr<NegateExpr<Derived>,ValueExpr<internal::FixedInt<N> > > operator-(internal::FixedInt<N>, const BaseExpr& b)\n  { return AddExpr<NegateExpr<Derived>,ValueExpr<internal::FixedInt<N> > >(-b.derived(), ValueExpr<internal::FixedInt<N> >()); }\n  template<int N>\n  friend ProductExpr<ValueExpr<internal::FixedInt<N> >,Derived> operator*(internal::FixedInt<N>, const BaseExpr& b)\n  { return ProductExpr<ValueExpr<internal::FixedInt<N> >,Derived>(ValueExpr<internal::FixedInt<N> >(),b.derived()); }\n  template<int N>\n  friend QuotientExpr<ValueExpr<internal::FixedInt<N> >,Derived> operator/(internal::FixedInt<N>, const BaseExpr& b)\n  { return QuotientExpr<ValueExpr<internal::FixedInt<N> > ,Derived>(ValueExpr<internal::FixedInt<N> >(),b.derived()); }\n\n#if (!EIGEN_HAS_CXX14)\n  template<int N>\n  AddExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator+(internal::FixedInt<N> (*)()) const\n  { return AddExpr<Derived,ValueExpr<internal::FixedInt<N> > >(derived(), ValueExpr<internal::FixedInt<N> >()); }\n  template<int N>\n  AddExpr<Derived,ValueExpr<internal::FixedInt<-N> > > operator-(internal::FixedInt<N> (*)()) const\n  { return AddExpr<Derived,ValueExpr<internal::FixedInt<-N> > >(derived(), ValueExpr<internal::FixedInt<-N> >()); }\n  template<int N>\n  ProductExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator*(internal::FixedInt<N> (*)()) const\n  { return ProductExpr<Derived,ValueExpr<internal::FixedInt<N> > >(derived(),ValueExpr<internal::FixedInt<N> >()); }\n  
template<int N>\n  QuotientExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator/(internal::FixedInt<N> (*)()) const\n  { return QuotientExpr<Derived,ValueExpr<internal::FixedInt<N> > >(derived(),ValueExpr<internal::FixedInt<N> >()); }\n\n  template<int N>\n  friend AddExpr<Derived,ValueExpr<internal::FixedInt<N> > > operator+(internal::FixedInt<N> (*)(), const BaseExpr& b)\n  { return AddExpr<Derived,ValueExpr<internal::FixedInt<N> > >(b.derived(), ValueExpr<internal::FixedInt<N> >()); }\n  template<int N>\n  friend AddExpr<NegateExpr<Derived>,ValueExpr<internal::FixedInt<N> > > operator-(internal::FixedInt<N> (*)(), const BaseExpr& b)\n  { return AddExpr<NegateExpr<Derived>,ValueExpr<internal::FixedInt<N> > >(-b.derived(), ValueExpr<internal::FixedInt<N> >()); }\n  template<int N>\n  friend ProductExpr<ValueExpr<internal::FixedInt<N> >,Derived> operator*(internal::FixedInt<N> (*)(), const BaseExpr& b)\n  { return ProductExpr<ValueExpr<internal::FixedInt<N> >,Derived>(ValueExpr<internal::FixedInt<N> >(),b.derived()); }\n  template<int N>\n  friend QuotientExpr<ValueExpr<internal::FixedInt<N> >,Derived> operator/(internal::FixedInt<N> (*)(), const BaseExpr& b)\n  { return QuotientExpr<ValueExpr<internal::FixedInt<N> > ,Derived>(ValueExpr<internal::FixedInt<N> >(),b.derived()); }\n#endif\n\n\n  template<typename OtherDerived>\n  AddExpr<Derived,OtherDerived> operator+(const BaseExpr<OtherDerived> &b) const\n  { return AddExpr<Derived,OtherDerived>(derived(),  b.derived()); }\n\n  template<typename OtherDerived>\n  AddExpr<Derived,NegateExpr<OtherDerived> > operator-(const BaseExpr<OtherDerived> &b) const\n  { return AddExpr<Derived,NegateExpr<OtherDerived> >(derived(), -b.derived()); }\n\n  template<typename OtherDerived>\n  ProductExpr<Derived,OtherDerived> operator*(const BaseExpr<OtherDerived> &b) const\n  { return ProductExpr<Derived,OtherDerived>(derived(), b.derived()); }\n\n  template<typename OtherDerived>\n  QuotientExpr<Derived,OtherDerived> 
operator/(const BaseExpr<OtherDerived> &b) const\n  { return QuotientExpr<Derived,OtherDerived>(derived(), b.derived()); }\n};\n\ntemplate<typename T>\nstruct is_symbolic {\n  // BaseExpr has no conversion ctor, so we only have to check whether T can be staticaly cast to its base class BaseExpr<T>.\n  enum { value = internal::is_convertible<T,BaseExpr<T> >::value };\n};\n\n// Specialization for functions, because is_convertible fails in this case.\n// Useful in c++98/11 mode when testing is_symbolic<decltype(fix<N>)>\ntemplate<typename T>\nstruct is_symbolic<T (*)()> {\n  enum { value = false };\n};\n\n/** Represents the actual value of a symbol identified by its tag\n  *\n  * It is the return type of SymbolValue::operator=, and most of the time this is only way it is used.\n  */\ntemplate<typename Tag>\nclass SymbolValue\n{\npublic:\n  /** Default constructor from the value \\a val */\n  SymbolValue(Index val) : m_value(val) {}\n\n  /** \\returns the stored value of the symbol */\n  Index value() const { return m_value; }\nprotected:\n  Index m_value;\n};\n\n/** Expression of a symbol uniquely identified by the template parameter type \\c tag */\ntemplate<typename tag>\nclass SymbolExpr : public BaseExpr<SymbolExpr<tag> >\n{\npublic:\n  /** Alias to the template parameter \\c tag */\n  typedef tag Tag;\n\n  SymbolExpr() {}\n\n  /** Associate the value \\a val to the given symbol \\c *this, uniquely identified by its \\c Tag.\n    *\n    * The returned object should be passed to ExprBase::eval() to evaluate a given expression with this specified runtime-time value.\n    */\n  SymbolValue<Tag> operator=(Index val) const {\n    return SymbolValue<Tag>(val);\n  }\n\n  Index eval_impl(const SymbolValue<Tag> &values) const { return values.value(); }\n\n#if EIGEN_HAS_CXX14\n  // C++14 versions suitable for multiple symbols\n  template<typename... 
Types>\n  Index eval_impl(const std::tuple<Types...>& values) const { return std::get<SymbolValue<Tag> >(values).value(); }\n#endif\n};\n\ntemplate<typename Arg0>\nclass NegateExpr : public BaseExpr<NegateExpr<Arg0> >\n{\npublic:\n  NegateExpr(const Arg0& arg0) : m_arg0(arg0) {}\n\n  template<typename T>\n  Index eval_impl(const T& values) const { return -m_arg0.eval_impl(values); }\nprotected:\n  Arg0 m_arg0;\n};\n\ntemplate<typename Arg0, typename Arg1>\nclass AddExpr : public BaseExpr<AddExpr<Arg0,Arg1> >\n{\npublic:\n  AddExpr(const Arg0& arg0, const Arg1& arg1) : m_arg0(arg0), m_arg1(arg1) {}\n\n  template<typename T>\n  Index eval_impl(const T& values) const { return m_arg0.eval_impl(values) + m_arg1.eval_impl(values); }\nprotected:\n  Arg0 m_arg0;\n  Arg1 m_arg1;\n};\n\ntemplate<typename Arg0, typename Arg1>\nclass ProductExpr : public BaseExpr<ProductExpr<Arg0,Arg1> >\n{\npublic:\n  ProductExpr(const Arg0& arg0, const Arg1& arg1) : m_arg0(arg0), m_arg1(arg1) {}\n\n  template<typename T>\n  Index eval_impl(const T& values) const { return m_arg0.eval_impl(values) * m_arg1.eval_impl(values); }\nprotected:\n  Arg0 m_arg0;\n  Arg1 m_arg1;\n};\n\ntemplate<typename Arg0, typename Arg1>\nclass QuotientExpr : public BaseExpr<QuotientExpr<Arg0,Arg1> >\n{\npublic:\n  QuotientExpr(const Arg0& arg0, const Arg1& arg1) : m_arg0(arg0), m_arg1(arg1) {}\n\n  template<typename T>\n  Index eval_impl(const T& values) const { return m_arg0.eval_impl(values) / m_arg1.eval_impl(values); }\nprotected:\n  Arg0 m_arg0;\n  Arg1 m_arg1;\n};\n\n} // end namespace Symbolic\n\n} // end namespace Eigen\n\n#endif // EIGEN_SYMBOLIC_INDEX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Core/util/XprHelper.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_XPRHELPER_H\n#define EIGEN_XPRHELPER_H\n\n// just a workaround because GCC seems to not really like empty structs\n// FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled\n// so currently we simply disable this optimization for gcc 4.3\n#if EIGEN_COMP_GNUC && !EIGEN_GNUC_AT(4,3)\n  #define EIGEN_EMPTY_STRUCT_CTOR(X) \\\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X() {} \\\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X(const X& ) {}\n#else\n  #define EIGEN_EMPTY_STRUCT_CTOR(X)\n#endif\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<typename IndexDest, typename IndexSrc>\nEIGEN_DEVICE_FUNC\ninline IndexDest convert_index(const IndexSrc& idx) {\n  // for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away:\n  eigen_internal_assert(idx <= NumTraits<IndexDest>::highest() && \"Index value to big for target type\");\n  return IndexDest(idx);\n}\n\n\n// promote_scalar_arg is an helper used in operation between an expression and a scalar, like:\n//    expression * scalar\n// Its role is to determine how the type T of the scalar operand should be promoted given the scalar type ExprScalar of the given expression.\n// The IsSupported template parameter must be provided by the caller as: internal::has_ReturnType<ScalarBinaryOpTraits<ExprScalar,T,op> >::value using the proper order for ExprScalar and T.\n// Then the logic is as follows:\n//  - if the operation is natively supported as defined by IsSupported, then the scalar type is not promoted, and T is returned.\n//  
- otherwise, NumTraits<ExprScalar>::Literal is returned if T is implicitly convertible to NumTraits<ExprScalar>::Literal AND that this does not imply a float to integer conversion.\n//  - otherwise, ExprScalar is returned if T is implicitly convertible to ExprScalar AND that this does not imply a float to integer conversion.\n//  - In all other cases, the promoted type is not defined, and the respective operation is thus invalid and not available (SFINAE).\ntemplate<typename ExprScalar,typename T, bool IsSupported>\nstruct promote_scalar_arg;\n\ntemplate<typename S,typename T>\nstruct promote_scalar_arg<S,T,true>\n{\n  typedef T type;\n};\n\n// Recursively check safe conversion to PromotedType, and then ExprScalar if they are different.\ntemplate<typename ExprScalar,typename T,typename PromotedType,\n  bool ConvertibleToLiteral = internal::is_convertible<T,PromotedType>::value,\n  bool IsSafe = NumTraits<T>::IsInteger || !NumTraits<PromotedType>::IsInteger>\nstruct promote_scalar_arg_unsupported;\n\n// Start recursion with NumTraits<ExprScalar>::Literal\ntemplate<typename S,typename T>\nstruct promote_scalar_arg<S,T,false> : promote_scalar_arg_unsupported<S,T,typename NumTraits<S>::Literal> {};\n\n// We found a match!\ntemplate<typename S,typename T, typename PromotedType>\nstruct promote_scalar_arg_unsupported<S,T,PromotedType,true,true>\n{\n  typedef PromotedType type;\n};\n\n// No match, but no real-to-integer issues, and ExprScalar and current PromotedType are different,\n// so let's try to promote to ExprScalar\ntemplate<typename ExprScalar,typename T, typename PromotedType>\nstruct promote_scalar_arg_unsupported<ExprScalar,T,PromotedType,false,true>\n   : promote_scalar_arg_unsupported<ExprScalar,T,ExprScalar>\n{};\n\n// Unsafe real-to-integer, let's stop.\ntemplate<typename S,typename T, typename PromotedType, bool ConvertibleToLiteral>\nstruct promote_scalar_arg_unsupported<S,T,PromotedType,ConvertibleToLiteral,false> {};\n\n// T is not even convertible to 
ExprScalar, let's stop.\ntemplate<typename S,typename T>\nstruct promote_scalar_arg_unsupported<S,T,S,false,true> {};\n\n//classes inheriting no_assignment_operator don't generate a default operator=.\nclass no_assignment_operator\n{\n  private:\n    no_assignment_operator& operator=(const no_assignment_operator&);\n};\n\n/** \\internal return the index type with the largest number of bits */\ntemplate<typename I1, typename I2>\nstruct promote_index_type\n{\n  typedef typename conditional<(sizeof(I1)<sizeof(I2)), I2, I1>::type type;\n};\n\n/** \\internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that\n  * can be accessed using value() and setValue().\n  * Otherwise, this class is an empty structure and value() just returns the template parameter Value.\n  */\ntemplate<typename T, int Value> class variable_if_dynamic\n{\n  public:\n    EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamic)\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }\n    EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T value() { return T(Value); }\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator T() const { return T(Value); }\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T) {}\n};\n\ntemplate<typename T> class variable_if_dynamic<T, Dynamic>\n{\n    T m_value;\n    EIGEN_DEVICE_FUNC variable_if_dynamic() { eigen_assert(false); }\n  public:\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T value) : m_value(value) {}\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T value() const { return m_value; }\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator T() const { return m_value; }\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }\n};\n\n/** \\internal like variable_if_dynamic but for DynamicIndex\n  */\ntemplate<typename T, int Value> class variable_if_dynamicindex\n{\n  public:\n    
EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex)\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }\n    EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T value() { return T(Value); }\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T) {}\n};\n\ntemplate<typename T> class variable_if_dynamicindex<T, DynamicIndex>\n{\n    T m_value;\n    EIGEN_DEVICE_FUNC variable_if_dynamicindex() { eigen_assert(false); }\n  public:\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T value) : m_value(value) {}\n    EIGEN_DEVICE_FUNC T EIGEN_STRONG_INLINE value() const { return m_value; }\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }\n};\n\ntemplate<typename T> struct functor_traits\n{\n  enum\n  {\n    Cost = 10,\n    PacketAccess = false,\n    IsRepeatable = false\n  };\n};\n\ntemplate<typename T> struct packet_traits;\n\ntemplate<typename T> struct unpacket_traits\n{\n  typedef T type;\n  typedef T half;\n  enum\n  {\n    size = 1,\n    alignment = 1\n  };\n};\n\ntemplate<int Size, typename PacketType,\n         bool Stop = Size==Dynamic || (Size%unpacket_traits<PacketType>::size)==0 || is_same<PacketType,typename unpacket_traits<PacketType>::half>::value>\nstruct find_best_packet_helper;\n\ntemplate< int Size, typename PacketType>\nstruct find_best_packet_helper<Size,PacketType,true>\n{\n  typedef PacketType type;\n};\n\ntemplate<int Size, typename PacketType>\nstruct find_best_packet_helper<Size,PacketType,false>\n{\n  typedef typename find_best_packet_helper<Size,typename unpacket_traits<PacketType>::half>::type type;\n};\n\ntemplate<typename T, int Size>\nstruct find_best_packet\n{\n  typedef typename find_best_packet_helper<Size,typename packet_traits<T>::type>::type type;\n};\n\n#if EIGEN_MAX_STATIC_ALIGN_BYTES>0\ntemplate<int ArrayBytes, int AlignmentBytes,\n         bool Match     =  
bool((ArrayBytes%AlignmentBytes)==0),\n         bool TryHalf   =  bool(EIGEN_MIN_ALIGN_BYTES<AlignmentBytes) >\nstruct compute_default_alignment_helper\n{\n  enum { value = 0 };\n};\n\ntemplate<int ArrayBytes, int AlignmentBytes, bool TryHalf>\nstruct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, true, TryHalf> // Match\n{\n  enum { value = AlignmentBytes };\n};\n\ntemplate<int ArrayBytes, int AlignmentBytes>\nstruct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, false, true> // Try-half\n{\n  // current packet too large, try with an half-packet\n  enum { value = compute_default_alignment_helper<ArrayBytes, AlignmentBytes/2>::value };\n};\n#else\n// If static alignment is disabled, no need to bother.\n// This also avoids a division by zero in \"bool Match =  bool((ArrayBytes%AlignmentBytes)==0)\"\ntemplate<int ArrayBytes, int AlignmentBytes>\nstruct compute_default_alignment_helper\n{\n  enum { value = 0 };\n};\n#endif\n\ntemplate<typename T, int Size> struct compute_default_alignment {\n  enum { value = compute_default_alignment_helper<Size*sizeof(T),EIGEN_MAX_STATIC_ALIGN_BYTES>::value };\n};\n\ntemplate<typename T> struct compute_default_alignment<T,Dynamic> {\n  enum { value = EIGEN_MAX_ALIGN_BYTES };\n};\n\ntemplate<typename _Scalar, int _Rows, int _Cols,\n         int _Options = AutoAlign |\n                          ( (_Rows==1 && _Cols!=1) ? RowMajor\n                          : (_Cols==1 && _Rows!=1) ? ColMajor\n                          : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),\n         int _MaxRows = _Rows,\n         int _MaxCols = _Cols\n> class make_proper_matrix_type\n{\n    enum {\n      IsColVector = _Cols==1 && _Rows!=1,\n      IsRowVector = _Rows==1 && _Cols!=1,\n      Options = IsColVector ? (_Options | ColMajor) & ~RowMajor\n              : IsRowVector ? 
(_Options | RowMajor) & ~ColMajor\n              : _Options\n    };\n  public:\n    typedef Matrix<_Scalar, _Rows, _Cols, Options, _MaxRows, _MaxCols> type;\n};\n\ntemplate<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>\nclass compute_matrix_flags\n{\n    enum { row_major_bit = Options&RowMajor ? RowMajorBit : 0 };\n  public:\n    // FIXME currently we still have to handle DirectAccessBit at the expression level to handle DenseCoeffsBase<>\n    // and then propagate this information to the evaluator's flags.\n    // However, I (Gael) think that DirectAccessBit should only matter at the evaluation stage.\n    enum { ret = DirectAccessBit | LvalueBit | NestByRefBit | row_major_bit };\n};\n\ntemplate<int _Rows, int _Cols> struct size_at_compile_time\n{\n  enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };\n};\n\ntemplate<typename XprType> struct size_of_xpr_at_compile_time\n{\n  enum { ret = size_at_compile_time<traits<XprType>::RowsAtCompileTime,traits<XprType>::ColsAtCompileTime>::ret };\n};\n\n/* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type,\n * whereas eval is a const reference in the case of a matrix\n */\n\ntemplate<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_matrix_type;\ntemplate<typename T, typename BaseClassType, int Flags> struct plain_matrix_type_dense;\ntemplate<typename T> struct plain_matrix_type<T,Dense>\n{\n  typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, traits<T>::Flags>::type type;\n};\ntemplate<typename T> struct plain_matrix_type<T,DiagonalShape>\n{\n  typedef typename T::PlainObject type;\n};\n\ntemplate<typename T, int Flags> struct plain_matrix_type_dense<T,MatrixXpr,Flags>\n{\n  typedef Matrix<typename traits<T>::Scalar,\n                traits<T>::RowsAtCompileTime,\n                traits<T>::ColsAtCompileTime,\n                AutoAlign | (Flags&RowMajorBit ? 
RowMajor : ColMajor),\n                traits<T>::MaxRowsAtCompileTime,\n                traits<T>::MaxColsAtCompileTime\n          > type;\n};\n\ntemplate<typename T, int Flags> struct plain_matrix_type_dense<T,ArrayXpr,Flags>\n{\n  typedef Array<typename traits<T>::Scalar,\n                traits<T>::RowsAtCompileTime,\n                traits<T>::ColsAtCompileTime,\n                AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),\n                traits<T>::MaxRowsAtCompileTime,\n                traits<T>::MaxColsAtCompileTime\n          > type;\n};\n\n/* eval : the return type of eval(). For matrices, this is just a const reference\n * in order to avoid a useless copy\n */\n\ntemplate<typename T, typename StorageKind = typename traits<T>::StorageKind> struct eval;\n\ntemplate<typename T> struct eval<T,Dense>\n{\n  typedef typename plain_matrix_type<T>::type type;\n//   typedef typename T::PlainObject type;\n//   typedef T::Matrix<typename traits<T>::Scalar,\n//                 traits<T>::RowsAtCompileTime,\n//                 traits<T>::ColsAtCompileTime,\n//                 AutoAlign | (traits<T>::Flags&RowMajorBit ? 
RowMajor : ColMajor),\n//                 traits<T>::MaxRowsAtCompileTime,\n//                 traits<T>::MaxColsAtCompileTime\n//           > type;\n};\n\ntemplate<typename T> struct eval<T,DiagonalShape>\n{\n  typedef typename plain_matrix_type<T>::type type;\n};\n\n// for matrices, no need to evaluate, just use a const reference to avoid a useless copy\ntemplate<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>\nstruct eval<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>\n{\n  typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;\n};\n\ntemplate<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>\nstruct eval<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>\n{\n  typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;\n};\n\n\n/* similar to plain_matrix_type, but using the evaluator's Flags */\ntemplate<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_object_eval;\n\ntemplate<typename T>\nstruct plain_object_eval<T,Dense>\n{\n  typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, evaluator<T>::Flags>::type type;\n};\n\n\n/* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major\n */\ntemplate<typename T> struct plain_matrix_type_column_major\n{\n  enum { Rows = traits<T>::RowsAtCompileTime,\n         Cols = traits<T>::ColsAtCompileTime,\n         MaxRows = traits<T>::MaxRowsAtCompileTime,\n         MaxCols = traits<T>::MaxColsAtCompileTime\n  };\n  typedef Matrix<typename traits<T>::Scalar,\n                Rows,\n                Cols,\n                (MaxRows==1&&MaxCols!=1) ? 
RowMajor : ColMajor,\n                MaxRows,\n                MaxCols\n          > type;\n};\n\n/* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major\n */\ntemplate<typename T> struct plain_matrix_type_row_major\n{\n  enum { Rows = traits<T>::RowsAtCompileTime,\n         Cols = traits<T>::ColsAtCompileTime,\n         MaxRows = traits<T>::MaxRowsAtCompileTime,\n         MaxCols = traits<T>::MaxColsAtCompileTime\n  };\n  typedef Matrix<typename traits<T>::Scalar,\n                Rows,\n                Cols,\n                (MaxCols==1&&MaxRows!=1) ? RowMajor : ColMajor,\n                MaxRows,\n                MaxCols\n          > type;\n};\n\n/** \\internal The reference selector for template expressions. The idea is that we don't\n  * need to use references for expressions since they are light weight proxy\n  * objects which should generate no copying overhead. */\ntemplate <typename T>\nstruct ref_selector\n{\n  typedef typename conditional<\n    bool(traits<T>::Flags & NestByRefBit),\n    T const&,\n    const T\n  >::type type;\n  \n  typedef typename conditional<\n    bool(traits<T>::Flags & NestByRefBit),\n    T &,\n    T\n  >::type non_const_type;\n};\n\n/** \\internal Adds the const qualifier on the value-type of T2 if and only if T1 is a const type */\ntemplate<typename T1, typename T2>\nstruct transfer_constness\n{\n  typedef typename conditional<\n    bool(internal::is_const<T1>::value),\n    typename internal::add_const_on_value_type<T2>::type,\n    T2\n  >::type type;\n};\n\n\n// However, we still need a mechanism to detect whether an expression which is evaluated multiple time\n// has to be evaluated into a temporary.\n// That's the purpose of this new nested_eval helper:\n/** \\internal Determines how a given expression should be nested when evaluated multiple times.\n  * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be\n  * evaluated into the bigger product expression. 
The choice is between nesting the expression b+c as-is, or\n  * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is\n  * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes\n  * many coefficient accesses in the nested expressions -- as is the case with matrix product for example.\n  *\n  * \\tparam T the type of the expression being nested.\n  * \\tparam n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression.\n  * \\tparam PlainObject the type of the temporary if needed.\n  */\ntemplate<typename T, int n, typename PlainObject = typename plain_object_eval<T>::type> struct nested_eval\n{\n  enum {\n    ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,\n    CoeffReadCost = evaluator<T>::CoeffReadCost,  // NOTE What if an evaluator evaluate itself into a tempory?\n                                                  //      Then CoeffReadCost will be small (e.g., 1) but we still have to evaluate, especially if n>1.\n                                                  //      This situation is already taken care by the EvalBeforeNestingBit flag, which is turned ON\n                                                  //      for all evaluator creating a temporary. This flag is then propagated by the parent evaluators.\n                                                  //      Another solution could be to count the number of temps?\n    NAsInteger = n == Dynamic ? 
HugeCost : n,\n    CostEval   = (NAsInteger+1) * ScalarReadCost + CoeffReadCost,\n    CostNoEval = NAsInteger * CoeffReadCost,\n    Evaluate = (int(evaluator<T>::Flags) & EvalBeforeNestingBit) || (int(CostEval) < int(CostNoEval))\n  };\n\n  typedef typename conditional<Evaluate, PlainObject, typename ref_selector<T>::type>::type type;\n};\n\ntemplate<typename T>\nEIGEN_DEVICE_FUNC\ninline T* const_cast_ptr(const T* ptr)\n{\n  return const_cast<T*>(ptr);\n}\n\ntemplate<typename Derived, typename XprKind = typename traits<Derived>::XprKind>\nstruct dense_xpr_base\n{\n  /* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */\n};\n\ntemplate<typename Derived>\nstruct dense_xpr_base<Derived, MatrixXpr>\n{\n  typedef MatrixBase<Derived> type;\n};\n\ntemplate<typename Derived>\nstruct dense_xpr_base<Derived, ArrayXpr>\n{\n  typedef ArrayBase<Derived> type;\n};\n\ntemplate<typename Derived, typename XprKind = typename traits<Derived>::XprKind, typename StorageKind = typename traits<Derived>::StorageKind>\nstruct generic_xpr_base;\n\ntemplate<typename Derived, typename XprKind>\nstruct generic_xpr_base<Derived, XprKind, Dense>\n{\n  typedef typename dense_xpr_base<Derived,XprKind>::type type;\n};\n\ntemplate<typename XprType, typename CastType> struct cast_return_type\n{\n  typedef typename XprType::Scalar CurrentScalarType;\n  typedef typename remove_all<CastType>::type _CastType;\n  typedef typename _CastType::Scalar NewScalarType;\n  typedef typename conditional<is_same<CurrentScalarType,NewScalarType>::value,\n                              const XprType&,CastType>::type type;\n};\n\ntemplate <typename A, typename B> struct promote_storage_type;\n\ntemplate <typename A> struct promote_storage_type<A,A>\n{\n  typedef A ret;\n};\ntemplate <typename A> struct promote_storage_type<A, const A>\n{\n  typedef A ret;\n};\ntemplate <typename A> struct promote_storage_type<const A, A>\n{\n  typedef A 
ret;\n};\n\n/** \\internal Specify the \"storage kind\" of applying a coefficient-wise\n  * binary operations between two expressions of kinds A and B respectively.\n  * The template parameter Functor permits to specialize the resulting storage kind wrt to\n  * the functor.\n  * The default rules are as follows:\n  * \\code\n  * A      op A      -> A\n  * A      op dense  -> dense\n  * dense  op B      -> dense\n  * sparse op dense  -> sparse\n  * dense  op sparse -> sparse\n  * \\endcode\n  */\ntemplate <typename A, typename B, typename Functor> struct cwise_promote_storage_type;\n\ntemplate <typename A, typename Functor>                   struct cwise_promote_storage_type<A,A,Functor>                                      { typedef A      ret; };\ntemplate <typename Functor>                               struct cwise_promote_storage_type<Dense,Dense,Functor>                              { typedef Dense  ret; };\ntemplate <typename A, typename Functor>                   struct cwise_promote_storage_type<A,Dense,Functor>                                  { typedef Dense  ret; };\ntemplate <typename B, typename Functor>                   struct cwise_promote_storage_type<Dense,B,Functor>                                  { typedef Dense  ret; };\ntemplate <typename Functor>                               struct cwise_promote_storage_type<Sparse,Dense,Functor>                             { typedef Sparse ret; };\ntemplate <typename Functor>                               struct cwise_promote_storage_type<Dense,Sparse,Functor>                             { typedef Sparse ret; };\n\ntemplate <typename LhsKind, typename RhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order {\n  enum { value = LhsOrder };\n};\n\ntemplate <typename LhsKind, int LhsOrder, int RhsOrder>   struct cwise_promote_storage_order<LhsKind,Sparse,LhsOrder,RhsOrder>                { enum { value = RhsOrder }; };\ntemplate <typename RhsKind, int LhsOrder, int RhsOrder>   struct 
cwise_promote_storage_order<Sparse,RhsKind,LhsOrder,RhsOrder>                { enum { value = LhsOrder }; };\ntemplate <int Order>                                      struct cwise_promote_storage_order<Sparse,Sparse,Order,Order>                       { enum { value = Order }; };\n\n\n/** \\internal Specify the \"storage kind\" of multiplying an expression of kind A with kind B.\n  * The template parameter ProductTag permits to specialize the resulting storage kind wrt to\n  * some compile-time properties of the product: GemmProduct, GemvProduct, OuterProduct, InnerProduct.\n  * The default rules are as follows:\n  * \\code\n  *  K * K            -> K\n  *  dense * K        -> dense\n  *  K * dense        -> dense\n  *  diag * K         -> K\n  *  K * diag         -> K\n  *  Perm * K         -> K\n  * K * Perm          -> K\n  * \\endcode\n  */\ntemplate <typename A, typename B, int ProductTag> struct product_promote_storage_type;\n\ntemplate <typename A, int ProductTag> struct product_promote_storage_type<A,                  A,                  ProductTag> { typedef A     ret;};\ntemplate <int ProductTag>             struct product_promote_storage_type<Dense,              Dense,              ProductTag> { typedef Dense ret;};\ntemplate <typename A, int ProductTag> struct product_promote_storage_type<A,                  Dense,              ProductTag> { typedef Dense ret; };\ntemplate <typename B, int ProductTag> struct product_promote_storage_type<Dense,              B,                  ProductTag> { typedef Dense ret; };\n\ntemplate <typename A, int ProductTag> struct product_promote_storage_type<A,                  DiagonalShape,      ProductTag> { typedef A ret; };\ntemplate <typename B, int ProductTag> struct product_promote_storage_type<DiagonalShape,      B,                  ProductTag> { typedef B ret; };\ntemplate <int ProductTag>             struct product_promote_storage_type<Dense,              DiagonalShape,      ProductTag> { typedef Dense ret; 
};\ntemplate <int ProductTag>             struct product_promote_storage_type<DiagonalShape,      Dense,              ProductTag> { typedef Dense ret; };\n\ntemplate <typename A, int ProductTag> struct product_promote_storage_type<A,                  PermutationStorage, ProductTag> { typedef A ret; };\ntemplate <typename B, int ProductTag> struct product_promote_storage_type<PermutationStorage, B,                  ProductTag> { typedef B ret; };\ntemplate <int ProductTag>             struct product_promote_storage_type<Dense,              PermutationStorage, ProductTag> { typedef Dense ret; };\ntemplate <int ProductTag>             struct product_promote_storage_type<PermutationStorage, Dense,              ProductTag> { typedef Dense ret; };\n\n/** \\internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type.\n  * \\tparam Scalar optional parameter allowing to pass a different scalar type than the one of the MatrixType.\n  */\ntemplate<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>\nstruct plain_row_type\n{\n  typedef Matrix<Scalar, 1, ExpressionType::ColsAtCompileTime,\n                 ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;\n  typedef Array<Scalar, 1, ExpressionType::ColsAtCompileTime,\n                 ExpressionType::PlainObject::Options | RowMajor, 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;\n\n  typedef typename conditional<\n    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,\n    MatrixRowType,\n    ArrayRowType \n  >::type type;\n};\n\ntemplate<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>\nstruct plain_col_type\n{\n  typedef Matrix<Scalar, ExpressionType::RowsAtCompileTime, 1,\n                 ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> MatrixColType;\n  typedef Array<Scalar, ExpressionType::RowsAtCompileTime, 1,\n    
             ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> ArrayColType;\n\n  typedef typename conditional<\n    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,\n    MatrixColType,\n    ArrayColType \n  >::type type;\n};\n\ntemplate<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>\nstruct plain_diag_type\n{\n  enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),\n         max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime)\n  };\n  typedef Matrix<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> MatrixDiagType;\n  typedef Array<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> ArrayDiagType;\n\n  typedef typename conditional<\n    is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,\n    MatrixDiagType,\n    ArrayDiagType \n  >::type type;\n};\n\ntemplate<typename Expr,typename Scalar = typename Expr::Scalar>\nstruct plain_constant_type\n{\n  enum { Options = (traits<Expr>::Flags&RowMajorBit)?RowMajor:0 };\n\n  typedef Array<Scalar,  traits<Expr>::RowsAtCompileTime,   traits<Expr>::ColsAtCompileTime,\n                Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> array_type;\n\n  typedef Matrix<Scalar,  traits<Expr>::RowsAtCompileTime,   traits<Expr>::ColsAtCompileTime,\n                 Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> matrix_type;\n\n  typedef CwiseNullaryOp<scalar_constant_op<Scalar>, const typename conditional<is_same< typename traits<Expr>::XprKind, MatrixXpr >::value, matrix_type, array_type>::type > type;\n};\n\ntemplate<typename ExpressionType>\nstruct is_lvalue\n{\n  enum { value = (!bool(is_const<ExpressionType>::value)) &&\n                 
bool(traits<ExpressionType>::Flags & LvalueBit) };\n};\n\ntemplate<typename T> struct is_diagonal\n{ enum { ret = false }; };\n\ntemplate<typename T> struct is_diagonal<DiagonalBase<T> >\n{ enum { ret = true }; };\n\ntemplate<typename T> struct is_diagonal<DiagonalWrapper<T> >\n{ enum { ret = true }; };\n\ntemplate<typename T, int S> struct is_diagonal<DiagonalMatrix<T,S> >\n{ enum { ret = true }; };\n\ntemplate<typename S1, typename S2> struct glue_shapes;\ntemplate<> struct glue_shapes<DenseShape,TriangularShape> { typedef TriangularShape type;  };\n\ntemplate<typename T1, typename T2>\nbool is_same_dense(const T1 &mat1, const T2 &mat2, typename enable_if<has_direct_access<T1>::ret&&has_direct_access<T2>::ret, T1>::type * = 0)\n{\n  return (mat1.data()==mat2.data()) && (mat1.innerStride()==mat2.innerStride()) && (mat1.outerStride()==mat2.outerStride());\n}\n\ntemplate<typename T1, typename T2>\nbool is_same_dense(const T1 &, const T2 &, typename enable_if<!(has_direct_access<T1>::ret&&has_direct_access<T2>::ret), T1>::type * = 0)\n{\n  return false;\n}\n\n// Internal helper defining the cost of a scalar division for the type T.\n// The default heuristic can be specialized for each scalar type and architecture.\ntemplate<typename T,bool Vectorized=false,typename EnableIf = void>\nstruct scalar_div_cost {\n  enum { value = 8*NumTraits<T>::MulCost };\n};\n\ntemplate<typename T,bool Vectorized>\nstruct scalar_div_cost<std::complex<T>, Vectorized> {\n  enum { value = 2*scalar_div_cost<T>::value\n               + 6*NumTraits<T>::MulCost\n               + 3*NumTraits<T>::AddCost\n  };\n};\n\n\ntemplate<bool Vectorized>\nstruct scalar_div_cost<signed long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 24 }; };\ntemplate<bool Vectorized>\nstruct scalar_div_cost<unsigned long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 21 }; };\n\n\n#ifdef EIGEN_DEBUG_ASSIGN\nstd::string 
demangle_traversal(int t)\n{\n  if(t==DefaultTraversal) return \"DefaultTraversal\";\n  if(t==LinearTraversal) return \"LinearTraversal\";\n  if(t==InnerVectorizedTraversal) return \"InnerVectorizedTraversal\";\n  if(t==LinearVectorizedTraversal) return \"LinearVectorizedTraversal\";\n  if(t==SliceVectorizedTraversal) return \"SliceVectorizedTraversal\";\n  return \"?\";\n}\nstd::string demangle_unrolling(int t)\n{\n  if(t==NoUnrolling) return \"NoUnrolling\";\n  if(t==InnerUnrolling) return \"InnerUnrolling\";\n  if(t==CompleteUnrolling) return \"CompleteUnrolling\";\n  return \"?\";\n}\nstd::string demangle_flags(int f)\n{\n  std::string res;\n  if(f&RowMajorBit)                 res += \" | RowMajor\";\n  if(f&PacketAccessBit)             res += \" | Packet\";\n  if(f&LinearAccessBit)             res += \" | Linear\";\n  if(f&LvalueBit)                   res += \" | Lvalue\";\n  if(f&DirectAccessBit)             res += \" | Direct\";\n  if(f&NestByRefBit)                res += \" | NestByRef\";\n  if(f&NoPreferredStorageOrderBit)  res += \" | NoPreferredStorageOrderBit\";\n  \n  return res;\n}\n#endif\n\n} // end namespace internal\n\n\n/** \\class ScalarBinaryOpTraits\n  * \\ingroup Core_Module\n  *\n  * \\brief Determines whether the given binary operation of two numeric types is allowed and what the scalar return type is.\n  *\n  * This class permits to control the scalar return type of any binary operation performed on two different scalar types through (partial) template specializations.\n  *\n  * For instance, let \\c U1, \\c U2 and \\c U3 be three user defined scalar types for which most operations between instances of \\c U1 and \\c U2 returns an \\c U3.\n  * You can let %Eigen knows that by defining:\n    \\code\n    template<typename BinaryOp>\n    struct ScalarBinaryOpTraits<U1,U2,BinaryOp> { typedef U3 ReturnType;  };\n    template<typename BinaryOp>\n    struct ScalarBinaryOpTraits<U2,U1,BinaryOp> { typedef U3 ReturnType;  };\n    \\endcode\n  * You 
can then explicitly disable some particular operations to get more explicit error messages:\n    \\code\n    template<>\n    struct ScalarBinaryOpTraits<U1,U2,internal::scalar_max_op<U1,U2> > {};\n    \\endcode\n  * Or customize the return type for individual operation:\n    \\code\n    template<>\n    struct ScalarBinaryOpTraits<U1,U2,internal::scalar_sum_op<U1,U2> > { typedef U1 ReturnType; };\n    \\endcode\n  *\n  * By default, the following generic combinations are supported:\n  <table class=\"manual\">\n  <tr><th>ScalarA</th><th>ScalarB</th><th>BinaryOp</th><th>ReturnType</th><th>Note</th></tr>\n  <tr            ><td>\\c T </td><td>\\c T </td><td>\\c * </td><td>\\c T </td><td></td></tr>\n  <tr class=\"alt\"><td>\\c NumTraits<T>::Real </td><td>\\c T </td><td>\\c * </td><td>\\c T </td><td>Only if \\c NumTraits<T>::IsComplex </td></tr>\n  <tr            ><td>\\c T </td><td>\\c NumTraits<T>::Real </td><td>\\c * </td><td>\\c T </td><td>Only if \\c NumTraits<T>::IsComplex </td></tr>\n  </table>\n  *\n  * \\sa CwiseBinaryOp\n  */\ntemplate<typename ScalarA, typename ScalarB, typename BinaryOp=internal::scalar_product_op<ScalarA,ScalarB> >\nstruct ScalarBinaryOpTraits\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n  // for backward compatibility, use the hints given by the (deprecated) internal::scalar_product_traits class.\n  : internal::scalar_product_traits<ScalarA,ScalarB>\n#endif // EIGEN_PARSED_BY_DOXYGEN\n{};\n\ntemplate<typename T, typename BinaryOp>\nstruct ScalarBinaryOpTraits<T,T,BinaryOp>\n{\n  typedef T ReturnType;\n};\n\ntemplate <typename T, typename BinaryOp>\nstruct ScalarBinaryOpTraits<T, typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, BinaryOp>\n{\n  typedef T ReturnType;\n};\ntemplate <typename T, typename BinaryOp>\nstruct ScalarBinaryOpTraits<typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, T, BinaryOp>\n{\n  typedef T ReturnType;\n};\n\n// For Matrix * 
Permutation\ntemplate<typename T, typename BinaryOp>\nstruct ScalarBinaryOpTraits<T,void,BinaryOp>\n{\n  typedef T ReturnType;\n};\n\n// For Permutation * Matrix\ntemplate<typename T, typename BinaryOp>\nstruct ScalarBinaryOpTraits<void,T,BinaryOp>\n{\n  typedef T ReturnType;\n};\n\n// for Permutation*Permutation\ntemplate<typename BinaryOp>\nstruct ScalarBinaryOpTraits<void,void,BinaryOp>\n{\n  typedef void ReturnType;\n};\n\n// We require Lhs and Rhs to have \"compatible\" scalar types.\n// It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths.\n// So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to\n// add together a float matrix and a double matrix.\n#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \\\n  EIGEN_STATIC_ASSERT((Eigen::internal::has_ReturnType<ScalarBinaryOpTraits<LHS, RHS,BINOP> >::value), \\\n    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)\n    \n} // end namespace Eigen\n\n#endif // EIGEN_XPRHELPER_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Eigenvalues/ComplexEigenSolver.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Claire Maurice\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_COMPLEX_EIGEN_SOLVER_H\n#define EIGEN_COMPLEX_EIGEN_SOLVER_H\n\n#include \"./ComplexSchur.h\"\n\nnamespace Eigen { \n\n/** \\eigenvalues_module \\ingroup Eigenvalues_Module\n  *\n  *\n  * \\class ComplexEigenSolver\n  *\n  * \\brief Computes eigenvalues and eigenvectors of general complex matrices\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are\n  * computing the eigendecomposition; this is expected to be an\n  * instantiation of the Matrix class template.\n  *\n  * The eigenvalues and eigenvectors of a matrix \\f$ A \\f$ are scalars\n  * \\f$ \\lambda \\f$ and vectors \\f$ v \\f$ such that \\f$ Av = \\lambda v\n  * \\f$.  If \\f$ D \\f$ is a diagonal matrix with the eigenvalues on\n  * the diagonal, and \\f$ V \\f$ is a matrix with the eigenvectors as\n  * its columns, then \\f$ A V = V D \\f$. The matrix \\f$ V \\f$ is\n  * almost always invertible, in which case we have \\f$ A = V D V^{-1}\n  * \\f$. This is called the eigendecomposition.\n  *\n  * The main function in this class is compute(), which computes the\n  * eigenvalues and eigenvectors of a given function. The\n  * documentation for that function contains an example showing the\n  * main features of the class.\n  *\n  * \\sa class EigenSolver, class SelfAdjointEigenSolver\n  */\ntemplate<typename _MatrixType> class ComplexEigenSolver\n{\n  public:\n\n    /** \\brief Synonym for the template parameter \\p _MatrixType. 
*/\n    typedef _MatrixType MatrixType;\n\n    enum {\n      RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      Options = MatrixType::Options,\n      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n\n    /** \\brief Scalar type for matrices of type #MatrixType. */\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n\n    /** \\brief Complex scalar type for #MatrixType.\n      *\n      * This is \\c std::complex<Scalar> if #Scalar is real (e.g.,\n      * \\c float or \\c double) and just \\c Scalar if #Scalar is\n      * complex.\n      */\n    typedef std::complex<RealScalar> ComplexScalar;\n\n    /** \\brief Type for vector of eigenvalues as returned by eigenvalues().\n      *\n      * This is a column vector with entries of type #ComplexScalar.\n      * The length of the vector is the size of #MatrixType.\n      */\n    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options&(~RowMajor), MaxColsAtCompileTime, 1> EigenvalueType;\n\n    /** \\brief Type for matrix of eigenvectors as returned by eigenvectors().\n      *\n      * This is a square matrix with entries of type #ComplexScalar.\n      * The size is the same as the size of #MatrixType.\n      */\n    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorType;\n\n    /** \\brief Default constructor.\n      *\n      * The default constructor is useful in cases in which the user intends to\n      * perform decompositions via compute().\n      */\n    ComplexEigenSolver()\n            : m_eivec(),\n              m_eivalues(),\n              m_schur(),\n              m_isInitialized(false),\n              m_eigenvectorsOk(false),\n              m_matX()\n    {}\n\n   
 /** \\brief Default Constructor with memory preallocation\n      *\n      * Like the default constructor but with preallocation of the internal data\n      * according to the specified problem \\a size.\n      * \\sa ComplexEigenSolver()\n      */\n    explicit ComplexEigenSolver(Index size)\n            : m_eivec(size, size),\n              m_eivalues(size),\n              m_schur(size),\n              m_isInitialized(false),\n              m_eigenvectorsOk(false),\n              m_matX(size, size)\n    {}\n\n    /** \\brief Constructor; computes eigendecomposition of given matrix.\n      *\n      * \\param[in]  matrix  Square matrix whose eigendecomposition is to be computed.\n      * \\param[in]  computeEigenvectors  If true, both the eigenvectors and the\n      *    eigenvalues are computed; if false, only the eigenvalues are\n      *    computed.\n      *\n      * This constructor calls compute() to compute the eigendecomposition.\n      */\n    template<typename InputType>\n    explicit ComplexEigenSolver(const EigenBase<InputType>& matrix, bool computeEigenvectors = true)\n            : m_eivec(matrix.rows(),matrix.cols()),\n              m_eivalues(matrix.cols()),\n              m_schur(matrix.rows()),\n              m_isInitialized(false),\n              m_eigenvectorsOk(false),\n              m_matX(matrix.rows(),matrix.cols())\n    {\n      compute(matrix.derived(), computeEigenvectors);\n    }\n\n    /** \\brief Returns the eigenvectors of given matrix.\n      *\n      * \\returns  A const reference to the matrix whose columns are the eigenvectors.\n      *\n      * \\pre Either the constructor\n      * ComplexEigenSolver(const MatrixType& matrix, bool) or the member\n      * function compute(const MatrixType& matrix, bool) has been called before\n      * to compute the eigendecomposition of a matrix, and\n      * \\p computeEigenvectors was set to true (the default).\n      *\n      * This function returns a matrix whose columns are the eigenvectors. 
Column\n      * \\f$ k \\f$ is an eigenvector corresponding to eigenvalue number \\f$ k\n      * \\f$ as returned by eigenvalues().  The eigenvectors are normalized to\n      * have (Euclidean) norm equal to one. The matrix returned by this\n      * function is the matrix \\f$ V \\f$ in the eigendecomposition \\f$ A = V D\n      * V^{-1} \\f$, if it exists.\n      *\n      * Example: \\include ComplexEigenSolver_eigenvectors.cpp\n      * Output: \\verbinclude ComplexEigenSolver_eigenvectors.out\n      */\n    const EigenvectorType& eigenvectors() const\n    {\n      eigen_assert(m_isInitialized && \"ComplexEigenSolver is not initialized.\");\n      eigen_assert(m_eigenvectorsOk && \"The eigenvectors have not been computed together with the eigenvalues.\");\n      return m_eivec;\n    }\n\n    /** \\brief Returns the eigenvalues of given matrix.\n      *\n      * \\returns A const reference to the column vector containing the eigenvalues.\n      *\n      * \\pre Either the constructor\n      * ComplexEigenSolver(const MatrixType& matrix, bool) or the member\n      * function compute(const MatrixType& matrix, bool) has been called before\n      * to compute the eigendecomposition of a matrix.\n      *\n      * This function returns a column vector containing the\n      * eigenvalues. Eigenvalues are repeated according to their\n      * algebraic multiplicity, so there are as many eigenvalues as\n      * rows in the matrix. 
The eigenvalues are not sorted in any particular\n      * order.\n      *\n      * Example: \\include ComplexEigenSolver_eigenvalues.cpp\n      * Output: \\verbinclude ComplexEigenSolver_eigenvalues.out\n      */\n    const EigenvalueType& eigenvalues() const\n    {\n      eigen_assert(m_isInitialized && \"ComplexEigenSolver is not initialized.\");\n      return m_eivalues;\n    }\n\n    /** \\brief Computes eigendecomposition of given matrix.\n      *\n      * \\param[in]  matrix  Square matrix whose eigendecomposition is to be computed.\n      * \\param[in]  computeEigenvectors  If true, both the eigenvectors and the\n      *    eigenvalues are computed; if false, only the eigenvalues are\n      *    computed.\n      * \\returns    Reference to \\c *this\n      *\n      * This function computes the eigenvalues of the complex matrix \\p matrix.\n      * The eigenvalues() function can be used to retrieve them.  If\n      * \\p computeEigenvectors is true, then the eigenvectors are also computed\n      * and can be retrieved by calling eigenvectors().\n      *\n      * The matrix is first reduced to Schur form using the\n      * ComplexSchur class. 
The Schur decomposition is then used to\n      * compute the eigenvalues and eigenvectors.\n      *\n      * The cost of the computation is dominated by the cost of the\n      * Schur decomposition, which is \\f$ O(n^3) \\f$ where \\f$ n \\f$\n      * is the size of the matrix.\n      *\n      * Example: \\include ComplexEigenSolver_compute.cpp\n      * Output: \\verbinclude ComplexEigenSolver_compute.out\n      */\n    template<typename InputType>\n    ComplexEigenSolver& compute(const EigenBase<InputType>& matrix, bool computeEigenvectors = true);\n\n    /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was succesful, \\c NoConvergence otherwise.\n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"ComplexEigenSolver is not initialized.\");\n      return m_schur.info();\n    }\n\n    /** \\brief Sets the maximum number of iterations allowed. */\n    ComplexEigenSolver& setMaxIterations(Index maxIters)\n    {\n      m_schur.setMaxIterations(maxIters);\n      return *this;\n    }\n\n    /** \\brief Returns the maximum number of iterations. 
*/\n    Index getMaxIterations()\n    {\n      return m_schur.getMaxIterations();\n    }\n\n  protected:\n    \n    static void check_template_parameters()\n    {\n      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);\n    }\n    \n    EigenvectorType m_eivec;\n    EigenvalueType m_eivalues;\n    ComplexSchur<MatrixType> m_schur;\n    bool m_isInitialized;\n    bool m_eigenvectorsOk;\n    EigenvectorType m_matX;\n\n  private:\n    void doComputeEigenvectors(RealScalar matrixnorm);\n    void sortEigenvalues(bool computeEigenvectors);\n};\n\n\ntemplate<typename MatrixType>\ntemplate<typename InputType>\nComplexEigenSolver<MatrixType>& \nComplexEigenSolver<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeEigenvectors)\n{\n  check_template_parameters();\n  \n  // this code is inspired from Jampack\n  eigen_assert(matrix.cols() == matrix.rows());\n\n  // Do a complex Schur decomposition, A = U T U^*\n  // The eigenvalues are on the diagonal of T.\n  m_schur.compute(matrix.derived(), computeEigenvectors);\n\n  if(m_schur.info() == Success)\n  {\n    m_eivalues = m_schur.matrixT().diagonal();\n    if(computeEigenvectors)\n      doComputeEigenvectors(m_schur.matrixT().norm());\n    sortEigenvalues(computeEigenvectors);\n  }\n\n  m_isInitialized = true;\n  m_eigenvectorsOk = computeEigenvectors;\n  return *this;\n}\n\n\ntemplate<typename MatrixType>\nvoid ComplexEigenSolver<MatrixType>::doComputeEigenvectors(RealScalar matrixnorm)\n{\n  const Index n = m_eivalues.size();\n\n  matrixnorm = numext::maxi(matrixnorm,(std::numeric_limits<RealScalar>::min)());\n\n  // Compute X such that T = X D X^(-1), where D is the diagonal of T.\n  // The matrix X is unit triangular.\n  m_matX = EigenvectorType::Zero(n, n);\n  for(Index k=n-1 ; k>=0 ; k--)\n  {\n    m_matX.coeffRef(k,k) = ComplexScalar(1.0,0.0);\n    // Compute X(i,k) using the (i,k) entry of the equation X T = D X\n    for(Index i=k-1 ; i>=0 ; i--)\n    {\n      m_matX.coeffRef(i,k) = 
-m_schur.matrixT().coeff(i,k);\n      if(k-i-1>0)\n        m_matX.coeffRef(i,k) -= (m_schur.matrixT().row(i).segment(i+1,k-i-1) * m_matX.col(k).segment(i+1,k-i-1)).value();\n      ComplexScalar z = m_schur.matrixT().coeff(i,i) - m_schur.matrixT().coeff(k,k);\n      if(z==ComplexScalar(0))\n      {\n        // If the i-th and k-th eigenvalue are equal, then z equals 0.\n        // Use a small value instead, to prevent division by zero.\n        numext::real_ref(z) = NumTraits<RealScalar>::epsilon() * matrixnorm;\n      }\n      m_matX.coeffRef(i,k) = m_matX.coeff(i,k) / z;\n    }\n  }\n\n  // Compute V as V = U X; now A = U T U^* = U X D X^(-1) U^* = V D V^(-1)\n  m_eivec.noalias() = m_schur.matrixU() * m_matX;\n  // .. and normalize the eigenvectors\n  for(Index k=0 ; k<n ; k++)\n  {\n    m_eivec.col(k).normalize();\n  }\n}\n\n\ntemplate<typename MatrixType>\nvoid ComplexEigenSolver<MatrixType>::sortEigenvalues(bool computeEigenvectors)\n{\n  const Index n =  m_eivalues.size();\n  for (Index i=0; i<n; i++)\n  {\n    Index k;\n    m_eivalues.cwiseAbs().tail(n-i).minCoeff(&k);\n    if (k != 0)\n    {\n      k += i;\n      std::swap(m_eivalues[k],m_eivalues[i]);\n      if(computeEigenvectors)\n\tm_eivec.col(i).swap(m_eivec.col(k));\n    }\n  }\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_COMPLEX_EIGEN_SOLVER_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Eigenvalues/ComplexSchur.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Claire Maurice\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_COMPLEX_SCHUR_H\n#define EIGEN_COMPLEX_SCHUR_H\n\n#include \"./HessenbergDecomposition.h\"\n\nnamespace Eigen { \n\nnamespace internal {\ntemplate<typename MatrixType, bool IsComplex> struct complex_schur_reduce_to_hessenberg;\n}\n\n/** \\eigenvalues_module \\ingroup Eigenvalues_Module\n  *\n  *\n  * \\class ComplexSchur\n  *\n  * \\brief Performs a complex Schur decomposition of a real or complex square matrix\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are\n  * computing the Schur decomposition; this is expected to be an\n  * instantiation of the Matrix class template.\n  *\n  * Given a real or complex square matrix A, this class computes the\n  * Schur decomposition: \\f$ A = U T U^*\\f$ where U is a unitary\n  * complex matrix, and T is a complex upper triangular matrix.  The\n  * diagonal of the matrix T corresponds to the eigenvalues of the\n  * matrix A.\n  *\n  * Call the function compute() to compute the Schur decomposition of\n  * a given matrix. Alternatively, you can use the \n  * ComplexSchur(const MatrixType&, bool) constructor which computes\n  * the Schur decomposition at construction time. 
Once the\n  * decomposition is computed, you can use the matrixU() and matrixT()\n  * functions to retrieve the matrices U and V in the decomposition.\n  *\n  * \\note This code is inspired from Jampack\n  *\n  * \\sa class RealSchur, class EigenSolver, class ComplexEigenSolver\n  */\ntemplate<typename _MatrixType> class ComplexSchur\n{\n  public:\n    typedef _MatrixType MatrixType;\n    enum {\n      RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      Options = MatrixType::Options,\n      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n\n    /** \\brief Scalar type for matrices of type \\p _MatrixType. */\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n\n    /** \\brief Complex scalar type for \\p _MatrixType. \n      *\n      * This is \\c std::complex<Scalar> if #Scalar is real (e.g.,\n      * \\c float or \\c double) and just \\c Scalar if #Scalar is\n      * complex.\n      */\n    typedef std::complex<RealScalar> ComplexScalar;\n\n    /** \\brief Type for the matrices in the Schur decomposition.\n      *\n      * This is a square matrix with entries of type #ComplexScalar. \n      * The size is the same as the size of \\p _MatrixType.\n      */\n    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> ComplexMatrixType;\n\n    /** \\brief Default constructor.\n      *\n      * \\param [in] size  Positive integer, size of the matrix whose Schur decomposition will be computed.\n      *\n      * The default constructor is useful in cases in which the user\n      * intends to perform decompositions via compute().  The \\p size\n      * parameter is only used as a hint. 
It is not an error to give a\n      * wrong \\p size, but it may impair performance.\n      *\n      * \\sa compute() for an example.\n      */\n    explicit ComplexSchur(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime)\n      : m_matT(size,size),\n        m_matU(size,size),\n        m_hess(size),\n        m_isInitialized(false),\n        m_matUisUptodate(false),\n        m_maxIters(-1)\n    {}\n\n    /** \\brief Constructor; computes Schur decomposition of given matrix. \n      * \n      * \\param[in]  matrix    Square matrix whose Schur decomposition is to be computed.\n      * \\param[in]  computeU  If true, both T and U are computed; if false, only T is computed.\n      *\n      * This constructor calls compute() to compute the Schur decomposition.\n      *\n      * \\sa matrixT() and matrixU() for examples.\n      */\n    template<typename InputType>\n    explicit ComplexSchur(const EigenBase<InputType>& matrix, bool computeU = true)\n      : m_matT(matrix.rows(),matrix.cols()),\n        m_matU(matrix.rows(),matrix.cols()),\n        m_hess(matrix.rows()),\n        m_isInitialized(false),\n        m_matUisUptodate(false),\n        m_maxIters(-1)\n    {\n      compute(matrix.derived(), computeU);\n    }\n\n    /** \\brief Returns the unitary matrix in the Schur decomposition. 
\n      *\n      * \\returns A const reference to the matrix U.\n      *\n      * It is assumed that either the constructor\n      * ComplexSchur(const MatrixType& matrix, bool computeU) or the\n      * member function compute(const MatrixType& matrix, bool computeU)\n      * has been called before to compute the Schur decomposition of a\n      * matrix, and that \\p computeU was set to true (the default\n      * value).\n      *\n      * Example: \\include ComplexSchur_matrixU.cpp\n      * Output: \\verbinclude ComplexSchur_matrixU.out\n      */\n    const ComplexMatrixType& matrixU() const\n    {\n      eigen_assert(m_isInitialized && \"ComplexSchur is not initialized.\");\n      eigen_assert(m_matUisUptodate && \"The matrix U has not been computed during the ComplexSchur decomposition.\");\n      return m_matU;\n    }\n\n    /** \\brief Returns the triangular matrix in the Schur decomposition. \n      *\n      * \\returns A const reference to the matrix T.\n      *\n      * It is assumed that either the constructor\n      * ComplexSchur(const MatrixType& matrix, bool computeU) or the\n      * member function compute(const MatrixType& matrix, bool computeU)\n      * has been called before to compute the Schur decomposition of a\n      * matrix.\n      *\n      * Note that this function returns a plain square matrix. If you want to reference\n      * only the upper triangular part, use:\n      * \\code schur.matrixT().triangularView<Upper>() \\endcode \n      *\n      * Example: \\include ComplexSchur_matrixT.cpp\n      * Output: \\verbinclude ComplexSchur_matrixT.out\n      */\n    const ComplexMatrixType& matrixT() const\n    {\n      eigen_assert(m_isInitialized && \"ComplexSchur is not initialized.\");\n      return m_matT;\n    }\n\n    /** \\brief Computes Schur decomposition of given matrix. 
\n      * \n      * \\param[in]  matrix  Square matrix whose Schur decomposition is to be computed.\n      * \\param[in]  computeU  If true, both T and U are computed; if false, only T is computed.\n\n      * \\returns    Reference to \\c *this\n      *\n      * The Schur decomposition is computed by first reducing the\n      * matrix to Hessenberg form using the class\n      * HessenbergDecomposition. The Hessenberg matrix is then reduced\n      * to triangular form by performing QR iterations with a single\n      * shift. The cost of computing the Schur decomposition depends\n      * on the number of iterations; as a rough guide, it may be taken\n      * to be \f$25n^3\f$ complex flops, or \f$10n^3\f$ complex flops\n      * if \a computeU is false.\n      *\n      * Example: \include ComplexSchur_compute.cpp\n      * Output: \verbinclude ComplexSchur_compute.out\n      *\n      * \sa compute(const MatrixType&, bool, Index)\n      */\n    template<typename InputType>\n    ComplexSchur& compute(const EigenBase<InputType>& matrix, bool computeU = true);\n    \n    /** \brief Compute Schur decomposition from a given Hessenberg matrix\n     *  \param[in] matrixH Matrix in Hessenberg form H\n     *  \param[in] matrixQ orthogonal matrix Q that transform a matrix A to H : A = Q H Q^T\n     *  \param computeU Computes the matrix U of the Schur vectors\n     * \return Reference to \c *this\n     * \n     *  This routine assumes that the matrix is already reduced in Hessenberg form matrixH\n     *  using either the class HessenbergDecomposition or another mean. 
\n     *  It computes the upper quasi-triangular matrix T of the Schur decomposition of H\n     *  When computeU is true, this routine computes the matrix U such that \n     *  A = U T U^T =  (QZ) T (QZ)^T = Q H Q^T where A is the initial matrix\n     * \n     * NOTE Q is referenced if computeU is true; so, if the initial orthogonal matrix\n     * is not available, the user should give an identity matrix (Q.setIdentity())\n     * \n     * \\sa compute(const MatrixType&, bool)\n     */\n    template<typename HessMatrixType, typename OrthMatrixType>\n    ComplexSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ,  bool computeU=true);\n\n    /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was succesful, \\c NoConvergence otherwise.\n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"ComplexSchur is not initialized.\");\n      return m_info;\n    }\n\n    /** \\brief Sets the maximum number of iterations allowed. \n      *\n      * If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size\n      * of the matrix.\n      */\n    ComplexSchur& setMaxIterations(Index maxIters)\n    {\n      m_maxIters = maxIters;\n      return *this;\n    }\n\n    /** \\brief Returns the maximum number of iterations. */\n    Index getMaxIterations()\n    {\n      return m_maxIters;\n    }\n\n    /** \\brief Maximum number of iterations per row.\n      *\n      * If not otherwise specified, the maximum number of iterations is this number times the size of the\n      * matrix. 
It is currently set to 30.\n      */\n    static const int m_maxIterationsPerRow = 30;\n\n  protected:\n    ComplexMatrixType m_matT, m_matU;\n    HessenbergDecomposition<MatrixType> m_hess;\n    ComputationInfo m_info;\n    bool m_isInitialized;\n    bool m_matUisUptodate;\n    Index m_maxIters;\n\n  private:  \n    bool subdiagonalEntryIsNeglegible(Index i);\n    ComplexScalar computeShift(Index iu, Index iter);\n    void reduceToTriangularForm(bool computeU);\n    friend struct internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>;\n};\n\n/** If m_matT(i+1,i) is neglegible in floating point arithmetic\n  * compared to m_matT(i,i) and m_matT(j,j), then set it to zero and\n  * return true, else return false. */\ntemplate<typename MatrixType>\ninline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i)\n{\n  RealScalar d = numext::norm1(m_matT.coeff(i,i)) + numext::norm1(m_matT.coeff(i+1,i+1));\n  RealScalar sd = numext::norm1(m_matT.coeff(i+1,i));\n  if (internal::isMuchSmallerThan(sd, d, NumTraits<RealScalar>::epsilon()))\n  {\n    m_matT.coeffRef(i+1,i) = ComplexScalar(0);\n    return true;\n  }\n  return false;\n}\n\n\n/** Compute the shift in the current QR iteration. 
*/\ntemplate<typename MatrixType>\ntypename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(Index iu, Index iter)\n{\n  using std::abs;\n  if (iter == 10 || iter == 20) \n  {\n    // exceptional shift, taken from http://www.netlib.org/eispack/comqr.f\n    return abs(numext::real(m_matT.coeff(iu,iu-1))) + abs(numext::real(m_matT.coeff(iu-1,iu-2)));\n  }\n\n  // compute the shift as one of the eigenvalues of t, the 2x2\n  // diagonal block on the bottom of the active submatrix\n  Matrix<ComplexScalar,2,2> t = m_matT.template block<2,2>(iu-1,iu-1);\n  RealScalar normt = t.cwiseAbs().sum();\n  t /= normt;     // the normalization by sf is to avoid under/overflow\n\n  ComplexScalar b = t.coeff(0,1) * t.coeff(1,0);\n  ComplexScalar c = t.coeff(0,0) - t.coeff(1,1);\n  ComplexScalar disc = sqrt(c*c + RealScalar(4)*b);\n  ComplexScalar det = t.coeff(0,0) * t.coeff(1,1) - b;\n  ComplexScalar trace = t.coeff(0,0) + t.coeff(1,1);\n  ComplexScalar eival1 = (trace + disc) / RealScalar(2);\n  ComplexScalar eival2 = (trace - disc) / RealScalar(2);\n\n  if(numext::norm1(eival1) > numext::norm1(eival2))\n    eival2 = det / eival1;\n  else\n    eival1 = det / eival2;\n\n  // choose the eigenvalue closest to the bottom entry of the diagonal\n  if(numext::norm1(eival1-t.coeff(1,1)) < numext::norm1(eival2-t.coeff(1,1)))\n    return normt * eival1;\n  else\n    return normt * eival2;\n}\n\n\ntemplate<typename MatrixType>\ntemplate<typename InputType>\nComplexSchur<MatrixType>& ComplexSchur<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeU)\n{\n  m_matUisUptodate = false;\n  eigen_assert(matrix.cols() == matrix.rows());\n\n  if(matrix.cols() == 1)\n  {\n    m_matT = matrix.derived().template cast<ComplexScalar>();\n    if(computeU)  m_matU = ComplexMatrixType::Identity(1,1);\n    m_info = Success;\n    m_isInitialized = true;\n    m_matUisUptodate = computeU;\n    return *this;\n  }\n\n  
internal::complex_schur_reduce_to_hessenberg<MatrixType, NumTraits<Scalar>::IsComplex>::run(*this, matrix.derived(), computeU);\n  computeFromHessenberg(m_matT, m_matU, computeU);\n  return *this;\n}\n\ntemplate<typename MatrixType>\ntemplate<typename HessMatrixType, typename OrthMatrixType>\nComplexSchur<MatrixType>& ComplexSchur<MatrixType>::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU)\n{\n  m_matT = matrixH;\n  if(computeU)\n    m_matU = matrixQ;\n  reduceToTriangularForm(computeU);\n  return *this;\n}\nnamespace internal {\n\n/* Reduce given matrix to Hessenberg form */\ntemplate<typename MatrixType, bool IsComplex>\nstruct complex_schur_reduce_to_hessenberg\n{\n  // this is the implementation for the case IsComplex = true\n  static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)\n  {\n    _this.m_hess.compute(matrix);\n    _this.m_matT = _this.m_hess.matrixH();\n    if(computeU)  _this.m_matU = _this.m_hess.matrixQ();\n  }\n};\n\ntemplate<typename MatrixType>\nstruct complex_schur_reduce_to_hessenberg<MatrixType, false>\n{\n  static void run(ComplexSchur<MatrixType>& _this, const MatrixType& matrix, bool computeU)\n  {\n    typedef typename ComplexSchur<MatrixType>::ComplexScalar ComplexScalar;\n\n    // Note: m_hess is over RealScalar; m_matT and m_matU is over ComplexScalar\n    _this.m_hess.compute(matrix);\n    _this.m_matT = _this.m_hess.matrixH().template cast<ComplexScalar>();\n    if(computeU)  \n    {\n      // This may cause an allocation which seems to be avoidable\n      MatrixType Q = _this.m_hess.matrixQ(); \n      _this.m_matU = Q.template cast<ComplexScalar>();\n    }\n  }\n};\n\n} // end namespace internal\n\n// Reduce the Hessenberg matrix m_matT to triangular form by QR iteration.\ntemplate<typename MatrixType>\nvoid ComplexSchur<MatrixType>::reduceToTriangularForm(bool computeU)\n{  \n  Index maxIters = m_maxIters;\n  if (maxIters == -1)\n    maxIters = 
m_maxIterationsPerRow * m_matT.rows();\n\n  // The matrix m_matT is divided in three parts. \n  // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero. \n  // Rows il,...,iu is the part we are working on (the active submatrix).\n  // Rows iu+1,...,end are already brought in triangular form.\n  Index iu = m_matT.cols() - 1;\n  Index il;\n  Index iter = 0; // number of iterations we are working on the (iu,iu) element\n  Index totalIter = 0; // number of iterations for whole matrix\n\n  while(true)\n  {\n    // find iu, the bottom row of the active submatrix\n    while(iu > 0)\n    {\n      if(!subdiagonalEntryIsNeglegible(iu-1)) break;\n      iter = 0;\n      --iu;\n    }\n\n    // if iu is zero then we are done; the whole matrix is triangularized\n    if(iu==0) break;\n\n    // if we spent too many iterations, we give up\n    iter++;\n    totalIter++;\n    if(totalIter > maxIters) break;\n\n    // find il, the top row of the active submatrix\n    il = iu-1;\n    while(il > 0 && !subdiagonalEntryIsNeglegible(il-1))\n    {\n      --il;\n    }\n\n    /* perform the QR step using Givens rotations. The first rotation\n       creates a bulge; the (il+2,il) element becomes nonzero. This\n       bulge is chased down to the bottom of the active submatrix. 
*/\n\n    ComplexScalar shift = computeShift(iu, iter);\n    JacobiRotation<ComplexScalar> rot;\n    rot.makeGivens(m_matT.coeff(il,il) - shift, m_matT.coeff(il+1,il));\n    m_matT.rightCols(m_matT.cols()-il).applyOnTheLeft(il, il+1, rot.adjoint());\n    m_matT.topRows((std::min)(il+2,iu)+1).applyOnTheRight(il, il+1, rot);\n    if(computeU) m_matU.applyOnTheRight(il, il+1, rot);\n\n    for(Index i=il+1 ; i<iu ; i++)\n    {\n      rot.makeGivens(m_matT.coeffRef(i,i-1), m_matT.coeffRef(i+1,i-1), &m_matT.coeffRef(i,i-1));\n      m_matT.coeffRef(i+1,i-1) = ComplexScalar(0);\n      m_matT.rightCols(m_matT.cols()-i).applyOnTheLeft(i, i+1, rot.adjoint());\n      m_matT.topRows((std::min)(i+2,iu)+1).applyOnTheRight(i, i+1, rot);\n      if(computeU) m_matU.applyOnTheRight(i, i+1, rot);\n    }\n  }\n\n  if(totalIter <= maxIters)\n    m_info = Success;\n  else\n    m_info = NoConvergence;\n\n  m_isInitialized = true;\n  m_matUisUptodate = computeU;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_COMPLEX_SCHUR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Eigenvalues/ComplexSchur_LAPACKE.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to LAPACKe\n *    Complex Schur needed to complex unsymmetrical eigenvalues/eigenvectors.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_COMPLEX_SCHUR_LAPACKE_H\n#define EIGEN_COMPLEX_SCHUR_LAPACKE_H\n\nnamespace Eigen { \n\n/** \\internal Specialization for the data types supported by LAPACKe */\n\n#define EIGEN_LAPACKE_SCHUR_COMPLEX(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX, LAPACKE_PREFIX_U, EIGCOLROW, LAPACKE_COLROW) \\\ntemplate<> template<typename InputType> inline \\\nComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \\\nComplexSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const EigenBase<InputType>& matrix, bool computeU) \\\n{ \\\n  typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> MatrixType; \\\n  typedef MatrixType::RealScalar RealScalar; \\\n  typedef std::complex<RealScalar> ComplexScalar; \\\n\\\n  eigen_assert(matrix.cols() == matrix.rows()); \\\n\\\n  m_matUisUptodate = false; \\\n  if(matrix.cols() == 1) \\\n  { \\\n    m_matT = matrix.derived().template cast<ComplexScalar>(); \\\n    if(computeU)  m_matU = ComplexMatrixType::Identity(1,1); \\\n      m_info = Success; \\\n      m_isInitialized = true; \\\n      m_matUisUptodate = computeU; \\\n      return *this; \\\n  } \\\n  lapack_int n = 
internal::convert_index<lapack_int>(matrix.cols()), sdim, info; \\\n  lapack_int matrix_order = LAPACKE_COLROW; \\\n  char jobvs, sort='N'; \\\n  LAPACK_##LAPACKE_PREFIX_U##_SELECT1 select = 0; \\\n  jobvs = (computeU) ? 'V' : 'N'; \\\n  m_matU.resize(n, n); \\\n  lapack_int ldvs  = internal::convert_index<lapack_int>(m_matU.outerStride()); \\\n  m_matT = matrix; \\\n  lapack_int lda = internal::convert_index<lapack_int>(m_matT.outerStride()); \\\n  Matrix<EIGTYPE, Dynamic, Dynamic> w; \\\n  w.resize(n, 1);\\\n  info = LAPACKE_##LAPACKE_PREFIX##gees( matrix_order, jobvs, sort, select, n, (LAPACKE_TYPE*)m_matT.data(), lda, &sdim, (LAPACKE_TYPE*)w.data(), (LAPACKE_TYPE*)m_matU.data(), ldvs ); \\\n  if(info == 0) \\\n    m_info = Success; \\\n  else \\\n    m_info = NoConvergence; \\\n\\\n  m_isInitialized = true; \\\n  m_matUisUptodate = computeU; \\\n  return *this; \\\n\\\n}\n\nEIGEN_LAPACKE_SCHUR_COMPLEX(dcomplex, lapack_complex_double, z, Z, ColMajor, LAPACK_COL_MAJOR)\nEIGEN_LAPACKE_SCHUR_COMPLEX(scomplex, lapack_complex_float,  c, C, ColMajor, LAPACK_COL_MAJOR)\nEIGEN_LAPACKE_SCHUR_COMPLEX(dcomplex, lapack_complex_double, z, Z, RowMajor, LAPACK_ROW_MAJOR)\nEIGEN_LAPACKE_SCHUR_COMPLEX(scomplex, lapack_complex_float,  c, C, RowMajor, LAPACK_ROW_MAJOR)\n\n} // end namespace Eigen\n\n#endif // EIGEN_COMPLEX_SCHUR_LAPACKE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Eigenvalues/EigenSolver.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_EIGENSOLVER_H\n#define EIGEN_EIGENSOLVER_H\n\n#include \"./RealSchur.h\"\n\nnamespace Eigen { \n\n/** \\eigenvalues_module \\ingroup Eigenvalues_Module\n  *\n  *\n  * \\class EigenSolver\n  *\n  * \\brief Computes eigenvalues and eigenvectors of general matrices\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are computing the\n  * eigendecomposition; this is expected to be an instantiation of the Matrix\n  * class template. Currently, only real matrices are supported.\n  *\n  * The eigenvalues and eigenvectors of a matrix \\f$ A \\f$ are scalars\n  * \\f$ \\lambda \\f$ and vectors \\f$ v \\f$ such that \\f$ Av = \\lambda v \\f$.  If\n  * \\f$ D \\f$ is a diagonal matrix with the eigenvalues on the diagonal, and\n  * \\f$ V \\f$ is a matrix with the eigenvectors as its columns, then \\f$ A V =\n  * V D \\f$. The matrix \\f$ V \\f$ is almost always invertible, in which case we\n  * have \\f$ A = V D V^{-1} \\f$. This is called the eigendecomposition.\n  *\n  * The eigenvalues and eigenvectors of a matrix may be complex, even when the\n  * matrix is real. However, we can choose real matrices \\f$ V \\f$ and \\f$ D\n  * \\f$ satisfying \\f$ A V = V D \\f$, just like the eigendecomposition, if the\n  * matrix \\f$ D \\f$ is not required to be diagonal, but if it is allowed to\n  * have blocks of the form\n  * \\f[ \\begin{bmatrix} u & v \\\\ -v & u \\end{bmatrix} \\f]\n  * (where \\f$ u \\f$ and \\f$ v \\f$ are real numbers) on the diagonal.  
These\n  * blocks correspond to complex eigenvalue pairs \\f$ u \\pm iv \\f$. We call\n  * this variant of the eigendecomposition the pseudo-eigendecomposition.\n  *\n  * Call the function compute() to compute the eigenvalues and eigenvectors of\n  * a given matrix. Alternatively, you can use the \n  * EigenSolver(const MatrixType&, bool) constructor which computes the\n  * eigenvalues and eigenvectors at construction time. Once the eigenvalue and\n  * eigenvectors are computed, they can be retrieved with the eigenvalues() and\n  * eigenvectors() functions. The pseudoEigenvalueMatrix() and\n  * pseudoEigenvectors() methods allow the construction of the\n  * pseudo-eigendecomposition.\n  *\n  * The documentation for EigenSolver(const MatrixType&, bool) contains an\n  * example of the typical use of this class.\n  *\n  * \\note The implementation is adapted from\n  * <a href=\"http://math.nist.gov/javanumerics/jama/\">JAMA</a> (public domain).\n  * Their code is based on EISPACK.\n  *\n  * \\sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver\n  */\ntemplate<typename _MatrixType> class EigenSolver\n{\n  public:\n\n    /** \\brief Synonym for the template parameter \\p _MatrixType. */\n    typedef _MatrixType MatrixType;\n\n    enum {\n      RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      Options = MatrixType::Options,\n      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n\n    /** \\brief Scalar type for matrices of type #MatrixType. */\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n\n    /** \\brief Complex scalar type for #MatrixType. 
\n      *\n      * This is \\c std::complex<Scalar> if #Scalar is real (e.g.,\n      * \\c float or \\c double) and just \\c Scalar if #Scalar is\n      * complex.\n      */\n    typedef std::complex<RealScalar> ComplexScalar;\n\n    /** \\brief Type for vector of eigenvalues as returned by eigenvalues(). \n      *\n      * This is a column vector with entries of type #ComplexScalar.\n      * The length of the vector is the size of #MatrixType.\n      */\n    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;\n\n    /** \\brief Type for matrix of eigenvectors as returned by eigenvectors(). \n      *\n      * This is a square matrix with entries of type #ComplexScalar. \n      * The size is the same as the size of #MatrixType.\n      */\n    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;\n\n    /** \\brief Default constructor.\n      *\n      * The default constructor is useful in cases in which the user intends to\n      * perform decompositions via EigenSolver::compute(const MatrixType&, bool).\n      *\n      * \\sa compute() for an example.\n      */\n    EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false), m_realSchur(), m_matT(), m_tmp() {}\n\n    /** \\brief Default constructor with memory preallocation\n      *\n      * Like the default constructor but with preallocation of the internal data\n      * according to the specified problem \\a size.\n      * \\sa EigenSolver()\n      */\n    explicit EigenSolver(Index size)\n      : m_eivec(size, size),\n        m_eivalues(size),\n        m_isInitialized(false),\n        m_eigenvectorsOk(false),\n        m_realSchur(size),\n        m_matT(size, size), \n        m_tmp(size)\n    {}\n\n    /** \\brief Constructor; computes eigendecomposition of given matrix. 
\n      * \n      * \\param[in]  matrix  Square matrix whose eigendecomposition is to be computed.\n      * \\param[in]  computeEigenvectors  If true, both the eigenvectors and the\n      *    eigenvalues are computed; if false, only the eigenvalues are\n      *    computed. \n      *\n      * This constructor calls compute() to compute the eigenvalues\n      * and eigenvectors.\n      *\n      * Example: \\include EigenSolver_EigenSolver_MatrixType.cpp\n      * Output: \\verbinclude EigenSolver_EigenSolver_MatrixType.out\n      *\n      * \\sa compute()\n      */\n    template<typename InputType>\n    explicit EigenSolver(const EigenBase<InputType>& matrix, bool computeEigenvectors = true)\n      : m_eivec(matrix.rows(), matrix.cols()),\n        m_eivalues(matrix.cols()),\n        m_isInitialized(false),\n        m_eigenvectorsOk(false),\n        m_realSchur(matrix.cols()),\n        m_matT(matrix.rows(), matrix.cols()), \n        m_tmp(matrix.cols())\n    {\n      compute(matrix.derived(), computeEigenvectors);\n    }\n\n    /** \\brief Returns the eigenvectors of given matrix. \n      *\n      * \\returns  %Matrix whose columns are the (possibly complex) eigenvectors.\n      *\n      * \\pre Either the constructor \n      * EigenSolver(const MatrixType&,bool) or the member function\n      * compute(const MatrixType&, bool) has been called before, and\n      * \\p computeEigenvectors was set to true (the default).\n      *\n      * Column \\f$ k \\f$ of the returned matrix is an eigenvector corresponding\n      * to eigenvalue number \\f$ k \\f$ as returned by eigenvalues().  The\n      * eigenvectors are normalized to have (Euclidean) norm equal to one. 
The\n      * matrix returned by this function is the matrix \\f$ V \\f$ in the\n      * eigendecomposition \\f$ A = V D V^{-1} \\f$, if it exists.\n      *\n      * Example: \\include EigenSolver_eigenvectors.cpp\n      * Output: \\verbinclude EigenSolver_eigenvectors.out\n      *\n      * \\sa eigenvalues(), pseudoEigenvectors()\n      */\n    EigenvectorsType eigenvectors() const;\n\n    /** \\brief Returns the pseudo-eigenvectors of given matrix. \n      *\n      * \\returns  Const reference to matrix whose columns are the pseudo-eigenvectors.\n      *\n      * \\pre Either the constructor \n      * EigenSolver(const MatrixType&,bool) or the member function\n      * compute(const MatrixType&, bool) has been called before, and\n      * \\p computeEigenvectors was set to true (the default).\n      *\n      * The real matrix \\f$ V \\f$ returned by this function and the\n      * block-diagonal matrix \\f$ D \\f$ returned by pseudoEigenvalueMatrix()\n      * satisfy \\f$ AV = VD \\f$.\n      *\n      * Example: \\include EigenSolver_pseudoEigenvectors.cpp\n      * Output: \\verbinclude EigenSolver_pseudoEigenvectors.out\n      *\n      * \\sa pseudoEigenvalueMatrix(), eigenvectors()\n      */\n    const MatrixType& pseudoEigenvectors() const\n    {\n      eigen_assert(m_isInitialized && \"EigenSolver is not initialized.\");\n      eigen_assert(m_eigenvectorsOk && \"The eigenvectors have not been computed together with the eigenvalues.\");\n      return m_eivec;\n    }\n\n    /** \\brief Returns the block-diagonal matrix in the pseudo-eigendecomposition.\n      *\n      * \\returns  A block-diagonal matrix.\n      *\n      * \\pre Either the constructor \n      * EigenSolver(const MatrixType&,bool) or the member function\n      * compute(const MatrixType&, bool) has been called before.\n      *\n      * The matrix \\f$ D \\f$ returned by this function is real and\n      * block-diagonal. 
The blocks on the diagonal are either 1-by-1 or 2-by-2\n      * blocks of the form\n      * \\f$ \\begin{bmatrix} u & v \\\\ -v & u \\end{bmatrix} \\f$.\n      * These blocks are not sorted in any particular order.\n      * The matrix \\f$ D \\f$ and the matrix \\f$ V \\f$ returned by\n      * pseudoEigenvectors() satisfy \\f$ AV = VD \\f$.\n      *\n      * \\sa pseudoEigenvectors() for an example, eigenvalues()\n      */\n    MatrixType pseudoEigenvalueMatrix() const;\n\n    /** \\brief Returns the eigenvalues of given matrix. \n      *\n      * \\returns A const reference to the column vector containing the eigenvalues.\n      *\n      * \\pre Either the constructor \n      * EigenSolver(const MatrixType&,bool) or the member function\n      * compute(const MatrixType&, bool) has been called before.\n      *\n      * The eigenvalues are repeated according to their algebraic multiplicity,\n      * so there are as many eigenvalues as rows in the matrix. The eigenvalues \n      * are not sorted in any particular order.\n      *\n      * Example: \\include EigenSolver_eigenvalues.cpp\n      * Output: \\verbinclude EigenSolver_eigenvalues.out\n      *\n      * \\sa eigenvectors(), pseudoEigenvalueMatrix(),\n      *     MatrixBase::eigenvalues()\n      */\n    const EigenvalueType& eigenvalues() const\n    {\n      eigen_assert(m_isInitialized && \"EigenSolver is not initialized.\");\n      return m_eivalues;\n    }\n\n    /** \\brief Computes eigendecomposition of given matrix. \n      * \n      * \\param[in]  matrix  Square matrix whose eigendecomposition is to be computed.\n      * \\param[in]  computeEigenvectors  If true, both the eigenvectors and the\n      *    eigenvalues are computed; if false, only the eigenvalues are\n      *    computed. \n      * \\returns    Reference to \\c *this\n      *\n      * This function computes the eigenvalues of the real matrix \\p matrix.\n      * The eigenvalues() function can be used to retrieve them.  
If \n      * \\p computeEigenvectors is true, then the eigenvectors are also computed\n      * and can be retrieved by calling eigenvectors().\n      *\n      * The matrix is first reduced to real Schur form using the RealSchur\n      * class. The Schur decomposition is then used to compute the eigenvalues\n      * and eigenvectors.\n      *\n      * The cost of the computation is dominated by the cost of the\n      * Schur decomposition, which is very approximately \\f$ 25n^3 \\f$\n      * (where \\f$ n \\f$ is the size of the matrix) if \\p computeEigenvectors \n      * is true, and \\f$ 10n^3 \\f$ if \\p computeEigenvectors is false.\n      *\n      * This method reuses of the allocated data in the EigenSolver object.\n      *\n      * Example: \\include EigenSolver_compute.cpp\n      * Output: \\verbinclude EigenSolver_compute.out\n      */\n    template<typename InputType>\n    EigenSolver& compute(const EigenBase<InputType>& matrix, bool computeEigenvectors = true);\n\n    /** \\returns NumericalIssue if the input contains INF or NaN values or overflow occured. Returns Success otherwise. */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"EigenSolver is not initialized.\");\n      return m_info;\n    }\n\n    /** \\brief Sets the maximum number of iterations allowed. */\n    EigenSolver& setMaxIterations(Index maxIters)\n    {\n      m_realSchur.setMaxIterations(maxIters);\n      return *this;\n    }\n\n    /** \\brief Returns the maximum number of iterations. 
*/\n    Index getMaxIterations()\n    {\n      return m_realSchur.getMaxIterations();\n    }\n\n  private:\n    void doComputeEigenvectors();\n\n  protected:\n    \n    static void check_template_parameters()\n    {\n      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);\n      EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);\n    }\n    \n    MatrixType m_eivec;\n    EigenvalueType m_eivalues;\n    bool m_isInitialized;\n    bool m_eigenvectorsOk;\n    ComputationInfo m_info;\n    RealSchur<MatrixType> m_realSchur;\n    MatrixType m_matT;\n\n    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;\n    ColumnVectorType m_tmp;\n};\n\ntemplate<typename MatrixType>\nMatrixType EigenSolver<MatrixType>::pseudoEigenvalueMatrix() const\n{\n  eigen_assert(m_isInitialized && \"EigenSolver is not initialized.\");\n  const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();\n  Index n = m_eivalues.rows();\n  MatrixType matD = MatrixType::Zero(n,n);\n  for (Index i=0; i<n; ++i)\n  {\n    if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i)), precision))\n      matD.coeffRef(i,i) = numext::real(m_eivalues.coeff(i));\n    else\n    {\n      matD.template block<2,2>(i,i) <<  numext::real(m_eivalues.coeff(i)), numext::imag(m_eivalues.coeff(i)),\n                                       -numext::imag(m_eivalues.coeff(i)), numext::real(m_eivalues.coeff(i));\n      ++i;\n    }\n  }\n  return matD;\n}\n\ntemplate<typename MatrixType>\ntypename EigenSolver<MatrixType>::EigenvectorsType EigenSolver<MatrixType>::eigenvectors() const\n{\n  eigen_assert(m_isInitialized && \"EigenSolver is not initialized.\");\n  eigen_assert(m_eigenvectorsOk && \"The eigenvectors have not been computed together with the eigenvalues.\");\n  const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();\n  Index n = m_eivec.cols();\n  EigenvectorsType 
matV(n,n);\n  for (Index j=0; j<n; ++j)\n  {\n    if (internal::isMuchSmallerThan(numext::imag(m_eivalues.coeff(j)), numext::real(m_eivalues.coeff(j)), precision) || j+1==n)\n    {\n      // we have a real eigen value\n      matV.col(j) = m_eivec.col(j).template cast<ComplexScalar>();\n      matV.col(j).normalize();\n    }\n    else\n    {\n      // we have a pair of complex eigen values\n      for (Index i=0; i<n; ++i)\n      {\n        matV.coeffRef(i,j)   = ComplexScalar(m_eivec.coeff(i,j),  m_eivec.coeff(i,j+1));\n        matV.coeffRef(i,j+1) = ComplexScalar(m_eivec.coeff(i,j), -m_eivec.coeff(i,j+1));\n      }\n      matV.col(j).normalize();\n      matV.col(j+1).normalize();\n      ++j;\n    }\n  }\n  return matV;\n}\n\ntemplate<typename MatrixType>\ntemplate<typename InputType>\nEigenSolver<MatrixType>& \nEigenSolver<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeEigenvectors)\n{\n  check_template_parameters();\n  \n  using std::sqrt;\n  using std::abs;\n  using numext::isfinite;\n  eigen_assert(matrix.cols() == matrix.rows());\n\n  // Reduce to real Schur form.\n  m_realSchur.compute(matrix.derived(), computeEigenvectors);\n  \n  m_info = m_realSchur.info();\n\n  if (m_info == Success)\n  {\n    m_matT = m_realSchur.matrixT();\n    if (computeEigenvectors)\n      m_eivec = m_realSchur.matrixU();\n  \n    // Compute eigenvalues from matT\n    m_eivalues.resize(matrix.cols());\n    Index i = 0;\n    while (i < matrix.cols()) \n    {\n      if (i == matrix.cols() - 1 || m_matT.coeff(i+1, i) == Scalar(0)) \n      {\n        m_eivalues.coeffRef(i) = m_matT.coeff(i, i);\n        if(!(isfinite)(m_eivalues.coeffRef(i)))\n        {\n          m_isInitialized = true;\n          m_eigenvectorsOk = false;\n          m_info = NumericalIssue;\n          return *this;\n        }\n        ++i;\n      }\n      else\n      {\n        Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i+1, i+1));\n        Scalar z;\n        // Compute z = 
sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1)));\n        // without overflow\n        {\n          Scalar t0 = m_matT.coeff(i+1, i);\n          Scalar t1 = m_matT.coeff(i, i+1);\n          Scalar maxval = numext::maxi<Scalar>(abs(p),numext::maxi<Scalar>(abs(t0),abs(t1)));\n          t0 /= maxval;\n          t1 /= maxval;\n          Scalar p0 = p/maxval;\n          z = maxval * sqrt(abs(p0 * p0 + t0 * t1));\n        }\n        \n        m_eivalues.coeffRef(i)   = ComplexScalar(m_matT.coeff(i+1, i+1) + p, z);\n        m_eivalues.coeffRef(i+1) = ComplexScalar(m_matT.coeff(i+1, i+1) + p, -z);\n        if(!((isfinite)(m_eivalues.coeffRef(i)) && (isfinite)(m_eivalues.coeffRef(i+1))))\n        {\n          m_isInitialized = true;\n          m_eigenvectorsOk = false;\n          m_info = NumericalIssue;\n          return *this;\n        }\n        i += 2;\n      }\n    }\n    \n    // Compute eigenvectors.\n    if (computeEigenvectors)\n      doComputeEigenvectors();\n  }\n\n  m_isInitialized = true;\n  m_eigenvectorsOk = computeEigenvectors;\n\n  return *this;\n}\n\n\ntemplate<typename MatrixType>\nvoid EigenSolver<MatrixType>::doComputeEigenvectors()\n{\n  using std::abs;\n  const Index size = m_eivec.cols();\n  const Scalar eps = NumTraits<Scalar>::epsilon();\n\n  // inefficient! 
this is already computed in RealSchur\n  Scalar norm(0);\n  for (Index j = 0; j < size; ++j)\n  {\n    norm += m_matT.row(j).segment((std::max)(j-1,Index(0)), size-(std::max)(j-1,Index(0))).cwiseAbs().sum();\n  }\n  \n  // Backsubstitute to find vectors of upper triangular form\n  if (norm == Scalar(0))\n  {\n    return;\n  }\n\n  for (Index n = size-1; n >= 0; n--)\n  {\n    Scalar p = m_eivalues.coeff(n).real();\n    Scalar q = m_eivalues.coeff(n).imag();\n\n    // Scalar vector\n    if (q == Scalar(0))\n    {\n      Scalar lastr(0), lastw(0);\n      Index l = n;\n\n      m_matT.coeffRef(n,n) = Scalar(1);\n      for (Index i = n-1; i >= 0; i--)\n      {\n        Scalar w = m_matT.coeff(i,i) - p;\n        Scalar r = m_matT.row(i).segment(l,n-l+1).dot(m_matT.col(n).segment(l, n-l+1));\n\n        if (m_eivalues.coeff(i).imag() < Scalar(0))\n        {\n          lastw = w;\n          lastr = r;\n        }\n        else\n        {\n          l = i;\n          if (m_eivalues.coeff(i).imag() == Scalar(0))\n          {\n            if (w != Scalar(0))\n              m_matT.coeffRef(i,n) = -r / w;\n            else\n              m_matT.coeffRef(i,n) = -r / (eps * norm);\n          }\n          else // Solve real equations\n          {\n            Scalar x = m_matT.coeff(i,i+1);\n            Scalar y = m_matT.coeff(i+1,i);\n            Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag();\n            Scalar t = (x * lastr - lastw * r) / denom;\n            m_matT.coeffRef(i,n) = t;\n            if (abs(x) > abs(lastw))\n              m_matT.coeffRef(i+1,n) = (-r - w * t) / x;\n            else\n              m_matT.coeffRef(i+1,n) = (-lastr - y * t) / lastw;\n          }\n\n          // Overflow control\n          Scalar t = abs(m_matT.coeff(i,n));\n          if ((eps * t) * t > Scalar(1))\n            m_matT.col(n).tail(size-i) /= t;\n        }\n      }\n    }\n    else if (q < 
Scalar(0) && n > 0) // Complex vector\n    {\n      Scalar lastra(0), lastsa(0), lastw(0);\n      Index l = n-1;\n\n      // Last vector component imaginary so matrix is triangular\n      if (abs(m_matT.coeff(n,n-1)) > abs(m_matT.coeff(n-1,n)))\n      {\n        m_matT.coeffRef(n-1,n-1) = q / m_matT.coeff(n,n-1);\n        m_matT.coeffRef(n-1,n) = -(m_matT.coeff(n,n) - p) / m_matT.coeff(n,n-1);\n      }\n      else\n      {\n        ComplexScalar cc = ComplexScalar(Scalar(0),-m_matT.coeff(n-1,n)) / ComplexScalar(m_matT.coeff(n-1,n-1)-p,q);\n        m_matT.coeffRef(n-1,n-1) = numext::real(cc);\n        m_matT.coeffRef(n-1,n) = numext::imag(cc);\n      }\n      m_matT.coeffRef(n,n-1) = Scalar(0);\n      m_matT.coeffRef(n,n) = Scalar(1);\n      for (Index i = n-2; i >= 0; i--)\n      {\n        Scalar ra = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n-1).segment(l, n-l+1));\n        Scalar sa = m_matT.row(i).segment(l, n-l+1).dot(m_matT.col(n).segment(l, n-l+1));\n        Scalar w = m_matT.coeff(i,i) - p;\n\n        if (m_eivalues.coeff(i).imag() < Scalar(0))\n        {\n          lastw = w;\n          lastra = ra;\n          lastsa = sa;\n        }\n        else\n        {\n          l = i;\n          if (m_eivalues.coeff(i).imag() == RealScalar(0))\n          {\n            ComplexScalar cc = ComplexScalar(-ra,-sa) / ComplexScalar(w,q);\n            m_matT.coeffRef(i,n-1) = numext::real(cc);\n            m_matT.coeffRef(i,n) = numext::imag(cc);\n          }\n          else\n          {\n            // Solve complex equations\n            Scalar x = m_matT.coeff(i,i+1);\n            Scalar y = m_matT.coeff(i+1,i);\n            Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q;\n            Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q;\n            if ((vr == Scalar(0)) && (vi == Scalar(0)))\n              vr = eps * norm * (abs(w) + abs(q) + abs(x) + 
abs(y) + abs(lastw));\n\n            ComplexScalar cc = ComplexScalar(x*lastra-lastw*ra+q*sa,x*lastsa-lastw*sa-q*ra) / ComplexScalar(vr,vi);\n            m_matT.coeffRef(i,n-1) = numext::real(cc);\n            m_matT.coeffRef(i,n) = numext::imag(cc);\n            if (abs(x) > (abs(lastw) + abs(q)))\n            {\n              m_matT.coeffRef(i+1,n-1) = (-ra - w * m_matT.coeff(i,n-1) + q * m_matT.coeff(i,n)) / x;\n              m_matT.coeffRef(i+1,n) = (-sa - w * m_matT.coeff(i,n) - q * m_matT.coeff(i,n-1)) / x;\n            }\n            else\n            {\n              cc = ComplexScalar(-lastra-y*m_matT.coeff(i,n-1),-lastsa-y*m_matT.coeff(i,n)) / ComplexScalar(lastw,q);\n              m_matT.coeffRef(i+1,n-1) = numext::real(cc);\n              m_matT.coeffRef(i+1,n) = numext::imag(cc);\n            }\n          }\n\n          // Overflow control\n          Scalar t = numext::maxi<Scalar>(abs(m_matT.coeff(i,n-1)),abs(m_matT.coeff(i,n)));\n          if ((eps * t) * t > Scalar(1))\n            m_matT.block(i, n-1, size-i, 2) /= t;\n\n        }\n      }\n      \n      // We handled a pair of complex conjugate eigenvalues, so need to skip them both\n      n--;\n    }\n    else\n    {\n      eigen_assert(0 && \"Internal bug in EigenSolver (INF or NaN has not been detected)\"); // this should not happen\n    }\n  }\n\n  // Back transformation to get eigenvectors of original matrix\n  for (Index j = size-1; j >= 0; j--)\n  {\n    m_tmp.noalias() = m_eivec.leftCols(j+1) * m_matT.col(j).segment(0, j+1);\n    m_eivec.col(j) = m_tmp;\n  }\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_EIGENSOLVER_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012-2016 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>\n// Copyright (C) 2016 Tobias Wood <tobias@spinicist.org.uk>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_GENERALIZEDEIGENSOLVER_H\n#define EIGEN_GENERALIZEDEIGENSOLVER_H\n\n#include \"./RealQZ.h\"\n\nnamespace Eigen { \n\n/** \\eigenvalues_module \\ingroup Eigenvalues_Module\n  *\n  *\n  * \\class GeneralizedEigenSolver\n  *\n  * \\brief Computes the generalized eigenvalues and eigenvectors of a pair of general matrices\n  *\n  * \\tparam _MatrixType the type of the matrices of which we are computing the\n  * eigen-decomposition; this is expected to be an instantiation of the Matrix\n  * class template. Currently, only real matrices are supported.\n  *\n  * The generalized eigenvalues and eigenvectors of a matrix pair \\f$ A \\f$ and \\f$ B \\f$ are scalars\n  * \\f$ \\lambda \\f$ and vectors \\f$ v \\f$ such that \\f$ Av = \\lambda Bv \\f$.  If\n  * \\f$ D \\f$ is a diagonal matrix with the eigenvalues on the diagonal, and\n  * \\f$ V \\f$ is a matrix with the eigenvectors as its columns, then \\f$ A V =\n  * B V D \\f$. The matrix \\f$ V \\f$ is almost always invertible, in which case we\n  * have \\f$ A = B V D V^{-1} \\f$. This is called the generalized eigen-decomposition.\n  *\n  * The generalized eigenvalues and eigenvectors of a matrix pair may be complex, even when the\n  * matrices are real. Moreover, the generalized eigenvalue might be infinite if the matrix B is\n  * singular. 
To workaround this difficulty, the eigenvalues are provided as a pair of complex \\f$ \\alpha \\f$\n  * and real \\f$ \\beta \\f$ such that: \\f$ \\lambda_i = \\alpha_i / \\beta_i \\f$. If \\f$ \\beta_i \\f$ is (nearly) zero,\n  * then one can consider the well defined left eigenvalue \\f$ \\mu = \\beta_i / \\alpha_i\\f$ such that:\n  * \\f$ \\mu_i A v_i = B v_i \\f$, or even \\f$ \\mu_i u_i^T A  = u_i^T B \\f$ where \\f$ u_i \\f$ is\n  * called the left eigenvector.\n  *\n  * Call the function compute() to compute the generalized eigenvalues and eigenvectors of\n  * a given matrix pair. Alternatively, you can use the\n  * GeneralizedEigenSolver(const MatrixType&, const MatrixType&, bool) constructor which computes the\n  * eigenvalues and eigenvectors at construction time. Once the eigenvalue and\n  * eigenvectors are computed, they can be retrieved with the eigenvalues() and\n  * eigenvectors() functions.\n  *\n  * Here is an usage example of this class:\n  * Example: \\include GeneralizedEigenSolver.cpp\n  * Output: \\verbinclude GeneralizedEigenSolver.out\n  *\n  * \\sa MatrixBase::eigenvalues(), class ComplexEigenSolver, class SelfAdjointEigenSolver\n  */\ntemplate<typename _MatrixType> class GeneralizedEigenSolver\n{\n  public:\n\n    /** \\brief Synonym for the template parameter \\p _MatrixType. */\n    typedef _MatrixType MatrixType;\n\n    enum {\n      RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      Options = MatrixType::Options,\n      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n\n    /** \\brief Scalar type for matrices of type #MatrixType. */\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n\n    /** \\brief Complex scalar type for #MatrixType. 
\n      *\n      * This is \\c std::complex<Scalar> if #Scalar is real (e.g.,\n      * \\c float or \\c double) and just \\c Scalar if #Scalar is\n      * complex.\n      */\n    typedef std::complex<RealScalar> ComplexScalar;\n\n    /** \\brief Type for vector of real scalar values eigenvalues as returned by betas().\n      *\n      * This is a column vector with entries of type #Scalar.\n      * The length of the vector is the size of #MatrixType.\n      */\n    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> VectorType;\n\n    /** \\brief Type for vector of complex scalar values eigenvalues as returned by alphas().\n      *\n      * This is a column vector with entries of type #ComplexScalar.\n      * The length of the vector is the size of #MatrixType.\n      */\n    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ComplexVectorType;\n\n    /** \\brief Expression type for the eigenvalues as returned by eigenvalues().\n      */\n    typedef CwiseBinaryOp<internal::scalar_quotient_op<ComplexScalar,Scalar>,ComplexVectorType,VectorType> EigenvalueType;\n\n    /** \\brief Type for matrix of eigenvectors as returned by eigenvectors(). \n      *\n      * This is a square matrix with entries of type #ComplexScalar. 
\n      * The size is the same as the size of #MatrixType.\n      */\n    typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, Options, MaxRowsAtCompileTime, MaxColsAtCompileTime> EigenvectorsType;\n\n    /** \\brief Default constructor.\n      *\n      * The default constructor is useful in cases in which the user intends to\n      * perform decompositions via EigenSolver::compute(const MatrixType&, bool).\n      *\n      * \\sa compute() for an example.\n      */\n    GeneralizedEigenSolver()\n      : m_eivec(),\n        m_alphas(),\n        m_betas(),\n        m_valuesOkay(false),\n        m_vectorsOkay(false),\n        m_realQZ()\n    {}\n\n    /** \\brief Default constructor with memory preallocation\n      *\n      * Like the default constructor but with preallocation of the internal data\n      * according to the specified problem \\a size.\n      * \\sa GeneralizedEigenSolver()\n      */\n    explicit GeneralizedEigenSolver(Index size)\n      : m_eivec(size, size),\n        m_alphas(size),\n        m_betas(size),\n        m_valuesOkay(false),\n        m_vectorsOkay(false),\n        m_realQZ(size),\n        m_tmp(size)\n    {}\n\n    /** \\brief Constructor; computes the generalized eigendecomposition of given matrix pair.\n      * \n      * \\param[in]  A  Square matrix whose eigendecomposition is to be computed.\n      * \\param[in]  B  Square matrix whose eigendecomposition is to be computed.\n      * \\param[in]  computeEigenvectors  If true, both the eigenvectors and the\n      *    eigenvalues are computed; if false, only the eigenvalues are computed.\n      *\n      * This constructor calls compute() to compute the generalized eigenvalues\n      * and eigenvectors.\n      *\n      * \\sa compute()\n      */\n    GeneralizedEigenSolver(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true)\n      : m_eivec(A.rows(), A.cols()),\n        m_alphas(A.cols()),\n        m_betas(A.cols()),\n        m_valuesOkay(false),\n     
   m_vectorsOkay(false),\n        m_realQZ(A.cols()),\n        m_tmp(A.cols())\n    {\n      compute(A, B, computeEigenvectors);\n    }\n\n    /* \\brief Returns the computed generalized eigenvectors.\n      *\n      * \\returns  %Matrix whose columns are the (possibly complex) right eigenvectors.\n      * i.e. the eigenvectors that solve (A - l*B)x = 0. The ordering matches the eigenvalues.\n      *\n      * \\pre Either the constructor \n      * GeneralizedEigenSolver(const MatrixType&,const MatrixType&, bool) or the member function\n      * compute(const MatrixType&, const MatrixType& bool) has been called before, and\n      * \\p computeEigenvectors was set to true (the default).\n      *\n      * \\sa eigenvalues()\n      */\n    EigenvectorsType eigenvectors() const {\n      eigen_assert(m_vectorsOkay && \"Eigenvectors for GeneralizedEigenSolver were not calculated.\");\n      return m_eivec;\n    }\n\n    /** \\brief Returns an expression of the computed generalized eigenvalues.\n      *\n      * \\returns An expression of the column vector containing the eigenvalues.\n      *\n      * It is a shortcut for \\code this->alphas().cwiseQuotient(this->betas()); \\endcode\n      * Not that betas might contain zeros. It is therefore not recommended to use this function,\n      * but rather directly deal with the alphas and betas vectors.\n      *\n      * \\pre Either the constructor \n      * GeneralizedEigenSolver(const MatrixType&,const MatrixType&,bool) or the member function\n      * compute(const MatrixType&,const MatrixType&,bool) has been called before.\n      *\n      * The eigenvalues are repeated according to their algebraic multiplicity,\n      * so there are as many eigenvalues as rows in the matrix. 
The eigenvalues \n      * are not sorted in any particular order.\n      *\n      * \\sa alphas(), betas(), eigenvectors()\n      */\n    EigenvalueType eigenvalues() const\n    {\n      eigen_assert(m_valuesOkay && \"GeneralizedEigenSolver is not initialized.\");\n      return EigenvalueType(m_alphas,m_betas);\n    }\n\n    /** \\returns A const reference to the vectors containing the alpha values\n      *\n      * This vector permits to reconstruct the j-th eigenvalues as alphas(i)/betas(j).\n      *\n      * \\sa betas(), eigenvalues() */\n    ComplexVectorType alphas() const\n    {\n      eigen_assert(m_valuesOkay && \"GeneralizedEigenSolver is not initialized.\");\n      return m_alphas;\n    }\n\n    /** \\returns A const reference to the vectors containing the beta values\n      *\n      * This vector permits to reconstruct the j-th eigenvalues as alphas(i)/betas(j).\n      *\n      * \\sa alphas(), eigenvalues() */\n    VectorType betas() const\n    {\n      eigen_assert(m_valuesOkay && \"GeneralizedEigenSolver is not initialized.\");\n      return m_betas;\n    }\n\n    /** \\brief Computes generalized eigendecomposition of given matrix.\n      * \n      * \\param[in]  A  Square matrix whose eigendecomposition is to be computed.\n      * \\param[in]  B  Square matrix whose eigendecomposition is to be computed.\n      * \\param[in]  computeEigenvectors  If true, both the eigenvectors and the\n      *    eigenvalues are computed; if false, only the eigenvalues are\n      *    computed. \n      * \\returns    Reference to \\c *this\n      *\n      * This function computes the eigenvalues of the real matrix \\p matrix.\n      * The eigenvalues() function can be used to retrieve them.  If \n      * \\p computeEigenvectors is true, then the eigenvectors are also computed\n      * and can be retrieved by calling eigenvectors().\n      *\n      * The matrix is first reduced to real generalized Schur form using the RealQZ\n      * class. 
The generalized Schur decomposition is then used to compute the eigenvalues\n      * and eigenvectors.\n      *\n      * The cost of the computation is dominated by the cost of the\n      * generalized Schur decomposition.\n      *\n      * This method reuses of the allocated data in the GeneralizedEigenSolver object.\n      */\n    GeneralizedEigenSolver& compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors = true);\n\n    ComputationInfo info() const\n    {\n      eigen_assert(m_valuesOkay && \"EigenSolver is not initialized.\");\n      return m_realQZ.info();\n    }\n\n    /** Sets the maximal number of iterations allowed.\n    */\n    GeneralizedEigenSolver& setMaxIterations(Index maxIters)\n    {\n      m_realQZ.setMaxIterations(maxIters);\n      return *this;\n    }\n\n  protected:\n    \n    static void check_template_parameters()\n    {\n      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);\n      EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);\n    }\n    \n    EigenvectorsType m_eivec;\n    ComplexVectorType m_alphas;\n    VectorType m_betas;\n    bool m_valuesOkay, m_vectorsOkay;\n    RealQZ<MatrixType> m_realQZ;\n    ComplexVectorType m_tmp;\n};\n\ntemplate<typename MatrixType>\nGeneralizedEigenSolver<MatrixType>&\nGeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors)\n{\n  check_template_parameters();\n  \n  using std::sqrt;\n  using std::abs;\n  eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows());\n  Index size = A.cols();\n  m_valuesOkay = false;\n  m_vectorsOkay = false;\n  // Reduce to generalized real Schur form:\n  // A = Q S Z and B = Q T Z\n  m_realQZ.compute(A, B, computeEigenvectors);\n  if (m_realQZ.info() == Success)\n  {\n    // Resize storage\n    m_alphas.resize(size);\n    m_betas.resize(size);\n    if (computeEigenvectors)\n    {\n      m_eivec.resize(size,size);\n      m_tmp.resize(size);\n    }\n\n    
// Aliases:\n    Map<VectorType> v(reinterpret_cast<Scalar*>(m_tmp.data()), size);\n    ComplexVectorType &cv = m_tmp;\n    const MatrixType &mZ = m_realQZ.matrixZ();\n    const MatrixType &mS = m_realQZ.matrixS();\n    const MatrixType &mT = m_realQZ.matrixT();\n\n    Index i = 0;\n    while (i < size)\n    {\n      if (i == size - 1 || mS.coeff(i+1, i) == Scalar(0))\n      {\n        // Real eigenvalue\n        m_alphas.coeffRef(i) = mS.diagonal().coeff(i);\n        m_betas.coeffRef(i)  = mT.diagonal().coeff(i);\n        if (computeEigenvectors)\n        {\n          v.setConstant(Scalar(0.0));\n          v.coeffRef(i) = Scalar(1.0);\n          // For singular eigenvalues do nothing more\n          if(abs(m_betas.coeffRef(i)) >= (std::numeric_limits<RealScalar>::min)())\n          {\n            // Non-singular eigenvalue\n            const Scalar alpha = real(m_alphas.coeffRef(i));\n            const Scalar beta = m_betas.coeffRef(i);\n            for (Index j = i-1; j >= 0; j--)\n            {\n              const Index st = j+1;\n              const Index sz = i-j;\n              if (j > 0 && mS.coeff(j, j-1) != Scalar(0))\n              {\n                // 2x2 block\n                Matrix<Scalar, 2, 1> rhs = (alpha*mT.template block<2,Dynamic>(j-1,st,2,sz) - beta*mS.template block<2,Dynamic>(j-1,st,2,sz)) .lazyProduct( v.segment(st,sz) );\n                Matrix<Scalar, 2, 2> lhs = beta * mS.template block<2,2>(j-1,j-1) - alpha * mT.template block<2,2>(j-1,j-1);\n                v.template segment<2>(j-1) = lhs.partialPivLu().solve(rhs);\n                j--;\n              }\n              else\n              {\n                v.coeffRef(j) = -v.segment(st,sz).transpose().cwiseProduct(beta*mS.block(j,st,1,sz) - alpha*mT.block(j,st,1,sz)).sum() / (beta*mS.coeffRef(j,j) - alpha*mT.coeffRef(j,j));\n              }\n            }\n          }\n          m_eivec.col(i).real().noalias() = mZ.transpose() * v;\n          m_eivec.col(i).real().normalize();\n      
    m_eivec.col(i).imag().setConstant(0);\n        }\n        ++i;\n      }\n      else\n      {\n        // We need to extract the generalized eigenvalues of the pair of a general 2x2 block S and a positive diagonal 2x2 block T\n        // Then taking beta=T_00*T_11, we can avoid any division, and alpha is the eigenvalues of A = (U^-1 * S * U) * diag(T_11,T_00):\n\n        // T =  [a 0]\n        //      [0 b]\n        RealScalar a = mT.diagonal().coeff(i),\n                   b = mT.diagonal().coeff(i+1);\n        const RealScalar beta = m_betas.coeffRef(i) = m_betas.coeffRef(i+1) = a*b;\n\n        // ^^ NOTE: using diagonal()(i) instead of coeff(i,i) workarounds a MSVC bug.\n        Matrix<RealScalar,2,2> S2 = mS.template block<2,2>(i,i) * Matrix<Scalar,2,1>(b,a).asDiagonal();\n\n        Scalar p = Scalar(0.5) * (S2.coeff(0,0) - S2.coeff(1,1));\n        Scalar z = sqrt(abs(p * p + S2.coeff(1,0) * S2.coeff(0,1)));\n        const ComplexScalar alpha = ComplexScalar(S2.coeff(1,1) + p, (beta > 0) ? 
z : -z);\n        m_alphas.coeffRef(i)   = conj(alpha);\n        m_alphas.coeffRef(i+1) = alpha;\n\n        if (computeEigenvectors) {\n          // Compute eigenvector in position (i+1) and then position (i) is just the conjugate\n          cv.setZero();\n          cv.coeffRef(i+1) = Scalar(1.0);\n          // here, the \"static_cast\" workaound expression template issues.\n          cv.coeffRef(i) = -(static_cast<Scalar>(beta*mS.coeffRef(i,i+1)) - alpha*mT.coeffRef(i,i+1))\n                          / (static_cast<Scalar>(beta*mS.coeffRef(i,i))   - alpha*mT.coeffRef(i,i));\n          for (Index j = i-1; j >= 0; j--)\n          {\n            const Index st = j+1;\n            const Index sz = i+1-j;\n            if (j > 0 && mS.coeff(j, j-1) != Scalar(0))\n            {\n              // 2x2 block\n              Matrix<ComplexScalar, 2, 1> rhs = (alpha*mT.template block<2,Dynamic>(j-1,st,2,sz) - beta*mS.template block<2,Dynamic>(j-1,st,2,sz)) .lazyProduct( cv.segment(st,sz) );\n              Matrix<ComplexScalar, 2, 2> lhs = beta * mS.template block<2,2>(j-1,j-1) - alpha * mT.template block<2,2>(j-1,j-1);\n              cv.template segment<2>(j-1) = lhs.partialPivLu().solve(rhs);\n              j--;\n            } else {\n              cv.coeffRef(j) =  cv.segment(st,sz).transpose().cwiseProduct(beta*mS.block(j,st,1,sz) - alpha*mT.block(j,st,1,sz)).sum()\n                              / (alpha*mT.coeffRef(j,j) - static_cast<Scalar>(beta*mS.coeffRef(j,j)));\n            }\n          }\n          m_eivec.col(i+1).noalias() = (mZ.transpose() * cv);\n          m_eivec.col(i+1).normalize();\n          m_eivec.col(i) = m_eivec.col(i+1).conjugate();\n        }\n        i += 2;\n      }\n    }\n\n    m_valuesOkay = true;\n    m_vectorsOkay = computeEigenvectors;\n  }\n  return *this;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_GENERALIZEDEIGENSOLVER_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H\n#define EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H\n\n#include \"./Tridiagonalization.h\"\n\nnamespace Eigen { \n\n/** \\eigenvalues_module \\ingroup Eigenvalues_Module\n  *\n  *\n  * \\class GeneralizedSelfAdjointEigenSolver\n  *\n  * \\brief Computes eigenvalues and eigenvectors of the generalized selfadjoint eigen problem\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are computing the\n  * eigendecomposition; this is expected to be an instantiation of the Matrix\n  * class template.\n  *\n  * This class solves the generalized eigenvalue problem\n  * \\f$ Av = \\lambda Bv \\f$. In this case, the matrix \\f$ A \\f$ should be\n  * selfadjoint and the matrix \\f$ B \\f$ should be positive definite.\n  *\n  * Only the \\b lower \\b triangular \\b part of the input matrix is referenced.\n  *\n  * Call the function compute() to compute the eigenvalues and eigenvectors of\n  * a given matrix. 
Alternatively, you can use the\n  * GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int)\n  * constructor which computes the eigenvalues and eigenvectors at construction time.\n  * Once the eigenvalue and eigenvectors are computed, they can be retrieved with the eigenvalues()\n  * and eigenvectors() functions.\n  *\n  * The documentation for GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int)\n  * contains an example of the typical use of this class.\n  *\n  * \\sa class SelfAdjointEigenSolver, class EigenSolver, class ComplexEigenSolver\n  */\ntemplate<typename _MatrixType>\nclass GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixType>\n{\n    typedef SelfAdjointEigenSolver<_MatrixType> Base;\n  public:\n\n    typedef _MatrixType MatrixType;\n\n    /** \\brief Default constructor for fixed-size matrices.\n      *\n      * The default constructor is useful in cases in which the user intends to\n      * perform decompositions via compute(). This constructor\n      * can only be used if \\p _MatrixType is a fixed-size matrix; use\n      * GeneralizedSelfAdjointEigenSolver(Index) for dynamic-size matrices.\n      */\n    GeneralizedSelfAdjointEigenSolver() : Base() {}\n\n    /** \\brief Constructor, pre-allocates memory for dynamic-size matrices.\n      *\n      * \\param [in]  size  Positive integer, size of the matrix whose\n      * eigenvalues and eigenvectors will be computed.\n      *\n      * This constructor is useful for dynamic-size matrices, when the user\n      * intends to perform decompositions via compute(). The \\p size\n      * parameter is only used as a hint. 
It is not an error to give a wrong\n      * \\p size, but it may impair performance.\n      *\n      * \\sa compute() for an example\n      */\n    explicit GeneralizedSelfAdjointEigenSolver(Index size)\n        : Base(size)\n    {}\n\n    /** \\brief Constructor; computes generalized eigendecomposition of given matrix pencil.\n      *\n      * \\param[in]  matA  Selfadjoint matrix in matrix pencil.\n      *                   Only the lower triangular part of the matrix is referenced.\n      * \\param[in]  matB  Positive-definite matrix in matrix pencil.\n      *                   Only the lower triangular part of the matrix is referenced.\n      * \\param[in]  options A or-ed set of flags {#ComputeEigenvectors,#EigenvaluesOnly} | {#Ax_lBx,#ABx_lx,#BAx_lx}.\n      *                     Default is #ComputeEigenvectors|#Ax_lBx.\n      *\n      * This constructor calls compute(const MatrixType&, const MatrixType&, int)\n      * to compute the eigenvalues and (if requested) the eigenvectors of the\n      * generalized eigenproblem \\f$ Ax = \\lambda B x \\f$ with \\a matA the\n      * selfadjoint matrix \\f$ A \\f$ and \\a matB the positive definite matrix\n      * \\f$ B \\f$. Each eigenvector \\f$ x \\f$ satisfies the property\n      * \\f$ x^* B x = 1 \\f$. 
The eigenvectors are computed if\n      * \\a options contains ComputeEigenvectors.\n      *\n      * In addition, the two following variants can be solved via \\p options:\n      * - \\c ABx_lx: \\f$ ABx = \\lambda x \\f$\n      * - \\c BAx_lx: \\f$ BAx = \\lambda x \\f$\n      *\n      * Example: \\include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.cpp\n      * Output: \\verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType2.out\n      *\n      * \\sa compute(const MatrixType&, const MatrixType&, int)\n      */\n    GeneralizedSelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB,\n                                      int options = ComputeEigenvectors|Ax_lBx)\n      : Base(matA.cols())\n    {\n      compute(matA, matB, options);\n    }\n\n    /** \\brief Computes generalized eigendecomposition of given matrix pencil.\n      *\n      * \\param[in]  matA  Selfadjoint matrix in matrix pencil.\n      *                   Only the lower triangular part of the matrix is referenced.\n      * \\param[in]  matB  Positive-definite matrix in matrix pencil.\n      *                   Only the lower triangular part of the matrix is referenced.\n      * \\param[in]  options A or-ed set of flags {#ComputeEigenvectors,#EigenvaluesOnly} | {#Ax_lBx,#ABx_lx,#BAx_lx}.\n      *                     Default is #ComputeEigenvectors|#Ax_lBx.\n      *\n      * \\returns    Reference to \\c *this\n      *\n      * Accoring to \\p options, this function computes eigenvalues and (if requested)\n      * the eigenvectors of one of the following three generalized eigenproblems:\n      * - \\c Ax_lBx: \\f$ Ax = \\lambda B x \\f$\n      * - \\c ABx_lx: \\f$ ABx = \\lambda x \\f$\n      * - \\c BAx_lx: \\f$ BAx = \\lambda x \\f$\n      * with \\a matA the selfadjoint matrix \\f$ A \\f$ and \\a matB the positive definite\n      * matrix \\f$ B \\f$.\n      * In addition, each eigenvector \\f$ x \\f$ satisfies the property \\f$ x^* B x = 1 \\f$.\n      
*\n      * The eigenvalues() function can be used to retrieve\n      * the eigenvalues. If \\p options contains ComputeEigenvectors, then the\n      * eigenvectors are also computed and can be retrieved by calling\n      * eigenvectors().\n      *\n      * The implementation uses LLT to compute the Cholesky decomposition\n      * \\f$ B = LL^* \\f$ and computes the classical eigendecomposition\n      * of the selfadjoint matrix \\f$ L^{-1} A (L^*)^{-1} \\f$ if \\p options contains Ax_lBx\n      * and of \\f$ L^{*} A L \\f$ otherwise. This solves the\n      * generalized eigenproblem, because any solution of the generalized\n      * eigenproblem \\f$ Ax = \\lambda B x \\f$ corresponds to a solution\n      * \\f$ L^{-1} A (L^*)^{-1} (L^* x) = \\lambda (L^* x) \\f$ of the\n      * eigenproblem for \\f$ L^{-1} A (L^*)^{-1} \\f$. Similar statements\n      * can be made for the two other variants.\n      *\n      * Example: \\include SelfAdjointEigenSolver_compute_MatrixType2.cpp\n      * Output: \\verbinclude SelfAdjointEigenSolver_compute_MatrixType2.out\n      *\n      * \\sa GeneralizedSelfAdjointEigenSolver(const MatrixType&, const MatrixType&, int)\n      */\n    GeneralizedSelfAdjointEigenSolver& compute(const MatrixType& matA, const MatrixType& matB,\n                                               int options = ComputeEigenvectors|Ax_lBx);\n\n  protected:\n\n};\n\n\ntemplate<typename MatrixType>\nGeneralizedSelfAdjointEigenSolver<MatrixType>& GeneralizedSelfAdjointEigenSolver<MatrixType>::\ncompute(const MatrixType& matA, const MatrixType& matB, int options)\n{\n  eigen_assert(matA.cols()==matA.rows() && matB.rows()==matA.rows() && matB.cols()==matB.rows());\n  eigen_assert((options&~(EigVecMask|GenEigMask))==0\n          && (options&EigVecMask)!=EigVecMask\n          && ((options&GenEigMask)==0 || (options&GenEigMask)==Ax_lBx\n           || (options&GenEigMask)==ABx_lx || (options&GenEigMask)==BAx_lx)\n          && \"invalid option parameter\");\n\n  bool 
computeEigVecs = ((options&EigVecMask)==0) || ((options&EigVecMask)==ComputeEigenvectors);\n\n  // Compute the cholesky decomposition of matB = L L' = U'U\n  LLT<MatrixType> cholB(matB);\n\n  int type = (options&GenEigMask);\n  if(type==0)\n    type = Ax_lBx;\n\n  if(type==Ax_lBx)\n  {\n    // compute C = inv(L) A inv(L')\n    MatrixType matC = matA.template selfadjointView<Lower>();\n    cholB.matrixL().template solveInPlace<OnTheLeft>(matC);\n    cholB.matrixU().template solveInPlace<OnTheRight>(matC);\n\n    Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly );\n\n    // transform back the eigen vectors: evecs = inv(U) * evecs\n    if(computeEigVecs)\n      cholB.matrixU().solveInPlace(Base::m_eivec);\n  }\n  else if(type==ABx_lx)\n  {\n    // compute C = L' A L\n    MatrixType matC = matA.template selfadjointView<Lower>();\n    matC = matC * cholB.matrixL();\n    matC = cholB.matrixU() * matC;\n\n    Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly);\n\n    // transform back the eigen vectors: evecs = inv(U) * evecs\n    if(computeEigVecs)\n      cholB.matrixU().solveInPlace(Base::m_eivec);\n  }\n  else if(type==BAx_lx)\n  {\n    // compute C = L' A L\n    MatrixType matC = matA.template selfadjointView<Lower>();\n    matC = matC * cholB.matrixL();\n    matC = cholB.matrixU() * matC;\n\n    Base::compute(matC, computeEigVecs ? ComputeEigenvectors : EigenvaluesOnly);\n\n    // transform back the eigen vectors: evecs = L * evecs\n    if(computeEigVecs)\n      Base::m_eivec = cholB.matrixL() * Base::m_eivec;\n  }\n\n  return *this;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_GENERALIZEDSELFADJOINTEIGENSOLVER_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Eigenvalues/HessenbergDecomposition.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_HESSENBERGDECOMPOSITION_H\n#define EIGEN_HESSENBERGDECOMPOSITION_H\n\nnamespace Eigen { \n\nnamespace internal {\n  \ntemplate<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType;\ntemplate<typename MatrixType>\nstruct traits<HessenbergDecompositionMatrixHReturnType<MatrixType> >\n{\n  typedef MatrixType ReturnType;\n};\n\n}\n\n/** \\eigenvalues_module \\ingroup Eigenvalues_Module\n  *\n  *\n  * \\class HessenbergDecomposition\n  *\n  * \\brief Reduces a square matrix to Hessenberg form by an orthogonal similarity transformation\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are computing the Hessenberg decomposition\n  *\n  * This class performs an Hessenberg decomposition of a matrix \\f$ A \\f$. In\n  * the real case, the Hessenberg decomposition consists of an orthogonal\n  * matrix \\f$ Q \\f$ and a Hessenberg matrix \\f$ H \\f$ such that \\f$ A = Q H\n  * Q^T \\f$. An orthogonal matrix is a matrix whose inverse equals its\n  * transpose (\\f$ Q^{-1} = Q^T \\f$). A Hessenberg matrix has zeros below the\n  * subdiagonal, so it is almost upper triangular. The Hessenberg decomposition\n  * of a complex matrix is \\f$ A = Q H Q^* \\f$ with \\f$ Q \\f$ unitary (that is,\n  * \\f$ Q^{-1} = Q^* \\f$).\n  *\n  * Call the function compute() to compute the Hessenberg decomposition of a\n  * given matrix. Alternatively, you can use the\n  * HessenbergDecomposition(const MatrixType&) constructor which computes the\n  * Hessenberg decomposition at construction time. 
Once the decomposition is\n  * computed, you can use the matrixH() and matrixQ() functions to construct\n  * the matrices H and Q in the decomposition.\n  *\n  * The documentation for matrixH() contains an example of the typical use of\n  * this class.\n  *\n  * \\sa class ComplexSchur, class Tridiagonalization, \\ref QR_Module \"QR Module\"\n  */\ntemplate<typename _MatrixType> class HessenbergDecomposition\n{\n  public:\n\n    /** \\brief Synonym for the template parameter \\p _MatrixType. */\n    typedef _MatrixType MatrixType;\n\n    enum {\n      Size = MatrixType::RowsAtCompileTime,\n      SizeMinusOne = Size == Dynamic ? Dynamic : Size - 1,\n      Options = MatrixType::Options,\n      MaxSize = MatrixType::MaxRowsAtCompileTime,\n      MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : MaxSize - 1\n    };\n\n    /** \\brief Scalar type for matrices of type #MatrixType. */\n    typedef typename MatrixType::Scalar Scalar;\n    typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n\n    /** \\brief Type for vector of Householder coefficients.\n      *\n      * This is column vector with entries of type #Scalar. The length of the\n      * vector is one less than the size of #MatrixType, if it is a fixed-side\n      * type.\n      */\n    typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;\n\n    /** \\brief Return type of matrixQ() */\n    typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;\n    \n    typedef internal::HessenbergDecompositionMatrixHReturnType<MatrixType> MatrixHReturnType;\n\n    /** \\brief Default constructor; the decomposition will be computed later.\n      *\n      * \\param [in] size  The size of the matrix whose Hessenberg decomposition will be computed.\n      *\n      * The default constructor is useful in cases in which the user intends to\n      * perform decompositions via compute().  
The \\p size parameter is only\n      * used as a hint. It is not an error to give a wrong \\p size, but it may\n      * impair performance.\n      *\n      * \\sa compute() for an example.\n      */\n    explicit HessenbergDecomposition(Index size = Size==Dynamic ? 2 : Size)\n      : m_matrix(size,size),\n        m_temp(size),\n        m_isInitialized(false)\n    {\n      if(size>1)\n        m_hCoeffs.resize(size-1);\n    }\n\n    /** \\brief Constructor; computes Hessenberg decomposition of given matrix.\n      *\n      * \\param[in]  matrix  Square matrix whose Hessenberg decomposition is to be computed.\n      *\n      * This constructor calls compute() to compute the Hessenberg\n      * decomposition.\n      *\n      * \\sa matrixH() for an example.\n      */\n    template<typename InputType>\n    explicit HessenbergDecomposition(const EigenBase<InputType>& matrix)\n      : m_matrix(matrix.derived()),\n        m_temp(matrix.rows()),\n        m_isInitialized(false)\n    {\n      if(matrix.rows()<2)\n      {\n        m_isInitialized = true;\n        return;\n      }\n      m_hCoeffs.resize(matrix.rows()-1,1);\n      _compute(m_matrix, m_hCoeffs, m_temp);\n      m_isInitialized = true;\n    }\n\n    /** \\brief Computes Hessenberg decomposition of given matrix.\n      *\n      * \\param[in]  matrix  Square matrix whose Hessenberg decomposition is to be computed.\n      * \\returns    Reference to \\c *this\n      *\n      * The Hessenberg decomposition is computed by bringing the columns of the\n      * matrix successively in the required form using Householder reflections\n      * (see, e.g., Algorithm 7.4.2 in Golub \\& Van Loan, <i>%Matrix\n      * Computations</i>). 
The cost is \\f$ 10n^3/3 \\f$ flops, where \\f$ n \\f$\n      * denotes the size of the given matrix.\n      *\n      * This method reuses of the allocated data in the HessenbergDecomposition\n      * object.\n      *\n      * Example: \\include HessenbergDecomposition_compute.cpp\n      * Output: \\verbinclude HessenbergDecomposition_compute.out\n      */\n    template<typename InputType>\n    HessenbergDecomposition& compute(const EigenBase<InputType>& matrix)\n    {\n      m_matrix = matrix.derived();\n      if(matrix.rows()<2)\n      {\n        m_isInitialized = true;\n        return *this;\n      }\n      m_hCoeffs.resize(matrix.rows()-1,1);\n      _compute(m_matrix, m_hCoeffs, m_temp);\n      m_isInitialized = true;\n      return *this;\n    }\n\n    /** \\brief Returns the Householder coefficients.\n      *\n      * \\returns a const reference to the vector of Householder coefficients\n      *\n      * \\pre Either the constructor HessenbergDecomposition(const MatrixType&)\n      * or the member function compute(const MatrixType&) has been called\n      * before to compute the Hessenberg decomposition of a matrix.\n      *\n      * The Householder coefficients allow the reconstruction of the matrix\n      * \\f$ Q \\f$ in the Hessenberg decomposition from the packed data.\n      *\n      * \\sa packedMatrix(), \\ref Householder_Module \"Householder module\"\n      */\n    const CoeffVectorType& householderCoefficients() const\n    {\n      eigen_assert(m_isInitialized && \"HessenbergDecomposition is not initialized.\");\n      return m_hCoeffs;\n    }\n\n    /** \\brief Returns the internal representation of the decomposition\n      *\n      *\t\\returns a const reference to a matrix with the internal representation\n      *\t         of the decomposition.\n      *\n      * \\pre Either the constructor HessenbergDecomposition(const MatrixType&)\n      * or the member function compute(const MatrixType&) has been called\n      * before to compute the 
Hessenberg decomposition of a matrix.\n      *\n      * The returned matrix contains the following information:\n      *  - the upper part and lower sub-diagonal represent the Hessenberg matrix H\n      *  - the rest of the lower part contains the Householder vectors that, combined with\n      *    Householder coefficients returned by householderCoefficients(),\n      *    allows to reconstruct the matrix Q as\n      *       \\f$ Q = H_{N-1} \\ldots H_1 H_0 \\f$.\n      *    Here, the matrices \\f$ H_i \\f$ are the Householder transformations\n      *       \\f$ H_i = (I - h_i v_i v_i^T) \\f$\n      *    where \\f$ h_i \\f$ is the \\f$ i \\f$th Householder coefficient and\n      *    \\f$ v_i \\f$ is the Householder vector defined by\n      *       \\f$ v_i = [ 0, \\ldots, 0, 1, M(i+2,i), \\ldots, M(N-1,i) ]^T \\f$\n      *    with M the matrix returned by this function.\n      *\n      * See LAPACK for further details on this packed storage.\n      *\n      * Example: \\include HessenbergDecomposition_packedMatrix.cpp\n      * Output: \\verbinclude HessenbergDecomposition_packedMatrix.out\n      *\n      * \\sa householderCoefficients()\n      */\n    const MatrixType& packedMatrix() const\n    {\n      eigen_assert(m_isInitialized && \"HessenbergDecomposition is not initialized.\");\n      return m_matrix;\n    }\n\n    /** \\brief Reconstructs the orthogonal matrix Q in the decomposition\n      *\n      * \\returns object representing the matrix Q\n      *\n      * \\pre Either the constructor HessenbergDecomposition(const MatrixType&)\n      * or the member function compute(const MatrixType&) has been called\n      * before to compute the Hessenberg decomposition of a matrix.\n      *\n      * This function returns a light-weight object of template class\n      * HouseholderSequence. 
You can either apply it directly to a matrix or\n      * you can convert it to a matrix of type #MatrixType.\n      *\n      * \\sa matrixH() for an example, class HouseholderSequence\n      */\n    HouseholderSequenceType matrixQ() const\n    {\n      eigen_assert(m_isInitialized && \"HessenbergDecomposition is not initialized.\");\n      return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate())\n             .setLength(m_matrix.rows() - 1)\n             .setShift(1);\n    }\n\n    /** \\brief Constructs the Hessenberg matrix H in the decomposition\n      *\n      * \\returns expression object representing the matrix H\n      *\n      * \\pre Either the constructor HessenbergDecomposition(const MatrixType&)\n      * or the member function compute(const MatrixType&) has been called\n      * before to compute the Hessenberg decomposition of a matrix.\n      *\n      * The object returned by this function constructs the Hessenberg matrix H\n      * when it is assigned to a matrix or otherwise evaluated. The matrix H is\n      * constructed from the packed matrix as returned by packedMatrix(): The\n      * upper part (including the subdiagonal) of the packed matrix contains\n      * the matrix H. 
It may sometimes be better to directly use the packed\n      * matrix instead of constructing the matrix H.\n      *\n      * Example: \\include HessenbergDecomposition_matrixH.cpp\n      * Output: \\verbinclude HessenbergDecomposition_matrixH.out\n      *\n      * \\sa matrixQ(), packedMatrix()\n      */\n    MatrixHReturnType matrixH() const\n    {\n      eigen_assert(m_isInitialized && \"HessenbergDecomposition is not initialized.\");\n      return MatrixHReturnType(*this);\n    }\n\n  private:\n\n    typedef Matrix<Scalar, 1, Size, Options | RowMajor, 1, MaxSize> VectorType;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp);\n\n  protected:\n    MatrixType m_matrix;\n    CoeffVectorType m_hCoeffs;\n    VectorType m_temp;\n    bool m_isInitialized;\n};\n\n/** \\internal\n  * Performs a tridiagonal decomposition of \\a matA in place.\n  *\n  * \\param matA the input selfadjoint matrix\n  * \\param hCoeffs returned Householder coefficients\n  *\n  * The result is written in the lower triangular part of \\a matA.\n  *\n  * Implemented from Golub's \"%Matrix Computations\", algorithm 8.3.1.\n  *\n  * \\sa packedMatrix()\n  */\ntemplate<typename MatrixType>\nvoid HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs, VectorType& temp)\n{\n  eigen_assert(matA.rows()==matA.cols());\n  Index n = matA.rows();\n  temp.resize(n);\n  for (Index i = 0; i<n-1; ++i)\n  {\n    // let's consider the vector v = i-th column starting at position i+1\n    Index remainingSize = n-i-1;\n    RealScalar beta;\n    Scalar h;\n    matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);\n    matA.col(i).coeffRef(i+1) = beta;\n    hCoeffs.coeffRef(i) = h;\n\n    // Apply similarity transformation to remaining columns,\n    // i.e., compute A = H A H'\n\n    // A = H A\n    matA.bottomRightCorner(remainingSize, remainingSize)\n        
.applyHouseholderOnTheLeft(matA.col(i).tail(remainingSize-1), h, &temp.coeffRef(0));\n\n    // A = A H'\n    matA.rightCols(remainingSize)\n        .applyHouseholderOnTheRight(matA.col(i).tail(remainingSize-1).conjugate(), numext::conj(h), &temp.coeffRef(0));\n  }\n}\n\nnamespace internal {\n\n/** \\eigenvalues_module \\ingroup Eigenvalues_Module\n  *\n  *\n  * \\brief Expression type for return value of HessenbergDecomposition::matrixH()\n  *\n  * \\tparam MatrixType type of matrix in the Hessenberg decomposition\n  *\n  * Objects of this type represent the Hessenberg matrix in the Hessenberg\n  * decomposition of some matrix. The object holds a reference to the\n  * HessenbergDecomposition class until the it is assigned or evaluated for\n  * some other reason (the reference should remain valid during the life time\n  * of this object). This class is the return type of\n  * HessenbergDecomposition::matrixH(); there is probably no other use for this\n  * class.\n  */\ntemplate<typename MatrixType> struct HessenbergDecompositionMatrixHReturnType\n: public ReturnByValue<HessenbergDecompositionMatrixHReturnType<MatrixType> >\n{\n  public:\n    /** \\brief Constructor.\n      *\n      * \\param[in] hess  Hessenberg decomposition\n      */\n    HessenbergDecompositionMatrixHReturnType(const HessenbergDecomposition<MatrixType>& hess) : m_hess(hess) { }\n\n    /** \\brief Hessenberg matrix in decomposition.\n      *\n      * \\param[out] result  Hessenberg matrix in decomposition \\p hess which\n      *                     was passed to the constructor\n      */\n    template <typename ResultType>\n    inline void evalTo(ResultType& result) const\n    {\n      result = m_hess.packedMatrix();\n      Index n = result.rows();\n      if (n>2)\n        result.bottomLeftCorner(n-2, n-2).template triangularView<Lower>().setZero();\n    }\n\n    Index rows() const { return m_hess.packedMatrix().rows(); }\n    Index cols() const { return m_hess.packedMatrix().cols(); }\n\n  
protected:\n    const HessenbergDecomposition<MatrixType>& m_hess;\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_HESSENBERGDECOMPOSITION_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Eigenvalues/MatrixBaseEigenvalues.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_MATRIXBASEEIGENVALUES_H\n#define EIGEN_MATRIXBASEEIGENVALUES_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename Derived, bool IsComplex>\nstruct eigenvalues_selector\n{\n  // this is the implementation for the case IsComplex = true\n  static inline typename MatrixBase<Derived>::EigenvaluesReturnType const\n  run(const MatrixBase<Derived>& m)\n  {\n    typedef typename Derived::PlainObject PlainObject;\n    PlainObject m_eval(m);\n    return ComplexEigenSolver<PlainObject>(m_eval, false).eigenvalues();\n  }\n};\n\ntemplate<typename Derived>\nstruct eigenvalues_selector<Derived, false>\n{\n  static inline typename MatrixBase<Derived>::EigenvaluesReturnType const\n  run(const MatrixBase<Derived>& m)\n  {\n    typedef typename Derived::PlainObject PlainObject;\n    PlainObject m_eval(m);\n    return EigenSolver<PlainObject>(m_eval, false).eigenvalues();\n  }\n};\n\n} // end namespace internal\n\n/** \\brief Computes the eigenvalues of a matrix \n  * \\returns Column vector containing the eigenvalues.\n  *\n  * \\eigenvalues_module\n  * This function computes the eigenvalues with the help of the EigenSolver\n  * class (for real matrices) or the ComplexEigenSolver class (for complex\n  * matrices). 
\n  *\n  * The eigenvalues are repeated according to their algebraic multiplicity,\n  * so there are as many eigenvalues as rows in the matrix.\n  *\n  * The SelfAdjointView class provides a better algorithm for selfadjoint\n  * matrices.\n  *\n  * Example: \\include MatrixBase_eigenvalues.cpp\n  * Output: \\verbinclude MatrixBase_eigenvalues.out\n  *\n  * \\sa EigenSolver::eigenvalues(), ComplexEigenSolver::eigenvalues(),\n  *     SelfAdjointView::eigenvalues()\n  */\ntemplate<typename Derived>\ninline typename MatrixBase<Derived>::EigenvaluesReturnType\nMatrixBase<Derived>::eigenvalues() const\n{\n  typedef typename internal::traits<Derived>::Scalar Scalar;\n  return internal::eigenvalues_selector<Derived, NumTraits<Scalar>::IsComplex>::run(derived());\n}\n\n/** \\brief Computes the eigenvalues of a matrix\n  * \\returns Column vector containing the eigenvalues.\n  *\n  * \\eigenvalues_module\n  * This function computes the eigenvalues with the help of the\n  * SelfAdjointEigenSolver class.  The eigenvalues are repeated according to\n  * their algebraic multiplicity, so there are as many eigenvalues as rows in\n  * the matrix.\n  *\n  * Example: \\include SelfAdjointView_eigenvalues.cpp\n  * Output: \\verbinclude SelfAdjointView_eigenvalues.out\n  *\n  * \\sa SelfAdjointEigenSolver::eigenvalues(), MatrixBase::eigenvalues()\n  */\ntemplate<typename MatrixType, unsigned int UpLo> \nEIGEN_DEVICE_FUNC inline typename SelfAdjointView<MatrixType, UpLo>::EigenvaluesReturnType\nSelfAdjointView<MatrixType, UpLo>::eigenvalues() const\n{\n  typedef typename SelfAdjointView<MatrixType, UpLo>::PlainObject PlainObject;\n  PlainObject thisAsMatrix(*this);\n  return SelfAdjointEigenSolver<PlainObject>(thisAsMatrix, false).eigenvalues();\n}\n\n\n\n/** \\brief Computes the L2 operator norm\n  * \\returns Operator norm of the matrix.\n  *\n  * \\eigenvalues_module\n  * This function computes the L2 operator norm of a matrix, which is also\n  * known as the spectral norm. 
The norm of a matrix \\f$ A \\f$ is defined to be\n  * \\f[ \\|A\\|_2 = \\max_x \\frac{\\|Ax\\|_2}{\\|x\\|_2} \\f]\n  * where the maximum is over all vectors and the norm on the right is the\n  * Euclidean vector norm. The norm equals the largest singular value, which is\n  * the square root of the largest eigenvalue of the positive semi-definite\n  * matrix \\f$ A^*A \\f$.\n  *\n  * The current implementation uses the eigenvalues of \\f$ A^*A \\f$, as computed\n  * by SelfAdjointView::eigenvalues(), to compute the operator norm of a\n  * matrix.  The SelfAdjointView class provides a better algorithm for\n  * selfadjoint matrices.\n  *\n  * Example: \\include MatrixBase_operatorNorm.cpp\n  * Output: \\verbinclude MatrixBase_operatorNorm.out\n  *\n  * \\sa SelfAdjointView::eigenvalues(), SelfAdjointView::operatorNorm()\n  */\ntemplate<typename Derived>\ninline typename MatrixBase<Derived>::RealScalar\nMatrixBase<Derived>::operatorNorm() const\n{\n  using std::sqrt;\n  typename Derived::PlainObject m_eval(derived());\n  // FIXME if it is really guaranteed that the eigenvalues are already sorted,\n  // then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough.\n  return sqrt((m_eval*m_eval.adjoint())\n                 .eval()\n\t\t .template selfadjointView<Lower>()\n\t\t .eigenvalues()\n\t\t .maxCoeff()\n\t\t );\n}\n\n/** \\brief Computes the L2 operator norm\n  * \\returns Operator norm of the matrix.\n  *\n  * \\eigenvalues_module\n  * This function computes the L2 operator norm of a self-adjoint matrix. 
For a\n  * self-adjoint matrix, the operator norm is the largest eigenvalue.\n  *\n  * The current implementation uses the eigenvalues of the matrix, as computed\n  * by eigenvalues(), to compute the operator norm of the matrix.\n  *\n  * Example: \\include SelfAdjointView_operatorNorm.cpp\n  * Output: \\verbinclude SelfAdjointView_operatorNorm.out\n  *\n  * \\sa eigenvalues(), MatrixBase::operatorNorm()\n  */\ntemplate<typename MatrixType, unsigned int UpLo>\nEIGEN_DEVICE_FUNC inline typename SelfAdjointView<MatrixType, UpLo>::RealScalar\nSelfAdjointView<MatrixType, UpLo>::operatorNorm() const\n{\n  return eigenvalues().cwiseAbs().maxCoeff();\n}\n\n} // end namespace Eigen\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Eigenvalues/RealQZ.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Alexey Korepanov <kaikaikai@yandex.ru>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_REAL_QZ_H\n#define EIGEN_REAL_QZ_H\n\nnamespace Eigen {\n\n  /** \\eigenvalues_module \\ingroup Eigenvalues_Module\n   *\n   *\n   * \\class RealQZ\n   *\n   * \\brief Performs a real QZ decomposition of a pair of square matrices\n   *\n   * \\tparam _MatrixType the type of the matrix of which we are computing the\n   * real QZ decomposition; this is expected to be an instantiation of the\n   * Matrix class template.\n   *\n   * Given real square matrices A and B, this class computes the real QZ\n   * decomposition: \\f$ A = Q S Z \\f$, \\f$ B = Q T Z \\f$ where Q and Z are\n   * real orthogonal matrices, T is upper-triangular matrix, and S is upper\n   * quasi-triangular matrix. An orthogonal matrix is a matrix whose\n   * inverse is equal to its transpose, \\f$ U^{-1} = U^T \\f$. A quasi-triangular\n   * matrix is a block-triangular matrix whose diagonal consists of 1-by-1\n   * blocks and 2-by-2 blocks where further reduction is impossible due to\n   * complex eigenvalues. \n   *\n   * The eigenvalues of the pencil \\f$ A - z B \\f$ can be obtained from\n   * 1x1 and 2x2 blocks on the diagonals of S and T.\n   *\n   * Call the function compute() to compute the real QZ decomposition of a\n   * given pair of matrices. Alternatively, you can use the \n   * RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ)\n   * constructor which computes the real QZ decomposition at construction\n   * time. 
Once the decomposition is computed, you can use the matrixS(),\n   * matrixT(), matrixQ() and matrixZ() functions to retrieve the matrices\n   * S, T, Q and Z in the decomposition. If computeQZ==false, some time\n   * is saved by not computing matrices Q and Z.\n   *\n   * Example: \\include RealQZ_compute.cpp\n   * Output: \\include RealQZ_compute.out\n   *\n   * \\note The implementation is based on the algorithm in \"Matrix Computations\"\n   * by Gene H. Golub and Charles F. Van Loan, and a paper \"An algorithm for\n   * generalized eigenvalue problems\" by C.B.Moler and G.W.Stewart.\n   *\n   * \\sa class RealSchur, class ComplexSchur, class EigenSolver, class ComplexEigenSolver\n   */\n\n  template<typename _MatrixType> class RealQZ\n  {\n    public:\n      typedef _MatrixType MatrixType;\n      enum {\n        RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n        ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n        Options = MatrixType::Options,\n        MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n        MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n      };\n      typedef typename MatrixType::Scalar Scalar;\n      typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;\n      typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n\n      typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;\n      typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;\n\n      /** \\brief Default constructor.\n       *\n       * \\param [in] size  Positive integer, size of the matrix whose QZ decomposition will be computed.\n       *\n       * The default constructor is useful in cases in which the user intends to\n       * perform decompositions via compute().  The \\p size parameter is only\n       * used as a hint. 
It is not an error to give a wrong \\p size, but it may\n       * impair performance.\n       *\n       * \\sa compute() for an example.\n       */\n      explicit RealQZ(Index size = RowsAtCompileTime==Dynamic ? 1 : RowsAtCompileTime) :\n        m_S(size, size),\n        m_T(size, size),\n        m_Q(size, size),\n        m_Z(size, size),\n        m_workspace(size*2),\n        m_maxIters(400),\n        m_isInitialized(false)\n        { }\n\n      /** \\brief Constructor; computes real QZ decomposition of given matrices\n       * \n       * \\param[in]  A          Matrix A.\n       * \\param[in]  B          Matrix B.\n       * \\param[in]  computeQZ  If false, Q and Z are not computed.\n       *\n       * This constructor calls compute() to compute the QZ decomposition.\n       */\n      RealQZ(const MatrixType& A, const MatrixType& B, bool computeQZ = true) :\n        m_S(A.rows(),A.cols()),\n        m_T(A.rows(),A.cols()),\n        m_Q(A.rows(),A.cols()),\n        m_Z(A.rows(),A.cols()),\n        m_workspace(A.rows()*2),\n        m_maxIters(400),\n        m_isInitialized(false) {\n          compute(A, B, computeQZ);\n        }\n\n      /** \\brief Returns matrix Q in the QZ decomposition. \n       *\n       * \\returns A const reference to the matrix Q.\n       */\n      const MatrixType& matrixQ() const {\n        eigen_assert(m_isInitialized && \"RealQZ is not initialized.\");\n        eigen_assert(m_computeQZ && \"The matrices Q and Z have not been computed during the QZ decomposition.\");\n        return m_Q;\n      }\n\n      /** \\brief Returns matrix Z in the QZ decomposition. 
\n       *\n       * \\returns A const reference to the matrix Z.\n       */\n      const MatrixType& matrixZ() const {\n        eigen_assert(m_isInitialized && \"RealQZ is not initialized.\");\n        eigen_assert(m_computeQZ && \"The matrices Q and Z have not been computed during the QZ decomposition.\");\n        return m_Z;\n      }\n\n      /** \\brief Returns matrix S in the QZ decomposition. \n       *\n       * \\returns A const reference to the matrix S.\n       */\n      const MatrixType& matrixS() const {\n        eigen_assert(m_isInitialized && \"RealQZ is not initialized.\");\n        return m_S;\n      }\n\n      /** \\brief Returns matrix T in the QZ decomposition. \n       *\n       * \\returns A const reference to the matrix T.\n       */\n      const MatrixType& matrixT() const {\n        eigen_assert(m_isInitialized && \"RealQZ is not initialized.\");\n        return m_T;\n      }\n\n      /** \\brief Computes QZ decomposition of given matrices. \n       * \n       * \\param[in]  A          Matrix A.\n       * \\param[in]  B          Matrix B.\n       * \\param[in]  computeQZ  If false, Q and Z are not computed.\n       * \\returns    Reference to \\c *this\n       */\n      RealQZ& compute(const MatrixType& A, const MatrixType& B, bool computeQZ = true);\n\n      /** \\brief Reports whether previous computation was successful.\n       *\n       * \\returns \\c Success if computation was successful, \\c NoConvergence otherwise.\n       */\n      ComputationInfo info() const\n      {\n        eigen_assert(m_isInitialized && \"RealQZ is not initialized.\");\n        return m_info;\n      }\n\n      /** \\brief Returns number of performed QR-like iterations.\n      */\n      Index iterations() const\n      {\n        eigen_assert(m_isInitialized && \"RealQZ is not initialized.\");\n        return m_global_iter;\n      }\n\n      /** Sets the maximal number of iterations allowed to converge to one eigenvalue\n       * or decouple the problem.\n      
*/\n      RealQZ& setMaxIterations(Index maxIters)\n      {\n        m_maxIters = maxIters;\n        return *this;\n      }\n\n    private:\n\n      MatrixType m_S, m_T, m_Q, m_Z;\n      Matrix<Scalar,Dynamic,1> m_workspace;\n      ComputationInfo m_info;\n      Index m_maxIters;\n      bool m_isInitialized;\n      bool m_computeQZ;\n      Scalar m_normOfT, m_normOfS;\n      Index m_global_iter;\n\n      typedef Matrix<Scalar,3,1> Vector3s;\n      typedef Matrix<Scalar,2,1> Vector2s;\n      typedef Matrix<Scalar,2,2> Matrix2s;\n      typedef JacobiRotation<Scalar> JRs;\n\n      void hessenbergTriangular();\n      void computeNorms();\n      Index findSmallSubdiagEntry(Index iu);\n      Index findSmallDiagEntry(Index f, Index l);\n      void splitOffTwoRows(Index i);\n      void pushDownZero(Index z, Index f, Index l);\n      void step(Index f, Index l, Index iter);\n\n  }; // RealQZ\n\n  /** \\internal Reduces S and T to upper Hessenberg - triangular form */\n  template<typename MatrixType>\n    void RealQZ<MatrixType>::hessenbergTriangular()\n    {\n\n      const Index dim = m_S.cols();\n\n      // perform QR decomposition of T, overwrite T with R, save Q\n      HouseholderQR<MatrixType> qrT(m_T);\n      m_T = qrT.matrixQR();\n      m_T.template triangularView<StrictlyLower>().setZero();\n      m_Q = qrT.householderQ();\n      // overwrite S with Q* S\n      m_S.applyOnTheLeft(m_Q.adjoint());\n      // init Z as Identity\n      if (m_computeQZ)\n        m_Z = MatrixType::Identity(dim,dim);\n      // reduce S to upper Hessenberg with Givens rotations\n      for (Index j=0; j<=dim-3; j++) {\n        for (Index i=dim-1; i>=j+2; i--) {\n          JRs G;\n          // kill S(i,j)\n          if(m_S.coeff(i,j) != 0)\n          {\n            G.makeGivens(m_S.coeff(i-1,j), m_S.coeff(i,j), &m_S.coeffRef(i-1, j));\n            m_S.coeffRef(i,j) = Scalar(0.0);\n            m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint());\n            
m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint());\n            // update Q\n            if (m_computeQZ)\n              m_Q.applyOnTheRight(i-1,i,G);\n          }\n          // kill T(i,i-1)\n          if(m_T.coeff(i,i-1)!=Scalar(0))\n          {\n            G.makeGivens(m_T.coeff(i,i), m_T.coeff(i,i-1), &m_T.coeffRef(i,i));\n            m_T.coeffRef(i,i-1) = Scalar(0.0);\n            m_S.applyOnTheRight(i,i-1,G);\n            m_T.topRows(i).applyOnTheRight(i,i-1,G);\n            // update Z\n            if (m_computeQZ)\n              m_Z.applyOnTheLeft(i,i-1,G.adjoint());\n          }\n        }\n      }\n    }\n\n  /** \\internal Computes vector L1 norms of S and T when in Hessenberg-Triangular form already */\n  template<typename MatrixType>\n    inline void RealQZ<MatrixType>::computeNorms()\n    {\n      const Index size = m_S.cols();\n      m_normOfS = Scalar(0.0);\n      m_normOfT = Scalar(0.0);\n      for (Index j = 0; j < size; ++j)\n      {\n        m_normOfS += m_S.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum();\n        m_normOfT += m_T.row(j).segment(j, size - j).cwiseAbs().sum();\n      }\n    }\n\n\n  /** \\internal Look for single small sub-diagonal element S(res, res-1) and return res (or 0) */\n  template<typename MatrixType>\n    inline Index RealQZ<MatrixType>::findSmallSubdiagEntry(Index iu)\n    {\n      using std::abs;\n      Index res = iu;\n      while (res > 0)\n      {\n        Scalar s = abs(m_S.coeff(res-1,res-1)) + abs(m_S.coeff(res,res));\n        if (s == Scalar(0.0))\n          s = m_normOfS;\n        if (abs(m_S.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)\n          break;\n        res--;\n      }\n      return res;\n    }\n\n  /** \\internal Look for single small diagonal element T(res, res) for res between f and l, and return res (or f-1)  */\n  template<typename MatrixType>\n    inline Index RealQZ<MatrixType>::findSmallDiagEntry(Index f, Index l)\n    {\n      using std::abs;\n      Index res = 
l;\n      while (res >= f) {\n        if (abs(m_T.coeff(res,res)) <= NumTraits<Scalar>::epsilon() * m_normOfT)\n          break;\n        res--;\n      }\n      return res;\n    }\n\n  /** \\internal decouple 2x2 diagonal block in rows i, i+1 if eigenvalues are real */\n  template<typename MatrixType>\n    inline void RealQZ<MatrixType>::splitOffTwoRows(Index i)\n    {\n      using std::abs;\n      using std::sqrt;\n      const Index dim=m_S.cols();\n      if (abs(m_S.coeff(i+1,i))==Scalar(0))\n        return;\n      Index j = findSmallDiagEntry(i,i+1);\n      if (j==i-1)\n      {\n        // block of (S T^{-1})\n        Matrix2s STi = m_T.template block<2,2>(i,i).template triangularView<Upper>().\n          template solve<OnTheRight>(m_S.template block<2,2>(i,i));\n        Scalar p = Scalar(0.5)*(STi(0,0)-STi(1,1));\n        Scalar q = p*p + STi(1,0)*STi(0,1);\n        if (q>=0) {\n          Scalar z = sqrt(q);\n          // one QR-like iteration for ABi - lambda I\n          // is enough - when we know exact eigenvalue in advance,\n          // convergence is immediate\n          JRs G;\n          if (p>=0)\n            G.makeGivens(p + z, STi(1,0));\n          else\n            G.makeGivens(p - z, STi(1,0));\n          m_S.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint());\n          m_T.rightCols(dim-i).applyOnTheLeft(i,i+1,G.adjoint());\n          // update Q\n          if (m_computeQZ)\n            m_Q.applyOnTheRight(i,i+1,G);\n\n          G.makeGivens(m_T.coeff(i+1,i+1), m_T.coeff(i+1,i));\n          m_S.topRows(i+2).applyOnTheRight(i+1,i,G);\n          m_T.topRows(i+2).applyOnTheRight(i+1,i,G);\n          // update Z\n          if (m_computeQZ)\n            m_Z.applyOnTheLeft(i+1,i,G.adjoint());\n\n          m_S.coeffRef(i+1,i) = Scalar(0.0);\n          m_T.coeffRef(i+1,i) = Scalar(0.0);\n        }\n      }\n      else\n      {\n        pushDownZero(j,i,i+1);\n      }\n    }\n\n  /** \\internal use zero in T(z,z) to zero S(l,l-1), working in block f..l 
*/\n  template<typename MatrixType>\n    inline void RealQZ<MatrixType>::pushDownZero(Index z, Index f, Index l)\n    {\n      JRs G;\n      const Index dim = m_S.cols();\n      for (Index zz=z; zz<l; zz++)\n      {\n        // push 0 down\n        Index firstColS = zz>f ? (zz-1) : zz;\n        G.makeGivens(m_T.coeff(zz, zz+1), m_T.coeff(zz+1, zz+1));\n        m_S.rightCols(dim-firstColS).applyOnTheLeft(zz,zz+1,G.adjoint());\n        m_T.rightCols(dim-zz).applyOnTheLeft(zz,zz+1,G.adjoint());\n        m_T.coeffRef(zz+1,zz+1) = Scalar(0.0);\n        // update Q\n        if (m_computeQZ)\n          m_Q.applyOnTheRight(zz,zz+1,G);\n        // kill S(zz+1, zz-1)\n        if (zz>f)\n        {\n          G.makeGivens(m_S.coeff(zz+1, zz), m_S.coeff(zz+1,zz-1));\n          m_S.topRows(zz+2).applyOnTheRight(zz, zz-1,G);\n          m_T.topRows(zz+1).applyOnTheRight(zz, zz-1,G);\n          m_S.coeffRef(zz+1,zz-1) = Scalar(0.0);\n          // update Z\n          if (m_computeQZ)\n            m_Z.applyOnTheLeft(zz,zz-1,G.adjoint());\n        }\n      }\n      // finally kill S(l,l-1)\n      G.makeGivens(m_S.coeff(l,l), m_S.coeff(l,l-1));\n      m_S.applyOnTheRight(l,l-1,G);\n      m_T.applyOnTheRight(l,l-1,G);\n      m_S.coeffRef(l,l-1)=Scalar(0.0);\n      // update Z\n      if (m_computeQZ)\n        m_Z.applyOnTheLeft(l,l-1,G.adjoint());\n    }\n\n  /** \\internal QR-like iterative step for block f..l */\n  template<typename MatrixType>\n    inline void RealQZ<MatrixType>::step(Index f, Index l, Index iter)\n    {\n      using std::abs;\n      const Index dim = m_S.cols();\n\n      // x, y, z\n      Scalar x, y, z;\n      if (iter==10)\n      {\n        // Wilkinson ad hoc shift\n        const Scalar\n          a11=m_S.coeff(f+0,f+0), a12=m_S.coeff(f+0,f+1),\n          a21=m_S.coeff(f+1,f+0), a22=m_S.coeff(f+1,f+1), a32=m_S.coeff(f+2,f+1),\n          b12=m_T.coeff(f+0,f+1),\n          b11i=Scalar(1.0)/m_T.coeff(f+0,f+0),\n          b22i=Scalar(1.0)/m_T.coeff(f+1,f+1),\n         
 a87=m_S.coeff(l-1,l-2),\n          a98=m_S.coeff(l-0,l-1),\n          b77i=Scalar(1.0)/m_T.coeff(l-2,l-2),\n          b88i=Scalar(1.0)/m_T.coeff(l-1,l-1);\n        Scalar ss = abs(a87*b77i) + abs(a98*b88i),\n               lpl = Scalar(1.5)*ss,\n               ll = ss*ss;\n        x = ll + a11*a11*b11i*b11i - lpl*a11*b11i + a12*a21*b11i*b22i\n          - a11*a21*b12*b11i*b11i*b22i;\n        y = a11*a21*b11i*b11i - lpl*a21*b11i + a21*a22*b11i*b22i \n          - a21*a21*b12*b11i*b11i*b22i;\n        z = a21*a32*b11i*b22i;\n      }\n      else if (iter==16)\n      {\n        // another exceptional shift\n        x = m_S.coeff(f,f)/m_T.coeff(f,f)-m_S.coeff(l,l)/m_T.coeff(l,l) + m_S.coeff(l,l-1)*m_T.coeff(l-1,l) /\n          (m_T.coeff(l-1,l-1)*m_T.coeff(l,l));\n        y = m_S.coeff(f+1,f)/m_T.coeff(f,f);\n        z = 0;\n      }\n      else if (iter>23 && !(iter%8))\n      {\n        // extremely exceptional shift\n        x = internal::random<Scalar>(-1.0,1.0);\n        y = internal::random<Scalar>(-1.0,1.0);\n        z = internal::random<Scalar>(-1.0,1.0);\n      }\n      else\n      {\n        // Compute the shifts: (x,y,z,0...) = (AB^-1 - l1 I) (AB^-1 - l2 I) e1\n        // where l1 and l2 are the eigenvalues of the 2x2 matrix C = U V^-1 where\n        // U and V are 2x2 bottom right sub matrices of A and B. 
Thus:\n        //  = AB^-1AB^-1 + l1 l2 I - (l1+l2)(AB^-1)\n        //  = AB^-1AB^-1 + det(M) - tr(M)(AB^-1)\n        // Since we are only interested in having x, y, z with a correct ratio, we have:\n        const Scalar\n          a11 = m_S.coeff(f,f),     a12 = m_S.coeff(f,f+1),\n          a21 = m_S.coeff(f+1,f),   a22 = m_S.coeff(f+1,f+1),\n                                    a32 = m_S.coeff(f+2,f+1),\n\n          a88 = m_S.coeff(l-1,l-1), a89 = m_S.coeff(l-1,l),\n          a98 = m_S.coeff(l,l-1),   a99 = m_S.coeff(l,l),\n\n          b11 = m_T.coeff(f,f),     b12 = m_T.coeff(f,f+1),\n                                    b22 = m_T.coeff(f+1,f+1),\n\n          b88 = m_T.coeff(l-1,l-1), b89 = m_T.coeff(l-1,l),\n                                    b99 = m_T.coeff(l,l);\n\n        x = ( (a88/b88 - a11/b11)*(a99/b99 - a11/b11) - (a89/b99)*(a98/b88) + (a98/b88)*(b89/b99)*(a11/b11) ) * (b11/a21)\n          + a12/b22 - (a11/b11)*(b12/b22);\n        y = (a22/b22-a11/b11) - (a21/b11)*(b12/b22) - (a88/b88-a11/b11) - (a99/b99-a11/b11) + (a98/b88)*(b89/b99);\n        z = a32/b22;\n      }\n\n      JRs G;\n\n      for (Index k=f; k<=l-2; k++)\n      {\n        // variables for Householder reflections\n        Vector2s essential2;\n        Scalar tau, beta;\n\n        Vector3s hr(x,y,z);\n\n        // Q_k to annihilate S(k+1,k-1) and S(k+2,k-1)\n        hr.makeHouseholderInPlace(tau, beta);\n        essential2 = hr.template bottomRows<2>();\n        Index fc=(std::max)(k-1,Index(0));  // first col to update\n        m_S.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data());\n        m_T.template middleRows<3>(k).rightCols(dim-fc).applyHouseholderOnTheLeft(essential2, tau, m_workspace.data());\n        if (m_computeQZ)\n          m_Q.template middleCols<3>(k).applyHouseholderOnTheRight(essential2, tau, m_workspace.data());\n        if (k>f)\n          m_S.coeffRef(k+2,k-1) = m_S.coeffRef(k+1,k-1) = Scalar(0.0);\n\n        // 
Z_{k1} to annihilate T(k+2,k+1) and T(k+2,k)\n        hr << m_T.coeff(k+2,k+2),m_T.coeff(k+2,k),m_T.coeff(k+2,k+1);\n        hr.makeHouseholderInPlace(tau, beta);\n        essential2 = hr.template bottomRows<2>();\n        {\n          Index lr = (std::min)(k+4,dim); // last row to update\n          Map<Matrix<Scalar,Dynamic,1> > tmp(m_workspace.data(),lr);\n          // S\n          tmp = m_S.template middleCols<2>(k).topRows(lr) * essential2;\n          tmp += m_S.col(k+2).head(lr);\n          m_S.col(k+2).head(lr) -= tau*tmp;\n          m_S.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint();\n          // T\n          tmp = m_T.template middleCols<2>(k).topRows(lr) * essential2;\n          tmp += m_T.col(k+2).head(lr);\n          m_T.col(k+2).head(lr) -= tau*tmp;\n          m_T.template middleCols<2>(k).topRows(lr) -= (tau*tmp) * essential2.adjoint();\n        }\n        if (m_computeQZ)\n        {\n          // Z\n          Map<Matrix<Scalar,1,Dynamic> > tmp(m_workspace.data(),dim);\n          tmp = essential2.adjoint()*(m_Z.template middleRows<2>(k));\n          tmp += m_Z.row(k+2);\n          m_Z.row(k+2) -= tau*tmp;\n          m_Z.template middleRows<2>(k) -= essential2 * (tau*tmp);\n        }\n        m_T.coeffRef(k+2,k) = m_T.coeffRef(k+2,k+1) = Scalar(0.0);\n\n        // Z_{k2} to annihilate T(k+1,k)\n        G.makeGivens(m_T.coeff(k+1,k+1), m_T.coeff(k+1,k));\n        m_S.applyOnTheRight(k+1,k,G);\n        m_T.applyOnTheRight(k+1,k,G);\n        // update Z\n        if (m_computeQZ)\n          m_Z.applyOnTheLeft(k+1,k,G.adjoint());\n        m_T.coeffRef(k+1,k) = Scalar(0.0);\n\n        // update x,y,z\n        x = m_S.coeff(k+1,k);\n        y = m_S.coeff(k+2,k);\n        if (k < l-2)\n          z = m_S.coeff(k+3,k);\n      } // loop over k\n\n      // Q_{n-1} to annihilate y = S(l,l-2)\n      G.makeGivens(x,y);\n      m_S.applyOnTheLeft(l-1,l,G.adjoint());\n      m_T.applyOnTheLeft(l-1,l,G.adjoint());\n      if (m_computeQZ)\n       
 m_Q.applyOnTheRight(l-1,l,G);\n      m_S.coeffRef(l,l-2) = Scalar(0.0);\n\n      // Z_{n-1} to annihilate T(l,l-1)\n      G.makeGivens(m_T.coeff(l,l),m_T.coeff(l,l-1));\n      m_S.applyOnTheRight(l,l-1,G);\n      m_T.applyOnTheRight(l,l-1,G);\n      if (m_computeQZ)\n        m_Z.applyOnTheLeft(l,l-1,G.adjoint());\n      m_T.coeffRef(l,l-1) = Scalar(0.0);\n    }\n\n  template<typename MatrixType>\n    RealQZ<MatrixType>& RealQZ<MatrixType>::compute(const MatrixType& A_in, const MatrixType& B_in, bool computeQZ)\n    {\n\n      const Index dim = A_in.cols();\n\n      eigen_assert (A_in.rows()==dim && A_in.cols()==dim \n          && B_in.rows()==dim && B_in.cols()==dim \n          && \"Need square matrices of the same dimension\");\n\n      m_isInitialized = true;\n      m_computeQZ = computeQZ;\n      m_S = A_in; m_T = B_in;\n      m_workspace.resize(dim*2);\n      m_global_iter = 0;\n\n      // entrance point: hessenberg triangular decomposition\n      hessenbergTriangular();\n      // compute L1 vector norms of T, S into m_normOfS, m_normOfT\n      computeNorms();\n\n      Index l = dim-1, \n            f, \n            local_iter = 0;\n\n      while (l>0 && local_iter<m_maxIters)\n      {\n        f = findSmallSubdiagEntry(l);\n        // now rows and columns f..l (including) decouple from the rest of the problem\n        if (f>0) m_S.coeffRef(f,f-1) = Scalar(0.0);\n        if (f == l) // One root found\n        {\n          l--;\n          local_iter = 0;\n        }\n        else if (f == l-1) // Two roots found\n        {\n          splitOffTwoRows(f);\n          l -= 2;\n          local_iter = 0;\n        }\n        else // No convergence yet\n        {\n          // if there's zero on diagonal of T, we can isolate an eigenvalue with Givens rotations\n          Index z = findSmallDiagEntry(f,l);\n          if (z>=f)\n          {\n            // zero found\n            pushDownZero(z,f,l);\n          }\n          else\n          {\n            // We are sure 
now that S.block(f,f, l-f+1,l-f+1) is unreduced upper-Hessenberg \n            // and T.block(f,f, l-f+1,l-f+1) is invertible upper-triangular, which allows to\n            // apply a QR-like iteration to rows and columns f..l.\n            step(f,l, local_iter);\n            local_iter++;\n            m_global_iter++;\n          }\n        }\n      }\n      // check if we converged before reaching iterations limit\n      m_info = (local_iter<m_maxIters) ? Success : NoConvergence;\n\n      // For each non triangular 2x2 diagonal block of S,\n      //    reduce the respective 2x2 diagonal block of T to positive diagonal form using 2x2 SVD.\n      // This step is not mandatory for QZ, but it does help further extraction of eigenvalues/eigenvectors,\n      // and is on par with Lapack/Matlab QZ.\n      if(m_info==Success)\n      {\n        for(Index i=0; i<dim-1; ++i)\n        {\n          if(m_S.coeff(i+1, i) != Scalar(0))\n          {\n            JacobiRotation<Scalar> j_left, j_right;\n            internal::real_2x2_jacobi_svd(m_T, i, i+1, &j_left, &j_right);\n\n            // Apply resulting Jacobi rotations\n            m_S.applyOnTheLeft(i,i+1,j_left);\n            m_S.applyOnTheRight(i,i+1,j_right);\n            m_T.applyOnTheLeft(i,i+1,j_left);\n            m_T.applyOnTheRight(i,i+1,j_right);\n            m_T(i+1,i) = m_T(i,i+1) = Scalar(0);\n\n            if(m_computeQZ) {\n              m_Q.applyOnTheRight(i,i+1,j_left.transpose());\n              m_Z.applyOnTheLeft(i,i+1,j_right.transpose());\n            }\n\n            i++;\n          }\n        }\n      }\n\n      return *this;\n    } // end compute\n\n} // end namespace Eigen\n\n#endif //EIGEN_REAL_QZ\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Eigenvalues/RealSchur.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010,2012 Jitse Niesen <jitse@maths.leeds.ac.uk>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_REAL_SCHUR_H\n#define EIGEN_REAL_SCHUR_H\n\n#include \"./HessenbergDecomposition.h\"\n\nnamespace Eigen { \n\n/** \\eigenvalues_module \\ingroup Eigenvalues_Module\n  *\n  *\n  * \\class RealSchur\n  *\n  * \\brief Performs a real Schur decomposition of a square matrix\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are computing the\n  * real Schur decomposition; this is expected to be an instantiation of the\n  * Matrix class template.\n  *\n  * Given a real square matrix A, this class computes the real Schur\n  * decomposition: \\f$ A = U T U^T \\f$ where U is a real orthogonal matrix and\n  * T is a real quasi-triangular matrix. An orthogonal matrix is a matrix whose\n  * inverse is equal to its transpose, \\f$ U^{-1} = U^T \\f$. A quasi-triangular\n  * matrix is a block-triangular matrix whose diagonal consists of 1-by-1\n  * blocks and 2-by-2 blocks with complex eigenvalues. The eigenvalues of the\n  * blocks on the diagonal of T are the same as the eigenvalues of the matrix\n  * A, and thus the real Schur decomposition is used in EigenSolver to compute\n  * the eigendecomposition of a matrix.\n  *\n  * Call the function compute() to compute the real Schur decomposition of a\n  * given matrix. Alternatively, you can use the RealSchur(const MatrixType&, bool)\n  * constructor which computes the real Schur decomposition at construction\n  * time. 
Once the decomposition is computed, you can use the matrixU() and\n  * matrixT() functions to retrieve the matrices U and T in the decomposition.\n  *\n  * The documentation of RealSchur(const MatrixType&, bool) contains an example\n  * of the typical use of this class.\n  *\n  * \\note The implementation is adapted from\n  * <a href=\"http://math.nist.gov/javanumerics/jama/\">JAMA</a> (public domain).\n  * Their code is based on EISPACK.\n  *\n  * \\sa class ComplexSchur, class EigenSolver, class ComplexEigenSolver\n  */\ntemplate<typename _MatrixType> class RealSchur\n{\n  public:\n    typedef _MatrixType MatrixType;\n    enum {\n      RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      Options = MatrixType::Options,\n      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n    typedef typename MatrixType::Scalar Scalar;\n    typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;\n    typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n\n    typedef Matrix<ComplexScalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> EigenvalueType;\n    typedef Matrix<Scalar, ColsAtCompileTime, 1, Options & ~RowMajor, MaxColsAtCompileTime, 1> ColumnVectorType;\n\n    /** \\brief Default constructor.\n      *\n      * \\param [in] size  Positive integer, size of the matrix whose Schur decomposition will be computed.\n      *\n      * The default constructor is useful in cases in which the user intends to\n      * perform decompositions via compute().  The \\p size parameter is only\n      * used as a hint. It is not an error to give a wrong \\p size, but it may\n      * impair performance.\n      *\n      * \\sa compute() for an example.\n      */\n    explicit RealSchur(Index size = RowsAtCompileTime==Dynamic ? 
1 : RowsAtCompileTime)\n            : m_matT(size, size),\n              m_matU(size, size),\n              m_workspaceVector(size),\n              m_hess(size),\n              m_isInitialized(false),\n              m_matUisUptodate(false),\n              m_maxIters(-1)\n    { }\n\n    /** \\brief Constructor; computes real Schur decomposition of given matrix. \n      * \n      * \\param[in]  matrix    Square matrix whose Schur decomposition is to be computed.\n      * \\param[in]  computeU  If true, both T and U are computed; if false, only T is computed.\n      *\n      * This constructor calls compute() to compute the Schur decomposition.\n      *\n      * Example: \\include RealSchur_RealSchur_MatrixType.cpp\n      * Output: \\verbinclude RealSchur_RealSchur_MatrixType.out\n      */\n    template<typename InputType>\n    explicit RealSchur(const EigenBase<InputType>& matrix, bool computeU = true)\n            : m_matT(matrix.rows(),matrix.cols()),\n              m_matU(matrix.rows(),matrix.cols()),\n              m_workspaceVector(matrix.rows()),\n              m_hess(matrix.rows()),\n              m_isInitialized(false),\n              m_matUisUptodate(false),\n              m_maxIters(-1)\n    {\n      compute(matrix.derived(), computeU);\n    }\n\n    /** \\brief Returns the orthogonal matrix in the Schur decomposition. 
\n      *\n      * \\returns A const reference to the matrix U.\n      *\n      * \\pre Either the constructor RealSchur(const MatrixType&, bool) or the\n      * member function compute(const MatrixType&, bool) has been called before\n      * to compute the Schur decomposition of a matrix, and \\p computeU was set\n      * to true (the default value).\n      *\n      * \\sa RealSchur(const MatrixType&, bool) for an example\n      */\n    const MatrixType& matrixU() const\n    {\n      eigen_assert(m_isInitialized && \"RealSchur is not initialized.\");\n      eigen_assert(m_matUisUptodate && \"The matrix U has not been computed during the RealSchur decomposition.\");\n      return m_matU;\n    }\n\n    /** \\brief Returns the quasi-triangular matrix in the Schur decomposition. \n      *\n      * \\returns A const reference to the matrix T.\n      *\n      * \\pre Either the constructor RealSchur(const MatrixType&, bool) or the\n      * member function compute(const MatrixType&, bool) has been called before\n      * to compute the Schur decomposition of a matrix.\n      *\n      * \\sa RealSchur(const MatrixType&, bool) for an example\n      */\n    const MatrixType& matrixT() const\n    {\n      eigen_assert(m_isInitialized && \"RealSchur is not initialized.\");\n      return m_matT;\n    }\n  \n    /** \\brief Computes Schur decomposition of given matrix. \n      * \n      * \\param[in]  matrix    Square matrix whose Schur decomposition is to be computed.\n      * \\param[in]  computeU  If true, both T and U are computed; if false, only T is computed.\n      * \\returns    Reference to \\c *this\n      *\n      * The Schur decomposition is computed by first reducing the matrix to\n      * Hessenberg form using the class HessenbergDecomposition. The Hessenberg\n      * matrix is then reduced to triangular form by performing Francis QR\n      * iterations with implicit double shift. 
The cost of computing the Schur\n      * decomposition depends on the number of iterations; as a rough guide, it\n      * may be taken to be \\f$25n^3\\f$ flops if \\a computeU is true and\n      * \\f$10n^3\\f$ flops if \\a computeU is false.\n      *\n      * Example: \\include RealSchur_compute.cpp\n      * Output: \\verbinclude RealSchur_compute.out\n      *\n      * \\sa compute(const MatrixType&, bool, Index)\n      */\n    template<typename InputType>\n    RealSchur& compute(const EigenBase<InputType>& matrix, bool computeU = true);\n\n    /** \\brief Computes Schur decomposition of a Hessenberg matrix H = Z T Z^T\n     *  \\param[in] matrixH Matrix in Hessenberg form H\n     *  \\param[in] matrixQ orthogonal matrix Q that transforms a matrix A to H : A = Q H Q^T\n     *  \\param computeU Computes the matrix U of the Schur vectors\n     * \\return Reference to \\c *this\n     * \n     *  This routine assumes that the matrix is already reduced in Hessenberg form matrixH\n     *  using either the class HessenbergDecomposition or another means. 
\n     *  It computes the upper quasi-triangular matrix T of the Schur decomposition of H\n     *  When computeU is true, this routine computes the matrix U such that \n     *  A = U T U^T =  (QZ) T (QZ)^T = Q H Q^T where A is the initial matrix\n     * \n     * NOTE Q is referenced if computeU is true; so, if the initial orthogonal matrix\n     * is not available, the user should give an identity matrix (Q.setIdentity())\n     * \n     * \\sa compute(const MatrixType&, bool)\n     */\n    template<typename HessMatrixType, typename OrthMatrixType>\n    RealSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ,  bool computeU);\n    /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was successful, \\c NoConvergence otherwise.\n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"RealSchur is not initialized.\");\n      return m_info;\n    }\n\n    /** \\brief Sets the maximum number of iterations allowed. \n      *\n      * If not specified by the user, the maximum number of iterations is m_maxIterationsPerRow times the size\n      * of the matrix.\n      */\n    RealSchur& setMaxIterations(Index maxIters)\n    {\n      m_maxIters = maxIters;\n      return *this;\n    }\n\n    /** \\brief Returns the maximum number of iterations. */\n    Index getMaxIterations()\n    {\n      return m_maxIters;\n    }\n\n    /** \\brief Maximum number of iterations per row.\n      *\n      * If not otherwise specified, the maximum number of iterations is this number times the size of the\n      * matrix. 
It is currently set to 40.\n      */\n    static const int m_maxIterationsPerRow = 40;\n\n  private:\n    \n    MatrixType m_matT;\n    MatrixType m_matU;\n    ColumnVectorType m_workspaceVector;\n    HessenbergDecomposition<MatrixType> m_hess;\n    ComputationInfo m_info;\n    bool m_isInitialized;\n    bool m_matUisUptodate;\n    Index m_maxIters;\n\n    typedef Matrix<Scalar,3,1> Vector3s;\n\n    Scalar computeNormOfT();\n    Index findSmallSubdiagEntry(Index iu);\n    void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);\n    void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);\n    void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);\n    void performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace);\n};\n\n\ntemplate<typename MatrixType>\ntemplate<typename InputType>\nRealSchur<MatrixType>& RealSchur<MatrixType>::compute(const EigenBase<InputType>& matrix, bool computeU)\n{\n  const Scalar considerAsZero = (std::numeric_limits<Scalar>::min)();\n\n  eigen_assert(matrix.cols() == matrix.rows());\n  Index maxIters = m_maxIters;\n  if (maxIters == -1)\n    maxIters = m_maxIterationsPerRow * matrix.rows();\n\n  Scalar scale = matrix.derived().cwiseAbs().maxCoeff();\n  if(scale<considerAsZero)\n  {\n    m_matT.setZero(matrix.rows(),matrix.cols());\n    if(computeU)\n      m_matU.setIdentity(matrix.rows(),matrix.cols());\n    m_info = Success;\n    m_isInitialized = true;\n    m_matUisUptodate = computeU;\n    return *this;\n  }\n\n  // Step 1. Reduce to Hessenberg form\n  m_hess.compute(matrix.derived()/scale);\n\n  // Step 2. 
Reduce to real Schur form  \n  computeFromHessenberg(m_hess.matrixH(), m_hess.matrixQ(), computeU);\n\n  m_matT *= scale;\n  \n  return *this;\n}\ntemplate<typename MatrixType>\ntemplate<typename HessMatrixType, typename OrthMatrixType>\nRealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ,  bool computeU)\n{\n  using std::abs;\n\n  m_matT = matrixH;\n  if(computeU)\n    m_matU = matrixQ;\n  \n  Index maxIters = m_maxIters;\n  if (maxIters == -1)\n    maxIters = m_maxIterationsPerRow * matrixH.rows();\n  m_workspaceVector.resize(m_matT.cols());\n  Scalar* workspace = &m_workspaceVector.coeffRef(0);\n\n  // The matrix m_matT is divided in three parts. \n  // Rows 0,...,il-1 are decoupled from the rest because m_matT(il,il-1) is zero. \n  // Rows il,...,iu is the part we are working on (the active window).\n  // Rows iu+1,...,end are already brought in triangular form.\n  Index iu = m_matT.cols() - 1;\n  Index iter = 0;      // iteration count for current eigenvalue\n  Index totalIter = 0; // iteration count for whole matrix\n  Scalar exshift(0);   // sum of exceptional shifts\n  Scalar norm = computeNormOfT();\n\n  if(norm!=0)\n  {\n    while (iu >= 0)\n    {\n      Index il = findSmallSubdiagEntry(iu);\n\n      // Check for convergence\n      if (il == iu) // One root found\n      {\n        m_matT.coeffRef(iu,iu) = m_matT.coeff(iu,iu) + exshift;\n        if (iu > 0)\n          m_matT.coeffRef(iu, iu-1) = Scalar(0);\n        iu--;\n        iter = 0;\n      }\n      else if (il == iu-1) // Two roots found\n      {\n        splitOffTwoRows(iu, computeU, exshift);\n        iu -= 2;\n        iter = 0;\n      }\n      else // No convergence yet\n      {\n        // The firstHouseholderVector vector has to be initialized to something to get rid of a silly GCC warning (-O1 -Wall -DNDEBUG )\n        Vector3s firstHouseholderVector(0,0,0), shiftInfo;\n        computeShift(iu, iter, exshift, 
shiftInfo);\n        iter = iter + 1;\n        totalIter = totalIter + 1;\n        if (totalIter > maxIters) break;\n        Index im;\n        initFrancisQRStep(il, iu, shiftInfo, im, firstHouseholderVector);\n        performFrancisQRStep(il, im, iu, computeU, firstHouseholderVector, workspace);\n      }\n    }\n  }\n  if(totalIter <= maxIters)\n    m_info = Success;\n  else\n    m_info = NoConvergence;\n\n  m_isInitialized = true;\n  m_matUisUptodate = computeU;\n  return *this;\n}\n\n/** \\internal Computes and returns vector L1 norm of T */\ntemplate<typename MatrixType>\ninline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()\n{\n  const Index size = m_matT.cols();\n  // FIXME to be efficient the following would requires a triangular reduxion code\n  // Scalar norm = m_matT.upper().cwiseAbs().sum() \n  //               + m_matT.bottomLeftCorner(size-1,size-1).diagonal().cwiseAbs().sum();\n  Scalar norm(0);\n  for (Index j = 0; j < size; ++j)\n    norm += m_matT.col(j).segment(0, (std::min)(size,j+2)).cwiseAbs().sum();\n  return norm;\n}\n\n/** \\internal Look for single small sub-diagonal element and returns its index */\ntemplate<typename MatrixType>\ninline Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu)\n{\n  using std::abs;\n  Index res = iu;\n  while (res > 0)\n  {\n    Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res));\n    if (abs(m_matT.coeff(res,res-1)) <= NumTraits<Scalar>::epsilon() * s)\n      break;\n    res--;\n  }\n  return res;\n}\n\n/** \\internal Update T given that rows iu-1 and iu decouple from the rest. 
*/\ntemplate<typename MatrixType>\ninline void RealSchur<MatrixType>::splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift)\n{\n  using std::sqrt;\n  using std::abs;\n  const Index size = m_matT.cols();\n\n  // The eigenvalues of the 2x2 matrix [a b; c d] are \n  // trace +/- sqrt(discr/4) where discr = tr^2 - 4*det, tr = a + d, det = ad - bc\n  Scalar p = Scalar(0.5) * (m_matT.coeff(iu-1,iu-1) - m_matT.coeff(iu,iu));\n  Scalar q = p * p + m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu);   // q = tr^2 / 4 - det = discr/4\n  m_matT.coeffRef(iu,iu) += exshift;\n  m_matT.coeffRef(iu-1,iu-1) += exshift;\n\n  if (q >= Scalar(0)) // Two real eigenvalues\n  {\n    Scalar z = sqrt(abs(q));\n    JacobiRotation<Scalar> rot;\n    if (p >= Scalar(0))\n      rot.makeGivens(p + z, m_matT.coeff(iu, iu-1));\n    else\n      rot.makeGivens(p - z, m_matT.coeff(iu, iu-1));\n\n    m_matT.rightCols(size-iu+1).applyOnTheLeft(iu-1, iu, rot.adjoint());\n    m_matT.topRows(iu+1).applyOnTheRight(iu-1, iu, rot);\n    m_matT.coeffRef(iu, iu-1) = Scalar(0); \n    if (computeU)\n      m_matU.applyOnTheRight(iu-1, iu, rot);\n  }\n\n  if (iu > 1) \n    m_matT.coeffRef(iu-1, iu-2) = Scalar(0);\n}\n\n/** \\internal Form shift in shiftInfo, and update exshift if an exceptional shift is performed. 
*/\ntemplate<typename MatrixType>\ninline void RealSchur<MatrixType>::computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo)\n{\n  using std::sqrt;\n  using std::abs;\n  shiftInfo.coeffRef(0) = m_matT.coeff(iu,iu);\n  shiftInfo.coeffRef(1) = m_matT.coeff(iu-1,iu-1);\n  shiftInfo.coeffRef(2) = m_matT.coeff(iu,iu-1) * m_matT.coeff(iu-1,iu);\n\n  // Wilkinson's original ad hoc shift\n  if (iter == 10)\n  {\n    exshift += shiftInfo.coeff(0);\n    for (Index i = 0; i <= iu; ++i)\n      m_matT.coeffRef(i,i) -= shiftInfo.coeff(0);\n    Scalar s = abs(m_matT.coeff(iu,iu-1)) + abs(m_matT.coeff(iu-1,iu-2));\n    shiftInfo.coeffRef(0) = Scalar(0.75) * s;\n    shiftInfo.coeffRef(1) = Scalar(0.75) * s;\n    shiftInfo.coeffRef(2) = Scalar(-0.4375) * s * s;\n  }\n\n  // MATLAB's new ad hoc shift\n  if (iter == 30)\n  {\n    Scalar s = (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);\n    s = s * s + shiftInfo.coeff(2);\n    if (s > Scalar(0))\n    {\n      s = sqrt(s);\n      if (shiftInfo.coeff(1) < shiftInfo.coeff(0))\n        s = -s;\n      s = s + (shiftInfo.coeff(1) - shiftInfo.coeff(0)) / Scalar(2.0);\n      s = shiftInfo.coeff(0) - shiftInfo.coeff(2) / s;\n      exshift += s;\n      for (Index i = 0; i <= iu; ++i)\n        m_matT.coeffRef(i,i) -= s;\n      shiftInfo.setConstant(Scalar(0.964));\n    }\n  }\n}\n\n/** \\internal Compute index im at which Francis QR step starts and the first Householder vector. 
*/\ntemplate<typename MatrixType>\ninline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector)\n{\n  using std::abs;\n  Vector3s& v = firstHouseholderVector; // alias to save typing\n\n  for (im = iu-2; im >= il; --im)\n  {\n    const Scalar Tmm = m_matT.coeff(im,im);\n    const Scalar r = shiftInfo.coeff(0) - Tmm;\n    const Scalar s = shiftInfo.coeff(1) - Tmm;\n    v.coeffRef(0) = (r * s - shiftInfo.coeff(2)) / m_matT.coeff(im+1,im) + m_matT.coeff(im,im+1);\n    v.coeffRef(1) = m_matT.coeff(im+1,im+1) - Tmm - r - s;\n    v.coeffRef(2) = m_matT.coeff(im+2,im+1);\n    if (im == il) {\n      break;\n    }\n    const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2)));\n    const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1)));\n    if (abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)\n      break;\n  }\n}\n\n/** \\internal Perform a Francis QR step involving rows il:iu and columns im:iu. 
*/\ntemplate<typename MatrixType>\ninline void RealSchur<MatrixType>::performFrancisQRStep(Index il, Index im, Index iu, bool computeU, const Vector3s& firstHouseholderVector, Scalar* workspace)\n{\n  eigen_assert(im >= il);\n  eigen_assert(im <= iu-2);\n\n  const Index size = m_matT.cols();\n\n  for (Index k = im; k <= iu-2; ++k)\n  {\n    bool firstIteration = (k == im);\n\n    Vector3s v;\n    if (firstIteration)\n      v = firstHouseholderVector;\n    else\n      v = m_matT.template block<3,1>(k,k-1);\n\n    Scalar tau, beta;\n    Matrix<Scalar, 2, 1> ess;\n    v.makeHouseholder(ess, tau, beta);\n    \n    if (beta != Scalar(0)) // if v is not zero\n    {\n      if (firstIteration && k > il)\n        m_matT.coeffRef(k,k-1) = -m_matT.coeff(k,k-1);\n      else if (!firstIteration)\n        m_matT.coeffRef(k,k-1) = beta;\n\n      // These Householder transformations form the O(n^3) part of the algorithm\n      m_matT.block(k, k, 3, size-k).applyHouseholderOnTheLeft(ess, tau, workspace);\n      m_matT.block(0, k, (std::min)(iu,k+3) + 1, 3).applyHouseholderOnTheRight(ess, tau, workspace);\n      if (computeU)\n        m_matU.block(0, k, size, 3).applyHouseholderOnTheRight(ess, tau, workspace);\n    }\n  }\n\n  Matrix<Scalar, 2, 1> v = m_matT.template block<2,1>(iu-1, iu-2);\n  Scalar tau, beta;\n  Matrix<Scalar, 1, 1> ess;\n  v.makeHouseholder(ess, tau, beta);\n\n  if (beta != Scalar(0)) // if v is not zero\n  {\n    m_matT.coeffRef(iu-1, iu-2) = beta;\n    m_matT.block(iu-1, iu-1, 2, size-iu+1).applyHouseholderOnTheLeft(ess, tau, workspace);\n    m_matT.block(0, iu-1, iu+1, 2).applyHouseholderOnTheRight(ess, tau, workspace);\n    if (computeU)\n      m_matU.block(0, iu-1, size, 2).applyHouseholderOnTheRight(ess, tau, workspace);\n  }\n\n  // clean up pollution due to round-off errors\n  for (Index i = im+2; i <= iu; ++i)\n  {\n    m_matT.coeffRef(i,i-2) = Scalar(0);\n    if (i > im+2)\n      m_matT.coeffRef(i,i-3) = Scalar(0);\n  }\n}\n\n} // end namespace 
Eigen\n\n#endif // EIGEN_REAL_SCHUR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Eigenvalues/RealSchur_LAPACKE.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to LAPACKe\n *    Real Schur needed to real unsymmetrical eigenvalues/eigenvectors.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_REAL_SCHUR_LAPACKE_H\n#define EIGEN_REAL_SCHUR_LAPACKE_H\n\nnamespace Eigen { \n\n/** \\internal Specialization for the data types supported by LAPACKe */\n\n#define EIGEN_LAPACKE_SCHUR_REAL(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX, LAPACKE_PREFIX_U, EIGCOLROW, LAPACKE_COLROW) \\\ntemplate<> template<typename InputType> inline \\\nRealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \\\nRealSchur<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const EigenBase<InputType>& matrix, bool computeU) \\\n{ \\\n  eigen_assert(matrix.cols() == matrix.rows()); \\\n\\\n  lapack_int n = internal::convert_index<lapack_int>(matrix.cols()), sdim, info; \\\n  lapack_int matrix_order = LAPACKE_COLROW; \\\n  char jobvs, sort='N'; \\\n  LAPACK_##LAPACKE_PREFIX_U##_SELECT2 select = 0; \\\n  jobvs = (computeU) ? 
'V' : 'N'; \\\n  m_matU.resize(n, n); \\\n  lapack_int ldvs  = internal::convert_index<lapack_int>(m_matU.outerStride()); \\\n  m_matT = matrix; \\\n  lapack_int lda = internal::convert_index<lapack_int>(m_matT.outerStride()); \\\n  Matrix<EIGTYPE, Dynamic, Dynamic> wr, wi; \\\n  wr.resize(n, 1); wi.resize(n, 1); \\\n  info = LAPACKE_##LAPACKE_PREFIX##gees( matrix_order, jobvs, sort, select, n, (LAPACKE_TYPE*)m_matT.data(), lda, &sdim, (LAPACKE_TYPE*)wr.data(), (LAPACKE_TYPE*)wi.data(), (LAPACKE_TYPE*)m_matU.data(), ldvs ); \\\n  if(info == 0) \\\n    m_info = Success; \\\n  else \\\n    m_info = NoConvergence; \\\n\\\n  m_isInitialized = true; \\\n  m_matUisUptodate = computeU; \\\n  return *this; \\\n\\\n}\n\nEIGEN_LAPACKE_SCHUR_REAL(double,   double, d, D, ColMajor, LAPACK_COL_MAJOR)\nEIGEN_LAPACKE_SCHUR_REAL(float,    float,  s, S, ColMajor, LAPACK_COL_MAJOR)\nEIGEN_LAPACKE_SCHUR_REAL(double,   double, d, D, RowMajor, LAPACK_ROW_MAJOR)\nEIGEN_LAPACKE_SCHUR_REAL(float,    float,  s, S, RowMajor, LAPACK_ROW_MAJOR)\n\n} // end namespace Eigen\n\n#endif // EIGEN_REAL_SCHUR_LAPACKE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SELFADJOINTEIGENSOLVER_H\n#define EIGEN_SELFADJOINTEIGENSOLVER_H\n\n#include \"./Tridiagonalization.h\"\n\nnamespace Eigen { \n\ntemplate<typename _MatrixType>\nclass GeneralizedSelfAdjointEigenSolver;\n\nnamespace internal {\ntemplate<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues;\ntemplate<typename MatrixType, typename DiagType, typename SubDiagType>\nComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec);\n}\n\n/** \\eigenvalues_module \\ingroup Eigenvalues_Module\n  *\n  *\n  * \\class SelfAdjointEigenSolver\n  *\n  * \\brief Computes eigenvalues and eigenvectors of selfadjoint matrices\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are computing the\n  * eigendecomposition; this is expected to be an instantiation of the Matrix\n  * class template.\n  *\n  * A matrix \\f$ A \\f$ is selfadjoint if it equals its adjoint. For real\n  * matrices, this means that the matrix is symmetric: it equals its\n  * transpose. This class computes the eigenvalues and eigenvectors of a\n  * selfadjoint matrix. These are the scalars \\f$ \\lambda \\f$ and vectors\n  * \\f$ v \\f$ such that \\f$ Av = \\lambda v \\f$.  The eigenvalues of a\n  * selfadjoint matrix are always real. 
If \\f$ D \\f$ is a diagonal matrix with\n  * the eigenvalues on the diagonal, and \\f$ V \\f$ is a matrix with the\n  * eigenvectors as its columns, then \\f$ A = V D V^{-1} \\f$ (for selfadjoint\n  * matrices, the matrix \\f$ V \\f$ is always invertible). This is called the\n  * eigendecomposition.\n  *\n  * The algorithm exploits the fact that the matrix is selfadjoint, making it\n  * faster and more accurate than the general purpose eigenvalue algorithms\n  * implemented in EigenSolver and ComplexEigenSolver.\n  *\n  * Only the \\b lower \\b triangular \\b part of the input matrix is referenced.\n  *\n  * Call the function compute() to compute the eigenvalues and eigenvectors of\n  * a given matrix. Alternatively, you can use the\n  * SelfAdjointEigenSolver(const MatrixType&, int) constructor which computes\n  * the eigenvalues and eigenvectors at construction time. Once the eigenvalue\n  * and eigenvectors are computed, they can be retrieved with the eigenvalues()\n  * and eigenvectors() functions.\n  *\n  * The documentation for SelfAdjointEigenSolver(const MatrixType&, int)\n  * contains an example of the typical use of this class.\n  *\n  * To solve the \\em generalized eigenvalue problem \\f$ Av = \\lambda Bv \\f$ and\n  * the likes, see the class GeneralizedSelfAdjointEigenSolver.\n  *\n  * \\sa MatrixBase::eigenvalues(), class EigenSolver, class ComplexEigenSolver\n  */\ntemplate<typename _MatrixType> class SelfAdjointEigenSolver\n{\n  public:\n\n    typedef _MatrixType MatrixType;\n    enum {\n      Size = MatrixType::RowsAtCompileTime,\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      Options = MatrixType::Options,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n    \n    /** \\brief Scalar type for matrices of type \\p _MatrixType. 
*/\n    typedef typename MatrixType::Scalar Scalar;\n    typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n    \n    typedef Matrix<Scalar,Size,Size,ColMajor,MaxColsAtCompileTime,MaxColsAtCompileTime> EigenvectorsType;\n\n    /** \\brief Real scalar type for \\p _MatrixType.\n      *\n      * This is just \\c Scalar if #Scalar is real (e.g., \\c float or\n      * \\c double), and the type of the real part of \\c Scalar if #Scalar is\n      * complex.\n      */\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    \n    friend struct internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>;\n\n    /** \\brief Type for vector of eigenvalues as returned by eigenvalues().\n      *\n      * This is a column vector with entries of type #RealScalar.\n      * The length of the vector is the size of \\p _MatrixType.\n      */\n    typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVectorType;\n    typedef Tridiagonalization<MatrixType> TridiagonalizationType;\n    typedef typename TridiagonalizationType::SubDiagonalType SubDiagonalType;\n\n    /** \\brief Default constructor for fixed-size matrices.\n      *\n      * The default constructor is useful in cases in which the user intends to\n      * perform decompositions via compute(). 
This constructor\n      * can only be used if \\p _MatrixType is a fixed-size matrix; use\n      * SelfAdjointEigenSolver(Index) for dynamic-size matrices.\n      *\n      * Example: \\include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp\n      * Output: \\verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out\n      */\n    EIGEN_DEVICE_FUNC\n    SelfAdjointEigenSolver()\n        : m_eivec(),\n          m_eivalues(),\n          m_subdiag(),\n          m_isInitialized(false)\n    { }\n\n    /** \\brief Constructor, pre-allocates memory for dynamic-size matrices.\n      *\n      * \\param [in]  size  Positive integer, size of the matrix whose\n      * eigenvalues and eigenvectors will be computed.\n      *\n      * This constructor is useful for dynamic-size matrices, when the user\n      * intends to perform decompositions via compute(). The \\p size\n      * parameter is only used as a hint. It is not an error to give a wrong\n      * \\p size, but it may impair performance.\n      *\n      * \\sa compute() for an example\n      */\n    EIGEN_DEVICE_FUNC\n    explicit SelfAdjointEigenSolver(Index size)\n        : m_eivec(size, size),\n          m_eivalues(size),\n          m_subdiag(size > 1 ? size - 1 : 1),\n          m_isInitialized(false)\n    {}\n\n    /** \\brief Constructor; computes eigendecomposition of given matrix.\n      *\n      * \\param[in]  matrix  Selfadjoint matrix whose eigendecomposition is to\n      *    be computed. Only the lower triangular part of the matrix is referenced.\n      * \\param[in]  options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.\n      *\n      * This constructor calls compute(const MatrixType&, int) to compute the\n      * eigenvalues of the matrix \\p matrix. 
The eigenvectors are computed if\n      * \\p options equals #ComputeEigenvectors.\n      *\n      * Example: \\include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp\n      * Output: \\verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out\n      *\n      * \\sa compute(const MatrixType&, int)\n      */\n    template<typename InputType>\n    EIGEN_DEVICE_FUNC\n    explicit SelfAdjointEigenSolver(const EigenBase<InputType>& matrix, int options = ComputeEigenvectors)\n      : m_eivec(matrix.rows(), matrix.cols()),\n        m_eivalues(matrix.cols()),\n        m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),\n        m_isInitialized(false)\n    {\n      compute(matrix.derived(), options);\n    }\n\n    /** \\brief Computes eigendecomposition of given matrix.\n      *\n      * \\param[in]  matrix  Selfadjoint matrix whose eigendecomposition is to\n      *    be computed. Only the lower triangular part of the matrix is referenced.\n      * \\param[in]  options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.\n      * \\returns    Reference to \\c *this\n      *\n      * This function computes the eigenvalues of \\p matrix.  The eigenvalues()\n      * function can be used to retrieve them.  If \\p options equals #ComputeEigenvectors,\n      * then the eigenvectors are also computed and can be retrieved by\n      * calling eigenvectors().\n      *\n      * This implementation uses a symmetric QR algorithm. The matrix is first\n      * reduced to tridiagonal form using the Tridiagonalization class. The\n      * tridiagonal matrix is then brought to diagonal form with implicit\n      * symmetric QR steps with Wilkinson shift. 
Details can be found in\n      * Section 8.3 of Golub \\& Van Loan, <i>%Matrix Computations</i>.\n      *\n      * The cost of the computation is about \\f$ 9n^3 \\f$ if the eigenvectors\n      * are required and \\f$ 4n^3/3 \\f$ if they are not required.\n      *\n      * This method reuses the memory in the SelfAdjointEigenSolver object that\n      * was allocated when the object was constructed, if the size of the\n      * matrix does not change.\n      *\n      * Example: \\include SelfAdjointEigenSolver_compute_MatrixType.cpp\n      * Output: \\verbinclude SelfAdjointEigenSolver_compute_MatrixType.out\n      *\n      * \\sa SelfAdjointEigenSolver(const MatrixType&, int)\n      */\n    template<typename InputType>\n    EIGEN_DEVICE_FUNC\n    SelfAdjointEigenSolver& compute(const EigenBase<InputType>& matrix, int options = ComputeEigenvectors);\n    \n    /** \\brief Computes eigendecomposition of given matrix using a closed-form algorithm\n      *\n      * This is a variant of compute(const MatrixType&, int options) which\n      * directly solves the underlying polynomial equation.\n      * \n      * Currently only 2x2 and 3x3 matrices for which the sizes are known at compile time are supported (e.g., Matrix3d).\n      * \n      * This method is usually significantly faster than the QR iterative algorithm\n      * but it might also be less accurate. 
It is also worth noting that\n      * for 3x3 matrices it involves trigonometric operations which are\n      * not necessarily available for all scalar types.\n      * \n      * For the 3x3 case, we observed the following worst case relative error regarding the eigenvalues:\n      *   - double: 1e-8\n      *   - float:  1e-3\n      *\n      * \\sa compute(const MatrixType&, int options)\n      */\n    EIGEN_DEVICE_FUNC\n    SelfAdjointEigenSolver& computeDirect(const MatrixType& matrix, int options = ComputeEigenvectors);\n\n    /**\n      *\\brief Computes the eigen decomposition from a tridiagonal symmetric matrix\n      *\n      * \\param[in] diag The vector containing the diagonal of the matrix.\n      * \\param[in] subdiag The subdiagonal of the matrix.\n      * \\param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.\n      * \\returns Reference to \\c *this\n      *\n      * This function assumes that the matrix has been reduced to tridiagonal form.\n      *\n      * \\sa compute(const MatrixType&, int) for more information\n      */\n    SelfAdjointEigenSolver& computeFromTridiagonal(const RealVectorType& diag, const SubDiagonalType& subdiag , int options=ComputeEigenvectors);\n\n    /** \\brief Returns the eigenvectors of given matrix.\n      *\n      * \\returns  A const reference to the matrix whose columns are the eigenvectors.\n      *\n      * \\pre The eigenvectors have been computed before.\n      *\n      * Column \\f$ k \\f$ of the returned matrix is an eigenvector corresponding\n      * to eigenvalue number \\f$ k \\f$ as returned by eigenvalues().  The\n      * eigenvectors are normalized to have (Euclidean) norm equal to one. 
If\n      * this object was used to solve the eigenproblem for the selfadjoint\n      * matrix \\f$ A \\f$, then the matrix returned by this function is the\n      * matrix \\f$ V \\f$ in the eigendecomposition \\f$ A = V D V^{-1} \\f$.\n      *\n      * Example: \\include SelfAdjointEigenSolver_eigenvectors.cpp\n      * Output: \\verbinclude SelfAdjointEigenSolver_eigenvectors.out\n      *\n      * \\sa eigenvalues()\n      */\n    EIGEN_DEVICE_FUNC\n    const EigenvectorsType& eigenvectors() const\n    {\n      eigen_assert(m_isInitialized && \"SelfAdjointEigenSolver is not initialized.\");\n      eigen_assert(m_eigenvectorsOk && \"The eigenvectors have not been computed together with the eigenvalues.\");\n      return m_eivec;\n    }\n\n    /** \\brief Returns the eigenvalues of given matrix.\n      *\n      * \\returns A const reference to the column vector containing the eigenvalues.\n      *\n      * \\pre The eigenvalues have been computed before.\n      *\n      * The eigenvalues are repeated according to their algebraic multiplicity,\n      * so there are as many eigenvalues as rows in the matrix. 
The eigenvalues\n      * are sorted in increasing order.\n      *\n      * Example: \\include SelfAdjointEigenSolver_eigenvalues.cpp\n      * Output: \\verbinclude SelfAdjointEigenSolver_eigenvalues.out\n      *\n      * \\sa eigenvectors(), MatrixBase::eigenvalues()\n      */\n    EIGEN_DEVICE_FUNC\n    const RealVectorType& eigenvalues() const\n    {\n      eigen_assert(m_isInitialized && \"SelfAdjointEigenSolver is not initialized.\");\n      return m_eivalues;\n    }\n\n    /** \\brief Computes the positive-definite square root of the matrix.\n      *\n      * \\returns the positive-definite square root of the matrix\n      *\n      * \\pre The eigenvalues and eigenvectors of a positive-definite matrix\n      * have been computed before.\n      *\n      * The square root of a positive-definite matrix \\f$ A \\f$ is the\n      * positive-definite matrix whose square equals \\f$ A \\f$. This function\n      * uses the eigendecomposition \\f$ A = V D V^{-1} \\f$ to compute the\n      * square root as \\f$ A^{1/2} = V D^{1/2} V^{-1} \\f$.\n      *\n      * Example: \\include SelfAdjointEigenSolver_operatorSqrt.cpp\n      * Output: \\verbinclude SelfAdjointEigenSolver_operatorSqrt.out\n      *\n      * \\sa operatorInverseSqrt(), <a href=\"unsupported/group__MatrixFunctions__Module.html\">MatrixFunctions Module</a>\n      */\n    EIGEN_DEVICE_FUNC\n    MatrixType operatorSqrt() const\n    {\n      eigen_assert(m_isInitialized && \"SelfAdjointEigenSolver is not initialized.\");\n      eigen_assert(m_eigenvectorsOk && \"The eigenvectors have not been computed together with the eigenvalues.\");\n      return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint();\n    }\n\n    /** \\brief Computes the inverse square root of the matrix.\n      *\n      * \\returns the inverse positive-definite square root of the matrix\n      *\n      * \\pre The eigenvalues and eigenvectors of a positive-definite matrix\n      * have been computed before.\n      *\n      * 
This function uses the eigendecomposition \\f$ A = V D V^{-1} \\f$ to\n      * compute the inverse square root as \\f$ V D^{-1/2} V^{-1} \\f$. This is\n      * cheaper than first computing the square root with operatorSqrt() and\n      * then its inverse with MatrixBase::inverse().\n      *\n      * Example: \\include SelfAdjointEigenSolver_operatorInverseSqrt.cpp\n      * Output: \\verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out\n      *\n      * \\sa operatorSqrt(), MatrixBase::inverse(), <a href=\"unsupported/group__MatrixFunctions__Module.html\">MatrixFunctions Module</a>\n      */\n    EIGEN_DEVICE_FUNC\n    MatrixType operatorInverseSqrt() const\n    {\n      eigen_assert(m_isInitialized && \"SelfAdjointEigenSolver is not initialized.\");\n      eigen_assert(m_eigenvectorsOk && \"The eigenvectors have not been computed together with the eigenvalues.\");\n      return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint();\n    }\n\n    /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was succesful, \\c NoConvergence otherwise.\n      */\n    EIGEN_DEVICE_FUNC\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"SelfAdjointEigenSolver is not initialized.\");\n      return m_info;\n    }\n\n    /** \\brief Maximum number of iterations.\n      *\n      * The algorithm terminates if it does not converge within m_maxIterations * n iterations, where n\n      * denotes the size of the matrix. 
This value is currently set to 30 (copied from LAPACK).\n      */\n    static const int m_maxIterations = 30;\n\n  protected:\n    static void check_template_parameters()\n    {\n      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);\n    }\n    \n    EigenvectorsType m_eivec;\n    RealVectorType m_eivalues;\n    typename TridiagonalizationType::SubDiagonalType m_subdiag;\n    ComputationInfo m_info;\n    bool m_isInitialized;\n    bool m_eigenvectorsOk;\n};\n\nnamespace internal {\n/** \\internal\n  *\n  * \\eigenvalues_module \\ingroup Eigenvalues_Module\n  *\n  * Performs a QR step on a tridiagonal symmetric matrix represented as a\n  * pair of two vectors \\a diag and \\a subdiag.\n  *\n  * \\param diag the diagonal part of the input selfadjoint tridiagonal matrix\n  * \\param subdiag the sub-diagonal part of the input selfadjoint tridiagonal matrix\n  * \\param start starting index of the submatrix to work on\n  * \\param end last+1 index of the submatrix to work on\n  * \\param matrixQ pointer to the column-major matrix holding the eigenvectors, can be 0\n  * \\param n size of the input matrix\n  *\n  * For compilation efficiency reasons, this procedure does not use eigen expression\n  * for its arguments.\n  *\n  * Implemented from Golub's \"Matrix Computations\", algorithm 8.3.2:\n  * \"implicit symmetric QR step with Wilkinson shift\"\n  */\ntemplate<int StorageOrder,typename RealScalar, typename Scalar, typename Index>\nEIGEN_DEVICE_FUNC\nstatic void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);\n}\n\ntemplate<typename MatrixType>\ntemplate<typename InputType>\nEIGEN_DEVICE_FUNC\nSelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>\n::compute(const EigenBase<InputType>& a_matrix, int options)\n{\n  check_template_parameters();\n  \n  const InputType &matrix(a_matrix.derived());\n  \n  using std::abs;\n  eigen_assert(matrix.cols() == matrix.rows());\n  
eigen_assert((options&~(EigVecMask|GenEigMask))==0\n          && (options&EigVecMask)!=EigVecMask\n          && \"invalid option parameter\");\n  bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;\n  Index n = matrix.cols();\n  m_eivalues.resize(n,1);\n\n  if(n==1)\n  {\n    m_eivec = matrix;\n    m_eivalues.coeffRef(0,0) = numext::real(m_eivec.coeff(0,0));\n    if(computeEigenvectors)\n      m_eivec.setOnes(n,n);\n    m_info = Success;\n    m_isInitialized = true;\n    m_eigenvectorsOk = computeEigenvectors;\n    return *this;\n  }\n\n  // declare some aliases\n  RealVectorType& diag = m_eivalues;\n  EigenvectorsType& mat = m_eivec;\n\n  // map the matrix coefficients to [-1:1] to avoid over- and underflow.\n  mat = matrix.template triangularView<Lower>();\n  RealScalar scale = mat.cwiseAbs().maxCoeff();\n  if(scale==RealScalar(0)) scale = RealScalar(1);\n  mat.template triangularView<Lower>() /= scale;\n  m_subdiag.resize(n-1);\n  internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors);\n\n  m_info = internal::computeFromTridiagonal_impl(diag, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);\n  \n  // scale back the eigen values\n  m_eivalues *= scale;\n\n  m_isInitialized = true;\n  m_eigenvectorsOk = computeEigenvectors;\n  return *this;\n}\n\ntemplate<typename MatrixType>\nSelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>\n::computeFromTridiagonal(const RealVectorType& diag, const SubDiagonalType& subdiag , int options)\n{\n  //TODO : Add an option to scale the values beforehand\n  bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;\n\n  m_eivalues = diag;\n  m_subdiag = subdiag;\n  if (computeEigenvectors)\n  {\n    m_eivec.setIdentity(diag.size(), diag.size());\n  }\n  m_info = internal::computeFromTridiagonal_impl(m_eivalues, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);\n\n  m_isInitialized = true;\n  m_eigenvectorsOk = 
computeEigenvectors;\n  return *this;\n}\n\nnamespace internal {\n/**\n  * \\internal\n  * \\brief Compute the eigendecomposition from a tridiagonal matrix\n  *\n  * \\param[in,out] diag : On input, the diagonal of the matrix, on output the eigenvalues\n  * \\param[in,out] subdiag : The subdiagonal part of the matrix (entries are modified during the decomposition)\n  * \\param[in] maxIterations : the maximum number of iterations\n  * \\param[in] computeEigenvectors : whether the eigenvectors have to be computed or not\n  * \\param[out] eivec : The matrix to store the eigenvectors if computeEigenvectors==true. Must be allocated on input.\n  * \\returns \\c Success or \\c NoConvergence\n  */\ntemplate<typename MatrixType, typename DiagType, typename SubDiagType>\nComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec)\n{\n  using std::abs;\n\n  ComputationInfo info;\n  typedef typename MatrixType::Scalar Scalar;\n\n  Index n = diag.size();\n  Index end = n-1;\n  Index start = 0;\n  Index iter = 0; // total number of iterations\n  \n  typedef typename DiagType::RealScalar RealScalar;\n  const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();\n  const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();\n  \n  while (end>0)\n  {\n    for (Index i = start; i<end; ++i)\n      if (internal::isMuchSmallerThan(abs(subdiag[i]),(abs(diag[i])+abs(diag[i+1])),precision) || abs(subdiag[i]) <= considerAsZero)\n        subdiag[i] = 0;\n\n    // find the largest unreduced block\n    while (end>0 && subdiag[end-1]==RealScalar(0))\n    {\n      end--;\n    }\n    if (end<=0)\n      break;\n\n    // if we spent too many iterations, we give up\n    iter++;\n    if(iter > maxIterations * n) break;\n\n    start = end - 1;\n    while (start>0 && subdiag[start-1]!=0)\n      start--;\n\n    internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? 
RowMajor : ColMajor>(diag.data(), subdiag.data(), start, end, computeEigenvectors ? eivec.data() : (Scalar*)0, n);\n  }\n  if (iter <= maxIterations * n)\n    info = Success;\n  else\n    info = NoConvergence;\n\n  // Sort eigenvalues and corresponding vectors.\n  // TODO make the sort optional ?\n  // TODO use a better sort algorithm !!\n  if (info == Success)\n  {\n    for (Index i = 0; i < n-1; ++i)\n    {\n      Index k;\n      diag.segment(i,n-i).minCoeff(&k);\n      if (k > 0)\n      {\n        std::swap(diag[i], diag[k+i]);\n        if(computeEigenvectors)\n          eivec.col(i).swap(eivec.col(k+i));\n      }\n    }\n  }\n  return info;\n}\n  \ntemplate<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues\n{\n  EIGEN_DEVICE_FUNC\n  static inline void run(SolverType& eig, const typename SolverType::MatrixType& A, int options)\n  { eig.compute(A,options); }\n};\n\ntemplate<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3,false>\n{\n  typedef typename SolverType::MatrixType MatrixType;\n  typedef typename SolverType::RealVectorType VectorType;\n  typedef typename SolverType::Scalar Scalar;\n  typedef typename SolverType::EigenvectorsType EigenvectorsType;\n  \n\n  /** \\internal\n   * Computes the roots of the characteristic polynomial of \\a m.\n   * For numerical stability m.trace() should be near zero and to avoid over- or underflow m should be normalized.\n   */\n  EIGEN_DEVICE_FUNC\n  static inline void computeRoots(const MatrixType& m, VectorType& roots)\n  {\n    EIGEN_USING_STD_MATH(sqrt)\n    EIGEN_USING_STD_MATH(atan2)\n    EIGEN_USING_STD_MATH(cos)\n    EIGEN_USING_STD_MATH(sin)\n    const Scalar s_inv3 = Scalar(1)/Scalar(3);\n    const Scalar s_sqrt3 = sqrt(Scalar(3));\n\n    // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0.  
The\n    // eigenvalues are the roots to this equation, all guaranteed to be\n    // real-valued, because the matrix is symmetric.\n    Scalar c0 = m(0,0)*m(1,1)*m(2,2) + Scalar(2)*m(1,0)*m(2,0)*m(2,1) - m(0,0)*m(2,1)*m(2,1) - m(1,1)*m(2,0)*m(2,0) - m(2,2)*m(1,0)*m(1,0);\n    Scalar c1 = m(0,0)*m(1,1) - m(1,0)*m(1,0) + m(0,0)*m(2,2) - m(2,0)*m(2,0) + m(1,1)*m(2,2) - m(2,1)*m(2,1);\n    Scalar c2 = m(0,0) + m(1,1) + m(2,2);\n\n    // Construct the parameters used in classifying the roots of the equation\n    // and in solving the equation for the roots in closed form.\n    Scalar c2_over_3 = c2*s_inv3;\n    Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3;\n    a_over_3 = numext::maxi(a_over_3, Scalar(0));\n\n    Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1));\n\n    Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b;\n    q = numext::maxi(q, Scalar(0));\n\n    // Compute the eigenvalues by solving for the roots of the polynomial.\n    Scalar rho = sqrt(a_over_3);\n    Scalar theta = atan2(sqrt(q),half_b)*s_inv3;  // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3]\n    Scalar cos_theta = cos(theta);\n    Scalar sin_theta = sin(theta);\n    // roots are already sorted, since cos is monotonically decreasing on [0, pi]\n    roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3)\n    roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3)\n    roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;\n  }\n\n  EIGEN_DEVICE_FUNC\n  static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)\n  {\n    using std::abs;\n    Index i0;\n    // Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal):\n    mat.diagonal().cwiseAbs().maxCoeff(&i0);\n    // mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector,\n    // so let's save it:\n    representative = 
mat.col(i0);\n    Scalar n0, n1;\n    VectorType c0, c1;\n    n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();\n    n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();\n    if(n0>n1) res = c0/std::sqrt(n0);\n    else      res = c1/std::sqrt(n1);\n\n    return true;\n  }\n\n  EIGEN_DEVICE_FUNC\n  static inline void run(SolverType& solver, const MatrixType& mat, int options)\n  {\n    eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows());\n    eigen_assert((options&~(EigVecMask|GenEigMask))==0\n            && (options&EigVecMask)!=EigVecMask\n            && \"invalid option parameter\");\n    bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;\n    \n    EigenvectorsType& eivecs = solver.m_eivec;\n    VectorType& eivals = solver.m_eivalues;\n  \n    // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.\n    Scalar shift = mat.trace() / Scalar(3);\n    // TODO Avoid this copy. 
Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later\n    MatrixType scaledMat = mat.template selfadjointView<Lower>();\n    scaledMat.diagonal().array() -= shift;\n    Scalar scale = scaledMat.cwiseAbs().maxCoeff();\n    if(scale > 0) scaledMat /= scale;   // TODO for scale==0 we could save the remaining operations\n\n    // compute the eigenvalues\n    computeRoots(scaledMat,eivals);\n\n    // compute the eigenvectors\n    if(computeEigenvectors)\n    {\n      if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())\n      {\n        // All three eigenvalues are numerically the same\n        eivecs.setIdentity();\n      }\n      else\n      {\n        MatrixType tmp;\n        tmp = scaledMat;\n\n        // Compute the eigenvector of the most distinct eigenvalue\n        Scalar d0 = eivals(2) - eivals(1);\n        Scalar d1 = eivals(1) - eivals(0);\n        Index k(0), l(2);\n        if(d0 > d1)\n        {\n          numext::swap(k,l);\n          d0 = d1;\n        }\n\n        // Compute the eigenvector of index k\n        {\n          tmp.diagonal().array () -= eivals(k);\n          // By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector.\n          extract_kernel(tmp, eivecs.col(k), eivecs.col(l));\n        }\n\n        // Compute eigenvector of index l\n        if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1)\n        {\n          // If d0 is too small, then the two other eigenvalues are numerically the same,\n          // and thus we only have to ortho-normalize the near orthogonal vector we saved above.\n          eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(l);\n          eivecs.col(l).normalize();\n        }\n        else\n        {\n          tmp = scaledMat;\n          tmp.diagonal().array () -= eivals(l);\n\n          VectorType dummy;\n          extract_kernel(tmp, eivecs.col(l), dummy);\n        }\n\n        // Compute last 
eigenvector from the other two\n        eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized();\n      }\n    }\n\n    // Rescale back to the original size.\n    eivals *= scale;\n    eivals.array() += shift;\n    \n    solver.m_info = Success;\n    solver.m_isInitialized = true;\n    solver.m_eigenvectorsOk = computeEigenvectors;\n  }\n};\n\n// 2x2 direct eigenvalues decomposition, code from Hauke Heibel\ntemplate<typename SolverType> \nstruct direct_selfadjoint_eigenvalues<SolverType,2,false>\n{\n  typedef typename SolverType::MatrixType MatrixType;\n  typedef typename SolverType::RealVectorType VectorType;\n  typedef typename SolverType::Scalar Scalar;\n  typedef typename SolverType::EigenvectorsType EigenvectorsType;\n  \n  EIGEN_DEVICE_FUNC\n  static inline void computeRoots(const MatrixType& m, VectorType& roots)\n  {\n    using std::sqrt;\n    const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));\n    const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));\n    roots(0) = t1 - t0;\n    roots(1) = t1 + t0;\n  }\n  \n  EIGEN_DEVICE_FUNC\n  static inline void run(SolverType& solver, const MatrixType& mat, int options)\n  {\n    EIGEN_USING_STD_MATH(sqrt);\n    EIGEN_USING_STD_MATH(abs);\n    \n    eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());\n    eigen_assert((options&~(EigVecMask|GenEigMask))==0\n            && (options&EigVecMask)!=EigVecMask\n            && \"invalid option parameter\");\n    bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;\n    \n    EigenvectorsType& eivecs = solver.m_eivec;\n    VectorType& eivals = solver.m_eivalues;\n  \n    // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.\n    Scalar shift = mat.trace() / Scalar(2);\n    MatrixType scaledMat = mat;\n    scaledMat.coeffRef(0,1) = mat.coeff(1,0);\n    scaledMat.diagonal().array() -= shift;\n    Scalar scale = 
scaledMat.cwiseAbs().maxCoeff();\n    if(scale > Scalar(0))\n      scaledMat /= scale;\n\n    // Compute the eigenvalues\n    computeRoots(scaledMat,eivals);\n\n    // compute the eigen vectors\n    if(computeEigenvectors)\n    {\n      if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon())\n      {\n        eivecs.setIdentity();\n      }\n      else\n      {\n        scaledMat.diagonal().array () -= eivals(1);\n        Scalar a2 = numext::abs2(scaledMat(0,0));\n        Scalar c2 = numext::abs2(scaledMat(1,1));\n        Scalar b2 = numext::abs2(scaledMat(1,0));\n        if(a2>c2)\n        {\n          eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);\n          eivecs.col(1) /= sqrt(a2+b2);\n        }\n        else\n        {\n          eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);\n          eivecs.col(1) /= sqrt(c2+b2);\n        }\n\n        eivecs.col(0) << eivecs.col(1).unitOrthogonal();\n      }\n    }\n\n    // Rescale back to the original size.\n    eivals *= scale;\n    eivals.array() += shift;\n\n    solver.m_info = Success;\n    solver.m_isInitialized = true;\n    solver.m_eigenvectorsOk = computeEigenvectors;\n  }\n};\n\n}\n\ntemplate<typename MatrixType>\nEIGEN_DEVICE_FUNC\nSelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>\n::computeDirect(const MatrixType& matrix, int options)\n{\n  internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>::run(*this,matrix,options);\n  return *this;\n}\n\nnamespace internal {\ntemplate<int StorageOrder,typename RealScalar, typename Scalar, typename Index>\nEIGEN_DEVICE_FUNC\nstatic void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)\n{\n  using std::abs;\n  RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);\n  RealScalar e = subdiag[end-1];\n  // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still\n  // underflow thus leading to 
inf/NaN values when using the following commented code:\n//   RealScalar e2 = numext::abs2(subdiag[end-1]);\n//   RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));\n  // This explain the following, somewhat more complicated, version:\n  RealScalar mu = diag[end];\n  if(td==RealScalar(0))\n    mu -= abs(e);\n  else\n  {\n    RealScalar e2 = numext::abs2(subdiag[end-1]);\n    RealScalar h = numext::hypot(td,e);\n    if(e2==RealScalar(0)) mu -= (e / (td + (td>RealScalar(0) ? RealScalar(1) : RealScalar(-1)))) * (e / h);\n    else                  mu -= e2 / (td + (td>RealScalar(0) ? h : -h));\n  }\n  \n  RealScalar x = diag[start] - mu;\n  RealScalar z = subdiag[start];\n  for (Index k = start; k < end; ++k)\n  {\n    JacobiRotation<RealScalar> rot;\n    rot.makeGivens(x, z);\n\n    // do T = G' T G\n    RealScalar sdk = rot.s() * diag[k] + rot.c() * subdiag[k];\n    RealScalar dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1];\n\n    diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]);\n    diag[k+1] = rot.s() * sdk + rot.c() * dkp1;\n    subdiag[k] = rot.c() * sdk - rot.s() * dkp1;\n    \n\n    if (k > start)\n      subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z;\n\n    x = subdiag[k];\n\n    if (k < end - 1)\n    {\n      z = -rot.s() * subdiag[k+1];\n      subdiag[k + 1] = rot.c() * subdiag[k+1];\n    }\n    \n    // apply the givens rotation to the unit matrix Q = Q * G\n    if (matrixQ)\n    {\n      // FIXME if StorageOrder == RowMajor this operation is not very efficient\n      Map<Matrix<Scalar,Dynamic,Dynamic,StorageOrder> > q(matrixQ,n,n);\n      q.applyOnTheRight(k,k+1,rot);\n    }\n  }\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SELFADJOINTEIGENSOLVER_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Eigenvalues/SelfAdjointEigenSolver_LAPACKE.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to LAPACKe\n *    Self-adjoint eigenvalues/eigenvectors.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_SAEIGENSOLVER_LAPACKE_H\n#define EIGEN_SAEIGENSOLVER_LAPACKE_H\n\nnamespace Eigen { \n\n/** \\internal Specialization for the data types supported by LAPACKe */\n\n#define EIGEN_LAPACKE_EIG_SELFADJ(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_NAME, EIGCOLROW, LAPACKE_COLROW ) \\\ntemplate<> template<typename InputType> inline \\\nSelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >& \\\nSelfAdjointEigenSolver<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW> >::compute(const EigenBase<InputType>& matrix, int options) \\\n{ \\\n  eigen_assert(matrix.cols() == matrix.rows()); \\\n  eigen_assert((options&~(EigVecMask|GenEigMask))==0 \\\n          && (options&EigVecMask)!=EigVecMask \\\n          && \"invalid option parameter\"); \\\n  bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors; \\\n  lapack_int n = internal::convert_index<lapack_int>(matrix.cols()), lda, matrix_order, info; \\\n  m_eivalues.resize(n,1); \\\n  m_subdiag.resize(n-1); \\\n  m_eivec = matrix; \\\n\\\n  if(n==1) \\\n  { \\\n    m_eivalues.coeffRef(0,0) = numext::real(m_eivec.coeff(0,0)); \\\n    if(computeEigenvectors) m_eivec.setOnes(n,n); \\\n    m_info = Success; \\\n  
  m_isInitialized = true; \\\n    m_eigenvectorsOk = computeEigenvectors; \\\n    return *this; \\\n  } \\\n\\\n  lda = internal::convert_index<lapack_int>(m_eivec.outerStride()); \\\n  matrix_order=LAPACKE_COLROW; \\\n  char jobz, uplo='L'/*, range='A'*/; \\\n  jobz = computeEigenvectors ? 'V' : 'N'; \\\n\\\n  info = LAPACKE_##LAPACKE_NAME( matrix_order, jobz, uplo, n, (LAPACKE_TYPE*)m_eivec.data(), lda, (LAPACKE_RTYPE*)m_eivalues.data() ); \\\n  m_info = (info==0) ? Success : NoConvergence; \\\n  m_isInitialized = true; \\\n  m_eigenvectorsOk = computeEigenvectors; \\\n  return *this; \\\n}\n\n\nEIGEN_LAPACKE_EIG_SELFADJ(double,   double,                double, dsyev, ColMajor, LAPACK_COL_MAJOR)\nEIGEN_LAPACKE_EIG_SELFADJ(float,    float,                 float,  ssyev, ColMajor, LAPACK_COL_MAJOR)\nEIGEN_LAPACKE_EIG_SELFADJ(dcomplex, lapack_complex_double, double, zheev, ColMajor, LAPACK_COL_MAJOR)\nEIGEN_LAPACKE_EIG_SELFADJ(scomplex, lapack_complex_float,  float,  cheev, ColMajor, LAPACK_COL_MAJOR)\n\nEIGEN_LAPACKE_EIG_SELFADJ(double,   double,                double, dsyev, RowMajor, LAPACK_ROW_MAJOR)\nEIGEN_LAPACKE_EIG_SELFADJ(float,    float,                 float,  ssyev, RowMajor, LAPACK_ROW_MAJOR)\nEIGEN_LAPACKE_EIG_SELFADJ(dcomplex, lapack_complex_double, double, zheev, RowMajor, LAPACK_ROW_MAJOR)\nEIGEN_LAPACKE_EIG_SELFADJ(scomplex, lapack_complex_float,  float,  cheev, RowMajor, LAPACK_ROW_MAJOR)\n\n} // end namespace Eigen\n\n#endif // EIGEN_SAEIGENSOLVER_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Eigenvalues/Tridiagonalization.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_TRIDIAGONALIZATION_H\n#define EIGEN_TRIDIAGONALIZATION_H\n\nnamespace Eigen { \n\nnamespace internal {\n  \ntemplate<typename MatrixType> struct TridiagonalizationMatrixTReturnType;\ntemplate<typename MatrixType>\nstruct traits<TridiagonalizationMatrixTReturnType<MatrixType> >\n  : public traits<typename MatrixType::PlainObject>\n{\n  typedef typename MatrixType::PlainObject ReturnType; // FIXME shall it be a BandMatrix?\n  enum { Flags = 0 };\n};\n\ntemplate<typename MatrixType, typename CoeffVectorType>\nvoid tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs);\n}\n\n/** \\eigenvalues_module \\ingroup Eigenvalues_Module\n  *\n  *\n  * \\class Tridiagonalization\n  *\n  * \\brief Tridiagonal decomposition of a selfadjoint matrix\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are computing the\n  * tridiagonal decomposition; this is expected to be an instantiation of the\n  * Matrix class template.\n  *\n  * This class performs a tridiagonal decomposition of a selfadjoint matrix \\f$ A \\f$ such that:\n  * \\f$ A = Q T Q^* \\f$ where \\f$ Q \\f$ is unitary and \\f$ T \\f$ a real symmetric tridiagonal matrix.\n  *\n  * A tridiagonal matrix is a matrix which has nonzero elements only on the\n  * main diagonal and the first diagonal below and above it. The Hessenberg\n  * decomposition of a selfadjoint matrix is in fact a tridiagonal\n  * decomposition. 
This class is used in SelfAdjointEigenSolver to compute the\n  * eigenvalues and eigenvectors of a selfadjoint matrix.\n  *\n  * Call the function compute() to compute the tridiagonal decomposition of a\n  * given matrix. Alternatively, you can use the Tridiagonalization(const MatrixType&)\n  * constructor which computes the tridiagonal Schur decomposition at\n  * construction time. Once the decomposition is computed, you can use the\n  * matrixQ() and matrixT() functions to retrieve the matrices Q and T in the\n  * decomposition.\n  *\n  * The documentation of Tridiagonalization(const MatrixType&) contains an\n  * example of the typical use of this class.\n  *\n  * \\sa class HessenbergDecomposition, class SelfAdjointEigenSolver\n  */\ntemplate<typename _MatrixType> class Tridiagonalization\n{\n  public:\n\n    /** \\brief Synonym for the template parameter \\p _MatrixType. */\n    typedef _MatrixType MatrixType;\n\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n\n    enum {\n      Size = MatrixType::RowsAtCompileTime,\n      SizeMinusOne = Size == Dynamic ? Dynamic : (Size > 1 ? Size - 1 : 1),\n      Options = MatrixType::Options,\n      MaxSize = MatrixType::MaxRowsAtCompileTime,\n      MaxSizeMinusOne = MaxSize == Dynamic ? Dynamic : (MaxSize > 1 ? 
MaxSize - 1 : 1)\n    };\n\n    typedef Matrix<Scalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> CoeffVectorType;\n    typedef typename internal::plain_col_type<MatrixType, RealScalar>::type DiagonalType;\n    typedef Matrix<RealScalar, SizeMinusOne, 1, Options & ~RowMajor, MaxSizeMinusOne, 1> SubDiagonalType;\n    typedef typename internal::remove_all<typename MatrixType::RealReturnType>::type MatrixTypeRealView;\n    typedef internal::TridiagonalizationMatrixTReturnType<MatrixTypeRealView> MatrixTReturnType;\n\n    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,\n              typename internal::add_const_on_value_type<typename Diagonal<const MatrixType>::RealReturnType>::type,\n              const Diagonal<const MatrixType>\n            >::type DiagonalReturnType;\n\n    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,\n              typename internal::add_const_on_value_type<typename Diagonal<const MatrixType, -1>::RealReturnType>::type,\n              const Diagonal<const MatrixType, -1>\n            >::type SubDiagonalReturnType;\n\n    /** \\brief Return type of matrixQ() */\n    typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename CoeffVectorType::ConjugateReturnType>::type> HouseholderSequenceType;\n\n    /** \\brief Default constructor.\n      *\n      * \\param [in]  size  Positive integer, size of the matrix whose tridiagonal\n      * decomposition will be computed.\n      *\n      * The default constructor is useful in cases in which the user intends to\n      * perform decompositions via compute().  The \\p size parameter is only\n      * used as a hint. It is not an error to give a wrong \\p size, but it may\n      * impair performance.\n      *\n      * \\sa compute() for an example.\n      */\n    explicit Tridiagonalization(Index size = Size==Dynamic ? 2 : Size)\n      : m_matrix(size,size),\n        m_hCoeffs(size > 1 ? 
size-1 : 1),\n        m_isInitialized(false)\n    {}\n\n    /** \\brief Constructor; computes tridiagonal decomposition of given matrix.\n      *\n      * \\param[in]  matrix  Selfadjoint matrix whose tridiagonal decomposition\n      * is to be computed.\n      *\n      * This constructor calls compute() to compute the tridiagonal decomposition.\n      *\n      * Example: \\include Tridiagonalization_Tridiagonalization_MatrixType.cpp\n      * Output: \\verbinclude Tridiagonalization_Tridiagonalization_MatrixType.out\n      */\n    template<typename InputType>\n    explicit Tridiagonalization(const EigenBase<InputType>& matrix)\n      : m_matrix(matrix.derived()),\n        m_hCoeffs(matrix.cols() > 1 ? matrix.cols()-1 : 1),\n        m_isInitialized(false)\n    {\n      internal::tridiagonalization_inplace(m_matrix, m_hCoeffs);\n      m_isInitialized = true;\n    }\n\n    /** \\brief Computes tridiagonal decomposition of given matrix.\n      *\n      * \\param[in]  matrix  Selfadjoint matrix whose tridiagonal decomposition\n      * is to be computed.\n      * \\returns    Reference to \\c *this\n      *\n      * The tridiagonal decomposition is computed by bringing the columns of\n      * the matrix successively in the required form using Householder\n      * reflections. 
The cost is \\f$ 4n^3/3 \\f$ flops, where \\f$ n \\f$ denotes\n      * the size of the given matrix.\n      *\n      * This method reuses of the allocated data in the Tridiagonalization\n      * object, if the size of the matrix does not change.\n      *\n      * Example: \\include Tridiagonalization_compute.cpp\n      * Output: \\verbinclude Tridiagonalization_compute.out\n      */\n    template<typename InputType>\n    Tridiagonalization& compute(const EigenBase<InputType>& matrix)\n    {\n      m_matrix = matrix.derived();\n      m_hCoeffs.resize(matrix.rows()-1, 1);\n      internal::tridiagonalization_inplace(m_matrix, m_hCoeffs);\n      m_isInitialized = true;\n      return *this;\n    }\n\n    /** \\brief Returns the Householder coefficients.\n      *\n      * \\returns a const reference to the vector of Householder coefficients\n      *\n      * \\pre Either the constructor Tridiagonalization(const MatrixType&) or\n      * the member function compute(const MatrixType&) has been called before\n      * to compute the tridiagonal decomposition of a matrix.\n      *\n      * The Householder coefficients allow the reconstruction of the matrix\n      * \\f$ Q \\f$ in the tridiagonal decomposition from the packed data.\n      *\n      * Example: \\include Tridiagonalization_householderCoefficients.cpp\n      * Output: \\verbinclude Tridiagonalization_householderCoefficients.out\n      *\n      * \\sa packedMatrix(), \\ref Householder_Module \"Householder module\"\n      */\n    inline CoeffVectorType householderCoefficients() const\n    {\n      eigen_assert(m_isInitialized && \"Tridiagonalization is not initialized.\");\n      return m_hCoeffs;\n    }\n\n    /** \\brief Returns the internal representation of the decomposition\n      *\n      *\t\\returns a const reference to a matrix with the internal representation\n      *\t         of the decomposition.\n      *\n      * \\pre Either the constructor Tridiagonalization(const MatrixType&) or\n      * the member 
function compute(const MatrixType&) has been called before\n      * to compute the tridiagonal decomposition of a matrix.\n      *\n      * The returned matrix contains the following information:\n      *  - the strict upper triangular part is equal to the input matrix A.\n      *  - the diagonal and lower sub-diagonal represent the real tridiagonal\n      *    symmetric matrix T.\n      *  - the rest of the lower part contains the Householder vectors that,\n      *    combined with Householder coefficients returned by\n      *    householderCoefficients(), allows to reconstruct the matrix Q as\n      *       \\f$ Q = H_{N-1} \\ldots H_1 H_0 \\f$.\n      *    Here, the matrices \\f$ H_i \\f$ are the Householder transformations\n      *       \\f$ H_i = (I - h_i v_i v_i^T) \\f$\n      *    where \\f$ h_i \\f$ is the \\f$ i \\f$th Householder coefficient and\n      *    \\f$ v_i \\f$ is the Householder vector defined by\n      *       \\f$ v_i = [ 0, \\ldots, 0, 1, M(i+2,i), \\ldots, M(N-1,i) ]^T \\f$\n      *    with M the matrix returned by this function.\n      *\n      * See LAPACK for further details on this packed storage.\n      *\n      * Example: \\include Tridiagonalization_packedMatrix.cpp\n      * Output: \\verbinclude Tridiagonalization_packedMatrix.out\n      *\n      * \\sa householderCoefficients()\n      */\n    inline const MatrixType& packedMatrix() const\n    {\n      eigen_assert(m_isInitialized && \"Tridiagonalization is not initialized.\");\n      return m_matrix;\n    }\n\n    /** \\brief Returns the unitary matrix Q in the decomposition\n      *\n      * \\returns object representing the matrix Q\n      *\n      * \\pre Either the constructor Tridiagonalization(const MatrixType&) or\n      * the member function compute(const MatrixType&) has been called before\n      * to compute the tridiagonal decomposition of a matrix.\n      *\n      * This function returns a light-weight object of template class\n      * HouseholderSequence. 
You can either apply it directly to a matrix or\n      * you can convert it to a matrix of type #MatrixType.\n      *\n      * \\sa Tridiagonalization(const MatrixType&) for an example,\n      *     matrixT(), class HouseholderSequence\n      */\n    HouseholderSequenceType matrixQ() const\n    {\n      eigen_assert(m_isInitialized && \"Tridiagonalization is not initialized.\");\n      return HouseholderSequenceType(m_matrix, m_hCoeffs.conjugate())\n             .setLength(m_matrix.rows() - 1)\n             .setShift(1);\n    }\n\n    /** \\brief Returns an expression of the tridiagonal matrix T in the decomposition\n      *\n      * \\returns expression object representing the matrix T\n      *\n      * \\pre Either the constructor Tridiagonalization(const MatrixType&) or\n      * the member function compute(const MatrixType&) has been called before\n      * to compute the tridiagonal decomposition of a matrix.\n      *\n      * Currently, this function can be used to extract the matrix T from internal\n      * data and copy it to a dense matrix object. 
In most cases, it may be\n      * sufficient to directly use the packed matrix or the vector expressions\n      * returned by diagonal() and subDiagonal() instead of creating a new\n      * dense copy matrix with this function.\n      *\n      * \\sa Tridiagonalization(const MatrixType&) for an example,\n      * matrixQ(), packedMatrix(), diagonal(), subDiagonal()\n      */\n    MatrixTReturnType matrixT() const\n    {\n      eigen_assert(m_isInitialized && \"Tridiagonalization is not initialized.\");\n      return MatrixTReturnType(m_matrix.real());\n    }\n\n    /** \\brief Returns the diagonal of the tridiagonal matrix T in the decomposition.\n      *\n      * \\returns expression representing the diagonal of T\n      *\n      * \\pre Either the constructor Tridiagonalization(const MatrixType&) or\n      * the member function compute(const MatrixType&) has been called before\n      * to compute the tridiagonal decomposition of a matrix.\n      *\n      * Example: \\include Tridiagonalization_diagonal.cpp\n      * Output: \\verbinclude Tridiagonalization_diagonal.out\n      *\n      * \\sa matrixT(), subDiagonal()\n      */\n    DiagonalReturnType diagonal() const;\n\n    /** \\brief Returns the subdiagonal of the tridiagonal matrix T in the decomposition.\n      *\n      * \\returns expression representing the subdiagonal of T\n      *\n      * \\pre Either the constructor Tridiagonalization(const MatrixType&) or\n      * the member function compute(const MatrixType&) has been called before\n      * to compute the tridiagonal decomposition of a matrix.\n      *\n      * \\sa diagonal() for an example, matrixT()\n      */\n    SubDiagonalReturnType subDiagonal() const;\n\n  protected:\n\n    MatrixType m_matrix;\n    CoeffVectorType m_hCoeffs;\n    bool m_isInitialized;\n};\n\ntemplate<typename MatrixType>\ntypename Tridiagonalization<MatrixType>::DiagonalReturnType\nTridiagonalization<MatrixType>::diagonal() const\n{\n  eigen_assert(m_isInitialized && 
\"Tridiagonalization is not initialized.\");\n  return m_matrix.diagonal().real();\n}\n\ntemplate<typename MatrixType>\ntypename Tridiagonalization<MatrixType>::SubDiagonalReturnType\nTridiagonalization<MatrixType>::subDiagonal() const\n{\n  eigen_assert(m_isInitialized && \"Tridiagonalization is not initialized.\");\n  return m_matrix.template diagonal<-1>().real();\n}\n\nnamespace internal {\n\n/** \\internal\n  * Performs a tridiagonal decomposition of the selfadjoint matrix \\a matA in-place.\n  *\n  * \\param[in,out] matA On input the selfadjoint matrix. Only the \\b lower triangular part is referenced.\n  *                     On output, the strict upper part is left unchanged, and the lower triangular part\n  *                     represents the T and Q matrices in packed format has detailed below.\n  * \\param[out]    hCoeffs returned Householder coefficients (see below)\n  *\n  * On output, the tridiagonal selfadjoint matrix T is stored in the diagonal\n  * and lower sub-diagonal of the matrix \\a matA.\n  * The unitary matrix Q is represented in a compact way as a product of\n  * Householder reflectors \\f$ H_i \\f$ such that:\n  *       \\f$ Q = H_{N-1} \\ldots H_1 H_0 \\f$.\n  * The Householder reflectors are defined as\n  *       \\f$ H_i = (I - h_i v_i v_i^T) \\f$\n  * where \\f$ h_i = hCoeffs[i]\\f$ is the \\f$ i \\f$th Householder coefficient and\n  * \\f$ v_i \\f$ is the Householder vector defined by\n  *       \\f$ v_i = [ 0, \\ldots, 0, 1, matA(i+2,i), \\ldots, matA(N-1,i) ]^T \\f$.\n  *\n  * Implemented from Golub's \"Matrix Computations\", algorithm 8.3.1.\n  *\n  * \\sa Tridiagonalization::packedMatrix()\n  */\ntemplate<typename MatrixType, typename CoeffVectorType>\nvoid tridiagonalization_inplace(MatrixType& matA, CoeffVectorType& hCoeffs)\n{\n  using numext::conj;\n  typedef typename MatrixType::Scalar Scalar;\n  typedef typename MatrixType::RealScalar RealScalar;\n  Index n = matA.rows();\n  eigen_assert(n==matA.cols());\n  
eigen_assert(n==hCoeffs.size()+1 || n==1);\n  \n  for (Index i = 0; i<n-1; ++i)\n  {\n    Index remainingSize = n-i-1;\n    RealScalar beta;\n    Scalar h;\n    matA.col(i).tail(remainingSize).makeHouseholderInPlace(h, beta);\n\n    // Apply similarity transformation to remaining columns,\n    // i.e., A = H A H' where H = I - h v v' and v = matA.col(i).tail(n-i-1)\n    matA.col(i).coeffRef(i+1) = 1;\n\n    hCoeffs.tail(n-i-1).noalias() = (matA.bottomRightCorner(remainingSize,remainingSize).template selfadjointView<Lower>()\n                                  * (conj(h) * matA.col(i).tail(remainingSize)));\n\n    hCoeffs.tail(n-i-1) += (conj(h)*RealScalar(-0.5)*(hCoeffs.tail(remainingSize).dot(matA.col(i).tail(remainingSize)))) * matA.col(i).tail(n-i-1);\n\n    matA.bottomRightCorner(remainingSize, remainingSize).template selfadjointView<Lower>()\n      .rankUpdate(matA.col(i).tail(remainingSize), hCoeffs.tail(remainingSize), Scalar(-1));\n\n    matA.col(i).coeffRef(i+1) = beta;\n    hCoeffs.coeffRef(i) = h;\n  }\n}\n\n// forward declaration, implementation at the end of this file\ntemplate<typename MatrixType,\n         int Size=MatrixType::ColsAtCompileTime,\n         bool IsComplex=NumTraits<typename MatrixType::Scalar>::IsComplex>\nstruct tridiagonalization_inplace_selector;\n\n/** \\brief Performs a full tridiagonalization in place\n  *\n  * \\param[in,out]  mat  On input, the selfadjoint matrix whose tridiagonal\n  *    decomposition is to be computed. Only the lower triangular part referenced.\n  *    The rest is left unchanged. 
On output, the orthogonal matrix Q\n  *    in the decomposition if \\p extractQ is true.\n  * \\param[out]  diag  The diagonal of the tridiagonal matrix T in the\n  *    decomposition.\n  * \\param[out]  subdiag  The subdiagonal of the tridiagonal matrix T in\n  *    the decomposition.\n  * \\param[in]  extractQ  If true, the orthogonal matrix Q in the\n  *    decomposition is computed and stored in \\p mat.\n  *\n  * Computes the tridiagonal decomposition of the selfadjoint matrix \\p mat in place\n  * such that \\f$ mat = Q T Q^* \\f$ where \\f$ Q \\f$ is unitary and \\f$ T \\f$ a real\n  * symmetric tridiagonal matrix.\n  *\n  * The tridiagonal matrix T is passed to the output parameters \\p diag and \\p subdiag. If\n  * \\p extractQ is true, then the orthogonal matrix Q is passed to \\p mat. Otherwise the lower\n  * part of the matrix \\p mat is destroyed.\n  *\n  * The vectors \\p diag and \\p subdiag are not resized. The function\n  * assumes that they are already of the correct size. 
The length of the\n  * vector \\p diag should equal the number of rows in \\p mat, and the\n  * length of the vector \\p subdiag should be one left.\n  *\n  * This implementation contains an optimized path for 3-by-3 matrices\n  * which is especially useful for plane fitting.\n  *\n  * \\note Currently, it requires two temporary vectors to hold the intermediate\n  * Householder coefficients, and to reconstruct the matrix Q from the Householder\n  * reflectors.\n  *\n  * Example (this uses the same matrix as the example in\n  *    Tridiagonalization::Tridiagonalization(const MatrixType&)):\n  *    \\include Tridiagonalization_decomposeInPlace.cpp\n  * Output: \\verbinclude Tridiagonalization_decomposeInPlace.out\n  *\n  * \\sa class Tridiagonalization\n  */\ntemplate<typename MatrixType, typename DiagonalType, typename SubDiagonalType>\nvoid tridiagonalization_inplace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)\n{\n  eigen_assert(mat.cols()==mat.rows() && diag.size()==mat.rows() && subdiag.size()==mat.rows()-1);\n  tridiagonalization_inplace_selector<MatrixType>::run(mat, diag, subdiag, extractQ);\n}\n\n/** \\internal\n  * General full tridiagonalization\n  */\ntemplate<typename MatrixType, int Size, bool IsComplex>\nstruct tridiagonalization_inplace_selector\n{\n  typedef typename Tridiagonalization<MatrixType>::CoeffVectorType CoeffVectorType;\n  typedef typename Tridiagonalization<MatrixType>::HouseholderSequenceType HouseholderSequenceType;\n  template<typename DiagonalType, typename SubDiagonalType>\n  static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)\n  {\n    CoeffVectorType hCoeffs(mat.cols()-1);\n    tridiagonalization_inplace(mat,hCoeffs);\n    diag = mat.diagonal().real();\n    subdiag = mat.template diagonal<-1>().real();\n    if(extractQ)\n      mat = HouseholderSequenceType(mat, hCoeffs.conjugate())\n            .setLength(mat.rows() - 1)\n            .setShift(1);\n  
}\n};\n\n/** \\internal\n  * Specialization for 3x3 real matrices.\n  * Especially useful for plane fitting.\n  */\ntemplate<typename MatrixType>\nstruct tridiagonalization_inplace_selector<MatrixType,3,false>\n{\n  typedef typename MatrixType::Scalar Scalar;\n  typedef typename MatrixType::RealScalar RealScalar;\n\n  template<typename DiagonalType, typename SubDiagonalType>\n  static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)\n  {\n    using std::sqrt;\n    const RealScalar tol = (std::numeric_limits<RealScalar>::min)();\n    diag[0] = mat(0,0);\n    RealScalar v1norm2 = numext::abs2(mat(2,0));\n    if(v1norm2 <= tol)\n    {\n      diag[1] = mat(1,1);\n      diag[2] = mat(2,2);\n      subdiag[0] = mat(1,0);\n      subdiag[1] = mat(2,1);\n      if (extractQ)\n        mat.setIdentity();\n    }\n    else\n    {\n      RealScalar beta = sqrt(numext::abs2(mat(1,0)) + v1norm2);\n      RealScalar invBeta = RealScalar(1)/beta;\n      Scalar m01 = mat(1,0) * invBeta;\n      Scalar m02 = mat(2,0) * invBeta;\n      Scalar q = RealScalar(2)*m01*mat(2,1) + m02*(mat(2,2) - mat(1,1));\n      diag[1] = mat(1,1) + m02*q;\n      diag[2] = mat(2,2) - m02*q;\n      subdiag[0] = beta;\n      subdiag[1] = mat(2,1) - m01 * q;\n      if (extractQ)\n      {\n        mat << 1,   0,    0,\n               0, m01,  m02,\n               0, m02, -m01;\n      }\n    }\n  }\n};\n\n/** \\internal\n  * Trivial specialization for 1x1 matrices\n  */\ntemplate<typename MatrixType, bool IsComplex>\nstruct tridiagonalization_inplace_selector<MatrixType,1,IsComplex>\n{\n  typedef typename MatrixType::Scalar Scalar;\n\n  template<typename DiagonalType, typename SubDiagonalType>\n  static void run(MatrixType& mat, DiagonalType& diag, SubDiagonalType&, bool extractQ)\n  {\n    diag(0,0) = numext::real(mat(0,0));\n    if(extractQ)\n      mat(0,0) = Scalar(1);\n  }\n};\n\n/** \\internal\n  * \\eigenvalues_module \\ingroup Eigenvalues_Module\n  *\n  * \\brief 
Expression type for return value of Tridiagonalization::matrixT()\n  *\n  * \\tparam MatrixType type of underlying dense matrix\n  */\ntemplate<typename MatrixType> struct TridiagonalizationMatrixTReturnType\n: public ReturnByValue<TridiagonalizationMatrixTReturnType<MatrixType> >\n{\n  public:\n    /** \\brief Constructor.\n      *\n      * \\param[in] mat The underlying dense matrix\n      */\n    TridiagonalizationMatrixTReturnType(const MatrixType& mat) : m_matrix(mat) { }\n\n    template <typename ResultType>\n    inline void evalTo(ResultType& result) const\n    {\n      result.setZero();\n      result.template diagonal<1>() = m_matrix.template diagonal<-1>().conjugate();\n      result.diagonal() = m_matrix.diagonal();\n      result.template diagonal<-1>() = m_matrix.template diagonal<-1>();\n    }\n\n    Index rows() const { return m_matrix.rows(); }\n    Index cols() const { return m_matrix.cols(); }\n\n  protected:\n    typename MatrixType::Nested m_matrix;\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_TRIDIAGONALIZATION_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/AlignedBox.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ALIGNEDBOX_H\n#define EIGEN_ALIGNEDBOX_H\n\nnamespace Eigen { \n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  *\n  * \\class AlignedBox\n  *\n  * \\brief An axis aligned box\n  *\n  * \\tparam _Scalar the type of the scalar coefficients\n  * \\tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.\n  *\n  * This class represents an axis aligned box as a pair of the minimal and maximal corners.\n  * \\warning The result of most methods is undefined when applied to an empty box. You can check for empty boxes using isEmpty().\n  * \\sa alignedboxtypedefs\n  */\ntemplate <typename _Scalar, int _AmbientDim>\nclass AlignedBox\n{\npublic:\nEIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)\n  enum { AmbientDimAtCompileTime = _AmbientDim };\n  typedef _Scalar                                   Scalar;\n  typedef NumTraits<Scalar>                         ScalarTraits;\n  typedef Eigen::Index                              Index; ///< \\deprecated since Eigen 3.3\n  typedef typename ScalarTraits::Real               RealScalar;\n  typedef typename ScalarTraits::NonInteger         NonInteger;\n  typedef Matrix<Scalar,AmbientDimAtCompileTime,1>  VectorType;\n  typedef CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const VectorType, const VectorType> VectorTypeSum;\n\n  /** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */\n  enum CornerType\n  {\n    /** 1D names @{ */\n    Min=0, Max=1,\n    /** @} */\n\n    /** Identifier for 2D corner @{ */\n    BottomLeft=0, 
BottomRight=1,\n    TopLeft=2, TopRight=3,\n    /** @} */\n\n    /** Identifier for 3D corner  @{ */\n    BottomLeftFloor=0, BottomRightFloor=1,\n    TopLeftFloor=2, TopRightFloor=3,\n    BottomLeftCeil=4, BottomRightCeil=5,\n    TopLeftCeil=6, TopRightCeil=7\n    /** @} */\n  };\n\n\n  /** Default constructor initializing a null box. */\n  EIGEN_DEVICE_FUNC inline AlignedBox()\n  { if (EIGEN_CONST_CONDITIONAL(AmbientDimAtCompileTime!=Dynamic)) setEmpty(); }\n\n  /** Constructs a null box with \\a _dim the dimension of the ambient space. */\n  EIGEN_DEVICE_FUNC inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)\n  { setEmpty(); }\n\n  /** Constructs a box with extremities \\a _min and \\a _max.\n   * \\warning If either component of \\a _min is larger than the same component of \\a _max, the constructed box is empty. */\n  template<typename OtherVectorType1, typename OtherVectorType2>\n  EIGEN_DEVICE_FUNC inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {}\n\n  /** Constructs a box containing a single point \\a p. */\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC inline explicit AlignedBox(const MatrixBase<Derived>& p) : m_min(p), m_max(m_min)\n  { }\n\n  EIGEN_DEVICE_FUNC ~AlignedBox() {}\n\n  /** \\returns the dimension in which the box holds */\n  EIGEN_DEVICE_FUNC inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? 
m_min.size() : Index(AmbientDimAtCompileTime); }\n\n  /** \\deprecated use isEmpty() */\n  EIGEN_DEVICE_FUNC inline bool isNull() const { return isEmpty(); }\n\n  /** \\deprecated use setEmpty() */\n  EIGEN_DEVICE_FUNC inline void setNull() { setEmpty(); }\n\n  /** \\returns true if the box is empty.\n   * \\sa setEmpty */\n  EIGEN_DEVICE_FUNC inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); }\n\n  /** Makes \\c *this an empty box.\n   * \\sa isEmpty */\n  EIGEN_DEVICE_FUNC inline void setEmpty()\n  {\n    m_min.setConstant( ScalarTraits::highest() );\n    m_max.setConstant( ScalarTraits::lowest() );\n  }\n\n  /** \\returns the minimal corner */\n  EIGEN_DEVICE_FUNC inline const VectorType& (min)() const { return m_min; }\n  /** \\returns a non const reference to the minimal corner */\n  EIGEN_DEVICE_FUNC inline VectorType& (min)() { return m_min; }\n  /** \\returns the maximal corner */\n  EIGEN_DEVICE_FUNC inline const VectorType& (max)() const { return m_max; }\n  /** \\returns a non const reference to the maximal corner */\n  EIGEN_DEVICE_FUNC inline VectorType& (max)() { return m_max; }\n\n  /** \\returns the center of the box */\n  EIGEN_DEVICE_FUNC inline const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(VectorTypeSum, RealScalar, quotient)\n  center() const\n  { return (m_min+m_max)/RealScalar(2); }\n\n  /** \\returns the lengths of the sides of the bounding box.\n    * Note that this function does not get the same\n    * result for integral or floating scalar types: see\n    */\n  EIGEN_DEVICE_FUNC inline const CwiseBinaryOp< internal::scalar_difference_op<Scalar,Scalar>, const VectorType, const VectorType> sizes() const\n  { return m_max - m_min; }\n\n  /** \\returns the volume of the bounding box */\n  EIGEN_DEVICE_FUNC inline Scalar volume() const\n  { return sizes().prod(); }\n\n  /** \\returns an expression for the bounding box diagonal vector\n    * if the length of the diagonal is needed: diagonal().norm()\n    * will provide 
it.\n    */\n  EIGEN_DEVICE_FUNC inline CwiseBinaryOp< internal::scalar_difference_op<Scalar,Scalar>, const VectorType, const VectorType> diagonal() const\n  { return sizes(); }\n\n  /** \\returns the vertex of the bounding box at the corner defined by\n    * the corner-id corner. It works only for a 1D, 2D or 3D bounding box.\n    * For 1D bounding boxes corners are named by 2 enum constants:\n    * BottomLeft and BottomRight.\n    * For 2D bounding boxes, corners are named by 4 enum constants:\n    * BottomLeft, BottomRight, TopLeft, TopRight.\n    * For 3D bounding boxes, the following names are added:\n    * BottomLeftCeil, BottomRightCeil, TopLeftCeil, TopRightCeil.\n    */\n  EIGEN_DEVICE_FUNC inline VectorType corner(CornerType corner) const\n  {\n    EIGEN_STATIC_ASSERT(_AmbientDim <= 3, THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE);\n\n    VectorType res;\n\n    Index mult = 1;\n    for(Index d=0; d<dim(); ++d)\n    {\n      if( mult & corner ) res[d] = m_max[d];\n      else                res[d] = m_min[d];\n      mult *= 2;\n    }\n    return res;\n  }\n\n  /** \\returns a random point inside the bounding box sampled with\n   * a uniform distribution */\n  EIGEN_DEVICE_FUNC inline VectorType sample() const\n  {\n    VectorType r(dim());\n    for(Index d=0; d<dim(); ++d)\n    {\n      if(!ScalarTraits::IsInteger)\n      {\n        r[d] = m_min[d] + (m_max[d]-m_min[d])\n             * internal::random<Scalar>(Scalar(0), Scalar(1));\n      }\n      else\n        r[d] = internal::random(m_min[d], m_max[d]);\n    }\n    return r;\n  }\n\n  /** \\returns true if the point \\a p is inside the box \\c *this. 
*/\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC inline bool contains(const MatrixBase<Derived>& p) const\n  {\n    typename internal::nested_eval<Derived,2>::type p_n(p.derived());\n    return (m_min.array()<=p_n.array()).all() && (p_n.array()<=m_max.array()).all();\n  }\n\n  /** \\returns true if the box \\a b is entirely inside the box \\c *this. */\n  EIGEN_DEVICE_FUNC inline bool contains(const AlignedBox& b) const\n  { return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }\n\n  /** \\returns true if the box \\a b is intersecting the box \\c *this.\n   * \\sa intersection, clamp */\n  EIGEN_DEVICE_FUNC inline bool intersects(const AlignedBox& b) const\n  { return (m_min.array()<=(b.max)().array()).all() && ((b.min)().array()<=m_max.array()).all(); }\n\n  /** Extends \\c *this such that it contains the point \\a p and returns a reference to \\c *this.\n   * \\sa extend(const AlignedBox&) */\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC inline AlignedBox& extend(const MatrixBase<Derived>& p)\n  {\n    typename internal::nested_eval<Derived,2>::type p_n(p.derived());\n    m_min = m_min.cwiseMin(p_n);\n    m_max = m_max.cwiseMax(p_n);\n    return *this;\n  }\n\n  /** Extends \\c *this such that it contains the box \\a b and returns a reference to \\c *this.\n   * \\sa merged, extend(const MatrixBase&) */\n  EIGEN_DEVICE_FUNC inline AlignedBox& extend(const AlignedBox& b)\n  {\n    m_min = m_min.cwiseMin(b.m_min);\n    m_max = m_max.cwiseMax(b.m_max);\n    return *this;\n  }\n\n  /** Clamps \\c *this by the box \\a b and returns a reference to \\c *this.\n   * \\note If the boxes don't intersect, the resulting box is empty.\n   * \\sa intersection(), intersects() */\n  EIGEN_DEVICE_FUNC inline AlignedBox& clamp(const AlignedBox& b)\n  {\n    m_min = m_min.cwiseMax(b.m_min);\n    m_max = m_max.cwiseMin(b.m_max);\n    return *this;\n  }\n\n  /** Returns an AlignedBox that is the intersection of \\a b and \\c *this\n   
* \\note If the boxes don't intersect, the resulting box is empty.\n   * \\sa intersects(), clamp, contains()  */\n  EIGEN_DEVICE_FUNC inline AlignedBox intersection(const AlignedBox& b) const\n  {return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }\n\n  /** Returns an AlignedBox that is the union of \\a b and \\c *this.\n   * \\note Merging with an empty box may result in a box bigger than \\c *this. \n   * \\sa extend(const AlignedBox&) */\n  EIGEN_DEVICE_FUNC inline AlignedBox merged(const AlignedBox& b) const\n  { return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }\n\n  /** Translate \\c *this by the vector \\a t and returns a reference to \\c *this. */\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC inline AlignedBox& translate(const MatrixBase<Derived>& a_t)\n  {\n    const typename internal::nested_eval<Derived,2>::type t(a_t.derived());\n    m_min += t;\n    m_max += t;\n    return *this;\n  }\n\n  /** \\returns the squared distance between the point \\a p and the box \\c *this,\n    * and zero if \\a p is inside the box.\n    * \\sa exteriorDistance(const MatrixBase&), squaredExteriorDistance(const AlignedBox&)\n    */\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& p) const;\n\n  /** \\returns the squared distance between the boxes \\a b and \\c *this,\n    * and zero if the boxes intersect.\n    * \\sa exteriorDistance(const AlignedBox&), squaredExteriorDistance(const MatrixBase&)\n    */\n  EIGEN_DEVICE_FUNC inline Scalar squaredExteriorDistance(const AlignedBox& b) const;\n\n  /** \\returns the distance between the point \\a p and the box \\c *this,\n    * and zero if \\a p is inside the box.\n    * \\sa squaredExteriorDistance(const MatrixBase&), exteriorDistance(const AlignedBox&)\n    */\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const\n  { EIGEN_USING_STD_MATH(sqrt) 
return sqrt(NonInteger(squaredExteriorDistance(p))); }\n\n  /** \\returns the distance between the boxes \\a b and \\c *this,\n    * and zero if the boxes intersect.\n    * \\sa squaredExteriorDistance(const AlignedBox&), exteriorDistance(const MatrixBase&)\n    */\n  EIGEN_DEVICE_FUNC inline NonInteger exteriorDistance(const AlignedBox& b) const\n  { EIGEN_USING_STD_MATH(sqrt) return sqrt(NonInteger(squaredExteriorDistance(b))); }\n\n  /** \\returns \\c *this with scalar type casted to \\a NewScalarType\n    *\n    * Note that if \\a NewScalarType is equal to the current scalar type of \\c *this\n    * then this function smartly returns a const reference to \\c *this.\n    */\n  template<typename NewScalarType>\n  EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<AlignedBox,\n           AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type cast() const\n  {\n    return typename internal::cast_return_type<AlignedBox,\n                    AlignedBox<NewScalarType,AmbientDimAtCompileTime> >::type(*this);\n  }\n\n  /** Copy constructor with scalar type conversion */\n  template<typename OtherScalarType>\n  EIGEN_DEVICE_FUNC inline explicit AlignedBox(const AlignedBox<OtherScalarType,AmbientDimAtCompileTime>& other)\n  {\n    m_min = (other.min)().template cast<Scalar>();\n    m_max = (other.max)().template cast<Scalar>();\n  }\n\n  /** \\returns \\c true if \\c *this is approximately equal to \\a other, within the precision\n    * determined by \\a prec.\n    *\n    * \\sa MatrixBase::isApprox() */\n  EIGEN_DEVICE_FUNC bool isApprox(const AlignedBox& other, const RealScalar& prec = ScalarTraits::dummy_precision()) const\n  { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); }\n\nprotected:\n\n  VectorType m_min, m_max;\n};\n\n\n\ntemplate<typename Scalar,int AmbientDim>\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const MatrixBase<Derived>& a_p) 
const\n{\n  typename internal::nested_eval<Derived,2*AmbientDim>::type p(a_p.derived());\n  Scalar dist2(0);\n  Scalar aux;\n  for (Index k=0; k<dim(); ++k)\n  {\n    if( m_min[k] > p[k] )\n    {\n      aux = m_min[k] - p[k];\n      dist2 += aux*aux;\n    }\n    else if( p[k] > m_max[k] )\n    {\n      aux = p[k] - m_max[k];\n      dist2 += aux*aux;\n    }\n  }\n  return dist2;\n}\n\ntemplate<typename Scalar,int AmbientDim>\nEIGEN_DEVICE_FUNC inline Scalar AlignedBox<Scalar,AmbientDim>::squaredExteriorDistance(const AlignedBox& b) const\n{\n  Scalar dist2(0);\n  Scalar aux;\n  for (Index k=0; k<dim(); ++k)\n  {\n    if( m_min[k] > b.m_max[k] )\n    {\n      aux = m_min[k] - b.m_max[k];\n      dist2 += aux*aux;\n    }\n    else if( b.m_min[k] > m_max[k] )\n    {\n      aux = b.m_min[k] - m_max[k];\n      dist2 += aux*aux;\n    }\n  }\n  return dist2;\n}\n\n/** \\defgroup alignedboxtypedefs Global aligned box typedefs\n  *\n  * \\ingroup Geometry_Module\n  *\n  * Eigen defines several typedef shortcuts for most common aligned box types.\n  *\n  * The general patterns are the following:\n  *\n  * \\c AlignedBoxSizeType where \\c Size can be \\c 1, \\c 2,\\c 3,\\c 4 for fixed size boxes or \\c X for dynamic size,\n  * and where \\c Type can be \\c i for integer, \\c f for float, \\c d for double.\n  *\n  * For example, \\c AlignedBox3d is a fixed-size 3x3 aligned box type of doubles, and \\c AlignedBoxXf is a dynamic-size aligned box of floats.\n  *\n  * \\sa class AlignedBox\n  */\n\n#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix)    \\\n/** \\ingroup alignedboxtypedefs */                                 \\\ntypedef AlignedBox<Type, Size>   AlignedBox##SizeSuffix##TypeSuffix;\n\n#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \\\nEIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 1, 1) \\\nEIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \\\nEIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \\\nEIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \\\nEIGEN_MAKE_TYPEDEFS(Type, 
TypeSuffix, Dynamic, X)\n\nEIGEN_MAKE_TYPEDEFS_ALL_SIZES(int,                  i)\nEIGEN_MAKE_TYPEDEFS_ALL_SIZES(float,                f)\nEIGEN_MAKE_TYPEDEFS_ALL_SIZES(double,               d)\n\n#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES\n#undef EIGEN_MAKE_TYPEDEFS\n\n} // end namespace Eigen\n\n#endif // EIGEN_ALIGNEDBOX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/AngleAxis.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ANGLEAXIS_H\n#define EIGEN_ANGLEAXIS_H\n\nnamespace Eigen { \n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\class AngleAxis\n  *\n  * \\brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis\n  *\n  * \\param _Scalar the scalar type, i.e., the type of the coefficients.\n  *\n  * \\warning When setting up an AngleAxis object, the axis vector \\b must \\b be \\b normalized.\n  *\n  * The following two typedefs are provided for convenience:\n  * \\li \\c AngleAxisf for \\c float\n  * \\li \\c AngleAxisd for \\c double\n  *\n  * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily\n  * mimic Euler-angles. 
Here is an example:\n  * \\include AngleAxis_mimic_euler.cpp\n  * Output: \\verbinclude AngleAxis_mimic_euler.out\n  *\n  * \\note This class is not aimed to be used to store a rotation transformation,\n  * but rather to make easier the creation of other rotation (Quaternion, rotation Matrix)\n  * and transformation objects.\n  *\n  * \\sa class Quaternion, class Transform, MatrixBase::UnitX()\n  */\n\nnamespace internal {\ntemplate<typename _Scalar> struct traits<AngleAxis<_Scalar> >\n{\n  typedef _Scalar Scalar;\n};\n}\n\ntemplate<typename _Scalar>\nclass AngleAxis : public RotationBase<AngleAxis<_Scalar>,3>\n{\n  typedef RotationBase<AngleAxis<_Scalar>,3> Base;\n\npublic:\n\n  using Base::operator*;\n\n  enum { Dim = 3 };\n  /** the scalar type of the coefficients */\n  typedef _Scalar Scalar;\n  typedef Matrix<Scalar,3,3> Matrix3;\n  typedef Matrix<Scalar,3,1> Vector3;\n  typedef Quaternion<Scalar> QuaternionType;\n\nprotected:\n\n  Vector3 m_axis;\n  Scalar m_angle;\n\npublic:\n\n  /** Default constructor without initialization. */\n  EIGEN_DEVICE_FUNC AngleAxis() {}\n  /** Constructs and initialize the angle-axis rotation from an \\a angle in radian\n    * and an \\a axis which \\b must \\b be \\b normalized.\n    *\n    * \\warning If the \\a axis vector is not normalized, then the angle-axis object\n    *          represents an invalid rotation. */\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC \n  inline AngleAxis(const Scalar& angle, const MatrixBase<Derived>& axis) : m_axis(axis), m_angle(angle) {}\n  /** Constructs and initialize the angle-axis rotation from a quaternion \\a q.\n    * This function implicitly normalizes the quaternion \\a q.\n    */\n  template<typename QuatDerived> \n  EIGEN_DEVICE_FUNC inline explicit AngleAxis(const QuaternionBase<QuatDerived>& q) { *this = q; }\n  /** Constructs and initialize the angle-axis rotation from a 3x3 rotation matrix. 
*/\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC inline explicit AngleAxis(const MatrixBase<Derived>& m) { *this = m; }\n\n  /** \\returns the value of the rotation angle in radian */\n  EIGEN_DEVICE_FUNC Scalar angle() const { return m_angle; }\n  /** \\returns a read-write reference to the stored angle in radian */\n  EIGEN_DEVICE_FUNC Scalar& angle() { return m_angle; }\n\n  /** \\returns the rotation axis */\n  EIGEN_DEVICE_FUNC const Vector3& axis() const { return m_axis; }\n  /** \\returns a read-write reference to the stored rotation axis.\n    *\n    * \\warning The rotation axis must remain a \\b unit vector.\n    */\n  EIGEN_DEVICE_FUNC Vector3& axis() { return m_axis; }\n\n  /** Concatenates two rotations */\n  EIGEN_DEVICE_FUNC inline QuaternionType operator* (const AngleAxis& other) const\n  { return QuaternionType(*this) * QuaternionType(other); }\n\n  /** Concatenates two rotations */\n  EIGEN_DEVICE_FUNC inline QuaternionType operator* (const QuaternionType& other) const\n  { return QuaternionType(*this) * other; }\n\n  /** Concatenates two rotations */\n  friend EIGEN_DEVICE_FUNC inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b)\n  { return a * QuaternionType(b); }\n\n  /** \\returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */\n  EIGEN_DEVICE_FUNC AngleAxis inverse() const\n  { return AngleAxis(-m_angle, m_axis); }\n\n  template<class QuatDerived>\n  EIGEN_DEVICE_FUNC AngleAxis& operator=(const QuaternionBase<QuatDerived>& q);\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC AngleAxis& operator=(const MatrixBase<Derived>& m);\n\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC AngleAxis& fromRotationMatrix(const MatrixBase<Derived>& m);\n  EIGEN_DEVICE_FUNC Matrix3 toRotationMatrix(void) const;\n\n  /** \\returns \\c *this with scalar type casted to \\a NewScalarType\n    *\n    * Note that if \\a NewScalarType is equal to the current scalar type of \\c *this\n    * then this 
function smartly returns a const reference to \\c *this.\n    */\n  template<typename NewScalarType>\n  EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type cast() const\n  { return typename internal::cast_return_type<AngleAxis,AngleAxis<NewScalarType> >::type(*this); }\n\n  /** Copy constructor with scalar type conversion */\n  template<typename OtherScalarType>\n  EIGEN_DEVICE_FUNC inline explicit AngleAxis(const AngleAxis<OtherScalarType>& other)\n  {\n    m_axis = other.axis().template cast<Scalar>();\n    m_angle = Scalar(other.angle());\n  }\n\n  EIGEN_DEVICE_FUNC static inline const AngleAxis Identity() { return AngleAxis(Scalar(0), Vector3::UnitX()); }\n\n  /** \\returns \\c true if \\c *this is approximately equal to \\a other, within the precision\n    * determined by \\a prec.\n    *\n    * \\sa MatrixBase::isApprox() */\n  EIGEN_DEVICE_FUNC bool isApprox(const AngleAxis& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const\n  { return m_axis.isApprox(other.m_axis, prec) && internal::isApprox(m_angle,other.m_angle, prec); }\n};\n\n/** \\ingroup Geometry_Module\n  * single precision angle-axis type */\ntypedef AngleAxis<float> AngleAxisf;\n/** \\ingroup Geometry_Module\n  * double precision angle-axis type */\ntypedef AngleAxis<double> AngleAxisd;\n\n/** Set \\c *this from a \\b unit quaternion.\n  *\n  * The resulting axis is normalized, and the computed angle is in the [0,pi] range.\n  * \n  * This function implicitly normalizes the quaternion \\a q.\n  */\ntemplate<typename Scalar>\ntemplate<typename QuatDerived>\nEIGEN_DEVICE_FUNC AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionBase<QuatDerived>& q)\n{\n  EIGEN_USING_STD_MATH(atan2)\n  EIGEN_USING_STD_MATH(abs)\n  Scalar n = q.vec().norm();\n  if(n<NumTraits<Scalar>::epsilon())\n    n = q.vec().stableNorm();\n\n  if (n != Scalar(0))\n  {\n    m_angle = Scalar(2)*atan2(n, abs(q.w()));\n    
if(q.w() < 0)\n      n = -n;\n    m_axis  = q.vec() / n;\n  }\n  else\n  {\n    m_angle = Scalar(0);\n    m_axis << Scalar(1), Scalar(0), Scalar(0);\n  }\n  return *this;\n}\n\n/** Set \\c *this from a 3x3 rotation matrix \\a mat.\n  */\ntemplate<typename Scalar>\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const MatrixBase<Derived>& mat)\n{\n  // Since a direct conversion would not be really faster,\n  // let's use the robust Quaternion implementation:\n  return *this = QuaternionType(mat);\n}\n\n/**\n* \\brief Sets \\c *this from a 3x3 rotation matrix.\n**/\ntemplate<typename Scalar>\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC AngleAxis<Scalar>& AngleAxis<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)\n{\n  return *this = QuaternionType(mat);\n}\n\n/** Constructs and \\returns an equivalent 3x3 rotation matrix.\n  */\ntemplate<typename Scalar>\ntypename AngleAxis<Scalar>::Matrix3\nEIGEN_DEVICE_FUNC AngleAxis<Scalar>::toRotationMatrix(void) const\n{\n  EIGEN_USING_STD_MATH(sin)\n  EIGEN_USING_STD_MATH(cos)\n  Matrix3 res;\n  Vector3 sin_axis  = sin(m_angle) * m_axis;\n  Scalar c = cos(m_angle);\n  Vector3 cos1_axis = (Scalar(1)-c) * m_axis;\n\n  Scalar tmp;\n  tmp = cos1_axis.x() * m_axis.y();\n  res.coeffRef(0,1) = tmp - sin_axis.z();\n  res.coeffRef(1,0) = tmp + sin_axis.z();\n\n  tmp = cos1_axis.x() * m_axis.z();\n  res.coeffRef(0,2) = tmp + sin_axis.y();\n  res.coeffRef(2,0) = tmp - sin_axis.y();\n\n  tmp = cos1_axis.y() * m_axis.z();\n  res.coeffRef(1,2) = tmp - sin_axis.x();\n  res.coeffRef(2,1) = tmp + sin_axis.x();\n\n  res.diagonal() = (cos1_axis.cwiseProduct(m_axis)).array() + c;\n\n  return res;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_ANGLEAXIS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/EulerAngles.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_EULERANGLES_H\n#define EIGEN_EULERANGLES_H\n\nnamespace Eigen { \n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  *\n  * \\returns the Euler-angles of the rotation matrix \\c *this using the convention defined by the triplet (\\a a0,\\a a1,\\a a2)\n  *\n  * Each of the three parameters \\a a0,\\a a1,\\a a2 represents the respective rotation axis as an integer in {0,1,2}.\n  * For instance, in:\n  * \\code Vector3f ea = mat.eulerAngles(2, 0, 2); \\endcode\n  * \"2\" represents the z axis and \"0\" the x axis, etc. The returned angles are such that\n  * we have the following equality:\n  * \\code\n  * mat == AngleAxisf(ea[0], Vector3f::UnitZ())\n  *      * AngleAxisf(ea[1], Vector3f::UnitX())\n  *      * AngleAxisf(ea[2], Vector3f::UnitZ()); \\endcode\n  * This corresponds to the right-multiply conventions (with right hand side frames).\n  * \n  * The returned angles are in the ranges [0:pi]x[-pi:pi]x[-pi:pi].\n  * \n  * \\sa class AngleAxis\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline Matrix<typename MatrixBase<Derived>::Scalar,3,1>\nMatrixBase<Derived>::eulerAngles(Index a0, Index a1, Index a2) const\n{\n  EIGEN_USING_STD_MATH(atan2)\n  EIGEN_USING_STD_MATH(sin)\n  EIGEN_USING_STD_MATH(cos)\n  /* Implemented from Graphics Gems IV */\n  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3)\n\n  Matrix<Scalar,3,1> res;\n  typedef Matrix<typename Derived::Scalar,2,1> Vector2;\n\n  const Index odd = ((a0+1)%3 == a1) ? 
0 : 1;\n  const Index i = a0;\n  const Index j = (a0 + 1 + odd)%3;\n  const Index k = (a0 + 2 - odd)%3;\n  \n  if (a0==a2)\n  {\n    res[0] = atan2(coeff(j,i), coeff(k,i));\n    if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0)))\n    {\n      if(res[0] > Scalar(0)) {\n        res[0] -= Scalar(EIGEN_PI);\n      }\n      else {\n        res[0] += Scalar(EIGEN_PI);\n      }\n      Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm();\n      res[1] = -atan2(s2, coeff(i,i));\n    }\n    else\n    {\n      Scalar s2 = Vector2(coeff(j,i), coeff(k,i)).norm();\n      res[1] = atan2(s2, coeff(i,i));\n    }\n    \n    // With a=(0,1,0), we have i=0; j=1; k=2, and after computing the first two angles,\n    // we can compute their respective rotation, and apply its inverse to M. Since the result must\n    // be a rotation around x, we have:\n    //\n    //  c2  s1.s2 c1.s2                   1  0   0 \n    //  0   c1    -s1       *    M    =   0  c3  s3\n    //  -s2 s1.c2 c1.c2                   0 -s3  c3\n    //\n    //  Thus:  m11.c1 - m21.s1 = c3  &   m12.c1 - m22.s1 = s3\n    \n    Scalar s1 = sin(res[0]);\n    Scalar c1 = cos(res[0]);\n    res[2] = atan2(c1*coeff(j,k)-s1*coeff(k,k), c1*coeff(j,j) - s1 * coeff(k,j));\n  } \n  else\n  {\n    res[0] = atan2(coeff(j,k), coeff(k,k));\n    Scalar c2 = Vector2(coeff(i,i), coeff(i,j)).norm();\n    if((odd && res[0]<Scalar(0)) || ((!odd) && res[0]>Scalar(0))) {\n      if(res[0] > Scalar(0)) {\n        res[0] -= Scalar(EIGEN_PI);\n      }\n      else {\n        res[0] += Scalar(EIGEN_PI);\n      }\n      res[1] = atan2(-coeff(i,k), -c2);\n    }\n    else\n      res[1] = atan2(-coeff(i,k), c2);\n    Scalar s1 = sin(res[0]);\n    Scalar c1 = cos(res[0]);\n    res[2] = atan2(s1*coeff(k,i)-c1*coeff(j,i), c1*coeff(j,j) - s1 * coeff(k,j));\n  }\n  if (!odd)\n    res = -res;\n  \n  return res;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_EULERANGLES_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/Homogeneous.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_HOMOGENEOUS_H\n#define EIGEN_HOMOGENEOUS_H\n\nnamespace Eigen { \n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\class Homogeneous\n  *\n  * \\brief Expression of one (or a set of) homogeneous vector(s)\n  *\n  * \\param MatrixType the type of the object in which we are making homogeneous\n  *\n  * This class represents an expression of one (or a set of) homogeneous vector(s).\n  * It is the return type of MatrixBase::homogeneous() and most of the time\n  * this is the only way it is used.\n  *\n  * \\sa MatrixBase::homogeneous()\n  */\n\nnamespace internal {\n\ntemplate<typename MatrixType,int Direction>\nstruct traits<Homogeneous<MatrixType,Direction> >\n : traits<MatrixType>\n{\n  typedef typename traits<MatrixType>::StorageKind StorageKind;\n  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;\n  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;\n  enum {\n    RowsPlusOne = (MatrixType::RowsAtCompileTime != Dynamic) ?\n                  int(MatrixType::RowsAtCompileTime) + 1 : Dynamic,\n    ColsPlusOne = (MatrixType::ColsAtCompileTime != Dynamic) ?\n                  int(MatrixType::ColsAtCompileTime) + 1 : Dynamic,\n    RowsAtCompileTime = Direction==Vertical  ?  RowsPlusOne : MatrixType::RowsAtCompileTime,\n    ColsAtCompileTime = Direction==Horizontal ? ColsPlusOne : MatrixType::ColsAtCompileTime,\n    MaxRowsAtCompileTime = RowsAtCompileTime,\n    MaxColsAtCompileTime = ColsAtCompileTime,\n    TmpFlags = _MatrixTypeNested::Flags & HereditaryBits,\n    Flags = ColsAtCompileTime==1 ? 
(TmpFlags & ~RowMajorBit)\n          : RowsAtCompileTime==1 ? (TmpFlags | RowMajorBit)\n          : TmpFlags\n  };\n};\n\ntemplate<typename MatrixType,typename Lhs> struct homogeneous_left_product_impl;\ntemplate<typename MatrixType,typename Rhs> struct homogeneous_right_product_impl;\n\n} // end namespace internal\n\ntemplate<typename MatrixType,int _Direction> class Homogeneous\n  : public MatrixBase<Homogeneous<MatrixType,_Direction> >, internal::no_assignment_operator\n{\n  public:\n\n    typedef MatrixType NestedExpression;\n    enum { Direction = _Direction };\n\n    typedef MatrixBase<Homogeneous> Base;\n    EIGEN_DENSE_PUBLIC_INTERFACE(Homogeneous)\n\n    EIGEN_DEVICE_FUNC explicit inline Homogeneous(const MatrixType& matrix)\n      : m_matrix(matrix)\n    {}\n\n    EIGEN_DEVICE_FUNC inline Index rows() const { return m_matrix.rows() + (int(Direction)==Vertical   ? 1 : 0); }\n    EIGEN_DEVICE_FUNC inline Index cols() const { return m_matrix.cols() + (int(Direction)==Horizontal ? 
1 : 0); }\n    \n    EIGEN_DEVICE_FUNC const NestedExpression& nestedExpression() const { return m_matrix; }\n\n    template<typename Rhs>\n    EIGEN_DEVICE_FUNC inline const Product<Homogeneous,Rhs>\n    operator* (const MatrixBase<Rhs>& rhs) const\n    {\n      eigen_assert(int(Direction)==Horizontal);\n      return Product<Homogeneous,Rhs>(*this,rhs.derived());\n    }\n\n    template<typename Lhs> friend\n    EIGEN_DEVICE_FUNC inline const Product<Lhs,Homogeneous>\n    operator* (const MatrixBase<Lhs>& lhs, const Homogeneous& rhs)\n    {\n      eigen_assert(int(Direction)==Vertical);\n      return Product<Lhs,Homogeneous>(lhs.derived(),rhs);\n    }\n\n    template<typename Scalar, int Dim, int Mode, int Options> friend\n    EIGEN_DEVICE_FUNC inline const Product<Transform<Scalar,Dim,Mode,Options>, Homogeneous >\n    operator* (const Transform<Scalar,Dim,Mode,Options>& lhs, const Homogeneous& rhs)\n    {\n      eigen_assert(int(Direction)==Vertical);\n      return Product<Transform<Scalar,Dim,Mode,Options>, Homogeneous>(lhs,rhs);\n    }\n\n    template<typename Func>\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::result_of<Func(Scalar,Scalar)>::type\n    redux(const Func& func) const\n    {\n      return func(m_matrix.redux(func), Scalar(1));\n    }\n\n  protected:\n    typename MatrixType::Nested m_matrix;\n};\n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\returns a vector expression that is one longer than the vector argument, with the value 1 symbolically appended as the last coefficient.\n  *\n  * This can be used to convert affine coordinates to homogeneous coordinates.\n  *\n  * \\only_for_vectors\n  *\n  * Example: \\include MatrixBase_homogeneous.cpp\n  * Output: \\verbinclude MatrixBase_homogeneous.out\n  *\n  * \\sa VectorwiseOp::homogeneous(), class Homogeneous\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::HomogeneousReturnType\nMatrixBase<Derived>::homogeneous() const\n{\n  
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);\n  return HomogeneousReturnType(derived());\n}\n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\returns an expression where the value 1 is symbolically appended as the final coefficient to each column (or row) of the matrix.\n  *\n  * This can be used to convert affine coordinates to homogeneous coordinates.\n  *\n  * Example: \\include VectorwiseOp_homogeneous.cpp\n  * Output: \\verbinclude VectorwiseOp_homogeneous.out\n  *\n  * \\sa MatrixBase::homogeneous(), class Homogeneous */\ntemplate<typename ExpressionType, int Direction>\nEIGEN_DEVICE_FUNC inline Homogeneous<ExpressionType,Direction>\nVectorwiseOp<ExpressionType,Direction>::homogeneous() const\n{\n  return HomogeneousReturnType(_expression());\n}\n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\brief homogeneous normalization\n  *\n  * \\returns a vector expression of the N-1 first coefficients of \\c *this divided by that last coefficient.\n  *\n  * This can be used to convert homogeneous coordinates to affine coordinates.\n  *\n  * It is essentially a shortcut for:\n  * \\code\n    this->head(this->size()-1)/this->coeff(this->size()-1);\n    \\endcode\n  *\n  * Example: \\include MatrixBase_hnormalized.cpp\n  * Output: \\verbinclude MatrixBase_hnormalized.out\n  *\n  * \\sa VectorwiseOp::hnormalized() */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline const typename MatrixBase<Derived>::HNormalizedReturnType\nMatrixBase<Derived>::hnormalized() const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);\n  return ConstStartMinusOne(derived(),0,0,\n    ColsAtCompileTime==1?size()-1:1,\n    ColsAtCompileTime==1?1:size()-1) / coeff(size()-1);\n}\n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\brief column or row-wise homogeneous normalization\n  *\n  * \\returns an expression of the first N-1 coefficients of each column (or row) of \\c *this divided by the last coefficient of each column (or row).\n  *\n  * This can be used 
to convert homogeneous coordinates to affine coordinates.\n  *\n  * It is conceptually equivalent to calling MatrixBase::hnormalized() to each column (or row) of \\c *this.\n  *\n  * Example: \\include DirectionWise_hnormalized.cpp\n  * Output: \\verbinclude DirectionWise_hnormalized.out\n  *\n  * \\sa MatrixBase::hnormalized() */\ntemplate<typename ExpressionType, int Direction>\nEIGEN_DEVICE_FUNC inline const typename VectorwiseOp<ExpressionType,Direction>::HNormalizedReturnType\nVectorwiseOp<ExpressionType,Direction>::hnormalized() const\n{\n  return HNormalized_Block(_expression(),0,0,\n      Direction==Vertical   ? _expression().rows()-1 : _expression().rows(),\n      Direction==Horizontal ? _expression().cols()-1 : _expression().cols()).cwiseQuotient(\n      Replicate<HNormalized_Factors,\n                Direction==Vertical   ? HNormalized_SizeMinusOne : 1,\n                Direction==Horizontal ? HNormalized_SizeMinusOne : 1>\n        (HNormalized_Factors(_expression(),\n          Direction==Vertical    ? _expression().rows()-1:0,\n          Direction==Horizontal  ? _expression().cols()-1:0,\n          Direction==Vertical    ? 1 : _expression().rows(),\n          Direction==Horizontal  ? 1 : _expression().cols()),\n         Direction==Vertical   ? _expression().rows()-1 : 1,\n         Direction==Horizontal ? 
_expression().cols()-1 : 1));\n}\n\nnamespace internal {\n\ntemplate<typename MatrixOrTransformType>\nstruct take_matrix_for_product\n{\n  typedef MatrixOrTransformType type;\n  EIGEN_DEVICE_FUNC static const type& run(const type &x) { return x; }\n};\n\ntemplate<typename Scalar, int Dim, int Mode,int Options>\nstruct take_matrix_for_product<Transform<Scalar, Dim, Mode, Options> >\n{\n  typedef Transform<Scalar, Dim, Mode, Options> TransformType;\n  typedef typename internal::add_const<typename TransformType::ConstAffinePart>::type type;\n  EIGEN_DEVICE_FUNC static type run (const TransformType& x) { return x.affine(); }\n};\n\ntemplate<typename Scalar, int Dim, int Options>\nstruct take_matrix_for_product<Transform<Scalar, Dim, Projective, Options> >\n{\n  typedef Transform<Scalar, Dim, Projective, Options> TransformType;\n  typedef typename TransformType::MatrixType type;\n  EIGEN_DEVICE_FUNC static const type& run (const TransformType& x) { return x.matrix(); }\n};\n\ntemplate<typename MatrixType,typename Lhs>\nstruct traits<homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> >\n{\n  typedef typename take_matrix_for_product<Lhs>::type LhsMatrixType;\n  typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;\n  typedef typename remove_all<LhsMatrixType>::type LhsMatrixTypeCleaned;\n  typedef typename make_proper_matrix_type<\n                 typename traits<MatrixTypeCleaned>::Scalar,\n                 LhsMatrixTypeCleaned::RowsAtCompileTime,\n                 MatrixTypeCleaned::ColsAtCompileTime,\n                 MatrixTypeCleaned::PlainObject::Options,\n                 LhsMatrixTypeCleaned::MaxRowsAtCompileTime,\n                 MatrixTypeCleaned::MaxColsAtCompileTime>::type ReturnType;\n};\n\ntemplate<typename MatrixType,typename Lhs>\nstruct homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs>\n  : public ReturnByValue<homogeneous_left_product_impl<Homogeneous<MatrixType,Vertical>,Lhs> >\n{\n  typedef typename 
traits<homogeneous_left_product_impl>::LhsMatrixType LhsMatrixType;\n  typedef typename remove_all<LhsMatrixType>::type LhsMatrixTypeCleaned;\n  typedef typename remove_all<typename LhsMatrixTypeCleaned::Nested>::type LhsMatrixTypeNested;\n  EIGEN_DEVICE_FUNC homogeneous_left_product_impl(const Lhs& lhs, const MatrixType& rhs)\n    : m_lhs(take_matrix_for_product<Lhs>::run(lhs)),\n      m_rhs(rhs)\n  {}\n\n  EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); }\n  EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); }\n\n  template<typename Dest> EIGEN_DEVICE_FUNC void evalTo(Dest& dst) const\n  {\n    // FIXME investigate how to allow lazy evaluation of this product when possible\n    dst = Block<const LhsMatrixTypeNested,\n              LhsMatrixTypeNested::RowsAtCompileTime,\n              LhsMatrixTypeNested::ColsAtCompileTime==Dynamic?Dynamic:LhsMatrixTypeNested::ColsAtCompileTime-1>\n            (m_lhs,0,0,m_lhs.rows(),m_lhs.cols()-1) * m_rhs;\n    dst += m_lhs.col(m_lhs.cols()-1).rowwise()\n            .template replicate<MatrixType::ColsAtCompileTime>(m_rhs.cols());\n  }\n\n  typename LhsMatrixTypeCleaned::Nested m_lhs;\n  typename MatrixType::Nested m_rhs;\n};\n\ntemplate<typename MatrixType,typename Rhs>\nstruct traits<homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> >\n{\n  typedef typename make_proper_matrix_type<typename traits<MatrixType>::Scalar,\n                 MatrixType::RowsAtCompileTime,\n                 Rhs::ColsAtCompileTime,\n                 MatrixType::PlainObject::Options,\n                 MatrixType::MaxRowsAtCompileTime,\n                 Rhs::MaxColsAtCompileTime>::type ReturnType;\n};\n\ntemplate<typename MatrixType,typename Rhs>\nstruct homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs>\n  : public ReturnByValue<homogeneous_right_product_impl<Homogeneous<MatrixType,Horizontal>,Rhs> >\n{\n  typedef typename remove_all<typename Rhs::Nested>::type RhsNested;\n  
EIGEN_DEVICE_FUNC homogeneous_right_product_impl(const MatrixType& lhs, const Rhs& rhs)\n    : m_lhs(lhs), m_rhs(rhs)\n  {}\n\n  EIGEN_DEVICE_FUNC inline Index rows() const { return m_lhs.rows(); }\n  EIGEN_DEVICE_FUNC inline Index cols() const { return m_rhs.cols(); }\n\n  template<typename Dest> EIGEN_DEVICE_FUNC void evalTo(Dest& dst) const\n  {\n    // FIXME investigate how to allow lazy evaluation of this product when possible\n    dst = m_lhs * Block<const RhsNested,\n                        RhsNested::RowsAtCompileTime==Dynamic?Dynamic:RhsNested::RowsAtCompileTime-1,\n                        RhsNested::ColsAtCompileTime>\n            (m_rhs,0,0,m_rhs.rows()-1,m_rhs.cols());\n    dst += m_rhs.row(m_rhs.rows()-1).colwise()\n            .template replicate<MatrixType::RowsAtCompileTime>(m_lhs.rows());\n  }\n\n  typename MatrixType::Nested m_lhs;\n  typename Rhs::Nested m_rhs;\n};\n\ntemplate<typename ArgType,int Direction>\nstruct evaluator_traits<Homogeneous<ArgType,Direction> >\n{\n  typedef typename storage_kind_to_evaluator_kind<typename ArgType::StorageKind>::Kind Kind;\n  typedef HomogeneousShape Shape;  \n};\n\ntemplate<> struct AssignmentKind<DenseShape,HomogeneousShape> { typedef Dense2Dense Kind; };\n\n\ntemplate<typename ArgType,int Direction>\nstruct unary_evaluator<Homogeneous<ArgType,Direction>, IndexBased>\n  : evaluator<typename Homogeneous<ArgType,Direction>::PlainObject >\n{\n  typedef Homogeneous<ArgType,Direction> XprType;\n  typedef typename XprType::PlainObject PlainObject;\n  typedef evaluator<PlainObject> Base;\n\n  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op)\n    : Base(), m_temp(op)\n  {\n    ::new (static_cast<Base*>(this)) Base(m_temp);\n  }\n\nprotected:\n  PlainObject m_temp;\n};\n\n// dense = homogeneous\ntemplate< typename DstXprType, typename ArgType, typename Scalar>\nstruct Assignment<DstXprType, Homogeneous<ArgType,Vertical>, internal::assign_op<Scalar,typename ArgType::Scalar>, Dense2Dense>\n{\n  typedef 
Homogeneous<ArgType,Vertical> SrcXprType;\n  EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename ArgType::Scalar> &)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n\n    dst.template topRows<ArgType::RowsAtCompileTime>(src.nestedExpression().rows()) = src.nestedExpression();\n    dst.row(dst.rows()-1).setOnes();\n  }\n};\n\n// dense = homogeneous\ntemplate< typename DstXprType, typename ArgType, typename Scalar>\nstruct Assignment<DstXprType, Homogeneous<ArgType,Horizontal>, internal::assign_op<Scalar,typename ArgType::Scalar>, Dense2Dense>\n{\n  typedef Homogeneous<ArgType,Horizontal> SrcXprType;\n  EIGEN_DEVICE_FUNC static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename ArgType::Scalar> &)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n\n    dst.template leftCols<ArgType::ColsAtCompileTime>(src.nestedExpression().cols()) = src.nestedExpression();\n    dst.col(dst.cols()-1).setOnes();\n  }\n};\n\ntemplate<typename LhsArg, typename Rhs, int ProductTag>\nstruct generic_product_impl<Homogeneous<LhsArg,Horizontal>, Rhs, HomogeneousShape, DenseShape, ProductTag>\n{\n  template<typename Dest>\n  EIGEN_DEVICE_FUNC static void evalTo(Dest& dst, const Homogeneous<LhsArg,Horizontal>& lhs, const Rhs& rhs)\n  {\n    homogeneous_right_product_impl<Homogeneous<LhsArg,Horizontal>, Rhs>(lhs.nestedExpression(), rhs).evalTo(dst);\n  }\n};\n\ntemplate<typename Lhs,typename Rhs>\nstruct homogeneous_right_product_refactoring_helper\n{\n  enum {\n    Dim  = Lhs::ColsAtCompileTime,\n    Rows = Lhs::RowsAtCompileTime\n  };\n  typedef typename Rhs::template ConstNRowsBlockXpr<Dim>::Type          LinearBlockConst;\n  typedef typename 
remove_const<LinearBlockConst>::type                 LinearBlock;\n  typedef typename Rhs::ConstRowXpr                                     ConstantColumn;\n  typedef Replicate<const ConstantColumn,Rows,1>                        ConstantBlock;\n  typedef Product<Lhs,LinearBlock,LazyProduct>                          LinearProduct;\n  typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar,typename Rhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr;\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductTag>\nstruct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, HomogeneousShape, DenseShape>\n : public evaluator<typename homogeneous_right_product_refactoring_helper<typename Lhs::NestedExpression,Rhs>::Xpr>\n{\n  typedef Product<Lhs, Rhs, LazyProduct> XprType;\n  typedef homogeneous_right_product_refactoring_helper<typename Lhs::NestedExpression,Rhs> helper;\n  typedef typename helper::ConstantBlock ConstantBlock;\n  typedef typename helper::Xpr RefactoredXpr;\n  typedef evaluator<RefactoredXpr> Base;\n  \n  EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)\n    : Base(  xpr.lhs().nestedExpression() .lazyProduct(  xpr.rhs().template topRows<helper::Dim>(xpr.lhs().nestedExpression().cols()) )\n            + ConstantBlock(xpr.rhs().row(xpr.rhs().rows()-1),xpr.lhs().rows(), 1) )\n  {}\n};\n\ntemplate<typename Lhs, typename RhsArg, int ProductTag>\nstruct generic_product_impl<Lhs, Homogeneous<RhsArg,Vertical>, DenseShape, HomogeneousShape, ProductTag>\n{\n  template<typename Dest>\n  EIGEN_DEVICE_FUNC static void evalTo(Dest& dst, const Lhs& lhs, const Homogeneous<RhsArg,Vertical>& rhs)\n  {\n    homogeneous_left_product_impl<Homogeneous<RhsArg,Vertical>, Lhs>(lhs, rhs.nestedExpression()).evalTo(dst);\n  }\n};\n\n// TODO: the following specialization is to address a regression from 3.2 to 3.3\n// In the future, this path should be optimized.\ntemplate<typename Lhs, typename RhsArg, int ProductTag>\nstruct 
generic_product_impl<Lhs, Homogeneous<RhsArg,Vertical>, TriangularShape, HomogeneousShape, ProductTag>\n{\n  template<typename Dest>\n  static void evalTo(Dest& dst, const Lhs& lhs, const Homogeneous<RhsArg,Vertical>& rhs)\n  {\n    dst.noalias() = lhs * rhs.eval();\n  }\n};\n\ntemplate<typename Lhs,typename Rhs>\nstruct homogeneous_left_product_refactoring_helper\n{\n  enum {\n    Dim = Rhs::RowsAtCompileTime,\n    Cols = Rhs::ColsAtCompileTime\n  };\n  typedef typename Lhs::template ConstNColsBlockXpr<Dim>::Type          LinearBlockConst;\n  typedef typename remove_const<LinearBlockConst>::type                 LinearBlock;\n  typedef typename Lhs::ConstColXpr                                     ConstantColumn;\n  typedef Replicate<const ConstantColumn,1,Cols>                        ConstantBlock;\n  typedef Product<LinearBlock,Rhs,LazyProduct>                          LinearProduct;\n  typedef CwiseBinaryOp<internal::scalar_sum_op<typename Lhs::Scalar,typename Rhs::Scalar>, const LinearProduct, const ConstantBlock> Xpr;\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductTag>\nstruct product_evaluator<Product<Lhs, Rhs, LazyProduct>, ProductTag, DenseShape, HomogeneousShape>\n : public evaluator<typename homogeneous_left_product_refactoring_helper<Lhs,typename Rhs::NestedExpression>::Xpr>\n{\n  typedef Product<Lhs, Rhs, LazyProduct> XprType;\n  typedef homogeneous_left_product_refactoring_helper<Lhs,typename Rhs::NestedExpression> helper;\n  typedef typename helper::ConstantBlock ConstantBlock;\n  typedef typename helper::Xpr RefactoredXpr;\n  typedef evaluator<RefactoredXpr> Base;\n  \n  EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr)\n    : Base(   xpr.lhs().template leftCols<helper::Dim>(xpr.rhs().nestedExpression().rows()) .lazyProduct( xpr.rhs().nestedExpression() )\n            + ConstantBlock(xpr.lhs().col(xpr.lhs().cols()-1),1,xpr.rhs().cols()) )\n  {}\n};\n\ntemplate<typename Scalar, int Dim, int Mode,int Options, typename RhsArg, int 
ProductTag>\nstruct generic_product_impl<Transform<Scalar,Dim,Mode,Options>, Homogeneous<RhsArg,Vertical>, DenseShape, HomogeneousShape, ProductTag>\n{\n  typedef Transform<Scalar,Dim,Mode,Options> TransformType;\n  template<typename Dest>\n  EIGEN_DEVICE_FUNC static void evalTo(Dest& dst, const TransformType& lhs, const Homogeneous<RhsArg,Vertical>& rhs)\n  {\n    homogeneous_left_product_impl<Homogeneous<RhsArg,Vertical>, TransformType>(lhs, rhs.nestedExpression()).evalTo(dst);\n  }\n};\n\ntemplate<typename ExpressionType, int Side, bool Transposed>\nstruct permutation_matrix_product<ExpressionType, Side, Transposed, HomogeneousShape>\n  : public permutation_matrix_product<ExpressionType, Side, Transposed, DenseShape>\n{};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_HOMOGENEOUS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/Hyperplane.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_HYPERPLANE_H\n#define EIGEN_HYPERPLANE_H\n\nnamespace Eigen { \n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\class Hyperplane\n  *\n  * \\brief A hyperplane\n  *\n  * A hyperplane is an affine subspace of dimension n-1 in a space of dimension n.\n  * For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane.\n  *\n  * \\tparam _Scalar the scalar type, i.e., the type of the coefficients\n  * \\tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.\n  *             Notice that the dimension of the hyperplane is _AmbientDim-1.\n  *\n  * This class represents an hyperplane as the zero set of the implicit equation\n  * \\f$ n \\cdot x + d = 0 \\f$ where \\f$ n \\f$ is a unit normal vector of the plane (linear part)\n  * and \\f$ d \\f$ is the distance (offset) to the origin.\n  */\ntemplate <typename _Scalar, int _AmbientDim, int _Options>\nclass Hyperplane\n{\npublic:\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1)\n  enum {\n    AmbientDimAtCompileTime = _AmbientDim,\n    Options = _Options\n  };\n  typedef _Scalar Scalar;\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n  typedef Matrix<Scalar,AmbientDimAtCompileTime,1> VectorType;\n  typedef Matrix<Scalar,Index(AmbientDimAtCompileTime)==Dynamic\n                        ? 
Dynamic\n                        : Index(AmbientDimAtCompileTime)+1,1,Options> Coefficients;\n  typedef Block<Coefficients,AmbientDimAtCompileTime,1> NormalReturnType;\n  typedef const Block<const Coefficients,AmbientDimAtCompileTime,1> ConstNormalReturnType;\n\n  /** Default constructor without initialization */\n  EIGEN_DEVICE_FUNC inline Hyperplane() {}\n  \n  template<int OtherOptions>\n  EIGEN_DEVICE_FUNC Hyperplane(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)\n   : m_coeffs(other.coeffs())\n  {}\n\n  /** Constructs a dynamic-size hyperplane with \\a _dim the dimension\n    * of the ambient space */\n  EIGEN_DEVICE_FUNC inline explicit Hyperplane(Index _dim) : m_coeffs(_dim+1) {}\n\n  /** Construct a plane from its normal \\a n and a point \\a e onto the plane.\n    * \\warning the vector normal is assumed to be normalized.\n    */\n  EIGEN_DEVICE_FUNC inline Hyperplane(const VectorType& n, const VectorType& e)\n    : m_coeffs(n.size()+1)\n  {\n    normal() = n;\n    offset() = -n.dot(e);\n  }\n\n  /** Constructs a plane from its normal \\a n and distance to the origin \\a d\n    * such that the algebraic equation of the plane is \\f$ n \\cdot x + d = 0 \\f$.\n    * \\warning the vector normal is assumed to be normalized.\n    */\n  EIGEN_DEVICE_FUNC inline Hyperplane(const VectorType& n, const Scalar& d)\n    : m_coeffs(n.size()+1)\n  {\n    normal() = n;\n    offset() = d;\n  }\n\n  /** Constructs a hyperplane passing through the two points. If the dimension of the ambient space\n    * is greater than 2, then there isn't uniqueness, so an arbitrary choice is made.\n    */\n  EIGEN_DEVICE_FUNC static inline Hyperplane Through(const VectorType& p0, const VectorType& p1)\n  {\n    Hyperplane result(p0.size());\n    result.normal() = (p1 - p0).unitOrthogonal();\n    result.offset() = -p0.dot(result.normal());\n    return result;\n  }\n\n  /** Constructs a hyperplane passing through the three points. 
The dimension of the ambient space\n    * is required to be exactly 3.\n    */\n  EIGEN_DEVICE_FUNC static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2)\n  {\n    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3)\n    Hyperplane result(p0.size());\n    VectorType v0(p2 - p0), v1(p1 - p0);\n    result.normal() = v0.cross(v1);\n    RealScalar norm = result.normal().norm();\n    if(norm <= v0.norm() * v1.norm() * NumTraits<RealScalar>::epsilon())\n    {\n      Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();\n      JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);\n      result.normal() = svd.matrixV().col(2);\n    }\n    else\n      result.normal() /= norm;\n    result.offset() = -p0.dot(result.normal());\n    return result;\n  }\n\n  /** Constructs a hyperplane passing through the parametrized line \\a parametrized.\n    * If the dimension of the ambient space is greater than 2, then there isn't uniqueness,\n    * so an arbitrary choice is made.\n    */\n  // FIXME to be consitent with the rest this could be implemented as a static Through function ??\n  EIGEN_DEVICE_FUNC explicit Hyperplane(const ParametrizedLine<Scalar, AmbientDimAtCompileTime>& parametrized)\n  {\n    normal() = parametrized.direction().unitOrthogonal();\n    offset() = -parametrized.origin().dot(normal());\n  }\n\n  EIGEN_DEVICE_FUNC ~Hyperplane() {}\n\n  /** \\returns the dimension in which the plane holds */\n  EIGEN_DEVICE_FUNC inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? 
m_coeffs.size()-1 : Index(AmbientDimAtCompileTime); }\n\n  /** normalizes \\c *this */\n  EIGEN_DEVICE_FUNC void normalize(void)\n  {\n    m_coeffs /= normal().norm();\n  }\n\n  /** \\returns the signed distance between the plane \\c *this and a point \\a p.\n    * \\sa absDistance()\n    */\n  EIGEN_DEVICE_FUNC inline Scalar signedDistance(const VectorType& p) const { return normal().dot(p) + offset(); }\n\n  /** \\returns the absolute distance between the plane \\c *this and a point \\a p.\n    * \\sa signedDistance()\n    */\n  EIGEN_DEVICE_FUNC inline Scalar absDistance(const VectorType& p) const { return numext::abs(signedDistance(p)); }\n\n  /** \\returns the projection of a point \\a p onto the plane \\c *this.\n    */\n  EIGEN_DEVICE_FUNC inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); }\n\n  /** \\returns a constant reference to the unit normal vector of the plane, which corresponds\n    * to the linear part of the implicit equation.\n    */\n  EIGEN_DEVICE_FUNC inline ConstNormalReturnType normal() const { return ConstNormalReturnType(m_coeffs,0,0,dim(),1); }\n\n  /** \\returns a non-constant reference to the unit normal vector of the plane, which corresponds\n    * to the linear part of the implicit equation.\n    */\n  EIGEN_DEVICE_FUNC inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); }\n\n  /** \\returns the distance to the origin, which is also the \"constant term\" of the implicit equation\n    * \\warning the vector normal is assumed to be normalized.\n    */\n  EIGEN_DEVICE_FUNC inline const Scalar& offset() const { return m_coeffs.coeff(dim()); }\n\n  /** \\returns a non-constant reference to the distance to the origin, which is also the constant part\n    * of the implicit equation */\n  EIGEN_DEVICE_FUNC inline Scalar& offset() { return m_coeffs(dim()); }\n\n  /** \\returns a constant reference to the coefficients c_i of the plane equation:\n    * \\f$ c_0*x_0 + 
... + c_{d-1}*x_{d-1} + c_d = 0 \\f$\n    */\n  EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs; }\n\n  /** \\returns a non-constant reference to the coefficients c_i of the plane equation:\n    * \\f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \\f$\n    */\n  EIGEN_DEVICE_FUNC inline Coefficients& coeffs() { return m_coeffs; }\n\n  /** \\returns the intersection of *this with \\a other.\n    *\n    * \\warning The ambient space must be a plane, i.e. have dimension 2, so that \\c *this and \\a other are lines.\n    *\n    * \\note If \\a other is approximately parallel to *this, this method will return any point on *this.\n    */\n  EIGEN_DEVICE_FUNC VectorType intersection(const Hyperplane& other) const\n  {\n    EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)\n    Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0);\n    // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests\n    // whether the two lines are approximately parallel.\n    if(internal::isMuchSmallerThan(det, Scalar(1)))\n    {   // special case where the two lines are approximately parallel. 
Pick any point on the first line.\n        if(numext::abs(coeffs().coeff(1))>numext::abs(coeffs().coeff(0)))\n            return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0));\n        else\n            return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0));\n    }\n    else\n    {   // general case\n        Scalar invdet = Scalar(1) / det;\n        return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)),\n                          invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2)));\n    }\n  }\n\n  /** Applies the transformation matrix \\a mat to \\c *this and returns a reference to \\c *this.\n    *\n    * \\param mat the Dim x Dim transformation matrix\n    * \\param traits specifies whether the matrix \\a mat represents an #Isometry\n    *               or a more generic #Affine transformation. The default is #Affine.\n    */\n  template<typename XprType>\n  EIGEN_DEVICE_FUNC inline Hyperplane& transform(const MatrixBase<XprType>& mat, TransformTraits traits = Affine)\n  {\n    if (traits==Affine)\n    {\n      normal() = mat.inverse().transpose() * normal();\n      m_coeffs /= normal().norm();\n    }\n    else if (traits==Isometry)\n      normal() = mat * normal();\n    else\n    {\n      eigen_assert(0 && \"invalid traits value in Hyperplane::transform()\");\n    }\n    return *this;\n  }\n\n  /** Applies the transformation \\a t to \\c *this and returns a reference to \\c *this.\n    *\n    * \\param t the transformation of dimension Dim\n    * \\param traits specifies whether the transformation \\a t represents an #Isometry\n    *               or a more generic #Affine transformation. 
The default is #Affine.\n    *               Other kind of transformations are not supported.\n    */\n  template<int TrOptions>\n  EIGEN_DEVICE_FUNC inline Hyperplane& transform(const Transform<Scalar,AmbientDimAtCompileTime,Affine,TrOptions>& t,\n                                TransformTraits traits = Affine)\n  {\n    transform(t.linear(), traits);\n    offset() -= normal().dot(t.translation());\n    return *this;\n  }\n\n  /** \\returns \\c *this with scalar type casted to \\a NewScalarType\n    *\n    * Note that if \\a NewScalarType is equal to the current scalar type of \\c *this\n    * then this function smartly returns a const reference to \\c *this.\n    */\n  template<typename NewScalarType>\n  EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Hyperplane,\n           Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const\n  {\n    return typename internal::cast_return_type<Hyperplane,\n                    Hyperplane<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this);\n  }\n\n  /** Copy constructor with scalar type conversion */\n  template<typename OtherScalarType,int OtherOptions>\n  EIGEN_DEVICE_FUNC inline explicit Hyperplane(const Hyperplane<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other)\n  { m_coeffs = other.coeffs().template cast<Scalar>(); }\n\n  /** \\returns \\c true if \\c *this is approximately equal to \\a other, within the precision\n    * determined by \\a prec.\n    *\n    * \\sa MatrixBase::isApprox() */\n  template<int OtherOptions>\n  EIGEN_DEVICE_FUNC bool isApprox(const Hyperplane<Scalar,AmbientDimAtCompileTime,OtherOptions>& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const\n  { return m_coeffs.isApprox(other.m_coeffs, prec); }\n\nprotected:\n\n  Coefficients m_coeffs;\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_HYPERPLANE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/OrthoMethods.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ORTHOMETHODS_H\n#define EIGEN_ORTHOMETHODS_H\n\nnamespace Eigen { \n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\returns the cross product of \\c *this and \\a other\n  *\n  * Here is a very good explanation of cross-product: http://xkcd.com/199/\n  * \n  * With complex numbers, the cross product is implemented as\n  * \\f$ (\\mathbf{a}+i\\mathbf{b}) \\times (\\mathbf{c}+i\\mathbf{d}) = (\\mathbf{a} \\times \\mathbf{c} - \\mathbf{b} \\times \\mathbf{d}) - i(\\mathbf{a} \\times \\mathbf{d} - \\mathbf{b} \\times \\mathbf{c})\\f$\n  * \n  * \\sa MatrixBase::cross3()\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\n#ifndef EIGEN_PARSED_BY_DOXYGEN\nEIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::template cross_product_return_type<OtherDerived>::type\n#else\ninline typename MatrixBase<Derived>::PlainObject\n#endif\nMatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,3)\n  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3)\n\n  // Note that there is no need for an expression here since the compiler\n  // optimize such a small temporary very well (even within a complex expression)\n  typename internal::nested_eval<Derived,2>::type lhs(derived());\n  typename internal::nested_eval<OtherDerived,2>::type rhs(other.derived());\n  return typename cross_product_return_type<OtherDerived>::type(\n    numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),\n    
numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),\n    numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0))\n  );\n}\n\nnamespace internal {\n\ntemplate< int Arch,typename VectorLhs,typename VectorRhs,\n          typename Scalar = typename VectorLhs::Scalar,\n          bool Vectorizable = bool((VectorLhs::Flags&VectorRhs::Flags)&PacketAccessBit)>\nstruct cross3_impl {\n  EIGEN_DEVICE_FUNC static inline typename internal::plain_matrix_type<VectorLhs>::type\n  run(const VectorLhs& lhs, const VectorRhs& rhs)\n  {\n    return typename internal::plain_matrix_type<VectorLhs>::type(\n      numext::conj(lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1)),\n      numext::conj(lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2)),\n      numext::conj(lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)),\n      0\n    );\n  }\n};\n\n}\n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\returns the cross product of \\c *this and \\a other using only the x, y, and z coefficients\n  *\n  * The size of \\c *this and \\a other must be four. 
This function is especially useful\n  * when using 4D vectors instead of 3D ones to get advantage of SSE/AltiVec vectorization.\n  *\n  * \\sa MatrixBase::cross()\n  */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::PlainObject\nMatrixBase<Derived>::cross3(const MatrixBase<OtherDerived>& other) const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,4)\n  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,4)\n\n  typedef typename internal::nested_eval<Derived,2>::type DerivedNested;\n  typedef typename internal::nested_eval<OtherDerived,2>::type OtherDerivedNested;\n  DerivedNested lhs(derived());\n  OtherDerivedNested rhs(other.derived());\n\n  return internal::cross3_impl<Architecture::Target,\n                        typename internal::remove_all<DerivedNested>::type,\n                        typename internal::remove_all<OtherDerivedNested>::type>::run(lhs,rhs);\n}\n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\returns a matrix expression of the cross product of each column or row\n  * of the referenced expression with the \\a other vector.\n  *\n  * The referenced matrix must have one dimension equal to 3.\n  * The result matrix has the same dimensions than the referenced one.\n  *\n  * \\sa MatrixBase::cross() */\ntemplate<typename ExpressionType, int Direction>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC \nconst typename VectorwiseOp<ExpressionType,Direction>::CrossReturnType\nVectorwiseOp<ExpressionType,Direction>::cross(const MatrixBase<OtherDerived>& other) const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3)\n  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),\n    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)\n  \n  typename internal::nested_eval<ExpressionType,2>::type mat(_expression());\n  typename 
internal::nested_eval<OtherDerived,2>::type vec(other.derived());\n\n  CrossReturnType res(_expression().rows(),_expression().cols());\n  if(Direction==Vertical)\n  {\n    eigen_assert(CrossReturnType::RowsAtCompileTime==3 && \"the matrix must have exactly 3 rows\");\n    res.row(0) = (mat.row(1) * vec.coeff(2) - mat.row(2) * vec.coeff(1)).conjugate();\n    res.row(1) = (mat.row(2) * vec.coeff(0) - mat.row(0) * vec.coeff(2)).conjugate();\n    res.row(2) = (mat.row(0) * vec.coeff(1) - mat.row(1) * vec.coeff(0)).conjugate();\n  }\n  else\n  {\n    eigen_assert(CrossReturnType::ColsAtCompileTime==3 && \"the matrix must have exactly 3 columns\");\n    res.col(0) = (mat.col(1) * vec.coeff(2) - mat.col(2) * vec.coeff(1)).conjugate();\n    res.col(1) = (mat.col(2) * vec.coeff(0) - mat.col(0) * vec.coeff(2)).conjugate();\n    res.col(2) = (mat.col(0) * vec.coeff(1) - mat.col(1) * vec.coeff(0)).conjugate();\n  }\n  return res;\n}\n\nnamespace internal {\n\ntemplate<typename Derived, int Size = Derived::SizeAtCompileTime>\nstruct unitOrthogonal_selector\n{\n  typedef typename plain_matrix_type<Derived>::type VectorType;\n  typedef typename traits<Derived>::Scalar Scalar;\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  typedef Matrix<Scalar,2,1> Vector2;\n  EIGEN_DEVICE_FUNC\n  static inline VectorType run(const Derived& src)\n  {\n    VectorType perp = VectorType::Zero(src.size());\n    Index maxi = 0;\n    Index sndi = 0;\n    src.cwiseAbs().maxCoeff(&maxi);\n    if (maxi==0)\n      sndi = 1;\n    RealScalar invnm = RealScalar(1)/(Vector2() << src.coeff(sndi),src.coeff(maxi)).finished().norm();\n    perp.coeffRef(maxi) = -numext::conj(src.coeff(sndi)) * invnm;\n    perp.coeffRef(sndi) =  numext::conj(src.coeff(maxi)) * invnm;\n\n    return perp;\n   }\n};\n\ntemplate<typename Derived>\nstruct unitOrthogonal_selector<Derived,3>\n{\n  typedef typename plain_matrix_type<Derived>::type VectorType;\n  typedef typename traits<Derived>::Scalar Scalar;\n  typedef 
typename NumTraits<Scalar>::Real RealScalar;\n  EIGEN_DEVICE_FUNC\n  static inline VectorType run(const Derived& src)\n  {\n    VectorType perp;\n    /* Let us compute the crossed product of *this with a vector\n     * that is not too close to being colinear to *this.\n     */\n\n    /* unless the x and y coords are both close to zero, we can\n     * simply take ( -y, x, 0 ) and normalize it.\n     */\n    if((!isMuchSmallerThan(src.x(), src.z()))\n    || (!isMuchSmallerThan(src.y(), src.z())))\n    {\n      RealScalar invnm = RealScalar(1)/src.template head<2>().norm();\n      perp.coeffRef(0) = -numext::conj(src.y())*invnm;\n      perp.coeffRef(1) = numext::conj(src.x())*invnm;\n      perp.coeffRef(2) = 0;\n    }\n    /* if both x and y are close to zero, then the vector is close\n     * to the z-axis, so it's far from colinear to the x-axis for instance.\n     * So we take the crossed product with (1,0,0) and normalize it.\n     */\n    else\n    {\n      RealScalar invnm = RealScalar(1)/src.template tail<2>().norm();\n      perp.coeffRef(0) = 0;\n      perp.coeffRef(1) = -numext::conj(src.z())*invnm;\n      perp.coeffRef(2) = numext::conj(src.y())*invnm;\n    }\n\n    return perp;\n   }\n};\n\ntemplate<typename Derived>\nstruct unitOrthogonal_selector<Derived,2>\n{\n  typedef typename plain_matrix_type<Derived>::type VectorType;\n  EIGEN_DEVICE_FUNC\n  static inline VectorType run(const Derived& src)\n  { return VectorType(-numext::conj(src.y()), numext::conj(src.x())).normalized(); }\n};\n\n} // end namespace internal\n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\returns a unit vector which is orthogonal to \\c *this\n  *\n  * The size of \\c *this must be at least 2. 
If the size is exactly 2,\n  * then the returned vector is a counter clock wise rotation of \\c *this, i.e., (-y,x).normalized().\n  *\n  * \\sa cross()\n  */\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC typename MatrixBase<Derived>::PlainObject\nMatrixBase<Derived>::unitOrthogonal() const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return internal::unitOrthogonal_selector<Derived>::run(derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_ORTHOMETHODS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/ParametrizedLine.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PARAMETRIZEDLINE_H\n#define EIGEN_PARAMETRIZEDLINE_H\n\nnamespace Eigen { \n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\class ParametrizedLine\n  *\n  * \\brief A parametrized line\n  *\n  * A parametrized line is defined by an origin point \\f$ \\mathbf{o} \\f$ and a unit\n  * direction vector \\f$ \\mathbf{d} \\f$ such that the line corresponds to\n  * the set \\f$ l(t) = \\mathbf{o} + t \\mathbf{d} \\f$, \\f$ t \\in \\mathbf{R} \\f$.\n  *\n  * \\tparam _Scalar the scalar type, i.e., the type of the coefficients\n  * \\tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.\n  */\ntemplate <typename _Scalar, int _AmbientDim, int _Options>\nclass ParametrizedLine\n{\npublic:\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)\n  enum {\n    AmbientDimAtCompileTime = _AmbientDim,\n    Options = _Options\n  };\n  typedef _Scalar Scalar;\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n  typedef Matrix<Scalar,AmbientDimAtCompileTime,1,Options> VectorType;\n\n  /** Default constructor without initialization */\n  EIGEN_DEVICE_FUNC inline ParametrizedLine() {}\n  \n  template<int OtherOptions>\n  EIGEN_DEVICE_FUNC ParametrizedLine(const ParametrizedLine<Scalar,AmbientDimAtCompileTime,OtherOptions>& other)\n   : m_origin(other.origin()), m_direction(other.direction())\n  {}\n\n  /** Constructs a dynamic-size line with \\a _dim the dimension\n    * of the 
ambient space */\n  EIGEN_DEVICE_FUNC inline explicit ParametrizedLine(Index _dim) : m_origin(_dim), m_direction(_dim) {}\n\n  /** Initializes a parametrized line of direction \\a direction and origin \\a origin.\n    * \\warning the vector direction is assumed to be normalized.\n    */\n  EIGEN_DEVICE_FUNC ParametrizedLine(const VectorType& origin, const VectorType& direction)\n    : m_origin(origin), m_direction(direction) {}\n\n  template <int OtherOptions>\n  EIGEN_DEVICE_FUNC explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane);\n\n  /** Constructs a parametrized line going from \\a p0 to \\a p1. */\n  EIGEN_DEVICE_FUNC static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1)\n  { return ParametrizedLine(p0, (p1-p0).normalized()); }\n\n  EIGEN_DEVICE_FUNC ~ParametrizedLine() {}\n\n  /** \\returns the dimension in which the line holds */\n  EIGEN_DEVICE_FUNC inline Index dim() const { return m_direction.size(); }\n\n  EIGEN_DEVICE_FUNC const VectorType& origin() const { return m_origin; }\n  EIGEN_DEVICE_FUNC VectorType& origin() { return m_origin; }\n\n  EIGEN_DEVICE_FUNC const VectorType& direction() const { return m_direction; }\n  EIGEN_DEVICE_FUNC VectorType& direction() { return m_direction; }\n\n  /** \\returns the squared distance of a point \\a p to its projection onto the line \\c *this.\n    * \\sa distance()\n    */\n  EIGEN_DEVICE_FUNC RealScalar squaredDistance(const VectorType& p) const\n  {\n    VectorType diff = p - origin();\n    return (diff - direction().dot(diff) * direction()).squaredNorm();\n  }\n  /** \\returns the distance of a point \\a p to its projection onto the line \\c *this.\n    * \\sa squaredDistance()\n    */\n  EIGEN_DEVICE_FUNC RealScalar distance(const VectorType& p) const { EIGEN_USING_STD_MATH(sqrt) return sqrt(squaredDistance(p)); }\n\n  /** \\returns the projection of a point \\a p onto the line \\c *this. 
*/\n  EIGEN_DEVICE_FUNC VectorType projection(const VectorType& p) const\n  { return origin() + direction().dot(p-origin()) * direction(); }\n\n  EIGEN_DEVICE_FUNC VectorType pointAt(const Scalar& t) const;\n  \n  template <int OtherOptions>\n  EIGEN_DEVICE_FUNC Scalar intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;\n \n  template <int OtherOptions>\n  EIGEN_DEVICE_FUNC Scalar intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;\n  \n  template <int OtherOptions>\n  EIGEN_DEVICE_FUNC VectorType intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const;\n\n  /** Applies the transformation matrix \\a mat to \\c *this and returns a reference to \\c *this.\n    *\n    * \\param mat the Dim x Dim transformation matrix\n    * \\param traits specifies whether the matrix \\a mat represents an #Isometry\n    *               or a more generic #Affine transformation. The default is #Affine.\n    */\n  template<typename XprType>\n  EIGEN_DEVICE_FUNC inline ParametrizedLine& transform(const MatrixBase<XprType>& mat, TransformTraits traits = Affine)\n  {\n    if (traits==Affine)\n      direction() = (mat * direction()).normalized();\n    else if (traits==Isometry)\n      direction() = mat * direction();\n    else\n    {\n      eigen_assert(0 && \"invalid traits value in ParametrizedLine::transform()\");\n    }\n    origin() = mat * origin();\n    return *this;\n  }\n\n  /** Applies the transformation \\a t to \\c *this and returns a reference to \\c *this.\n    *\n    * \\param t the transformation of dimension Dim\n    * \\param traits specifies whether the transformation \\a t represents an #Isometry\n    *               or a more generic #Affine transformation. 
The default is #Affine.\n    *               Other kind of transformations are not supported.\n    */\n  template<int TrOptions>\n  EIGEN_DEVICE_FUNC inline ParametrizedLine& transform(const Transform<Scalar,AmbientDimAtCompileTime,Affine,TrOptions>& t,\n                                                       TransformTraits traits = Affine)\n  {\n    transform(t.linear(), traits);\n    origin() += t.translation();\n    return *this;\n  }\n\n/** \\returns \\c *this with scalar type casted to \\a NewScalarType\n    *\n    * Note that if \\a NewScalarType is equal to the current scalar type of \\c *this\n    * then this function smartly returns a const reference to \\c *this.\n    */\n  template<typename NewScalarType>\n  EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<ParametrizedLine,\n           ParametrizedLine<NewScalarType,AmbientDimAtCompileTime,Options> >::type cast() const\n  {\n    return typename internal::cast_return_type<ParametrizedLine,\n                    ParametrizedLine<NewScalarType,AmbientDimAtCompileTime,Options> >::type(*this);\n  }\n\n  /** Copy constructor with scalar type conversion */\n  template<typename OtherScalarType,int OtherOptions>\n  EIGEN_DEVICE_FUNC inline explicit ParametrizedLine(const ParametrizedLine<OtherScalarType,AmbientDimAtCompileTime,OtherOptions>& other)\n  {\n    m_origin = other.origin().template cast<Scalar>();\n    m_direction = other.direction().template cast<Scalar>();\n  }\n\n  /** \\returns \\c true if \\c *this is approximately equal to \\a other, within the precision\n    * determined by \\a prec.\n    *\n    * \\sa MatrixBase::isApprox() */\n  EIGEN_DEVICE_FUNC bool isApprox(const ParametrizedLine& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const\n  { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); }\n\nprotected:\n\n  VectorType m_origin, m_direction;\n};\n\n/** Constructs a parametrized line from a 
2D hyperplane\n  *\n  * \\warning the ambient space must have dimension 2 such that the hyperplane actually describes a line\n  */\ntemplate <typename _Scalar, int _AmbientDim, int _Options>\ntemplate <int OtherOptions>\nEIGEN_DEVICE_FUNC inline ParametrizedLine<_Scalar, _AmbientDim,_Options>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim,OtherOptions>& hyperplane)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2)\n  direction() = hyperplane.normal().unitOrthogonal();\n  origin() = -hyperplane.normal()*hyperplane.offset();\n}\n\n/** \\returns the point at \\a t along this line\n  */\ntemplate <typename _Scalar, int _AmbientDim, int _Options>\nEIGEN_DEVICE_FUNC inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType\nParametrizedLine<_Scalar, _AmbientDim,_Options>::pointAt(const _Scalar& t) const\n{\n  return origin() + (direction()*t); \n}\n\n/** \\returns the parameter value of the intersection between \\c *this and the given \\a hyperplane\n  */\ntemplate <typename _Scalar, int _AmbientDim, int _Options>\ntemplate <int OtherOptions>\nEIGEN_DEVICE_FUNC inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionParameter(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const\n{\n  return -(hyperplane.offset()+hyperplane.normal().dot(origin()))\n          / hyperplane.normal().dot(direction());\n}\n\n\n/** \\deprecated use intersectionParameter()\n  * \\returns the parameter value of the intersection between \\c *this and the given \\a hyperplane\n  */\ntemplate <typename _Scalar, int _AmbientDim, int _Options>\ntemplate <int OtherOptions>\nEIGEN_DEVICE_FUNC inline _Scalar ParametrizedLine<_Scalar, _AmbientDim,_Options>::intersection(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const\n{\n  return intersectionParameter(hyperplane);\n}\n\n/** \\returns the point of the intersection between \\c *this and the given hyperplane\n  */\ntemplate <typename _Scalar, int 
_AmbientDim, int _Options>\ntemplate <int OtherOptions>\nEIGEN_DEVICE_FUNC inline typename ParametrizedLine<_Scalar, _AmbientDim,_Options>::VectorType\nParametrizedLine<_Scalar, _AmbientDim,_Options>::intersectionPoint(const Hyperplane<_Scalar, _AmbientDim, OtherOptions>& hyperplane) const\n{\n  return pointAt(intersectionParameter(hyperplane));\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_PARAMETRIZEDLINE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/Quaternion.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2009 Mathieu Gautier <mathieu.gautier@cea.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_QUATERNION_H\n#define EIGEN_QUATERNION_H\nnamespace Eigen { \n\n\n/***************************************************************************\n* Definition of QuaternionBase<Derived>\n* The implementation is at the end of the file\n***************************************************************************/\n\nnamespace internal {\ntemplate<typename Other,\n         int OtherRows=Other::RowsAtCompileTime,\n         int OtherCols=Other::ColsAtCompileTime>\nstruct quaternionbase_assign_impl;\n}\n\n/** \\geometry_module \\ingroup Geometry_Module\n  * \\class QuaternionBase\n  * \\brief Base class for quaternion expressions\n  * \\tparam Derived derived type (CRTP)\n  * \\sa class Quaternion\n  */\ntemplate<class Derived>\nclass QuaternionBase : public RotationBase<Derived, 3>\n{\n public:\n  typedef RotationBase<Derived, 3> Base;\n\n  using Base::operator*;\n  using Base::derived;\n\n  typedef typename internal::traits<Derived>::Scalar Scalar;\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  typedef typename internal::traits<Derived>::Coefficients Coefficients;\n  enum {\n    Flags = Eigen::internal::traits<Derived>::Flags\n  };\n\n // typedef typename Matrix<Scalar,4,1> Coefficients;\n  /** the type of a 3D vector */\n  typedef Matrix<Scalar,3,1> Vector3;\n  /** the equivalent rotation matrix type */\n  typedef Matrix<Scalar,3,3> Matrix3;\n  /** the equivalent angle-axis type */\n  typedef AngleAxis<Scalar> AngleAxisType;\n\n\n\n  /** \\returns the \\c x coefficient */\n  EIGEN_DEVICE_FUNC 
inline Scalar x() const { return this->derived().coeffs().coeff(0); }\n  /** \\returns the \\c y coefficient */\n  EIGEN_DEVICE_FUNC inline Scalar y() const { return this->derived().coeffs().coeff(1); }\n  /** \\returns the \\c z coefficient */\n  EIGEN_DEVICE_FUNC inline Scalar z() const { return this->derived().coeffs().coeff(2); }\n  /** \\returns the \\c w coefficient */\n  EIGEN_DEVICE_FUNC inline Scalar w() const { return this->derived().coeffs().coeff(3); }\n\n  /** \\returns a reference to the \\c x coefficient */\n  EIGEN_DEVICE_FUNC inline Scalar& x() { return this->derived().coeffs().coeffRef(0); }\n  /** \\returns a reference to the \\c y coefficient */\n  EIGEN_DEVICE_FUNC inline Scalar& y() { return this->derived().coeffs().coeffRef(1); }\n  /** \\returns a reference to the \\c z coefficient */\n  EIGEN_DEVICE_FUNC inline Scalar& z() { return this->derived().coeffs().coeffRef(2); }\n  /** \\returns a reference to the \\c w coefficient */\n  EIGEN_DEVICE_FUNC inline Scalar& w() { return this->derived().coeffs().coeffRef(3); }\n\n  /** \\returns a read-only vector expression of the imaginary part (x,y,z) */\n  EIGEN_DEVICE_FUNC inline const VectorBlock<const Coefficients,3> vec() const { return coeffs().template head<3>(); }\n\n  /** \\returns a vector expression of the imaginary part (x,y,z) */\n  EIGEN_DEVICE_FUNC inline VectorBlock<Coefficients,3> vec() { return coeffs().template head<3>(); }\n\n  /** \\returns a read-only vector expression of the coefficients (x,y,z,w) */\n  EIGEN_DEVICE_FUNC inline const typename internal::traits<Derived>::Coefficients& coeffs() const { return derived().coeffs(); }\n\n  /** \\returns a vector expression of the coefficients (x,y,z,w) */\n  EIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Coefficients& coeffs() { return derived().coeffs(); }\n\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE QuaternionBase<Derived>& operator=(const QuaternionBase<Derived>& other);\n  template<class OtherDerived> EIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE Derived& operator=(const QuaternionBase<OtherDerived>& other);\n\n// disabled this copy operator as it is giving very strange compilation errors when compiling\n// test_stdvector with GCC 4.4.2. This looks like a GCC bug though, so feel free to re-enable it if it's\n// useful; however notice that we already have the templated operator= above and e.g. in MatrixBase\n// we didn't have to add, in addition to templated operator=, such a non-templated copy operator.\n//  Derived& operator=(const QuaternionBase& other)\n//  { return operator=<Derived>(other); }\n\n  EIGEN_DEVICE_FUNC Derived& operator=(const AngleAxisType& aa);\n  template<class OtherDerived> EIGEN_DEVICE_FUNC Derived& operator=(const MatrixBase<OtherDerived>& m);\n\n  /** \\returns a quaternion representing an identity rotation\n    * \\sa MatrixBase::Identity()\n    */\n  EIGEN_DEVICE_FUNC static inline Quaternion<Scalar> Identity() { return Quaternion<Scalar>(Scalar(1), Scalar(0), Scalar(0), Scalar(0)); }\n\n  /** \\sa QuaternionBase::Identity(), MatrixBase::setIdentity()\n    */\n  EIGEN_DEVICE_FUNC inline QuaternionBase& setIdentity() { coeffs() << Scalar(0), Scalar(0), Scalar(0), Scalar(1); return *this; }\n\n  /** \\returns the squared norm of the quaternion's coefficients\n    * \\sa QuaternionBase::norm(), MatrixBase::squaredNorm()\n    */\n  EIGEN_DEVICE_FUNC inline Scalar squaredNorm() const { return coeffs().squaredNorm(); }\n\n  /** \\returns the norm of the quaternion's coefficients\n    * \\sa QuaternionBase::squaredNorm(), MatrixBase::norm()\n    */\n  EIGEN_DEVICE_FUNC inline Scalar norm() const { return coeffs().norm(); }\n\n  /** Normalizes the quaternion \\c *this\n    * \\sa normalized(), MatrixBase::normalize() */\n  EIGEN_DEVICE_FUNC inline void normalize() { coeffs().normalize(); }\n  /** \\returns a normalized copy of \\c *this\n    * \\sa normalize(), MatrixBase::normalized() */\n  EIGEN_DEVICE_FUNC inline Quaternion<Scalar> normalized() const { return 
Quaternion<Scalar>(coeffs().normalized()); }\n\n    /** \\returns the dot product of \\c *this and \\a other\n    * Geometrically speaking, the dot product of two unit quaternions\n    * corresponds to the cosine of half the angle between the two rotations.\n    * \\sa angularDistance()\n    */\n  template<class OtherDerived> EIGEN_DEVICE_FUNC inline Scalar dot(const QuaternionBase<OtherDerived>& other) const { return coeffs().dot(other.coeffs()); }\n\n  template<class OtherDerived> EIGEN_DEVICE_FUNC Scalar angularDistance(const QuaternionBase<OtherDerived>& other) const;\n\n  /** \\returns an equivalent 3x3 rotation matrix */\n  EIGEN_DEVICE_FUNC Matrix3 toRotationMatrix() const;\n\n  /** \\returns the quaternion which transform \\a a into \\a b through a rotation */\n  template<typename Derived1, typename Derived2>\n  EIGEN_DEVICE_FUNC Derived& setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);\n\n  template<class OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Quaternion<Scalar> operator* (const QuaternionBase<OtherDerived>& q) const;\n  template<class OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator*= (const QuaternionBase<OtherDerived>& q);\n\n  /** \\returns the quaternion describing the inverse rotation */\n  EIGEN_DEVICE_FUNC Quaternion<Scalar> inverse() const;\n\n  /** \\returns the conjugated quaternion */\n  EIGEN_DEVICE_FUNC Quaternion<Scalar> conjugate() const;\n\n  template<class OtherDerived> EIGEN_DEVICE_FUNC Quaternion<Scalar> slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const;\n\n  /** \\returns \\c true if \\c *this is approximately equal to \\a other, within the precision\n    * determined by \\a prec.\n    *\n    * \\sa MatrixBase::isApprox() */\n  template<class OtherDerived>\n  EIGEN_DEVICE_FUNC bool isApprox(const QuaternionBase<OtherDerived>& other, const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const\n  { return coeffs().isApprox(other.coeffs(), 
prec); }\n\n  /** return the result vector of \\a v through the rotation*/\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Vector3 _transformVector(const Vector3& v) const;\n\n  /** \\returns \\c *this with scalar type casted to \\a NewScalarType\n    *\n    * Note that if \\a NewScalarType is equal to the current scalar type of \\c *this\n    * then this function smartly returns a const reference to \\c *this.\n    */\n  template<typename NewScalarType>\n  EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type cast() const\n  {\n    return typename internal::cast_return_type<Derived,Quaternion<NewScalarType> >::type(derived());\n  }\n\n#ifdef EIGEN_QUATERNIONBASE_PLUGIN\n# include EIGEN_QUATERNIONBASE_PLUGIN\n#endif\n};\n\n/***************************************************************************\n* Definition/implementation of Quaternion<Scalar>\n***************************************************************************/\n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\class Quaternion\n  *\n  * \\brief The quaternion class used to represent 3D orientations and rotations\n  *\n  * \\tparam _Scalar the scalar type, i.e., the type of the coefficients\n  * \\tparam _Options controls the memory alignment of the coefficients. Can be \\# AutoAlign or \\# DontAlign. Default is AutoAlign.\n  *\n  * This class represents a quaternion \\f$ w+xi+yj+zk \\f$ that is a convenient representation of\n  * orientations and rotations of objects in three dimensions. 
Compared to other representations\n  * like Euler angles or 3x3 matrices, quaternions offer the following advantages:\n  * \\li \\b compact storage (4 scalars)\n  * \\li \\b efficient to compose (28 flops),\n  * \\li \\b stable spherical interpolation\n  *\n  * The following two typedefs are provided for convenience:\n  * \\li \\c Quaternionf for \\c float\n  * \\li \\c Quaterniond for \\c double\n  *\n  * \\warning Operations interpreting the quaternion as rotation have undefined behavior if the quaternion is not normalized.\n  *\n  * \\sa  class AngleAxis, class Transform\n  */\n\nnamespace internal {\ntemplate<typename _Scalar,int _Options>\nstruct traits<Quaternion<_Scalar,_Options> >\n{\n  typedef Quaternion<_Scalar,_Options> PlainObject;\n  typedef _Scalar Scalar;\n  typedef Matrix<_Scalar,4,1,_Options> Coefficients;\n  enum{\n    Alignment = internal::traits<Coefficients>::Alignment,\n    Flags = LvalueBit\n  };\n};\n}\n\ntemplate<typename _Scalar, int _Options>\nclass Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> >\n{\npublic:\n  typedef QuaternionBase<Quaternion<_Scalar,_Options> > Base;\n  enum { NeedsAlignment = internal::traits<Quaternion>::Alignment>0 };\n\n  typedef _Scalar Scalar;\n\n  EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Quaternion)\n  using Base::operator*=;\n\n  typedef typename internal::traits<Quaternion>::Coefficients Coefficients;\n  typedef typename Base::AngleAxisType AngleAxisType;\n\n  /** Default constructor leaving the quaternion uninitialized. 
*/\n  EIGEN_DEVICE_FUNC inline Quaternion() {}\n\n  /** Constructs and initializes the quaternion \\f$ w+xi+yj+zk \\f$ from\n    * its four coefficients \\a w, \\a x, \\a y and \\a z.\n    *\n    * \\warning Note the order of the arguments: the real \\a w coefficient first,\n    * while internally the coefficients are stored in the following order:\n    * [\\c x, \\c y, \\c z, \\c w]\n    */\n  EIGEN_DEVICE_FUNC inline Quaternion(const Scalar& w, const Scalar& x, const Scalar& y, const Scalar& z) : m_coeffs(x, y, z, w){}\n\n  /** Constructs and initialize a quaternion from the array data */\n  EIGEN_DEVICE_FUNC explicit inline Quaternion(const Scalar* data) : m_coeffs(data) {}\n\n  /** Copy constructor */\n  template<class Derived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Quaternion(const QuaternionBase<Derived>& other) { this->Base::operator=(other); }\n\n  /** Constructs and initializes a quaternion from the angle-axis \\a aa */\n  EIGEN_DEVICE_FUNC explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; }\n\n  /** Constructs and initializes a quaternion from either:\n    *  - a rotation matrix expression,\n    *  - a 4D vector expression representing quaternion coefficients.\n    */\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC explicit inline Quaternion(const MatrixBase<Derived>& other) { *this = other; }\n\n  /** Explicit copy constructor with scalar conversion */\n  template<typename OtherScalar, int OtherOptions>\n  EIGEN_DEVICE_FUNC explicit inline Quaternion(const Quaternion<OtherScalar, OtherOptions>& other)\n  { m_coeffs = other.coeffs().template cast<Scalar>(); }\n\n  EIGEN_DEVICE_FUNC static Quaternion UnitRandom();\n\n  template<typename Derived1, typename Derived2>\n  EIGEN_DEVICE_FUNC static Quaternion FromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);\n\n  EIGEN_DEVICE_FUNC inline Coefficients& coeffs() { return m_coeffs;}\n  EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs;}\n\n  
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(NeedsAlignment))\n  \n#ifdef EIGEN_QUATERNION_PLUGIN\n# include EIGEN_QUATERNION_PLUGIN\n#endif\n\nprotected:\n  Coefficients m_coeffs;\n  \n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    static EIGEN_STRONG_INLINE void _check_template_params()\n    {\n      EIGEN_STATIC_ASSERT( (_Options & DontAlign) == _Options,\n        INVALID_MATRIX_TEMPLATE_PARAMETERS)\n    }\n#endif\n};\n\n/** \\ingroup Geometry_Module\n  * single precision quaternion type */\ntypedef Quaternion<float> Quaternionf;\n/** \\ingroup Geometry_Module\n  * double precision quaternion type */\ntypedef Quaternion<double> Quaterniond;\n\n/***************************************************************************\n* Specialization of Map<Quaternion<Scalar>>\n***************************************************************************/\n\nnamespace internal {\n  template<typename _Scalar, int _Options>\n  struct traits<Map<Quaternion<_Scalar>, _Options> > : traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> >\n  {\n    typedef Map<Matrix<_Scalar,4,1>, _Options> Coefficients;\n  };\n}\n\nnamespace internal {\n  template<typename _Scalar, int _Options>\n  struct traits<Map<const Quaternion<_Scalar>, _Options> > : traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> >\n  {\n    typedef Map<const Matrix<_Scalar,4,1>, _Options> Coefficients;\n    typedef traits<Quaternion<_Scalar, (int(_Options)&Aligned)==Aligned ? AutoAlign : DontAlign> > TraitsBase;\n    enum {\n      Flags = TraitsBase::Flags & ~LvalueBit\n    };\n  };\n}\n\n/** \\ingroup Geometry_Module\n  * \\brief Quaternion expression mapping a constant memory buffer\n  *\n  * \\tparam _Scalar the type of the Quaternion coefficients\n  * \\tparam _Options see class Map\n  *\n  * This is a specialization of class Map for Quaternion. 
This class allows to view\n  * a 4 scalar memory buffer as an Eigen's Quaternion object.\n  *\n  * \\sa class Map, class Quaternion, class QuaternionBase\n  */\ntemplate<typename _Scalar, int _Options>\nclass Map<const Quaternion<_Scalar>, _Options >\n  : public QuaternionBase<Map<const Quaternion<_Scalar>, _Options> >\n{\n  public:\n    typedef QuaternionBase<Map<const Quaternion<_Scalar>, _Options> > Base;\n\n    typedef _Scalar Scalar;\n    typedef typename internal::traits<Map>::Coefficients Coefficients;\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)\n    using Base::operator*=;\n\n    /** Constructs a Mapped Quaternion object from the pointer \\a coeffs\n      *\n      * The pointer \\a coeffs must reference the four coefficients of Quaternion in the following order:\n      * \\code *coeffs == {x, y, z, w} \\endcode\n      *\n      * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */\n    EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE Map(const Scalar* coeffs) : m_coeffs(coeffs) {}\n\n    EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs;}\n\n  protected:\n    const Coefficients m_coeffs;\n};\n\n/** \\ingroup Geometry_Module\n  * \\brief Expression of a quaternion from a memory buffer\n  *\n  * \\tparam _Scalar the type of the Quaternion coefficients\n  * \\tparam _Options see class Map\n  *\n  * This is a specialization of class Map for Quaternion. 
This class allows to view\n  * a 4 scalar memory buffer as an Eigen's  Quaternion object.\n  *\n  * \\sa class Map, class Quaternion, class QuaternionBase\n  */\ntemplate<typename _Scalar, int _Options>\nclass Map<Quaternion<_Scalar>, _Options >\n  : public QuaternionBase<Map<Quaternion<_Scalar>, _Options> >\n{\n  public:\n    typedef QuaternionBase<Map<Quaternion<_Scalar>, _Options> > Base;\n\n    typedef _Scalar Scalar;\n    typedef typename internal::traits<Map>::Coefficients Coefficients;\n    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)\n    using Base::operator*=;\n\n    /** Constructs a Mapped Quaternion object from the pointer \\a coeffs\n      *\n      * The pointer \\a coeffs must reference the four coefficients of Quaternion in the following order:\n      * \\code *coeffs == {x, y, z, w} \\endcode\n      *\n      * If the template parameter _Options is set to #Aligned, then the pointer coeffs must be aligned. */\n    EIGEN_DEVICE_FUNC explicit EIGEN_STRONG_INLINE Map(Scalar* coeffs) : m_coeffs(coeffs) {}\n\n    EIGEN_DEVICE_FUNC inline Coefficients& coeffs() { return m_coeffs; }\n    EIGEN_DEVICE_FUNC inline const Coefficients& coeffs() const { return m_coeffs; }\n\n  protected:\n    Coefficients m_coeffs;\n};\n\n/** \\ingroup Geometry_Module\n  * Map an unaligned array of single precision scalars as a quaternion */\ntypedef Map<Quaternion<float>, 0>         QuaternionMapf;\n/** \\ingroup Geometry_Module\n  * Map an unaligned array of double precision scalars as a quaternion */\ntypedef Map<Quaternion<double>, 0>        QuaternionMapd;\n/** \\ingroup Geometry_Module\n  * Map a 16-byte aligned array of single precision scalars as a quaternion */\ntypedef Map<Quaternion<float>, Aligned>   QuaternionMapAlignedf;\n/** \\ingroup Geometry_Module\n  * Map a 16-byte aligned array of double precision scalars as a quaternion */\ntypedef Map<Quaternion<double>, Aligned>  
QuaternionMapAlignedd;\n\n/***************************************************************************\n* Implementation of QuaternionBase methods\n***************************************************************************/\n\n// Generic Quaternion * Quaternion product\n// This product can be specialized for a given architecture via the Arch template argument.\nnamespace internal {\ntemplate<int Arch, class Derived1, class Derived2, typename Scalar, int _Options> struct quat_product\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Quaternion<Scalar> run(const QuaternionBase<Derived1>& a, const QuaternionBase<Derived2>& b){\n    return Quaternion<Scalar>\n    (\n      a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(),\n      a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(),\n      a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(),\n      a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x()\n    );\n  }\n};\n}\n\n/** \\returns the concatenation of two rotations as a quaternion-quaternion product */\ntemplate <class Derived>\ntemplate <class OtherDerived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Quaternion<typename internal::traits<Derived>::Scalar>\nQuaternionBase<Derived>::operator* (const QuaternionBase<OtherDerived>& other) const\n{\n  EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename OtherDerived::Scalar>::value),\n   YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)\n  return internal::quat_product<Architecture::Target, Derived, OtherDerived,\n                         typename internal::traits<Derived>::Scalar,\n                         EIGEN_PLAIN_ENUM_MIN(internal::traits<Derived>::Alignment, internal::traits<OtherDerived>::Alignment)>::run(*this, other);\n}\n\n/** \\sa operator*(Quaternion) */\ntemplate <class Derived>\ntemplate <class OtherDerived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& 
QuaternionBase<Derived>::operator*= (const QuaternionBase<OtherDerived>& other)\n{\n  derived() = derived() * other.derived();\n  return derived();\n}\n\n/** Rotation of a vector by a quaternion.\n  * \\remarks If the quaternion is used to rotate several points (>1)\n  * then it is much more efficient to first convert it to a 3x3 Matrix.\n  * Comparison of the operation cost for n transformations:\n  *   - Quaternion2:    30n\n  *   - Via a Matrix3: 24 + 15n\n  */\ntemplate <class Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename QuaternionBase<Derived>::Vector3\nQuaternionBase<Derived>::_transformVector(const Vector3& v) const\n{\n    // Note that this algorithm comes from the optimization by hand\n    // of the conversion to a Matrix followed by a Matrix/Vector product.\n    // It appears to be much faster than the common algorithm found\n    // in the literature (30 versus 39 flops). It also requires two\n    // Vector3 as temporaries.\n    Vector3 uv = this->vec().cross(v);\n    uv += uv;\n    return v + this->w() * uv + this->vec().cross(uv);\n}\n\ntemplate<class Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE QuaternionBase<Derived>& QuaternionBase<Derived>::operator=(const QuaternionBase<Derived>& other)\n{\n  coeffs() = other.coeffs();\n  return derived();\n}\n\ntemplate<class Derived>\ntemplate<class OtherDerived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const QuaternionBase<OtherDerived>& other)\n{\n  coeffs() = other.coeffs();\n  return derived();\n}\n\n/** Set \\c *this from an angle-axis \\a aa and returns a reference to \\c *this\n  */\ntemplate<class Derived>\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator=(const AngleAxisType& aa)\n{\n  EIGEN_USING_STD_MATH(cos)\n  EIGEN_USING_STD_MATH(sin)\n  Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings\n  this->w() = cos(ha);\n  this->vec() = sin(ha) * aa.axis();\n  return 
derived();\n}\n\n/** Set \\c *this from the expression \\a xpr:\n  *   - if \\a xpr is a 4x1 vector, then \\a xpr is assumed to be a quaternion\n  *   - if \\a xpr is a 3x3 matrix, then \\a xpr is assumed to be rotation matrix\n  *     and \\a xpr is converted to a quaternion\n  */\n\ntemplate<class Derived>\ntemplate<class MatrixDerived>\nEIGEN_DEVICE_FUNC inline Derived& QuaternionBase<Derived>::operator=(const MatrixBase<MatrixDerived>& xpr)\n{\n  EIGEN_STATIC_ASSERT((internal::is_same<typename Derived::Scalar, typename MatrixDerived::Scalar>::value),\n   YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)\n  internal::quaternionbase_assign_impl<MatrixDerived>::run(*this, xpr.derived());\n  return derived();\n}\n\n/** Convert the quaternion to a 3x3 rotation matrix. The quaternion is required to\n  * be normalized, otherwise the result is undefined.\n  */\ntemplate<class Derived>\nEIGEN_DEVICE_FUNC inline typename QuaternionBase<Derived>::Matrix3\nQuaternionBase<Derived>::toRotationMatrix(void) const\n{\n  // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!)\n  // if not inlined then the cost of the return by value is huge ~ +35%,\n  // however, not inlining this function is an order of magnitude slower, so\n  // it has to be inlined, and so the return by value is not an issue\n  Matrix3 res;\n\n  const Scalar tx  = Scalar(2)*this->x();\n  const Scalar ty  = Scalar(2)*this->y();\n  const Scalar tz  = Scalar(2)*this->z();\n  const Scalar twx = tx*this->w();\n  const Scalar twy = ty*this->w();\n  const Scalar twz = tz*this->w();\n  const Scalar txx = tx*this->x();\n  const Scalar txy = ty*this->x();\n  const Scalar txz = tz*this->x();\n  const Scalar tyy = ty*this->y();\n  const Scalar tyz = tz*this->y();\n  const Scalar tzz = tz*this->z();\n\n  res.coeffRef(0,0) = Scalar(1)-(tyy+tzz);\n  res.coeffRef(0,1) = txy-twz;\n  res.coeffRef(0,2) = txz+twy;\n  res.coeffRef(1,0) = 
txy+twz;\n  res.coeffRef(1,1) = Scalar(1)-(txx+tzz);\n  res.coeffRef(1,2) = tyz-twx;\n  res.coeffRef(2,0) = txz-twy;\n  res.coeffRef(2,1) = tyz+twx;\n  res.coeffRef(2,2) = Scalar(1)-(txx+tyy);\n\n  return res;\n}\n\n/** Sets \\c *this to be a quaternion representing a rotation between\n  * the two arbitrary vectors \\a a and \\a b. In other words, the built\n  * rotation represent a rotation sending the line of direction \\a a\n  * to the line of direction \\a b, both lines passing through the origin.\n  *\n  * \\returns a reference to \\c *this.\n  *\n  * Note that the two input vectors do \\b not have to be normalized, and\n  * do not need to have the same norm.\n  */\ntemplate<class Derived>\ntemplate<typename Derived1, typename Derived2>\nEIGEN_DEVICE_FUNC inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)\n{\n  EIGEN_USING_STD_MATH(sqrt)\n  Vector3 v0 = a.normalized();\n  Vector3 v1 = b.normalized();\n  Scalar c = v1.dot(v0);\n\n  // if dot == -1, vectors are nearly opposites\n  // => accurately compute the rotation axis by computing the\n  //    intersection of the two planes. 
This is done by solving:\n  //       x^T v0 = 0\n  //       x^T v1 = 0\n  //    under the constraint:\n  //       ||x|| = 1\n  //    which yields a singular value problem\n  if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision())\n  {\n    c = numext::maxi(c,Scalar(-1));\n    Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();\n    JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);\n    Vector3 axis = svd.matrixV().col(2);\n\n    Scalar w2 = (Scalar(1)+c)*Scalar(0.5);\n    this->w() = sqrt(w2);\n    this->vec() = axis * sqrt(Scalar(1) - w2);\n    return derived();\n  }\n  Vector3 axis = v0.cross(v1);\n  Scalar s = sqrt((Scalar(1)+c)*Scalar(2));\n  Scalar invs = Scalar(1)/s;\n  this->vec() = axis * invs;\n  this->w() = s * Scalar(0.5);\n\n  return derived();\n}\n\n/** \\returns a random unit quaternion following a uniform distribution law on SO(3)\n  *\n  * \\note The implementation is based on http://planning.cs.uiuc.edu/node198.html\n  */\ntemplate<typename Scalar, int Options>\nEIGEN_DEVICE_FUNC Quaternion<Scalar,Options> Quaternion<Scalar,Options>::UnitRandom()\n{\n  EIGEN_USING_STD_MATH(sqrt)\n  EIGEN_USING_STD_MATH(sin)\n  EIGEN_USING_STD_MATH(cos)\n  const Scalar u1 = internal::random<Scalar>(0, 1),\n               u2 = internal::random<Scalar>(0, 2*EIGEN_PI),\n               u3 = internal::random<Scalar>(0, 2*EIGEN_PI);\n  const Scalar a = sqrt(1 - u1),\n               b = sqrt(u1);\n  return Quaternion (a * sin(u2), a * cos(u2), b * sin(u3), b * cos(u3));\n}\n\n\n/** Returns a quaternion representing a rotation between\n  * the two arbitrary vectors \\a a and \\a b. 
In other words, the built\n  * rotation represent a rotation sending the line of direction \\a a\n  * to the line of direction \\a b, both lines passing through the origin.\n  *\n  * \\returns resulting quaternion\n  *\n  * Note that the two input vectors do \\b not have to be normalized, and\n  * do not need to have the same norm.\n  */\ntemplate<typename Scalar, int Options>\ntemplate<typename Derived1, typename Derived2>\nEIGEN_DEVICE_FUNC Quaternion<Scalar,Options> Quaternion<Scalar,Options>::FromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)\n{\n    Quaternion quat;\n    quat.setFromTwoVectors(a, b);\n    return quat;\n}\n\n\n/** \\returns the multiplicative inverse of \\c *this\n  * Note that in most cases, i.e., if you simply want the opposite rotation,\n  * and/or the quaternion is normalized, then it is enough to use the conjugate.\n  *\n  * \\sa QuaternionBase::conjugate()\n  */\ntemplate <class Derived>\nEIGEN_DEVICE_FUNC inline Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Derived>::inverse() const\n{\n  // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite()  ??\n  Scalar n2 = this->squaredNorm();\n  if (n2 > Scalar(0))\n    return Quaternion<Scalar>(conjugate().coeffs() / n2);\n  else\n  {\n    // return an invalid result to flag the error\n    return Quaternion<Scalar>(Coefficients::Zero());\n  }\n}\n\n// Generic conjugate of a Quaternion\nnamespace internal {\ntemplate<int Arch, class Derived, typename Scalar, int _Options> struct quat_conj\n{\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE Quaternion<Scalar> run(const QuaternionBase<Derived>& q){\n    return Quaternion<Scalar>(q.w(),-q.x(),-q.y(),-q.z());\n  }\n};\n}\n                         \n/** \\returns the conjugate of the \\c *this which is equal to the multiplicative inverse\n  * if the quaternion is normalized.\n  * The conjugate of a quaternion represents the opposite rotation.\n  *\n 
 * \\sa Quaternion2::inverse()\n  */\ntemplate <class Derived>\nEIGEN_DEVICE_FUNC inline Quaternion<typename internal::traits<Derived>::Scalar>\nQuaternionBase<Derived>::conjugate() const\n{\n  return internal::quat_conj<Architecture::Target, Derived,\n                         typename internal::traits<Derived>::Scalar,\n                         internal::traits<Derived>::Alignment>::run(*this);\n                         \n}\n\n/** \\returns the angle (in radian) between two rotations\n  * \\sa dot()\n  */\ntemplate <class Derived>\ntemplate <class OtherDerived>\nEIGEN_DEVICE_FUNC inline typename internal::traits<Derived>::Scalar\nQuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const\n{\n  EIGEN_USING_STD_MATH(atan2)\n  Quaternion<Scalar> d = (*this) * other.conjugate();\n  return Scalar(2) * atan2( d.vec().norm(), numext::abs(d.w()) );\n}\n\n \n    \n/** \\returns the spherical linear interpolation between the two quaternions\n  * \\c *this and \\a other at the parameter \\a t in [0;1].\n  * \n  * This represents an interpolation for a constant motion between \\c *this and \\a other,\n  * see also http://en.wikipedia.org/wiki/Slerp.\n  */\ntemplate <class Derived>\ntemplate <class OtherDerived>\nEIGEN_DEVICE_FUNC Quaternion<typename internal::traits<Derived>::Scalar>\nQuaternionBase<Derived>::slerp(const Scalar& t, const QuaternionBase<OtherDerived>& other) const\n{\n  EIGEN_USING_STD_MATH(acos)\n  EIGEN_USING_STD_MATH(sin)\n  const Scalar one = Scalar(1) - NumTraits<Scalar>::epsilon();\n  Scalar d = this->dot(other);\n  Scalar absD = numext::abs(d);\n\n  Scalar scale0;\n  Scalar scale1;\n\n  if(absD>=one)\n  {\n    scale0 = Scalar(1) - t;\n    scale1 = t;\n  }\n  else\n  {\n    // theta is the angle between the 2 quaternions\n    Scalar theta = acos(absD);\n    Scalar sinTheta = sin(theta);\n\n    scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta;\n    scale1 = sin( ( t * theta) ) / sinTheta;\n  }\n  if(d<Scalar(0)) scale1 = 
-scale1;\n\n  return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());\n}\n\nnamespace internal {\n\n// set from a rotation matrix\ntemplate<typename Other>\nstruct quaternionbase_assign_impl<Other,3,3>\n{\n  typedef typename Other::Scalar Scalar;\n  template<class Derived> EIGEN_DEVICE_FUNC static inline void run(QuaternionBase<Derived>& q, const Other& a_mat)\n  {\n    const typename internal::nested_eval<Other,2>::type mat(a_mat);\n    EIGEN_USING_STD_MATH(sqrt)\n    // This algorithm comes from  \"Quaternion Calculus and Fast Animation\",\n    // Ken Shoemake, 1987 SIGGRAPH course notes\n    Scalar t = mat.trace();\n    if (t > Scalar(0))\n    {\n      t = sqrt(t + Scalar(1.0));\n      q.w() = Scalar(0.5)*t;\n      t = Scalar(0.5)/t;\n      q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t;\n      q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t;\n      q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t;\n    }\n    else\n    {\n      Index i = 0;\n      if (mat.coeff(1,1) > mat.coeff(0,0))\n        i = 1;\n      if (mat.coeff(2,2) > mat.coeff(i,i))\n        i = 2;\n      Index j = (i+1)%3;\n      Index k = (j+1)%3;\n\n      t = sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0));\n      q.coeffs().coeffRef(i) = Scalar(0.5) * t;\n      t = Scalar(0.5)/t;\n      q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t;\n      q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t;\n      q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t;\n    }\n  }\n};\n\n// set from a vector of coefficients assumed to be a quaternion\ntemplate<typename Other>\nstruct quaternionbase_assign_impl<Other,4,1>\n{\n  typedef typename Other::Scalar Scalar;\n  template<class Derived> EIGEN_DEVICE_FUNC static inline void run(QuaternionBase<Derived>& q, const Other& vec)\n  {\n    q.coeffs() = vec;\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_QUATERNION_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/Rotation2D.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ROTATION2D_H\n#define EIGEN_ROTATION2D_H\n\nnamespace Eigen { \n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\class Rotation2D\n  *\n  * \\brief Represents a rotation/orientation in a 2 dimensional space.\n  *\n  * \\tparam _Scalar the scalar type, i.e., the type of the coefficients\n  *\n  * This class is equivalent to a single scalar representing a counter clock wise rotation\n  * as a single angle in radian. It provides some additional features such as the automatic\n  * conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar\n  * interface to Quaternion in order to facilitate the writing of generic algorithms\n  * dealing with rotations.\n  *\n  * \\sa class Quaternion, class Transform\n  */\n\nnamespace internal {\n\ntemplate<typename _Scalar> struct traits<Rotation2D<_Scalar> >\n{\n  typedef _Scalar Scalar;\n};\n} // end namespace internal\n\ntemplate<typename _Scalar>\nclass Rotation2D : public RotationBase<Rotation2D<_Scalar>,2>\n{\n  typedef RotationBase<Rotation2D<_Scalar>,2> Base;\n\npublic:\n\n  using Base::operator*;\n\n  enum { Dim = 2 };\n  /** the scalar type of the coefficients */\n  typedef _Scalar Scalar;\n  typedef Matrix<Scalar,2,1> Vector2;\n  typedef Matrix<Scalar,2,2> Matrix2;\n\nprotected:\n\n  Scalar m_angle;\n\npublic:\n\n  /** Construct a 2D counter clock wise rotation from the angle \\a a in radian. */\n  EIGEN_DEVICE_FUNC explicit inline Rotation2D(const Scalar& a) : m_angle(a) {}\n  \n  /** Default constructor wihtout initialization. The represented rotation is undefined. 
*/\n  EIGEN_DEVICE_FUNC Rotation2D() {}\n\n  /** Construct a 2D rotation from a 2x2 rotation matrix \\a mat.\n    *\n    * \\sa fromRotationMatrix()\n    */\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC explicit Rotation2D(const MatrixBase<Derived>& m)\n  {\n    fromRotationMatrix(m.derived());\n  }\n\n  /** \\returns the rotation angle */\n  EIGEN_DEVICE_FUNC inline Scalar angle() const { return m_angle; }\n\n  /** \\returns a read-write reference to the rotation angle */\n  EIGEN_DEVICE_FUNC inline Scalar& angle() { return m_angle; }\n  \n  /** \\returns the rotation angle in [0,2pi] */\n  EIGEN_DEVICE_FUNC inline Scalar smallestPositiveAngle() const {\n    Scalar tmp = numext::fmod(m_angle,Scalar(2*EIGEN_PI));\n    return tmp<Scalar(0) ? tmp + Scalar(2*EIGEN_PI) : tmp;\n  }\n  \n  /** \\returns the rotation angle in [-pi,pi] */\n  EIGEN_DEVICE_FUNC inline Scalar smallestAngle() const {\n    Scalar tmp = numext::fmod(m_angle,Scalar(2*EIGEN_PI));\n    if(tmp>Scalar(EIGEN_PI))       tmp -= Scalar(2*EIGEN_PI);\n    else if(tmp<-Scalar(EIGEN_PI)) tmp += Scalar(2*EIGEN_PI);\n    return tmp;\n  }\n\n  /** \\returns the inverse rotation */\n  EIGEN_DEVICE_FUNC inline Rotation2D inverse() const { return Rotation2D(-m_angle); }\n\n  /** Concatenates two rotations */\n  EIGEN_DEVICE_FUNC inline Rotation2D operator*(const Rotation2D& other) const\n  { return Rotation2D(m_angle + other.m_angle); }\n\n  /** Concatenates two rotations */\n  EIGEN_DEVICE_FUNC inline Rotation2D& operator*=(const Rotation2D& other)\n  { m_angle += other.m_angle; return *this; }\n\n  /** Applies the rotation to a 2D vector */\n  EIGEN_DEVICE_FUNC Vector2 operator* (const Vector2& vec) const\n  { return toRotationMatrix() * vec; }\n  \n  template<typename Derived>\n  EIGEN_DEVICE_FUNC Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m);\n  EIGEN_DEVICE_FUNC Matrix2 toRotationMatrix() const;\n\n  /** Set \\c *this from a 2x2 rotation matrix \\a mat.\n    * In other words, this function 
extract the rotation angle from the rotation matrix.\n    *\n    * This method is an alias for fromRotationMatrix()\n    *\n    * \\sa fromRotationMatrix()\n    */\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC Rotation2D& operator=(const  MatrixBase<Derived>& m)\n  { return fromRotationMatrix(m.derived()); }\n\n  /** \\returns the spherical interpolation between \\c *this and \\a other using\n    * parameter \\a t. It is in fact equivalent to a linear interpolation.\n    */\n  EIGEN_DEVICE_FUNC inline Rotation2D slerp(const Scalar& t, const Rotation2D& other) const\n  {\n    Scalar dist = Rotation2D(other.m_angle-m_angle).smallestAngle();\n    return Rotation2D(m_angle + dist*t);\n  }\n\n  /** \\returns \\c *this with scalar type casted to \\a NewScalarType\n    *\n    * Note that if \\a NewScalarType is equal to the current scalar type of \\c *this\n    * then this function smartly returns a const reference to \\c *this.\n    */\n  template<typename NewScalarType>\n  EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type cast() const\n  { return typename internal::cast_return_type<Rotation2D,Rotation2D<NewScalarType> >::type(*this); }\n\n  /** Copy constructor with scalar type conversion */\n  template<typename OtherScalarType>\n  EIGEN_DEVICE_FUNC inline explicit Rotation2D(const Rotation2D<OtherScalarType>& other)\n  {\n    m_angle = Scalar(other.angle());\n  }\n\n  EIGEN_DEVICE_FUNC static inline Rotation2D Identity() { return Rotation2D(0); }\n\n  /** \\returns \\c true if \\c *this is approximately equal to \\a other, within the precision\n    * determined by \\a prec.\n    *\n    * \\sa MatrixBase::isApprox() */\n  EIGEN_DEVICE_FUNC bool isApprox(const Rotation2D& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const\n  { return internal::isApprox(m_angle,other.m_angle, prec); }\n  \n};\n\n/** \\ingroup Geometry_Module\n  * single precision 2D rotation type 
*/\ntypedef Rotation2D<float> Rotation2Df;\n/** \\ingroup Geometry_Module\n  * double precision 2D rotation type */\ntypedef Rotation2D<double> Rotation2Dd;\n\n/** Set \\c *this from a 2x2 rotation matrix \\a mat.\n  * In other words, this function extract the rotation angle\n  * from the rotation matrix.\n  */\ntemplate<typename Scalar>\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)\n{\n  EIGEN_USING_STD_MATH(atan2)\n  EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE)\n  m_angle = atan2(mat.coeff(1,0), mat.coeff(0,0));\n  return *this;\n}\n\n/** Constructs and \\returns an equivalent 2x2 rotation matrix.\n  */\ntemplate<typename Scalar>\ntypename Rotation2D<Scalar>::Matrix2\nEIGEN_DEVICE_FUNC Rotation2D<Scalar>::toRotationMatrix(void) const\n{\n  EIGEN_USING_STD_MATH(sin)\n  EIGEN_USING_STD_MATH(cos)\n  Scalar sinA = sin(m_angle);\n  Scalar cosA = cos(m_angle);\n  return (Matrix2() << cosA, -sinA, sinA, cosA).finished();\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_ROTATION2D_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/RotationBase.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ROTATIONBASE_H\n#define EIGEN_ROTATIONBASE_H\n\nnamespace Eigen { \n\n// forward declaration\nnamespace internal {\ntemplate<typename RotationDerived, typename MatrixType, bool IsVector=MatrixType::IsVectorAtCompileTime>\nstruct rotation_base_generic_product_selector;\n}\n\n/** \\class RotationBase\n  *\n  * \\brief Common base class for compact rotation representations\n  *\n  * \\tparam Derived is the derived type, i.e., a rotation type\n  * \\tparam _Dim the dimension of the space\n  */\ntemplate<typename Derived, int _Dim>\nclass RotationBase\n{\n  public:\n    enum { Dim = _Dim };\n    /** the scalar type of the coefficients */\n    typedef typename internal::traits<Derived>::Scalar Scalar;\n\n    /** corresponding linear transformation matrix type */\n    typedef Matrix<Scalar,Dim,Dim> RotationMatrixType;\n    typedef Matrix<Scalar,Dim,1> VectorType;\n\n  public:\n    EIGEN_DEVICE_FUNC inline const Derived& derived() const { return *static_cast<const Derived*>(this); }\n    EIGEN_DEVICE_FUNC inline Derived& derived() { return *static_cast<Derived*>(this); }\n\n    /** \\returns an equivalent rotation matrix */\n    EIGEN_DEVICE_FUNC inline RotationMatrixType toRotationMatrix() const { return derived().toRotationMatrix(); }\n\n    /** \\returns an equivalent rotation matrix \n      * This function is added to be conform with the Transform class' naming scheme.\n      */\n    EIGEN_DEVICE_FUNC inline RotationMatrixType matrix() const { return derived().toRotationMatrix(); }\n\n    /** \\returns the inverse rotation */\n    EIGEN_DEVICE_FUNC inline Derived inverse() 
const { return derived().inverse(); }\n\n    /** \\returns the concatenation of the rotation \\c *this with a translation \\a t */\n    EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Isometry> operator*(const Translation<Scalar,Dim>& t) const\n    { return Transform<Scalar,Dim,Isometry>(*this) * t; }\n\n    /** \\returns the concatenation of the rotation \\c *this with a uniform scaling \\a s */\n    EIGEN_DEVICE_FUNC inline RotationMatrixType operator*(const UniformScaling<Scalar>& s) const\n    { return toRotationMatrix() * s.factor(); }\n\n    /** \\returns the concatenation of the rotation \\c *this with a generic expression \\a e\n      * \\a e can be:\n      *  - a DimxDim linear transformation matrix\n      *  - a DimxDim diagonal matrix (axis aligned scaling)\n      *  - a vector of size Dim\n      */\n    template<typename OtherDerived>\n    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename internal::rotation_base_generic_product_selector<Derived,OtherDerived,OtherDerived::IsVectorAtCompileTime>::ReturnType\n    operator*(const EigenBase<OtherDerived>& e) const\n    { return internal::rotation_base_generic_product_selector<Derived,OtherDerived>::run(derived(), e.derived()); }\n\n    /** \\returns the concatenation of a linear transformation \\a l with the rotation \\a r */\n    template<typename OtherDerived> friend\n    EIGEN_DEVICE_FUNC inline RotationMatrixType operator*(const EigenBase<OtherDerived>& l, const Derived& r)\n    { return l.derived() * r.toRotationMatrix(); }\n\n    /** \\returns the concatenation of a scaling \\a l with the rotation \\a r */\n    EIGEN_DEVICE_FUNC friend inline Transform<Scalar,Dim,Affine> operator*(const DiagonalMatrix<Scalar,Dim>& l, const Derived& r)\n    { \n      Transform<Scalar,Dim,Affine> res(r);\n      res.linear().applyOnTheLeft(l);\n      return res;\n    }\n\n    /** \\returns the concatenation of the rotation \\c *this with a transformation \\a t */\n    template<int Mode, int Options>\n    EIGEN_DEVICE_FUNC 
inline Transform<Scalar,Dim,Mode> operator*(const Transform<Scalar,Dim,Mode,Options>& t) const\n    { return toRotationMatrix() * t; }\n\n    template<typename OtherVectorType>\n    EIGEN_DEVICE_FUNC inline VectorType _transformVector(const OtherVectorType& v) const\n    { return toRotationMatrix() * v; }\n};\n\nnamespace internal {\n\n// implementation of the generic product rotation * matrix\ntemplate<typename RotationDerived, typename MatrixType>\nstruct rotation_base_generic_product_selector<RotationDerived,MatrixType,false>\n{\n  enum { Dim = RotationDerived::Dim };\n  typedef Matrix<typename RotationDerived::Scalar,Dim,Dim> ReturnType;\n  EIGEN_DEVICE_FUNC static inline ReturnType run(const RotationDerived& r, const MatrixType& m)\n  { return r.toRotationMatrix() * m; }\n};\n\ntemplate<typename RotationDerived, typename Scalar, int Dim, int MaxDim>\nstruct rotation_base_generic_product_selector< RotationDerived, DiagonalMatrix<Scalar,Dim,MaxDim>, false >\n{\n  typedef Transform<Scalar,Dim,Affine> ReturnType;\n  EIGEN_DEVICE_FUNC static inline ReturnType run(const RotationDerived& r, const DiagonalMatrix<Scalar,Dim,MaxDim>& m)\n  {\n    ReturnType res(r);\n    res.linear() *= m;\n    return res;\n  }\n};\n\ntemplate<typename RotationDerived,typename OtherVectorType>\nstruct rotation_base_generic_product_selector<RotationDerived,OtherVectorType,true>\n{\n  enum { Dim = RotationDerived::Dim };\n  typedef Matrix<typename RotationDerived::Scalar,Dim,1> ReturnType;\n  EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE ReturnType run(const RotationDerived& r, const OtherVectorType& v)\n  {\n    return r._transformVector(v);\n  }\n};\n\n} // end namespace internal\n\n/** \\geometry_module\n  *\n  * \\brief Constructs a Dim x Dim rotation matrix from the rotation \\a r\n  */\ntemplate<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, 
_MaxCols>\n::Matrix(const RotationBase<OtherDerived,ColsAtCompileTime>& r)\n{\n  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim))\n  *this = r.toRotationMatrix();\n}\n\n/** \\geometry_module\n  *\n  * \\brief Set a Dim x Dim rotation matrix from the rotation \\a r\n  */\ntemplate<typename _Scalar, int _Rows, int _Cols, int _Storage, int _MaxRows, int _MaxCols>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>&\nMatrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>\n::operator=(const RotationBase<OtherDerived,ColsAtCompileTime>& r)\n{\n  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim))\n  return *this = r.toRotationMatrix();\n}\n\nnamespace internal {\n\n/** \\internal\n  *\n  * Helper function to return an arbitrary rotation object to a rotation matrix.\n  *\n  * \\tparam Scalar the numeric type of the matrix coefficients\n  * \\tparam Dim the dimension of the current space\n  *\n  * It returns a Dim x Dim fixed size matrix.\n  *\n  * Default specializations are provided for:\n  *   - any scalar type (2D),\n  *   - any matrix expression,\n  *   - any type based on RotationBase (e.g., Quaternion, AngleAxis, Rotation2D)\n  *\n  * Currently toRotationMatrix is only used by Transform.\n  *\n  * \\sa class Transform, class Rotation2D, class Quaternion, class AngleAxis\n  */\ntemplate<typename Scalar, int Dim>\nEIGEN_DEVICE_FUNC static inline Matrix<Scalar,2,2> toRotationMatrix(const Scalar& s)\n{\n  EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE)\n  return Rotation2D<Scalar>(s).toRotationMatrix();\n}\n\ntemplate<typename Scalar, int Dim, typename OtherDerived>\nEIGEN_DEVICE_FUNC static inline Matrix<Scalar,Dim,Dim> toRotationMatrix(const RotationBase<OtherDerived,Dim>& r)\n{\n  return r.toRotationMatrix();\n}\n\ntemplate<typename Scalar, int Dim, typename OtherDerived>\nEIGEN_DEVICE_FUNC static inline 
const MatrixBase<OtherDerived>& toRotationMatrix(const MatrixBase<OtherDerived>& mat)\n{\n  EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim,\n    YOU_MADE_A_PROGRAMMING_MISTAKE)\n  return mat;\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_ROTATIONBASE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/Scaling.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SCALING_H\n#define EIGEN_SCALING_H\n\nnamespace Eigen { \n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\class Scaling\n  *\n  * \\brief Represents a generic uniform scaling transformation\n  *\n  * \\tparam _Scalar the scalar type, i.e., the type of the coefficients.\n  *\n  * This class represent a uniform scaling transformation. It is the return\n  * type of Scaling(Scalar), and most of the time this is the only way it\n  * is used. In particular, this class is not aimed to be used to store a scaling transformation,\n  * but rather to make easier the constructions and updates of Transform objects.\n  *\n  * To represent an axis aligned scaling, use the DiagonalMatrix class.\n  *\n  * \\sa Scaling(), class DiagonalMatrix, MatrixBase::asDiagonal(), class Translation, class Transform\n  */\ntemplate<typename _Scalar>\nclass UniformScaling\n{\npublic:\n  /** the scalar type of the coefficients */\n  typedef _Scalar Scalar;\n\nprotected:\n\n  Scalar m_factor;\n\npublic:\n\n  /** Default constructor without initialization. 
*/\n  UniformScaling() {}\n  /** Constructs and initialize a uniform scaling transformation */\n  explicit inline UniformScaling(const Scalar& s) : m_factor(s) {}\n\n  inline const Scalar& factor() const { return m_factor; }\n  inline Scalar& factor() { return m_factor; }\n\n  /** Concatenates two uniform scaling */\n  inline UniformScaling operator* (const UniformScaling& other) const\n  { return UniformScaling(m_factor * other.factor()); }\n\n  /** Concatenates a uniform scaling and a translation */\n  template<int Dim>\n  inline Transform<Scalar,Dim,Affine> operator* (const Translation<Scalar,Dim>& t) const;\n\n  /** Concatenates a uniform scaling and an affine transformation */\n  template<int Dim, int Mode, int Options>\n  inline Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> operator* (const Transform<Scalar,Dim, Mode, Options>& t) const\n  {\n    Transform<Scalar,Dim,(int(Mode)==int(Isometry)?Affine:Mode)> res = t;\n    res.prescale(factor());\n    return res;\n  }\n\n  /** Concatenates a uniform scaling and a linear transformation matrix */\n  // TODO returns an expression\n  template<typename Derived>\n  inline typename internal::plain_matrix_type<Derived>::type operator* (const MatrixBase<Derived>& other) const\n  { return other * m_factor; }\n\n  template<typename Derived,int Dim>\n  inline Matrix<Scalar,Dim,Dim> operator*(const RotationBase<Derived,Dim>& r) const\n  { return r.toRotationMatrix() * m_factor; }\n\n  /** \\returns the inverse scaling */\n  inline UniformScaling inverse() const\n  { return UniformScaling(Scalar(1)/m_factor); }\n\n  /** \\returns \\c *this with scalar type casted to \\a NewScalarType\n    *\n    * Note that if \\a NewScalarType is equal to the current scalar type of \\c *this\n    * then this function smartly returns a const reference to \\c *this.\n    */\n  template<typename NewScalarType>\n  inline UniformScaling<NewScalarType> cast() const\n  { return UniformScaling<NewScalarType>(NewScalarType(m_factor)); 
}\n\n  /** Copy constructor with scalar type conversion */\n  template<typename OtherScalarType>\n  inline explicit UniformScaling(const UniformScaling<OtherScalarType>& other)\n  { m_factor = Scalar(other.factor()); }\n\n  /** \\returns \\c true if \\c *this is approximately equal to \\a other, within the precision\n    * determined by \\a prec.\n    *\n    * \\sa MatrixBase::isApprox() */\n  bool isApprox(const UniformScaling& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const\n  { return internal::isApprox(m_factor, other.factor(), prec); }\n\n};\n\n/** \\addtogroup Geometry_Module */\n//@{\n\n/** Concatenates a linear transformation matrix and a uniform scaling\n  * \\relates UniformScaling\n  */\n// NOTE this operator is defiend in MatrixBase and not as a friend function\n// of UniformScaling to fix an internal crash of Intel's ICC\ntemplate<typename Derived,typename Scalar>\nEIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,Scalar,product)\noperator*(const MatrixBase<Derived>& matrix, const UniformScaling<Scalar>& s)\n{ return matrix.derived() * s.factor(); }\n\n/** Constructs a uniform scaling from scale factor \\a s */\ninline UniformScaling<float> Scaling(float s) { return UniformScaling<float>(s); }\n/** Constructs a uniform scaling from scale factor \\a s */\ninline UniformScaling<double> Scaling(double s) { return UniformScaling<double>(s); }\n/** Constructs a uniform scaling from scale factor \\a s */\ntemplate<typename RealScalar>\ninline UniformScaling<std::complex<RealScalar> > Scaling(const std::complex<RealScalar>& s)\n{ return UniformScaling<std::complex<RealScalar> >(s); }\n\n/** Constructs a 2D axis aligned scaling */\ntemplate<typename Scalar>\ninline DiagonalMatrix<Scalar,2> Scaling(const Scalar& sx, const Scalar& sy)\n{ return DiagonalMatrix<Scalar,2>(sx, sy); }\n/** Constructs a 3D axis aligned scaling */\ntemplate<typename Scalar>\ninline DiagonalMatrix<Scalar,3> Scaling(const Scalar& sx, const 
Scalar& sy, const Scalar& sz)\n{ return DiagonalMatrix<Scalar,3>(sx, sy, sz); }\n\n/** Constructs an axis aligned scaling expression from vector expression \\a coeffs\n  * This is an alias for coeffs.asDiagonal()\n  */\ntemplate<typename Derived>\ninline const DiagonalWrapper<const Derived> Scaling(const MatrixBase<Derived>& coeffs)\n{ return coeffs.asDiagonal(); }\n\n/** \\deprecated */\ntypedef DiagonalMatrix<float, 2> AlignedScaling2f;\n/** \\deprecated */\ntypedef DiagonalMatrix<double,2> AlignedScaling2d;\n/** \\deprecated */\ntypedef DiagonalMatrix<float, 3> AlignedScaling3f;\n/** \\deprecated */\ntypedef DiagonalMatrix<double,3> AlignedScaling3d;\n//@}\n\ntemplate<typename Scalar>\ntemplate<int Dim>\ninline Transform<Scalar,Dim,Affine>\nUniformScaling<Scalar>::operator* (const Translation<Scalar,Dim>& t) const\n{\n  Transform<Scalar,Dim,Affine> res;\n  res.matrix().setZero();\n  res.linear().diagonal().fill(factor());\n  res.translation() = factor() * t.vector();\n  res(Dim,Dim) = Scalar(1);\n  return res;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SCALING_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/Transform.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2010 Hauke Heibel <hauke.heibel@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_TRANSFORM_H\n#define EIGEN_TRANSFORM_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename Transform>\nstruct transform_traits\n{\n  enum\n  {\n    Dim = Transform::Dim,\n    HDim = Transform::HDim,\n    Mode = Transform::Mode,\n    IsProjective = (int(Mode)==int(Projective))\n  };\n};\n\ntemplate< typename TransformType,\n          typename MatrixType,\n          int Case = transform_traits<TransformType>::IsProjective ? 0\n                   : int(MatrixType::RowsAtCompileTime) == int(transform_traits<TransformType>::HDim) ? 
1\n                   : 2,\n          int RhsCols = MatrixType::ColsAtCompileTime>\nstruct transform_right_product_impl;\n\ntemplate< typename Other,\n          int Mode,\n          int Options,\n          int Dim,\n          int HDim,\n          int OtherRows=Other::RowsAtCompileTime,\n          int OtherCols=Other::ColsAtCompileTime>\nstruct transform_left_product_impl;\n\ntemplate< typename Lhs,\n          typename Rhs,\n          bool AnyProjective = \n            transform_traits<Lhs>::IsProjective ||\n            transform_traits<Rhs>::IsProjective>\nstruct transform_transform_product_impl;\n\ntemplate< typename Other,\n          int Mode,\n          int Options,\n          int Dim,\n          int HDim,\n          int OtherRows=Other::RowsAtCompileTime,\n          int OtherCols=Other::ColsAtCompileTime>\nstruct transform_construct_from_matrix;\n\ntemplate<typename TransformType> struct transform_take_affine_part;\n\ntemplate<typename _Scalar, int _Dim, int _Mode, int _Options>\nstruct traits<Transform<_Scalar,_Dim,_Mode,_Options> >\n{\n  typedef _Scalar Scalar;\n  typedef Eigen::Index StorageIndex;\n  typedef Dense StorageKind;\n  enum {\n    Dim1 = _Dim==Dynamic ? _Dim : _Dim + 1,\n    RowsAtCompileTime = _Mode==Projective ? Dim1 : _Dim,\n    ColsAtCompileTime = Dim1,\n    MaxRowsAtCompileTime = RowsAtCompileTime,\n    MaxColsAtCompileTime = ColsAtCompileTime,\n    Flags = 0\n  };\n};\n\ntemplate<int Mode> struct transform_make_affine;\n\n} // end namespace internal\n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\class Transform\n  *\n  * \\brief Represents an homogeneous transformation in a N dimensional space\n  *\n  * \\tparam _Scalar the scalar type, i.e., the type of the coefficients\n  * \\tparam _Dim the dimension of the space\n  * \\tparam _Mode the type of the transformation. Can be:\n  *              - #Affine: the transformation is stored as a (Dim+1)^2 matrix,\n  *                         where the last row is assumed to be [0 ... 
0 1].\n  *              - #AffineCompact: the transformation is stored as a (Dim)x(Dim+1) matrix.\n  *              - #Projective: the transformation is stored as a (Dim+1)^2 matrix\n  *                             without any assumption.\n  * \\tparam _Options has the same meaning as in class Matrix. It allows to specify DontAlign and/or RowMajor.\n  *                  These Options are passed directly to the underlying matrix type.\n  *\n  * The homography is internally represented and stored by a matrix which\n  * is available through the matrix() method. To understand the behavior of\n  * this class you have to think a Transform object as its internal\n  * matrix representation. The chosen convention is right multiply:\n  *\n  * \\code v' = T * v \\endcode\n  *\n  * Therefore, an affine transformation matrix M is shaped like this:\n  *\n  * \\f$ \\left( \\begin{array}{cc}\n  * linear & translation\\\\\n  * 0 ... 0 & 1\n  * \\end{array} \\right) \\f$\n  *\n  * Note that for a projective transformation the last row can be anything,\n  * and then the interpretation of different parts might be sightly different.\n  *\n  * However, unlike a plain matrix, the Transform class provides many features\n  * simplifying both its assembly and usage. In particular, it can be composed\n  * with any other transformations (Transform,Translation,RotationBase,DiagonalMatrix)\n  * and can be directly used to transform implicit homogeneous vectors. All these\n  * operations are handled via the operator*. For the composition of transformations,\n  * its principle consists to first convert the right/left hand sides of the product\n  * to a compatible (Dim+1)^2 matrix and then perform a pure matrix product.\n  * Of course, internally, operator* tries to perform the minimal number of operations\n  * according to the nature of each terms. 
Likewise, when applying the transform\n  * to points, the latters are automatically promoted to homogeneous vectors\n  * before doing the matrix product. The conventions to homogeneous representations\n  * are performed as follow:\n  *\n  * \\b Translation t (Dim)x(1):\n  * \\f$ \\left( \\begin{array}{cc}\n  * I & t \\\\\n  * 0\\,...\\,0 & 1\n  * \\end{array} \\right) \\f$\n  *\n  * \\b Rotation R (Dim)x(Dim):\n  * \\f$ \\left( \\begin{array}{cc}\n  * R & 0\\\\\n  * 0\\,...\\,0 & 1\n  * \\end{array} \\right) \\f$\n  *<!--\n  * \\b Linear \\b Matrix L (Dim)x(Dim):\n  * \\f$ \\left( \\begin{array}{cc}\n  * L & 0\\\\\n  * 0\\,...\\,0 & 1\n  * \\end{array} \\right) \\f$\n  *\n  * \\b Affine \\b Matrix A (Dim)x(Dim+1):\n  * \\f$ \\left( \\begin{array}{c}\n  * A\\\\\n  * 0\\,...\\,0\\,1\n  * \\end{array} \\right) \\f$\n  *-->\n  * \\b Scaling \\b DiagonalMatrix S (Dim)x(Dim):\n  * \\f$ \\left( \\begin{array}{cc}\n  * S & 0\\\\\n  * 0\\,...\\,0 & 1\n  * \\end{array} \\right) \\f$\n  *\n  * \\b Column \\b point v (Dim)x(1):\n  * \\f$ \\left( \\begin{array}{c}\n  * v\\\\\n  * 1\n  * \\end{array} \\right) \\f$\n  *\n  * \\b Set \\b of \\b column \\b points V1...Vn (Dim)x(n):\n  * \\f$ \\left( \\begin{array}{ccc}\n  * v_1 & ... & v_n\\\\\n  * 1 & ... & 1\n  * \\end{array} \\right) \\f$\n  *\n  * The concatenation of a Transform object with any kind of other transformation\n  * always returns a Transform object.\n  *\n  * A little exception to the \"as pure matrix product\" rule is the case of the\n  * transformation of non homogeneous vectors by an affine transformation. 
In\n  * that case the last matrix row can be ignored, and the product returns non\n  * homogeneous vectors.\n  *\n  * Since, for instance, a Dim x Dim matrix is interpreted as a linear transformation,\n  * it is not possible to directly transform Dim vectors stored in a Dim x Dim matrix.\n  * The solution is either to use a Dim x Dynamic matrix or explicitly request a\n  * vector transformation by making the vector homogeneous:\n  * \\code\n  * m' = T * m.colwise().homogeneous();\n  * \\endcode\n  * Note that there is zero overhead.\n  *\n  * Conversion methods from/to Qt's QMatrix and QTransform are available if the\n  * preprocessor token EIGEN_QT_SUPPORT is defined.\n  *\n  * This class can be extended with the help of the plugin mechanism described on the page\n  * \\ref TopicCustomizing_Plugins by defining the preprocessor symbol \\c EIGEN_TRANSFORM_PLUGIN.\n  *\n  * \\sa class Matrix, class Quaternion\n  */\ntemplate<typename _Scalar, int _Dim, int _Mode, int _Options>\nclass Transform\n{\npublic:\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1))\n  enum {\n    Mode = _Mode,\n    Options = _Options,\n    Dim = _Dim,     ///< space dimension in which the transformation holds\n    HDim = _Dim+1,  ///< size of a respective homogeneous vector\n    Rows = int(Mode)==(AffineCompact) ? 
Dim : HDim\n  };\n  /** the scalar type of the coefficients */\n  typedef _Scalar Scalar;\n  typedef Eigen::Index StorageIndex;\n  typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n  /** type of the matrix used to represent the transformation */\n  typedef typename internal::make_proper_matrix_type<Scalar,Rows,HDim,Options>::type MatrixType;\n  /** constified MatrixType */\n  typedef const MatrixType ConstMatrixType;\n  /** type of the matrix used to represent the linear part of the transformation */\n  typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType;\n  /** type of read/write reference to the linear part of the transformation */\n  typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> LinearPart;\n  /** type of read reference to the linear part of the transformation */\n  typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> ConstLinearPart;\n  /** type of read/write reference to the affine part of the transformation */\n  typedef typename internal::conditional<int(Mode)==int(AffineCompact),\n                              MatrixType&,\n                              Block<MatrixType,Dim,HDim> >::type AffinePart;\n  /** type of read reference to the affine part of the transformation */\n  typedef typename internal::conditional<int(Mode)==int(AffineCompact),\n                              const MatrixType&,\n                              const Block<const MatrixType,Dim,HDim> >::type ConstAffinePart;\n  /** type of a vector */\n  typedef Matrix<Scalar,Dim,1> VectorType;\n  /** type of a read/write reference to the translation part of the rotation */\n  typedef Block<MatrixType,Dim,1,!(internal::traits<MatrixType>::Flags & RowMajorBit)> TranslationPart;\n  /** type of a read reference to the translation part of the rotation */\n  typedef const Block<ConstMatrixType,Dim,1,!(internal::traits<MatrixType>::Flags & RowMajorBit)> ConstTranslationPart;\n  /** corresponding 
translation type */\n  typedef Translation<Scalar,Dim> TranslationType;\n  \n  // this intermediate enum is needed to avoid an ICE with gcc 3.4 and 4.0\n  enum { TransformTimeDiagonalMode = ((Mode==int(Isometry))?Affine:int(Mode)) };\n  /** The return type of the product between a diagonal matrix and a transform */\n  typedef Transform<Scalar,Dim,TransformTimeDiagonalMode> TransformTimeDiagonalReturnType;\n\nprotected:\n\n  MatrixType m_matrix;\n\npublic:\n\n  /** Default constructor without initialization of the meaningful coefficients.\n    * If Mode==Affine, then the last row is set to [0 ... 0 1] */\n  EIGEN_DEVICE_FUNC inline Transform()\n  {\n    check_template_params();\n    internal::transform_make_affine<(int(Mode)==Affine) ? Affine : AffineCompact>::run(m_matrix);\n  }\n\n  EIGEN_DEVICE_FUNC inline Transform(const Transform& other)\n  {\n    check_template_params();\n    m_matrix = other.m_matrix;\n  }\n\n  EIGEN_DEVICE_FUNC inline explicit Transform(const TranslationType& t)\n  {\n    check_template_params();\n    *this = t;\n  }\n  EIGEN_DEVICE_FUNC inline explicit Transform(const UniformScaling<Scalar>& s)\n  {\n    check_template_params();\n    *this = s;\n  }\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC inline explicit Transform(const RotationBase<Derived, Dim>& r)\n  {\n    check_template_params();\n    *this = r;\n  }\n\n  EIGEN_DEVICE_FUNC inline Transform& operator=(const Transform& other)\n  { m_matrix = other.m_matrix; return *this; }\n\n  typedef internal::transform_take_affine_part<Transform> take_affine_part;\n\n  /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. 
*/\n  template<typename OtherDerived>\n  EIGEN_DEVICE_FUNC inline explicit Transform(const EigenBase<OtherDerived>& other)\n  {\n    EIGEN_STATIC_ASSERT((internal::is_same<Scalar,typename OtherDerived::Scalar>::value),\n      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY);\n\n    check_template_params();\n    internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived());\n  }\n\n  /** Set \\c *this from a Dim^2 or (Dim+1)^2 matrix. */\n  template<typename OtherDerived>\n  EIGEN_DEVICE_FUNC inline Transform& operator=(const EigenBase<OtherDerived>& other)\n  {\n    EIGEN_STATIC_ASSERT((internal::is_same<Scalar,typename OtherDerived::Scalar>::value),\n      YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY);\n\n    internal::transform_construct_from_matrix<OtherDerived,Mode,Options,Dim,HDim>::run(this, other.derived());\n    return *this;\n  }\n  \n  template<int OtherOptions>\n  EIGEN_DEVICE_FUNC inline Transform(const Transform<Scalar,Dim,Mode,OtherOptions>& other)\n  {\n    check_template_params();\n    // only the options change, we can directly copy the matrices\n    m_matrix = other.matrix();\n  }\n\n  template<int OtherMode,int OtherOptions>\n  EIGEN_DEVICE_FUNC inline Transform(const Transform<Scalar,Dim,OtherMode,OtherOptions>& other)\n  {\n    check_template_params();\n    // prevent conversions as:\n    // Affine | AffineCompact | Isometry = Projective\n    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Projective), Mode==int(Projective)),\n                        YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION)\n\n    // prevent conversions as:\n    // Isometry = Affine | AffineCompact\n    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(OtherMode==int(Affine)||OtherMode==int(AffineCompact), Mode!=int(Isometry)),\n                        
YOU_PERFORMED_AN_INVALID_TRANSFORMATION_CONVERSION)\n\n    enum { ModeIsAffineCompact = Mode == int(AffineCompact),\n           OtherModeIsAffineCompact = OtherMode == int(AffineCompact)\n    };\n\n    if(EIGEN_CONST_CONDITIONAL(ModeIsAffineCompact == OtherModeIsAffineCompact))\n    {\n      // We need the block expression because the code is compiled for all\n      // combinations of transformations and will trigger a compile time error\n      // if one tries to assign the matrices directly\n      m_matrix.template block<Dim,Dim+1>(0,0) = other.matrix().template block<Dim,Dim+1>(0,0);\n      makeAffine();\n    }\n    else if(EIGEN_CONST_CONDITIONAL(OtherModeIsAffineCompact))\n    {\n      typedef typename Transform<Scalar,Dim,OtherMode,OtherOptions>::MatrixType OtherMatrixType;\n      internal::transform_construct_from_matrix<OtherMatrixType,Mode,Options,Dim,HDim>::run(this, other.matrix());\n    }\n    else\n    {\n      // here we know that Mode == AffineCompact and OtherMode != AffineCompact.\n      // if OtherMode were Projective, the static assert above would already have caught it.\n      // So the only possibility is that OtherMode == Affine\n      linear() = other.linear();\n      translation() = other.translation();\n    }\n  }\n\n  template<typename OtherDerived>\n  EIGEN_DEVICE_FUNC Transform(const ReturnByValue<OtherDerived>& other)\n  {\n    check_template_params();\n    other.evalTo(*this);\n  }\n\n  template<typename OtherDerived>\n  EIGEN_DEVICE_FUNC Transform& operator=(const ReturnByValue<OtherDerived>& other)\n  {\n    other.evalTo(*this);\n    return *this;\n  }\n\n  #ifdef EIGEN_QT_SUPPORT\n  inline Transform(const QMatrix& other);\n  inline Transform& operator=(const QMatrix& other);\n  inline QMatrix toQMatrix(void) const;\n  inline Transform(const QTransform& other);\n  inline Transform& operator=(const QTransform& other);\n  inline QTransform toQTransform(void) const;\n  #endif\n  \n  EIGEN_DEVICE_FUNC Index rows() const { return 
int(Mode)==int(Projective) ? m_matrix.cols() : (m_matrix.cols()-1); }\n  EIGEN_DEVICE_FUNC Index cols() const { return m_matrix.cols(); }\n\n  /** shortcut for m_matrix(row,col);\n    * \\sa MatrixBase::operator(Index,Index) const */\n  EIGEN_DEVICE_FUNC inline Scalar operator() (Index row, Index col) const { return m_matrix(row,col); }\n  /** shortcut for m_matrix(row,col);\n    * \\sa MatrixBase::operator(Index,Index) */\n  EIGEN_DEVICE_FUNC inline Scalar& operator() (Index row, Index col) { return m_matrix(row,col); }\n\n  /** \\returns a read-only expression of the transformation matrix */\n  EIGEN_DEVICE_FUNC inline const MatrixType& matrix() const { return m_matrix; }\n  /** \\returns a writable expression of the transformation matrix */\n  EIGEN_DEVICE_FUNC inline MatrixType& matrix() { return m_matrix; }\n\n  /** \\returns a read-only expression of the linear part of the transformation */\n  EIGEN_DEVICE_FUNC inline ConstLinearPart linear() const { return ConstLinearPart(m_matrix,0,0); }\n  /** \\returns a writable expression of the linear part of the transformation */\n  EIGEN_DEVICE_FUNC inline LinearPart linear() { return LinearPart(m_matrix,0,0); }\n\n  /** \\returns a read-only expression of the Dim x HDim affine part of the transformation */\n  EIGEN_DEVICE_FUNC inline ConstAffinePart affine() const { return take_affine_part::run(m_matrix); }\n  /** \\returns a writable expression of the Dim x HDim affine part of the transformation */\n  EIGEN_DEVICE_FUNC inline AffinePart affine() { return take_affine_part::run(m_matrix); }\n\n  /** \\returns a read-only expression of the translation vector of the transformation */\n  EIGEN_DEVICE_FUNC inline ConstTranslationPart translation() const { return ConstTranslationPart(m_matrix,0,Dim); }\n  /** \\returns a writable expression of the translation vector of the transformation */\n  EIGEN_DEVICE_FUNC inline TranslationPart translation() { return TranslationPart(m_matrix,0,Dim); }\n\n  /** \\returns an 
expression of the product between the transform \\c *this and a matrix expression \\a other.\n    *\n    * The right-hand-side \\a other can be either:\n    * \\li an homogeneous vector of size Dim+1,\n    * \\li a set of homogeneous vectors of size Dim+1 x N,\n    * \\li a transformation matrix of size Dim+1 x Dim+1.\n    *\n    * Moreover, if \\c *this represents an affine transformation (i.e., Mode!=Projective), then \\a other can also be:\n    * \\li a point of size Dim (computes: \\code this->linear() * other + this->translation()\\endcode),\n    * \\li a set of N points as a Dim x N matrix (computes: \\code (this->linear() * other).colwise() + this->translation()\\endcode),\n    *\n    * In all cases, the return type is a matrix or vector of same sizes as the right-hand-side \\a other.\n    *\n    * If you want to interpret \\a other as a linear or affine transformation, then first convert it to a Transform<> type,\n    * or do your own cooking.\n    *\n    * Finally, if you want to apply Affine transformations to vectors, then explicitly apply the linear part only:\n    * \\code\n    * Affine3f A;\n    * Vector3f v1, v2;\n    * v2 = A.linear() * v1;\n    * \\endcode\n    *\n    */\n  // note: this function is defined here because some compilers cannot find the respective declaration\n  template<typename OtherDerived>\n  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const typename internal::transform_right_product_impl<Transform, OtherDerived>::ResultType\n  operator * (const EigenBase<OtherDerived> &other) const\n  { return internal::transform_right_product_impl<Transform, OtherDerived>::run(*this,other.derived()); }\n\n  /** \\returns the product expression of a transformation matrix \\a a times a transform \\a b\n    *\n    * The left hand side \\a other can be either:\n    * \\li a linear transformation matrix of size Dim x Dim,\n    * \\li an affine transformation matrix of size Dim x Dim+1,\n    * \\li a general transformation matrix of size Dim+1 x Dim+1.\n    
*/\n  template<typename OtherDerived> friend\n  EIGEN_DEVICE_FUNC inline const typename internal::transform_left_product_impl<OtherDerived,Mode,Options,_Dim,_Dim+1>::ResultType\n    operator * (const EigenBase<OtherDerived> &a, const Transform &b)\n  { return internal::transform_left_product_impl<OtherDerived,Mode,Options,Dim,HDim>::run(a.derived(),b); }\n\n  /** \\returns The product expression of a transform \\a a times a diagonal matrix \\a b\n    *\n    * The rhs diagonal matrix is interpreted as an affine scaling transformation. The\n    * product results in a Transform of the same type (mode) as the lhs only if the lhs \n    * mode is no isometry. In that case, the returned transform is an affinity.\n    */\n  template<typename DiagonalDerived>\n  EIGEN_DEVICE_FUNC inline const TransformTimeDiagonalReturnType\n    operator * (const DiagonalBase<DiagonalDerived> &b) const\n  {\n    TransformTimeDiagonalReturnType res(*this);\n    res.linearExt() *= b;\n    return res;\n  }\n\n  /** \\returns The product expression of a diagonal matrix \\a a times a transform \\a b\n    *\n    * The lhs diagonal matrix is interpreted as an affine scaling transformation. The\n    * product results in a Transform of the same type (mode) as the lhs only if the lhs \n    * mode is no isometry. 
In that case, the returned transform is an affinity.\n    */\n  template<typename DiagonalDerived>\n  EIGEN_DEVICE_FUNC friend inline TransformTimeDiagonalReturnType\n    operator * (const DiagonalBase<DiagonalDerived> &a, const Transform &b)\n  {\n    TransformTimeDiagonalReturnType res;\n    res.linear().noalias() = a*b.linear();\n    res.translation().noalias() = a*b.translation();\n    if (EIGEN_CONST_CONDITIONAL(Mode!=int(AffineCompact)))\n      res.matrix().row(Dim) = b.matrix().row(Dim);\n    return res;\n  }\n\n  template<typename OtherDerived>\n  EIGEN_DEVICE_FUNC inline Transform& operator*=(const EigenBase<OtherDerived>& other) { return *this = *this * other; }\n\n  /** Concatenates two transformations */\n  EIGEN_DEVICE_FUNC inline const Transform operator * (const Transform& other) const\n  {\n    return internal::transform_transform_product_impl<Transform,Transform>::run(*this,other);\n  }\n  \n  #if EIGEN_COMP_ICC\nprivate:\n  // this intermediate structure permits to workaround a bug in ICC 11:\n  //   error: template instantiation resulted in unexpected function type of \"Eigen::Transform<double, 3, 32, 0>\n  //             (const Eigen::Transform<double, 3, 2, 0> &) const\"\n  //  (the meaning of a name may have changed since the template declaration -- the type of the template is:\n  // \"Eigen::internal::transform_transform_product_impl<Eigen::Transform<double, 3, 32, 0>,\n  //     Eigen::Transform<double, 3, Mode, Options>, <expression>>::ResultType (const Eigen::Transform<double, 3, Mode, Options> &) const\")\n  // \n  template<int OtherMode,int OtherOptions> struct icc_11_workaround\n  {\n    typedef internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> > ProductType;\n    typedef typename ProductType::ResultType ResultType;\n  };\n  \npublic:\n  /** Concatenates two different transformations */\n  template<int OtherMode,int OtherOptions>\n  inline typename 
icc_11_workaround<OtherMode,OtherOptions>::ResultType\n    operator * (const Transform<Scalar,Dim,OtherMode,OtherOptions>& other) const\n  {\n    typedef typename icc_11_workaround<OtherMode,OtherOptions>::ProductType ProductType;\n    return ProductType::run(*this,other);\n  }\n  #else\n  /** Concatenates two different transformations */\n  template<int OtherMode,int OtherOptions>\n  EIGEN_DEVICE_FUNC inline typename internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> >::ResultType\n    operator * (const Transform<Scalar,Dim,OtherMode,OtherOptions>& other) const\n  {\n    return internal::transform_transform_product_impl<Transform,Transform<Scalar,Dim,OtherMode,OtherOptions> >::run(*this,other);\n  }\n  #endif\n\n  /** \\sa MatrixBase::setIdentity() */\n  EIGEN_DEVICE_FUNC void setIdentity() { m_matrix.setIdentity(); }\n\n  /**\n   * \\brief Returns an identity transformation.\n   * \\todo In the future this function should be returning a Transform expression.\n   */\n  EIGEN_DEVICE_FUNC static const Transform Identity()\n  {\n    return Transform(MatrixType::Identity());\n  }\n\n  template<typename OtherDerived>\n  EIGEN_DEVICE_FUNC \n  inline Transform& scale(const MatrixBase<OtherDerived> &other);\n\n  template<typename OtherDerived>\n  EIGEN_DEVICE_FUNC\n  inline Transform& prescale(const MatrixBase<OtherDerived> &other);\n\n  EIGEN_DEVICE_FUNC inline Transform& scale(const Scalar& s);\n  EIGEN_DEVICE_FUNC inline Transform& prescale(const Scalar& s);\n\n  template<typename OtherDerived>\n  EIGEN_DEVICE_FUNC\n  inline Transform& translate(const MatrixBase<OtherDerived> &other);\n\n  template<typename OtherDerived>\n  EIGEN_DEVICE_FUNC\n  inline Transform& pretranslate(const MatrixBase<OtherDerived> &other);\n\n  template<typename RotationType>\n  EIGEN_DEVICE_FUNC\n  inline Transform& rotate(const RotationType& rotation);\n\n  template<typename RotationType>\n  EIGEN_DEVICE_FUNC\n  inline Transform& prerotate(const 
RotationType& rotation);\n\n  EIGEN_DEVICE_FUNC Transform& shear(const Scalar& sx, const Scalar& sy);\n  EIGEN_DEVICE_FUNC Transform& preshear(const Scalar& sx, const Scalar& sy);\n\n  EIGEN_DEVICE_FUNC inline Transform& operator=(const TranslationType& t);\n  \n  EIGEN_DEVICE_FUNC\n  inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); }\n  \n  EIGEN_DEVICE_FUNC inline Transform operator*(const TranslationType& t) const;\n\n  EIGEN_DEVICE_FUNC \n  inline Transform& operator=(const UniformScaling<Scalar>& t);\n  \n  EIGEN_DEVICE_FUNC\n  inline Transform& operator*=(const UniformScaling<Scalar>& s) { return scale(s.factor()); }\n  \n  EIGEN_DEVICE_FUNC\n  inline TransformTimeDiagonalReturnType operator*(const UniformScaling<Scalar>& s) const\n  {\n    TransformTimeDiagonalReturnType res = *this;\n    res.scale(s.factor());\n    return res;\n  }\n\n  EIGEN_DEVICE_FUNC\n  inline Transform& operator*=(const DiagonalMatrix<Scalar,Dim>& s) { linearExt() *= s; return *this; }\n\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC inline Transform& operator=(const RotationBase<Derived,Dim>& r);\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC inline Transform& operator*=(const RotationBase<Derived,Dim>& r) { return rotate(r.toRotationMatrix()); }\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC inline Transform operator*(const RotationBase<Derived,Dim>& r) const;\n\n  EIGEN_DEVICE_FUNC const LinearMatrixType rotation() const;\n  template<typename RotationMatrixType, typename ScalingMatrixType>\n  EIGEN_DEVICE_FUNC\n  void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const;\n  template<typename ScalingMatrixType, typename RotationMatrixType>\n  EIGEN_DEVICE_FUNC\n  void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const;\n\n  template<typename PositionDerived, typename OrientationType, typename ScaleDerived>\n  EIGEN_DEVICE_FUNC\n  Transform& 
fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,\n    const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale);\n\n  EIGEN_DEVICE_FUNC\n  inline Transform inverse(TransformTraits traits = (TransformTraits)Mode) const;\n\n  /** \\returns a const pointer to the column major internal matrix */\n  EIGEN_DEVICE_FUNC const Scalar* data() const { return m_matrix.data(); }\n  /** \\returns a non-const pointer to the column major internal matrix */\n  EIGEN_DEVICE_FUNC Scalar* data() { return m_matrix.data(); }\n\n  /** \\returns \\c *this with scalar type casted to \\a NewScalarType\n    *\n    * Note that if \\a NewScalarType is equal to the current scalar type of \\c *this\n    * then this function smartly returns a const reference to \\c *this.\n    */\n  template<typename NewScalarType>\n  EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim,Mode,Options> >::type cast() const\n  { return typename internal::cast_return_type<Transform,Transform<NewScalarType,Dim,Mode,Options> >::type(*this); }\n\n  /** Copy constructor with scalar type conversion */\n  template<typename OtherScalarType>\n  EIGEN_DEVICE_FUNC inline explicit Transform(const Transform<OtherScalarType,Dim,Mode,Options>& other)\n  {\n    check_template_params();\n    m_matrix = other.matrix().template cast<Scalar>();\n  }\n\n  /** \\returns \\c true if \\c *this is approximately equal to \\a other, within the precision\n    * determined by \\a prec.\n    *\n    * \\sa MatrixBase::isApprox() */\n  EIGEN_DEVICE_FUNC bool isApprox(const Transform& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const\n  { return m_matrix.isApprox(other.m_matrix, prec); }\n\n  /** Sets the last row to [0 ... 
0 1]\n    */\n  EIGEN_DEVICE_FUNC void makeAffine()\n  {\n    internal::transform_make_affine<int(Mode)>::run(m_matrix);\n  }\n\n  /** \\internal\n    * \\returns the Dim x Dim linear part if the transformation is affine,\n    *          and the HDim x Dim part for projective transformations.\n    */\n  EIGEN_DEVICE_FUNC inline Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,Dim> linearExt()\n  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,Dim>(0,0); }\n  /** \\internal\n    * \\returns the Dim x Dim linear part if the transformation is affine,\n    *          and the HDim x Dim part for projective transformations.\n    */\n  EIGEN_DEVICE_FUNC inline const Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,Dim> linearExt() const\n  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,Dim>(0,0); }\n\n  /** \\internal\n    * \\returns the translation part if the transformation is affine,\n    *          and the last column for projective transformations.\n    */\n  EIGEN_DEVICE_FUNC inline Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,1> translationExt()\n  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,1>(0,Dim); }\n  /** \\internal\n    * \\returns the translation part if the transformation is affine,\n    *          and the last column for projective transformations.\n    */\n  EIGEN_DEVICE_FUNC inline const Block<MatrixType,int(Mode)==int(Projective)?HDim:Dim,1> translationExt() const\n  { return m_matrix.template block<int(Mode)==int(Projective)?HDim:Dim,1>(0,Dim); }\n\n\n  #ifdef EIGEN_TRANSFORM_PLUGIN\n  #include EIGEN_TRANSFORM_PLUGIN\n  #endif\n  \nprotected:\n  #ifndef EIGEN_PARSED_BY_DOXYGEN\n    EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void check_template_params()\n    {\n      EIGEN_STATIC_ASSERT((Options & (DontAlign|RowMajor)) == Options, INVALID_MATRIX_TEMPLATE_PARAMETERS)\n    }\n  #endif\n\n};\n\n/** \\ingroup Geometry_Module */\ntypedef Transform<float,2,Isometry> 
Isometry2f;\n/** \\ingroup Geometry_Module */\ntypedef Transform<float,3,Isometry> Isometry3f;\n/** \\ingroup Geometry_Module */\ntypedef Transform<double,2,Isometry> Isometry2d;\n/** \\ingroup Geometry_Module */\ntypedef Transform<double,3,Isometry> Isometry3d;\n\n/** \\ingroup Geometry_Module */\ntypedef Transform<float,2,Affine> Affine2f;\n/** \\ingroup Geometry_Module */\ntypedef Transform<float,3,Affine> Affine3f;\n/** \\ingroup Geometry_Module */\ntypedef Transform<double,2,Affine> Affine2d;\n/** \\ingroup Geometry_Module */\ntypedef Transform<double,3,Affine> Affine3d;\n\n/** \\ingroup Geometry_Module */\ntypedef Transform<float,2,AffineCompact> AffineCompact2f;\n/** \\ingroup Geometry_Module */\ntypedef Transform<float,3,AffineCompact> AffineCompact3f;\n/** \\ingroup Geometry_Module */\ntypedef Transform<double,2,AffineCompact> AffineCompact2d;\n/** \\ingroup Geometry_Module */\ntypedef Transform<double,3,AffineCompact> AffineCompact3d;\n\n/** \\ingroup Geometry_Module */\ntypedef Transform<float,2,Projective> Projective2f;\n/** \\ingroup Geometry_Module */\ntypedef Transform<float,3,Projective> Projective3f;\n/** \\ingroup Geometry_Module */\ntypedef Transform<double,2,Projective> Projective2d;\n/** \\ingroup Geometry_Module */\ntypedef Transform<double,3,Projective> Projective3d;\n\n/**************************\n*** Optional QT support ***\n**************************/\n\n#ifdef EIGEN_QT_SUPPORT\n/** Initializes \\c *this from a QMatrix assuming the dimension is 2.\n  *\n  * This function is available only if the token EIGEN_QT_SUPPORT is defined.\n  */\ntemplate<typename Scalar, int Dim, int Mode,int Options>\nTransform<Scalar,Dim,Mode,Options>::Transform(const QMatrix& other)\n{\n  check_template_params();\n  *this = other;\n}\n\n/** Set \\c *this from a QMatrix assuming the dimension is 2.\n  *\n  * This function is available only if the token EIGEN_QT_SUPPORT is defined.\n  */\ntemplate<typename Scalar, int Dim, int Mode,int 
Options>\nTransform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QMatrix& other)\n{\n  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)\n  if (EIGEN_CONST_CONDITIONAL(Mode == int(AffineCompact)))\n    m_matrix << other.m11(), other.m21(), other.dx(),\n                other.m12(), other.m22(), other.dy();\n  else\n    m_matrix << other.m11(), other.m21(), other.dx(),\n                other.m12(), other.m22(), other.dy(),\n                0, 0, 1;\n  return *this;\n}\n\n/** \\returns a QMatrix from \\c *this assuming the dimension is 2.\n  *\n  * \\warning this conversion might loss data if \\c *this is not affine\n  *\n  * This function is available only if the token EIGEN_QT_SUPPORT is defined.\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\nQMatrix Transform<Scalar,Dim,Mode,Options>::toQMatrix(void) const\n{\n  check_template_params();\n  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)\n  return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0),\n                 m_matrix.coeff(0,1), m_matrix.coeff(1,1),\n                 m_matrix.coeff(0,2), m_matrix.coeff(1,2));\n}\n\n/** Initializes \\c *this from a QTransform assuming the dimension is 2.\n  *\n  * This function is available only if the token EIGEN_QT_SUPPORT is defined.\n  */\ntemplate<typename Scalar, int Dim, int Mode,int Options>\nTransform<Scalar,Dim,Mode,Options>::Transform(const QTransform& other)\n{\n  check_template_params();\n  *this = other;\n}\n\n/** Set \\c *this from a QTransform assuming the dimension is 2.\n  *\n  * This function is available only if the token EIGEN_QT_SUPPORT is defined.\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\nTransform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const QTransform& other)\n{\n  check_template_params();\n  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)\n  if (EIGEN_CONST_CONDITIONAL(Mode == int(AffineCompact)))\n    m_matrix 
<< other.m11(), other.m21(), other.dx(),\n                other.m12(), other.m22(), other.dy();\n  else\n    m_matrix << other.m11(), other.m21(), other.dx(),\n                other.m12(), other.m22(), other.dy(),\n                other.m13(), other.m23(), other.m33();\n  return *this;\n}\n\n/** \\returns a QTransform from \\c *this assuming the dimension is 2.\n  *\n  * This function is available only if the token EIGEN_QT_SUPPORT is defined.\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\nQTransform Transform<Scalar,Dim,Mode,Options>::toQTransform(void) const\n{\n  EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE)\n  if (EIGEN_CONST_CONDITIONAL(Mode == int(AffineCompact)))\n    return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0),\n                      m_matrix.coeff(0,1), m_matrix.coeff(1,1),\n                      m_matrix.coeff(0,2), m_matrix.coeff(1,2));\n  else\n    return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0),\n                      m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1),\n                      m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2));\n}\n#endif\n\n/*********************\n*** Procedural API ***\n*********************/\n\n/** Applies on the right the non uniform scale transformation represented\n  * by the vector \\a other to \\c *this and returns a reference to \\c *this.\n  * \\sa prescale()\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&\nTransform<Scalar,Dim,Mode,Options>::scale(const MatrixBase<OtherDerived> &other)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))\n  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)\n  linearExt().noalias() = (linearExt() * other.asDiagonal());\n  return *this;\n}\n\n/** Applies on the right a uniform scale of a factor \\a c to \\c *this\n  * 
and returns a reference to \\c *this.\n  * \\sa prescale(Scalar)\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\nEIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::scale(const Scalar& s)\n{\n  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)\n  linearExt() *= s;\n  return *this;\n}\n\n/** Applies on the left the non uniform scale transformation represented\n  * by the vector \\a other to \\c *this and returns a reference to \\c *this.\n  * \\sa scale()\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&\nTransform<Scalar,Dim,Mode,Options>::prescale(const MatrixBase<OtherDerived> &other)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))\n  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)\n  affine().noalias() = (other.asDiagonal() * affine());\n  return *this;\n}\n\n/** Applies on the left a uniform scale of a factor \\a c to \\c *this\n  * and returns a reference to \\c *this.\n  * \\sa scale(Scalar)\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\nEIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::prescale(const Scalar& s)\n{\n  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)\n  m_matrix.template topRows<Dim>() *= s;\n  return *this;\n}\n\n/** Applies on the right the translation matrix represented by the vector \\a other\n  * to \\c *this and returns a reference to \\c *this.\n  * \\sa pretranslate()\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&\nTransform<Scalar,Dim,Mode,Options>::translate(const MatrixBase<OtherDerived> &other)\n{\n  
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))\n  translationExt() += linearExt() * other;\n  return *this;\n}\n\n/** Applies on the left the translation matrix represented by the vector \\a other\n  * to \\c *this and returns a reference to \\c *this.\n  * \\sa translate()\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&\nTransform<Scalar,Dim,Mode,Options>::pretranslate(const MatrixBase<OtherDerived> &other)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim))\n  if(EIGEN_CONST_CONDITIONAL(int(Mode)==int(Projective)))\n    affine() += other * m_matrix.row(Dim);\n  else\n    translation() += other;\n  return *this;\n}\n\n/** Applies on the right the rotation represented by the rotation \\a rotation\n  * to \\c *this and returns a reference to \\c *this.\n  *\n  * The template parameter \\a RotationType is the type of the rotation which\n  * must be known by internal::toRotationMatrix<>.\n  *\n  * Natively supported types includes:\n  *   - any scalar (2D),\n  *   - a Dim x Dim matrix expression,\n  *   - a Quaternion (3D),\n  *   - a AngleAxis (3D)\n  *\n  * This mechanism is easily extendable to support user types such as Euler angles,\n  * or a pair of Quaternion for 4D rotations.\n  *\n  * \\sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType)\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\ntemplate<typename RotationType>\nEIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&\nTransform<Scalar,Dim,Mode,Options>::rotate(const RotationType& rotation)\n{\n  linearExt() *= internal::toRotationMatrix<Scalar,Dim>(rotation);\n  return *this;\n}\n\n/** Applies on the left the rotation represented by the rotation \\a rotation\n  * to \\c *this and returns a reference to \\c *this.\n  *\n  * See rotate() for further details.\n  *\n  * \\sa rotate()\n  */\ntemplate<typename Scalar, int Dim, int 
Mode, int Options>\ntemplate<typename RotationType>\nEIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&\nTransform<Scalar,Dim,Mode,Options>::prerotate(const RotationType& rotation)\n{\n  m_matrix.template block<Dim,HDim>(0,0) = internal::toRotationMatrix<Scalar,Dim>(rotation)\n                                         * m_matrix.template block<Dim,HDim>(0,0);\n  return *this;\n}\n\n/** Applies on the right the shear transformation represented\n  * by the vector \\a other to \\c *this and returns a reference to \\c *this.\n  * \\warning 2D only.\n  * \\sa preshear()\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\nEIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&\nTransform<Scalar,Dim,Mode,Options>::shear(const Scalar& sx, const Scalar& sy)\n{\n  EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)\n  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)\n  VectorType tmp = linear().col(0)*sy + linear().col(1);\n  linear() << linear().col(0) + linear().col(1)*sx, tmp;\n  return *this;\n}\n\n/** Applies on the left the shear transformation represented\n  * by the vector \\a other to \\c *this and returns a reference to \\c *this.\n  * \\warning 2D only.\n  * \\sa shear()\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\nEIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&\nTransform<Scalar,Dim,Mode,Options>::preshear(const Scalar& sx, const Scalar& sy)\n{\n  EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE)\n  EIGEN_STATIC_ASSERT(Mode!=int(Isometry), THIS_METHOD_IS_ONLY_FOR_SPECIFIC_TRANSFORMATIONS)\n  m_matrix.template block<Dim,HDim>(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block<Dim,HDim>(0,0);\n  return *this;\n}\n\n/******************************************************\n*** Scaling, Translation and Rotation compatibility ***\n******************************************************/\n\ntemplate<typename Scalar, int Dim, int Mode, int 
Options>\nEIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const TranslationType& t)\n{\n  linear().setIdentity();\n  translation() = t.vector();\n  makeAffine();\n  return *this;\n}\n\ntemplate<typename Scalar, int Dim, int Mode, int Options>\nEIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const TranslationType& t) const\n{\n  Transform res = *this;\n  res.translate(t.vector());\n  return res;\n}\n\ntemplate<typename Scalar, int Dim, int Mode, int Options>\nEIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const UniformScaling<Scalar>& s)\n{\n  m_matrix.setZero();\n  linear().diagonal().fill(s.factor());\n  makeAffine();\n  return *this;\n}\n\ntemplate<typename Scalar, int Dim, int Mode, int Options>\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options>& Transform<Scalar,Dim,Mode,Options>::operator=(const RotationBase<Derived,Dim>& r)\n{\n  linear() = internal::toRotationMatrix<Scalar,Dim>(r);\n  translation().setZero();\n  makeAffine();\n  return *this;\n}\n\ntemplate<typename Scalar, int Dim, int Mode, int Options>\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode,Options> Transform<Scalar,Dim,Mode,Options>::operator*(const RotationBase<Derived,Dim>& r) const\n{\n  Transform res = *this;\n  res.rotate(r.derived());\n  return res;\n}\n\n/************************\n*** Special functions ***\n************************/\n\n/** \\returns the rotation part of the transformation\n  *\n  *\n  * \\svd_module\n  *\n  * \\sa computeRotationScaling(), computeScalingRotation(), class SVD\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\nEIGEN_DEVICE_FUNC const typename Transform<Scalar,Dim,Mode,Options>::LinearMatrixType\nTransform<Scalar,Dim,Mode,Options>::rotation() const\n{\n  LinearMatrixType result;\n  
computeRotationScaling(&result, (LinearMatrixType*)0);\n  return result;\n}\n\n\n/** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being\n  * not necessarily positive.\n  *\n  * If either pointer is zero, the corresponding computation is skipped.\n  *\n  *\n  *\n  * \\svd_module\n  *\n  * \\sa computeScalingRotation(), rotation(), class SVD\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\ntemplate<typename RotationMatrixType, typename ScalingMatrixType>\nEIGEN_DEVICE_FUNC void Transform<Scalar,Dim,Mode,Options>::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const\n{\n  JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);\n\n  Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1\n  VectorType sv(svd.singularValues());\n  sv.coeffRef(0) *= x;\n  if(scaling) scaling->lazyAssign(svd.matrixV() * sv.asDiagonal() * svd.matrixV().adjoint());\n  if(rotation)\n  {\n    LinearMatrixType m(svd.matrixU());\n    m.col(0) /= x;\n    rotation->lazyAssign(m * svd.matrixV().adjoint());\n  }\n}\n\n/** decomposes the linear part of the transformation as a product scaling x rotation, the scaling being\n  * not necessarily positive.\n  *\n  * If either pointer is zero, the corresponding computation is skipped.\n  *\n  *\n  *\n  * \\svd_module\n  *\n  * \\sa computeRotationScaling(), rotation(), class SVD\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\ntemplate<typename ScalingMatrixType, typename RotationMatrixType>\nEIGEN_DEVICE_FUNC void Transform<Scalar,Dim,Mode,Options>::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const\n{\n  JacobiSVD<LinearMatrixType> svd(linear(), ComputeFullU | ComputeFullV);\n\n  Scalar x = (svd.matrixU() * svd.matrixV().adjoint()).determinant(); // so x has absolute value 1\n  VectorType sv(svd.singularValues());\n  sv.coeffRef(0) *= x;\n  
if(scaling) scaling->lazyAssign(svd.matrixU() * sv.asDiagonal() * svd.matrixU().adjoint());\n  if(rotation)\n  {\n    LinearMatrixType m(svd.matrixU());\n    m.col(0) /= x;\n    rotation->lazyAssign(m * svd.matrixV().adjoint());\n  }\n}\n\n/** Convenient method to set \\c *this from a position, orientation and scale\n  * of a 3D object.\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\ntemplate<typename PositionDerived, typename OrientationType, typename ScaleDerived>\nEIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>&\nTransform<Scalar,Dim,Mode,Options>::fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,\n  const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale)\n{\n  linear() = internal::toRotationMatrix<Scalar,Dim>(orientation);\n  linear() *= scale.asDiagonal();\n  translation() = position;\n  makeAffine();\n  return *this;\n}\n\nnamespace internal {\n\ntemplate<int Mode>\nstruct transform_make_affine\n{\n  template<typename MatrixType>\n  EIGEN_DEVICE_FUNC static void run(MatrixType &mat)\n  {\n    static const int Dim = MatrixType::ColsAtCompileTime-1;\n    mat.template block<1,Dim>(Dim,0).setZero();\n    mat.coeffRef(Dim,Dim) = typename MatrixType::Scalar(1);\n  }\n};\n\ntemplate<>\nstruct transform_make_affine<AffineCompact>\n{\n  template<typename MatrixType> EIGEN_DEVICE_FUNC static void run(MatrixType &) { }\n};\n    \n// selector needed to avoid taking the inverse of a 3x4 matrix\ntemplate<typename TransformType, int Mode=TransformType::Mode>\nstruct projective_transform_inverse\n{\n  EIGEN_DEVICE_FUNC static inline void run(const TransformType&, TransformType&)\n  {}\n};\n\ntemplate<typename TransformType>\nstruct projective_transform_inverse<TransformType, Projective>\n{\n  EIGEN_DEVICE_FUNC static inline void run(const TransformType& m, TransformType& res)\n  {\n    res.matrix() = m.matrix().inverse();\n  }\n};\n\n} // end namespace internal\n\n\n/**\n  *\n  * \\returns the inverse 
transformation according to some given knowledge\n  * on \\c *this.\n  *\n  * \\param hint allows to optimize the inversion process when the transformation\n  * is known to be not a general transformation (optional). The possible values are:\n  *  - #Projective if the transformation is not necessarily affine, i.e., if the\n  *    last row is not guaranteed to be [0 ... 0 1]\n  *  - #Affine if the last row can be assumed to be [0 ... 0 1]\n  *  - #Isometry if the transformation is only a concatenations of translations\n  *    and rotations.\n  *  The default is the template class parameter \\c Mode.\n  *\n  * \\warning unless \\a traits is always set to NoShear or NoScaling, this function\n  * requires the generic inverse method of MatrixBase defined in the LU module. If\n  * you forget to include this module, then you will get hard to debug linking errors.\n  *\n  * \\sa MatrixBase::inverse()\n  */\ntemplate<typename Scalar, int Dim, int Mode, int Options>\nEIGEN_DEVICE_FUNC Transform<Scalar,Dim,Mode,Options>\nTransform<Scalar,Dim,Mode,Options>::inverse(TransformTraits hint) const\n{\n  Transform res;\n  if (hint == Projective)\n  {\n    internal::projective_transform_inverse<Transform>::run(*this, res);\n  }\n  else\n  {\n    if (hint == Isometry)\n    {\n      res.matrix().template topLeftCorner<Dim,Dim>() = linear().transpose();\n    }\n    else if(hint&Affine)\n    {\n      res.matrix().template topLeftCorner<Dim,Dim>() = linear().inverse();\n    }\n    else\n    {\n      eigen_assert(false && \"Invalid transform traits in Transform::Inverse\");\n    }\n    // translation and remaining parts\n    res.matrix().template topRightCorner<Dim,1>()\n      = - res.matrix().template topLeftCorner<Dim,Dim>() * translation();\n    res.makeAffine(); // we do need this, because in the beginning res is uninitialized\n  }\n  return res;\n}\n\nnamespace internal {\n\n/*****************************************************\n*** Specializations of take affine part            
***\n*****************************************************/\n\ntemplate<typename TransformType> struct transform_take_affine_part {\n  typedef typename TransformType::MatrixType MatrixType;\n  typedef typename TransformType::AffinePart AffinePart;\n  typedef typename TransformType::ConstAffinePart ConstAffinePart;\n  static inline AffinePart run(MatrixType& m)\n  { return m.template block<TransformType::Dim,TransformType::HDim>(0,0); }\n  static inline ConstAffinePart run(const MatrixType& m)\n  { return m.template block<TransformType::Dim,TransformType::HDim>(0,0); }\n};\n\ntemplate<typename Scalar, int Dim, int Options>\nstruct transform_take_affine_part<Transform<Scalar,Dim,AffineCompact, Options> > {\n  typedef typename Transform<Scalar,Dim,AffineCompact,Options>::MatrixType MatrixType;\n  static inline MatrixType& run(MatrixType& m) { return m; }\n  static inline const MatrixType& run(const MatrixType& m) { return m; }\n};\n\n/*****************************************************\n*** Specializations of construct from matrix       ***\n*****************************************************/\n\ntemplate<typename Other, int Mode, int Options, int Dim, int HDim>\nstruct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, Dim,Dim>\n{\n  static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)\n  {\n    transform->linear() = other;\n    transform->translation().setZero();\n    transform->makeAffine();\n  }\n};\n\ntemplate<typename Other, int Mode, int Options, int Dim, int HDim>\nstruct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, Dim,HDim>\n{\n  static inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)\n  {\n    transform->affine() = other;\n    transform->makeAffine();\n  }\n};\n\ntemplate<typename Other, int Mode, int Options, int Dim, int HDim>\nstruct transform_construct_from_matrix<Other, Mode,Options,Dim,HDim, HDim,HDim>\n{\n  static 
inline void run(Transform<typename Other::Scalar,Dim,Mode,Options> *transform, const Other& other)\n  { transform->matrix() = other; }\n};\n\ntemplate<typename Other, int Options, int Dim, int HDim>\nstruct transform_construct_from_matrix<Other, AffineCompact,Options,Dim,HDim, HDim,HDim>\n{\n  static inline void run(Transform<typename Other::Scalar,Dim,AffineCompact,Options> *transform, const Other& other)\n  { transform->matrix() = other.template block<Dim,HDim>(0,0); }\n};\n\n/**********************************************************\n***   Specializations of operator* with rhs EigenBase   ***\n**********************************************************/\n\ntemplate<int LhsMode,int RhsMode>\nstruct transform_product_result\n{\n  enum \n  { \n    Mode =\n      (LhsMode == (int)Projective    || RhsMode == (int)Projective    ) ? Projective :\n      (LhsMode == (int)Affine        || RhsMode == (int)Affine        ) ? Affine :\n      (LhsMode == (int)AffineCompact || RhsMode == (int)AffineCompact ) ? AffineCompact :\n      (LhsMode == (int)Isometry      || RhsMode == (int)Isometry      ) ? 
Isometry : Projective\n  };\n};\n\ntemplate< typename TransformType, typename MatrixType, int RhsCols>\nstruct transform_right_product_impl< TransformType, MatrixType, 0, RhsCols>\n{\n  typedef typename MatrixType::PlainObject ResultType;\n\n  static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)\n  {\n    return T.matrix() * other;\n  }\n};\n\ntemplate< typename TransformType, typename MatrixType, int RhsCols>\nstruct transform_right_product_impl< TransformType, MatrixType, 1, RhsCols>\n{\n  enum { \n    Dim = TransformType::Dim, \n    HDim = TransformType::HDim,\n    OtherRows = MatrixType::RowsAtCompileTime,\n    OtherCols = MatrixType::ColsAtCompileTime\n  };\n\n  typedef typename MatrixType::PlainObject ResultType;\n\n  static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)\n  {\n    EIGEN_STATIC_ASSERT(OtherRows==HDim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);\n\n    typedef Block<ResultType, Dim, OtherCols, int(MatrixType::RowsAtCompileTime)==Dim> TopLeftLhs;\n\n    ResultType res(other.rows(),other.cols());\n    TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() = T.affine() * other;\n    res.row(OtherRows-1) = other.row(OtherRows-1);\n    \n    return res;\n  }\n};\n\ntemplate< typename TransformType, typename MatrixType, int RhsCols>\nstruct transform_right_product_impl< TransformType, MatrixType, 2, RhsCols>\n{\n  enum { \n    Dim = TransformType::Dim, \n    HDim = TransformType::HDim,\n    OtherRows = MatrixType::RowsAtCompileTime,\n    OtherCols = MatrixType::ColsAtCompileTime\n  };\n\n  typedef typename MatrixType::PlainObject ResultType;\n\n  static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)\n  {\n    EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);\n\n    typedef Block<ResultType, Dim, OtherCols, true> TopLeftLhs;\n    ResultType res(Replicate<typename TransformType::ConstTranslationPart, 1, 
OtherCols>(T.translation(),1,other.cols()));\n    TopLeftLhs(res, 0, 0, Dim, other.cols()).noalias() += T.linear() * other;\n\n    return res;\n  }\n};\n\ntemplate< typename TransformType, typename MatrixType >\nstruct transform_right_product_impl< TransformType, MatrixType, 2, 1> // rhs is a vector of size Dim\n{\n  typedef typename TransformType::MatrixType TransformMatrix;\n  enum {\n    Dim = TransformType::Dim,\n    HDim = TransformType::HDim,\n    OtherRows = MatrixType::RowsAtCompileTime,\n    WorkingRows = EIGEN_PLAIN_ENUM_MIN(TransformMatrix::RowsAtCompileTime,HDim)\n  };\n\n  typedef typename MatrixType::PlainObject ResultType;\n\n  static EIGEN_STRONG_INLINE ResultType run(const TransformType& T, const MatrixType& other)\n  {\n    EIGEN_STATIC_ASSERT(OtherRows==Dim, YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES);\n\n    Matrix<typename ResultType::Scalar, Dim+1, 1> rhs;\n    rhs.template head<Dim>() = other; rhs[Dim] = typename ResultType::Scalar(1);\n    Matrix<typename ResultType::Scalar, WorkingRows, 1> res(T.matrix() * rhs);\n    return res.template head<Dim>();\n  }\n};\n\n/**********************************************************\n***   Specializations of operator* with lhs EigenBase   ***\n**********************************************************/\n\n// generic HDim x HDim matrix * T => Projective\ntemplate<typename Other,int Mode, int Options, int Dim, int HDim>\nstruct transform_left_product_impl<Other,Mode,Options,Dim,HDim, HDim,HDim>\n{\n  typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;\n  typedef typename TransformType::MatrixType MatrixType;\n  typedef Transform<typename Other::Scalar,Dim,Projective,Options> ResultType;\n  static ResultType run(const Other& other,const TransformType& tr)\n  { return ResultType(other * tr.matrix()); }\n};\n\n// generic HDim x HDim matrix * AffineCompact => Projective\ntemplate<typename Other, int Options, int Dim, int HDim>\nstruct 
transform_left_product_impl<Other,AffineCompact,Options,Dim,HDim, HDim,HDim>\n{\n  typedef Transform<typename Other::Scalar,Dim,AffineCompact,Options> TransformType;\n  typedef typename TransformType::MatrixType MatrixType;\n  typedef Transform<typename Other::Scalar,Dim,Projective,Options> ResultType;\n  static ResultType run(const Other& other,const TransformType& tr)\n  {\n    ResultType res;\n    res.matrix().noalias() = other.template block<HDim,Dim>(0,0) * tr.matrix();\n    res.matrix().col(Dim) += other.col(Dim);\n    return res;\n  }\n};\n\n// affine matrix * T\ntemplate<typename Other,int Mode, int Options, int Dim, int HDim>\nstruct transform_left_product_impl<Other,Mode,Options,Dim,HDim, Dim,HDim>\n{\n  typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;\n  typedef typename TransformType::MatrixType MatrixType;\n  typedef TransformType ResultType;\n  static ResultType run(const Other& other,const TransformType& tr)\n  {\n    ResultType res;\n    res.affine().noalias() = other * tr.matrix();\n    res.matrix().row(Dim) = tr.matrix().row(Dim);\n    return res;\n  }\n};\n\n// affine matrix * AffineCompact\ntemplate<typename Other, int Options, int Dim, int HDim>\nstruct transform_left_product_impl<Other,AffineCompact,Options,Dim,HDim, Dim,HDim>\n{\n  typedef Transform<typename Other::Scalar,Dim,AffineCompact,Options> TransformType;\n  typedef typename TransformType::MatrixType MatrixType;\n  typedef TransformType ResultType;\n  static ResultType run(const Other& other,const TransformType& tr)\n  {\n    ResultType res;\n    res.matrix().noalias() = other.template block<Dim,Dim>(0,0) * tr.matrix();\n    res.translation() += other.col(Dim);\n    return res;\n  }\n};\n\n// linear matrix * T\ntemplate<typename Other,int Mode, int Options, int Dim, int HDim>\nstruct transform_left_product_impl<Other,Mode,Options,Dim,HDim, Dim,Dim>\n{\n  typedef Transform<typename Other::Scalar,Dim,Mode,Options> TransformType;\n  typedef typename 
TransformType::MatrixType MatrixType;\n  typedef TransformType ResultType;\n  static ResultType run(const Other& other, const TransformType& tr)\n  {\n    TransformType res;\n    if(Mode!=int(AffineCompact))\n      res.matrix().row(Dim) = tr.matrix().row(Dim);\n    res.matrix().template topRows<Dim>().noalias()\n      = other * tr.matrix().template topRows<Dim>();\n    return res;\n  }\n};\n\n/**********************************************************\n*** Specializations of operator* with another Transform ***\n**********************************************************/\n\ntemplate<typename Scalar, int Dim, int LhsMode, int LhsOptions, int RhsMode, int RhsOptions>\nstruct transform_transform_product_impl<Transform<Scalar,Dim,LhsMode,LhsOptions>,Transform<Scalar,Dim,RhsMode,RhsOptions>,false >\n{\n  enum { ResultMode = transform_product_result<LhsMode,RhsMode>::Mode };\n  typedef Transform<Scalar,Dim,LhsMode,LhsOptions> Lhs;\n  typedef Transform<Scalar,Dim,RhsMode,RhsOptions> Rhs;\n  typedef Transform<Scalar,Dim,ResultMode,LhsOptions> ResultType;\n  static ResultType run(const Lhs& lhs, const Rhs& rhs)\n  {\n    ResultType res;\n    res.linear() = lhs.linear() * rhs.linear();\n    res.translation() = lhs.linear() * rhs.translation() + lhs.translation();\n    res.makeAffine();\n    return res;\n  }\n};\n\ntemplate<typename Scalar, int Dim, int LhsMode, int LhsOptions, int RhsMode, int RhsOptions>\nstruct transform_transform_product_impl<Transform<Scalar,Dim,LhsMode,LhsOptions>,Transform<Scalar,Dim,RhsMode,RhsOptions>,true >\n{\n  typedef Transform<Scalar,Dim,LhsMode,LhsOptions> Lhs;\n  typedef Transform<Scalar,Dim,RhsMode,RhsOptions> Rhs;\n  typedef Transform<Scalar,Dim,Projective> ResultType;\n  static ResultType run(const Lhs& lhs, const Rhs& rhs)\n  {\n    return ResultType( lhs.matrix() * rhs.matrix() );\n  }\n};\n\ntemplate<typename Scalar, int Dim, int LhsOptions, int RhsOptions>\nstruct 
transform_transform_product_impl<Transform<Scalar,Dim,AffineCompact,LhsOptions>,Transform<Scalar,Dim,Projective,RhsOptions>,true >\n{\n  typedef Transform<Scalar,Dim,AffineCompact,LhsOptions> Lhs;\n  typedef Transform<Scalar,Dim,Projective,RhsOptions> Rhs;\n  typedef Transform<Scalar,Dim,Projective> ResultType;\n  static ResultType run(const Lhs& lhs, const Rhs& rhs)\n  {\n    ResultType res;\n    res.matrix().template topRows<Dim>() = lhs.matrix() * rhs.matrix();\n    res.matrix().row(Dim) = rhs.matrix().row(Dim);\n    return res;\n  }\n};\n\ntemplate<typename Scalar, int Dim, int LhsOptions, int RhsOptions>\nstruct transform_transform_product_impl<Transform<Scalar,Dim,Projective,LhsOptions>,Transform<Scalar,Dim,AffineCompact,RhsOptions>,true >\n{\n  typedef Transform<Scalar,Dim,Projective,LhsOptions> Lhs;\n  typedef Transform<Scalar,Dim,AffineCompact,RhsOptions> Rhs;\n  typedef Transform<Scalar,Dim,Projective> ResultType;\n  static ResultType run(const Lhs& lhs, const Rhs& rhs)\n  {\n    ResultType res(lhs.matrix().template leftCols<Dim>() * rhs.matrix());\n    res.matrix().col(Dim) += lhs.matrix().col(Dim);\n    return res;\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_TRANSFORM_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/Translation.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_TRANSLATION_H\n#define EIGEN_TRANSLATION_H\n\nnamespace Eigen { \n\n/** \\geometry_module \\ingroup Geometry_Module\n  *\n  * \\class Translation\n  *\n  * \\brief Represents a translation transformation\n  *\n  * \\tparam _Scalar the scalar type, i.e., the type of the coefficients.\n  * \\tparam _Dim the  dimension of the space, can be a compile time value or Dynamic\n  *\n  * \\note This class is not aimed to be used to store a translation transformation,\n  * but rather to make easier the constructions and updates of Transform objects.\n  *\n  * \\sa class Scaling, class Transform\n  */\ntemplate<typename _Scalar, int _Dim>\nclass Translation\n{\npublic:\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim)\n  /** dimension of the space */\n  enum { Dim = _Dim };\n  /** the scalar type of the coefficients */\n  typedef _Scalar Scalar;\n  /** corresponding vector type */\n  typedef Matrix<Scalar,Dim,1> VectorType;\n  /** corresponding linear transformation matrix type */\n  typedef Matrix<Scalar,Dim,Dim> LinearMatrixType;\n  /** corresponding affine transformation type */\n  typedef Transform<Scalar,Dim,Affine> AffineTransformType;\n  /** corresponding isometric transformation type */\n  typedef Transform<Scalar,Dim,Isometry> IsometryTransformType;\n\nprotected:\n\n  VectorType m_coeffs;\n\npublic:\n\n  /** Default constructor without initialization. 
*/\n  EIGEN_DEVICE_FUNC Translation() {}\n  /**  */\n  EIGEN_DEVICE_FUNC inline Translation(const Scalar& sx, const Scalar& sy)\n  {\n    eigen_assert(Dim==2);\n    m_coeffs.x() = sx;\n    m_coeffs.y() = sy;\n  }\n  /**  */\n  EIGEN_DEVICE_FUNC inline Translation(const Scalar& sx, const Scalar& sy, const Scalar& sz)\n  {\n    eigen_assert(Dim==3);\n    m_coeffs.x() = sx;\n    m_coeffs.y() = sy;\n    m_coeffs.z() = sz;\n  }\n  /** Constructs and initialize the translation transformation from a vector of translation coefficients */\n  EIGEN_DEVICE_FUNC explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {}\n\n  /** \\brief Retruns the x-translation by value. **/\n  EIGEN_DEVICE_FUNC inline Scalar x() const { return m_coeffs.x(); }\n  /** \\brief Retruns the y-translation by value. **/\n  EIGEN_DEVICE_FUNC inline Scalar y() const { return m_coeffs.y(); }\n  /** \\brief Retruns the z-translation by value. **/\n  EIGEN_DEVICE_FUNC inline Scalar z() const { return m_coeffs.z(); }\n\n  /** \\brief Retruns the x-translation as a reference. **/\n  EIGEN_DEVICE_FUNC inline Scalar& x() { return m_coeffs.x(); }\n  /** \\brief Retruns the y-translation as a reference. **/\n  EIGEN_DEVICE_FUNC inline Scalar& y() { return m_coeffs.y(); }\n  /** \\brief Retruns the z-translation as a reference. 
**/\n  EIGEN_DEVICE_FUNC inline Scalar& z() { return m_coeffs.z(); }\n\n  EIGEN_DEVICE_FUNC const VectorType& vector() const { return m_coeffs; }\n  EIGEN_DEVICE_FUNC VectorType& vector() { return m_coeffs; }\n\n  EIGEN_DEVICE_FUNC const VectorType& translation() const { return m_coeffs; }\n  EIGEN_DEVICE_FUNC VectorType& translation() { return m_coeffs; }\n\n  /** Concatenates two translation */\n  EIGEN_DEVICE_FUNC inline Translation operator* (const Translation& other) const\n  { return Translation(m_coeffs + other.m_coeffs); }\n\n  /** Concatenates a translation and a uniform scaling */\n  EIGEN_DEVICE_FUNC inline AffineTransformType operator* (const UniformScaling<Scalar>& other) const;\n\n  /** Concatenates a translation and a linear transformation */\n  template<typename OtherDerived>\n  EIGEN_DEVICE_FUNC inline AffineTransformType operator* (const EigenBase<OtherDerived>& linear) const;\n\n  /** Concatenates a translation and a rotation */\n  template<typename Derived>\n  EIGEN_DEVICE_FUNC inline IsometryTransformType operator*(const RotationBase<Derived,Dim>& r) const\n  { return *this * IsometryTransformType(r); }\n\n  /** \\returns the concatenation of a linear transformation \\a l with the translation \\a t */\n  // its a nightmare to define a templated friend function outside its declaration\n  template<typename OtherDerived> friend\n  EIGEN_DEVICE_FUNC inline AffineTransformType operator*(const EigenBase<OtherDerived>& linear, const Translation& t)\n  {\n    AffineTransformType res;\n    res.matrix().setZero();\n    res.linear() = linear.derived();\n    res.translation() = linear.derived() * t.m_coeffs;\n    res.matrix().row(Dim).setZero();\n    res(Dim,Dim) = Scalar(1);\n    return res;\n  }\n\n  /** Concatenates a translation and a transformation */\n  template<int Mode, int Options>\n  EIGEN_DEVICE_FUNC inline Transform<Scalar,Dim,Mode> operator* (const Transform<Scalar,Dim,Mode,Options>& t) const\n  {\n    Transform<Scalar,Dim,Mode> res = t;\n    
res.pretranslate(m_coeffs);\n    return res;\n  }\n\n  /** Applies translation to vector */\n  template<typename Derived>\n  inline typename internal::enable_if<Derived::IsVectorAtCompileTime,VectorType>::type\n  operator* (const MatrixBase<Derived>& vec) const\n  { return m_coeffs + vec.derived(); }\n\n  /** \\returns the inverse translation (opposite) */\n  Translation inverse() const { return Translation(-m_coeffs); }\n\n  Translation& operator=(const Translation& other)\n  {\n    m_coeffs = other.m_coeffs;\n    return *this;\n  }\n\n  static const Translation Identity() { return Translation(VectorType::Zero()); }\n\n  /** \\returns \\c *this with scalar type casted to \\a NewScalarType\n    *\n    * Note that if \\a NewScalarType is equal to the current scalar type of \\c *this\n    * then this function smartly returns a const reference to \\c *this.\n    */\n  template<typename NewScalarType>\n  EIGEN_DEVICE_FUNC inline typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type cast() const\n  { return typename internal::cast_return_type<Translation,Translation<NewScalarType,Dim> >::type(*this); }\n\n  /** Copy constructor with scalar type conversion */\n  template<typename OtherScalarType>\n  EIGEN_DEVICE_FUNC inline explicit Translation(const Translation<OtherScalarType,Dim>& other)\n  { m_coeffs = other.vector().template cast<Scalar>(); }\n\n  /** \\returns \\c true if \\c *this is approximately equal to \\a other, within the precision\n    * determined by \\a prec.\n    *\n    * \\sa MatrixBase::isApprox() */\n  EIGEN_DEVICE_FUNC bool isApprox(const Translation& other, const typename NumTraits<Scalar>::Real& prec = NumTraits<Scalar>::dummy_precision()) const\n  { return m_coeffs.isApprox(other.m_coeffs, prec); }\n\n};\n\n/** \\addtogroup Geometry_Module */\n//@{\ntypedef Translation<float, 2> Translation2f;\ntypedef Translation<double,2> Translation2d;\ntypedef Translation<float, 3> Translation3f;\ntypedef Translation<double,3> 
Translation3d;\n//@}\n\ntemplate<typename Scalar, int Dim>\nEIGEN_DEVICE_FUNC inline typename Translation<Scalar,Dim>::AffineTransformType\nTranslation<Scalar,Dim>::operator* (const UniformScaling<Scalar>& other) const\n{\n  AffineTransformType res;\n  res.matrix().setZero();\n  res.linear().diagonal().fill(other.factor());\n  res.translation() = m_coeffs;\n  res(Dim,Dim) = Scalar(1);\n  return res;\n}\n\ntemplate<typename Scalar, int Dim>\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC inline typename Translation<Scalar,Dim>::AffineTransformType\nTranslation<Scalar,Dim>::operator* (const EigenBase<OtherDerived>& linear) const\n{\n  AffineTransformType res;\n  res.matrix().setZero();\n  res.linear() = linear.derived();\n  res.translation() = m_coeffs;\n  res.matrix().row(Dim).setZero();\n  res(Dim,Dim) = Scalar(1);\n  return res;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_TRANSLATION_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/Umeyama.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Hauke Heibel <hauke.heibel@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_UMEYAMA_H\n#define EIGEN_UMEYAMA_H\n\n// This file requires the user to include \n// * Eigen/Core\n// * Eigen/LU \n// * Eigen/SVD\n// * Eigen/Array\n\nnamespace Eigen { \n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n\n// These helpers are required since it allows to use mixed types as parameters\n// for the Umeyama. The problem with mixed parameters is that the return type\n// cannot trivially be deduced when float and double types are mixed.\nnamespace internal {\n\n// Compile time return type deduction for different MatrixBase types.\n// Different means here different alignment and parameters but the same underlying\n// real scalar type.\ntemplate<typename MatrixType, typename OtherMatrixType>\nstruct umeyama_transform_matrix_type\n{\n  enum {\n    MinRowsAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(MatrixType::RowsAtCompileTime, OtherMatrixType::RowsAtCompileTime),\n\n    // When possible we want to choose some small fixed size value since the result\n    // is likely to fit on the stack. So here, EIGEN_SIZE_MIN_PREFER_DYNAMIC is not what we want.\n    HomogeneousDimension = int(MinRowsAtCompileTime) == Dynamic ? Dynamic : int(MinRowsAtCompileTime)+1\n  };\n\n  typedef Matrix<typename traits<MatrixType>::Scalar,\n    HomogeneousDimension,\n    HomogeneousDimension,\n    AutoAlign | (traits<MatrixType>::Flags & RowMajorBit ? 
RowMajor : ColMajor),\n    HomogeneousDimension,\n    HomogeneousDimension\n  > type;\n};\n\n}\n\n#endif\n\n/**\n* \\geometry_module \\ingroup Geometry_Module\n*\n* \\brief Returns the transformation between two point sets.\n*\n* The algorithm is based on:\n* \"Least-squares estimation of transformation parameters between two point patterns\",\n* Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573\n*\n* It estimates parameters \\f$ c, \\mathbf{R}, \\f$ and \\f$ \\mathbf{t} \\f$ such that\n* \\f{align*}\n*   \\frac{1}{n} \\sum_{i=1}^n \\vert\\vert y_i - (c\\mathbf{R}x_i + \\mathbf{t}) \\vert\\vert_2^2\n* \\f}\n* is minimized.\n*\n* The algorithm is based on the analysis of the covariance matrix\n* \\f$ \\Sigma_{\\mathbf{x}\\mathbf{y}} \\in \\mathbb{R}^{d \\times d} \\f$\n* of the input point sets \\f$ \\mathbf{x} \\f$ and \\f$ \\mathbf{y} \\f$ where \n* \\f$d\\f$ is corresponding to the dimension (which is typically small).\n* The analysis is involving the SVD having a complexity of \\f$O(d^3)\\f$\n* though the actual computational effort lies in the covariance\n* matrix computation which has an asymptotic lower bound of \\f$O(dm)\\f$ when \n* the input point sets have dimension \\f$d \\times m\\f$.\n*\n* Currently the method is working only for floating point matrices.\n*\n* \\todo Should the return type of umeyama() become a Transform?\n*\n* \\param src Source points \\f$ \\mathbf{x} = \\left( x_1, \\hdots, x_n \\right) \\f$.\n* \\param dst Destination points \\f$ \\mathbf{y} = \\left( y_1, \\hdots, y_n \\right) \\f$.\n* \\param with_scaling Sets \\f$ c=1 \\f$ when <code>false</code> is passed.\n* \\return The homogeneous transformation \n* \\f{align*}\n*   T = \\begin{bmatrix} c\\mathbf{R} & \\mathbf{t} \\\\ \\mathbf{0} & 1 \\end{bmatrix}\n* \\f}\n* minimizing the residual above. 
This transformation is always returned as an \n* Eigen::Matrix.\n*/\ntemplate <typename Derived, typename OtherDerived>\ntypename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type\numeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, bool with_scaling = true)\n{\n  typedef typename internal::umeyama_transform_matrix_type<Derived, OtherDerived>::type TransformationMatrixType;\n  typedef typename internal::traits<TransformationMatrixType>::Scalar Scalar;\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n\n  EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)\n  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename internal::traits<OtherDerived>::Scalar>::value),\n    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)\n\n  enum { Dimension = EIGEN_SIZE_MIN_PREFER_DYNAMIC(Derived::RowsAtCompileTime, OtherDerived::RowsAtCompileTime) };\n\n  typedef Matrix<Scalar, Dimension, 1> VectorType;\n  typedef Matrix<Scalar, Dimension, Dimension> MatrixType;\n  typedef typename internal::plain_matrix_type_row_major<Derived>::type RowMajorMatrixType;\n\n  const Index m = src.rows(); // dimension\n  const Index n = src.cols(); // number of measurements\n\n  // required for demeaning ...\n  const RealScalar one_over_n = RealScalar(1) / static_cast<RealScalar>(n);\n\n  // computation of mean\n  const VectorType src_mean = src.rowwise().sum() * one_over_n;\n  const VectorType dst_mean = dst.rowwise().sum() * one_over_n;\n\n  // demeaning of src and dst points\n  const RowMajorMatrixType src_demean = src.colwise() - src_mean;\n  const RowMajorMatrixType dst_demean = dst.colwise() - dst_mean;\n\n  // Eq. (36)-(37)\n  const Scalar src_var = src_demean.rowwise().squaredNorm().sum() * one_over_n;\n\n  // Eq. 
(38)\n  const MatrixType sigma = one_over_n * dst_demean * src_demean.transpose();\n\n  JacobiSVD<MatrixType> svd(sigma, ComputeFullU | ComputeFullV);\n\n  // Initialize the resulting transformation with an identity matrix...\n  TransformationMatrixType Rt = TransformationMatrixType::Identity(m+1,m+1);\n\n  // Eq. (39)\n  VectorType S = VectorType::Ones(m);\n\n  if  ( svd.matrixU().determinant() * svd.matrixV().determinant() < 0 )\n    S(m-1) = -1;\n\n  // Eq. (40) and (43)\n  Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();\n\n  if (with_scaling)\n  {\n    // Eq. (42)\n    const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S);\n\n    // Eq. (41)\n    Rt.col(m).head(m) = dst_mean;\n    Rt.col(m).head(m).noalias() -= c*Rt.topLeftCorner(m,m)*src_mean;\n    Rt.block(0,0,m,m) *= c;\n  }\n  else\n  {\n    Rt.col(m).head(m) = dst_mean;\n    Rt.col(m).head(m).noalias() -= Rt.topLeftCorner(m,m)*src_mean;\n  }\n\n  return Rt;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_UMEYAMA_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Geometry/arch/Geometry_SSE.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Rohit Garg <rpg.314@gmail.com>\n// Copyright (C) 2009-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_GEOMETRY_SSE_H\n#define EIGEN_GEOMETRY_SSE_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<class Derived, class OtherDerived>\nstruct quat_product<Architecture::SSE, Derived, OtherDerived, float, Aligned16>\n{\n  static inline Quaternion<float> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)\n  {\n    Quaternion<float> res;\n    const __m128 mask = _mm_setr_ps(0.f,0.f,0.f,-0.f);\n    __m128 a = _a.coeffs().template packet<Aligned16>(0);\n    __m128 b = _b.coeffs().template packet<Aligned16>(0);\n    __m128 s1 = _mm_mul_ps(vec4f_swizzle1(a,1,2,0,2),vec4f_swizzle1(b,2,0,1,2));\n    __m128 s2 = _mm_mul_ps(vec4f_swizzle1(a,3,3,3,1),vec4f_swizzle1(b,0,1,2,1));\n    pstore(&res.x(),\n              _mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,vec4f_swizzle1(b,3,3,3,3)),\n                                    _mm_mul_ps(vec4f_swizzle1(a,2,0,1,0),\n                                               vec4f_swizzle1(b,1,2,0,0))),\n                         _mm_xor_ps(mask,_mm_add_ps(s1,s2))));\n    \n    return res;\n  }\n};\n\ntemplate<class Derived, int Alignment>\nstruct quat_conj<Architecture::SSE, Derived, float, Alignment>\n{\n  static inline Quaternion<float> run(const QuaternionBase<Derived>& q)\n  {\n    Quaternion<float> res;\n    const __m128 mask = _mm_setr_ps(-0.f,-0.f,-0.f,0.f);\n    pstore(&res.x(), _mm_xor_ps(mask, q.coeffs().template packet<Alignment>(0)));\n    return res;\n  }\n};\n\n\ntemplate<typename VectorLhs,typename VectorRhs>\nstruct 
cross3_impl<Architecture::SSE,VectorLhs,VectorRhs,float,true>\n{\n  static inline typename plain_matrix_type<VectorLhs>::type\n  run(const VectorLhs& lhs, const VectorRhs& rhs)\n  {\n    __m128 a = lhs.template packet<traits<VectorLhs>::Alignment>(0);\n    __m128 b = rhs.template packet<traits<VectorRhs>::Alignment>(0);\n    __m128 mul1=_mm_mul_ps(vec4f_swizzle1(a,1,2,0,3),vec4f_swizzle1(b,2,0,1,3));\n    __m128 mul2=_mm_mul_ps(vec4f_swizzle1(a,2,0,1,3),vec4f_swizzle1(b,1,2,0,3));\n    typename plain_matrix_type<VectorLhs>::type res;\n    pstore(&res.x(),_mm_sub_ps(mul1,mul2));\n    return res;\n  }\n};\n\n\n\n\ntemplate<class Derived, class OtherDerived, int Alignment>\nstruct quat_product<Architecture::SSE, Derived, OtherDerived, double, Alignment>\n{\n  static inline Quaternion<double> run(const QuaternionBase<Derived>& _a, const QuaternionBase<OtherDerived>& _b)\n  {\n  const Packet2d mask = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));\n\n  Quaternion<double> res;\n\n  const double* a = _a.coeffs().data();\n  Packet2d b_xy = _b.coeffs().template packet<Alignment>(0);\n  Packet2d b_zw = _b.coeffs().template packet<Alignment>(2);\n  Packet2d a_xx = pset1<Packet2d>(a[0]);\n  Packet2d a_yy = pset1<Packet2d>(a[1]);\n  Packet2d a_zz = pset1<Packet2d>(a[2]);\n  Packet2d a_ww = pset1<Packet2d>(a[3]);\n\n  // two temporaries:\n  Packet2d t1, t2;\n\n  /*\n   * t1 = ww*xy + yy*zw\n   * t2 = zz*xy - xx*zw\n   * res.xy = t1 +/- swap(t2)\n   */\n  t1 = padd(pmul(a_ww, b_xy), pmul(a_yy, b_zw));\n  t2 = psub(pmul(a_zz, b_xy), pmul(a_xx, b_zw));\n#ifdef EIGEN_VECTORIZE_SSE3\n  EIGEN_UNUSED_VARIABLE(mask)\n  pstore(&res.x(), _mm_addsub_pd(t1, preverse(t2)));\n#else\n  pstore(&res.x(), padd(t1, pxor(mask,preverse(t2))));\n#endif\n  \n  /*\n   * t1 = ww*zw - yy*xy\n   * t2 = zz*zw + xx*xy\n   * res.zw = t1 -/+ swap(t2) = swap( swap(t1) +/- t2)\n   */\n  t1 = psub(pmul(a_ww, b_zw), pmul(a_yy, b_xy));\n  t2 = padd(pmul(a_zz, b_zw), pmul(a_xx, b_xy));\n#ifdef 
EIGEN_VECTORIZE_SSE3\n  EIGEN_UNUSED_VARIABLE(mask)\n  pstore(&res.z(), preverse(_mm_addsub_pd(preverse(t1), t2)));\n#else\n  pstore(&res.z(), psub(t1, pxor(mask,preverse(t2))));\n#endif\n\n  return res;\n}\n};\n\ntemplate<class Derived, int Alignment>\nstruct quat_conj<Architecture::SSE, Derived, double, Alignment>\n{\n  static inline Quaternion<double> run(const QuaternionBase<Derived>& q)\n  {\n    Quaternion<double> res;\n    const __m128d mask0 = _mm_setr_pd(-0.,-0.);\n    const __m128d mask2 = _mm_setr_pd(-0.,0.);\n    pstore(&res.x(), _mm_xor_pd(mask0, q.coeffs().template packet<Alignment>(0)));\n    pstore(&res.z(), _mm_xor_pd(mask2, q.coeffs().template packet<Alignment>(2)));\n    return res;\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_GEOMETRY_SSE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Householder/BlockHouseholder.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2010 Vincent Lejeune\n// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_BLOCK_HOUSEHOLDER_H\n#define EIGEN_BLOCK_HOUSEHOLDER_H\n\n// This file contains some helper function to deal with block householder reflectors\n\nnamespace Eigen { \n\nnamespace internal {\n  \n/** \\internal */\n// template<typename TriangularFactorType,typename VectorsType,typename CoeffsType>\n// void make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs)\n// {\n//   typedef typename VectorsType::Scalar Scalar;\n//   const Index nbVecs = vectors.cols();\n//   eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs);\n// \n//   for(Index i = 0; i < nbVecs; i++)\n//   {\n//     Index rs = vectors.rows() - i;\n//     // Warning, note that hCoeffs may alias with vectors.\n//     // It is then necessary to copy it before modifying vectors(i,i). 
\n//     typename CoeffsType::Scalar h = hCoeffs(i);\n//     // This hack permits to pass through nested Block<> and Transpose<> expressions.\n//     Scalar *Vii_ptr = const_cast<Scalar*>(vectors.data() + vectors.outerStride()*i + vectors.innerStride()*i);\n//     Scalar Vii = *Vii_ptr;\n//     *Vii_ptr = Scalar(1);\n//     triFactor.col(i).head(i).noalias() = -h * vectors.block(i, 0, rs, i).adjoint()\n//                                        * vectors.col(i).tail(rs);\n//     *Vii_ptr = Vii;\n//     // FIXME add .noalias() once the triangular product can work inplace\n//     triFactor.col(i).head(i) = triFactor.block(0,0,i,i).template triangularView<Upper>()\n//                              * triFactor.col(i).head(i);\n//     triFactor(i,i) = hCoeffs(i);\n//   }\n// }\n\n/** \\internal */\n// This variant avoid modifications in vectors\ntemplate<typename TriangularFactorType,typename VectorsType,typename CoeffsType>\nvoid make_block_householder_triangular_factor(TriangularFactorType& triFactor, const VectorsType& vectors, const CoeffsType& hCoeffs)\n{\n  const Index nbVecs = vectors.cols();\n  eigen_assert(triFactor.rows() == nbVecs && triFactor.cols() == nbVecs && vectors.rows()>=nbVecs);\n\n  for(Index i = nbVecs-1; i >=0 ; --i)\n  {\n    Index rs = vectors.rows() - i - 1;\n    Index rt = nbVecs-i-1;\n\n    if(rt>0)\n    {\n      triFactor.row(i).tail(rt).noalias() = -hCoeffs(i) * vectors.col(i).tail(rs).adjoint()\n                                                        * vectors.bottomRightCorner(rs, rt).template triangularView<UnitLower>();\n            \n      // FIXME add .noalias() once the triangular product can work inplace\n      triFactor.row(i).tail(rt) = triFactor.row(i).tail(rt) * triFactor.bottomRightCorner(rt,rt).template triangularView<Upper>();\n      \n    }\n    triFactor(i,i) = hCoeffs(i);\n  }\n}\n\n/** \\internal\n  * if forward then perform   mat = H0 * H1 * H2 * mat\n  * otherwise perform         mat = H2 * H1 * H0 * mat\n  
*/\ntemplate<typename MatrixType,typename VectorsType,typename CoeffsType>\nvoid apply_block_householder_on_the_left(MatrixType& mat, const VectorsType& vectors, const CoeffsType& hCoeffs, bool forward)\n{\n  enum { TFactorSize = MatrixType::ColsAtCompileTime };\n  Index nbVecs = vectors.cols();\n  Matrix<typename MatrixType::Scalar, TFactorSize, TFactorSize, RowMajor> T(nbVecs,nbVecs);\n  \n  if(forward) make_block_householder_triangular_factor(T, vectors, hCoeffs);\n  else        make_block_householder_triangular_factor(T, vectors, hCoeffs.conjugate());  \n  const TriangularView<const VectorsType, UnitLower> V(vectors);\n\n  // A -= V T V^* A\n  Matrix<typename MatrixType::Scalar,VectorsType::ColsAtCompileTime,MatrixType::ColsAtCompileTime,\n         (VectorsType::MaxColsAtCompileTime==1 && MatrixType::MaxColsAtCompileTime!=1)?RowMajor:ColMajor,\n         VectorsType::MaxColsAtCompileTime,MatrixType::MaxColsAtCompileTime> tmp = V.adjoint() * mat;\n  // FIXME add .noalias() once the triangular product can work inplace\n  if(forward) tmp = T.template triangularView<Upper>()           * tmp;\n  else        tmp = T.template triangularView<Upper>().adjoint() * tmp;\n  mat.noalias() -= V * tmp;\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_BLOCK_HOUSEHOLDER_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Householder/Householder.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_HOUSEHOLDER_H\n#define EIGEN_HOUSEHOLDER_H\n\nnamespace Eigen { \n\nnamespace internal {\ntemplate<int n> struct decrement_size\n{\n  enum {\n    ret = n==Dynamic ? n : n-1\n  };\n};\n}\n\n/** Computes the elementary reflector H such that:\n  * \\f$ H *this = [ beta 0 ... 0]^T \\f$\n  * where the transformation H is:\n  * \\f$ H = I - tau v v^*\\f$\n  * and the vector v is:\n  * \\f$ v^T = [1 essential^T] \\f$\n  *\n  * The essential part of the vector \\c v is stored in *this.\n  * \n  * On output:\n  * \\param tau the scaling factor of the Householder transformation\n  * \\param beta the result of H * \\c *this\n  *\n  * \\sa MatrixBase::makeHouseholder(), MatrixBase::applyHouseholderOnTheLeft(),\n  *     MatrixBase::applyHouseholderOnTheRight()\n  */\ntemplate<typename Derived>\nvoid MatrixBase<Derived>::makeHouseholderInPlace(Scalar& tau, RealScalar& beta)\n{\n  VectorBlock<Derived, internal::decrement_size<Base::SizeAtCompileTime>::ret> essentialPart(derived(), 1, size()-1);\n  makeHouseholder(essentialPart, tau, beta);\n}\n\n/** Computes the elementary reflector H such that:\n  * \\f$ H *this = [ beta 0 ... 
0]^T \\f$\n  * where the transformation H is:\n  * \\f$ H = I - tau v v^*\\f$\n  * and the vector v is:\n  * \\f$ v^T = [1 essential^T] \\f$\n  *\n  * On output:\n  * \\param essential the essential part of the vector \\c v\n  * \\param tau the scaling factor of the Householder transformation\n  * \\param beta the result of H * \\c *this\n  *\n  * \\sa MatrixBase::makeHouseholderInPlace(), MatrixBase::applyHouseholderOnTheLeft(),\n  *     MatrixBase::applyHouseholderOnTheRight()\n  */\ntemplate<typename Derived>\ntemplate<typename EssentialPart>\nvoid MatrixBase<Derived>::makeHouseholder(\n  EssentialPart& essential,\n  Scalar& tau,\n  RealScalar& beta) const\n{\n  using std::sqrt;\n  using numext::conj;\n  \n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(EssentialPart)\n  VectorBlock<const Derived, EssentialPart::SizeAtCompileTime> tail(derived(), 1, size()-1);\n  \n  RealScalar tailSqNorm = size()==1 ? RealScalar(0) : tail.squaredNorm();\n  Scalar c0 = coeff(0);\n  const RealScalar tol = (std::numeric_limits<RealScalar>::min)();\n\n  if(tailSqNorm <= tol && numext::abs2(numext::imag(c0))<=tol)\n  {\n    tau = RealScalar(0);\n    beta = numext::real(c0);\n    essential.setZero();\n  }\n  else\n  {\n    beta = sqrt(numext::abs2(c0) + tailSqNorm);\n    if (numext::real(c0)>=RealScalar(0))\n      beta = -beta;\n    essential = tail / (c0 - beta);\n    tau = conj((beta - c0) / beta);\n  }\n}\n\n/** Apply the elementary reflector H given by\n  * \\f$ H = I - tau v v^*\\f$\n  * with\n  * \\f$ v^T = [1 essential^T] \\f$\n  * from the left to a vector or matrix.\n  *\n  * On input:\n  * \\param essential the essential part of the vector \\c v\n  * \\param tau the scaling factor of the Householder transformation\n  * \\param workspace a pointer to working space with at least\n  *                  this->cols() * essential.size() entries\n  *\n  * \\sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), \n  *     MatrixBase::applyHouseholderOnTheRight()\n  
*/\ntemplate<typename Derived>\ntemplate<typename EssentialPart>\nvoid MatrixBase<Derived>::applyHouseholderOnTheLeft(\n  const EssentialPart& essential,\n  const Scalar& tau,\n  Scalar* workspace)\n{\n  if(rows() == 1)\n  {\n    *this *= Scalar(1)-tau;\n  }\n  else if(tau!=Scalar(0))\n  {\n    Map<typename internal::plain_row_type<PlainObject>::type> tmp(workspace,cols());\n    Block<Derived, EssentialPart::SizeAtCompileTime, Derived::ColsAtCompileTime> bottom(derived(), 1, 0, rows()-1, cols());\n    tmp.noalias() = essential.adjoint() * bottom;\n    tmp += this->row(0);\n    this->row(0) -= tau * tmp;\n    bottom.noalias() -= tau * essential * tmp;\n  }\n}\n\n/** Apply the elementary reflector H given by\n  * \\f$ H = I - tau v v^*\\f$\n  * with\n  * \\f$ v^T = [1 essential^T] \\f$\n  * from the right to a vector or matrix.\n  *\n  * On input:\n  * \\param essential the essential part of the vector \\c v\n  * \\param tau the scaling factor of the Householder transformation\n  * \\param workspace a pointer to working space with at least\n  *                  this->cols() * essential.size() entries\n  *\n  * \\sa MatrixBase::makeHouseholder(), MatrixBase::makeHouseholderInPlace(), \n  *     MatrixBase::applyHouseholderOnTheLeft()\n  */\ntemplate<typename Derived>\ntemplate<typename EssentialPart>\nvoid MatrixBase<Derived>::applyHouseholderOnTheRight(\n  const EssentialPart& essential,\n  const Scalar& tau,\n  Scalar* workspace)\n{\n  if(cols() == 1)\n  {\n    *this *= Scalar(1)-tau;\n  }\n  else if(tau!=Scalar(0))\n  {\n    Map<typename internal::plain_col_type<PlainObject>::type> tmp(workspace,rows());\n    Block<Derived, Derived::RowsAtCompileTime, EssentialPart::SizeAtCompileTime> right(derived(), 0, 1, rows(), cols()-1);\n    tmp.noalias() = right * essential.conjugate();\n    tmp += this->col(0);\n    this->col(0) -= tau * tmp;\n    right.noalias() -= tau * tmp * essential.transpose();\n  }\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_HOUSEHOLDER_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Householder/HouseholderSequence.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_HOUSEHOLDER_SEQUENCE_H\n#define EIGEN_HOUSEHOLDER_SEQUENCE_H\n\nnamespace Eigen { \n\n/** \\ingroup Householder_Module\n  * \\householder_module\n  * \\class HouseholderSequence\n  * \\brief Sequence of Householder reflections acting on subspaces with decreasing size\n  * \\tparam VectorsType type of matrix containing the Householder vectors\n  * \\tparam CoeffsType  type of vector containing the Householder coefficients\n  * \\tparam Side        either OnTheLeft (the default) or OnTheRight\n  *\n  * This class represents a product sequence of Householder reflections where the first Householder reflection\n  * acts on the whole space, the second Householder reflection leaves the one-dimensional subspace spanned by\n  * the first unit vector invariant, the third Householder reflection leaves the two-dimensional subspace\n  * spanned by the first two unit vectors invariant, and so on up to the last reflection which leaves all but\n  * one dimensions invariant and acts only on the last dimension. Such sequences of Householder reflections\n  * are used in several algorithms to zero out certain parts of a matrix. 
Indeed, the methods\n  * HessenbergDecomposition::matrixQ(), Tridiagonalization::matrixQ(), HouseholderQR::householderQ(),\n  * and ColPivHouseholderQR::householderQ() all return a %HouseholderSequence.\n  *\n  * More precisely, the class %HouseholderSequence represents an \\f$ n \\times n \\f$ matrix \\f$ H \\f$ of the\n  * form \\f$ H = \\prod_{i=0}^{n-1} H_i \\f$ where the i-th Householder reflection is \\f$ H_i = I - h_i v_i\n  * v_i^* \\f$. The i-th Householder coefficient \\f$ h_i \\f$ is a scalar and the i-th Householder vector \\f$\n  * v_i \\f$ is a vector of the form\n  * \\f[ \n  * v_i = [\\underbrace{0, \\ldots, 0}_{i-1\\mbox{ zeros}}, 1, \\underbrace{*, \\ldots,*}_{n-i\\mbox{ arbitrary entries}} ]. \n  * \\f]\n  * The last \\f$ n-i \\f$ entries of \\f$ v_i \\f$ are called the essential part of the Householder vector.\n  *\n  * Typical usages are listed below, where H is a HouseholderSequence:\n  * \\code\n  * A.applyOnTheRight(H);             // A = A * H\n  * A.applyOnTheLeft(H);              // A = H * A\n  * A.applyOnTheRight(H.adjoint());   // A = A * H^*\n  * A.applyOnTheLeft(H.adjoint());    // A = H^* * A\n  * MatrixXd Q = H;                   // conversion to a dense matrix\n  * \\endcode\n  * In addition to the adjoint, you can also apply the inverse (=adjoint), the transpose, and the conjugate operators.\n  *\n  * See the documentation for HouseholderSequence(const VectorsType&, const CoeffsType&) for an example.\n  *\n  * \\sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()\n  */\n\nnamespace internal {\n\ntemplate<typename VectorsType, typename CoeffsType, int Side>\nstruct traits<HouseholderSequence<VectorsType,CoeffsType,Side> >\n{\n  typedef typename VectorsType::Scalar Scalar;\n  typedef typename VectorsType::StorageIndex StorageIndex;\n  typedef typename VectorsType::StorageKind StorageKind;\n  enum {\n    RowsAtCompileTime = Side==OnTheLeft ? 
traits<VectorsType>::RowsAtCompileTime\n                                        : traits<VectorsType>::ColsAtCompileTime,\n    ColsAtCompileTime = RowsAtCompileTime,\n    MaxRowsAtCompileTime = Side==OnTheLeft ? traits<VectorsType>::MaxRowsAtCompileTime\n                                           : traits<VectorsType>::MaxColsAtCompileTime,\n    MaxColsAtCompileTime = MaxRowsAtCompileTime,\n    Flags = 0\n  };\n};\n\nstruct HouseholderSequenceShape {};\n\ntemplate<typename VectorsType, typename CoeffsType, int Side>\nstruct evaluator_traits<HouseholderSequence<VectorsType,CoeffsType,Side> >\n  : public evaluator_traits_base<HouseholderSequence<VectorsType,CoeffsType,Side> >\n{\n  typedef HouseholderSequenceShape Shape;\n};\n\ntemplate<typename VectorsType, typename CoeffsType, int Side>\nstruct hseq_side_dependent_impl\n{\n  typedef Block<const VectorsType, Dynamic, 1> EssentialVectorType;\n  typedef HouseholderSequence<VectorsType, CoeffsType, OnTheLeft> HouseholderSequenceType;\n  static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)\n  {\n    Index start = k+1+h.m_shift;\n    return Block<const VectorsType,Dynamic,1>(h.m_vectors, start, k, h.rows()-start, 1);\n  }\n};\n\ntemplate<typename VectorsType, typename CoeffsType>\nstruct hseq_side_dependent_impl<VectorsType, CoeffsType, OnTheRight>\n{\n  typedef Transpose<Block<const VectorsType, 1, Dynamic> > EssentialVectorType;\n  typedef HouseholderSequence<VectorsType, CoeffsType, OnTheRight> HouseholderSequenceType;\n  static inline const EssentialVectorType essentialVector(const HouseholderSequenceType& h, Index k)\n  {\n    Index start = k+1+h.m_shift;\n    return Block<const VectorsType,1,Dynamic>(h.m_vectors, k, start, 1, h.rows()-start).transpose();\n  }\n};\n\ntemplate<typename OtherScalarType, typename MatrixType> struct matrix_type_times_scalar_type\n{\n  typedef typename ScalarBinaryOpTraits<OtherScalarType, typename MatrixType::Scalar>::ReturnType\n    
ResultScalar;\n  typedef Matrix<ResultScalar, MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime,\n                 0, MatrixType::MaxRowsAtCompileTime, MatrixType::MaxColsAtCompileTime> Type;\n};\n\n} // end namespace internal\n\ntemplate<typename VectorsType, typename CoeffsType, int Side> class HouseholderSequence\n  : public EigenBase<HouseholderSequence<VectorsType,CoeffsType,Side> >\n{\n    typedef typename internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::EssentialVectorType EssentialVectorType;\n  \n  public:\n    enum {\n      RowsAtCompileTime = internal::traits<HouseholderSequence>::RowsAtCompileTime,\n      ColsAtCompileTime = internal::traits<HouseholderSequence>::ColsAtCompileTime,\n      MaxRowsAtCompileTime = internal::traits<HouseholderSequence>::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = internal::traits<HouseholderSequence>::MaxColsAtCompileTime\n    };\n    typedef typename internal::traits<HouseholderSequence>::Scalar Scalar;\n\n    typedef HouseholderSequence<\n      typename internal::conditional<NumTraits<Scalar>::IsComplex,\n        typename internal::remove_all<typename VectorsType::ConjugateReturnType>::type,\n        VectorsType>::type,\n      typename internal::conditional<NumTraits<Scalar>::IsComplex,\n        typename internal::remove_all<typename CoeffsType::ConjugateReturnType>::type,\n        CoeffsType>::type,\n      Side\n    > ConjugateReturnType;\n\n    /** \\brief Constructor.\n      * \\param[in]  v      %Matrix containing the essential parts of the Householder vectors\n      * \\param[in]  h      Vector containing the Householder coefficients\n      *\n      * Constructs the Householder sequence with coefficients given by \\p h and vectors given by \\p v. 
The\n      * i-th Householder coefficient \\f$ h_i \\f$ is given by \\p h(i) and the essential part of the i-th\n      * Householder vector \\f$ v_i \\f$ is given by \\p v(k,i) with \\p k > \\p i (the subdiagonal part of the\n      * i-th column). If \\p v has fewer columns than rows, then the Householder sequence contains as many\n      * Householder reflections as there are columns.\n      *\n      * \\note The %HouseholderSequence object stores \\p v and \\p h by reference.\n      *\n      * Example: \\include HouseholderSequence_HouseholderSequence.cpp\n      * Output: \\verbinclude HouseholderSequence_HouseholderSequence.out\n      *\n      * \\sa setLength(), setShift()\n      */\n    HouseholderSequence(const VectorsType& v, const CoeffsType& h)\n      : m_vectors(v), m_coeffs(h), m_trans(false), m_length(v.diagonalSize()),\n        m_shift(0)\n    {\n    }\n\n    /** \\brief Copy constructor. */\n    HouseholderSequence(const HouseholderSequence& other)\n      : m_vectors(other.m_vectors),\n        m_coeffs(other.m_coeffs),\n        m_trans(other.m_trans),\n        m_length(other.m_length),\n        m_shift(other.m_shift)\n    {\n    }\n\n    /** \\brief Number of rows of transformation viewed as a matrix.\n      * \\returns Number of rows \n      * \\details This equals the dimension of the space that the transformation acts on.\n      */\n    Index rows() const { return Side==OnTheLeft ? 
m_vectors.rows() : m_vectors.cols(); }\n\n    /** \\brief Number of columns of transformation viewed as a matrix.\n      * \\returns Number of columns\n      * \\details This equals the dimension of the space that the transformation acts on.\n      */\n    Index cols() const { return rows(); }\n\n    /** \\brief Essential part of a Householder vector.\n      * \\param[in]  k  Index of Householder reflection\n      * \\returns    Vector containing non-trivial entries of k-th Householder vector\n      *\n      * This function returns the essential part of the Householder vector \\f$ v_i \\f$. This is a vector of\n      * length \\f$ n-i \\f$ containing the last \\f$ n-i \\f$ entries of the vector\n      * \\f[ \n      * v_i = [\\underbrace{0, \\ldots, 0}_{i-1\\mbox{ zeros}}, 1, \\underbrace{*, \\ldots,*}_{n-i\\mbox{ arbitrary entries}} ]. \n      * \\f]\n      * The index \\f$ i \\f$ equals \\p k + shift(), corresponding to the k-th column of the matrix \\p v\n      * passed to the constructor.\n      *\n      * \\sa setShift(), shift()\n      */\n    const EssentialVectorType essentialVector(Index k) const\n    {\n      eigen_assert(k >= 0 && k < m_length);\n      return internal::hseq_side_dependent_impl<VectorsType,CoeffsType,Side>::essentialVector(*this, k);\n    }\n\n    /** \\brief %Transpose of the Householder sequence. */\n    HouseholderSequence transpose() const\n    {\n      return HouseholderSequence(*this).setTrans(!m_trans);\n    }\n\n    /** \\brief Complex conjugate of the Householder sequence. */\n    ConjugateReturnType conjugate() const\n    {\n      return ConjugateReturnType(m_vectors.conjugate(), m_coeffs.conjugate())\n             .setTrans(m_trans)\n             .setLength(m_length)\n             .setShift(m_shift);\n    }\n\n    /** \\brief Adjoint (conjugate transpose) of the Householder sequence. 
*/\n    ConjugateReturnType adjoint() const\n    {\n      return conjugate().setTrans(!m_trans);\n    }\n\n    /** \\brief Inverse of the Householder sequence (equals the adjoint). */\n    ConjugateReturnType inverse() const { return adjoint(); }\n\n    /** \\internal */\n    template<typename DestType> inline void evalTo(DestType& dst) const\n    {\n      Matrix<Scalar, DestType::RowsAtCompileTime, 1,\n             AutoAlign|ColMajor, DestType::MaxRowsAtCompileTime, 1> workspace(rows());\n      evalTo(dst, workspace);\n    }\n\n    /** \\internal */\n    template<typename Dest, typename Workspace>\n    void evalTo(Dest& dst, Workspace& workspace) const\n    {\n      workspace.resize(rows());\n      Index vecs = m_length;\n      if(internal::is_same_dense(dst,m_vectors))\n      {\n        // in-place\n        dst.diagonal().setOnes();\n        dst.template triangularView<StrictlyUpper>().setZero();\n        for(Index k = vecs-1; k >= 0; --k)\n        {\n          Index cornerSize = rows() - k - m_shift;\n          if(m_trans)\n            dst.bottomRightCorner(cornerSize, cornerSize)\n               .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), workspace.data());\n          else\n            dst.bottomRightCorner(cornerSize, cornerSize)\n               .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), workspace.data());\n\n          // clear the off diagonal vector\n          dst.col(k).tail(rows()-k-1).setZero();\n        }\n        // clear the remaining columns if needed\n        for(Index k = 0; k<cols()-vecs ; ++k)\n          dst.col(k).tail(rows()-k-1).setZero();\n      }\n      else\n      {\n        dst.setIdentity(rows(), rows());\n        for(Index k = vecs-1; k >= 0; --k)\n        {\n          Index cornerSize = rows() - k - m_shift;\n          if(m_trans)\n            dst.bottomRightCorner(cornerSize, cornerSize)\n               .applyHouseholderOnTheRight(essentialVector(k), m_coeffs.coeff(k), 
&workspace.coeffRef(0));\n          else\n            dst.bottomRightCorner(cornerSize, cornerSize)\n               .applyHouseholderOnTheLeft(essentialVector(k), m_coeffs.coeff(k), &workspace.coeffRef(0));\n        }\n      }\n    }\n\n    /** \\internal */\n    template<typename Dest> inline void applyThisOnTheRight(Dest& dst) const\n    {\n      Matrix<Scalar,1,Dest::RowsAtCompileTime,RowMajor,1,Dest::MaxRowsAtCompileTime> workspace(dst.rows());\n      applyThisOnTheRight(dst, workspace);\n    }\n\n    /** \\internal */\n    template<typename Dest, typename Workspace>\n    inline void applyThisOnTheRight(Dest& dst, Workspace& workspace) const\n    {\n      workspace.resize(dst.rows());\n      for(Index k = 0; k < m_length; ++k)\n      {\n        Index actual_k = m_trans ? m_length-k-1 : k;\n        dst.rightCols(rows()-m_shift-actual_k)\n           .applyHouseholderOnTheRight(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data());\n      }\n    }\n\n    /** \\internal */\n    template<typename Dest> inline void applyThisOnTheLeft(Dest& dst) const\n    {\n      Matrix<Scalar,1,Dest::ColsAtCompileTime,RowMajor,1,Dest::MaxColsAtCompileTime> workspace;\n      applyThisOnTheLeft(dst, workspace);\n    }\n\n    /** \\internal */\n    template<typename Dest, typename Workspace>\n    inline void applyThisOnTheLeft(Dest& dst, Workspace& workspace) const\n    {\n      const Index BlockSize = 48;\n      // if the entries are large enough, then apply the reflectors by block\n      if(m_length>=BlockSize && dst.cols()>1)\n      {\n        for(Index i = 0; i < m_length; i+=BlockSize)\n        {\n          Index end = m_trans ? (std::min)(m_length,i+BlockSize) : m_length-i;\n          Index k = m_trans ? 
i : (std::max)(Index(0),end-BlockSize);\n          Index bs = end-k;\n          Index start = k + m_shift;\n          \n          typedef Block<typename internal::remove_all<VectorsType>::type,Dynamic,Dynamic> SubVectorsType;\n          SubVectorsType sub_vecs1(m_vectors.const_cast_derived(), Side==OnTheRight ? k : start,\n                                                                   Side==OnTheRight ? start : k,\n                                                                   Side==OnTheRight ? bs : m_vectors.rows()-start,\n                                                                   Side==OnTheRight ? m_vectors.cols()-start : bs);\n          typename internal::conditional<Side==OnTheRight, Transpose<SubVectorsType>, SubVectorsType&>::type sub_vecs(sub_vecs1);\n          Block<Dest,Dynamic,Dynamic> sub_dst(dst,dst.rows()-rows()+m_shift+k,0, rows()-m_shift-k,dst.cols());\n          apply_block_householder_on_the_left(sub_dst, sub_vecs, m_coeffs.segment(k, bs), !m_trans);\n        }\n      }\n      else\n      {\n        workspace.resize(dst.cols());\n        for(Index k = 0; k < m_length; ++k)\n        {\n          Index actual_k = m_trans ? 
k : m_length-k-1;\n          dst.bottomRows(rows()-m_shift-actual_k)\n            .applyHouseholderOnTheLeft(essentialVector(actual_k), m_coeffs.coeff(actual_k), workspace.data());\n        }\n      }\n    }\n\n    /** \\brief Computes the product of a Householder sequence with a matrix.\n      * \\param[in]  other  %Matrix being multiplied.\n      * \\returns    Expression object representing the product.\n      *\n      * This function computes \\f$ HM \\f$ where \\f$ H \\f$ is the Householder sequence represented by \\p *this\n      * and \\f$ M \\f$ is the matrix \\p other.\n      */\n    template<typename OtherDerived>\n    typename internal::matrix_type_times_scalar_type<Scalar, OtherDerived>::Type operator*(const MatrixBase<OtherDerived>& other) const\n    {\n      typename internal::matrix_type_times_scalar_type<Scalar, OtherDerived>::Type\n        res(other.template cast<typename internal::matrix_type_times_scalar_type<Scalar,OtherDerived>::ResultScalar>());\n      applyThisOnTheLeft(res);\n      return res;\n    }\n\n    template<typename _VectorsType, typename _CoeffsType, int _Side> friend struct internal::hseq_side_dependent_impl;\n\n    /** \\brief Sets the length of the Householder sequence.\n      * \\param [in]  length  New value for the length.\n      *\n      * By default, the length \\f$ n \\f$ of the Householder sequence \\f$ H = H_0 H_1 \\ldots H_{n-1} \\f$ is set\n      * to the number of columns of the matrix \\p v passed to the constructor, or the number of rows if that\n      * is smaller. 
After this function is called, the length equals \\p length.\n      *\n      * \\sa length()\n      */\n    HouseholderSequence& setLength(Index length)\n    {\n      m_length = length;\n      return *this;\n    }\n\n    /** \\brief Sets the shift of the Householder sequence.\n      * \\param [in]  shift  New value for the shift.\n      *\n      * By default, a %HouseholderSequence object represents \\f$ H = H_0 H_1 \\ldots H_{n-1} \\f$ and the i-th\n      * column of the matrix \\p v passed to the constructor corresponds to the i-th Householder\n      * reflection. After this function is called, the object represents \\f$ H = H_{\\mathrm{shift}}\n      * H_{\\mathrm{shift}+1} \\ldots H_{n-1} \\f$ and the i-th column of \\p v corresponds to the (shift+i)-th\n      * Householder reflection.\n      *\n      * \\sa shift()\n      */\n    HouseholderSequence& setShift(Index shift)\n    {\n      m_shift = shift;\n      return *this;\n    }\n\n    Index length() const { return m_length; }  /**< \\brief Returns the length of the Householder sequence. */\n    Index shift() const { return m_shift; }    /**< \\brief Returns the shift of the Householder sequence. */\n\n    /* Necessary for .adjoint() and .conjugate() */\n    template <typename VectorsType2, typename CoeffsType2, int Side2> friend class HouseholderSequence;\n\n  protected:\n\n    /** \\brief Sets the transpose flag.\n      * \\param [in]  trans  New value of the transpose flag.\n      *\n      * By default, the transpose flag is not set. If the transpose flag is set, then this object represents \n      * \\f$ H^T = H_{n-1}^T \\ldots H_1^T H_0^T \\f$ instead of \\f$ H = H_0 H_1 \\ldots H_{n-1} \\f$.\n      *\n      * \\sa trans()\n      */\n    HouseholderSequence& setTrans(bool trans)\n    {\n      m_trans = trans;\n      return *this;\n    }\n\n    bool trans() const { return m_trans; }     /**< \\brief Returns the transpose flag. 
*/\n\n    typename VectorsType::Nested m_vectors;\n    typename CoeffsType::Nested m_coeffs;\n    bool m_trans;\n    Index m_length;\n    Index m_shift;\n};\n\n/** \\brief Computes the product of a matrix with a Householder sequence.\n  * \\param[in]  other  %Matrix being multiplied.\n  * \\param[in]  h      %HouseholderSequence being multiplied.\n  * \\returns    Expression object representing the product.\n  *\n  * This function computes \\f$ MH \\f$ where \\f$ M \\f$ is the matrix \\p other and \\f$ H \\f$ is the\n  * Householder sequence represented by \\p h.\n  */\ntemplate<typename OtherDerived, typename VectorsType, typename CoeffsType, int Side>\ntypename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::Type operator*(const MatrixBase<OtherDerived>& other, const HouseholderSequence<VectorsType,CoeffsType,Side>& h)\n{\n  typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::Type\n    res(other.template cast<typename internal::matrix_type_times_scalar_type<typename VectorsType::Scalar,OtherDerived>::ResultScalar>());\n  h.applyThisOnTheRight(res);\n  return res;\n}\n\n/** \\ingroup Householder_Module \\householder_module\n  * \\brief Convenience function for constructing a Householder sequence. \n  * \\returns A HouseholderSequence constructed from the specified arguments.\n  */\ntemplate<typename VectorsType, typename CoeffsType>\nHouseholderSequence<VectorsType,CoeffsType> householderSequence(const VectorsType& v, const CoeffsType& h)\n{\n  return HouseholderSequence<VectorsType,CoeffsType,OnTheLeft>(v, h);\n}\n\n/** \\ingroup Householder_Module \\householder_module\n  * \\brief Convenience function for constructing a Householder sequence. 
\n  * \\returns A HouseholderSequence constructed from the specified arguments.\n  * \\details This function differs from householderSequence() in that the template argument \\p OnTheSide of\n  * the constructed HouseholderSequence is set to OnTheRight, instead of the default OnTheLeft.\n  */\ntemplate<typename VectorsType, typename CoeffsType>\nHouseholderSequence<VectorsType,CoeffsType,OnTheRight> rightHouseholderSequence(const VectorsType& v, const CoeffsType& h)\n{\n  return HouseholderSequence<VectorsType,CoeffsType,OnTheRight>(v, h);\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_HOUSEHOLDER_SEQUENCE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/IterativeLinearSolvers/BasicPreconditioners.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_BASIC_PRECONDITIONERS_H\n#define EIGEN_BASIC_PRECONDITIONERS_H\n\nnamespace Eigen { \n\n/** \\ingroup IterativeLinearSolvers_Module\n  * \\brief A preconditioner based on the digonal entries\n  *\n  * This class allows to approximately solve for A.x = b problems assuming A is a diagonal matrix.\n  * In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for:\n    \\code\n    A.diagonal().asDiagonal() . x = b\n    \\endcode\n  *\n  * \\tparam _Scalar the type of the scalar.\n  *\n  * \\implsparsesolverconcept\n  *\n  * This preconditioner is suitable for both selfadjoint and general problems.\n  * The diagonal entries are pre-inverted and stored into a dense vector.\n  *\n  * \\note A variant that has yet to be implemented would attempt to preserve the norm of each column.\n  *\n  * \\sa class LeastSquareDiagonalPreconditioner, class ConjugateGradient\n  */\ntemplate <typename _Scalar>\nclass DiagonalPreconditioner\n{\n    typedef _Scalar Scalar;\n    typedef Matrix<Scalar,Dynamic,1> Vector;\n  public:\n    typedef typename Vector::StorageIndex StorageIndex;\n    enum {\n      ColsAtCompileTime = Dynamic,\n      MaxColsAtCompileTime = Dynamic\n    };\n\n    DiagonalPreconditioner() : m_isInitialized(false) {}\n\n    template<typename MatType>\n    explicit DiagonalPreconditioner(const MatType& mat) : m_invdiag(mat.cols())\n    {\n      compute(mat);\n    }\n\n    Index rows() const { return m_invdiag.size(); }\n    Index cols() const { return m_invdiag.size(); }\n    \n    template<typename MatType>\n    
DiagonalPreconditioner& analyzePattern(const MatType& )\n    {\n      return *this;\n    }\n    \n    template<typename MatType>\n    DiagonalPreconditioner& factorize(const MatType& mat)\n    {\n      m_invdiag.resize(mat.cols());\n      for(int j=0; j<mat.outerSize(); ++j)\n      {\n        typename MatType::InnerIterator it(mat,j);\n        while(it && it.index()!=j) ++it;\n        if(it && it.index()==j && it.value()!=Scalar(0))\n          m_invdiag(j) = Scalar(1)/it.value();\n        else\n          m_invdiag(j) = Scalar(1);\n      }\n      m_isInitialized = true;\n      return *this;\n    }\n    \n    template<typename MatType>\n    DiagonalPreconditioner& compute(const MatType& mat)\n    {\n      return factorize(mat);\n    }\n\n    /** \\internal */\n    template<typename Rhs, typename Dest>\n    void _solve_impl(const Rhs& b, Dest& x) const\n    {\n      x = m_invdiag.array() * b.array() ;\n    }\n\n    template<typename Rhs> inline const Solve<DiagonalPreconditioner, Rhs>\n    solve(const MatrixBase<Rhs>& b) const\n    {\n      eigen_assert(m_isInitialized && \"DiagonalPreconditioner is not initialized.\");\n      eigen_assert(m_invdiag.size()==b.rows()\n                && \"DiagonalPreconditioner::solve(): invalid number of rows of the right hand side matrix b\");\n      return Solve<DiagonalPreconditioner, Rhs>(*this, b.derived());\n    }\n    \n    ComputationInfo info() { return Success; }\n\n  protected:\n    Vector m_invdiag;\n    bool m_isInitialized;\n};\n\n/** \\ingroup IterativeLinearSolvers_Module\n  * \\brief Jacobi preconditioner for LeastSquaresConjugateGradient\n  *\n  * This class allows to approximately solve for A' A x  = A' b problems assuming A' A is a diagonal matrix.\n  * In other words, this preconditioner neglects all off diagonal entries and, in Eigen's language, solves for:\n    \\code\n    (A.adjoint() * A).diagonal().asDiagonal() * x = b\n    \\endcode\n  *\n  * \\tparam _Scalar the type of the scalar.\n  *\n  * 
\\implsparsesolverconcept\n  *\n  * The diagonal entries are pre-inverted and stored into a dense vector.\n  * \n  * \\sa class LeastSquaresConjugateGradient, class DiagonalPreconditioner\n  */\ntemplate <typename _Scalar>\nclass LeastSquareDiagonalPreconditioner : public DiagonalPreconditioner<_Scalar>\n{\n    typedef _Scalar Scalar;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    typedef DiagonalPreconditioner<_Scalar> Base;\n    using Base::m_invdiag;\n  public:\n\n    LeastSquareDiagonalPreconditioner() : Base() {}\n\n    template<typename MatType>\n    explicit LeastSquareDiagonalPreconditioner(const MatType& mat) : Base()\n    {\n      compute(mat);\n    }\n\n    template<typename MatType>\n    LeastSquareDiagonalPreconditioner& analyzePattern(const MatType& )\n    {\n      return *this;\n    }\n    \n    template<typename MatType>\n    LeastSquareDiagonalPreconditioner& factorize(const MatType& mat)\n    {\n      // Compute the inverse squared-norm of each column of mat\n      m_invdiag.resize(mat.cols());\n      for(Index j=0; j<mat.outerSize(); ++j)\n      {\n        RealScalar sum = mat.innerVector(j).squaredNorm();\n        if(sum>0)\n          m_invdiag(j) = RealScalar(1)/sum;\n        else\n          m_invdiag(j) = RealScalar(1);\n      }\n      Base::m_isInitialized = true;\n      return *this;\n    }\n    \n    template<typename MatType>\n    LeastSquareDiagonalPreconditioner& compute(const MatType& mat)\n    {\n      return factorize(mat);\n    }\n    \n    ComputationInfo info() { return Success; }\n\n  protected:\n};\n\n/** \\ingroup IterativeLinearSolvers_Module\n  * \\brief A naive preconditioner which approximates any matrix as the identity matrix\n  *\n  * \\implsparsesolverconcept\n  *\n  * \\sa class DiagonalPreconditioner\n  */\nclass IdentityPreconditioner\n{\n  public:\n\n    IdentityPreconditioner() {}\n\n    template<typename MatrixType>\n    explicit IdentityPreconditioner(const MatrixType& ) {}\n    \n    
template<typename MatrixType>\n    IdentityPreconditioner& analyzePattern(const MatrixType& ) { return *this; }\n    \n    template<typename MatrixType>\n    IdentityPreconditioner& factorize(const MatrixType& ) { return *this; }\n\n    template<typename MatrixType>\n    IdentityPreconditioner& compute(const MatrixType& ) { return *this; }\n    \n    template<typename Rhs>\n    inline const Rhs& solve(const Rhs& b) const { return b; }\n    \n    ComputationInfo info() { return Success; }\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_BASIC_PRECONDITIONERS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_BICGSTAB_H\n#define EIGEN_BICGSTAB_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/** \\internal Low-level bi conjugate gradient stabilized algorithm\n  * \\param mat The matrix A\n  * \\param rhs The right hand side vector b\n  * \\param x On input and initial solution, on output the computed solution.\n  * \\param precond A preconditioner being able to efficiently solve for an\n  *                approximation of Ax=b (regardless of b)\n  * \\param iters On input the max number of iteration, on output the number of performed iterations.\n  * \\param tol_error On input the tolerance error, on output an estimation of the relative error.\n  * \\return false in the case of numerical issue, for example a break down of BiCGSTAB. 
\n  */\ntemplate<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>\nbool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,\n              const Preconditioner& precond, Index& iters,\n              typename Dest::RealScalar& tol_error)\n{\n  using std::sqrt;\n  using std::abs;\n  typedef typename Dest::RealScalar RealScalar;\n  typedef typename Dest::Scalar Scalar;\n  typedef Matrix<Scalar,Dynamic,1> VectorType;\n  RealScalar tol = tol_error;\n  Index maxIters = iters;\n\n  Index n = mat.cols();\n  VectorType r  = rhs - mat * x;\n  VectorType r0 = r;\n  \n  RealScalar r0_sqnorm = r0.squaredNorm();\n  RealScalar rhs_sqnorm = rhs.squaredNorm();\n  if(rhs_sqnorm == 0)\n  {\n    x.setZero();\n    return true;\n  }\n  Scalar rho    = 1;\n  Scalar alpha  = 1;\n  Scalar w      = 1;\n  \n  VectorType v = VectorType::Zero(n), p = VectorType::Zero(n);\n  VectorType y(n),  z(n);\n  VectorType kt(n), ks(n);\n\n  VectorType s(n), t(n);\n\n  RealScalar tol2 = tol*tol*rhs_sqnorm;\n  RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon();\n  Index i = 0;\n  Index restarts = 0;\n\n  while ( r.squaredNorm() > tol2 && i<maxIters )\n  {\n    Scalar rho_old = rho;\n\n    rho = r0.dot(r);\n    if (abs(rho) < eps2*r0_sqnorm)\n    {\n      // The new residual vector became too orthogonal to the arbitrarily chosen direction r0\n      // Let's restart with a new r0:\n      r  = rhs - mat * x;\n      r0 = r;\n      rho = r0_sqnorm = r.squaredNorm();\n      if(restarts++ == 0)\n        i = 0;\n    }\n    Scalar beta = (rho/rho_old) * (alpha / w);\n    p = r + beta * (p - w * v);\n    \n    y = precond.solve(p);\n    \n    v.noalias() = mat * y;\n\n    alpha = rho / r0.dot(v);\n    s = r - alpha * v;\n\n    z = precond.solve(s);\n    t.noalias() = mat * z;\n\n    RealScalar tmp = t.squaredNorm();\n    if(tmp>RealScalar(0))\n      w = t.dot(s) / tmp;\n    else\n      w = Scalar(0);\n    x += alpha * y + w * z;\n    r = s - w * t;\n    ++i;\n  
}\n  tol_error = sqrt(r.squaredNorm()/rhs_sqnorm);\n  iters = i;\n  return true; \n}\n\n}\n\ntemplate< typename _MatrixType,\n          typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >\nclass BiCGSTAB;\n\nnamespace internal {\n\ntemplate< typename _MatrixType, typename _Preconditioner>\nstruct traits<BiCGSTAB<_MatrixType,_Preconditioner> >\n{\n  typedef _MatrixType MatrixType;\n  typedef _Preconditioner Preconditioner;\n};\n\n}\n\n/** \\ingroup IterativeLinearSolvers_Module\n  * \\brief A bi conjugate gradient stabilized solver for sparse square problems\n  *\n  * This class allows to solve for A.x = b sparse linear problems using a bi conjugate gradient\n  * stabilized algorithm. The vectors x and b can be either dense or sparse.\n  *\n  * \\tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.\n  * \\tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner\n  *\n  * \\implsparsesolverconcept\n  *\n  * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()\n  * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations\n  * and NumTraits<Scalar>::epsilon() for the tolerance.\n  * \n  * The tolerance corresponds to the relative residual error: |Ax-b|/|b|\n  * \n  * \\b Performance: when using sparse matrices, best performance is achied for a row-major sparse matrix format.\n  * Moreover, in this case multi-threading can be exploited if the user code is compiled with OpenMP enabled.\n  * See \\ref TopicMultiThreading for details.\n  * \n  * This class can be used as the direct solver classes. 
Here is a typical usage example:\n  * \\include BiCGSTAB_simple.cpp\n  * \n  * By default the iterations start with x=0 as an initial guess of the solution.\n  * One can control the start using the solveWithGuess() method.\n  * \n  * BiCGSTAB can also be used in a matrix-free context, see the following \\link MatrixfreeSolverExample example \\endlink.\n  *\n  * \\sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner\n  */\ntemplate< typename _MatrixType, typename _Preconditioner>\nclass BiCGSTAB : public IterativeSolverBase<BiCGSTAB<_MatrixType,_Preconditioner> >\n{\n  typedef IterativeSolverBase<BiCGSTAB> Base;\n  using Base::matrix;\n  using Base::m_error;\n  using Base::m_iterations;\n  using Base::m_info;\n  using Base::m_isInitialized;\npublic:\n  typedef _MatrixType MatrixType;\n  typedef typename MatrixType::Scalar Scalar;\n  typedef typename MatrixType::RealScalar RealScalar;\n  typedef _Preconditioner Preconditioner;\n\npublic:\n\n  /** Default constructor. */\n  BiCGSTAB() : Base() {}\n\n  /** Initialize the solver with matrix \\a A for further \\c Ax=b solving.\n    * \n    * This constructor is a shortcut for the default constructor followed\n    * by a call to compute().\n    * \n    * \\warning this class stores a reference to the matrix A as well as some\n    * precomputed values that depend on it. Therefore, if \\a A is changed\n    * this class becomes invalid. 
Call compute() to update it with the new\n    * matrix A, or modify a copy of A.\n    */\n  template<typename MatrixDerived>\n  explicit BiCGSTAB(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}\n\n  ~BiCGSTAB() {}\n\n  /** \\internal */\n  template<typename Rhs,typename Dest>\n  void _solve_with_guess_impl(const Rhs& b, Dest& x) const\n  {    \n    bool failed = false;\n    for(Index j=0; j<b.cols(); ++j)\n    {\n      m_iterations = Base::maxIterations();\n      m_error = Base::m_tolerance;\n      \n      typename Dest::ColXpr xj(x,j);\n      if(!internal::bicgstab(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error))\n        failed = true;\n    }\n    m_info = failed ? NumericalIssue\n           : m_error <= Base::m_tolerance ? Success\n           : NoConvergence;\n    m_isInitialized = true;\n  }\n\n  /** \\internal */\n  using Base::_solve_impl;\n  template<typename Rhs,typename Dest>\n  void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const\n  {\n    x.resize(this->rows(),b.cols());\n    x.setZero();\n    _solve_with_guess_impl(b,x);\n  }\n\nprotected:\n\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_BICGSTAB_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_CONJUGATE_GRADIENT_H\n#define EIGEN_CONJUGATE_GRADIENT_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/** \\internal Low-level conjugate gradient algorithm\n  * \\param mat The matrix A\n  * \\param rhs The right hand side vector b\n  * \\param x On input and initial solution, on output the computed solution.\n  * \\param precond A preconditioner being able to efficiently solve for an\n  *                approximation of Ax=b (regardless of b)\n  * \\param iters On input the max number of iteration, on output the number of performed iterations.\n  * \\param tol_error On input the tolerance error, on output an estimation of the relative error.\n  */\ntemplate<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>\nEIGEN_DONT_INLINE\nvoid conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,\n                        const Preconditioner& precond, Index& iters,\n                        typename Dest::RealScalar& tol_error)\n{\n  using std::sqrt;\n  using std::abs;\n  typedef typename Dest::RealScalar RealScalar;\n  typedef typename Dest::Scalar Scalar;\n  typedef Matrix<Scalar,Dynamic,1> VectorType;\n  \n  RealScalar tol = tol_error;\n  Index maxIters = iters;\n  \n  Index n = mat.cols();\n\n  VectorType residual = rhs - mat * x; //initial residual\n\n  RealScalar rhsNorm2 = rhs.squaredNorm();\n  if(rhsNorm2 == 0) \n  {\n    x.setZero();\n    iters = 0;\n    tol_error = 0;\n    return;\n  }\n  RealScalar threshold = tol*tol*rhsNorm2;\n  RealScalar residualNorm2 = residual.squaredNorm();\n  if (residualNorm2 < threshold)\n  {\n    
iters = 0;\n    tol_error = sqrt(residualNorm2 / rhsNorm2);\n    return;\n  }\n  \n  VectorType p(n);\n  p = precond.solve(residual);      // initial search direction\n\n  VectorType z(n), tmp(n);\n  RealScalar absNew = numext::real(residual.dot(p));  // the square of the absolute value of r scaled by invM\n  Index i = 0;\n  while(i < maxIters)\n  {\n    tmp.noalias() = mat * p;                    // the bottleneck of the algorithm\n\n    Scalar alpha = absNew / p.dot(tmp);         // the amount we travel on dir\n    x += alpha * p;                             // update solution\n    residual -= alpha * tmp;                    // update residual\n    \n    residualNorm2 = residual.squaredNorm();\n    if(residualNorm2 < threshold)\n      break;\n    \n    z = precond.solve(residual);                // approximately solve for \"A z = residual\"\n\n    RealScalar absOld = absNew;\n    absNew = numext::real(residual.dot(z));     // update the absolute value of r\n    RealScalar beta = absNew / absOld;          // calculate the Gram-Schmidt value used to create the new search direction\n    p = z + beta * p;                           // update search direction\n    i++;\n  }\n  tol_error = sqrt(residualNorm2 / rhsNorm2);\n  iters = i;\n}\n\n}\n\ntemplate< typename _MatrixType, int _UpLo=Lower,\n          typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >\nclass ConjugateGradient;\n\nnamespace internal {\n\ntemplate< typename _MatrixType, int _UpLo, typename _Preconditioner>\nstruct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >\n{\n  typedef _MatrixType MatrixType;\n  typedef _Preconditioner Preconditioner;\n};\n\n}\n\n/** \\ingroup IterativeLinearSolvers_Module\n  * \\brief A conjugate gradient solver for sparse (or dense) self-adjoint problems\n  *\n  * This class allows to solve for A.x = b linear problems using an iterative conjugate gradient algorithm.\n  * The matrix A must be selfadjoint. 
The matrix A and the vectors x and b can be either dense or sparse.\n  *\n  * \\tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.\n  * \\tparam _UpLo the triangular part that will be used for the computations. It can be Lower,\n  *               \\c Upper, or \\c Lower|Upper in which the full matrix entries will be considered.\n  *               Default is \\c Lower, best performance is \\c Lower|Upper.\n  * \\tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner\n  *\n  * \\implsparsesolverconcept\n  *\n  * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()\n  * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations\n  * and NumTraits<Scalar>::epsilon() for the tolerance.\n  * \n  * The tolerance corresponds to the relative residual error: |Ax-b|/|b|\n  * \n  * \\b Performance: Even though the default value of \\c _UpLo is \\c Lower, significantly higher performance is\n  * achieved when using a complete matrix and \\b Lower|Upper as the \\a _UpLo template parameter. Moreover, in this\n  * case multi-threading can be exploited if the user code is compiled with OpenMP enabled.\n  * See \\ref TopicMultiThreading for details.\n  * \n  * This class can be used as the direct solver classes. 
Here is a typical usage example:\n    \\code\n    int n = 10000;\n    VectorXd x(n), b(n);\n    SparseMatrix<double> A(n,n);\n    // fill A and b\n    ConjugateGradient<SparseMatrix<double>, Lower|Upper> cg;\n    cg.compute(A);\n    x = cg.solve(b);\n    std::cout << \"#iterations:     \" << cg.iterations() << std::endl;\n    std::cout << \"estimated error: \" << cg.error()      << std::endl;\n    // update b, and solve again\n    x = cg.solve(b);\n    \\endcode\n  * \n  * By default the iterations start with x=0 as an initial guess of the solution.\n  * One can control the start using the solveWithGuess() method.\n  * \n  * ConjugateGradient can also be used in a matrix-free context, see the following \\link MatrixfreeSolverExample example \\endlink.\n  *\n  * \\sa class LeastSquaresConjugateGradient, class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner\n  */\ntemplate< typename _MatrixType, int _UpLo, typename _Preconditioner>\nclass ConjugateGradient : public IterativeSolverBase<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >\n{\n  typedef IterativeSolverBase<ConjugateGradient> Base;\n  using Base::matrix;\n  using Base::m_error;\n  using Base::m_iterations;\n  using Base::m_info;\n  using Base::m_isInitialized;\npublic:\n  typedef _MatrixType MatrixType;\n  typedef typename MatrixType::Scalar Scalar;\n  typedef typename MatrixType::RealScalar RealScalar;\n  typedef _Preconditioner Preconditioner;\n\n  enum {\n    UpLo = _UpLo\n  };\n\npublic:\n\n  /** Default constructor. */\n  ConjugateGradient() : Base() {}\n\n  /** Initialize the solver with matrix \\a A for further \\c Ax=b solving.\n    * \n    * This constructor is a shortcut for the default constructor followed\n    * by a call to compute().\n    * \n    * \\warning this class stores a reference to the matrix A as well as some\n    * precomputed values that depend on it. Therefore, if \\a A is changed\n    * this class becomes invalid. 
Call compute() to update it with the new\n    * matrix A, or modify a copy of A.\n    */\n  template<typename MatrixDerived>\n  explicit ConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}\n\n  ~ConjugateGradient() {}\n\n  /** \\internal */\n  template<typename Rhs,typename Dest>\n  void _solve_with_guess_impl(const Rhs& b, Dest& x) const\n  {\n    typedef typename Base::MatrixWrapper MatrixWrapper;\n    typedef typename Base::ActualMatrixType ActualMatrixType;\n    enum {\n      TransposeInput  =   (!MatrixWrapper::MatrixFree)\n                      &&  (UpLo==(Lower|Upper))\n                      &&  (!MatrixType::IsRowMajor)\n                      &&  (!NumTraits<Scalar>::IsComplex)\n    };\n    typedef typename internal::conditional<TransposeInput,Transpose<const ActualMatrixType>, ActualMatrixType const&>::type RowMajorWrapper;\n    EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY);\n    typedef typename internal::conditional<UpLo==(Lower|Upper),\n                                           RowMajorWrapper,\n                                           typename MatrixWrapper::template ConstSelfAdjointViewReturnType<UpLo>::Type\n                                          >::type SelfAdjointWrapper;\n    m_iterations = Base::maxIterations();\n    m_error = Base::m_tolerance;\n\n    for(Index j=0; j<b.cols(); ++j)\n    {\n      m_iterations = Base::maxIterations();\n      m_error = Base::m_tolerance;\n\n      typename Dest::ColXpr xj(x,j);\n      RowMajorWrapper row_mat(matrix());\n      internal::conjugate_gradient(SelfAdjointWrapper(row_mat), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);\n    }\n\n    m_isInitialized = true;\n    m_info = m_error <= Base::m_tolerance ? 
Success : NoConvergence;\n  }\n  \n  /** \\internal */\n  using Base::_solve_impl;\n  template<typename Rhs,typename Dest>\n  void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const\n  {\n    x.setZero();\n    _solve_with_guess_impl(b.derived(),x);\n  }\n\nprotected:\n\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_CONJUGATE_GRADIENT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/IterativeLinearSolvers/IncompleteCholesky.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_INCOMPLETE_CHOlESKY_H\n#define EIGEN_INCOMPLETE_CHOlESKY_H\n\n#include <vector>\n#include <list>\n\nnamespace Eigen {  \n/** \n  * \\brief Modified Incomplete Cholesky with dual threshold\n  *\n  * References : C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with\n  *              Limited memory, SIAM J. Sci. Comput.  21(1), pp. 24-45, 1999\n  *\n  * \\tparam Scalar the scalar type of the input matrices\n  * \\tparam _UpLo The triangular part that will be used for the computations. It can be Lower\n    *               or Upper. Default is Lower.\n  * \\tparam _OrderingType The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. Default is AMDOrdering<int>,\n  *                       unless EIGEN_MPL2_ONLY is defined, in which case the default is NaturalOrdering<int>.\n  *\n  * \\implsparsesolverconcept\n  *\n  * It performs the following incomplete factorization: \\f$ S P A P' S \\approx L L' \\f$\n  * where L is a lower triangular factor, S is a diagonal scaling matrix, and P is a\n  * fill-in reducing permutation as computed by the ordering method.\n  *\n  * \\b Shifting \\b strategy: Let \\f$ B = S P A P' S \\f$  be the scaled matrix on which the factorization is carried out,\n  * and \\f$ \\beta \\f$ be the minimum value of the diagonal. If \\f$ \\beta > 0 \\f$ then, the factorization is directly performed\n  * on the matrix B. 
Otherwise, the factorization is performed on the shifted matrix \\f$ B + (\\sigma+|\\beta|) I \\f$ where\n  * \\f$ \\sigma \\f$ is the initial shift value as returned and set by setInitialShift() method. The default value is \\f$ \\sigma = 10^{-3} \\f$.\n  * If the factorization fails, then the shift is doubled until it succeeds or a maximum of ten attempts. If it still fails, as returned by\n  * the info() method, then you can either increase the initial shift, or better use another preconditioning technique.\n  *\n  */\ntemplate <typename Scalar, int _UpLo = Lower, typename _OrderingType =\n#ifndef EIGEN_MPL2_ONLY\nAMDOrdering<int>\n#else\nNaturalOrdering<int>\n#endif\n>\nclass IncompleteCholesky : public SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> >\n{\n  protected:\n    typedef SparseSolverBase<IncompleteCholesky<Scalar,_UpLo,_OrderingType> > Base;\n    using Base::m_isInitialized;\n  public:\n    typedef typename NumTraits<Scalar>::Real RealScalar; \n    typedef _OrderingType OrderingType;\n    typedef typename OrderingType::PermutationType PermutationType;\n    typedef typename PermutationType::StorageIndex StorageIndex; \n    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> FactorType;\n    typedef Matrix<Scalar,Dynamic,1> VectorSx;\n    typedef Matrix<RealScalar,Dynamic,1> VectorRx;\n    typedef Matrix<StorageIndex,Dynamic, 1> VectorIx;\n    typedef std::vector<std::list<StorageIndex> > VectorList; \n    enum { UpLo = _UpLo };\n    enum {\n      ColsAtCompileTime = Dynamic,\n      MaxColsAtCompileTime = Dynamic\n    };\n  public:\n\n    /** Default constructor leaving the object in a partly non-initialized stage.\n      *\n      * You must call compute() or the pair analyzePattern()/factorize() to make it valid.\n      *\n      * \\sa IncompleteCholesky(const MatrixType&)\n      */\n    IncompleteCholesky() : m_initialShift(1e-3),m_factorizationIsOk(false) {}\n    \n    /** Constructor computing the incomplete factorization for the given 
matrix \\a matrix.\n      */\n    template<typename MatrixType>\n    IncompleteCholesky(const MatrixType& matrix) : m_initialShift(1e-3),m_factorizationIsOk(false)\n    {\n      compute(matrix);\n    }\n    \n    /** \\returns number of rows of the factored matrix */\n    Index rows() const { return m_L.rows(); }\n    \n    /** \\returns number of columns of the factored matrix */\n    Index cols() const { return m_L.cols(); }\n    \n\n    /** \\brief Reports whether previous computation was successful.\n      *\n      * It triggers an assertion if \\c *this has not been initialized through the respective constructor,\n      * or a call to compute() or analyzePattern().\n      *\n      * \\returns \\c Success if computation was successful,\n      *          \\c NumericalIssue if the matrix appears to be negative.\n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"IncompleteCholesky is not initialized.\");\n      return m_info;\n    }\n    \n    /** \\brief Set the initial shift parameter \\f$ \\sigma \\f$.\n      */\n    void setInitialShift(RealScalar shift) { m_initialShift = shift; }\n    \n    /** \\brief Computes the fill reducing permutation vector using the sparsity pattern of \\a mat\n      */\n    template<typename MatrixType>\n    void analyzePattern(const MatrixType& mat)\n    {\n      OrderingType ord; \n      PermutationType pinv;\n      ord(mat.template selfadjointView<UpLo>(), pinv); \n      if(pinv.size()>0) m_perm = pinv.inverse();\n      else              m_perm.resize(0);\n      m_L.resize(mat.rows(), mat.cols());\n      m_analysisIsOk = true;\n      m_isInitialized = true;\n      m_info = Success;\n    }\n    \n    /** \\brief Performs the numerical factorization of the input matrix \\a mat\n      *\n      * The method analyzePattern() or compute() must have been called beforehand\n      * with a matrix having the same pattern.\n      *\n      * \\sa compute(), analyzePattern()\n      */\n    
template<typename MatrixType>\n    void factorize(const MatrixType& mat);\n    \n    /** Computes or re-computes the incomplete Cholesky factorization of the input matrix \\a mat\n      *\n      * It is a shortcut for a sequential call to the analyzePattern() and factorize() methods.\n      *\n      * \\sa analyzePattern(), factorize()\n      */\n    template<typename MatrixType>\n    void compute(const MatrixType& mat)\n    {\n      analyzePattern(mat);\n      factorize(mat);\n    }\n    \n    // internal\n    template<typename Rhs, typename Dest>\n    void _solve_impl(const Rhs& b, Dest& x) const\n    {\n      eigen_assert(m_factorizationIsOk && \"factorize() should be called first\");\n      if (m_perm.rows() == b.rows())  x = m_perm * b;\n      else                            x = b;\n      x = m_scale.asDiagonal() * x;\n      x = m_L.template triangularView<Lower>().solve(x);\n      x = m_L.adjoint().template triangularView<Upper>().solve(x);\n      x = m_scale.asDiagonal() * x;\n      if (m_perm.rows() == b.rows())\n        x = m_perm.inverse() * x;\n    }\n\n    /** \\returns the sparse lower triangular factor L */\n    const FactorType& matrixL() const { eigen_assert(\"m_factorizationIsOk\"); return m_L; }\n\n    /** \\returns a vector representing the scaling factor S */\n    const VectorRx& scalingS() const { eigen_assert(\"m_factorizationIsOk\"); return m_scale; }\n\n    /** \\returns the fill-in reducing permutation P (can be empty for a natural ordering) */\n    const PermutationType& permutationP() const { eigen_assert(\"m_analysisIsOk\"); return m_perm; }\n\n  protected:\n    FactorType m_L;              // The lower part stored in CSC\n    VectorRx m_scale;            // The vector for scaling the matrix \n    RealScalar m_initialShift;   // The initial shift parameter\n    bool m_analysisIsOk; \n    bool m_factorizationIsOk; \n    ComputationInfo m_info;\n    PermutationType m_perm; \n\n  private:\n    inline void updateList(Ref<const VectorIx> 
colPtr, Ref<VectorIx> rowIdx, Ref<VectorSx> vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol); \n}; \n\n// Based on the following paper:\n//   C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with\n//   Limited memory, SIAM J. Sci. Comput.  21(1), pp. 24-45, 1999\n//   http://ftp.mcs.anl.gov/pub/tech_reports/reports/P682.pdf\ntemplate<typename Scalar, int _UpLo, typename OrderingType>\ntemplate<typename _MatrixType>\nvoid IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType& mat)\n{\n  using std::sqrt;\n  eigen_assert(m_analysisIsOk && \"analyzePattern() should be called first\"); \n    \n  // Dropping strategy : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. Other strategies will be added\n  \n  // Apply the fill-reducing permutation computed in analyzePattern()\n  if (m_perm.rows() == mat.rows() ) // To detect the null permutation\n  {\n    // The temporary is needed to make sure that the diagonal entry is properly sorted\n    FactorType tmp(mat.rows(), mat.cols());\n    tmp = mat.template selfadjointView<_UpLo>().twistedBy(m_perm);\n    m_L.template selfadjointView<Lower>() = tmp.template selfadjointView<Lower>();\n  }\n  else\n  {\n    m_L.template selfadjointView<Lower>() = mat.template selfadjointView<_UpLo>();\n  }\n  \n  Index n = m_L.cols(); \n  Index nnz = m_L.nonZeros();\n  Map<VectorSx> vals(m_L.valuePtr(), nnz);         //values\n  Map<VectorIx> rowIdx(m_L.innerIndexPtr(), nnz);  //Row indices\n  Map<VectorIx> colPtr( m_L.outerIndexPtr(), n+1); // Pointer to the beginning of each row\n  VectorIx firstElt(n-1); // for each j, points to the next entry in vals that will be used in the factorization\n  VectorList listCol(n);  // listCol(j) is a linked list of columns to update column j\n  VectorSx col_vals(n);   // Store a  nonzero values in each column\n  VectorIx col_irow(n);   // Row indices of nonzero elements in each 
column\n  VectorIx col_pattern(n);\n  col_pattern.fill(-1);\n  StorageIndex col_nnz;\n  \n  \n  // Computes the scaling factors \n  m_scale.resize(n);\n  m_scale.setZero();\n  for (Index j = 0; j < n; j++)\n    for (Index k = colPtr[j]; k < colPtr[j+1]; k++)\n    {\n      m_scale(j) += numext::abs2(vals(k));\n      if(rowIdx[k]!=j)\n        m_scale(rowIdx[k]) += numext::abs2(vals(k));\n    }\n  \n  m_scale = m_scale.cwiseSqrt().cwiseSqrt();\n\n  for (Index j = 0; j < n; ++j)\n    if(m_scale(j)>(std::numeric_limits<RealScalar>::min)())\n      m_scale(j) = RealScalar(1)/m_scale(j);\n    else\n      m_scale(j) = 1;\n\n  // TODO disable scaling if not needed, i.e., if it is roughly uniform? (this will make solve() faster)\n  \n  // Scale and compute the shift for the matrix \n  RealScalar mindiag = NumTraits<RealScalar>::highest();\n  for (Index j = 0; j < n; j++)\n  {\n    for (Index k = colPtr[j]; k < colPtr[j+1]; k++)\n      vals[k] *= (m_scale(j)*m_scale(rowIdx[k]));\n    eigen_internal_assert(rowIdx[colPtr[j]]==j && \"IncompleteCholesky: only the lower triangular part must be stored\");\n    mindiag = numext::mini(numext::real(vals[colPtr[j]]), mindiag);\n  }\n\n  FactorType L_save = m_L;\n  \n  RealScalar shift = 0;\n  if(mindiag <= RealScalar(0.))\n    shift = m_initialShift - mindiag;\n\n  m_info = NumericalIssue;\n\n  // Try to perform the incomplete factorization using the current shift\n  int iter = 0;\n  do\n  {\n    // Apply the shift to the diagonal elements of the matrix\n    for (Index j = 0; j < n; j++)\n      vals[colPtr[j]] += shift;\n\n    // jki version of the Cholesky factorization\n    Index j=0;\n    for (; j < n; ++j)\n    {\n      // Left-looking factorization of the j-th column\n      // First, load the j-th column into col_vals\n      Scalar diag = vals[colPtr[j]];  // It is assumed that only the lower part is stored\n      col_nnz = 0;\n      for (Index i = colPtr[j] + 1; i < colPtr[j+1]; i++)\n      {\n        StorageIndex l = rowIdx[i];\n 
       col_vals(col_nnz) = vals[i];\n        col_irow(col_nnz) = l;\n        col_pattern(l) = col_nnz;\n        col_nnz++;\n      }\n      {\n        typename std::list<StorageIndex>::iterator k;\n        // Browse all previous columns that will update column j\n        for(k = listCol[j].begin(); k != listCol[j].end(); k++)\n        {\n          Index jk = firstElt(*k); // First element to use in the column\n          eigen_internal_assert(rowIdx[jk]==j);\n          Scalar v_j_jk = numext::conj(vals[jk]);\n\n          jk += 1;\n          for (Index i = jk; i < colPtr[*k+1]; i++)\n          {\n            StorageIndex l = rowIdx[i];\n            if(col_pattern[l]<0)\n            {\n              col_vals(col_nnz) = vals[i] * v_j_jk;\n              col_irow[col_nnz] = l;\n              col_pattern(l) = col_nnz;\n              col_nnz++;\n            }\n            else\n              col_vals(col_pattern[l]) -= vals[i] * v_j_jk;\n          }\n          updateList(colPtr,rowIdx,vals, *k, jk, firstElt, listCol);\n        }\n      }\n\n      // Scale the current column\n      if(numext::real(diag) <= 0)\n      {\n        if(++iter>=10)\n          return;\n\n        // increase shift\n        shift = numext::maxi(m_initialShift,RealScalar(2)*shift);\n        // restore m_L, col_pattern, and listCol\n        vals = Map<const VectorSx>(L_save.valuePtr(), nnz);\n        rowIdx = Map<const VectorIx>(L_save.innerIndexPtr(), nnz);\n        colPtr = Map<const VectorIx>(L_save.outerIndexPtr(), n+1);\n        col_pattern.fill(-1);\n        for(Index i=0; i<n; ++i)\n          listCol[i].clear();\n\n        break;\n      }\n\n      RealScalar rdiag = sqrt(numext::real(diag));\n      vals[colPtr[j]] = rdiag;\n      for (Index k = 0; k<col_nnz; ++k)\n      {\n        Index i = col_irow[k];\n        //Scale\n        col_vals(k) /= rdiag;\n        //Update the remaining diagonals with col_vals\n        vals[colPtr[i]] -= numext::abs2(col_vals(k));\n      }\n      // Select the largest 
p elements\n      // p is the original number of elements in the column (without the diagonal)\n      Index p = colPtr[j+1] - colPtr[j] - 1 ;\n      Ref<VectorSx> cvals = col_vals.head(col_nnz);\n      Ref<VectorIx> cirow = col_irow.head(col_nnz);\n      internal::QuickSplit(cvals,cirow, p);\n      // Insert the largest p elements in the matrix\n      Index cpt = 0;\n      for (Index i = colPtr[j]+1; i < colPtr[j+1]; i++)\n      {\n        vals[i] = col_vals(cpt);\n        rowIdx[i] = col_irow(cpt);\n        // restore col_pattern:\n        col_pattern(col_irow(cpt)) = -1;\n        cpt++;\n      }\n      // Get the first smallest row index and put it after the diagonal element\n      Index jk = colPtr(j)+1;\n      updateList(colPtr,rowIdx,vals,j,jk,firstElt,listCol);\n    }\n\n    if(j==n)\n    {\n      m_factorizationIsOk = true;\n      m_info = Success;\n    }\n  } while(m_info!=Success);\n}\n\ntemplate<typename Scalar, int _UpLo, typename OrderingType>\ninline void IncompleteCholesky<Scalar,_UpLo, OrderingType>::updateList(Ref<const VectorIx> colPtr, Ref<VectorIx> rowIdx, Ref<VectorSx> vals, const Index& col, const Index& jk, VectorIx& firstElt, VectorList& listCol)\n{\n  if (jk < colPtr(col+1) )\n  {\n    Index p = colPtr(col+1) - jk;\n    Index minpos; \n    rowIdx.segment(jk,p).minCoeff(&minpos);\n    minpos += jk;\n    if (rowIdx(minpos) != rowIdx(jk))\n    {\n      //Swap\n      std::swap(rowIdx(jk),rowIdx(minpos));\n      std::swap(vals(jk),vals(minpos));\n    }\n    firstElt(col) = internal::convert_index<StorageIndex,Index>(jk);\n    listCol[rowIdx(jk)].push_back(internal::convert_index<StorageIndex,Index>(col));\n  }\n}\n\n} // end namespace Eigen \n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_INCOMPLETE_LUT_H\n#define EIGEN_INCOMPLETE_LUT_H\n\n\nnamespace Eigen { \n\nnamespace internal {\n    \n/** \\internal\n  * Compute a quick-sort split of a vector \n  * On output, the vector row is permuted such that its elements satisfy\n  * abs(row(i)) >= abs(row(ncut)) if i<ncut\n  * abs(row(i)) <= abs(row(ncut)) if i>ncut \n  * \\param row The vector of values\n  * \\param ind The array of index for the elements in @p row\n  * \\param ncut  The number of largest elements to keep\n  **/ \ntemplate <typename VectorV, typename VectorI>\nIndex QuickSplit(VectorV &row, VectorI &ind, Index ncut)\n{\n  typedef typename VectorV::RealScalar RealScalar;\n  using std::swap;\n  using std::abs;\n  Index mid;\n  Index n = row.size(); /* length of the vector */\n  Index first, last ;\n  \n  ncut--; /* to fit the zero-based indices */\n  first = 0; \n  last = n-1; \n  if (ncut < first || ncut > last ) return 0;\n  \n  do {\n    mid = first; \n    RealScalar abskey = abs(row(mid)); \n    for (Index j = first + 1; j <= last; j++) {\n      if ( abs(row(j)) > abskey) {\n        ++mid;\n        swap(row(mid), row(j));\n        swap(ind(mid), ind(j));\n      }\n    }\n    /* Interchange for the pivot element */\n    swap(row(mid), row(first));\n    swap(ind(mid), ind(first));\n    \n    if (mid > ncut) last = mid - 1;\n    else if (mid < ncut ) first = mid + 1; \n  } while (mid != ncut );\n  \n  return 0; /* mid is equal to ncut */ \n}\n\n}// end namespace internal\n\n/** \\ingroup IterativeLinearSolvers_Module\n  
* \\class IncompleteLUT\n  * \\brief Incomplete LU factorization with dual-threshold strategy\n  *\n  * \\implsparsesolverconcept\n  *\n  * During the numerical factorization, two dropping rules are used :\n  *  1) any element whose magnitude is less than some tolerance is dropped.\n  *    This tolerance is obtained by multiplying the input tolerance @p droptol \n  *    by the average magnitude of all the original elements in the current row.\n  *  2) After the elimination of the row, only the @p fill largest elements in \n  *    the L part and the @p fill largest elements in the U part are kept \n  *    (in addition to the diagonal element ). Note that @p fill is computed from \n  *    the input parameter @p fillfactor which is used as the ratio to control the fill_in \n  *    relative to the initial number of nonzero elements.\n  * \n  * The two extreme cases are when @p droptol=0 (to keep all the @p fill*2 largest elements)\n  * and when @p fill=n/2 with @p droptol being different to zero. 
\n  * \n  * References : Yousef Saad, ILUT: A dual threshold incomplete LU factorization, \n  *              Numerical Linear Algebra with Applications, 1(4), pp 387-402, 1994.\n  * \n  * NOTE : The following implementation is derived from the ILUT implementation\n  * in the SPARSKIT package, Copyright (C) 2005, the Regents of the University of Minnesota \n  *  released under the terms of the GNU LGPL: \n  *    http://www-users.cs.umn.edu/~saad/software/SPARSKIT/README\n  * However, Yousef Saad gave us permission to relicense his ILUT code to MPL2.\n  * See the Eigen mailing list archive, thread: ILUT, date: July 8, 2012:\n  *   http://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2012/07/msg00064.html\n  * alternatively, on GMANE:\n  *   http://comments.gmane.org/gmane.comp.lib.eigen/3302\n  */\ntemplate <typename _Scalar, typename _StorageIndex = int>\nclass IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar, _StorageIndex> >\n{\n  protected:\n    typedef SparseSolverBase<IncompleteLUT> Base;\n    using Base::m_isInitialized;\n  public:\n    typedef _Scalar Scalar;\n    typedef _StorageIndex StorageIndex;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n    typedef Matrix<Scalar,Dynamic,1> Vector;\n    typedef Matrix<StorageIndex,Dynamic,1> VectorI;\n    typedef SparseMatrix<Scalar,RowMajor,StorageIndex> FactorType;\n\n    enum {\n      ColsAtCompileTime = Dynamic,\n      MaxColsAtCompileTime = Dynamic\n    };\n\n  public:\n    \n    IncompleteLUT()\n      : m_droptol(NumTraits<Scalar>::dummy_precision()), m_fillfactor(10),\n        m_analysisIsOk(false), m_factorizationIsOk(false)\n    {}\n    \n    template<typename MatrixType>\n    explicit IncompleteLUT(const MatrixType& mat, const RealScalar& droptol=NumTraits<Scalar>::dummy_precision(), int fillfactor = 10)\n      : m_droptol(droptol),m_fillfactor(fillfactor),\n        m_analysisIsOk(false),m_factorizationIsOk(false)\n    {\n      eigen_assert(fillfactor != 0);\n      compute(mat); 
\n    }\n    \n    Index rows() const { return m_lu.rows(); }\n    \n    Index cols() const { return m_lu.cols(); }\n\n    /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was successful,\n      *          \\c NumericalIssue if the matrix appears to be negative.\n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"IncompleteLUT is not initialized.\");\n      return m_info;\n    }\n    \n    template<typename MatrixType>\n    void analyzePattern(const MatrixType& amat);\n    \n    template<typename MatrixType>\n    void factorize(const MatrixType& amat);\n    \n    /**\n      * Compute an incomplete LU factorization with dual threshold on the matrix mat\n      * No pivoting is done in this version\n      * \n      **/\n    template<typename MatrixType>\n    IncompleteLUT& compute(const MatrixType& amat)\n    {\n      analyzePattern(amat); \n      factorize(amat);\n      return *this;\n    }\n\n    void setDroptol(const RealScalar& droptol); \n    void setFillfactor(int fillfactor); \n    \n    template<typename Rhs, typename Dest>\n    void _solve_impl(const Rhs& b, Dest& x) const\n    {\n      x = m_Pinv * b;\n      x = m_lu.template triangularView<UnitLower>().solve(x);\n      x = m_lu.template triangularView<Upper>().solve(x);\n      x = m_P * x; \n    }\n\nprotected:\n\n    /** keeps off-diagonal entries; drops diagonal entries */\n    struct keep_diag {\n      inline bool operator() (const Index& row, const Index& col, const Scalar&) const\n      {\n        return row!=col;\n      }\n    };\n\nprotected:\n\n    FactorType m_lu;\n    RealScalar m_droptol;\n    int m_fillfactor;\n    bool m_analysisIsOk;\n    bool m_factorizationIsOk;\n    ComputationInfo m_info;\n    PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_P;     // Fill-reducing permutation\n    PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_Pinv;  // Inverse permutation\n};\n\n/**\n * Set 
control parameter droptol\n *  \\param droptol   Drop any element whose magnitude is less than this tolerance \n **/ \ntemplate<typename Scalar, typename StorageIndex>\nvoid IncompleteLUT<Scalar,StorageIndex>::setDroptol(const RealScalar& droptol)\n{\n  this->m_droptol = droptol;   \n}\n\n/**\n * Set control parameter fillfactor\n * \\param fillfactor  This is used to compute the  number @p fill_in of largest elements to keep on each row. \n **/ \ntemplate<typename Scalar, typename StorageIndex>\nvoid IncompleteLUT<Scalar,StorageIndex>::setFillfactor(int fillfactor)\n{\n  this->m_fillfactor = fillfactor;   \n}\n\ntemplate <typename Scalar, typename StorageIndex>\ntemplate<typename _MatrixType>\nvoid IncompleteLUT<Scalar,StorageIndex>::analyzePattern(const _MatrixType& amat)\n{\n  // Compute the Fill-reducing permutation\n  // Since ILUT does not perform any numerical pivoting,\n  // it is highly preferable to keep the diagonal through symmetric permutations.\n#ifndef EIGEN_MPL2_ONLY\n  // To this end, let's symmetrize the pattern and perform AMD on it.\n  SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;\n  SparseMatrix<Scalar,ColMajor, StorageIndex> mat2 = amat.transpose();\n  // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.\n  //       on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered...\n  SparseMatrix<Scalar,ColMajor, StorageIndex> AtA = mat2 + mat1;\n  AMDOrdering<StorageIndex> ordering;\n  ordering(AtA,m_P);\n  m_Pinv  = m_P.inverse(); // cache the inverse permutation\n#else\n  // If AMD is not available, (MPL2-only), then let's use the slower COLAMD routine.\n  SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;\n  COLAMDOrdering<StorageIndex> ordering;\n  ordering(mat1,m_Pinv);\n  m_P = m_Pinv.inverse();\n#endif\n\n  m_analysisIsOk = true;\n  m_factorizationIsOk = false;\n  m_isInitialized = true;\n}\n\ntemplate <typename Scalar, typename StorageIndex>\ntemplate<typename 
_MatrixType>\nvoid IncompleteLUT<Scalar,StorageIndex>::factorize(const _MatrixType& amat)\n{\n  using std::sqrt;\n  using std::swap;\n  using std::abs;\n  using internal::convert_index;\n\n  eigen_assert((amat.rows() == amat.cols()) && \"The factorization should be done on a square matrix\");\n  Index n = amat.cols();  // Size of the matrix\n  m_lu.resize(n,n);\n  // Declare Working vectors and variables\n  Vector u(n) ;     // real values of the row -- maximum size is n --\n  VectorI ju(n);   // column position of the values in u -- maximum size  is n\n  VectorI jr(n);   // Indicate the position of the nonzero elements in the vector u -- A zero location is indicated by -1\n\n  // Apply the fill-reducing permutation\n  eigen_assert(m_analysisIsOk && \"You must first call analyzePattern()\");\n  SparseMatrix<Scalar,RowMajor, StorageIndex> mat;\n  mat = amat.twistedBy(m_Pinv);\n\n  // Initialization\n  jr.fill(-1);\n  ju.fill(0);\n  u.fill(0);\n\n  // number of largest elements to keep in each row:\n  Index fill_in = (amat.nonZeros()*m_fillfactor)/n + 1;\n  if (fill_in > n) fill_in = n;\n\n  // number of largest nonzero elements to keep in the L and the U part of the current row:\n  Index nnzL = fill_in/2;\n  Index nnzU = nnzL;\n  m_lu.reserve(n * (nnzL + nnzU + 1));\n\n  // global loop over the rows of the sparse matrix\n  for (Index ii = 0; ii < n; ii++)\n  {\n    // 1 - copy the lower and the upper part of the row i of mat in the working vector u\n\n    Index sizeu = 1; // number of nonzero elements in the upper part of the current row\n    Index sizel = 0; // number of nonzero elements in the lower part of the current row\n    ju(ii)    = convert_index<StorageIndex>(ii);\n    u(ii)     = 0;\n    jr(ii)    = convert_index<StorageIndex>(ii);\n    RealScalar rownorm = 0;\n\n    typename FactorType::InnerIterator j_it(mat, ii); // Iterate through the current row ii\n    for (; j_it; ++j_it)\n    {\n      Index k = j_it.index();\n      if (k < ii)\n      {\n        // 
copy the lower part\n        ju(sizel) = convert_index<StorageIndex>(k);\n        u(sizel) = j_it.value();\n        jr(k) = convert_index<StorageIndex>(sizel);\n        ++sizel;\n      }\n      else if (k == ii)\n      {\n        u(ii) = j_it.value();\n      }\n      else\n      {\n        // copy the upper part\n        Index jpos = ii + sizeu;\n        ju(jpos) = convert_index<StorageIndex>(k);\n        u(jpos) = j_it.value();\n        jr(k) = convert_index<StorageIndex>(jpos);\n        ++sizeu;\n      }\n      rownorm += numext::abs2(j_it.value());\n    }\n\n    // 2 - detect possible zero row\n    if(rownorm==0)\n    {\n      m_info = NumericalIssue;\n      return;\n    }\n    // Take the 2-norm of the current row as a relative tolerance\n    rownorm = sqrt(rownorm);\n\n    // 3 - eliminate the previous nonzero rows\n    Index jj = 0;\n    Index len = 0;\n    while (jj < sizel)\n    {\n      // In order to eliminate in the correct order,\n      // we must select first the smallest column index among  ju(jj:sizel)\n      Index k;\n      Index minrow = ju.segment(jj,sizel-jj).minCoeff(&k); // k is relative to the segment\n      k += jj;\n      if (minrow != ju(jj))\n      {\n        // swap the two locations\n        Index j = ju(jj);\n        swap(ju(jj), ju(k));\n        jr(minrow) = convert_index<StorageIndex>(jj);\n        jr(j) = convert_index<StorageIndex>(k);\n        swap(u(jj), u(k));\n      }\n      // Reset this location\n      jr(minrow) = -1;\n\n      // Start elimination\n      typename FactorType::InnerIterator ki_it(m_lu, minrow);\n      while (ki_it && ki_it.index() < minrow) ++ki_it;\n      eigen_internal_assert(ki_it && ki_it.col()==minrow);\n      Scalar fact = u(jj) / ki_it.value();\n\n      // drop too small elements\n      if(abs(fact) <= m_droptol)\n      {\n        jj++;\n        continue;\n      }\n\n      // linear combination of the current row ii and the row minrow\n      ++ki_it;\n      for (; ki_it; ++ki_it)\n      {\n        Scalar 
prod = fact * ki_it.value();\n        Index j     = ki_it.index();\n        Index jpos  = jr(j);\n        if (jpos == -1) // fill-in element\n        {\n          Index newpos;\n          if (j >= ii) // dealing with the upper part\n          {\n            newpos = ii + sizeu;\n            sizeu++;\n            eigen_internal_assert(sizeu<=n);\n          }\n          else // dealing with the lower part\n          {\n            newpos = sizel;\n            sizel++;\n            eigen_internal_assert(sizel<=ii);\n          }\n          ju(newpos) = convert_index<StorageIndex>(j);\n          u(newpos) = -prod;\n          jr(j) = convert_index<StorageIndex>(newpos);\n        }\n        else\n          u(jpos) -= prod;\n      }\n      // store the pivot element\n      u(len)  = fact;\n      ju(len) = convert_index<StorageIndex>(minrow);\n      ++len;\n\n      jj++;\n    } // end of the elimination on the row ii\n\n    // reset the upper part of the pointer jr to zero\n    for(Index k = 0; k <sizeu; k++) jr(ju(ii+k)) = -1;\n\n    // 4 - partially sort and insert the elements in the m_lu matrix\n\n    // sort the L-part of the row\n    sizel = len;\n    len = (std::min)(sizel, nnzL);\n    typename Vector::SegmentReturnType ul(u.segment(0, sizel));\n    typename VectorI::SegmentReturnType jul(ju.segment(0, sizel));\n    internal::QuickSplit(ul, jul, len);\n\n    // store the largest m_fill elements of the L part\n    m_lu.startVec(ii);\n    for(Index k = 0; k < len; k++)\n      m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);\n\n    // store the diagonal element\n    // apply a shifting rule to avoid zero pivots (we are doing an incomplete factorization)\n    if (u(ii) == Scalar(0))\n      u(ii) = sqrt(m_droptol) * rownorm;\n    m_lu.insertBackByOuterInnerUnordered(ii, ii) = u(ii);\n\n    // sort the U-part of the row\n    // apply the dropping rule first\n    len = 0;\n    for(Index k = 1; k < sizeu; k++)\n    {\n      if(abs(u(ii+k)) > m_droptol * rownorm )\n     
 {\n        ++len;\n        u(ii + len)  = u(ii + k);\n        ju(ii + len) = ju(ii + k);\n      }\n    }\n    sizeu = len + 1; // +1 to take into account the diagonal element\n    len = (std::min)(sizeu, nnzU);\n    typename Vector::SegmentReturnType uu(u.segment(ii+1, sizeu-1));\n    typename VectorI::SegmentReturnType juu(ju.segment(ii+1, sizeu-1));\n    internal::QuickSplit(uu, juu, len);\n\n    // store the largest elements of the U part\n    for(Index k = ii + 1; k < ii + len; k++)\n      m_lu.insertBackByOuterInnerUnordered(ii,ju(k)) = u(k);\n  }\n  m_lu.finalize();\n  m_lu.makeCompressed();\n\n  m_factorizationIsOk = true;\n  m_info = Success;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_INCOMPLETE_LUT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/IterativeLinearSolvers/IterativeSolverBase.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ITERATIVE_SOLVER_BASE_H\n#define EIGEN_ITERATIVE_SOLVER_BASE_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename MatrixType>\nstruct is_ref_compatible_impl\n{\nprivate:\n  template <typename T0>\n  struct any_conversion\n  {\n    template <typename T> any_conversion(const volatile T&);\n    template <typename T> any_conversion(T&);\n  };\n  struct yes {int a[1];};\n  struct no  {int a[2];};\n\n  template<typename T>\n  static yes test(const Ref<const T>&, int);\n  template<typename T>\n  static no  test(any_conversion<T>, ...);\n\npublic:\n  static MatrixType ms_from;\n  enum { value = sizeof(test<MatrixType>(ms_from, 0))==sizeof(yes) };\n};\n\ntemplate<typename MatrixType>\nstruct is_ref_compatible\n{\n  enum { value = is_ref_compatible_impl<typename remove_all<MatrixType>::type>::value };\n};\n\ntemplate<typename MatrixType, bool MatrixFree = !internal::is_ref_compatible<MatrixType>::value>\nclass generic_matrix_wrapper;\n\n// We have an explicit matrix at hand, compatible with Ref<>\ntemplate<typename MatrixType>\nclass generic_matrix_wrapper<MatrixType,false>\n{\npublic:\n  typedef Ref<const MatrixType> ActualMatrixType;\n  template<int UpLo> struct ConstSelfAdjointViewReturnType {\n    typedef typename ActualMatrixType::template ConstSelfAdjointViewReturnType<UpLo>::Type Type;\n  };\n\n  enum {\n    MatrixFree = false\n  };\n\n  generic_matrix_wrapper()\n    : m_dummy(0,0), m_matrix(m_dummy)\n  {}\n\n  template<typename InputType>\n  generic_matrix_wrapper(const InputType &mat)\n    : m_matrix(mat)\n  {}\n\n  const ActualMatrixType& matrix() 
const\n  {\n    return m_matrix;\n  }\n\n  template<typename MatrixDerived>\n  void grab(const EigenBase<MatrixDerived> &mat)\n  {\n    m_matrix.~Ref<const MatrixType>();\n    ::new (&m_matrix) Ref<const MatrixType>(mat.derived());\n  }\n\n  void grab(const Ref<const MatrixType> &mat)\n  {\n    if(&(mat.derived()) != &m_matrix)\n    {\n      m_matrix.~Ref<const MatrixType>();\n      ::new (&m_matrix) Ref<const MatrixType>(mat);\n    }\n  }\n\nprotected:\n  MatrixType m_dummy; // used to default initialize the Ref<> object\n  ActualMatrixType m_matrix;\n};\n\n// MatrixType is not compatible with Ref<> -> matrix-free wrapper\ntemplate<typename MatrixType>\nclass generic_matrix_wrapper<MatrixType,true>\n{\npublic:\n  typedef MatrixType ActualMatrixType;\n  template<int UpLo> struct ConstSelfAdjointViewReturnType\n  {\n    typedef ActualMatrixType Type;\n  };\n\n  enum {\n    MatrixFree = true\n  };\n\n  generic_matrix_wrapper()\n    : mp_matrix(0)\n  {}\n\n  generic_matrix_wrapper(const MatrixType &mat)\n    : mp_matrix(&mat)\n  {}\n\n  const ActualMatrixType& matrix() const\n  {\n    return *mp_matrix;\n  }\n\n  void grab(const MatrixType &mat)\n  {\n    mp_matrix = &mat;\n  }\n\nprotected:\n  const ActualMatrixType *mp_matrix;\n};\n\n}\n\n/** \\ingroup IterativeLinearSolvers_Module\n  * \\brief Base class for linear iterative solvers\n  *\n  * \\sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner\n  */\ntemplate< typename Derived>\nclass IterativeSolverBase : public SparseSolverBase<Derived>\n{\nprotected:\n  typedef SparseSolverBase<Derived> Base;\n  using Base::m_isInitialized;\n  \npublic:\n  typedef typename internal::traits<Derived>::MatrixType MatrixType;\n  typedef typename internal::traits<Derived>::Preconditioner Preconditioner;\n  typedef typename MatrixType::Scalar Scalar;\n  typedef typename MatrixType::StorageIndex StorageIndex;\n  typedef typename MatrixType::RealScalar RealScalar;\n\n  enum {\n    ColsAtCompileTime = 
MatrixType::ColsAtCompileTime,\n    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n  };\n\npublic:\n\n  using Base::derived;\n\n  /** Default constructor. */\n  IterativeSolverBase()\n  {\n    init();\n  }\n\n  /** Initialize the solver with matrix \\a A for further \\c Ax=b solving.\n    * \n    * This constructor is a shortcut for the default constructor followed\n    * by a call to compute().\n    * \n    * \\warning this class stores a reference to the matrix A as well as some\n    * precomputed values that depend on it. Therefore, if \\a A is changed\n    * this class becomes invalid. Call compute() to update it with the new\n    * matrix A, or modify a copy of A.\n    */\n  template<typename MatrixDerived>\n  explicit IterativeSolverBase(const EigenBase<MatrixDerived>& A)\n    : m_matrixWrapper(A.derived())\n  {\n    init();\n    compute(matrix());\n  }\n\n  ~IterativeSolverBase() {}\n  \n  /** Initializes the iterative solver for the sparsity pattern of the matrix \\a A for further solving \\c Ax=b problems.\n    *\n    * Currently, this function mostly calls analyzePattern on the preconditioner. In the future\n    * we might, for instance, implement column reordering for faster matrix vector products.\n    */\n  template<typename MatrixDerived>\n  Derived& analyzePattern(const EigenBase<MatrixDerived>& A)\n  {\n    grab(A.derived());\n    m_preconditioner.analyzePattern(matrix());\n    m_isInitialized = true;\n    m_analysisIsOk = true;\n    m_info = m_preconditioner.info();\n    return derived();\n  }\n  \n  /** Initializes the iterative solver with the numerical values of the matrix \\a A for further solving \\c Ax=b problems.\n    *\n    * Currently, this function mostly calls factorize on the preconditioner.\n    *\n    * \\warning this class stores a reference to the matrix A as well as some\n    * precomputed values that depend on it. Therefore, if \\a A is changed\n    * this class becomes invalid. 
Call compute() to update it with the new\n    * matrix A, or modify a copy of A.\n    */\n  template<typename MatrixDerived>\n  Derived& factorize(const EigenBase<MatrixDerived>& A)\n  {\n    eigen_assert(m_analysisIsOk && \"You must first call analyzePattern()\"); \n    grab(A.derived());\n    m_preconditioner.factorize(matrix());\n    m_factorizationIsOk = true;\n    m_info = m_preconditioner.info();\n    return derived();\n  }\n\n  /** Initializes the iterative solver with the matrix \\a A for further solving \\c Ax=b problems.\n    *\n    * Currently, this function mostly initializes/computes the preconditioner. In the future\n    * we might, for instance, implement column reordering for faster matrix vector products.\n    *\n    * \\warning this class stores a reference to the matrix A as well as some\n    * precomputed values that depend on it. Therefore, if \\a A is changed\n    * this class becomes invalid. Call compute() to update it with the new\n    * matrix A, or modify a copy of A.\n    */\n  template<typename MatrixDerived>\n  Derived& compute(const EigenBase<MatrixDerived>& A)\n  {\n    grab(A.derived());\n    m_preconditioner.compute(matrix());\n    m_isInitialized = true;\n    m_analysisIsOk = true;\n    m_factorizationIsOk = true;\n    m_info = m_preconditioner.info();\n    return derived();\n  }\n\n  /** \\internal */\n  Index rows() const { return matrix().rows(); }\n\n  /** \\internal */\n  Index cols() const { return matrix().cols(); }\n\n  /** \\returns the tolerance threshold used by the stopping criteria.\n    * \\sa setTolerance()\n    */\n  RealScalar tolerance() const { return m_tolerance; }\n  \n  /** Sets the tolerance threshold used by the stopping criteria.\n    *\n    * This value is used as an upper bound to the relative residual error: |Ax-b|/|b|.\n    * The default value is the machine precision given by NumTraits<Scalar>::epsilon()\n    */\n  Derived& setTolerance(const RealScalar& tolerance)\n  {\n    m_tolerance = tolerance;\n 
   return derived();\n  }\n\n  /** \\returns a read-write reference to the preconditioner for custom configuration. */\n  Preconditioner& preconditioner() { return m_preconditioner; }\n  \n  /** \\returns a read-only reference to the preconditioner. */\n  const Preconditioner& preconditioner() const { return m_preconditioner; }\n\n  /** \\returns the max number of iterations.\n    * It is either the value setted by setMaxIterations or, by default,\n    * twice the number of columns of the matrix.\n    */\n  Index maxIterations() const\n  {\n    return (m_maxIterations<0) ? 2*matrix().cols() : m_maxIterations;\n  }\n  \n  /** Sets the max number of iterations.\n    * Default is twice the number of columns of the matrix.\n    */\n  Derived& setMaxIterations(Index maxIters)\n  {\n    m_maxIterations = maxIters;\n    return derived();\n  }\n\n  /** \\returns the number of iterations performed during the last solve */\n  Index iterations() const\n  {\n    eigen_assert(m_isInitialized && \"ConjugateGradient is not initialized.\");\n    return m_iterations;\n  }\n\n  /** \\returns the tolerance error reached during the last solve.\n    * It is a close approximation of the true relative residual error |Ax-b|/|b|.\n    */\n  RealScalar error() const\n  {\n    eigen_assert(m_isInitialized && \"ConjugateGradient is not initialized.\");\n    return m_error;\n  }\n\n  /** \\returns the solution x of \\f$ A x = b \\f$ using the current decomposition of A\n    * and \\a x0 as an initial solution.\n    *\n    * \\sa solve(), compute()\n    */\n  template<typename Rhs,typename Guess>\n  inline const SolveWithGuess<Derived, Rhs, Guess>\n  solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const\n  {\n    eigen_assert(m_isInitialized && \"Solver is not initialized.\");\n    eigen_assert(derived().rows()==b.rows() && \"solve(): invalid number of rows of the right hand side matrix b\");\n    return SolveWithGuess<Derived, Rhs, Guess>(derived(), b.derived(), x0);\n  }\n\n  /** 
\\returns Success if the iterations converged, and NoConvergence otherwise. */\n  ComputationInfo info() const\n  {\n    eigen_assert(m_isInitialized && \"IterativeSolverBase is not initialized.\");\n    return m_info;\n  }\n  \n  /** \\internal */\n  template<typename Rhs, typename DestDerived>\n  void _solve_impl(const Rhs& b, SparseMatrixBase<DestDerived> &aDest) const\n  {\n    eigen_assert(rows()==b.rows());\n    \n    Index rhsCols = b.cols();\n    Index size = b.rows();\n    DestDerived& dest(aDest.derived());\n    typedef typename DestDerived::Scalar DestScalar;\n    Eigen::Matrix<DestScalar,Dynamic,1> tb(size);\n    Eigen::Matrix<DestScalar,Dynamic,1> tx(cols());\n    // We do not directly fill dest because sparse expressions have to be free of aliasing issue.\n    // For non square least-square problems, b and dest might not have the same size whereas they might alias each-other.\n    typename DestDerived::PlainObject tmp(cols(),rhsCols);\n    for(Index k=0; k<rhsCols; ++k)\n    {\n      tb = b.col(k);\n      tx = derived().solve(tb);\n      tmp.col(k) = tx.sparseView(0);\n    }\n    dest.swap(tmp);\n  }\n\nprotected:\n  void init()\n  {\n    m_isInitialized = false;\n    m_analysisIsOk = false;\n    m_factorizationIsOk = false;\n    m_maxIterations = -1;\n    m_tolerance = NumTraits<Scalar>::epsilon();\n  }\n\n  typedef internal::generic_matrix_wrapper<MatrixType> MatrixWrapper;\n  typedef typename MatrixWrapper::ActualMatrixType ActualMatrixType;\n\n  const ActualMatrixType& matrix() const\n  {\n    return m_matrixWrapper.matrix();\n  }\n  \n  template<typename InputType>\n  void grab(const InputType &A)\n  {\n    m_matrixWrapper.grab(A);\n  }\n  \n  MatrixWrapper m_matrixWrapper;\n  Preconditioner m_preconditioner;\n\n  Index m_maxIterations;\n  RealScalar m_tolerance;\n  \n  mutable RealScalar m_error;\n  mutable Index m_iterations;\n  mutable ComputationInfo m_info;\n  mutable bool m_analysisIsOk, m_factorizationIsOk;\n};\n\n} // end namespace 
Eigen\n\n#endif // EIGEN_ITERATIVE_SOLVER_BASE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/IterativeLinearSolvers/LeastSquareConjugateGradient.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H\n#define EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/** \\internal Low-level conjugate gradient algorithm for least-square problems\n  * \\param mat The matrix A\n  * \\param rhs The right hand side vector b\n  * \\param x On input and initial solution, on output the computed solution.\n  * \\param precond A preconditioner being able to efficiently solve for an\n  *                approximation of A'Ax=b (regardless of b)\n  * \\param iters On input the max number of iteration, on output the number of performed iterations.\n  * \\param tol_error On input the tolerance error, on output an estimation of the relative error.\n  */\ntemplate<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>\nEIGEN_DONT_INLINE\nvoid least_square_conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,\n                                     const Preconditioner& precond, Index& iters,\n                                     typename Dest::RealScalar& tol_error)\n{\n  using std::sqrt;\n  using std::abs;\n  typedef typename Dest::RealScalar RealScalar;\n  typedef typename Dest::Scalar Scalar;\n  typedef Matrix<Scalar,Dynamic,1> VectorType;\n  \n  RealScalar tol = tol_error;\n  Index maxIters = iters;\n  \n  Index m = mat.rows(), n = mat.cols();\n\n  VectorType residual        = rhs - mat * x;\n  VectorType normal_residual = mat.adjoint() * residual;\n\n  RealScalar rhsNorm2 = (mat.adjoint()*rhs).squaredNorm();\n  if(rhsNorm2 == 0) \n  {\n    x.setZero();\n    iters = 0;\n    tol_error = 
0;\n    return;\n  }\n  RealScalar threshold = tol*tol*rhsNorm2;\n  RealScalar residualNorm2 = normal_residual.squaredNorm();\n  if (residualNorm2 < threshold)\n  {\n    iters = 0;\n    tol_error = sqrt(residualNorm2 / rhsNorm2);\n    return;\n  }\n  \n  VectorType p(n);\n  p = precond.solve(normal_residual);                         // initial search direction\n\n  VectorType z(n), tmp(m);\n  RealScalar absNew = numext::real(normal_residual.dot(p));  // the square of the absolute value of r scaled by invM\n  Index i = 0;\n  while(i < maxIters)\n  {\n    tmp.noalias() = mat * p;\n\n    Scalar alpha = absNew / tmp.squaredNorm();      // the amount we travel on dir\n    x += alpha * p;                                 // update solution\n    residual -= alpha * tmp;                        // update residual\n    normal_residual = mat.adjoint() * residual;     // update residual of the normal equation\n    \n    residualNorm2 = normal_residual.squaredNorm();\n    if(residualNorm2 < threshold)\n      break;\n    \n    z = precond.solve(normal_residual);             // approximately solve for \"A'A z = normal_residual\"\n\n    RealScalar absOld = absNew;\n    absNew = numext::real(normal_residual.dot(z));  // update the absolute value of r\n    RealScalar beta = absNew / absOld;              // calculate the Gram-Schmidt value used to create the new search direction\n    p = z + beta * p;                               // update search direction\n    i++;\n  }\n  tol_error = sqrt(residualNorm2 / rhsNorm2);\n  iters = i;\n}\n\n}\n\ntemplate< typename _MatrixType,\n          typename _Preconditioner = LeastSquareDiagonalPreconditioner<typename _MatrixType::Scalar> >\nclass LeastSquaresConjugateGradient;\n\nnamespace internal {\n\ntemplate< typename _MatrixType, typename _Preconditioner>\nstruct traits<LeastSquaresConjugateGradient<_MatrixType,_Preconditioner> >\n{\n  typedef _MatrixType MatrixType;\n  typedef _Preconditioner Preconditioner;\n};\n\n}\n\n/** \\ingroup 
IterativeLinearSolvers_Module\n  * \\brief A conjugate gradient solver for sparse (or dense) least-square problems\n  *\n  * This class allows to solve for A x = b linear problems using an iterative conjugate gradient algorithm.\n  * The matrix A can be non symmetric and rectangular, but the matrix A' A should be positive-definite to guaranty stability.\n  * Otherwise, the SparseLU or SparseQR classes might be preferable.\n  * The matrix A and the vectors x and b can be either dense or sparse.\n  *\n  * \\tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.\n  * \\tparam _Preconditioner the type of the preconditioner. Default is LeastSquareDiagonalPreconditioner\n  *\n  * \\implsparsesolverconcept\n  * \n  * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()\n  * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations\n  * and NumTraits<Scalar>::epsilon() for the tolerance.\n  * \n  * This class can be used as the direct solver classes. 
Here is a typical usage example:\n    \\code\n    int m=1000000, n = 10000;\n    VectorXd x(n), b(m);\n    SparseMatrix<double> A(m,n);\n    // fill A and b\n    LeastSquaresConjugateGradient<SparseMatrix<double> > lscg;\n    lscg.compute(A);\n    x = lscg.solve(b);\n    std::cout << \"#iterations:     \" << lscg.iterations() << std::endl;\n    std::cout << \"estimated error: \" << lscg.error()      << std::endl;\n    // update b, and solve again\n    x = lscg.solve(b);\n    \\endcode\n  * \n  * By default the iterations start with x=0 as an initial guess of the solution.\n  * One can control the start using the solveWithGuess() method.\n  * \n  * \\sa class ConjugateGradient, SparseLU, SparseQR\n  */\ntemplate< typename _MatrixType, typename _Preconditioner>\nclass LeastSquaresConjugateGradient : public IterativeSolverBase<LeastSquaresConjugateGradient<_MatrixType,_Preconditioner> >\n{\n  typedef IterativeSolverBase<LeastSquaresConjugateGradient> Base;\n  using Base::matrix;\n  using Base::m_error;\n  using Base::m_iterations;\n  using Base::m_info;\n  using Base::m_isInitialized;\npublic:\n  typedef _MatrixType MatrixType;\n  typedef typename MatrixType::Scalar Scalar;\n  typedef typename MatrixType::RealScalar RealScalar;\n  typedef _Preconditioner Preconditioner;\n\npublic:\n\n  /** Default constructor. */\n  LeastSquaresConjugateGradient() : Base() {}\n\n  /** Initialize the solver with matrix \\a A for further \\c Ax=b solving.\n    * \n    * This constructor is a shortcut for the default constructor followed\n    * by a call to compute().\n    * \n    * \\warning this class stores a reference to the matrix A as well as some\n    * precomputed values that depend on it. Therefore, if \\a A is changed\n    * this class becomes invalid. 
Call compute() to update it with the new\n    * matrix A, or modify a copy of A.\n    */\n  template<typename MatrixDerived>\n  explicit LeastSquaresConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}\n\n  ~LeastSquaresConjugateGradient() {}\n\n  /** \\internal */\n  template<typename Rhs,typename Dest>\n  void _solve_with_guess_impl(const Rhs& b, Dest& x) const\n  {\n    m_iterations = Base::maxIterations();\n    m_error = Base::m_tolerance;\n\n    for(Index j=0; j<b.cols(); ++j)\n    {\n      m_iterations = Base::maxIterations();\n      m_error = Base::m_tolerance;\n\n      typename Dest::ColXpr xj(x,j);\n      internal::least_square_conjugate_gradient(matrix(), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);\n    }\n\n    m_isInitialized = true;\n    m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;\n  }\n  \n  /** \\internal */\n  using Base::_solve_impl;\n  template<typename Rhs,typename Dest>\n  void _solve_impl(const MatrixBase<Rhs>& b, Dest& x) const\n  {\n    x.setZero();\n    _solve_with_guess_impl(b.derived(),x);\n  }\n\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_LEAST_SQUARE_CONJUGATE_GRADIENT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/IterativeLinearSolvers/SolveWithGuess.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SOLVEWITHGUESS_H\n#define EIGEN_SOLVEWITHGUESS_H\n\nnamespace Eigen {\n\ntemplate<typename Decomposition, typename RhsType, typename GuessType> class SolveWithGuess;\n  \n/** \\class SolveWithGuess\n  * \\ingroup IterativeLinearSolvers_Module\n  *\n  * \\brief Pseudo expression representing a solving operation\n  *\n  * \\tparam Decomposition the type of the matrix or decomposion object\n  * \\tparam Rhstype the type of the right-hand side\n  *\n  * This class represents an expression of A.solve(B)\n  * and most of the time this is the only way it is used.\n  *\n  */\nnamespace internal {\n\n\ntemplate<typename Decomposition, typename RhsType, typename GuessType>\nstruct traits<SolveWithGuess<Decomposition, RhsType, GuessType> >\n  : traits<Solve<Decomposition,RhsType> >\n{};\n\n}\n\n\ntemplate<typename Decomposition, typename RhsType, typename GuessType>\nclass SolveWithGuess : public internal::generic_xpr_base<SolveWithGuess<Decomposition,RhsType,GuessType>, MatrixXpr, typename internal::traits<RhsType>::StorageKind>::type\n{\npublic:\n  typedef typename internal::traits<SolveWithGuess>::Scalar Scalar;\n  typedef typename internal::traits<SolveWithGuess>::PlainObject PlainObject;\n  typedef typename internal::generic_xpr_base<SolveWithGuess<Decomposition,RhsType,GuessType>, MatrixXpr, typename internal::traits<RhsType>::StorageKind>::type Base;\n  typedef typename internal::ref_selector<SolveWithGuess>::type Nested;\n  \n  SolveWithGuess(const Decomposition &dec, const RhsType &rhs, const GuessType &guess)\n    : m_dec(dec), m_rhs(rhs), m_guess(guess)\n  {}\n  \n  
EIGEN_DEVICE_FUNC Index rows() const { return m_dec.cols(); }\n  EIGEN_DEVICE_FUNC Index cols() const { return m_rhs.cols(); }\n\n  EIGEN_DEVICE_FUNC const Decomposition& dec()   const { return m_dec; }\n  EIGEN_DEVICE_FUNC const RhsType&       rhs()   const { return m_rhs; }\n  EIGEN_DEVICE_FUNC const GuessType&     guess() const { return m_guess; }\n\nprotected:\n  const Decomposition &m_dec;\n  const RhsType       &m_rhs;\n  const GuessType     &m_guess;\n  \nprivate:\n  Scalar coeff(Index row, Index col) const;\n  Scalar coeff(Index i) const;\n};\n\nnamespace internal {\n\n// Evaluator of SolveWithGuess -> eval into a temporary\ntemplate<typename Decomposition, typename RhsType, typename GuessType>\nstruct evaluator<SolveWithGuess<Decomposition,RhsType, GuessType> >\n  : public evaluator<typename SolveWithGuess<Decomposition,RhsType,GuessType>::PlainObject>\n{\n  typedef SolveWithGuess<Decomposition,RhsType,GuessType> SolveType;\n  typedef typename SolveType::PlainObject PlainObject;\n  typedef evaluator<PlainObject> Base;\n\n  evaluator(const SolveType& solve)\n    : m_result(solve.rows(), solve.cols())\n  {\n    ::new (static_cast<Base*>(this)) Base(m_result);\n    m_result = solve.guess();\n    solve.dec()._solve_with_guess_impl(solve.rhs(), m_result);\n  }\n  \nprotected:  \n  PlainObject m_result;\n};\n\n// Specialization for \"dst = dec.solveWithGuess(rhs)\"\n// NOTE we need to specialize it for Dense2Dense to avoid ambiguous specialization error and a Sparse2Sparse specialization must exist somewhere\ntemplate<typename DstXprType, typename DecType, typename RhsType, typename GuessType, typename Scalar>\nstruct Assignment<DstXprType, SolveWithGuess<DecType,RhsType,GuessType>, internal::assign_op<Scalar,Scalar>, Dense2Dense>\n{\n  typedef SolveWithGuess<DecType,RhsType,GuessType> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = 
src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n\n    dst = src.guess();\n    src.dec()._solve_with_guess_impl(src.rhs(), dst/*, src.guess()*/);\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SOLVEWITHGUESS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/Jacobi/Jacobi.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_JACOBI_H\n#define EIGEN_JACOBI_H\n\nnamespace Eigen { \n\n/** \\ingroup Jacobi_Module\n  * \\jacobi_module\n  * \\class JacobiRotation\n  * \\brief Rotation given by a cosine-sine pair.\n  *\n  * This class represents a Jacobi or Givens rotation.\n  * This is a 2D rotation in the plane \\c J of angle \\f$ \\theta \\f$ defined by\n  * its cosine \\c c and sine \\c s as follow:\n  * \\f$ J = \\left ( \\begin{array}{cc} c & \\overline s \\\\ -s  & \\overline c \\end{array} \\right ) \\f$\n  *\n  * You can apply the respective counter-clockwise rotation to a column vector \\c v by\n  * applying its adjoint on the left: \\f$ v = J^* v \\f$ that translates to the following Eigen code:\n  * \\code\n  * v.applyOnTheLeft(J.adjoint());\n  * \\endcode\n  *\n  * \\sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()\n  */\ntemplate<typename Scalar> class JacobiRotation\n{\n  public:\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n\n    /** Default constructor without any initialization. */\n    JacobiRotation() {}\n\n    /** Construct a planar rotation from a cosine-sine pair (\\a c, \\c s). 
*/\n    JacobiRotation(const Scalar& c, const Scalar& s) : m_c(c), m_s(s) {}\n\n    Scalar& c() { return m_c; }\n    Scalar c() const { return m_c; }\n    Scalar& s() { return m_s; }\n    Scalar s() const { return m_s; }\n\n    /** Concatenates two planar rotation */\n    JacobiRotation operator*(const JacobiRotation& other)\n    {\n      using numext::conj;\n      return JacobiRotation(m_c * other.m_c - conj(m_s) * other.m_s,\n                            conj(m_c * conj(other.m_s) + conj(m_s) * conj(other.m_c)));\n    }\n\n    /** Returns the transposed transformation */\n    JacobiRotation transpose() const { using numext::conj; return JacobiRotation(m_c, -conj(m_s)); }\n\n    /** Returns the adjoint transformation */\n    JacobiRotation adjoint() const { using numext::conj; return JacobiRotation(conj(m_c), -m_s); }\n\n    template<typename Derived>\n    bool makeJacobi(const MatrixBase<Derived>&, Index p, Index q);\n    bool makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z);\n\n    void makeGivens(const Scalar& p, const Scalar& q, Scalar* z=0);\n\n  protected:\n    void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::true_type);\n    void makeGivens(const Scalar& p, const Scalar& q, Scalar* z, internal::false_type);\n\n    Scalar m_c, m_s;\n};\n\n/** Makes \\c *this as a Jacobi rotation \\a J such that applying \\a J on both the right and left sides of the selfadjoint 2x2 matrix\n  * \\f$ B = \\left ( \\begin{array}{cc} x & y \\\\ \\overline y & z \\end{array} \\right )\\f$ yields a diagonal matrix \\f$ A = J^* B J \\f$\n  *\n  * \\sa MatrixBase::makeJacobi(const MatrixBase<Derived>&, Index, Index), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()\n  */\ntemplate<typename Scalar>\nbool JacobiRotation<Scalar>::makeJacobi(const RealScalar& x, const Scalar& y, const RealScalar& z)\n{\n  using std::sqrt;\n  using std::abs;\n  typedef typename NumTraits<Scalar>::Real RealScalar;\n  RealScalar deno = 
RealScalar(2)*abs(y);\n  if(deno < (std::numeric_limits<RealScalar>::min)())\n  {\n    m_c = Scalar(1);\n    m_s = Scalar(0);\n    return false;\n  }\n  else\n  {\n    RealScalar tau = (x-z)/deno;\n    RealScalar w = sqrt(numext::abs2(tau) + RealScalar(1));\n    RealScalar t;\n    if(tau>RealScalar(0))\n    {\n      t = RealScalar(1) / (tau + w);\n    }\n    else\n    {\n      t = RealScalar(1) / (tau - w);\n    }\n    RealScalar sign_t = t > RealScalar(0) ? RealScalar(1) : RealScalar(-1);\n    RealScalar n = RealScalar(1) / sqrt(numext::abs2(t)+RealScalar(1));\n    m_s = - sign_t * (numext::conj(y) / abs(y)) * abs(t) * n;\n    m_c = n;\n    return true;\n  }\n}\n\n/** Makes \\c *this as a Jacobi rotation \\c J such that applying \\a J on both the right and left sides of the 2x2 selfadjoint matrix\n  * \\f$ B = \\left ( \\begin{array}{cc} \\text{this}_{pp} & \\text{this}_{pq} \\\\ (\\text{this}_{pq})^* & \\text{this}_{qq} \\end{array} \\right )\\f$ yields\n  * a diagonal matrix \\f$ A = J^* B J \\f$\n  *\n  * Example: \\include Jacobi_makeJacobi.cpp\n  * Output: \\verbinclude Jacobi_makeJacobi.out\n  *\n  * \\sa JacobiRotation::makeJacobi(RealScalar, Scalar, RealScalar), MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()\n  */\ntemplate<typename Scalar>\ntemplate<typename Derived>\ninline bool JacobiRotation<Scalar>::makeJacobi(const MatrixBase<Derived>& m, Index p, Index q)\n{\n  return makeJacobi(numext::real(m.coeff(p,p)), m.coeff(p,q), numext::real(m.coeff(q,q)));\n}\n\n/** Makes \\c *this as a Givens rotation \\c G such that applying \\f$ G^* \\f$ to the left of the vector\n  * \\f$ V = \\left ( \\begin{array}{c} p \\\\ q \\end{array} \\right )\\f$ yields:\n  * \\f$ G^* V = \\left ( \\begin{array}{c} r \\\\ 0 \\end{array} \\right )\\f$.\n  *\n  * The value of \\a z is returned if \\a z is not null (the default is null).\n  * Also note that G is built such that the cosine is always real.\n  *\n  * Example: \\include Jacobi_makeGivens.cpp\n  * Output: 
\\verbinclude Jacobi_makeGivens.out\n  *\n  * This function implements the continuous Givens rotation generation algorithm\n  * found in Anderson (2000), Discontinuous Plane Rotations and the Symmetric Eigenvalue Problem.\n  * LAPACK Working Note 150, University of Tennessee, UT-CS-00-454, December 4, 2000.\n  *\n  * \\sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()\n  */\ntemplate<typename Scalar>\nvoid JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* z)\n{\n  makeGivens(p, q, z, typename internal::conditional<NumTraits<Scalar>::IsComplex, internal::true_type, internal::false_type>::type());\n}\n\n\n// specialization for complexes\ntemplate<typename Scalar>\nvoid JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::true_type)\n{\n  using std::sqrt;\n  using std::abs;\n  using numext::conj;\n  \n  if(q==Scalar(0))\n  {\n    m_c = numext::real(p)<0 ? Scalar(-1) : Scalar(1);\n    m_s = 0;\n    if(r) *r = m_c * p;\n  }\n  else if(p==Scalar(0))\n  {\n    m_c = 0;\n    m_s = -q/abs(q);\n    if(r) *r = abs(q);\n  }\n  else\n  {\n    RealScalar p1 = numext::norm1(p);\n    RealScalar q1 = numext::norm1(q);\n    if(p1>=q1)\n    {\n      Scalar ps = p / p1;\n      RealScalar p2 = numext::abs2(ps);\n      Scalar qs = q / p1;\n      RealScalar q2 = numext::abs2(qs);\n\n      RealScalar u = sqrt(RealScalar(1) + q2/p2);\n      if(numext::real(p)<RealScalar(0))\n        u = -u;\n\n      m_c = Scalar(1)/u;\n      m_s = -qs*conj(ps)*(m_c/p2);\n      if(r) *r = p * u;\n    }\n    else\n    {\n      Scalar ps = p / q1;\n      RealScalar p2 = numext::abs2(ps);\n      Scalar qs = q / q1;\n      RealScalar q2 = numext::abs2(qs);\n\n      RealScalar u = q1 * sqrt(p2 + q2);\n      if(numext::real(p)<RealScalar(0))\n        u = -u;\n\n      p1 = abs(p);\n      ps = p/p1;\n      m_c = p1/u;\n      m_s = -conj(ps) * (q/u);\n      if(r) *r = ps * u;\n    }\n  }\n}\n\n// specialization for reals\ntemplate<typename 
Scalar>\nvoid JacobiRotation<Scalar>::makeGivens(const Scalar& p, const Scalar& q, Scalar* r, internal::false_type)\n{\n  using std::sqrt;\n  using std::abs;\n  if(q==Scalar(0))\n  {\n    m_c = p<Scalar(0) ? Scalar(-1) : Scalar(1);\n    m_s = Scalar(0);\n    if(r) *r = abs(p);\n  }\n  else if(p==Scalar(0))\n  {\n    m_c = Scalar(0);\n    m_s = q<Scalar(0) ? Scalar(1) : Scalar(-1);\n    if(r) *r = abs(q);\n  }\n  else if(abs(p) > abs(q))\n  {\n    Scalar t = q/p;\n    Scalar u = sqrt(Scalar(1) + numext::abs2(t));\n    if(p<Scalar(0))\n      u = -u;\n    m_c = Scalar(1)/u;\n    m_s = -t * m_c;\n    if(r) *r = p * u;\n  }\n  else\n  {\n    Scalar t = p/q;\n    Scalar u = sqrt(Scalar(1) + numext::abs2(t));\n    if(q<Scalar(0))\n      u = -u;\n    m_s = -Scalar(1)/u;\n    m_c = -t * m_s;\n    if(r) *r = q * u;\n  }\n\n}\n\n/****************************************************************************************\n*   Implementation of MatrixBase methods\n****************************************************************************************/\n\nnamespace internal {\n/** \\jacobi_module\n  * Applies the clock wise 2D rotation \\a j to the set of 2D vectors of cordinates \\a x and \\a y:\n  * \\f$ \\left ( \\begin{array}{cc} x \\\\ y \\end{array} \\right )  =  J \\left ( \\begin{array}{cc} x \\\\ y \\end{array} \\right ) \\f$\n  *\n  * \\sa MatrixBase::applyOnTheLeft(), MatrixBase::applyOnTheRight()\n  */\ntemplate<typename VectorX, typename VectorY, typename OtherScalar>\nvoid apply_rotation_in_the_plane(DenseBase<VectorX>& xpr_x, DenseBase<VectorY>& xpr_y, const JacobiRotation<OtherScalar>& j);\n}\n\n/** \\jacobi_module\n  * Applies the rotation in the plane \\a j to the rows \\a p and \\a q of \\c *this, i.e., it computes B = J * B,\n  * with \\f$ B = \\left ( \\begin{array}{cc} \\text{*this.row}(p) \\\\ \\text{*this.row}(q) \\end{array} \\right ) \\f$.\n  *\n  * \\sa class JacobiRotation, MatrixBase::applyOnTheRight(), internal::apply_rotation_in_the_plane()\n  
*/\ntemplate<typename Derived>\ntemplate<typename OtherScalar>\ninline void MatrixBase<Derived>::applyOnTheLeft(Index p, Index q, const JacobiRotation<OtherScalar>& j)\n{\n  RowXpr x(this->row(p));\n  RowXpr y(this->row(q));\n  internal::apply_rotation_in_the_plane(x, y, j);\n}\n\n/** \\ingroup Jacobi_Module\n  * Applies the rotation in the plane \\a j to the columns \\a p and \\a q of \\c *this, i.e., it computes B = B * J\n  * with \\f$ B = \\left ( \\begin{array}{cc} \\text{*this.col}(p) & \\text{*this.col}(q) \\end{array} \\right ) \\f$.\n  *\n  * \\sa class JacobiRotation, MatrixBase::applyOnTheLeft(), internal::apply_rotation_in_the_plane()\n  */\ntemplate<typename Derived>\ntemplate<typename OtherScalar>\ninline void MatrixBase<Derived>::applyOnTheRight(Index p, Index q, const JacobiRotation<OtherScalar>& j)\n{\n  ColXpr x(this->col(p));\n  ColXpr y(this->col(q));\n  internal::apply_rotation_in_the_plane(x, y, j.transpose());\n}\n\nnamespace internal {\ntemplate<typename VectorX, typename VectorY, typename OtherScalar>\nvoid /*EIGEN_DONT_INLINE*/ apply_rotation_in_the_plane(DenseBase<VectorX>& xpr_x, DenseBase<VectorY>& xpr_y, const JacobiRotation<OtherScalar>& j)\n{\n  typedef typename VectorX::Scalar Scalar;\n  enum { PacketSize = packet_traits<Scalar>::size };\n  typedef typename packet_traits<Scalar>::type Packet;\n  eigen_assert(xpr_x.size() == xpr_y.size());\n  Index size = xpr_x.size();\n  Index incrx = xpr_x.derived().innerStride();\n  Index incry = xpr_y.derived().innerStride();\n\n  Scalar* EIGEN_RESTRICT x = &xpr_x.derived().coeffRef(0);\n  Scalar* EIGEN_RESTRICT y = &xpr_y.derived().coeffRef(0);\n  \n  OtherScalar c = j.c();\n  OtherScalar s = j.s();\n  if (c==OtherScalar(1) && s==OtherScalar(0))\n    return;\n\n  /*** dynamic-size vectorized paths ***/\n\n  if(VectorX::SizeAtCompileTime == Dynamic &&\n    (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&\n    ((incrx==1 && incry==1) || PacketSize == 1))\n  {\n    // both vectors are 
sequentially stored in memory => vectorization\n    enum { Peeling = 2 };\n\n    Index alignedStart = internal::first_default_aligned(y, size);\n    Index alignedEnd = alignedStart + ((size-alignedStart)/PacketSize)*PacketSize;\n\n    const Packet pc = pset1<Packet>(c);\n    const Packet ps = pset1<Packet>(s);\n    conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;\n\n    for(Index i=0; i<alignedStart; ++i)\n    {\n      Scalar xi = x[i];\n      Scalar yi = y[i];\n      x[i] =  c * xi + numext::conj(s) * yi;\n      y[i] = -s * xi + numext::conj(c) * yi;\n    }\n\n    Scalar* EIGEN_RESTRICT px = x + alignedStart;\n    Scalar* EIGEN_RESTRICT py = y + alignedStart;\n\n    if(internal::first_default_aligned(x, size)==alignedStart)\n    {\n      for(Index i=alignedStart; i<alignedEnd; i+=PacketSize)\n      {\n        Packet xi = pload<Packet>(px);\n        Packet yi = pload<Packet>(py);\n        pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));\n        pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));\n        px += PacketSize;\n        py += PacketSize;\n      }\n    }\n    else\n    {\n      Index peelingEnd = alignedStart + ((size-alignedStart)/(Peeling*PacketSize))*(Peeling*PacketSize);\n      for(Index i=alignedStart; i<peelingEnd; i+=Peeling*PacketSize)\n      {\n        Packet xi   = ploadu<Packet>(px);\n        Packet xi1  = ploadu<Packet>(px+PacketSize);\n        Packet yi   = pload <Packet>(py);\n        Packet yi1  = pload <Packet>(py+PacketSize);\n        pstoreu(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));\n        pstoreu(px+PacketSize, padd(pmul(pc,xi1),pcj.pmul(ps,yi1)));\n        pstore (py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));\n        pstore (py+PacketSize, psub(pcj.pmul(pc,yi1),pmul(ps,xi1)));\n        px += Peeling*PacketSize;\n        py += Peeling*PacketSize;\n      }\n      if(alignedEnd!=peelingEnd)\n      {\n        Packet xi = ploadu<Packet>(x+peelingEnd);\n        Packet yi = pload <Packet>(y+peelingEnd);\n        pstoreu(x+peelingEnd, 
padd(pmul(pc,xi),pcj.pmul(ps,yi)));\n        pstore (y+peelingEnd, psub(pcj.pmul(pc,yi),pmul(ps,xi)));\n      }\n    }\n\n    for(Index i=alignedEnd; i<size; ++i)\n    {\n      Scalar xi = x[i];\n      Scalar yi = y[i];\n      x[i] =  c * xi + numext::conj(s) * yi;\n      y[i] = -s * xi + numext::conj(c) * yi;\n    }\n  }\n\n  /*** fixed-size vectorized path ***/\n  else if(VectorX::SizeAtCompileTime != Dynamic &&\n          (VectorX::Flags & VectorY::Flags & PacketAccessBit) &&\n          (EIGEN_PLAIN_ENUM_MIN(evaluator<VectorX>::Alignment, evaluator<VectorY>::Alignment)>0)) // FIXME should be compared to the required alignment\n  {\n    const Packet pc = pset1<Packet>(c);\n    const Packet ps = pset1<Packet>(s);\n    conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex,false> pcj;\n    Scalar* EIGEN_RESTRICT px = x;\n    Scalar* EIGEN_RESTRICT py = y;\n    for(Index i=0; i<size; i+=PacketSize)\n    {\n      Packet xi = pload<Packet>(px);\n      Packet yi = pload<Packet>(py);\n      pstore(px, padd(pmul(pc,xi),pcj.pmul(ps,yi)));\n      pstore(py, psub(pcj.pmul(pc,yi),pmul(ps,xi)));\n      px += PacketSize;\n      py += PacketSize;\n    }\n  }\n\n  /*** non-vectorized path ***/\n  else\n  {\n    for(Index i=0; i<size; ++i)\n    {\n      Scalar xi = *x;\n      Scalar yi = *y;\n      *x =  c * xi + numext::conj(s) * yi;\n      *y = -s * xi + numext::conj(c) * yi;\n      x += incrx;\n      y += incry;\n    }\n  }\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_JACOBI_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/LU/Determinant.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_DETERMINANT_H\n#define EIGEN_DETERMINANT_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename Derived>\ninline const typename Derived::Scalar bruteforce_det3_helper\n(const MatrixBase<Derived>& matrix, int a, int b, int c)\n{\n  return matrix.coeff(0,a)\n         * (matrix.coeff(1,b) * matrix.coeff(2,c) - matrix.coeff(1,c) * matrix.coeff(2,b));\n}\n\ntemplate<typename Derived>\nconst typename Derived::Scalar bruteforce_det4_helper\n(const MatrixBase<Derived>& matrix, int j, int k, int m, int n)\n{\n  return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1))\n       * (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3));\n}\n\ntemplate<typename Derived,\n         int DeterminantType = Derived::RowsAtCompileTime\n> struct determinant_impl\n{\n  static inline typename traits<Derived>::Scalar run(const Derived& m)\n  {\n    if(Derived::ColsAtCompileTime==Dynamic && m.rows()==0)\n      return typename traits<Derived>::Scalar(1);\n    return m.partialPivLu().determinant();\n  }\n};\n\ntemplate<typename Derived> struct determinant_impl<Derived, 1>\n{\n  static inline typename traits<Derived>::Scalar run(const Derived& m)\n  {\n    return m.coeff(0,0);\n  }\n};\n\ntemplate<typename Derived> struct determinant_impl<Derived, 2>\n{\n  static inline typename traits<Derived>::Scalar run(const Derived& m)\n  {\n    return m.coeff(0,0) * m.coeff(1,1) - m.coeff(1,0) * m.coeff(0,1);\n  }\n};\n\ntemplate<typename Derived> struct determinant_impl<Derived, 3>\n{\n  static inline typename traits<Derived>::Scalar run(const 
Derived& m)\n  {\n    return bruteforce_det3_helper(m,0,1,2)\n          - bruteforce_det3_helper(m,1,0,2)\n          + bruteforce_det3_helper(m,2,0,1);\n  }\n};\n\ntemplate<typename Derived> struct determinant_impl<Derived, 4>\n{\n  static typename traits<Derived>::Scalar run(const Derived& m)\n  {\n    // trick by Martin Costabel to compute 4x4 det with only 30 muls\n    return bruteforce_det4_helper(m,0,1,2,3)\n          - bruteforce_det4_helper(m,0,2,1,3)\n          + bruteforce_det4_helper(m,0,3,1,2)\n          + bruteforce_det4_helper(m,1,2,0,3)\n          - bruteforce_det4_helper(m,1,3,0,2)\n          + bruteforce_det4_helper(m,2,3,0,1);\n  }\n};\n\n} // end namespace internal\n\n/** \\lu_module\n  *\n  * \\returns the determinant of this matrix\n  */\ntemplate<typename Derived>\ninline typename internal::traits<Derived>::Scalar MatrixBase<Derived>::determinant() const\n{\n  eigen_assert(rows() == cols());\n  typedef typename internal::nested_eval<Derived,Base::RowsAtCompileTime>::type Nested;\n  return internal::determinant_impl<typename internal::remove_all<Nested>::type>::run(derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_DETERMINANT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/LU/FullPivLU.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_LU_H\n#define EIGEN_LU_H\n\nnamespace Eigen {\n\nnamespace internal {\ntemplate<typename _MatrixType> struct traits<FullPivLU<_MatrixType> >\n : traits<_MatrixType>\n{\n  typedef MatrixXpr XprKind;\n  typedef SolverStorage StorageKind;\n  enum { Flags = 0 };\n};\n\n} // end namespace internal\n\n/** \\ingroup LU_Module\n  *\n  * \\class FullPivLU\n  *\n  * \\brief LU decomposition of a matrix with complete pivoting, and related features\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are computing the LU decomposition\n  *\n  * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A is\n  * decomposed as \\f$ A = P^{-1} L U Q^{-1} \\f$ where L is unit-lower-triangular, U is\n  * upper-triangular, and P and Q are permutation matrices. This is a rank-revealing LU\n  * decomposition. The eigenvalues (diagonal coefficients) of U are sorted in such a way that any\n  * zeros are at the end.\n  *\n  * This decomposition provides the generic approach to solving systems of linear equations, computing\n  * the rank, invertibility, inverse, kernel, and determinant.\n  *\n  * This LU decomposition is very stable and well tested with large matrices. However there are use cases where the SVD\n  * decomposition is inherently more stable and/or flexible. 
For example, when computing the kernel of a matrix,\n  * working with the SVD allows to select the smallest singular values of the matrix, something that\n  * the LU decomposition doesn't see.\n  *\n  * The data of the LU decomposition can be directly accessed through the methods matrixLU(),\n  * permutationP(), permutationQ().\n  *\n  * As an exemple, here is how the original matrix can be retrieved:\n  * \\include class_FullPivLU.cpp\n  * Output: \\verbinclude class_FullPivLU.out\n  *\n  * This class supports the \\link InplaceDecomposition inplace decomposition \\endlink mechanism.\n  * \n  * \\sa MatrixBase::fullPivLu(), MatrixBase::determinant(), MatrixBase::inverse()\n  */\ntemplate<typename _MatrixType> class FullPivLU\n  : public SolverBase<FullPivLU<_MatrixType> >\n{\n  public:\n    typedef _MatrixType MatrixType;\n    typedef SolverBase<FullPivLU> Base;\n\n    EIGEN_GENERIC_PUBLIC_INTERFACE(FullPivLU)\n    // FIXME StorageIndex defined in EIGEN_GENERIC_PUBLIC_INTERFACE should be int\n    enum {\n      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n    typedef typename internal::plain_row_type<MatrixType, StorageIndex>::type IntRowVectorType;\n    typedef typename internal::plain_col_type<MatrixType, StorageIndex>::type IntColVectorType;\n    typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationQType;\n    typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationPType;\n    typedef typename MatrixType::PlainObject PlainObject;\n\n    /**\n      * \\brief Default Constructor.\n      *\n      * The default constructor is useful in cases in which the user intends to\n      * perform decompositions via LU::compute(const MatrixType&).\n      */\n    FullPivLU();\n\n    /** \\brief Default Constructor with memory preallocation\n      *\n      * Like the default constructor but with preallocation of the internal data\n      * according 
to the specified problem \\a size.\n      * \\sa FullPivLU()\n      */\n    FullPivLU(Index rows, Index cols);\n\n    /** Constructor.\n      *\n      * \\param matrix the matrix of which to compute the LU decomposition.\n      *               It is required to be nonzero.\n      */\n    template<typename InputType>\n    explicit FullPivLU(const EigenBase<InputType>& matrix);\n\n    /** \\brief Constructs a LU factorization from a given matrix\n      *\n      * This overloaded constructor is provided for \\link InplaceDecomposition inplace decomposition \\endlink when \\c MatrixType is a Eigen::Ref.\n      *\n      * \\sa FullPivLU(const EigenBase&)\n      */\n    template<typename InputType>\n    explicit FullPivLU(EigenBase<InputType>& matrix);\n\n    /** Computes the LU decomposition of the given matrix.\n      *\n      * \\param matrix the matrix of which to compute the LU decomposition.\n      *               It is required to be nonzero.\n      *\n      * \\returns a reference to *this\n      */\n    template<typename InputType>\n    FullPivLU& compute(const EigenBase<InputType>& matrix) {\n      m_lu = matrix.derived();\n      computeInPlace();\n      return *this;\n    }\n\n    /** \\returns the LU decomposition matrix: the upper-triangular part is U, the\n      * unit-lower-triangular part is L (at least for square matrices; in the non-square\n      * case, special care is needed, see the documentation of class FullPivLU).\n      *\n      * \\sa matrixL(), matrixU()\n      */\n    inline const MatrixType& matrixLU() const\n    {\n      eigen_assert(m_isInitialized && \"LU is not initialized.\");\n      return m_lu;\n    }\n\n    /** \\returns the number of nonzero pivots in the LU decomposition.\n      * Here nonzero is meant in the exact sense, not in a fuzzy sense.\n      * So that notion isn't really intrinsically interesting, but it is\n      * still useful when implementing algorithms.\n      *\n      * \\sa rank()\n      */\n    inline Index 
nonzeroPivots() const\n    {\n      eigen_assert(m_isInitialized && \"LU is not initialized.\");\n      return m_nonzero_pivots;\n    }\n\n    /** \\returns the absolute value of the biggest pivot, i.e. the biggest\n      *          diagonal coefficient of U.\n      */\n    RealScalar maxPivot() const { return m_maxpivot; }\n\n    /** \\returns the permutation matrix P\n      *\n      * \\sa permutationQ()\n      */\n    EIGEN_DEVICE_FUNC inline const PermutationPType& permutationP() const\n    {\n      eigen_assert(m_isInitialized && \"LU is not initialized.\");\n      return m_p;\n    }\n\n    /** \\returns the permutation matrix Q\n      *\n      * \\sa permutationP()\n      */\n    inline const PermutationQType& permutationQ() const\n    {\n      eigen_assert(m_isInitialized && \"LU is not initialized.\");\n      return m_q;\n    }\n\n    /** \\returns the kernel of the matrix, also called its null-space. The columns of the returned matrix\n      * will form a basis of the kernel.\n      *\n      * \\note If the kernel has dimension zero, then the returned matrix is a column-vector filled with zeros.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      *\n      * Example: \\include FullPivLU_kernel.cpp\n      * Output: \\verbinclude FullPivLU_kernel.out\n      *\n      * \\sa image()\n      */\n    inline const internal::kernel_retval<FullPivLU> kernel() const\n    {\n      eigen_assert(m_isInitialized && \"LU is not initialized.\");\n      return internal::kernel_retval<FullPivLU>(*this);\n    }\n\n    /** \\returns the image of the matrix, also called its column-space. 
The columns of the returned matrix\n      * will form a basis of the image (column-space).\n      *\n      * \\param originalMatrix the original matrix, of which *this is the LU decomposition.\n      *                       The reason why it is needed to pass it here, is that this allows\n      *                       a large optimization, as otherwise this method would need to reconstruct it\n      *                       from the LU decomposition.\n      *\n      * \\note If the image has dimension zero, then the returned matrix is a column-vector filled with zeros.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      *\n      * Example: \\include FullPivLU_image.cpp\n      * Output: \\verbinclude FullPivLU_image.out\n      *\n      * \\sa kernel()\n      */\n    inline const internal::image_retval<FullPivLU>\n      image(const MatrixType& originalMatrix) const\n    {\n      eigen_assert(m_isInitialized && \"LU is not initialized.\");\n      return internal::image_retval<FullPivLU>(*this, originalMatrix);\n    }\n\n    /** \\return a solution x to the equation Ax=b, where A is the matrix of which\n      * *this is the LU decomposition.\n      *\n      * \\param b the right-hand-side of the equation to solve. 
Can be a vector or a matrix,\n      *          the only requirement in order for the equation to make sense is that\n      *          b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition.\n      *\n      * \\returns a solution.\n      *\n      * \\note_about_checking_solutions\n      *\n      * \\note_about_arbitrary_choice_of_solution\n      * \\note_about_using_kernel_to_study_multiple_solutions\n      *\n      * Example: \\include FullPivLU_solve.cpp\n      * Output: \\verbinclude FullPivLU_solve.out\n      *\n      * \\sa TriangularView::solve(), kernel(), inverse()\n      */\n    // FIXME this is a copy-paste of the base-class member to add the isInitialized assertion.\n    template<typename Rhs>\n    inline const Solve<FullPivLU, Rhs>\n    solve(const MatrixBase<Rhs>& b) const\n    {\n      eigen_assert(m_isInitialized && \"LU is not initialized.\");\n      return Solve<FullPivLU, Rhs>(*this, b.derived());\n    }\n\n    /** \\returns an estimate of the reciprocal condition number of the matrix of which \\c *this is\n        the LU decomposition.\n      */\n    inline RealScalar rcond() const\n    {\n      eigen_assert(m_isInitialized && \"PartialPivLU is not initialized.\");\n      return internal::rcond_estimate_helper(m_l1_norm, *this);\n    }\n\n    /** \\returns the determinant of the matrix of which\n      * *this is the LU decomposition. 
It has only linear complexity\n      * (that is, O(n) where n is the dimension of the square matrix)\n      * as the LU decomposition has already been computed.\n      *\n      * \\note This is only for square matrices.\n      *\n      * \\note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers\n      *       optimized paths.\n      *\n      * \\warning a determinant can be very big or small, so for matrices\n      * of large enough dimension, there is a risk of overflow/underflow.\n      *\n      * \\sa MatrixBase::determinant()\n      */\n    typename internal::traits<MatrixType>::Scalar determinant() const;\n\n    /** Allows to prescribe a threshold to be used by certain methods, such as rank(),\n      * who need to determine when pivots are to be considered nonzero. This is not used for the\n      * LU decomposition itself.\n      *\n      * When it needs to get the threshold value, Eigen calls threshold(). By default, this\n      * uses a formula to automatically determine a reasonable threshold.\n      * Once you have called the present method setThreshold(const RealScalar&),\n      * your value is used instead.\n      *\n      * \\param threshold The new value to use as the threshold.\n      *\n      * A pivot will be considered nonzero if its absolute value is strictly greater than\n      *  \\f$ \\vert pivot \\vert \\leqslant threshold \\times \\vert maxpivot \\vert \\f$\n      * where maxpivot is the biggest pivot.\n      *\n      * If you want to come back to the default behavior, call setThreshold(Default_t)\n      */\n    FullPivLU& setThreshold(const RealScalar& threshold)\n    {\n      m_usePrescribedThreshold = true;\n      m_prescribedThreshold = threshold;\n      return *this;\n    }\n\n    /** Allows to come back to the default behavior, letting Eigen use its default formula for\n      * determining the threshold.\n      *\n      * You should pass the special object Eigen::Default as parameter here.\n      * \\code 
lu.setThreshold(Eigen::Default); \\endcode\n      *\n      * See the documentation of setThreshold(const RealScalar&).\n      */\n    FullPivLU& setThreshold(Default_t)\n    {\n      m_usePrescribedThreshold = false;\n      return *this;\n    }\n\n    /** Returns the threshold that will be used by certain methods such as rank().\n      *\n      * See the documentation of setThreshold(const RealScalar&).\n      */\n    RealScalar threshold() const\n    {\n      eigen_assert(m_isInitialized || m_usePrescribedThreshold);\n      return m_usePrescribedThreshold ? m_prescribedThreshold\n      // this formula comes from experimenting (see \"LU precision tuning\" thread on the list)\n      // and turns out to be identical to Higham's formula used already in LDLt.\n                                      : NumTraits<Scalar>::epsilon() * m_lu.diagonalSize();\n    }\n\n    /** \\returns the rank of the matrix of which *this is the LU decomposition.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline Index rank() const\n    {\n      using std::abs;\n      eigen_assert(m_isInitialized && \"LU is not initialized.\");\n      RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();\n      Index result = 0;\n      for(Index i = 0; i < m_nonzero_pivots; ++i)\n        result += (abs(m_lu.coeff(i,i)) > premultiplied_threshold);\n      return result;\n    }\n\n    /** \\returns the dimension of the kernel of the matrix of which *this is the LU decomposition.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline Index dimensionOfKernel() const\n    {\n      eigen_assert(m_isInitialized 
&& \"LU is not initialized.\");\n      return cols() - rank();\n    }\n\n    /** \\returns true if the matrix of which *this is the LU decomposition represents an injective\n      *          linear map, i.e. has trivial kernel; false otherwise.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline bool isInjective() const\n    {\n      eigen_assert(m_isInitialized && \"LU is not initialized.\");\n      return rank() == cols();\n    }\n\n    /** \\returns true if the matrix of which *this is the LU decomposition represents a surjective\n      *          linear map; false otherwise.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline bool isSurjective() const\n    {\n      eigen_assert(m_isInitialized && \"LU is not initialized.\");\n      return rank() == rows();\n    }\n\n    /** \\returns true if the matrix of which *this is the LU decomposition is invertible.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline bool isInvertible() const\n    {\n      eigen_assert(m_isInitialized && \"LU is not initialized.\");\n      return isInjective() && (m_lu.rows() == m_lu.cols());\n    }\n\n    /** \\returns the inverse of the matrix of which *this is the LU decomposition.\n      *\n      * \\note If this matrix is not invertible, the returned matrix has undefined coefficients.\n      *       Use isInvertible() to first determine whether this matrix is invertible.\n      *\n      * \\sa 
MatrixBase::inverse()\n      */\n    inline const Inverse<FullPivLU> inverse() const\n    {\n      eigen_assert(m_isInitialized && \"LU is not initialized.\");\n      eigen_assert(m_lu.rows() == m_lu.cols() && \"You can't take the inverse of a non-square matrix!\");\n      return Inverse<FullPivLU>(*this);\n    }\n\n    MatrixType reconstructedMatrix() const;\n\n    EIGEN_DEVICE_FUNC inline Index rows() const { return m_lu.rows(); }\n    EIGEN_DEVICE_FUNC inline Index cols() const { return m_lu.cols(); }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    template<typename RhsType, typename DstType>\n    void _solve_impl(const RhsType &rhs, DstType &dst) const;\n\n    template<bool Conjugate, typename RhsType, typename DstType>\n    void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;\n    #endif\n\n  protected:\n\n    static void check_template_parameters()\n    {\n      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);\n    }\n\n    void computeInPlace();\n\n    MatrixType m_lu;\n    PermutationPType m_p;\n    PermutationQType m_q;\n    IntColVectorType m_rowsTranspositions;\n    IntRowVectorType m_colsTranspositions;\n    Index m_nonzero_pivots;\n    RealScalar m_l1_norm;\n    RealScalar m_maxpivot, m_prescribedThreshold;\n    signed char m_det_pq;\n    bool m_isInitialized, m_usePrescribedThreshold;\n};\n\ntemplate<typename MatrixType>\nFullPivLU<MatrixType>::FullPivLU()\n  : m_isInitialized(false), m_usePrescribedThreshold(false)\n{\n}\n\ntemplate<typename MatrixType>\nFullPivLU<MatrixType>::FullPivLU(Index rows, Index cols)\n  : m_lu(rows, cols),\n    m_p(rows),\n    m_q(cols),\n    m_rowsTranspositions(rows),\n    m_colsTranspositions(cols),\n    m_isInitialized(false),\n    m_usePrescribedThreshold(false)\n{\n}\n\ntemplate<typename MatrixType>\ntemplate<typename InputType>\nFullPivLU<MatrixType>::FullPivLU(const EigenBase<InputType>& matrix)\n  : m_lu(matrix.rows(), matrix.cols()),\n    m_p(matrix.rows()),\n    m_q(matrix.cols()),\n    
m_rowsTranspositions(matrix.rows()),\n    m_colsTranspositions(matrix.cols()),\n    m_isInitialized(false),\n    m_usePrescribedThreshold(false)\n{\n  compute(matrix.derived());\n}\n\ntemplate<typename MatrixType>\ntemplate<typename InputType>\nFullPivLU<MatrixType>::FullPivLU(EigenBase<InputType>& matrix)\n  : m_lu(matrix.derived()),\n    m_p(matrix.rows()),\n    m_q(matrix.cols()),\n    m_rowsTranspositions(matrix.rows()),\n    m_colsTranspositions(matrix.cols()),\n    m_isInitialized(false),\n    m_usePrescribedThreshold(false)\n{\n  computeInPlace();\n}\n\ntemplate<typename MatrixType>\nvoid FullPivLU<MatrixType>::computeInPlace()\n{\n  check_template_parameters();\n\n  // the permutations are stored as int indices, so just to be sure:\n  eigen_assert(m_lu.rows()<=NumTraits<int>::highest() && m_lu.cols()<=NumTraits<int>::highest());\n\n  m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();\n\n  const Index size = m_lu.diagonalSize();\n  const Index rows = m_lu.rows();\n  const Index cols = m_lu.cols();\n\n  // will store the transpositions, before we accumulate them at the end.\n  // can't accumulate on-the-fly because that will be done in reverse order for the rows.\n  m_rowsTranspositions.resize(m_lu.rows());\n  m_colsTranspositions.resize(m_lu.cols());\n  Index number_of_transpositions = 0; // number of NONTRIVIAL transpositions, i.e. 
m_rowsTranspositions[i]!=i\n\n  m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)\n  m_maxpivot = RealScalar(0);\n\n  for(Index k = 0; k < size; ++k)\n  {\n    // First, we need to find the pivot.\n\n    // biggest coefficient in the remaining bottom-right corner (starting at row k, col k)\n    Index row_of_biggest_in_corner, col_of_biggest_in_corner;\n    typedef internal::scalar_score_coeff_op<Scalar> Scoring;\n    typedef typename Scoring::result_type Score;\n    Score biggest_in_corner;\n    biggest_in_corner = m_lu.bottomRightCorner(rows-k, cols-k)\n                        .unaryExpr(Scoring())\n                        .maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner);\n    row_of_biggest_in_corner += k; // correct the values! since they were computed in the corner,\n    col_of_biggest_in_corner += k; // need to add k to them.\n\n    if(biggest_in_corner==Score(0))\n    {\n      // before exiting, make sure to initialize the still uninitialized transpositions\n      // in a sane state without destroying what we already have.\n      m_nonzero_pivots = k;\n      for(Index i = k; i < size; ++i)\n      {\n        m_rowsTranspositions.coeffRef(i) = i;\n        m_colsTranspositions.coeffRef(i) = i;\n      }\n      break;\n    }\n\n    RealScalar abs_pivot = internal::abs_knowing_score<Scalar>()(m_lu(row_of_biggest_in_corner, col_of_biggest_in_corner), biggest_in_corner);\n    if(abs_pivot > m_maxpivot) m_maxpivot = abs_pivot;\n\n    // Now that we've found the pivot, we need to apply the row/col swaps to\n    // bring it to the location (k,k).\n\n    m_rowsTranspositions.coeffRef(k) = row_of_biggest_in_corner;\n    m_colsTranspositions.coeffRef(k) = col_of_biggest_in_corner;\n    if(k != row_of_biggest_in_corner) {\n      m_lu.row(k).swap(m_lu.row(row_of_biggest_in_corner));\n      ++number_of_transpositions;\n    }\n    if(k != col_of_biggest_in_corner) {\n      
m_lu.col(k).swap(m_lu.col(col_of_biggest_in_corner));\n      ++number_of_transpositions;\n    }\n\n    // Now that the pivot is at the right location, we update the remaining\n    // bottom-right corner by Gaussian elimination.\n\n    if(k<rows-1)\n      m_lu.col(k).tail(rows-k-1) /= m_lu.coeff(k,k);\n    if(k<size-1)\n      m_lu.block(k+1,k+1,rows-k-1,cols-k-1).noalias() -= m_lu.col(k).tail(rows-k-1) * m_lu.row(k).tail(cols-k-1);\n  }\n\n  // the main loop is over, we still have to accumulate the transpositions to find the\n  // permutations P and Q\n\n  m_p.setIdentity(rows);\n  for(Index k = size-1; k >= 0; --k)\n    m_p.applyTranspositionOnTheRight(k, m_rowsTranspositions.coeff(k));\n\n  m_q.setIdentity(cols);\n  for(Index k = 0; k < size; ++k)\n    m_q.applyTranspositionOnTheRight(k, m_colsTranspositions.coeff(k));\n\n  m_det_pq = (number_of_transpositions%2) ? -1 : 1;\n\n  m_isInitialized = true;\n}\n\ntemplate<typename MatrixType>\ntypename internal::traits<MatrixType>::Scalar FullPivLU<MatrixType>::determinant() const\n{\n  eigen_assert(m_isInitialized && \"LU is not initialized.\");\n  eigen_assert(m_lu.rows() == m_lu.cols() && \"You can't take the determinant of a non-square matrix!\");\n  return Scalar(m_det_pq) * Scalar(m_lu.diagonal().prod());\n}\n\n/** \\returns the matrix represented by the decomposition,\n * i.e., it returns the product: \\f$ P^{-1} L U Q^{-1} \\f$.\n * This function is provided for debug purposes. 
*/\ntemplate<typename MatrixType>\nMatrixType FullPivLU<MatrixType>::reconstructedMatrix() const\n{\n  eigen_assert(m_isInitialized && \"LU is not initialized.\");\n  const Index smalldim = (std::min)(m_lu.rows(), m_lu.cols());\n  // LU\n  MatrixType res(m_lu.rows(),m_lu.cols());\n  // FIXME the .toDenseMatrix() should not be needed...\n  res = m_lu.leftCols(smalldim)\n            .template triangularView<UnitLower>().toDenseMatrix()\n      * m_lu.topRows(smalldim)\n            .template triangularView<Upper>().toDenseMatrix();\n\n  // P^{-1}(LU)\n  res = m_p.inverse() * res;\n\n  // (P^{-1}LU)Q^{-1}\n  res = res * m_q.inverse();\n\n  return res;\n}\n\n/********* Implementation of kernel() **************************************************/\n\nnamespace internal {\ntemplate<typename _MatrixType>\nstruct kernel_retval<FullPivLU<_MatrixType> >\n  : kernel_retval_base<FullPivLU<_MatrixType> >\n{\n  EIGEN_MAKE_KERNEL_HELPERS(FullPivLU<_MatrixType>)\n\n  enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(\n            MatrixType::MaxColsAtCompileTime,\n            MatrixType::MaxRowsAtCompileTime)\n  };\n\n  template<typename Dest> void evalTo(Dest& dst) const\n  {\n    using std::abs;\n    const Index cols = dec().matrixLU().cols(), dimker = cols - rank();\n    if(dimker == 0)\n    {\n      // The Kernel is just {0}, so it doesn't have a basis properly speaking, but let's\n      // avoid crashing/asserting as that depends on floating point calculations. 
Let's\n      // just return a single column vector filled with zeros.\n      dst.setZero();\n      return;\n    }\n\n    /* Let us use the following lemma:\n      *\n      * Lemma: If the matrix A has the LU decomposition PAQ = LU,\n      * then Ker A = Q(Ker U).\n      *\n      * Proof: trivial: just keep in mind that P, Q, L are invertible.\n      */\n\n    /* Thus, all we need to do is to compute Ker U, and then apply Q.\n      *\n      * U is upper triangular, with eigenvalues sorted so that any zeros appear at the end.\n      * Thus, the diagonal of U ends with exactly\n      * dimKer zero's. Let us use that to construct dimKer linearly\n      * independent vectors in Ker U.\n      */\n\n    Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());\n    RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();\n    Index p = 0;\n    for(Index i = 0; i < dec().nonzeroPivots(); ++i)\n      if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)\n        pivots.coeffRef(p++) = i;\n    eigen_internal_assert(p == rank());\n\n    // we construct a temporaty trapezoid matrix m, by taking the U matrix and\n    // permuting the rows and cols to bring the nonnegligible pivots to the top of\n    // the main diagonal. 
We need that to be able to apply our triangular solvers.\n    // FIXME when we get triangularView-for-rectangular-matrices, this can be simplified\n    Matrix<typename MatrixType::Scalar, Dynamic, Dynamic, MatrixType::Options,\n           MaxSmallDimAtCompileTime, MatrixType::MaxColsAtCompileTime>\n      m(dec().matrixLU().block(0, 0, rank(), cols));\n    for(Index i = 0; i < rank(); ++i)\n    {\n      if(i) m.row(i).head(i).setZero();\n      m.row(i).tail(cols-i) = dec().matrixLU().row(pivots.coeff(i)).tail(cols-i);\n    }\n    m.block(0, 0, rank(), rank());\n    m.block(0, 0, rank(), rank()).template triangularView<StrictlyLower>().setZero();\n    for(Index i = 0; i < rank(); ++i)\n      m.col(i).swap(m.col(pivots.coeff(i)));\n\n    // ok, we have our trapezoid matrix, we can apply the triangular solver.\n    // notice that the math behind this suggests that we should apply this to the\n    // negative of the RHS, but for performance we just put the negative sign elsewhere, see below.\n    m.topLeftCorner(rank(), rank())\n     .template triangularView<Upper>().solveInPlace(\n        m.topRightCorner(rank(), dimker)\n      );\n\n    // now we must undo the column permutation that we had applied!\n    for(Index i = rank()-1; i >= 0; --i)\n      m.col(i).swap(m.col(pivots.coeff(i)));\n\n    // see the negative sign in the next line, that's what we were talking about above.\n    for(Index i = 0; i < rank(); ++i) dst.row(dec().permutationQ().indices().coeff(i)) = -m.row(i).tail(dimker);\n    for(Index i = rank(); i < cols; ++i) dst.row(dec().permutationQ().indices().coeff(i)).setZero();\n    for(Index k = 0; k < dimker; ++k) dst.coeffRef(dec().permutationQ().indices().coeff(rank()+k), k) = Scalar(1);\n  }\n};\n\n/***** Implementation of image() *****************************************************/\n\ntemplate<typename _MatrixType>\nstruct image_retval<FullPivLU<_MatrixType> >\n  : image_retval_base<FullPivLU<_MatrixType> >\n{\n  
EIGEN_MAKE_IMAGE_HELPERS(FullPivLU<_MatrixType>)\n\n  enum { MaxSmallDimAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(\n            MatrixType::MaxColsAtCompileTime,\n            MatrixType::MaxRowsAtCompileTime)\n  };\n\n  template<typename Dest> void evalTo(Dest& dst) const\n  {\n    using std::abs;\n    if(rank() == 0)\n    {\n      // The Image is just {0}, so it doesn't have a basis properly speaking, but let's\n      // avoid crashing/asserting as that depends on floating point calculations. Let's\n      // just return a single column vector filled with zeros.\n      dst.setZero();\n      return;\n    }\n\n    Matrix<Index, Dynamic, 1, 0, MaxSmallDimAtCompileTime, 1> pivots(rank());\n    RealScalar premultiplied_threshold = dec().maxPivot() * dec().threshold();\n    Index p = 0;\n    for(Index i = 0; i < dec().nonzeroPivots(); ++i)\n      if(abs(dec().matrixLU().coeff(i,i)) > premultiplied_threshold)\n        pivots.coeffRef(p++) = i;\n    eigen_internal_assert(p == rank());\n\n    for(Index i = 0; i < rank(); ++i)\n      dst.col(i) = originalMatrix().col(dec().permutationQ().indices().coeff(pivots.coeff(i)));\n  }\n};\n\n/***** Implementation of solve() *****************************************************/\n\n} // end namespace internal\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate<typename _MatrixType>\ntemplate<typename RhsType, typename DstType>\nvoid FullPivLU<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const\n{\n  /* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1}.\n  * So we proceed as follows:\n  * Step 1: compute c = P * rhs.\n  * Step 2: replace c by the solution x to Lx = c. Exists because L is invertible.\n  * Step 3: replace c by the solution x to Ux = c. 
May or may not exist.\n  * Step 4: result = Q * c;\n  */\n\n  const Index rows = this->rows(),\n              cols = this->cols(),\n              nonzero_pivots = this->rank();\n  eigen_assert(rhs.rows() == rows);\n  const Index smalldim = (std::min)(rows, cols);\n\n  if(nonzero_pivots == 0)\n  {\n    dst.setZero();\n    return;\n  }\n\n  typename RhsType::PlainObject c(rhs.rows(), rhs.cols());\n\n  // Step 1\n  c = permutationP() * rhs;\n\n  // Step 2\n  m_lu.topLeftCorner(smalldim,smalldim)\n      .template triangularView<UnitLower>()\n      .solveInPlace(c.topRows(smalldim));\n  if(rows>cols)\n    c.bottomRows(rows-cols) -= m_lu.bottomRows(rows-cols) * c.topRows(cols);\n\n  // Step 3\n  m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)\n      .template triangularView<Upper>()\n      .solveInPlace(c.topRows(nonzero_pivots));\n\n  // Step 4\n  for(Index i = 0; i < nonzero_pivots; ++i)\n    dst.row(permutationQ().indices().coeff(i)) = c.row(i);\n  for(Index i = nonzero_pivots; i < m_lu.cols(); ++i)\n    dst.row(permutationQ().indices().coeff(i)).setZero();\n}\n\ntemplate<typename _MatrixType>\ntemplate<bool Conjugate, typename RhsType, typename DstType>\nvoid FullPivLU<_MatrixType>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const\n{\n  /* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1},\n   * and since permutations are real and unitary, we can write this\n   * as   A^T = Q U^T L^T P,\n   * So we proceed as follows:\n   * Step 1: compute c = Q^T rhs.\n   * Step 2: replace c by the solution x to U^T x = c. 
May or may not exist.\n   * Step 3: replace c by the solution x to L^T x = c.\n   * Step 4: result = P^T c.\n   * If Conjugate is true, replace \"^T\" by \"^*\" above.\n   */\n\n  const Index rows = this->rows(), cols = this->cols(),\n    nonzero_pivots = this->rank();\n   eigen_assert(rhs.rows() == cols);\n  const Index smalldim = (std::min)(rows, cols);\n\n  if(nonzero_pivots == 0)\n  {\n    dst.setZero();\n    return;\n  }\n\n  typename RhsType::PlainObject c(rhs.rows(), rhs.cols());\n\n  // Step 1\n  c = permutationQ().inverse() * rhs;\n\n  if (Conjugate) {\n    // Step 2\n    m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)\n        .template triangularView<Upper>()\n        .adjoint()\n        .solveInPlace(c.topRows(nonzero_pivots));\n    // Step 3\n    m_lu.topLeftCorner(smalldim, smalldim)\n        .template triangularView<UnitLower>()\n        .adjoint()\n        .solveInPlace(c.topRows(smalldim));\n  } else {\n    // Step 2\n    m_lu.topLeftCorner(nonzero_pivots, nonzero_pivots)\n        .template triangularView<Upper>()\n        .transpose()\n        .solveInPlace(c.topRows(nonzero_pivots));\n    // Step 3\n    m_lu.topLeftCorner(smalldim, smalldim)\n        .template triangularView<UnitLower>()\n        .transpose()\n        .solveInPlace(c.topRows(smalldim));\n  }\n\n  // Step 4\n  PermutationPType invp = permutationP().inverse().eval();\n  for(Index i = 0; i < smalldim; ++i)\n    dst.row(invp.indices().coeff(i)) = c.row(i);\n  for(Index i = smalldim; i < rows; ++i)\n    dst.row(invp.indices().coeff(i)).setZero();\n}\n\n#endif\n\nnamespace internal {\n\n\n/***** Implementation of inverse() *****************************************************/\ntemplate<typename DstXprType, typename MatrixType>\nstruct Assignment<DstXprType, Inverse<FullPivLU<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename FullPivLU<MatrixType>::Scalar>, Dense2Dense>\n{\n  typedef FullPivLU<MatrixType> LuType;\n  typedef Inverse<LuType> SrcXprType;\n  static 
void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename MatrixType::Scalar> &)\n  {\n    dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));\n  }\n};\n} // end namespace internal\n\n/******* MatrixBase methods *****************************************************************/\n\n/** \\lu_module\n  *\n  * \\return the full-pivoting LU decomposition of \\c *this.\n  *\n  * \\sa class FullPivLU\n  */\ntemplate<typename Derived>\ninline const FullPivLU<typename MatrixBase<Derived>::PlainObject>\nMatrixBase<Derived>::fullPivLu() const\n{\n  return FullPivLU<PlainObject>(eval());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_LU_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/LU/InverseImpl.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_INVERSE_IMPL_H\n#define EIGEN_INVERSE_IMPL_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/**********************************\n*** General case implementation ***\n**********************************/\n\ntemplate<typename MatrixType, typename ResultType, int Size = MatrixType::RowsAtCompileTime>\nstruct compute_inverse\n{\n  EIGEN_DEVICE_FUNC\n  static inline void run(const MatrixType& matrix, ResultType& result)\n  {\n    result = matrix.partialPivLu().inverse();\n  }\n};\n\ntemplate<typename MatrixType, typename ResultType, int Size = MatrixType::RowsAtCompileTime>\nstruct compute_inverse_and_det_with_check { /* nothing! general case not supported. 
*/ };\n\n/****************************\n*** Size 1 implementation ***\n****************************/\n\ntemplate<typename MatrixType, typename ResultType>\nstruct compute_inverse<MatrixType, ResultType, 1>\n{\n  EIGEN_DEVICE_FUNC\n  static inline void run(const MatrixType& matrix, ResultType& result)\n  {\n    typedef typename MatrixType::Scalar Scalar;\n    internal::evaluator<MatrixType> matrixEval(matrix);\n    result.coeffRef(0,0) = Scalar(1) / matrixEval.coeff(0,0);\n  }\n};\n\ntemplate<typename MatrixType, typename ResultType>\nstruct compute_inverse_and_det_with_check<MatrixType, ResultType, 1>\n{\n  EIGEN_DEVICE_FUNC\n  static inline void run(\n    const MatrixType& matrix,\n    const typename MatrixType::RealScalar& absDeterminantThreshold,\n    ResultType& result,\n    typename ResultType::Scalar& determinant,\n    bool& invertible\n  )\n  {\n    using std::abs;\n    determinant = matrix.coeff(0,0);\n    invertible = abs(determinant) > absDeterminantThreshold;\n    if(invertible) result.coeffRef(0,0) = typename ResultType::Scalar(1) / determinant;\n  }\n};\n\n/****************************\n*** Size 2 implementation ***\n****************************/\n\ntemplate<typename MatrixType, typename ResultType>\nEIGEN_DEVICE_FUNC \ninline void compute_inverse_size2_helper(\n    const MatrixType& matrix, const typename ResultType::Scalar& invdet,\n    ResultType& result)\n{\n  result.coeffRef(0,0) =  matrix.coeff(1,1) * invdet;\n  result.coeffRef(1,0) = -matrix.coeff(1,0) * invdet;\n  result.coeffRef(0,1) = -matrix.coeff(0,1) * invdet;\n  result.coeffRef(1,1) =  matrix.coeff(0,0) * invdet;\n}\n\ntemplate<typename MatrixType, typename ResultType>\nstruct compute_inverse<MatrixType, ResultType, 2>\n{\n  EIGEN_DEVICE_FUNC\n  static inline void run(const MatrixType& matrix, ResultType& result)\n  {\n    typedef typename ResultType::Scalar Scalar;\n    const Scalar invdet = typename MatrixType::Scalar(1) / matrix.determinant();\n    compute_inverse_size2_helper(matrix, 
invdet, result);\n  }\n};\n\ntemplate<typename MatrixType, typename ResultType>\nstruct compute_inverse_and_det_with_check<MatrixType, ResultType, 2>\n{\n  EIGEN_DEVICE_FUNC\n  static inline void run(\n    const MatrixType& matrix,\n    const typename MatrixType::RealScalar& absDeterminantThreshold,\n    ResultType& inverse,\n    typename ResultType::Scalar& determinant,\n    bool& invertible\n  )\n  {\n    using std::abs;\n    typedef typename ResultType::Scalar Scalar;\n    determinant = matrix.determinant();\n    invertible = abs(determinant) > absDeterminantThreshold;\n    if(!invertible) return;\n    const Scalar invdet = Scalar(1) / determinant;\n    compute_inverse_size2_helper(matrix, invdet, inverse);\n  }\n};\n\n/****************************\n*** Size 3 implementation ***\n****************************/\n\ntemplate<typename MatrixType, int i, int j>\nEIGEN_DEVICE_FUNC \ninline typename MatrixType::Scalar cofactor_3x3(const MatrixType& m)\n{\n  enum {\n    i1 = (i+1) % 3,\n    i2 = (i+2) % 3,\n    j1 = (j+1) % 3,\n    j2 = (j+2) % 3\n  };\n  return m.coeff(i1, j1) * m.coeff(i2, j2)\n       - m.coeff(i1, j2) * m.coeff(i2, j1);\n}\n\ntemplate<typename MatrixType, typename ResultType>\nEIGEN_DEVICE_FUNC\ninline void compute_inverse_size3_helper(\n    const MatrixType& matrix,\n    const typename ResultType::Scalar& invdet,\n    const Matrix<typename ResultType::Scalar,3,1>& cofactors_col0,\n    ResultType& result)\n{\n  result.row(0) = cofactors_col0 * invdet;\n  result.coeffRef(1,0) =  cofactor_3x3<MatrixType,0,1>(matrix) * invdet;\n  result.coeffRef(1,1) =  cofactor_3x3<MatrixType,1,1>(matrix) * invdet;\n  result.coeffRef(1,2) =  cofactor_3x3<MatrixType,2,1>(matrix) * invdet;\n  result.coeffRef(2,0) =  cofactor_3x3<MatrixType,0,2>(matrix) * invdet;\n  result.coeffRef(2,1) =  cofactor_3x3<MatrixType,1,2>(matrix) * invdet;\n  result.coeffRef(2,2) =  cofactor_3x3<MatrixType,2,2>(matrix) * invdet;\n}\n\ntemplate<typename MatrixType, typename ResultType>\nstruct 
compute_inverse<MatrixType, ResultType, 3>\n{\n  EIGEN_DEVICE_FUNC\n  static inline void run(const MatrixType& matrix, ResultType& result)\n  {\n    typedef typename ResultType::Scalar Scalar;\n    Matrix<typename MatrixType::Scalar,3,1> cofactors_col0;\n    cofactors_col0.coeffRef(0) =  cofactor_3x3<MatrixType,0,0>(matrix);\n    cofactors_col0.coeffRef(1) =  cofactor_3x3<MatrixType,1,0>(matrix);\n    cofactors_col0.coeffRef(2) =  cofactor_3x3<MatrixType,2,0>(matrix);\n    const Scalar det = (cofactors_col0.cwiseProduct(matrix.col(0))).sum();\n    const Scalar invdet = Scalar(1) / det;\n    compute_inverse_size3_helper(matrix, invdet, cofactors_col0, result);\n  }\n};\n\ntemplate<typename MatrixType, typename ResultType>\nstruct compute_inverse_and_det_with_check<MatrixType, ResultType, 3>\n{\n  EIGEN_DEVICE_FUNC\n  static inline void run(\n    const MatrixType& matrix,\n    const typename MatrixType::RealScalar& absDeterminantThreshold,\n    ResultType& inverse,\n    typename ResultType::Scalar& determinant,\n    bool& invertible\n  )\n  {\n    using std::abs;\n    typedef typename ResultType::Scalar Scalar;\n    Matrix<Scalar,3,1> cofactors_col0;\n    cofactors_col0.coeffRef(0) =  cofactor_3x3<MatrixType,0,0>(matrix);\n    cofactors_col0.coeffRef(1) =  cofactor_3x3<MatrixType,1,0>(matrix);\n    cofactors_col0.coeffRef(2) =  cofactor_3x3<MatrixType,2,0>(matrix);\n    determinant = (cofactors_col0.cwiseProduct(matrix.col(0))).sum();\n    invertible = abs(determinant) > absDeterminantThreshold;\n    if(!invertible) return;\n    const Scalar invdet = Scalar(1) / determinant;\n    compute_inverse_size3_helper(matrix, invdet, cofactors_col0, inverse);\n  }\n};\n\n/****************************\n*** Size 4 implementation ***\n****************************/\n\ntemplate<typename Derived>\nEIGEN_DEVICE_FUNC \ninline const typename Derived::Scalar general_det3_helper\n(const MatrixBase<Derived>& matrix, int i1, int i2, int i3, int j1, int j2, int j3)\n{\n  return 
matrix.coeff(i1,j1)\n         * (matrix.coeff(i2,j2) * matrix.coeff(i3,j3) - matrix.coeff(i2,j3) * matrix.coeff(i3,j2));\n}\n\ntemplate<typename MatrixType, int i, int j>\nEIGEN_DEVICE_FUNC \ninline typename MatrixType::Scalar cofactor_4x4(const MatrixType& matrix)\n{\n  enum {\n    i1 = (i+1) % 4,\n    i2 = (i+2) % 4,\n    i3 = (i+3) % 4,\n    j1 = (j+1) % 4,\n    j2 = (j+2) % 4,\n    j3 = (j+3) % 4\n  };\n  return general_det3_helper(matrix, i1, i2, i3, j1, j2, j3)\n       + general_det3_helper(matrix, i2, i3, i1, j1, j2, j3)\n       + general_det3_helper(matrix, i3, i1, i2, j1, j2, j3);\n}\n\ntemplate<int Arch, typename Scalar, typename MatrixType, typename ResultType>\nstruct compute_inverse_size4\n{\n  EIGEN_DEVICE_FUNC\n  static void run(const MatrixType& matrix, ResultType& result)\n  {\n    result.coeffRef(0,0) =  cofactor_4x4<MatrixType,0,0>(matrix);\n    result.coeffRef(1,0) = -cofactor_4x4<MatrixType,0,1>(matrix);\n    result.coeffRef(2,0) =  cofactor_4x4<MatrixType,0,2>(matrix);\n    result.coeffRef(3,0) = -cofactor_4x4<MatrixType,0,3>(matrix);\n    result.coeffRef(0,2) =  cofactor_4x4<MatrixType,2,0>(matrix);\n    result.coeffRef(1,2) = -cofactor_4x4<MatrixType,2,1>(matrix);\n    result.coeffRef(2,2) =  cofactor_4x4<MatrixType,2,2>(matrix);\n    result.coeffRef(3,2) = -cofactor_4x4<MatrixType,2,3>(matrix);\n    result.coeffRef(0,1) = -cofactor_4x4<MatrixType,1,0>(matrix);\n    result.coeffRef(1,1) =  cofactor_4x4<MatrixType,1,1>(matrix);\n    result.coeffRef(2,1) = -cofactor_4x4<MatrixType,1,2>(matrix);\n    result.coeffRef(3,1) =  cofactor_4x4<MatrixType,1,3>(matrix);\n    result.coeffRef(0,3) = -cofactor_4x4<MatrixType,3,0>(matrix);\n    result.coeffRef(1,3) =  cofactor_4x4<MatrixType,3,1>(matrix);\n    result.coeffRef(2,3) = -cofactor_4x4<MatrixType,3,2>(matrix);\n    result.coeffRef(3,3) =  cofactor_4x4<MatrixType,3,3>(matrix);\n    result /= (matrix.col(0).cwiseProduct(result.row(0).transpose())).sum();\n  }\n};\n\ntemplate<typename MatrixType, 
typename ResultType>\nstruct compute_inverse<MatrixType, ResultType, 4>\n : compute_inverse_size4<Architecture::Target, typename MatrixType::Scalar,\n                            MatrixType, ResultType>\n{\n};\n\ntemplate<typename MatrixType, typename ResultType>\nstruct compute_inverse_and_det_with_check<MatrixType, ResultType, 4>\n{\n  EIGEN_DEVICE_FUNC\n  static inline void run(\n    const MatrixType& matrix,\n    const typename MatrixType::RealScalar& absDeterminantThreshold,\n    ResultType& inverse,\n    typename ResultType::Scalar& determinant,\n    bool& invertible\n  )\n  {\n    using std::abs;\n    determinant = matrix.determinant();\n    invertible = abs(determinant) > absDeterminantThreshold;\n    if(invertible) compute_inverse<MatrixType, ResultType>::run(matrix, inverse);\n  }\n};\n\n/*************************\n*** MatrixBase methods ***\n*************************/\n\n} // end namespace internal\n\nnamespace internal {\n\n// Specialization for \"dense = dense_xpr.inverse()\"\ntemplate<typename DstXprType, typename XprType>\nstruct Assignment<DstXprType, Inverse<XprType>, internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar>, Dense2Dense>\n{\n  typedef Inverse<XprType> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename XprType::Scalar> &)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n    \n    const int Size = EIGEN_PLAIN_ENUM_MIN(XprType::ColsAtCompileTime,DstXprType::ColsAtCompileTime);\n    EIGEN_ONLY_USED_FOR_DEBUG(Size);\n    eigen_assert(( (Size<=1) || (Size>4) || (extract_data(src.nestedExpression())!=extract_data(dst)))\n              && \"Aliasing problem detected in inverse(), you need to do inverse().eval() here.\");\n\n    typedef typename internal::nested_eval<XprType,XprType::ColsAtCompileTime>::type  ActualXprType;\n    
typedef typename internal::remove_all<ActualXprType>::type                        ActualXprTypeCleanded;\n    \n    ActualXprType actual_xpr(src.nestedExpression());\n    \n    compute_inverse<ActualXprTypeCleanded, DstXprType>::run(actual_xpr, dst);\n  }\n};\n\n  \n} // end namespace internal\n\n/** \\lu_module\n  *\n  * \\returns the matrix inverse of this matrix.\n  *\n  * For small fixed sizes up to 4x4, this method uses cofactors.\n  * In the general case, this method uses class PartialPivLU.\n  *\n  * \\note This matrix must be invertible, otherwise the result is undefined. If you need an\n  * invertibility check, do the following:\n  * \\li for fixed sizes up to 4x4, use computeInverseAndDetWithCheck().\n  * \\li for the general case, use class FullPivLU.\n  *\n  * Example: \\include MatrixBase_inverse.cpp\n  * Output: \\verbinclude MatrixBase_inverse.out\n  *\n  * \\sa computeInverseAndDetWithCheck()\n  */\ntemplate<typename Derived>\ninline const Inverse<Derived> MatrixBase<Derived>::inverse() const\n{\n  EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsInteger,THIS_FUNCTION_IS_NOT_FOR_INTEGER_NUMERIC_TYPES)\n  eigen_assert(rows() == cols());\n  return Inverse<Derived>(derived());\n}\n\n/** \\lu_module\n  *\n  * Computation of matrix inverse and determinant, with invertibility check.\n  *\n  * This is only for fixed-size square matrices of size up to 4x4.\n  *\n  * \\param inverse Reference to the matrix in which to store the inverse.\n  * \\param determinant Reference to the variable in which to store the determinant.\n  * \\param invertible Reference to the bool variable in which to store whether the matrix is invertible.\n  * \\param absDeterminantThreshold Optional parameter controlling the invertibility check.\n  *                                The matrix will be declared invertible if the absolute value of its\n  *                                determinant is greater than this threshold.\n  *\n  * Example: \\include 
MatrixBase_computeInverseAndDetWithCheck.cpp\n  * Output: \\verbinclude MatrixBase_computeInverseAndDetWithCheck.out\n  *\n  * \\sa inverse(), computeInverseWithCheck()\n  */\ntemplate<typename Derived>\ntemplate<typename ResultType>\ninline void MatrixBase<Derived>::computeInverseAndDetWithCheck(\n    ResultType& inverse,\n    typename ResultType::Scalar& determinant,\n    bool& invertible,\n    const RealScalar& absDeterminantThreshold\n  ) const\n{\n  // i'd love to put some static assertions there, but SFINAE means that they have no effect...\n  eigen_assert(rows() == cols());\n  // for 2x2, it's worth giving a chance to avoid evaluating.\n  // for larger sizes, evaluating has negligible cost and limits code size.\n  typedef typename internal::conditional<\n    RowsAtCompileTime == 2,\n    typename internal::remove_all<typename internal::nested_eval<Derived, 2>::type>::type,\n    PlainObject\n  >::type MatrixType;\n  internal::compute_inverse_and_det_with_check<MatrixType, ResultType>::run\n    (derived(), absDeterminantThreshold, inverse, determinant, invertible);\n}\n\n/** \\lu_module\n  *\n  * Computation of matrix inverse, with invertibility check.\n  *\n  * This is only for fixed-size square matrices of size up to 4x4.\n  *\n  * \\param inverse Reference to the matrix in which to store the inverse.\n  * \\param invertible Reference to the bool variable in which to store whether the matrix is invertible.\n  * \\param absDeterminantThreshold Optional parameter controlling the invertibility check.\n  *                                The matrix will be declared invertible if the absolute value of its\n  *                                determinant is greater than this threshold.\n  *\n  * Example: \\include MatrixBase_computeInverseWithCheck.cpp\n  * Output: \\verbinclude MatrixBase_computeInverseWithCheck.out\n  *\n  * \\sa inverse(), computeInverseAndDetWithCheck()\n  */\ntemplate<typename Derived>\ntemplate<typename ResultType>\ninline void 
MatrixBase<Derived>::computeInverseWithCheck(\n    ResultType& inverse,\n    bool& invertible,\n    const RealScalar& absDeterminantThreshold\n  ) const\n{\n  RealScalar determinant;\n  // i'd love to put some static assertions there, but SFINAE means that they have no effect...\n  eigen_assert(rows() == cols());\n  computeInverseAndDetWithCheck(inverse,determinant,invertible,absDeterminantThreshold);\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_INVERSE_IMPL_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/LU/PartialPivLU.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2006-2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PARTIALLU_H\n#define EIGEN_PARTIALLU_H\n\nnamespace Eigen {\n\nnamespace internal {\ntemplate<typename _MatrixType> struct traits<PartialPivLU<_MatrixType> >\n : traits<_MatrixType>\n{\n  typedef MatrixXpr XprKind;\n  typedef SolverStorage StorageKind;\n  typedef traits<_MatrixType> BaseTraits;\n  enum {\n    Flags = BaseTraits::Flags & RowMajorBit,\n    CoeffReadCost = Dynamic\n  };\n};\n\ntemplate<typename T,typename Derived>\nstruct enable_if_ref;\n// {\n//   typedef Derived type;\n// };\n\ntemplate<typename T,typename Derived>\nstruct enable_if_ref<Ref<T>,Derived> {\n  typedef Derived type;\n};\n\n} // end namespace internal\n\n/** \\ingroup LU_Module\n  *\n  * \\class PartialPivLU\n  *\n  * \\brief LU decomposition of a matrix with partial pivoting, and related features\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are computing the LU decomposition\n  *\n  * This class represents a LU decomposition of a \\b square \\b invertible matrix, with partial pivoting: the matrix A\n  * is decomposed as A = PLU where L is unit-lower-triangular, U is upper-triangular, and P\n  * is a permutation matrix.\n  *\n  * Typically, partial pivoting LU decomposition is only considered numerically stable for square invertible\n  * matrices. Thus LAPACK's dgesv and dgesvx require the matrix to be square and invertible. The present class\n  * does the same. 
It will assert that the matrix is square, but it won't (actually it can't) check that the\n  * matrix is invertible: it is your task to check that you only use this decomposition on invertible matrices.\n  *\n  * The guaranteed safe alternative, working for all matrices, is the full pivoting LU decomposition, provided\n  * by class FullPivLU.\n  *\n  * This is \\b not a rank-revealing LU decomposition. Many features are intentionally absent from this class,\n  * such as rank computation. If you need these features, use class FullPivLU.\n  *\n  * This LU decomposition is suitable to invert invertible matrices. It is what MatrixBase::inverse() uses\n  * in the general case.\n  * On the other hand, it is \\b not suitable to determine whether a given matrix is invertible.\n  *\n  * The data of the LU decomposition can be directly accessed through the methods matrixLU(), permutationP().\n  *\n  * This class supports the \\link InplaceDecomposition inplace decomposition \\endlink mechanism.\n  * \n  * \\sa MatrixBase::partialPivLu(), MatrixBase::determinant(), MatrixBase::inverse(), MatrixBase::computeInverse(), class FullPivLU\n  */\ntemplate<typename _MatrixType> class PartialPivLU\n  : public SolverBase<PartialPivLU<_MatrixType> >\n{\n  public:\n\n    typedef _MatrixType MatrixType;\n    typedef SolverBase<PartialPivLU> Base;\n    EIGEN_GENERIC_PUBLIC_INTERFACE(PartialPivLU)\n    // FIXME StorageIndex defined in EIGEN_GENERIC_PUBLIC_INTERFACE should be int\n    enum {\n      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n    typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;\n    typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;\n    typedef typename MatrixType::PlainObject PlainObject;\n\n    /**\n      * \\brief Default Constructor.\n      *\n      * The default constructor is useful in cases in which the user intends 
to\n      * perform decompositions via PartialPivLU::compute(const MatrixType&).\n      */\n    PartialPivLU();\n\n    /** \\brief Default Constructor with memory preallocation\n      *\n      * Like the default constructor but with preallocation of the internal data\n      * according to the specified problem \\a size.\n      * \\sa PartialPivLU()\n      */\n    explicit PartialPivLU(Index size);\n\n    /** Constructor.\n      *\n      * \\param matrix the matrix of which to compute the LU decomposition.\n      *\n      * \\warning The matrix should have full rank (e.g. if it's square, it should be invertible).\n      * If you need to deal with non-full rank, use class FullPivLU instead.\n      */\n    template<typename InputType>\n    explicit PartialPivLU(const EigenBase<InputType>& matrix);\n\n    /** Constructor for \\link InplaceDecomposition inplace decomposition \\endlink\n      *\n      * \\param matrix the matrix of which to compute the LU decomposition.\n      *\n      * \\warning The matrix should have full rank (e.g. 
if it's square, it should be invertible).\n      * If you need to deal with non-full rank, use class FullPivLU instead.\n      */\n    template<typename InputType>\n    explicit PartialPivLU(EigenBase<InputType>& matrix);\n\n    template<typename InputType>\n    PartialPivLU& compute(const EigenBase<InputType>& matrix) {\n      m_lu = matrix.derived();\n      compute();\n      return *this;\n    }\n\n    /** \\returns the LU decomposition matrix: the upper-triangular part is U, the\n      * unit-lower-triangular part is L (at least for square matrices; in the non-square\n      * case, special care is needed, see the documentation of class FullPivLU).\n      *\n      * \\sa matrixL(), matrixU()\n      */\n    inline const MatrixType& matrixLU() const\n    {\n      eigen_assert(m_isInitialized && \"PartialPivLU is not initialized.\");\n      return m_lu;\n    }\n\n    /** \\returns the permutation matrix P.\n      */\n    inline const PermutationType& permutationP() const\n    {\n      eigen_assert(m_isInitialized && \"PartialPivLU is not initialized.\");\n      return m_p;\n    }\n\n    /** This method returns the solution x to the equation Ax=b, where A is the matrix of which\n      * *this is the LU decomposition.\n      *\n      * \\param b the right-hand-side of the equation to solve. 
Can be a vector or a matrix,\n      *          the only requirement in order for the equation to make sense is that\n      *          b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition.\n      *\n      * \\returns the solution.\n      *\n      * Example: \\include PartialPivLU_solve.cpp\n      * Output: \\verbinclude PartialPivLU_solve.out\n      *\n      * Since this PartialPivLU class assumes anyway that the matrix A is invertible, the solution\n      * theoretically exists and is unique regardless of b.\n      *\n      * \\sa TriangularView::solve(), inverse(), computeInverse()\n      */\n    // FIXME this is a copy-paste of the base-class member to add the isInitialized assertion.\n    template<typename Rhs>\n    inline const Solve<PartialPivLU, Rhs>\n    solve(const MatrixBase<Rhs>& b) const\n    {\n      eigen_assert(m_isInitialized && \"PartialPivLU is not initialized.\");\n      return Solve<PartialPivLU, Rhs>(*this, b.derived());\n    }\n\n    /** \\returns an estimate of the reciprocal condition number of the matrix of which \\c *this is\n        the LU decomposition.\n      */\n    inline RealScalar rcond() const\n    {\n      eigen_assert(m_isInitialized && \"PartialPivLU is not initialized.\");\n      return internal::rcond_estimate_helper(m_l1_norm, *this);\n    }\n\n    /** \\returns the inverse of the matrix of which *this is the LU decomposition.\n      *\n      * \\warning The matrix being decomposed here is assumed to be invertible. If you need to check for\n      *          invertibility, use class FullPivLU instead.\n      *\n      * \\sa MatrixBase::inverse(), LU::inverse()\n      */\n    inline const Inverse<PartialPivLU> inverse() const\n    {\n      eigen_assert(m_isInitialized && \"PartialPivLU is not initialized.\");\n      return Inverse<PartialPivLU>(*this);\n    }\n\n    /** \\returns the determinant of the matrix of which\n      * *this is the LU decomposition. 
It has only linear complexity\n      * (that is, O(n) where n is the dimension of the square matrix)\n      * as the LU decomposition has already been computed.\n      *\n      * \\note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers\n      *       optimized paths.\n      *\n      * \\warning a determinant can be very big or small, so for matrices\n      * of large enough dimension, there is a risk of overflow/underflow.\n      *\n      * \\sa MatrixBase::determinant()\n      */\n    Scalar determinant() const;\n\n    MatrixType reconstructedMatrix() const;\n\n    inline Index rows() const { return m_lu.rows(); }\n    inline Index cols() const { return m_lu.cols(); }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    template<typename RhsType, typename DstType>\n    EIGEN_DEVICE_FUNC\n    void _solve_impl(const RhsType &rhs, DstType &dst) const {\n     /* The decomposition PA = LU can be rewritten as A = P^{-1} L U.\n      * So we proceed as follows:\n      * Step 1: compute c = Pb.\n      * Step 2: replace c by the solution x to Lx = c.\n      * Step 3: replace c by the solution x to Ux = c.\n      */\n\n      eigen_assert(rhs.rows() == m_lu.rows());\n\n      // Step 1\n      dst = permutationP() * rhs;\n\n      // Step 2\n      m_lu.template triangularView<UnitLower>().solveInPlace(dst);\n\n      // Step 3\n      m_lu.template triangularView<Upper>().solveInPlace(dst);\n    }\n\n    template<bool Conjugate, typename RhsType, typename DstType>\n    EIGEN_DEVICE_FUNC\n    void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const {\n     /* The decomposition PA = LU can be rewritten as A = P^{-1} L U.\n      * So we proceed as follows:\n      * Step 1: compute c = Pb.\n      * Step 2: replace c by the solution x to Lx = c.\n      * Step 3: replace c by the solution x to Ux = c.\n      */\n\n      eigen_assert(rhs.rows() == m_lu.cols());\n\n      if (Conjugate) {\n        // Step 1\n        dst = m_lu.template 
triangularView<Upper>().adjoint().solve(rhs);\n        // Step 2\n        m_lu.template triangularView<UnitLower>().adjoint().solveInPlace(dst);\n      } else {\n        // Step 1\n        dst = m_lu.template triangularView<Upper>().transpose().solve(rhs);\n        // Step 2\n        m_lu.template triangularView<UnitLower>().transpose().solveInPlace(dst);\n      }\n      // Step 3\n      dst = permutationP().transpose() * dst;\n    }\n    #endif\n\n  protected:\n\n    static void check_template_parameters()\n    {\n      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);\n    }\n\n    void compute();\n\n    MatrixType m_lu;\n    PermutationType m_p;\n    TranspositionType m_rowsTranspositions;\n    RealScalar m_l1_norm;\n    signed char m_det_p;\n    bool m_isInitialized;\n};\n\ntemplate<typename MatrixType>\nPartialPivLU<MatrixType>::PartialPivLU()\n  : m_lu(),\n    m_p(),\n    m_rowsTranspositions(),\n    m_l1_norm(0),\n    m_det_p(0),\n    m_isInitialized(false)\n{\n}\n\ntemplate<typename MatrixType>\nPartialPivLU<MatrixType>::PartialPivLU(Index size)\n  : m_lu(size, size),\n    m_p(size),\n    m_rowsTranspositions(size),\n    m_l1_norm(0),\n    m_det_p(0),\n    m_isInitialized(false)\n{\n}\n\ntemplate<typename MatrixType>\ntemplate<typename InputType>\nPartialPivLU<MatrixType>::PartialPivLU(const EigenBase<InputType>& matrix)\n  : m_lu(matrix.rows(),matrix.cols()),\n    m_p(matrix.rows()),\n    m_rowsTranspositions(matrix.rows()),\n    m_l1_norm(0),\n    m_det_p(0),\n    m_isInitialized(false)\n{\n  compute(matrix.derived());\n}\n\ntemplate<typename MatrixType>\ntemplate<typename InputType>\nPartialPivLU<MatrixType>::PartialPivLU(EigenBase<InputType>& matrix)\n  : m_lu(matrix.derived()),\n    m_p(matrix.rows()),\n    m_rowsTranspositions(matrix.rows()),\n    m_l1_norm(0),\n    m_det_p(0),\n    m_isInitialized(false)\n{\n  compute();\n}\n\nnamespace internal {\n\n/** \\internal This is the blocked version of fullpivlu_unblocked() */\ntemplate<typename Scalar, int 
StorageOrder, typename PivIndex>\nstruct partial_lu_impl\n{\n  // FIXME add a stride to Map, so that the following mapping becomes easier,\n  // another option would be to create an expression being able to automatically\n  // warp any Map, Matrix, and Block expressions as a unique type, but since that's exactly\n  // a Map + stride, why not adding a stride to Map, and convenient ctors from a Matrix,\n  // and Block.\n  typedef Map<Matrix<Scalar, Dynamic, Dynamic, StorageOrder> > MapLU;\n  typedef Block<MapLU, Dynamic, Dynamic> MatrixType;\n  typedef Block<MatrixType,Dynamic,Dynamic> BlockType;\n  typedef typename MatrixType::RealScalar RealScalar;\n\n  /** \\internal performs the LU decomposition in-place of the matrix \\a lu\n    * using an unblocked algorithm.\n    *\n    * In addition, this function returns the row transpositions in the\n    * vector \\a row_transpositions which must have a size equal to the number\n    * of columns of the matrix \\a lu, and an integer \\a nb_transpositions\n    * which returns the actual number of transpositions.\n    *\n    * \\returns The index of the first pivot which is exactly zero if any, or a negative number otherwise.\n    */\n  static Index unblocked_lu(MatrixType& lu, PivIndex* row_transpositions, PivIndex& nb_transpositions)\n  {\n    typedef scalar_score_coeff_op<Scalar> Scoring;\n    typedef typename Scoring::result_type Score;\n    const Index rows = lu.rows();\n    const Index cols = lu.cols();\n    const Index size = (std::min)(rows,cols);\n    nb_transpositions = 0;\n    Index first_zero_pivot = -1;\n    for(Index k = 0; k < size; ++k)\n    {\n      Index rrows = rows-k-1;\n      Index rcols = cols-k-1;\n\n      Index row_of_biggest_in_col;\n      Score biggest_in_corner\n        = lu.col(k).tail(rows-k).unaryExpr(Scoring()).maxCoeff(&row_of_biggest_in_col);\n      row_of_biggest_in_col += k;\n\n      row_transpositions[k] = PivIndex(row_of_biggest_in_col);\n\n      if(biggest_in_corner != Score(0))\n      {\n 
       if(k != row_of_biggest_in_col)\n        {\n          lu.row(k).swap(lu.row(row_of_biggest_in_col));\n          ++nb_transpositions;\n        }\n\n        // FIXME shall we introduce a safe quotient expression in cas 1/lu.coeff(k,k)\n        // overflow but not the actual quotient?\n        lu.col(k).tail(rrows) /= lu.coeff(k,k);\n      }\n      else if(first_zero_pivot==-1)\n      {\n        // the pivot is exactly zero, we record the index of the first pivot which is exactly 0,\n        // and continue the factorization such we still have A = PLU\n        first_zero_pivot = k;\n      }\n\n      if(k<rows-1)\n        lu.bottomRightCorner(rrows,rcols).noalias() -= lu.col(k).tail(rrows) * lu.row(k).tail(rcols);\n    }\n    return first_zero_pivot;\n  }\n\n  /** \\internal performs the LU decomposition in-place of the matrix represented\n    * by the variables \\a rows, \\a cols, \\a lu_data, and \\a lu_stride using a\n    * recursive, blocked algorithm.\n    *\n    * In addition, this function returns the row transpositions in the\n    * vector \\a row_transpositions which must have a size equal to the number\n    * of columns of the matrix \\a lu, and an integer \\a nb_transpositions\n    * which returns the actual number of transpositions.\n    *\n    * \\returns The index of the first pivot which is exactly zero if any, or a negative number otherwise.\n    *\n    * \\note This very low level interface using pointers, etc. 
is to:\n    *   1 - reduce the number of instanciations to the strict minimum\n    *   2 - avoid infinite recursion of the instanciations with Block<Block<Block<...> > >\n    */\n  static Index blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, PivIndex* row_transpositions, PivIndex& nb_transpositions, Index maxBlockSize=256)\n  {\n    MapLU lu1(lu_data,StorageOrder==RowMajor?rows:luStride,StorageOrder==RowMajor?luStride:cols);\n    MatrixType lu(lu1,0,0,rows,cols);\n\n    const Index size = (std::min)(rows,cols);\n\n    // if the matrix is too small, no blocking:\n    if(size<=16)\n    {\n      return unblocked_lu(lu, row_transpositions, nb_transpositions);\n    }\n\n    // automatically adjust the number of subdivisions to the size\n    // of the matrix so that there is enough sub blocks:\n    Index blockSize;\n    {\n      blockSize = size/8;\n      blockSize = (blockSize/16)*16;\n      blockSize = (std::min)((std::max)(blockSize,Index(8)), maxBlockSize);\n    }\n\n    nb_transpositions = 0;\n    Index first_zero_pivot = -1;\n    for(Index k = 0; k < size; k+=blockSize)\n    {\n      Index bs = (std::min)(size-k,blockSize); // actual size of the block\n      Index trows = rows - k - bs; // trailing rows\n      Index tsize = size - k - bs; // trailing size\n\n      // partition the matrix:\n      //                          A00 | A01 | A02\n      // lu  = A_0 | A_1 | A_2 =  A10 | A11 | A12\n      //                          A20 | A21 | A22\n      BlockType A_0(lu,0,0,rows,k);\n      BlockType A_2(lu,0,k+bs,rows,tsize);\n      BlockType A11(lu,k,k,bs,bs);\n      BlockType A12(lu,k,k+bs,bs,tsize);\n      BlockType A21(lu,k+bs,k,trows,bs);\n      BlockType A22(lu,k+bs,k+bs,trows,tsize);\n\n      PivIndex nb_transpositions_in_panel;\n      // recursively call the blocked LU algorithm on [A11^T A21^T]^T\n      // with a very small blocking size:\n      Index ret = blocked_lu(trows+bs, bs, &lu.coeffRef(k,k), luStride,\n                   
row_transpositions+k, nb_transpositions_in_panel, 16);\n      if(ret>=0 && first_zero_pivot==-1)\n        first_zero_pivot = k+ret;\n\n      nb_transpositions += nb_transpositions_in_panel;\n      // update permutations and apply them to A_0\n      for(Index i=k; i<k+bs; ++i)\n      {\n        Index piv = (row_transpositions[i] += internal::convert_index<PivIndex>(k));\n        A_0.row(i).swap(A_0.row(piv));\n      }\n\n      if(trows)\n      {\n        // apply permutations to A_2\n        for(Index i=k;i<k+bs; ++i)\n          A_2.row(i).swap(A_2.row(row_transpositions[i]));\n\n        // A12 = A11^-1 A12\n        A11.template triangularView<UnitLower>().solveInPlace(A12);\n\n        A22.noalias() -= A21 * A12;\n      }\n    }\n    return first_zero_pivot;\n  }\n};\n\n/** \\internal performs the LU decomposition with partial pivoting in-place.\n  */\ntemplate<typename MatrixType, typename TranspositionType>\nvoid partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, typename TranspositionType::StorageIndex& nb_transpositions)\n{\n  eigen_assert(lu.cols() == row_transpositions.size());\n  eigen_assert((&row_transpositions.coeffRef(1)-&row_transpositions.coeffRef(0)) == 1);\n\n  partial_lu_impl\n    <typename MatrixType::Scalar, MatrixType::Flags&RowMajorBit?RowMajor:ColMajor, typename TranspositionType::StorageIndex>\n    ::blocked_lu(lu.rows(), lu.cols(), &lu.coeffRef(0,0), lu.outerStride(), &row_transpositions.coeffRef(0), nb_transpositions);\n}\n\n} // end namespace internal\n\ntemplate<typename MatrixType>\nvoid PartialPivLU<MatrixType>::compute()\n{\n  check_template_parameters();\n\n  // the row permutation is stored as int indices, so just to be sure:\n  eigen_assert(m_lu.rows()<NumTraits<int>::highest());\n\n  m_l1_norm = m_lu.cwiseAbs().colwise().sum().maxCoeff();\n\n  eigen_assert(m_lu.rows() == m_lu.cols() && \"PartialPivLU is only for square (and moreover invertible) matrices\");\n  const Index size = m_lu.rows();\n\n  
m_rowsTranspositions.resize(size);\n\n  typename TranspositionType::StorageIndex nb_transpositions;\n  internal::partial_lu_inplace(m_lu, m_rowsTranspositions, nb_transpositions);\n  m_det_p = (nb_transpositions%2) ? -1 : 1;\n\n  m_p = m_rowsTranspositions;\n\n  m_isInitialized = true;\n}\n\ntemplate<typename MatrixType>\ntypename PartialPivLU<MatrixType>::Scalar PartialPivLU<MatrixType>::determinant() const\n{\n  eigen_assert(m_isInitialized && \"PartialPivLU is not initialized.\");\n  return Scalar(m_det_p) * m_lu.diagonal().prod();\n}\n\n/** \\returns the matrix represented by the decomposition,\n * i.e., it returns the product: P^{-1} L U.\n * This function is provided for debug purpose. */\ntemplate<typename MatrixType>\nMatrixType PartialPivLU<MatrixType>::reconstructedMatrix() const\n{\n  eigen_assert(m_isInitialized && \"LU is not initialized.\");\n  // LU\n  MatrixType res = m_lu.template triangularView<UnitLower>().toDenseMatrix()\n                 * m_lu.template triangularView<Upper>();\n\n  // P^{-1}(LU)\n  res = m_p.inverse() * res;\n\n  return res;\n}\n\n/***** Implementation details *****************************************************/\n\nnamespace internal {\n\n/***** Implementation of inverse() *****************************************************/\ntemplate<typename DstXprType, typename MatrixType>\nstruct Assignment<DstXprType, Inverse<PartialPivLU<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename PartialPivLU<MatrixType>::Scalar>, Dense2Dense>\n{\n  typedef PartialPivLU<MatrixType> LuType;\n  typedef Inverse<LuType> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename LuType::Scalar> &)\n  {\n    dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));\n  }\n};\n} // end namespace internal\n\n/******** MatrixBase methods *******/\n\n/** \\lu_module\n  *\n  * \\return the partial-pivoting LU decomposition of \\c 
*this.\n  *\n  * \\sa class PartialPivLU\n  */\ntemplate<typename Derived>\ninline const PartialPivLU<typename MatrixBase<Derived>::PlainObject>\nMatrixBase<Derived>::partialPivLu() const\n{\n  return PartialPivLU<PlainObject>(eval());\n}\n\n/** \\lu_module\n  *\n  * Synonym of partialPivLu().\n  *\n  * \\return the partial-pivoting LU decomposition of \\c *this.\n  *\n  * \\sa class PartialPivLU\n  */\ntemplate<typename Derived>\ninline const PartialPivLU<typename MatrixBase<Derived>::PlainObject>\nMatrixBase<Derived>::lu() const\n{\n  return PartialPivLU<PlainObject>(eval());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_PARTIALLU_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/LU/PartialPivLU_LAPACKE.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to LAPACKe\n *     LU decomposition with partial pivoting based on LAPACKE_?getrf function.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_PARTIALLU_LAPACK_H\n#define EIGEN_PARTIALLU_LAPACK_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/** \\internal Specialization for the data types supported by LAPACKe */\n\n#define EIGEN_LAPACKE_LU_PARTPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX) \\\ntemplate<int StorageOrder> \\\nstruct partial_lu_impl<EIGTYPE, StorageOrder, lapack_int> \\\n{ \\\n  /* \\internal performs the LU decomposition in-place of the matrix represented */ \\\n  static lapack_int blocked_lu(Index rows, Index cols, EIGTYPE* lu_data, Index luStride, lapack_int* row_transpositions, lapack_int& nb_transpositions, lapack_int maxBlockSize=256) \\\n  { \\\n    EIGEN_UNUSED_VARIABLE(maxBlockSize);\\\n    lapack_int matrix_order, first_zero_pivot; \\\n    lapack_int m, n, lda, *ipiv, info; \\\n    EIGTYPE* a; \\\n/* Set up parameters for ?getrf */ \\\n    matrix_order = StorageOrder==RowMajor ? 
LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \\\n    lda = convert_index<lapack_int>(luStride); \\\n    a = lu_data; \\\n    ipiv = row_transpositions; \\\n    m = convert_index<lapack_int>(rows); \\\n    n = convert_index<lapack_int>(cols); \\\n    nb_transpositions = 0; \\\n\\\n    info = LAPACKE_##LAPACKE_PREFIX##getrf( matrix_order, m, n, (LAPACKE_TYPE*)a, lda, ipiv ); \\\n\\\n    for(int i=0;i<m;i++) { ipiv[i]--; if (ipiv[i]!=i) nb_transpositions++; } \\\n\\\n    eigen_assert(info >= 0); \\\n/* something should be done with nb_transpositions */ \\\n\\\n    first_zero_pivot = info; \\\n    return first_zero_pivot; \\\n  } \\\n};\n\nEIGEN_LAPACKE_LU_PARTPIV(double, double, d)\nEIGEN_LAPACKE_LU_PARTPIV(float, float, s)\nEIGEN_LAPACKE_LU_PARTPIV(dcomplex, lapack_complex_double, z)\nEIGEN_LAPACKE_LU_PARTPIV(scomplex, lapack_complex_float,  c)\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_PARTIALLU_LAPACK_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/LU/arch/Inverse_SSE.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2001 Intel Corporation\n// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n// The SSE code for the 4x4 float and double matrix inverse in this file\n// comes from the following Intel's library:\n// http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/\n//\n// Here is the respective copyright and license statement:\n//\n//   Copyright (c) 2001 Intel Corporation.\n//\n// Permition is granted to use, copy, distribute and prepare derivative works\n// of this library for any purpose and without fee, provided, that the above\n// copyright notice and this statement appear in all copies.\n// Intel makes no representations about the suitability of this software for\n// any purpose, and specifically disclaims all warranties.\n// See LEGAL.TXT for all the legal information.\n\n#ifndef EIGEN_INVERSE_SSE_H\n#define EIGEN_INVERSE_SSE_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename MatrixType, typename ResultType>\nstruct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>\n{\n  enum {\n    MatrixAlignment     = traits<MatrixType>::Alignment,\n    ResultAlignment     = traits<ResultType>::Alignment,\n    StorageOrdersMatch  = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)\n  };\n  typedef typename conditional<(MatrixType::Flags&LinearAccessBit),MatrixType const &,typename MatrixType::PlainObject>::type ActualMatrixType;\n  \n  static void run(const MatrixType& mat, ResultType& result)\n  {\n    ActualMatrixType matrix(mat);\n    
EIGEN_ALIGN16 const unsigned int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 };\n\n    // Load the full matrix into registers\n    __m128 _L1 = matrix.template packet<MatrixAlignment>( 0);\n    __m128 _L2 = matrix.template packet<MatrixAlignment>( 4);\n    __m128 _L3 = matrix.template packet<MatrixAlignment>( 8);\n    __m128 _L4 = matrix.template packet<MatrixAlignment>(12);\n\n    // The inverse is calculated using \"Divide and Conquer\" technique. The\n    // original matrix is divide into four 2x2 sub-matrices. Since each\n    // register holds four matrix element, the smaller matrices are\n    // represented as a registers. Hence we get a better locality of the\n    // calculations.\n\n    __m128 A, B, C, D; // the four sub-matrices\n    if(!StorageOrdersMatch)\n    {\n      A = _mm_unpacklo_ps(_L1, _L2);\n      B = _mm_unpacklo_ps(_L3, _L4);\n      C = _mm_unpackhi_ps(_L1, _L2);\n      D = _mm_unpackhi_ps(_L3, _L4);\n    }\n    else\n    {\n      A = _mm_movelh_ps(_L1, _L2);\n      B = _mm_movehl_ps(_L2, _L1);\n      C = _mm_movelh_ps(_L3, _L4);\n      D = _mm_movehl_ps(_L4, _L3);\n    }\n\n    __m128 iA, iB, iC, iD,                 // partial inverse of the sub-matrices\n            DC, AB;\n    __m128 dA, dB, dC, dD;                 // determinant of the sub-matrices\n    __m128 det, d, d1, d2;\n    __m128 rd;                             // reciprocal of the determinant\n\n    //  AB = A# * B\n    AB = _mm_mul_ps(_mm_shuffle_ps(A,A,0x0F), B);\n    AB = _mm_sub_ps(AB,_mm_mul_ps(_mm_shuffle_ps(A,A,0xA5), _mm_shuffle_ps(B,B,0x4E)));\n    //  DC = D# * C\n    DC = _mm_mul_ps(_mm_shuffle_ps(D,D,0x0F), C);\n    DC = _mm_sub_ps(DC,_mm_mul_ps(_mm_shuffle_ps(D,D,0xA5), _mm_shuffle_ps(C,C,0x4E)));\n\n    //  dA = |A|\n    dA = _mm_mul_ps(_mm_shuffle_ps(A, A, 0x5F),A);\n    dA = _mm_sub_ss(dA, _mm_movehl_ps(dA,dA));\n    //  dB = |B|\n    dB = _mm_mul_ps(_mm_shuffle_ps(B, B, 0x5F),B);\n    dB = _mm_sub_ss(dB, _mm_movehl_ps(dB,dB));\n\n    //  dC = 
|C|\n    dC = _mm_mul_ps(_mm_shuffle_ps(C, C, 0x5F),C);\n    dC = _mm_sub_ss(dC, _mm_movehl_ps(dC,dC));\n    //  dD = |D|\n    dD = _mm_mul_ps(_mm_shuffle_ps(D, D, 0x5F),D);\n    dD = _mm_sub_ss(dD, _mm_movehl_ps(dD,dD));\n\n    //  d = trace(AB*DC) = trace(A#*B*D#*C)\n    d = _mm_mul_ps(_mm_shuffle_ps(DC,DC,0xD8),AB);\n\n    //  iD = C*A#*B\n    iD = _mm_mul_ps(_mm_shuffle_ps(C,C,0xA0), _mm_movelh_ps(AB,AB));\n    iD = _mm_add_ps(iD,_mm_mul_ps(_mm_shuffle_ps(C,C,0xF5), _mm_movehl_ps(AB,AB)));\n    //  iA = B*D#*C\n    iA = _mm_mul_ps(_mm_shuffle_ps(B,B,0xA0), _mm_movelh_ps(DC,DC));\n    iA = _mm_add_ps(iA,_mm_mul_ps(_mm_shuffle_ps(B,B,0xF5), _mm_movehl_ps(DC,DC)));\n\n    //  d = trace(AB*DC) = trace(A#*B*D#*C) [continue]\n    d  = _mm_add_ps(d, _mm_movehl_ps(d, d));\n    d  = _mm_add_ss(d, _mm_shuffle_ps(d, d, 1));\n    d1 = _mm_mul_ss(dA,dD);\n    d2 = _mm_mul_ss(dB,dC);\n\n    //  iD = D*|A| - C*A#*B\n    iD = _mm_sub_ps(_mm_mul_ps(D,_mm_shuffle_ps(dA,dA,0)), iD);\n\n    //  iA = A*|D| - B*D#*C;\n    iA = _mm_sub_ps(_mm_mul_ps(A,_mm_shuffle_ps(dD,dD,0)), iA);\n\n    //  det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)\n    det = _mm_sub_ss(_mm_add_ss(d1,d2),d);\n    rd  = _mm_div_ss(_mm_set_ss(1.0f), det);\n\n//     #ifdef ZERO_SINGULAR\n//         rd = _mm_and_ps(_mm_cmpneq_ss(det,_mm_setzero_ps()), rd);\n//     #endif\n\n    //  iB = D * (A#B)# = D*B#*A\n    iB = _mm_mul_ps(D, _mm_shuffle_ps(AB,AB,0x33));\n    iB = _mm_sub_ps(iB, _mm_mul_ps(_mm_shuffle_ps(D,D,0xB1), _mm_shuffle_ps(AB,AB,0x66)));\n    //  iC = A * (D#C)# = A*C#*D\n    iC = _mm_mul_ps(A, _mm_shuffle_ps(DC,DC,0x33));\n    iC = _mm_sub_ps(iC, _mm_mul_ps(_mm_shuffle_ps(A,A,0xB1), _mm_shuffle_ps(DC,DC,0x66)));\n\n    rd = _mm_shuffle_ps(rd,rd,0);\n    rd = _mm_xor_ps(rd, _mm_load_ps((float*)_Sign_PNNP));\n\n    //  iB = C*|B| - D*B#*A\n    iB = _mm_sub_ps(_mm_mul_ps(C,_mm_shuffle_ps(dB,dB,0)), iB);\n\n    //  iC = B*|C| - A*C#*D;\n    iC = _mm_sub_ps(_mm_mul_ps(B,_mm_shuffle_ps(dC,dC,0)), iC);\n\n    //  
iX = iX / det\n    iA = _mm_mul_ps(rd,iA);\n    iB = _mm_mul_ps(rd,iB);\n    iC = _mm_mul_ps(rd,iC);\n    iD = _mm_mul_ps(rd,iD);\n\n    Index res_stride = result.outerStride();\n    float* res = result.data();\n    pstoret<float, Packet4f, ResultAlignment>(res+0,            _mm_shuffle_ps(iA,iB,0x77));\n    pstoret<float, Packet4f, ResultAlignment>(res+res_stride,   _mm_shuffle_ps(iA,iB,0x22));\n    pstoret<float, Packet4f, ResultAlignment>(res+2*res_stride, _mm_shuffle_ps(iC,iD,0x77));\n    pstoret<float, Packet4f, ResultAlignment>(res+3*res_stride, _mm_shuffle_ps(iC,iD,0x22));\n  }\n\n};\n\ntemplate<typename MatrixType, typename ResultType>\nstruct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>\n{\n  enum {\n    MatrixAlignment     = traits<MatrixType>::Alignment,\n    ResultAlignment     = traits<ResultType>::Alignment,\n    StorageOrdersMatch  = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)\n  };\n  typedef typename conditional<(MatrixType::Flags&LinearAccessBit),MatrixType const &,typename MatrixType::PlainObject>::type ActualMatrixType;\n  \n  static void run(const MatrixType& mat, ResultType& result)\n  {\n    ActualMatrixType matrix(mat);\n    const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));\n    const __m128d _Sign_PN = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));\n\n    // The inverse is calculated using \"Divide and Conquer\" technique. The\n    // original matrix is divide into four 2x2 sub-matrices. Since each\n    // register of the matrix holds two elements, the smaller matrices are\n    // consisted of two registers. 
Hence we get a better locality of the\n    // calculations.\n\n    // the four sub-matrices\n    __m128d A1, A2, B1, B2, C1, C2, D1, D2;\n    \n    if(StorageOrdersMatch)\n    {\n      A1 = matrix.template packet<MatrixAlignment>( 0); B1 = matrix.template packet<MatrixAlignment>( 2);\n      A2 = matrix.template packet<MatrixAlignment>( 4); B2 = matrix.template packet<MatrixAlignment>( 6);\n      C1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);\n      C2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);\n    }\n    else\n    {\n      __m128d tmp;\n      A1 = matrix.template packet<MatrixAlignment>( 0); C1 = matrix.template packet<MatrixAlignment>( 2);\n      A2 = matrix.template packet<MatrixAlignment>( 4); C2 = matrix.template packet<MatrixAlignment>( 6);\n      tmp = A1;\n      A1 = _mm_unpacklo_pd(A1,A2);\n      A2 = _mm_unpackhi_pd(tmp,A2);\n      tmp = C1;\n      C1 = _mm_unpacklo_pd(C1,C2);\n      C2 = _mm_unpackhi_pd(tmp,C2);\n      \n      B1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);\n      B2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);\n      tmp = B1;\n      B1 = _mm_unpacklo_pd(B1,B2);\n      B2 = _mm_unpackhi_pd(tmp,B2);\n      tmp = D1;\n      D1 = _mm_unpacklo_pd(D1,D2);\n      D2 = _mm_unpackhi_pd(tmp,D2);\n    }\n    \n    __m128d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2,     // partial invese of the sub-matrices\n            DC1, DC2, AB1, AB2;\n    __m128d dA, dB, dC, dD;     // determinant of the sub-matrices\n    __m128d det, d1, d2, rd;\n\n    //  dA = |A|\n    dA = _mm_shuffle_pd(A2, A2, 1);\n    dA = _mm_mul_pd(A1, dA);\n    dA = _mm_sub_sd(dA, _mm_shuffle_pd(dA,dA,3));\n    //  dB = |B|\n    dB = _mm_shuffle_pd(B2, B2, 1);\n    dB = _mm_mul_pd(B1, dB);\n    dB = _mm_sub_sd(dB, _mm_shuffle_pd(dB,dB,3));\n\n    //  AB = A# * B\n    AB1 = 
_mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,3));\n    AB2 = _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,0));\n    AB1 = _mm_sub_pd(AB1, _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,3)));\n    AB2 = _mm_sub_pd(AB2, _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,0)));\n\n    //  dC = |C|\n    dC = _mm_shuffle_pd(C2, C2, 1);\n    dC = _mm_mul_pd(C1, dC);\n    dC = _mm_sub_sd(dC, _mm_shuffle_pd(dC,dC,3));\n    //  dD = |D|\n    dD = _mm_shuffle_pd(D2, D2, 1);\n    dD = _mm_mul_pd(D1, dD);\n    dD = _mm_sub_sd(dD, _mm_shuffle_pd(dD,dD,3));\n\n    //  DC = D# * C\n    DC1 = _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,3));\n    DC2 = _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,0));\n    DC1 = _mm_sub_pd(DC1, _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,3)));\n    DC2 = _mm_sub_pd(DC2, _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,0)));\n\n    //  rd = trace(AB*DC) = trace(A#*B*D#*C)\n    d1 = _mm_mul_pd(AB1, _mm_shuffle_pd(DC1, DC2, 0));\n    d2 = _mm_mul_pd(AB2, _mm_shuffle_pd(DC1, DC2, 3));\n    rd = _mm_add_pd(d1, d2);\n    rd = _mm_add_sd(rd, _mm_shuffle_pd(rd, rd,3));\n\n    //  iD = C*A#*B\n    iD1 = _mm_mul_pd(AB1, _mm_shuffle_pd(C1,C1,0));\n    iD2 = _mm_mul_pd(AB1, _mm_shuffle_pd(C2,C2,0));\n    iD1 = _mm_add_pd(iD1, _mm_mul_pd(AB2, _mm_shuffle_pd(C1,C1,3)));\n    iD2 = _mm_add_pd(iD2, _mm_mul_pd(AB2, _mm_shuffle_pd(C2,C2,3)));\n\n    //  iA = B*D#*C\n    iA1 = _mm_mul_pd(DC1, _mm_shuffle_pd(B1,B1,0));\n    iA2 = _mm_mul_pd(DC1, _mm_shuffle_pd(B2,B2,0));\n    iA1 = _mm_add_pd(iA1, _mm_mul_pd(DC2, _mm_shuffle_pd(B1,B1,3)));\n    iA2 = _mm_add_pd(iA2, _mm_mul_pd(DC2, _mm_shuffle_pd(B2,B2,3)));\n\n    //  iD = D*|A| - C*A#*B\n    dA = _mm_shuffle_pd(dA,dA,0);\n    iD1 = _mm_sub_pd(_mm_mul_pd(D1, dA), iD1);\n    iD2 = _mm_sub_pd(_mm_mul_pd(D2, dA), iD2);\n\n    //  iA = A*|D| - B*D#*C;\n    dD = _mm_shuffle_pd(dD,dD,0);\n    iA1 = _mm_sub_pd(_mm_mul_pd(A1, dD), iA1);\n    iA2 = _mm_sub_pd(_mm_mul_pd(A2, dD), iA2);\n\n    d1 = _mm_mul_sd(dA, dD);\n    d2 = _mm_mul_sd(dB, dC);\n\n    //  iB = D * (A#B)# = D*B#*A\n    iB1 = _mm_mul_pd(D1, 
_mm_shuffle_pd(AB2,AB1,1));\n    iB2 = _mm_mul_pd(D2, _mm_shuffle_pd(AB2,AB1,1));\n    iB1 = _mm_sub_pd(iB1, _mm_mul_pd(_mm_shuffle_pd(D1,D1,1), _mm_shuffle_pd(AB2,AB1,2)));\n    iB2 = _mm_sub_pd(iB2, _mm_mul_pd(_mm_shuffle_pd(D2,D2,1), _mm_shuffle_pd(AB2,AB1,2)));\n\n    //  det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)\n    det = _mm_add_sd(d1, d2);\n    det = _mm_sub_sd(det, rd);\n\n    //  iC = A * (D#C)# = A*C#*D\n    iC1 = _mm_mul_pd(A1, _mm_shuffle_pd(DC2,DC1,1));\n    iC2 = _mm_mul_pd(A2, _mm_shuffle_pd(DC2,DC1,1));\n    iC1 = _mm_sub_pd(iC1, _mm_mul_pd(_mm_shuffle_pd(A1,A1,1), _mm_shuffle_pd(DC2,DC1,2)));\n    iC2 = _mm_sub_pd(iC2, _mm_mul_pd(_mm_shuffle_pd(A2,A2,1), _mm_shuffle_pd(DC2,DC1,2)));\n\n    rd = _mm_div_sd(_mm_set_sd(1.0), det);\n//     #ifdef ZERO_SINGULAR\n//         rd = _mm_and_pd(_mm_cmpneq_sd(det,_mm_setzero_pd()), rd);\n//     #endif\n    rd = _mm_shuffle_pd(rd,rd,0);\n\n    //  iB = C*|B| - D*B#*A\n    dB = _mm_shuffle_pd(dB,dB,0);\n    iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1);\n    iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2);\n\n    d1 = _mm_xor_pd(rd, _Sign_PN);\n    d2 = _mm_xor_pd(rd, _Sign_NP);\n\n    //  iC = B*|C| - A*C#*D;\n    dC = _mm_shuffle_pd(dC,dC,0);\n    iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1);\n    iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2);\n\n    Index res_stride = result.outerStride();\n    double* res = result.data();\n    pstoret<double, Packet2d, ResultAlignment>(res+0,             _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1));\n    pstoret<double, Packet2d, ResultAlignment>(res+res_stride,    _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2));\n    pstoret<double, Packet2d, ResultAlignment>(res+2,             _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1));\n    pstoret<double, Packet2d, ResultAlignment>(res+res_stride+2,  _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2));\n    pstoret<double, Packet2d, ResultAlignment>(res+2*res_stride,  _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1));\n    pstoret<double, Packet2d, 
ResultAlignment>(res+3*res_stride,  _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2));\n    pstoret<double, Packet2d, ResultAlignment>(res+2*res_stride+2,_mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1));\n    pstoret<double, Packet2d, ResultAlignment>(res+3*res_stride+2,_mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2));\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_INVERSE_SSE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/MetisSupport/MetisSupport.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n#ifndef METIS_SUPPORT_H\n#define METIS_SUPPORT_H\n\nnamespace Eigen {\n/**\n * Get the fill-reducing ordering from the METIS package\n * \n * If A is the original matrix and Ap is the permuted matrix, \n * the fill-reducing permutation is defined as follows :\n * Row (column) i of A is the matperm(i) row (column) of Ap. \n * WARNING: As computed by METIS, this corresponds to the vector iperm (instead of perm)\n */\ntemplate <typename StorageIndex>\nclass MetisOrdering\n{\npublic:\n  typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> PermutationType;\n  typedef Matrix<StorageIndex,Dynamic,1> IndexVector; \n  \n  template <typename MatrixType>\n  void get_symmetrized_graph(const MatrixType& A)\n  {\n    Index m = A.cols(); \n    eigen_assert((A.rows() == A.cols()) && \"ONLY FOR SQUARED MATRICES\");\n    // Get the transpose of the input matrix \n    MatrixType At = A.transpose(); \n    // Get the number of nonzeros elements in each row/col of At+A\n    Index TotNz = 0; \n    IndexVector visited(m); \n    visited.setConstant(-1); \n    for (StorageIndex j = 0; j < m; j++)\n    {\n      // Compute the union structure of of A(j,:) and At(j,:)\n      visited(j) = j; // Do not include the diagonal element\n      // Get the nonzeros in row/column j of A\n      for (typename MatrixType::InnerIterator it(A, j); it; ++it)\n      {\n        Index idx = it.index(); // Get the row index (for column major) or column index (for row major)\n        if (visited(idx) != j ) \n        {\n          visited(idx) = j; \n          ++TotNz; \n        }\n      }\n      //Get the nonzeros in 
row/column j of At\n      for (typename MatrixType::InnerIterator it(At, j); it; ++it)\n      {\n        Index idx = it.index(); \n        if(visited(idx) != j)\n        {\n          visited(idx) = j; \n          ++TotNz; \n        }\n      }\n    }\n    // Reserve place for A + At\n    m_indexPtr.resize(m+1);\n    m_innerIndices.resize(TotNz); \n\n    // Now compute the real adjacency list of each column/row \n    visited.setConstant(-1); \n    StorageIndex CurNz = 0; \n    for (StorageIndex j = 0; j < m; j++)\n    {\n      m_indexPtr(j) = CurNz; \n      \n      visited(j) = j; // Do not include the diagonal element\n      // Add the pattern of row/column j of A to A+At\n      for (typename MatrixType::InnerIterator it(A,j); it; ++it)\n      {\n        StorageIndex idx = it.index(); // Get the row index (for column major) or column index (for row major)\n        if (visited(idx) != j ) \n        {\n          visited(idx) = j; \n          m_innerIndices(CurNz) = idx; \n          CurNz++; \n        }\n      }\n      //Add the pattern of row/column j of At to A+At\n      for (typename MatrixType::InnerIterator it(At, j); it; ++it)\n      {\n        StorageIndex idx = it.index(); \n        if(visited(idx) != j)\n        {\n          visited(idx) = j; \n          m_innerIndices(CurNz) = idx; \n          ++CurNz; \n        }\n      }\n    }\n    m_indexPtr(m) = CurNz;    \n  }\n  \n  template <typename MatrixType>\n  void operator() (const MatrixType& A, PermutationType& matperm)\n  {\n     StorageIndex m = internal::convert_index<StorageIndex>(A.cols()); // must be StorageIndex, because it is passed by address to METIS\n     IndexVector perm(m),iperm(m); \n    // First, symmetrize the matrix graph. 
\n     get_symmetrized_graph(A); \n     int output_error;\n     \n     // Call the fill-reducing routine from METIS \n     output_error = METIS_NodeND(&m, m_indexPtr.data(), m_innerIndices.data(), NULL, NULL, perm.data(), iperm.data());\n     \n    if(output_error != METIS_OK) \n    {\n      //FIXME The ordering interface should define a class of possible errors \n     std::cerr << \"ERROR WHILE CALLING THE METIS PACKAGE \\n\"; \n     return; \n    }\n    \n    // Get the fill-reducing permutation \n    //NOTE:  If Ap is the permuted matrix then perm and iperm vectors are defined as follows \n    // Row (column) i of Ap is the perm(i) row(column) of A, and row (column) i of A is the iperm(i) row(column) of Ap\n    \n     matperm.resize(m);\n     for (int j = 0; j < m; j++)\n       matperm.indices()(iperm(j)) = j;\n   \n  }\n  \n  protected:\n    IndexVector m_indexPtr; // Pointer to the adjacenccy list of each row/column\n    IndexVector m_innerIndices; // Adjacency list \n};\n\n}// end namespace eigen \n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/OrderingMethods/Amd.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n\n/*\n\nNOTE: this routine has been adapted from the CSparse library:\n\nCopyright (c) 2006, Timothy A. Davis.\nhttp://www.suitesparse.com\n\nCSparse is free software; you can redistribute it and/or\nmodify it under the terms of the GNU Lesser General Public\nLicense as published by the Free Software Foundation; either\nversion 2.1 of the License, or (at your option) any later version.\n\nCSparse is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\nLesser General Public License for more details.\n\nYou should have received a copy of the GNU Lesser General Public\nLicense along with this Module; if not, write to the Free Software\nFoundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA\n\n*/\n\n#include \"../Core/util/NonMPL2.h\"\n\n#ifndef EIGEN_SPARSE_AMD_H\n#define EIGEN_SPARSE_AMD_H\n\nnamespace Eigen { \n\nnamespace internal {\n  \ntemplate<typename T> inline T amd_flip(const T& i) { return -i-2; }\ntemplate<typename T> inline T amd_unflip(const T& i) { return i<0 ? 
amd_flip(i) : i; }\ntemplate<typename T0, typename T1> inline bool amd_marked(const T0* w, const T1& j) { return w[j]<0; }\ntemplate<typename T0, typename T1> inline void amd_mark(const T0* w, const T1& j) { return w[j] = amd_flip(w[j]); }\n\n/* clear w */\ntemplate<typename StorageIndex>\nstatic StorageIndex cs_wclear (StorageIndex mark, StorageIndex lemax, StorageIndex *w, StorageIndex n)\n{\n  StorageIndex k;\n  if(mark < 2 || (mark + lemax < 0))\n  {\n    for(k = 0; k < n; k++)\n      if(w[k] != 0)\n        w[k] = 1;\n    mark = 2;\n  }\n  return (mark);     /* at this point, w[0..n-1] < mark holds */\n}\n\n/* depth-first search and postorder of a tree rooted at node j */\ntemplate<typename StorageIndex>\nStorageIndex cs_tdfs(StorageIndex j, StorageIndex k, StorageIndex *head, const StorageIndex *next, StorageIndex *post, StorageIndex *stack)\n{\n  StorageIndex i, p, top = 0;\n  if(!head || !next || !post || !stack) return (-1);    /* check inputs */\n  stack[0] = j;                 /* place j on the stack */\n  while (top >= 0)                /* while (stack is not empty) */\n  {\n    p = stack[top];           /* p = top of stack */\n    i = head[p];              /* i = youngest child of p */\n    if(i == -1)\n    {\n      top--;                 /* p has no unordered children left */\n      post[k++] = p;        /* node p is the kth postordered node */\n    }\n    else\n    {\n      head[p] = next[i];   /* remove i from children of p */\n      stack[++top] = i;     /* start dfs on child node i */\n    }\n  }\n  return k;\n}\n\n\n/** \\internal\n  * \\ingroup OrderingMethods_Module \n  * Approximate minimum degree ordering algorithm.\n  *\n  * \\param[in] C the input selfadjoint matrix stored in compressed column major format.\n  * \\param[out] perm the permutation P reducing the fill-in of the input matrix \\a C\n  *\n  * Note that the input matrix \\a C must be complete, that is both the upper and lower parts have to be stored, as well as the diagonal 
entries.\n  * On exit the values of C are destroyed */\ntemplate<typename Scalar, typename StorageIndex>\nvoid minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,StorageIndex>& C, PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm)\n{\n  using std::sqrt;\n  \n  StorageIndex d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,\n                k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,\n                ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t, h;\n  \n  StorageIndex n = StorageIndex(C.cols());\n  dense = std::max<StorageIndex> (16, StorageIndex(10 * sqrt(double(n))));   /* find dense threshold */\n  dense = (std::min)(n-2, dense);\n  \n  StorageIndex cnz = StorageIndex(C.nonZeros());\n  perm.resize(n+1);\n  t = cnz + cnz/5 + 2*n;                 /* add elbow room to C */\n  C.resizeNonZeros(t);\n  \n  // get workspace\n  ei_declare_aligned_stack_constructed_variable(StorageIndex,W,8*(n+1),0);\n  StorageIndex* len     = W;\n  StorageIndex* nv      = W +   (n+1);\n  StorageIndex* next    = W + 2*(n+1);\n  StorageIndex* head    = W + 3*(n+1);\n  StorageIndex* elen    = W + 4*(n+1);\n  StorageIndex* degree  = W + 5*(n+1);\n  StorageIndex* w       = W + 6*(n+1);\n  StorageIndex* hhead   = W + 7*(n+1);\n  StorageIndex* last    = perm.indices().data();                              /* use P as workspace for last */\n  \n  /* --- Initialize quotient graph ---------------------------------------- */\n  StorageIndex* Cp = C.outerIndexPtr();\n  StorageIndex* Ci = C.innerIndexPtr();\n  for(k = 0; k < n; k++)\n    len[k] = Cp[k+1] - Cp[k];\n  len[n] = 0;\n  nzmax = t;\n  \n  for(i = 0; i <= n; i++)\n  {\n    head[i]   = -1;                     // degree list i is empty\n    last[i]   = -1;\n    next[i]   = -1;\n    hhead[i]  = -1;                     // hash list i is empty \n    nv[i]     = 1;                      // node i is just one node\n    w[i]      = 1;                      // node i is alive\n    elen[i]   = 0;           
           // Ek of node i is empty\n    degree[i] = len[i];                 // degree of node i\n  }\n  mark = internal::cs_wclear<StorageIndex>(0, 0, w, n);         /* clear w */\n  \n  /* --- Initialize degree lists ------------------------------------------ */\n  for(i = 0; i < n; i++)\n  {\n    bool has_diag = false;\n    for(p = Cp[i]; p<Cp[i+1]; ++p)\n      if(Ci[p]==i)\n      {\n        has_diag = true;\n        break;\n      }\n   \n    d = degree[i];\n    if(d == 1 && has_diag)           /* node i is empty */\n    {\n      elen[i] = -2;                 /* element i is dead */\n      nel++;\n      Cp[i] = -1;                   /* i is a root of assembly tree */\n      w[i] = 0;\n    }\n    else if(d > dense || !has_diag)  /* node i is dense or has no structural diagonal element */\n    {\n      nv[i] = 0;                    /* absorb i into element n */\n      elen[i] = -1;                 /* node i is dead */\n      nel++;\n      Cp[i] = amd_flip (n);\n      nv[n]++;\n    }\n    else\n    {\n      if(head[d] != -1) last[head[d]] = i;\n      next[i] = head[d];           /* put node i in degree list d */\n      head[d] = i;\n    }\n  }\n  \n  elen[n] = -2;                         /* n is a dead element */\n  Cp[n] = -1;                           /* n is a root of assembly tree */\n  w[n] = 0;                             /* n is a dead element */\n  \n  while (nel < n)                         /* while (selecting pivots) do */\n  {\n    /* --- Select node of minimum approximate degree -------------------- */\n    for(k = -1; mindeg < n && (k = head[mindeg]) == -1; mindeg++) {}\n    if(next[k] != -1) last[next[k]] = -1;\n    head[mindeg] = next[k];          /* remove k from degree list */\n    elenk = elen[k];                  /* elenk = |Ek| */\n    nvk = nv[k];                      /* # of nodes k represents */\n    nel += nvk;                        /* nv[k] nodes of A eliminated */\n    \n    /* --- Garbage collection 
------------------------------------------- */\n    if(elenk > 0 && cnz + mindeg >= nzmax)\n    {\n      for(j = 0; j < n; j++)\n      {\n        if((p = Cp[j]) >= 0)      /* j is a live node or element */\n        {\n          Cp[j] = Ci[p];          /* save first entry of object */\n          Ci[p] = amd_flip (j);    /* first entry is now amd_flip(j) */\n        }\n      }\n      for(q = 0, p = 0; p < cnz; ) /* scan all of memory */\n      {\n        if((j = amd_flip (Ci[p++])) >= 0)  /* found object j */\n        {\n          Ci[q] = Cp[j];       /* restore first entry of object */\n          Cp[j] = q++;          /* new pointer to object j */\n          for(k3 = 0; k3 < len[j]-1; k3++) Ci[q++] = Ci[p++];\n        }\n      }\n      cnz = q;                       /* Ci[cnz...nzmax-1] now free */\n    }\n    \n    /* --- Construct new element ---------------------------------------- */\n    dk = 0;\n    nv[k] = -nvk;                     /* flag k as in Lk */\n    p = Cp[k];\n    pk1 = (elenk == 0) ? 
p : cnz;      /* do in place if elen[k] == 0 */\n    pk2 = pk1;\n    for(k1 = 1; k1 <= elenk + 1; k1++)\n    {\n      if(k1 > elenk)\n      {\n        e = k;                     /* search the nodes in k */\n        pj = p;                    /* list of nodes starts at Ci[pj]*/\n        ln = len[k] - elenk;      /* length of list of nodes in k */\n      }\n      else\n      {\n        e = Ci[p++];              /* search the nodes in e */\n        pj = Cp[e];\n        ln = len[e];              /* length of list of nodes in e */\n      }\n      for(k2 = 1; k2 <= ln; k2++)\n      {\n        i = Ci[pj++];\n        if((nvi = nv[i]) <= 0) continue; /* node i dead, or seen */\n        dk += nvi;                 /* degree[Lk] += size of node i */\n        nv[i] = -nvi;             /* negate nv[i] to denote i in Lk*/\n        Ci[pk2++] = i;            /* place i in Lk */\n        if(next[i] != -1) last[next[i]] = last[i];\n        if(last[i] != -1)         /* remove i from degree list */\n        {\n          next[last[i]] = next[i];\n        }\n        else\n        {\n          head[degree[i]] = next[i];\n        }\n      }\n      if(e != k)\n      {\n        Cp[e] = amd_flip (k);      /* absorb e into k */\n        w[e] = 0;                 /* e is now a dead element */\n      }\n    }\n    if(elenk != 0) cnz = pk2;         /* Ci[cnz...nzmax] is free */\n    degree[k] = dk;                   /* external degree of k - |Lk\\i| */\n    Cp[k] = pk1;                      /* element k is in Ci[pk1..pk2-1] */\n    len[k] = pk2 - pk1;\n    elen[k] = -2;                     /* k is now an element */\n    \n    /* --- Find set differences ----------------------------------------- */\n    mark = internal::cs_wclear<StorageIndex>(mark, lemax, w, n);  /* clear w if necessary */\n    for(pk = pk1; pk < pk2; pk++)    /* scan 1: find |Le\\Lk| */\n    {\n      i = Ci[pk];\n      if((eln = elen[i]) <= 0) continue;/* skip if elen[i] empty */\n      nvi = -nv[i];                      /* 
nv[i] was negated */\n      wnvi = mark - nvi;\n      for(p = Cp[i]; p <= Cp[i] + eln - 1; p++)  /* scan Ei */\n      {\n        e = Ci[p];\n        if(w[e] >= mark)\n        {\n          w[e] -= nvi;          /* decrement |Le\\Lk| */\n        }\n        else if(w[e] != 0)        /* ensure e is a live element */\n        {\n          w[e] = degree[e] + wnvi; /* 1st time e seen in scan 1 */\n        }\n      }\n    }\n    \n    /* --- Degree update ------------------------------------------------ */\n    for(pk = pk1; pk < pk2; pk++)    /* scan2: degree update */\n    {\n      i = Ci[pk];                   /* consider node i in Lk */\n      p1 = Cp[i];\n      p2 = p1 + elen[i] - 1;\n      pn = p1;\n      for(h = 0, d = 0, p = p1; p <= p2; p++)    /* scan Ei */\n      {\n        e = Ci[p];\n        if(w[e] != 0)             /* e is an unabsorbed element */\n        {\n          dext = w[e] - mark;   /* dext = |Le\\Lk| */\n          if(dext > 0)\n          {\n            d += dext;         /* sum up the set differences */\n            Ci[pn++] = e;     /* keep e in Ei */\n            h += e;            /* compute the hash of node i */\n          }\n          else\n          {\n            Cp[e] = amd_flip (k);  /* aggressive absorb. 
e->k */\n            w[e] = 0;             /* e is a dead element */\n          }\n        }\n      }\n      elen[i] = pn - p1 + 1;        /* elen[i] = |Ei| */\n      p3 = pn;\n      p4 = p1 + len[i];\n      for(p = p2 + 1; p < p4; p++) /* prune edges in Ai */\n      {\n        j = Ci[p];\n        if((nvj = nv[j]) <= 0) continue; /* node j dead or in Lk */\n        d += nvj;                  /* degree(i) += |j| */\n        Ci[pn++] = j;             /* place j in node list of i */\n        h += j;                    /* compute hash for node i */\n      }\n      if(d == 0)                     /* check for mass elimination */\n      {\n        Cp[i] = amd_flip (k);      /* absorb i into k */\n        nvi = -nv[i];\n        dk -= nvi;                 /* |Lk| -= |i| */\n        nvk += nvi;                /* |k| += nv[i] */\n        nel += nvi;\n        nv[i] = 0;\n        elen[i] = -1;             /* node i is dead */\n      }\n      else\n      {\n        degree[i] = std::min<StorageIndex> (degree[i], d);   /* update degree(i) */\n        Ci[pn] = Ci[p3];         /* move first node to end */\n        Ci[p3] = Ci[p1];         /* move 1st el. to end of Ei */\n        Ci[p1] = k;               /* add k as 1st element in of Ei */\n        len[i] = pn - p1 + 1;     /* new len of adj. 
list of node i */\n        h %= n;                    /* finalize hash of i */\n        next[i] = hhead[h];      /* place i in hash bucket */\n        hhead[h] = i;\n        last[i] = h;      /* save hash of i in last[i] */\n      }\n    }                                   /* scan2 is done */\n    degree[k] = dk;                   /* finalize |Lk| */\n    lemax = std::max<StorageIndex>(lemax, dk);\n    mark = internal::cs_wclear<StorageIndex>(mark+lemax, lemax, w, n);    /* clear w */\n    \n    /* --- Supernode detection ------------------------------------------ */\n    for(pk = pk1; pk < pk2; pk++)\n    {\n      i = Ci[pk];\n      if(nv[i] >= 0) continue;         /* skip if i is dead */\n      h = last[i];                      /* scan hash bucket of node i */\n      i = hhead[h];\n      hhead[h] = -1;                    /* hash bucket will be empty */\n      for(; i != -1 && next[i] != -1; i = next[i], mark++)\n      {\n        ln = len[i];\n        eln = elen[i];\n        for(p = Cp[i]+1; p <= Cp[i] + ln-1; p++) w[Ci[p]] = mark;\n        jlast = i;\n        for(j = next[i]; j != -1; ) /* compare i with all j */\n        {\n          ok = (len[j] == ln) && (elen[j] == eln);\n          for(p = Cp[j] + 1; ok && p <= Cp[j] + ln - 1; p++)\n          {\n            if(w[Ci[p]] != mark) ok = 0;    /* compare i and j*/\n          }\n          if(ok)                     /* i and j are identical */\n          {\n            Cp[j] = amd_flip (i);  /* absorb j into i */\n            nv[i] += nv[j];\n            nv[j] = 0;\n            elen[j] = -1;         /* node j is dead */\n            j = next[j];          /* delete j from hash bucket */\n            next[jlast] = j;\n          }\n          else\n          {\n            jlast = j;             /* j and i are different */\n            j = next[j];\n          }\n        }\n      }\n    }\n    \n    /* --- Finalize new element------------------------------------------ */\n    for(p = pk1, pk = pk1; pk < pk2; pk++)   /* 
finalize Lk */\n    {\n      i = Ci[pk];\n      if((nvi = -nv[i]) <= 0) continue;/* skip if i is dead */\n      nv[i] = nvi;                      /* restore nv[i] */\n      d = degree[i] + dk - nvi;         /* compute external degree(i) */\n      d = std::min<StorageIndex> (d, n - nel - nvi);\n      if(head[d] != -1) last[head[d]] = i;\n      next[i] = head[d];               /* put i back in degree list */\n      last[i] = -1;\n      head[d] = i;\n      mindeg = std::min<StorageIndex> (mindeg, d);       /* find new minimum degree */\n      degree[i] = d;\n      Ci[p++] = i;                      /* place i in Lk */\n    }\n    nv[k] = nvk;                      /* # nodes absorbed into k */\n    if((len[k] = p-pk1) == 0)         /* length of adj list of element k*/\n    {\n      Cp[k] = -1;                   /* k is a root of the tree */\n      w[k] = 0;                     /* k is now a dead element */\n    }\n    if(elenk != 0) cnz = p;           /* free unused space in Lk */\n  }\n  \n  /* --- Postordering ----------------------------------------------------- */\n  for(i = 0; i < n; i++) Cp[i] = amd_flip (Cp[i]);/* fix assembly tree */\n  for(j = 0; j <= n; j++) head[j] = -1;\n  for(j = n; j >= 0; j--)              /* place unordered nodes in lists */\n  {\n    if(nv[j] > 0) continue;          /* skip if j is an element */\n    next[j] = head[Cp[j]];          /* place j in list of its parent */\n    head[Cp[j]] = j;\n  }\n  for(e = n; e >= 0; e--)              /* place elements in lists */\n  {\n    if(nv[e] <= 0) continue;         /* skip unless e is an element */\n    if(Cp[e] != -1)\n    {\n      next[e] = head[Cp[e]];      /* place e in list of its parent */\n      head[Cp[e]] = e;\n    }\n  }\n  for(k = 0, i = 0; i <= n; i++)       /* postorder the assembly tree */\n  {\n    if(Cp[i] == -1) k = internal::cs_tdfs<StorageIndex>(i, k, head, next, perm.indices().data(), w);\n  }\n  \n  perm.indices().conservativeResize(n);\n}\n\n} // namespace internal\n\n} // 
end namespace Eigen\n\n#endif // EIGEN_SPARSE_AMD_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/OrderingMethods/Eigen_Colamd.h",
    "content": "// // This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Desire Nuentsa Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n// This file is modified from the colamd/symamd library. The copyright is below\n\n//   The authors of the code itself are Stefan I. Larimore and Timothy A.\n//   Davis (davis@cise.ufl.edu), University of Florida.  The algorithm was\n//   developed in collaboration with John Gilbert, Xerox PARC, and Esmond\n//   Ng, Oak Ridge National Laboratory.\n// \n//     Date:\n// \n//   September 8, 2003.  Version 2.3.\n// \n//     Acknowledgements:\n// \n//   This work was supported by the National Science Foundation, under\n//   grants DMS-9504974 and DMS-9803599.\n// \n//     Notice:\n// \n//   Copyright (c) 1998-2003 by the University of Florida.\n//   All Rights Reserved.\n// \n//   THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY\n//   EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.\n// \n//   Permission is hereby granted to use, copy, modify, and/or distribute\n//   this program, provided that the Copyright, this License, and the\n//   Availability of the original version is retained on all copies and made\n//   accessible to the end-user of any code or package that includes COLAMD\n//   or any modified version of COLAMD. 
\n// \n//     Availability:\n// \n//   The colamd/symamd library is available at\n// \n//       http://www.suitesparse.com\n\n  \n#ifndef EIGEN_COLAMD_H\n#define EIGEN_COLAMD_H\n\nnamespace internal {\n/* Ensure that debugging is turned off: */\n#ifndef COLAMD_NDEBUG\n#define COLAMD_NDEBUG\n#endif /* NDEBUG */\n/* ========================================================================== */\n/* === Knob and statistics definitions ====================================== */\n/* ========================================================================== */\n\n/* size of the knobs [ ] array.  Only knobs [0..1] are currently used. */\n#define COLAMD_KNOBS 20\n\n/* number of output statistics.  Only stats [0..6] are currently used. */\n#define COLAMD_STATS 20 \n\n/* knobs [0] and stats [0]: dense row knob and output statistic. */\n#define COLAMD_DENSE_ROW 0\n\n/* knobs [1] and stats [1]: dense column knob and output statistic. */\n#define COLAMD_DENSE_COL 1\n\n/* stats [2]: memory defragmentation count output statistic */\n#define COLAMD_DEFRAG_COUNT 2\n\n/* stats [3]: colamd status:  zero OK, > 0 warning or notice, < 0 error */\n#define COLAMD_STATUS 3\n\n/* stats [4..6]: error info, or info on jumbled columns */ \n#define COLAMD_INFO1 4\n#define COLAMD_INFO2 5\n#define COLAMD_INFO3 6\n\n/* error codes returned in stats [3]: */\n#define COLAMD_OK       (0)\n#define COLAMD_OK_BUT_JUMBLED     (1)\n#define COLAMD_ERROR_A_not_present    (-1)\n#define COLAMD_ERROR_p_not_present    (-2)\n#define COLAMD_ERROR_nrow_negative    (-3)\n#define COLAMD_ERROR_ncol_negative    (-4)\n#define COLAMD_ERROR_nnz_negative   (-5)\n#define COLAMD_ERROR_p0_nonzero     (-6)\n#define COLAMD_ERROR_A_too_small    (-7)\n#define COLAMD_ERROR_col_length_negative  (-8)\n#define COLAMD_ERROR_row_index_out_of_bounds  (-9)\n#define COLAMD_ERROR_out_of_memory    (-10)\n#define COLAMD_ERROR_internal_error   (-999)\n\n/* ========================================================================== */\n/* === 
Definitions ========================================================== */\n/* ========================================================================== */\n\n#define ONES_COMPLEMENT(r) (-(r)-1)\n\n/* -------------------------------------------------------------------------- */\n\n#define COLAMD_EMPTY (-1)\n\n/* Row and column status */\n#define ALIVE (0)\n#define DEAD  (-1)\n\n/* Column status */\n#define DEAD_PRINCIPAL    (-1)\n#define DEAD_NON_PRINCIPAL  (-2)\n\n/* Macros for row and column status update and checking. */\n#define ROW_IS_DEAD(r)      ROW_IS_MARKED_DEAD (Row[r].shared2.mark)\n#define ROW_IS_MARKED_DEAD(row_mark)  (row_mark < ALIVE)\n#define ROW_IS_ALIVE(r)     (Row [r].shared2.mark >= ALIVE)\n#define COL_IS_DEAD(c)      (Col [c].start < ALIVE)\n#define COL_IS_ALIVE(c)     (Col [c].start >= ALIVE)\n#define COL_IS_DEAD_PRINCIPAL(c)  (Col [c].start == DEAD_PRINCIPAL)\n#define KILL_ROW(r)     { Row [r].shared2.mark = DEAD ; }\n#define KILL_PRINCIPAL_COL(c)   { Col [c].start = DEAD_PRINCIPAL ; }\n#define KILL_NON_PRINCIPAL_COL(c) { Col [c].start = DEAD_NON_PRINCIPAL ; }\n\n/* ========================================================================== */\n/* === Colamd reporting mechanism =========================================== */\n/* ========================================================================== */\n\n// == Row and Column structures ==\ntemplate <typename IndexType>\nstruct colamd_col\n{\n  IndexType start ;   /* index for A of first row in this column, or DEAD */\n  /* if column is dead */\n  IndexType length ;  /* number of rows in this column */\n  union\n  {\n    IndexType thickness ; /* number of original columns represented by this */\n    /* col, if the column is alive */\n    IndexType parent ;  /* parent in parent tree super-column structure, if */\n    /* the column is dead */\n  } shared1 ;\n  union\n  {\n    IndexType score ; /* the score used to maintain heap, if col is alive */\n    IndexType order ; /* pivot ordering of 
this column, if col is dead */\n  } shared2 ;\n  union\n  {\n    IndexType headhash ;  /* head of a hash bucket, if col is at the head of */\n    /* a degree list */\n    IndexType hash ;  /* hash value, if col is not in a degree list */\n    IndexType prev ;  /* previous column in degree list, if col is in a */\n    /* degree list (but not at the head of a degree list) */\n  } shared3 ;\n  union\n  {\n    IndexType degree_next ; /* next column, if col is in a degree list */\n    IndexType hash_next ;   /* next column, if col is in a hash list */\n  } shared4 ;\n  \n};\n \ntemplate <typename IndexType>\nstruct Colamd_Row\n{\n  IndexType start ;   /* index for A of first col in this row */\n  IndexType length ;  /* number of principal columns in this row */\n  union\n  {\n    IndexType degree ;  /* number of principal & non-principal columns in row */\n    IndexType p ;   /* used as a row pointer in init_rows_cols () */\n  } shared1 ;\n  union\n  {\n    IndexType mark ;  /* for computing set differences and marking dead rows*/\n    IndexType first_column ;/* first column in row (used in garbage collection) */\n  } shared2 ;\n  \n};\n \n/* ========================================================================== */\n/* === Colamd recommended memory size ======================================= */\n/* ========================================================================== */\n \n/*\n  The recommended length Alen of the array A passed to colamd is given by\n  the COLAMD_RECOMMENDED (nnz, n_row, n_col) macro.  It returns -1 if any\n  argument is negative.  2*nnz space is required for the row and column\n  indices of the matrix. colamd_c (n_col) + colamd_r (n_row) space is\n  required for the Col and Row arrays, respectively, which are internal to\n  colamd.  
An additional n_col space is the minimal amount of \"elbow room\",\n  and nnz/5 more space is recommended for run time efficiency.\n  \n  This macro is not needed when using symamd.\n  \n  Explicit typecast to IndexType added Sept. 23, 2002, COLAMD version 2.2, to avoid\n  gcc -pedantic warning messages.\n*/\ntemplate <typename IndexType>\ninline IndexType colamd_c(IndexType n_col) \n{ return IndexType( ((n_col) + 1) * sizeof (colamd_col<IndexType>) / sizeof (IndexType) ) ; }\n\ntemplate <typename IndexType>\ninline IndexType  colamd_r(IndexType n_row)\n{ return IndexType(((n_row) + 1) * sizeof (Colamd_Row<IndexType>) / sizeof (IndexType)); }\n\n// Prototypes of non-user callable routines\ntemplate <typename IndexType>\nstatic IndexType init_rows_cols (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> col [], IndexType A [], IndexType p [], IndexType stats[COLAMD_STATS] ); \n\ntemplate <typename IndexType>\nstatic void init_scoring (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType head [], double knobs[COLAMD_KNOBS], IndexType *p_n_row2, IndexType *p_n_col2, IndexType *p_max_deg);\n\ntemplate <typename IndexType>\nstatic IndexType find_ordering (IndexType n_row, IndexType n_col, IndexType Alen, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType head [], IndexType n_col2, IndexType max_deg, IndexType pfree);\n\ntemplate <typename IndexType>\nstatic void order_children (IndexType n_col, colamd_col<IndexType> Col [], IndexType p []);\n\ntemplate <typename IndexType>\nstatic void detect_super_cols (colamd_col<IndexType> Col [], IndexType A [], IndexType head [], IndexType row_start, IndexType row_length ) ;\n\ntemplate <typename IndexType>\nstatic IndexType garbage_collection (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType *pfree) ;\n\ntemplate <typename 
IndexType>\nstatic inline  IndexType clear_mark (IndexType n_row, Colamd_Row<IndexType> Row [] ) ;\n\n/* === No debugging ========================================================= */\n\n#define COLAMD_DEBUG0(params) ;\n#define COLAMD_DEBUG1(params) ;\n#define COLAMD_DEBUG2(params) ;\n#define COLAMD_DEBUG3(params) ;\n#define COLAMD_DEBUG4(params) ;\n\n#define COLAMD_ASSERT(expression) ((void) 0)\n\n\n/**\n * \\brief Returns the recommended value of Alen \n * \n * Returns recommended value of Alen for use by colamd.  \n * Returns -1 if any input argument is negative.  \n * The use of this routine or macro is optional.  \n * Note that the macro uses its arguments   more than once, \n * so be careful for side effects, if you pass expressions as arguments to COLAMD_RECOMMENDED.  \n * \n * \\param nnz nonzeros in A\n * \\param n_row number of rows in A\n * \\param n_col number of columns in A\n * \\return recommended value of Alen for use by colamd\n */\ntemplate <typename IndexType>\ninline IndexType colamd_recommended ( IndexType nnz, IndexType n_row, IndexType n_col)\n{\n  if ((nnz) < 0 || (n_row) < 0 || (n_col) < 0)\n    return (-1);\n  else\n    return (2 * (nnz) + colamd_c (n_col) + colamd_r (n_row) + (n_col) + ((nnz) / 5)); \n}\n\n/**\n * \\brief set default parameters  The use of this routine is optional.\n * \n * Colamd: rows with more than (knobs [COLAMD_DENSE_ROW] * n_col)\n * entries are removed prior to ordering.  Columns with more than\n * (knobs [COLAMD_DENSE_COL] * n_row) entries are removed prior to\n * ordering, and placed last in the output column ordering. \n *\n * COLAMD_DENSE_ROW and COLAMD_DENSE_COL are defined as 0 and 1,\n * respectively, in colamd.h.  Default values of these two knobs\n * are both 0.5.  Currently, only knobs [0] and knobs [1] are\n * used, but future versions may use more knobs.  
If so, they will\n * be properly set to their defaults by the future version of\n * colamd_set_defaults, so that the code that calls colamd will\n * not need to change, assuming that you either use\n * colamd_set_defaults, or pass a (double *) NULL pointer as the\n * knobs array to colamd or symamd.\n * \n * \\param knobs parameter settings for colamd\n */\n\nstatic inline void colamd_set_defaults(double knobs[COLAMD_KNOBS])\n{\n  /* === Local variables ================================================== */\n  \n  int i ;\n\n  if (!knobs)\n  {\n    return ;      /* no knobs to initialize */\n  }\n  for (i = 0 ; i < COLAMD_KNOBS ; i++)\n  {\n    knobs [i] = 0 ;\n  }\n  knobs [COLAMD_DENSE_ROW] = 0.5 ;  /* ignore rows over 50% dense */\n  knobs [COLAMD_DENSE_COL] = 0.5 ;  /* ignore columns over 50% dense */\n}\n\n/** \n * \\brief  Computes a column ordering using the column approximate minimum degree ordering\n * \n * Computes a column ordering (Q) of A such that P(AQ)=LU or\n * (AQ)'AQ=LL' have less fill-in and require fewer floating point\n * operations than factorizing the unpermuted matrix A or A'A,\n * respectively.\n * \n * \n * \\param n_row number of rows in A\n * \\param n_col number of columns in A\n * \\param Alen, size of the array A\n * \\param A row indices of the matrix, of size ALen\n * \\param p column pointers of A, of size n_col+1\n * \\param knobs parameter settings for colamd\n * \\param stats colamd output statistics and error codes\n */\ntemplate <typename IndexType>\nstatic bool colamd(IndexType n_row, IndexType n_col, IndexType Alen, IndexType *A, IndexType *p, double knobs[COLAMD_KNOBS], IndexType stats[COLAMD_STATS])\n{\n  /* === Local variables ================================================== */\n  \n  IndexType i ;     /* loop index */\n  IndexType nnz ;     /* nonzeros in A */\n  IndexType Row_size ;    /* size of Row [], in integers */\n  IndexType Col_size ;    /* size of Col [], in integers */\n  IndexType need ;      /* minimum 
required length of A */\n  Colamd_Row<IndexType> *Row ;   /* pointer into A of Row [0..n_row] array */\n  colamd_col<IndexType> *Col ;   /* pointer into A of Col [0..n_col] array */\n  IndexType n_col2 ;    /* number of non-dense, non-empty columns */\n  IndexType n_row2 ;    /* number of non-dense, non-empty rows */\n  IndexType ngarbage ;    /* number of garbage collections performed */\n  IndexType max_deg ;   /* maximum row degree */\n  double default_knobs [COLAMD_KNOBS] ; /* default knobs array */\n  \n  \n  /* === Check the input arguments ======================================== */\n  \n  if (!stats)\n  {\n    COLAMD_DEBUG0 ((\"colamd: stats not present\\n\")) ;\n    return (false) ;\n  }\n  for (i = 0 ; i < COLAMD_STATS ; i++)\n  {\n    stats [i] = 0 ;\n  }\n  stats [COLAMD_STATUS] = COLAMD_OK ;\n  stats [COLAMD_INFO1] = -1 ;\n  stats [COLAMD_INFO2] = -1 ;\n  \n  if (!A)   /* A is not present */\n  {\n    stats [COLAMD_STATUS] = COLAMD_ERROR_A_not_present ;\n    COLAMD_DEBUG0 ((\"colamd: A not present\\n\")) ;\n    return (false) ;\n  }\n  \n  if (!p)   /* p is not present */\n  {\n    stats [COLAMD_STATUS] = COLAMD_ERROR_p_not_present ;\n    COLAMD_DEBUG0 ((\"colamd: p not present\\n\")) ;\n    return (false) ;\n  }\n  \n  if (n_row < 0)  /* n_row must be >= 0 */\n  {\n    stats [COLAMD_STATUS] = COLAMD_ERROR_nrow_negative ;\n    stats [COLAMD_INFO1] = n_row ;\n    COLAMD_DEBUG0 ((\"colamd: nrow negative %d\\n\", n_row)) ;\n    return (false) ;\n  }\n  \n  if (n_col < 0)  /* n_col must be >= 0 */\n  {\n    stats [COLAMD_STATUS] = COLAMD_ERROR_ncol_negative ;\n    stats [COLAMD_INFO1] = n_col ;\n    COLAMD_DEBUG0 ((\"colamd: ncol negative %d\\n\", n_col)) ;\n    return (false) ;\n  }\n  \n  nnz = p [n_col] ;\n  if (nnz < 0)  /* nnz must be >= 0 */\n  {\n    stats [COLAMD_STATUS] = COLAMD_ERROR_nnz_negative ;\n    stats [COLAMD_INFO1] = nnz ;\n    COLAMD_DEBUG0 ((\"colamd: number of entries negative %d\\n\", nnz)) ;\n    return (false) ;\n  }\n  \n  if (p 
[0] != 0)\n  {\n    stats [COLAMD_STATUS] = COLAMD_ERROR_p0_nonzero ;\n    stats [COLAMD_INFO1] = p [0] ;\n    COLAMD_DEBUG0 ((\"colamd: p[0] not zero %d\\n\", p [0])) ;\n    return (false) ;\n  }\n  \n  /* === If no knobs, set default knobs =================================== */\n  \n  if (!knobs)\n  {\n    colamd_set_defaults (default_knobs) ;\n    knobs = default_knobs ;\n  }\n  \n  /* === Allocate the Row and Col arrays from array A ===================== */\n  \n  Col_size = colamd_c (n_col) ;\n  Row_size = colamd_r (n_row) ;\n  need = 2*nnz + n_col + Col_size + Row_size ;\n  \n  if (need > Alen)\n  {\n    /* not enough space in array A to perform the ordering */\n    stats [COLAMD_STATUS] = COLAMD_ERROR_A_too_small ;\n    stats [COLAMD_INFO1] = need ;\n    stats [COLAMD_INFO2] = Alen ;\n    COLAMD_DEBUG0 ((\"colamd: Need Alen >= %d, given only Alen = %d\\n\", need,Alen));\n    return (false) ;\n  }\n  \n  Alen -= Col_size + Row_size ;\n  Col = (colamd_col<IndexType> *) &A [Alen] ;\n  Row = (Colamd_Row<IndexType> *) &A [Alen + Col_size] ;\n\n  /* === Construct the row and column data structures ===================== */\n  \n  if (!Eigen::internal::init_rows_cols (n_row, n_col, Row, Col, A, p, stats))\n  {\n    /* input matrix is invalid */\n    COLAMD_DEBUG0 ((\"colamd: Matrix invalid\\n\")) ;\n    return (false) ;\n  }\n  \n  /* === Initialize scores, kill dense rows/columns ======================= */\n\n  Eigen::internal::init_scoring (n_row, n_col, Row, Col, A, p, knobs,\n\t\t&n_row2, &n_col2, &max_deg) ;\n  \n  /* === Order the supercolumns =========================================== */\n  \n  ngarbage = Eigen::internal::find_ordering (n_row, n_col, Alen, Row, Col, A, p,\n\t\t\t    n_col2, max_deg, 2*nnz) ;\n  \n  /* === Order the non-principal columns ================================== */\n  \n  Eigen::internal::order_children (n_col, Col, p) ;\n  \n  /* === Return statistics in stats ======================================= */\n  \n  stats 
[COLAMD_DENSE_ROW] = n_row - n_row2 ;\n  stats [COLAMD_DENSE_COL] = n_col - n_col2 ;\n  stats [COLAMD_DEFRAG_COUNT] = ngarbage ;\n  COLAMD_DEBUG0 ((\"colamd: done.\\n\")) ; \n  return (true) ;\n}\n\n/* ========================================================================== */\n/* === NON-USER-CALLABLE ROUTINES: ========================================== */\n/* ========================================================================== */\n\n/* There are no user-callable routines beyond this point in the file */\n\n\n/* ========================================================================== */\n/* === init_rows_cols ======================================================= */\n/* ========================================================================== */\n\n/*\n  Takes the column form of the matrix in A and creates the row form of the\n  matrix.  Also, row and column attributes are stored in the Col and Row\n  structs.  If the columns are un-sorted or contain duplicate row indices,\n  this routine will also sort and remove duplicate row indices from the\n  column form of the matrix.  Returns false if the matrix is invalid,\n  true otherwise.  
Not user-callable.\n*/\ntemplate <typename IndexType>\nstatic IndexType init_rows_cols  /* returns true if OK, or false otherwise */\n  (\n    /* === Parameters ======================================================= */\n\n    IndexType n_row,      /* number of rows of A */\n    IndexType n_col,      /* number of columns of A */\n    Colamd_Row<IndexType> Row [],    /* of size n_row+1 */\n    colamd_col<IndexType> Col [],    /* of size n_col+1 */\n    IndexType A [],     /* row indices of A, of size Alen */\n    IndexType p [],     /* pointers to columns in A, of size n_col+1 */\n    IndexType stats [COLAMD_STATS]  /* colamd statistics */ \n    )\n{\n  /* === Local variables ================================================== */\n\n  IndexType col ;     /* a column index */\n  IndexType row ;     /* a row index */\n  IndexType *cp ;     /* a column pointer */\n  IndexType *cp_end ;   /* a pointer to the end of a column */\n  IndexType *rp ;     /* a row pointer */\n  IndexType *rp_end ;   /* a pointer to the end of a row */\n  IndexType last_row ;    /* previous row */\n\n  /* === Initialize columns, and check column pointers ==================== */\n\n  for (col = 0 ; col < n_col ; col++)\n  {\n    Col [col].start = p [col] ;\n    Col [col].length = p [col+1] - p [col] ;\n\n    if ((Col [col].length) < 0) // extra parentheses to work-around gcc bug 10200\n    {\n      /* column pointers must be non-decreasing */\n      stats [COLAMD_STATUS] = COLAMD_ERROR_col_length_negative ;\n      stats [COLAMD_INFO1] = col ;\n      stats [COLAMD_INFO2] = Col [col].length ;\n      COLAMD_DEBUG0 ((\"colamd: col %d length %d < 0\\n\", col, Col [col].length)) ;\n      return (false) ;\n    }\n\n    Col [col].shared1.thickness = 1 ;\n    Col [col].shared2.score = 0 ;\n    Col [col].shared3.prev = COLAMD_EMPTY ;\n    Col [col].shared4.degree_next = COLAMD_EMPTY ;\n  }\n\n  /* p [0..n_col] no longer needed, used as \"head\" in subsequent routines */\n\n  /* === Scan columns, compute 
row degrees, and check row indices ========= */\n\n  stats [COLAMD_INFO3] = 0 ;  /* number of duplicate or unsorted row indices*/\n\n  for (row = 0 ; row < n_row ; row++)\n  {\n    Row [row].length = 0 ;\n    Row [row].shared2.mark = -1 ;\n  }\n\n  for (col = 0 ; col < n_col ; col++)\n  {\n    last_row = -1 ;\n\n    cp = &A [p [col]] ;\n    cp_end = &A [p [col+1]] ;\n\n    while (cp < cp_end)\n    {\n      row = *cp++ ;\n\n      /* make sure row indices within range */\n      if (row < 0 || row >= n_row)\n      {\n\tstats [COLAMD_STATUS] = COLAMD_ERROR_row_index_out_of_bounds ;\n\tstats [COLAMD_INFO1] = col ;\n\tstats [COLAMD_INFO2] = row ;\n\tstats [COLAMD_INFO3] = n_row ;\n\tCOLAMD_DEBUG0 ((\"colamd: row %d col %d out of bounds\\n\", row, col)) ;\n\treturn (false) ;\n      }\n\n      if (row <= last_row || Row [row].shared2.mark == col)\n      {\n\t/* row index are unsorted or repeated (or both), thus col */\n\t/* is jumbled.  This is a notice, not an error condition. */\n\tstats [COLAMD_STATUS] = COLAMD_OK_BUT_JUMBLED ;\n\tstats [COLAMD_INFO1] = col ;\n\tstats [COLAMD_INFO2] = row ;\n\t(stats [COLAMD_INFO3]) ++ ;\n\tCOLAMD_DEBUG1 ((\"colamd: row %d col %d unsorted/duplicate\\n\",row,col));\n      }\n\n      if (Row [row].shared2.mark != col)\n      {\n\tRow [row].length++ ;\n      }\n      else\n      {\n\t/* this is a repeated entry in the column, */\n\t/* it will be removed */\n\tCol [col].length-- ;\n      }\n\n      /* mark the row as having been seen in this column */\n      Row [row].shared2.mark = col ;\n\n      last_row = row ;\n    }\n  }\n\n  /* === Compute row pointers ============================================= */\n\n  /* row form of the matrix starts directly after the column */\n  /* form of matrix in A */\n  Row [0].start = p [n_col] ;\n  Row [0].shared1.p = Row [0].start ;\n  Row [0].shared2.mark = -1 ;\n  for (row = 1 ; row < n_row ; row++)\n  {\n    Row [row].start = Row [row-1].start + Row [row-1].length ;\n    Row [row].shared1.p = Row 
[row].start ;\n    Row [row].shared2.mark = -1 ;\n  }\n\n  /* === Create row form ================================================== */\n\n  if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED)\n  {\n    /* if cols jumbled, watch for repeated row indices */\n    for (col = 0 ; col < n_col ; col++)\n    {\n      cp = &A [p [col]] ;\n      cp_end = &A [p [col+1]] ;\n      while (cp < cp_end)\n      {\n\trow = *cp++ ;\n\tif (Row [row].shared2.mark != col)\n\t{\n\t  A [(Row [row].shared1.p)++] = col ;\n\t  Row [row].shared2.mark = col ;\n\t}\n      }\n    }\n  }\n  else\n  {\n    /* if cols not jumbled, we don't need the mark (this is faster) */\n    for (col = 0 ; col < n_col ; col++)\n    {\n      cp = &A [p [col]] ;\n      cp_end = &A [p [col+1]] ;\n      while (cp < cp_end)\n      {\n\tA [(Row [*cp++].shared1.p)++] = col ;\n      }\n    }\n  }\n\n  /* === Clear the row marks and set row degrees ========================== */\n\n  for (row = 0 ; row < n_row ; row++)\n  {\n    Row [row].shared2.mark = 0 ;\n    Row [row].shared1.degree = Row [row].length ;\n  }\n\n  /* === See if we need to re-create columns ============================== */\n\n  if (stats [COLAMD_STATUS] == COLAMD_OK_BUT_JUMBLED)\n  {\n    COLAMD_DEBUG0 ((\"colamd: reconstructing column form, matrix jumbled\\n\")) ;\n\n\n    /* === Compute col pointers ========================================= */\n\n    /* col form of the matrix starts at A [0]. */\n    /* Note, we may have a gap between the col form and the row */\n    /* form if there were duplicate entries, if so, it will be */\n    /* removed upon the first garbage collection */\n    Col [0].start = 0 ;\n    p [0] = Col [0].start ;\n    for (col = 1 ; col < n_col ; col++)\n    {\n      /* note that the lengths here are for pruned columns, i.e. 
*/\n      /* no duplicate row indices will exist for these columns */\n      Col [col].start = Col [col-1].start + Col [col-1].length ;\n      p [col] = Col [col].start ;\n    }\n\n    /* === Re-create col form =========================================== */\n\n    for (row = 0 ; row < n_row ; row++)\n    {\n      rp = &A [Row [row].start] ;\n      rp_end = rp + Row [row].length ;\n      while (rp < rp_end)\n      {\n\tA [(p [*rp++])++] = row ;\n      }\n    }\n  }\n\n  /* === Done.  Matrix is not (or no longer) jumbled ====================== */\n\n  return (true) ;\n}\n\n\n/* ========================================================================== */\n/* === init_scoring ========================================================= */\n/* ========================================================================== */\n\n/*\n  Kills dense or empty columns and rows, calculates an initial score for\n  each column, and places all columns in the degree lists.  Not user-callable.\n*/\ntemplate <typename IndexType>\nstatic void init_scoring\n  (\n    /* === Parameters ======================================================= */\n\n    IndexType n_row,      /* number of rows of A */\n    IndexType n_col,      /* number of columns of A */\n    Colamd_Row<IndexType> Row [],    /* of size n_row+1 */\n    colamd_col<IndexType> Col [],    /* of size n_col+1 */\n    IndexType A [],     /* column form and row form of A */\n    IndexType head [],    /* of size n_col+1 */\n    double knobs [COLAMD_KNOBS],/* parameters */\n    IndexType *p_n_row2,    /* number of non-dense, non-empty rows */\n    IndexType *p_n_col2,    /* number of non-dense, non-empty columns */\n    IndexType *p_max_deg    /* maximum row degree */\n    )\n{\n  /* === Local variables ================================================== */\n\n  IndexType c ;     /* a column index */\n  IndexType r, row ;    /* a row index */\n  IndexType *cp ;     /* a column pointer */\n  IndexType deg ;     /* degree of a row or column 
*/\n  IndexType *cp_end ;   /* a pointer to the end of a column */\n  IndexType *new_cp ;   /* new column pointer */\n  IndexType col_length ;    /* length of pruned column */\n  IndexType score ;     /* current column score */\n  IndexType n_col2 ;    /* number of non-dense, non-empty columns */\n  IndexType n_row2 ;    /* number of non-dense, non-empty rows */\n  IndexType dense_row_count ; /* remove rows with more entries than this */\n  IndexType dense_col_count ; /* remove cols with more entries than this */\n  IndexType min_score ;   /* smallest column score */\n  IndexType max_deg ;   /* maximum row degree */\n  IndexType next_col ;    /* Used to add to degree list.*/\n\n\n  /* === Extract knobs ==================================================== */\n\n  dense_row_count = numext::maxi(IndexType(0), numext::mini(IndexType(knobs [COLAMD_DENSE_ROW] * n_col), n_col)) ;\n  dense_col_count = numext::maxi(IndexType(0), numext::mini(IndexType(knobs [COLAMD_DENSE_COL] * n_row), n_row)) ;\n  COLAMD_DEBUG1 ((\"colamd: densecount: %d %d\\n\", dense_row_count, dense_col_count)) ;\n  max_deg = 0 ;\n  n_col2 = n_col ;\n  n_row2 = n_row ;\n\n  /* === Kill empty columns =============================================== */\n\n  /* Put the empty columns at the end in their natural order, so that LU */\n  /* factorization can proceed as far as possible. 
*/\n  for (c = n_col-1 ; c >= 0 ; c--)\n  {\n    deg = Col [c].length ;\n    if (deg == 0)\n    {\n      /* this is a empty column, kill and order it last */\n      Col [c].shared2.order = --n_col2 ;\n      KILL_PRINCIPAL_COL (c) ;\n    }\n  }\n  COLAMD_DEBUG1 ((\"colamd: null columns killed: %d\\n\", n_col - n_col2)) ;\n\n  /* === Kill dense columns =============================================== */\n\n  /* Put the dense columns at the end, in their natural order */\n  for (c = n_col-1 ; c >= 0 ; c--)\n  {\n    /* skip any dead columns */\n    if (COL_IS_DEAD (c))\n    {\n      continue ;\n    }\n    deg = Col [c].length ;\n    if (deg > dense_col_count)\n    {\n      /* this is a dense column, kill and order it last */\n      Col [c].shared2.order = --n_col2 ;\n      /* decrement the row degrees */\n      cp = &A [Col [c].start] ;\n      cp_end = cp + Col [c].length ;\n      while (cp < cp_end)\n      {\n\tRow [*cp++].shared1.degree-- ;\n      }\n      KILL_PRINCIPAL_COL (c) ;\n    }\n  }\n  COLAMD_DEBUG1 ((\"colamd: Dense and null columns killed: %d\\n\", n_col - n_col2)) ;\n\n  /* === Kill dense and empty rows ======================================== */\n\n  for (r = 0 ; r < n_row ; r++)\n  {\n    deg = Row [r].shared1.degree ;\n    COLAMD_ASSERT (deg >= 0 && deg <= n_col) ;\n    if (deg > dense_row_count || deg == 0)\n    {\n      /* kill a dense or empty row */\n      KILL_ROW (r) ;\n      --n_row2 ;\n    }\n    else\n    {\n      /* keep track of max degree of remaining rows */\n      max_deg = numext::maxi(max_deg, deg) ;\n    }\n  }\n  COLAMD_DEBUG1 ((\"colamd: Dense and null rows killed: %d\\n\", n_row - n_row2)) ;\n\n  /* === Compute initial column scores ==================================== */\n\n  /* At this point the row degrees are accurate.  They reflect the number */\n  /* of \"live\" (non-dense) columns in each row.  No empty rows exist. */\n  /* Some \"live\" columns may contain only dead rows, however.  
These are */\n  /* pruned in the code below. */\n\n  /* now find the initial matlab score for each column */\n  for (c = n_col-1 ; c >= 0 ; c--)\n  {\n    /* skip dead column */\n    if (COL_IS_DEAD (c))\n    {\n      continue ;\n    }\n    score = 0 ;\n    cp = &A [Col [c].start] ;\n    new_cp = cp ;\n    cp_end = cp + Col [c].length ;\n    while (cp < cp_end)\n    {\n      /* get a row */\n      row = *cp++ ;\n      /* skip if dead */\n      if (ROW_IS_DEAD (row))\n      {\n\tcontinue ;\n      }\n      /* compact the column */\n      *new_cp++ = row ;\n      /* add row's external degree */\n      score += Row [row].shared1.degree - 1 ;\n      /* guard against integer overflow */\n      score = numext::mini(score, n_col) ;\n    }\n    /* determine pruned column length */\n    col_length = (IndexType) (new_cp - &A [Col [c].start]) ;\n    if (col_length == 0)\n    {\n      /* a newly-made null column (all rows in this col are \"dense\" */\n      /* and have already been killed) */\n      COLAMD_DEBUG2 ((\"Newly null killed: %d\\n\", c)) ;\n      Col [c].shared2.order = --n_col2 ;\n      KILL_PRINCIPAL_COL (c) ;\n    }\n    else\n    {\n      /* set column length and set score */\n      COLAMD_ASSERT (score >= 0) ;\n      COLAMD_ASSERT (score <= n_col) ;\n      Col [c].length = col_length ;\n      Col [c].shared2.score = score ;\n    }\n  }\n  COLAMD_DEBUG1 ((\"colamd: Dense, null, and newly-null columns killed: %d\\n\",\n\t\t  n_col-n_col2)) ;\n\n  /* At this point, all empty rows and columns are dead.  All live columns */\n  /* are \"clean\" (containing no dead rows) and simplicial (no supercolumns */\n  /* yet).  Rows may contain dead columns, but all live rows contain at */\n  /* least one live column. 
*/\n\n  /* === Initialize degree lists ========================================== */\n\n\n  /* clear the hash buckets */\n  for (c = 0 ; c <= n_col ; c++)\n  {\n    head [c] = COLAMD_EMPTY ;\n  }\n  min_score = n_col ;\n  /* place in reverse order, so low column indices are at the front */\n  /* of the lists.  This is to encourage natural tie-breaking */\n  for (c = n_col-1 ; c >= 0 ; c--)\n  {\n    /* only add principal columns to degree lists */\n    if (COL_IS_ALIVE (c))\n    {\n      COLAMD_DEBUG4 ((\"place %d score %d minscore %d ncol %d\\n\",\n\t\t      c, Col [c].shared2.score, min_score, n_col)) ;\n\n      /* === Add columns score to DList =============================== */\n\n      score = Col [c].shared2.score ;\n\n      COLAMD_ASSERT (min_score >= 0) ;\n      COLAMD_ASSERT (min_score <= n_col) ;\n      COLAMD_ASSERT (score >= 0) ;\n      COLAMD_ASSERT (score <= n_col) ;\n      COLAMD_ASSERT (head [score] >= COLAMD_EMPTY) ;\n\n      /* now add this column to dList at proper score location */\n      next_col = head [score] ;\n      Col [c].shared3.prev = COLAMD_EMPTY ;\n      Col [c].shared4.degree_next = next_col ;\n\n      /* if there already was a column with the same score, set its */\n      /* previous pointer to this new column */\n      if (next_col != COLAMD_EMPTY)\n      {\n\tCol [next_col].shared3.prev = c ;\n      }\n      head [score] = c ;\n\n      /* see if this score is less than current min */\n      min_score = numext::mini(min_score, score) ;\n\n\n    }\n  }\n\n\n  /* === Return number of remaining columns, and max row degree =========== */\n\n  *p_n_col2 = n_col2 ;\n  *p_n_row2 = n_row2 ;\n  *p_max_deg = max_deg ;\n}\n\n\n/* ========================================================================== */\n/* === find_ordering ======================================================== */\n/* ========================================================================== */\n\n/*\n  Order the principal columns of the supercolumn form of the matrix\n 
 (no supercolumns on input).  Uses a minimum approximate column minimum\n  degree ordering method.  Not user-callable.\n*/\ntemplate <typename IndexType>\nstatic IndexType find_ordering /* return the number of garbage collections */\n  (\n    /* === Parameters ======================================================= */\n\n    IndexType n_row,      /* number of rows of A */\n    IndexType n_col,      /* number of columns of A */\n    IndexType Alen,     /* size of A, 2*nnz + n_col or larger */\n    Colamd_Row<IndexType> Row [],    /* of size n_row+1 */\n    colamd_col<IndexType> Col [],    /* of size n_col+1 */\n    IndexType A [],     /* column form and row form of A */\n    IndexType head [],    /* of size n_col+1 */\n    IndexType n_col2,     /* Remaining columns to order */\n    IndexType max_deg,    /* Maximum row degree */\n    IndexType pfree     /* index of first free slot (2*nnz on entry) */\n    )\n{\n  /* === Local variables ================================================== */\n\n  IndexType k ;     /* current pivot ordering step */\n  IndexType pivot_col ;   /* current pivot column */\n  IndexType *cp ;     /* a column pointer */\n  IndexType *rp ;     /* a row pointer */\n  IndexType pivot_row ;   /* current pivot row */\n  IndexType *new_cp ;   /* modified column pointer */\n  IndexType *new_rp ;   /* modified row pointer */\n  IndexType pivot_row_start ; /* pointer to start of pivot row */\n  IndexType pivot_row_degree ;  /* number of columns in pivot row */\n  IndexType pivot_row_length ;  /* number of supercolumns in pivot row */\n  IndexType pivot_col_score ; /* score of pivot column */\n  IndexType needed_memory ;   /* free space needed for pivot row */\n  IndexType *cp_end ;   /* pointer to the end of a column */\n  IndexType *rp_end ;   /* pointer to the end of a row */\n  IndexType row ;     /* a row index */\n  IndexType col ;     /* a column index */\n  IndexType max_score ;   /* maximum possible score */\n  IndexType cur_score ;   /* score 
of current column */\n  unsigned int hash ;   /* hash value for supernode detection */\n  IndexType head_column ;   /* head of hash bucket */\n  IndexType first_col ;   /* first column in hash bucket */\n  IndexType tag_mark ;    /* marker value for mark array */\n  IndexType row_mark ;    /* Row [row].shared2.mark */\n  IndexType set_difference ;  /* set difference size of row with pivot row */\n  IndexType min_score ;   /* smallest column score */\n  IndexType col_thickness ;   /* \"thickness\" (no. of columns in a supercol) */\n  IndexType max_mark ;    /* maximum value of tag_mark */\n  IndexType pivot_col_thickness ; /* number of columns represented by pivot col */\n  IndexType prev_col ;    /* Used by Dlist operations. */\n  IndexType next_col ;    /* Used by Dlist operations. */\n  IndexType ngarbage ;    /* number of garbage collections performed */\n\n\n  /* === Initialization and clear mark ==================================== */\n\n  max_mark = INT_MAX - n_col ;  /* INT_MAX defined in <limits.h> */\n  tag_mark = Eigen::internal::clear_mark (n_row, Row) ;\n  min_score = 0 ;\n  ngarbage = 0 ;\n  COLAMD_DEBUG1 ((\"colamd: Ordering, n_col2=%d\\n\", n_col2)) ;\n\n  /* === Order the columns ================================================ */\n\n  for (k = 0 ; k < n_col2 ; /* 'k' is incremented below */)\n  {\n\n    /* === Select pivot column, and order it ============================ */\n\n    /* make sure degree list isn't empty */\n    COLAMD_ASSERT (min_score >= 0) ;\n    COLAMD_ASSERT (min_score <= n_col) ;\n    COLAMD_ASSERT (head [min_score] >= COLAMD_EMPTY) ;\n\n    /* get pivot column from head of minimum degree list */\n    while (min_score < n_col && head [min_score] == COLAMD_EMPTY)\n    {\n      min_score++ ;\n    }\n    pivot_col = head [min_score] ;\n    COLAMD_ASSERT (pivot_col >= 0 && pivot_col <= n_col) ;\n    next_col = Col [pivot_col].shared4.degree_next ;\n    head [min_score] = next_col ;\n    if (next_col != COLAMD_EMPTY)\n    {\n      
Col [next_col].shared3.prev = COLAMD_EMPTY ;\n    }\n\n    COLAMD_ASSERT (COL_IS_ALIVE (pivot_col)) ;\n    COLAMD_DEBUG3 ((\"Pivot col: %d\\n\", pivot_col)) ;\n\n    /* remember score for defrag check */\n    pivot_col_score = Col [pivot_col].shared2.score ;\n\n    /* the pivot column is the kth column in the pivot order */\n    Col [pivot_col].shared2.order = k ;\n\n    /* increment order count by column thickness */\n    pivot_col_thickness = Col [pivot_col].shared1.thickness ;\n    k += pivot_col_thickness ;\n    COLAMD_ASSERT (pivot_col_thickness > 0) ;\n\n    /* === Garbage_collection, if necessary ============================= */\n\n    needed_memory = numext::mini(pivot_col_score, n_col - k) ;\n    if (pfree + needed_memory >= Alen)\n    {\n      pfree = Eigen::internal::garbage_collection (n_row, n_col, Row, Col, A, &A [pfree]) ;\n      ngarbage++ ;\n      /* after garbage collection we will have enough */\n      COLAMD_ASSERT (pfree + needed_memory < Alen) ;\n      /* garbage collection has wiped out the Row[].shared2.mark array */\n      tag_mark = Eigen::internal::clear_mark (n_row, Row) ;\n\n    }\n\n    /* === Compute pivot row pattern ==================================== */\n\n    /* get starting location for this new merged row */\n    pivot_row_start = pfree ;\n\n    /* initialize new row counts to zero */\n    pivot_row_degree = 0 ;\n\n    /* tag pivot column as having been visited so it isn't included */\n    /* in merged pivot row */\n    Col [pivot_col].shared1.thickness = -pivot_col_thickness ;\n\n    /* pivot row is the union of all rows in the pivot column pattern */\n    cp = &A [Col [pivot_col].start] ;\n    cp_end = cp + Col [pivot_col].length ;\n    while (cp < cp_end)\n    {\n      /* get a row */\n      row = *cp++ ;\n      COLAMD_DEBUG4 ((\"Pivot col pattern %d %d\\n\", ROW_IS_ALIVE (row), row)) ;\n      /* skip if row is dead */\n      if (ROW_IS_DEAD (row))\n      {\n\tcontinue ;\n      }\n      rp = &A [Row [row].start] ;\n      
rp_end = rp + Row [row].length ;\n      while (rp < rp_end)\n      {\n\t/* get a column */\n\tcol = *rp++ ;\n\t/* add the column, if alive and untagged */\n\tcol_thickness = Col [col].shared1.thickness ;\n\tif (col_thickness > 0 && COL_IS_ALIVE (col))\n\t{\n\t  /* tag column in pivot row */\n\t  Col [col].shared1.thickness = -col_thickness ;\n\t  COLAMD_ASSERT (pfree < Alen) ;\n\t  /* place column in pivot row */\n\t  A [pfree++] = col ;\n\t  pivot_row_degree += col_thickness ;\n\t}\n      }\n    }\n\n    /* clear tag on pivot column */\n    Col [pivot_col].shared1.thickness = pivot_col_thickness ;\n    max_deg = numext::maxi(max_deg, pivot_row_degree) ;\n\n\n    /* === Kill all rows used to construct pivot row ==================== */\n\n    /* also kill pivot row, temporarily */\n    cp = &A [Col [pivot_col].start] ;\n    cp_end = cp + Col [pivot_col].length ;\n    while (cp < cp_end)\n    {\n      /* may be killing an already dead row */\n      row = *cp++ ;\n      COLAMD_DEBUG3 ((\"Kill row in pivot col: %d\\n\", row)) ;\n      KILL_ROW (row) ;\n    }\n\n    /* === Select a row index to use as the new pivot row =============== */\n\n    pivot_row_length = pfree - pivot_row_start ;\n    if (pivot_row_length > 0)\n    {\n      /* pick the \"pivot\" row arbitrarily (first row in col) */\n      pivot_row = A [Col [pivot_col].start] ;\n      COLAMD_DEBUG3 ((\"Pivotal row is %d\\n\", pivot_row)) ;\n    }\n    else\n    {\n      /* there is no pivot row, since it is of zero length */\n      pivot_row = COLAMD_EMPTY ;\n      COLAMD_ASSERT (pivot_row_length == 0) ;\n    }\n    COLAMD_ASSERT (Col [pivot_col].length > 0 || pivot_row_length == 0) ;\n\n    /* === Approximate degree computation =============================== */\n\n    /* Here begins the computation of the approximate degree.  
The column */\n    /* score is the sum of the pivot row \"length\", plus the size of the */\n    /* set differences of each row in the column minus the pattern of the */\n    /* pivot row itself.  The column (\"thickness\") itself is also */\n    /* excluded from the column score (we thus use an approximate */\n    /* external degree). */\n\n    /* The time taken by the following code (compute set differences, and */\n    /* add them up) is proportional to the size of the data structure */\n    /* being scanned - that is, the sum of the sizes of each column in */\n    /* the pivot row.  Thus, the amortized time to compute a column score */\n    /* is proportional to the size of that column (where size, in this */\n    /* context, is the column \"length\", or the number of row indices */\n    /* in that column).  The number of row indices in a column is */\n    /* monotonically non-decreasing, from the length of the original */\n    /* column on input to colamd. */\n\n    /* === Compute set differences ====================================== */\n\n    COLAMD_DEBUG3 ((\"** Computing set differences phase. **\\n\")) ;\n\n    /* pivot row is currently dead - it will be revived later. 
*/\n\n    COLAMD_DEBUG3 ((\"Pivot row: \")) ;\n    /* for each column in pivot row */\n    rp = &A [pivot_row_start] ;\n    rp_end = rp + pivot_row_length ;\n    while (rp < rp_end)\n    {\n      col = *rp++ ;\n      COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ;\n      COLAMD_DEBUG3 ((\"Col: %d\\n\", col)) ;\n\n      /* clear tags used to construct pivot row pattern */\n      col_thickness = -Col [col].shared1.thickness ;\n      COLAMD_ASSERT (col_thickness > 0) ;\n      Col [col].shared1.thickness = col_thickness ;\n\n      /* === Remove column from degree list =========================== */\n\n      cur_score = Col [col].shared2.score ;\n      prev_col = Col [col].shared3.prev ;\n      next_col = Col [col].shared4.degree_next ;\n      COLAMD_ASSERT (cur_score >= 0) ;\n      COLAMD_ASSERT (cur_score <= n_col) ;\n      COLAMD_ASSERT (cur_score >= COLAMD_EMPTY) ;\n      if (prev_col == COLAMD_EMPTY)\n      {\n\thead [cur_score] = next_col ;\n      }\n      else\n      {\n\tCol [prev_col].shared4.degree_next = next_col ;\n      }\n      if (next_col != COLAMD_EMPTY)\n      {\n\tCol [next_col].shared3.prev = prev_col ;\n      }\n\n      /* === Scan the column ========================================== */\n\n      cp = &A [Col [col].start] ;\n      cp_end = cp + Col [col].length ;\n      while (cp < cp_end)\n      {\n\t/* get a row */\n\trow = *cp++ ;\n\trow_mark = Row [row].shared2.mark ;\n\t/* skip if dead */\n\tif (ROW_IS_MARKED_DEAD (row_mark))\n\t{\n\t  continue ;\n\t}\n\tCOLAMD_ASSERT (row != pivot_row) ;\n\tset_difference = row_mark - tag_mark ;\n\t/* check if the row has been seen yet */\n\tif (set_difference < 0)\n\t{\n\t  COLAMD_ASSERT (Row [row].shared1.degree <= max_deg) ;\n\t  set_difference = Row [row].shared1.degree ;\n\t}\n\t/* subtract column thickness from this row's set difference */\n\tset_difference -= col_thickness ;\n\tCOLAMD_ASSERT (set_difference >= 0) ;\n\t/* absorb this row if the set difference becomes zero */\n\tif 
(set_difference == 0)\n\t{\n\t  COLAMD_DEBUG3 ((\"aggressive absorption. Row: %d\\n\", row)) ;\n\t  KILL_ROW (row) ;\n\t}\n\telse\n\t{\n\t  /* save the new mark */\n\t  Row [row].shared2.mark = set_difference + tag_mark ;\n\t}\n      }\n    }\n\n\n    /* === Add up set differences for each column ======================= */\n\n    COLAMD_DEBUG3 ((\"** Adding set differences phase. **\\n\")) ;\n\n    /* for each column in pivot row */\n    rp = &A [pivot_row_start] ;\n    rp_end = rp + pivot_row_length ;\n    while (rp < rp_end)\n    {\n      /* get a column */\n      col = *rp++ ;\n      COLAMD_ASSERT (COL_IS_ALIVE (col) && col != pivot_col) ;\n      hash = 0 ;\n      cur_score = 0 ;\n      cp = &A [Col [col].start] ;\n      /* compact the column */\n      new_cp = cp ;\n      cp_end = cp + Col [col].length ;\n\n      COLAMD_DEBUG4 ((\"Adding set diffs for Col: %d.\\n\", col)) ;\n\n      while (cp < cp_end)\n      {\n\t/* get a row */\n\trow = *cp++ ;\n\tCOLAMD_ASSERT(row >= 0 && row < n_row) ;\n\trow_mark = Row [row].shared2.mark ;\n\t/* skip if dead */\n\tif (ROW_IS_MARKED_DEAD (row_mark))\n\t{\n\t  continue ;\n\t}\n\tCOLAMD_ASSERT (row_mark > tag_mark) ;\n\t/* compact the column */\n\t*new_cp++ = row ;\n\t/* compute hash function */\n\thash += row ;\n\t/* add set difference */\n\tcur_score += row_mark - tag_mark ;\n\t/* integer overflow... */\n\tcur_score = numext::mini(cur_score, n_col) ;\n      }\n\n      /* recompute the column's length */\n      Col [col].length = (IndexType) (new_cp - &A [Col [col].start]) ;\n\n      /* === Further mass elimination ================================= */\n\n      if (Col [col].length == 0)\n      {\n\tCOLAMD_DEBUG4 ((\"further mass elimination. 
Col: %d\\n\", col)) ;\n\t/* nothing left but the pivot row in this column */\n\tKILL_PRINCIPAL_COL (col) ;\n\tpivot_row_degree -= Col [col].shared1.thickness ;\n\tCOLAMD_ASSERT (pivot_row_degree >= 0) ;\n\t/* order it */\n\tCol [col].shared2.order = k ;\n\t/* increment order count by column thickness */\n\tk += Col [col].shared1.thickness ;\n      }\n      else\n      {\n\t/* === Prepare for supercolumn detection ==================== */\n\n\tCOLAMD_DEBUG4 ((\"Preparing supercol detection for Col: %d.\\n\", col)) ;\n\n\t/* save score so far */\n\tCol [col].shared2.score = cur_score ;\n\n\t/* add column to hash table, for supercolumn detection */\n\thash %= n_col + 1 ;\n\n\tCOLAMD_DEBUG4 ((\" Hash = %d, n_col = %d.\\n\", hash, n_col)) ;\n\tCOLAMD_ASSERT (hash <= n_col) ;\n\n\thead_column = head [hash] ;\n\tif (head_column > COLAMD_EMPTY)\n\t{\n\t  /* degree list \"hash\" is non-empty, use prev (shared3) of */\n\t  /* first column in degree list as head of hash bucket */\n\t  first_col = Col [head_column].shared3.headhash ;\n\t  Col [head_column].shared3.headhash = col ;\n\t}\n\telse\n\t{\n\t  /* degree list \"hash\" is empty, use head as hash bucket */\n\t  first_col = - (head_column + 2) ;\n\t  head [hash] = - (col + 2) ;\n\t}\n\tCol [col].shared4.hash_next = first_col ;\n\n\t/* save hash function in Col [col].shared3.hash */\n\tCol [col].shared3.hash = (IndexType) hash ;\n\tCOLAMD_ASSERT (COL_IS_ALIVE (col)) ;\n      }\n    }\n\n    /* The approximate external column degree is now computed.  */\n\n    /* === Supercolumn detection ======================================== */\n\n    COLAMD_DEBUG3 ((\"** Supercolumn detection phase. 
**\\n\")) ;\n\n    Eigen::internal::detect_super_cols (Col, A, head, pivot_row_start, pivot_row_length) ;\n\n    /* === Kill the pivotal column ====================================== */\n\n    KILL_PRINCIPAL_COL (pivot_col) ;\n\n    /* === Clear mark =================================================== */\n\n    tag_mark += (max_deg + 1) ;\n    if (tag_mark >= max_mark)\n    {\n      COLAMD_DEBUG2 ((\"clearing tag_mark\\n\")) ;\n      tag_mark = Eigen::internal::clear_mark (n_row, Row) ;\n    }\n\n    /* === Finalize the new pivot row, and column scores ================ */\n\n    COLAMD_DEBUG3 ((\"** Finalize scores phase. **\\n\")) ;\n\n    /* for each column in pivot row */\n    rp = &A [pivot_row_start] ;\n    /* compact the pivot row */\n    new_rp = rp ;\n    rp_end = rp + pivot_row_length ;\n    while (rp < rp_end)\n    {\n      col = *rp++ ;\n      /* skip dead columns */\n      if (COL_IS_DEAD (col))\n      {\n\tcontinue ;\n      }\n      *new_rp++ = col ;\n      /* add new pivot row to column */\n      A [Col [col].start + (Col [col].length++)] = pivot_row ;\n\n      /* retrieve score so far and add on pivot row's degree. */\n      /* (we wait until here for this in case the pivot */\n      /* row's degree was reduced due to mass elimination). 
*/\n      cur_score = Col [col].shared2.score + pivot_row_degree ;\n\n      /* calculate the max possible score as the number of */\n      /* external columns minus the 'k' value minus the */\n      /* columns thickness */\n      max_score = n_col - k - Col [col].shared1.thickness ;\n\n      /* make the score the external degree of the union-of-rows */\n      cur_score -= Col [col].shared1.thickness ;\n\n      /* make sure score is less or equal than the max score */\n      cur_score = numext::mini(cur_score, max_score) ;\n      COLAMD_ASSERT (cur_score >= 0) ;\n\n      /* store updated score */\n      Col [col].shared2.score = cur_score ;\n\n      /* === Place column back in degree list ========================= */\n\n      COLAMD_ASSERT (min_score >= 0) ;\n      COLAMD_ASSERT (min_score <= n_col) ;\n      COLAMD_ASSERT (cur_score >= 0) ;\n      COLAMD_ASSERT (cur_score <= n_col) ;\n      COLAMD_ASSERT (head [cur_score] >= COLAMD_EMPTY) ;\n      next_col = head [cur_score] ;\n      Col [col].shared4.degree_next = next_col ;\n      Col [col].shared3.prev = COLAMD_EMPTY ;\n      if (next_col != COLAMD_EMPTY)\n      {\n\tCol [next_col].shared3.prev = col ;\n      }\n      head [cur_score] = col ;\n\n      /* see if this score is less than current min */\n      min_score = numext::mini(min_score, cur_score) ;\n\n    }\n\n    /* === Resurrect the new pivot row ================================== */\n\n    if (pivot_row_degree > 0)\n    {\n      /* update pivot row length to reflect any cols that were killed */\n      /* during super-col detection and mass elimination */\n      Row [pivot_row].start  = pivot_row_start ;\n      Row [pivot_row].length = (IndexType) (new_rp - &A[pivot_row_start]) ;\n      Row [pivot_row].shared1.degree = pivot_row_degree ;\n      Row [pivot_row].shared2.mark = 0 ;\n      /* pivot row is no longer dead */\n    }\n  }\n\n  /* === All principal columns have now been ordered ====================== */\n\n  return (ngarbage) ;\n}\n\n\n/* 
========================================================================== */\n/* === order_children ======================================================= */\n/* ========================================================================== */\n\n/*\n  The find_ordering routine has ordered all of the principal columns (the\n  representatives of the supercolumns).  The non-principal columns have not\n  yet been ordered.  This routine orders those columns by walking up the\n  parent tree (a column is a child of the column which absorbed it).  The\n  final permutation vector is then placed in p [0 ... n_col-1], with p [0]\n  being the first column, and p [n_col-1] being the last.  It doesn't look\n  like it at first glance, but be assured that this routine takes time linear\n  in the number of columns.  Although not immediately obvious, the time\n  taken by this routine is O (n_col), that is, linear in the number of\n  columns.  Not user-callable.\n*/\ntemplate <typename IndexType>\nstatic inline  void order_children\n(\n  /* === Parameters ======================================================= */\n\n  IndexType n_col,      /* number of columns of A */\n  colamd_col<IndexType> Col [],    /* of size n_col+1 */\n  IndexType p []      /* p [0 ... 
n_col-1] is the column permutation*/\n  )\n{\n  /* === Local variables ================================================== */\n\n  IndexType i ;     /* loop counter for all columns */\n  IndexType c ;     /* column index */\n  IndexType parent ;    /* index of column's parent */\n  IndexType order ;     /* column's order */\n\n  /* === Order each non-principal column ================================== */\n\n  for (i = 0 ; i < n_col ; i++)\n  {\n    /* find an un-ordered non-principal column */\n    COLAMD_ASSERT (COL_IS_DEAD (i)) ;\n    if (!COL_IS_DEAD_PRINCIPAL (i) && Col [i].shared2.order == COLAMD_EMPTY)\n    {\n      parent = i ;\n      /* once found, find its principal parent */\n      do\n      {\n\tparent = Col [parent].shared1.parent ;\n      } while (!COL_IS_DEAD_PRINCIPAL (parent)) ;\n\n      /* now, order all un-ordered non-principal columns along path */\n      /* to this parent.  collapse tree at the same time */\n      c = i ;\n      /* get order of parent */\n      order = Col [parent].shared2.order ;\n\n      do\n      {\n\tCOLAMD_ASSERT (Col [c].shared2.order == COLAMD_EMPTY) ;\n\n\t/* order this column */\n\tCol [c].shared2.order = order++ ;\n\t/* collaps tree */\n\tCol [c].shared1.parent = parent ;\n\n\t/* get immediate parent of this column */\n\tc = Col [c].shared1.parent ;\n\n\t/* continue until we hit an ordered column.  
There are */\n\t/* guarranteed not to be anymore unordered columns */\n\t/* above an ordered column */\n      } while (Col [c].shared2.order == COLAMD_EMPTY) ;\n\n      /* re-order the super_col parent to largest order for this group */\n      Col [parent].shared2.order = order ;\n    }\n  }\n\n  /* === Generate the permutation ========================================= */\n\n  for (c = 0 ; c < n_col ; c++)\n  {\n    p [Col [c].shared2.order] = c ;\n  }\n}\n\n\n/* ========================================================================== */\n/* === detect_super_cols ==================================================== */\n/* ========================================================================== */\n\n/*\n  Detects supercolumns by finding matches between columns in the hash buckets.\n  Check amongst columns in the set A [row_start ... row_start + row_length-1].\n  The columns under consideration are currently *not* in the degree lists,\n  and have already been placed in the hash buckets.\n\n  The hash bucket for columns whose hash function is equal to h is stored\n  as follows:\n\n  if head [h] is >= 0, then head [h] contains a degree list, so:\n\n  head [h] is the first column in degree bucket h.\n  Col [head [h]].headhash gives the first column in hash bucket h.\n\n  otherwise, the degree list is empty, and:\n\n  -(head [h] + 2) is the first column in hash bucket h.\n\n  For a column c in a hash bucket, Col [c].shared3.prev is NOT a \"previous\n  column\" pointer.  Col [c].shared3.hash is used instead as the hash number\n  for that column.  
The value of Col [c].shared4.hash_next is the next column\n  in the same hash bucket.\n\n  Assuming no, or \"few\" hash collisions, the time taken by this routine is\n  linear in the sum of the sizes (lengths) of each column whose score has\n  just been computed in the approximate degree computation.\n  Not user-callable.\n*/\ntemplate <typename IndexType>\nstatic void detect_super_cols\n(\n  /* === Parameters ======================================================= */\n  \n  colamd_col<IndexType> Col [],    /* of size n_col+1 */\n  IndexType A [],     /* row indices of A */\n  IndexType head [],    /* head of degree lists and hash buckets */\n  IndexType row_start,    /* pointer to set of columns to check */\n  IndexType row_length    /* number of columns to check */\n)\n{\n  /* === Local variables ================================================== */\n\n  IndexType hash ;      /* hash value for a column */\n  IndexType *rp ;     /* pointer to a row */\n  IndexType c ;     /* a column index */\n  IndexType super_c ;   /* column index of the column to absorb into */\n  IndexType *cp1 ;      /* column pointer for column super_c */\n  IndexType *cp2 ;      /* column pointer for column c */\n  IndexType length ;    /* length of column super_c */\n  IndexType prev_c ;    /* column preceding c in hash bucket */\n  IndexType i ;     /* loop counter */\n  IndexType *rp_end ;   /* pointer to the end of the row */\n  IndexType col ;     /* a column index in the row to check */\n  IndexType head_column ;   /* first column in hash bucket or degree list */\n  IndexType first_col ;   /* first column in hash bucket */\n\n  /* === Consider each column in the row ================================== */\n\n  rp = &A [row_start] ;\n  rp_end = rp + row_length ;\n  while (rp < rp_end)\n  {\n    col = *rp++ ;\n    if (COL_IS_DEAD (col))\n    {\n      continue ;\n    }\n\n    /* get hash number for this column */\n    hash = Col [col].shared3.hash ;\n    COLAMD_ASSERT (hash <= n_col) ;\n\n 
   /* === Get the first column in this hash bucket ===================== */\n\n    head_column = head [hash] ;\n    if (head_column > COLAMD_EMPTY)\n    {\n      first_col = Col [head_column].shared3.headhash ;\n    }\n    else\n    {\n      first_col = - (head_column + 2) ;\n    }\n\n    /* === Consider each column in the hash bucket ====================== */\n\n    for (super_c = first_col ; super_c != COLAMD_EMPTY ;\n\t super_c = Col [super_c].shared4.hash_next)\n    {\n      COLAMD_ASSERT (COL_IS_ALIVE (super_c)) ;\n      COLAMD_ASSERT (Col [super_c].shared3.hash == hash) ;\n      length = Col [super_c].length ;\n\n      /* prev_c is the column preceding column c in the hash bucket */\n      prev_c = super_c ;\n\n      /* === Compare super_c with all columns after it ================ */\n\n      for (c = Col [super_c].shared4.hash_next ;\n\t   c != COLAMD_EMPTY ; c = Col [c].shared4.hash_next)\n      {\n\tCOLAMD_ASSERT (c != super_c) ;\n\tCOLAMD_ASSERT (COL_IS_ALIVE (c)) ;\n\tCOLAMD_ASSERT (Col [c].shared3.hash == hash) ;\n\n\t/* not identical if lengths or scores are different */\n\tif (Col [c].length != length ||\n\t    Col [c].shared2.score != Col [super_c].shared2.score)\n\t{\n\t  prev_c = c ;\n\t  continue ;\n\t}\n\n\t/* compare the two columns */\n\tcp1 = &A [Col [super_c].start] ;\n\tcp2 = &A [Col [c].start] ;\n\n\tfor (i = 0 ; i < length ; i++)\n\t{\n\t  /* the columns are \"clean\" (no dead rows) */\n\t  COLAMD_ASSERT (ROW_IS_ALIVE (*cp1))  ;\n\t  COLAMD_ASSERT (ROW_IS_ALIVE (*cp2))  ;\n\t  /* row indices will same order for both supercols, */\n\t  /* no gather scatter nessasary */\n\t  if (*cp1++ != *cp2++)\n\t  {\n\t    break ;\n\t  }\n\t}\n\n\t/* the two columns are different if the for-loop \"broke\" */\n\tif (i != length)\n\t{\n\t  prev_c = c ;\n\t  continue ;\n\t}\n\n\t/* === Got it!  
two columns are identical =================== */\n\n\tCOLAMD_ASSERT (Col [c].shared2.score == Col [super_c].shared2.score) ;\n\n\tCol [super_c].shared1.thickness += Col [c].shared1.thickness ;\n\tCol [c].shared1.parent = super_c ;\n\tKILL_NON_PRINCIPAL_COL (c) ;\n\t/* order c later, in order_children() */\n\tCol [c].shared2.order = COLAMD_EMPTY ;\n\t/* remove c from hash bucket */\n\tCol [prev_c].shared4.hash_next = Col [c].shared4.hash_next ;\n      }\n    }\n\n    /* === Empty this hash bucket ======================================= */\n\n    if (head_column > COLAMD_EMPTY)\n    {\n      /* corresponding degree list \"hash\" is not empty */\n      Col [head_column].shared3.headhash = COLAMD_EMPTY ;\n    }\n    else\n    {\n      /* corresponding degree list \"hash\" is empty */\n      head [hash] = COLAMD_EMPTY ;\n    }\n  }\n}\n\n\n/* ========================================================================== */\n/* === garbage_collection =================================================== */\n/* ========================================================================== */\n\n/*\n  Defragments and compacts columns and rows in the workspace A.  Used when\n  all avaliable memory has been used while performing row merging.  Returns\n  the index of the first free position in A, after garbage collection.  The\n  time taken by this routine is linear is the size of the array A, which is\n  itself linear in the number of nonzeros in the input matrix.\n  Not user-callable.\n*/\ntemplate <typename IndexType>\nstatic IndexType garbage_collection  /* returns the new value of pfree */\n  (\n    /* === Parameters ======================================================= */\n    \n    IndexType n_row,      /* number of rows */\n    IndexType n_col,      /* number of columns */\n    Colamd_Row<IndexType> Row [],    /* row info */\n    colamd_col<IndexType> Col [],    /* column info */\n    IndexType A [],     /* A [0 ... 
Alen-1] holds the matrix */\n    IndexType *pfree      /* &A [0] ... pfree is in use */\n    )\n{\n  /* === Local variables ================================================== */\n\n  IndexType *psrc ;     /* source pointer */\n  IndexType *pdest ;    /* destination pointer */\n  IndexType j ;     /* counter */\n  IndexType r ;     /* a row index */\n  IndexType c ;     /* a column index */\n  IndexType length ;    /* length of a row or column */\n\n  /* === Defragment the columns =========================================== */\n\n  pdest = &A[0] ;\n  for (c = 0 ; c < n_col ; c++)\n  {\n    if (COL_IS_ALIVE (c))\n    {\n      psrc = &A [Col [c].start] ;\n\n      /* move and compact the column */\n      COLAMD_ASSERT (pdest <= psrc) ;\n      Col [c].start = (IndexType) (pdest - &A [0]) ;\n      length = Col [c].length ;\n      for (j = 0 ; j < length ; j++)\n      {\n\tr = *psrc++ ;\n\tif (ROW_IS_ALIVE (r))\n\t{\n\t  *pdest++ = r ;\n\t}\n      }\n      Col [c].length = (IndexType) (pdest - &A [Col [c].start]) ;\n    }\n  }\n\n  /* === Prepare to defragment the rows =================================== */\n\n  for (r = 0 ; r < n_row ; r++)\n  {\n    if (ROW_IS_ALIVE (r))\n    {\n      if (Row [r].length == 0)\n      {\n\t/* this row is of zero length.  cannot compact it, so kill it */\n\tCOLAMD_DEBUG3 ((\"Defrag row kill\\n\")) ;\n\tKILL_ROW (r) ;\n      }\n      else\n      {\n\t/* save first column index in Row [r].shared2.first_column */\n\tpsrc = &A [Row [r].start] ;\n\tRow [r].shared2.first_column = *psrc ;\n\tCOLAMD_ASSERT (ROW_IS_ALIVE (r)) ;\n\t/* flag the start of the row with the one's complement of row */\n\t*psrc = ONES_COMPLEMENT (r) ;\n\n      }\n    }\n  }\n\n  /* === Defragment the rows ============================================== */\n\n  psrc = pdest ;\n  while (psrc < pfree)\n  {\n    /* find a negative number ... 
the start of a row */\n    if (*psrc++ < 0)\n    {\n      psrc-- ;\n      /* get the row index */\n      r = ONES_COMPLEMENT (*psrc) ;\n      COLAMD_ASSERT (r >= 0 && r < n_row) ;\n      /* restore first column index */\n      *psrc = Row [r].shared2.first_column ;\n      COLAMD_ASSERT (ROW_IS_ALIVE (r)) ;\n\n      /* move and compact the row */\n      COLAMD_ASSERT (pdest <= psrc) ;\n      Row [r].start = (IndexType) (pdest - &A [0]) ;\n      length = Row [r].length ;\n      for (j = 0 ; j < length ; j++)\n      {\n\tc = *psrc++ ;\n\tif (COL_IS_ALIVE (c))\n\t{\n\t  *pdest++ = c ;\n\t}\n      }\n      Row [r].length = (IndexType) (pdest - &A [Row [r].start]) ;\n\n    }\n  }\n  /* ensure we found all the rows */\n  COLAMD_ASSERT (debug_rows == 0) ;\n\n  /* === Return the new value of pfree ==================================== */\n\n  return ((IndexType) (pdest - &A [0])) ;\n}\n\n\n/* ========================================================================== */\n/* === clear_mark =========================================================== */\n/* ========================================================================== */\n\n/*\n  Clears the Row [].shared2.mark array, and returns the new tag_mark.\n  Return value is the new tag_mark.  Not user-callable.\n*/\ntemplate <typename IndexType>\nstatic inline  IndexType clear_mark  /* return the new value for tag_mark */\n  (\n      /* === Parameters ======================================================= */\n\n    IndexType n_row,    /* number of rows in A */\n    Colamd_Row<IndexType> Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */\n    )\n{\n  /* === Local variables ================================================== */\n\n  IndexType r ;\n\n  for (r = 0 ; r < n_row ; r++)\n  {\n    if (ROW_IS_ALIVE (r))\n    {\n      Row [r].shared2.mark = 0 ;\n    }\n  }\n  return (1) ;\n}\n\n\n} // namespace internal \n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/OrderingMethods/Ordering.h",
    "content": " \n// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012  Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_ORDERING_H\n#define EIGEN_ORDERING_H\n\nnamespace Eigen {\n  \n#include \"Eigen_Colamd.h\"\n\nnamespace internal {\n    \n/** \\internal\n  * \\ingroup OrderingMethods_Module\n  * \\param[in] A the input non-symmetric matrix\n  * \\param[out] symmat the symmetric pattern A^T+A from the input matrix \\a A.\n  * FIXME: The values should not be considered here\n  */\ntemplate<typename MatrixType> \nvoid ordering_helper_at_plus_a(const MatrixType& A, MatrixType& symmat)\n{\n  MatrixType C;\n  C = A.transpose(); // NOTE: Could be  costly\n  for (int i = 0; i < C.rows(); i++) \n  {\n      for (typename MatrixType::InnerIterator it(C, i); it; ++it)\n        it.valueRef() = 0.0;\n  }\n  symmat = C + A;\n}\n    \n}\n\n#ifndef EIGEN_MPL2_ONLY\n\n/** \\ingroup OrderingMethods_Module\n  * \\class AMDOrdering\n  *\n  * Functor computing the \\em approximate \\em minimum \\em degree ordering\n  * If the matrix is not structurally symmetric, an ordering of A^T+A is computed\n  * \\tparam  StorageIndex The type of indices of the matrix \n  * \\sa COLAMDOrdering\n  */\ntemplate <typename StorageIndex>\nclass AMDOrdering\n{\n  public:\n    typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;\n    \n    /** Compute the permutation vector from a sparse matrix\n     * This routine is much faster if the input matrix is column-major     \n     */\n    template <typename MatrixType>\n    void operator()(const MatrixType& mat, PermutationType& perm)\n    {\n      // Compute the symmetric pattern\n      SparseMatrix<typename MatrixType::Scalar, ColMajor, 
StorageIndex> symm;\n      internal::ordering_helper_at_plus_a(mat,symm); \n    \n      // Call the AMD routine \n      //m_mat.prune(keep_diag());\n      internal::minimum_degree_ordering(symm, perm);\n    }\n    \n    /** Compute the permutation with a selfadjoint matrix */\n    template <typename SrcType, unsigned int SrcUpLo> \n    void operator()(const SparseSelfAdjointView<SrcType, SrcUpLo>& mat, PermutationType& perm)\n    { \n      SparseMatrix<typename SrcType::Scalar, ColMajor, StorageIndex> C; C = mat;\n      \n      // Call the AMD routine \n      // m_mat.prune(keep_diag()); //Remove the diagonal elements \n      internal::minimum_degree_ordering(C, perm);\n    }\n};\n\n#endif // EIGEN_MPL2_ONLY\n\n/** \\ingroup OrderingMethods_Module\n  * \\class NaturalOrdering\n  *\n  * Functor computing the natural ordering (identity)\n  * \n  * \\note Returns an empty permutation matrix\n  * \\tparam  StorageIndex The type of indices of the matrix \n  */\ntemplate <typename StorageIndex>\nclass NaturalOrdering\n{\n  public:\n    typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;\n    \n    /** Compute the permutation vector from a column-major sparse matrix */\n    template <typename MatrixType>\n    void operator()(const MatrixType& /*mat*/, PermutationType& perm)\n    {\n      perm.resize(0); \n    }\n    \n};\n\n/** \\ingroup OrderingMethods_Module\n  * \\class COLAMDOrdering\n  *\n  * \\tparam  StorageIndex The type of indices of the matrix \n  * \n  * Functor computing the \\em column \\em approximate \\em minimum \\em degree ordering \n  * The matrix should be in column-major and \\b compressed format (see SparseMatrix::makeCompressed()).\n  */\ntemplate<typename StorageIndex>\nclass COLAMDOrdering\n{\n  public:\n    typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType; \n    typedef Matrix<StorageIndex, Dynamic, 1> IndexVector;\n    \n    /** Compute the permutation vector \\a perm form the sparse matrix \\a 
mat\n      * \\warning The input sparse matrix \\a mat must be in compressed mode (see SparseMatrix::makeCompressed()).\n      */\n    template <typename MatrixType>\n    void operator() (const MatrixType& mat, PermutationType& perm)\n    {\n      eigen_assert(mat.isCompressed() && \"COLAMDOrdering requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to COLAMDOrdering\");\n      \n      StorageIndex m = StorageIndex(mat.rows());\n      StorageIndex n = StorageIndex(mat.cols());\n      StorageIndex nnz = StorageIndex(mat.nonZeros());\n      // Get the recommended value of Alen to be used by colamd\n      StorageIndex Alen = internal::colamd_recommended(nnz, m, n); \n      // Set the default parameters\n      double knobs [COLAMD_KNOBS]; \n      StorageIndex stats [COLAMD_STATS];\n      internal::colamd_set_defaults(knobs);\n      \n      IndexVector p(n+1), A(Alen); \n      for(StorageIndex i=0; i <= n; i++)   p(i) = mat.outerIndexPtr()[i];\n      for(StorageIndex i=0; i < nnz; i++)  A(i) = mat.innerIndexPtr()[i];\n      // Call Colamd routine to compute the ordering \n      StorageIndex info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats); \n      EIGEN_UNUSED_VARIABLE(info);\n      eigen_assert( info && \"COLAMD failed \" );\n      \n      perm.resize(n);\n      for (StorageIndex i = 0; i < n; i++) perm.indices()(p(i)) = i;\n    }\n};\n\n} // end namespace Eigen\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/PaStiXSupport/PaStiXSupport.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PASTIXSUPPORT_H\n#define EIGEN_PASTIXSUPPORT_H\n\nnamespace Eigen { \n\n#if defined(DCOMPLEX)\n  #define PASTIX_COMPLEX  COMPLEX\n  #define PASTIX_DCOMPLEX DCOMPLEX\n#else\n  #define PASTIX_COMPLEX  std::complex<float>\n  #define PASTIX_DCOMPLEX std::complex<double>\n#endif\n\n/** \\ingroup PaStiXSupport_Module\n  * \\brief Interface to the PaStix solver\n  * \n  * This class is used to solve the linear systems A.X = B via the PaStix library. \n  * The matrix can be either real or complex, symmetric or not.\n  *\n  * \\sa TutorialSparseDirectSolvers\n  */\ntemplate<typename _MatrixType, bool IsStrSym = false> class PastixLU;\ntemplate<typename _MatrixType, int Options> class PastixLLT;\ntemplate<typename _MatrixType, int Options> class PastixLDLT;\n\nnamespace internal\n{\n    \n  template<class Pastix> struct pastix_traits;\n\n  template<typename _MatrixType>\n  struct pastix_traits< PastixLU<_MatrixType> >\n  {\n    typedef _MatrixType MatrixType;\n    typedef typename _MatrixType::Scalar Scalar;\n    typedef typename _MatrixType::RealScalar RealScalar;\n    typedef typename _MatrixType::StorageIndex StorageIndex;\n  };\n\n  template<typename _MatrixType, int Options>\n  struct pastix_traits< PastixLLT<_MatrixType,Options> >\n  {\n    typedef _MatrixType MatrixType;\n    typedef typename _MatrixType::Scalar Scalar;\n    typedef typename _MatrixType::RealScalar RealScalar;\n    typedef typename _MatrixType::StorageIndex StorageIndex;\n  };\n\n  template<typename _MatrixType, int Options>\n  struct pastix_traits< PastixLDLT<_MatrixType,Options> >\n  {\n    
typedef _MatrixType MatrixType;\n    typedef typename _MatrixType::Scalar Scalar;\n    typedef typename _MatrixType::RealScalar RealScalar;\n    typedef typename _MatrixType::StorageIndex StorageIndex;\n  };\n  \n  void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, float *vals, int *perm, int * invp, float *x, int nbrhs, int *iparm, double *dparm)\n  {\n    if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }\n    if (nbrhs == 0) {x = NULL; nbrhs=1;}\n    s_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); \n  }\n  \n  void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, double *vals, int *perm, int * invp, double *x, int nbrhs, int *iparm, double *dparm)\n  {\n    if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }\n    if (nbrhs == 0) {x = NULL; nbrhs=1;}\n    d_pastix(pastix_data, pastix_comm, n, ptr, idx, vals, perm, invp, x, nbrhs, iparm, dparm); \n  }\n  \n  void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<float> *vals, int *perm, int * invp, std::complex<float> *x, int nbrhs, int *iparm, double *dparm)\n  {\n    if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }\n    if (nbrhs == 0) {x = NULL; nbrhs=1;}\n    c_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_COMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_COMPLEX*>(x), nbrhs, iparm, dparm); \n  }\n  \n  void eigen_pastix(pastix_data_t **pastix_data, int pastix_comm, int n, int *ptr, int *idx, std::complex<double> *vals, int *perm, int * invp, std::complex<double> *x, int nbrhs, int *iparm, double *dparm)\n  {\n    if (n == 0) { ptr = NULL; idx = NULL; vals = NULL; }\n    if (nbrhs == 0) {x = NULL; nbrhs=1;}\n    z_pastix(pastix_data, pastix_comm, n, ptr, idx, reinterpret_cast<PASTIX_DCOMPLEX*>(vals), perm, invp, reinterpret_cast<PASTIX_DCOMPLEX*>(x), nbrhs, iparm, dparm); \n  }\n\n  // Convert the matrix  to 
Fortran-style Numbering\n  template <typename MatrixType>\n  void c_to_fortran_numbering (MatrixType& mat)\n  {\n    if ( !(mat.outerIndexPtr()[0]) ) \n    { \n      int i;\n      for(i = 0; i <= mat.rows(); ++i)\n        ++mat.outerIndexPtr()[i];\n      for(i = 0; i < mat.nonZeros(); ++i)\n        ++mat.innerIndexPtr()[i];\n    }\n  }\n  \n  // Convert to C-style Numbering\n  template <typename MatrixType>\n  void fortran_to_c_numbering (MatrixType& mat)\n  {\n    // Check the Numbering\n    if ( mat.outerIndexPtr()[0] == 1 ) \n    { // Convert to C-style numbering\n      int i;\n      for(i = 0; i <= mat.rows(); ++i)\n        --mat.outerIndexPtr()[i];\n      for(i = 0; i < mat.nonZeros(); ++i)\n        --mat.innerIndexPtr()[i];\n    }\n  }\n}\n\n// This is the base class to interface with PaStiX functions. \n// Users should not used this class directly. \ntemplate <class Derived>\nclass PastixBase : public SparseSolverBase<Derived>\n{\n  protected:\n    typedef SparseSolverBase<Derived> Base;\n    using Base::derived;\n    using Base::m_isInitialized;\n  public:\n    using Base::_solve_impl;\n    \n    typedef typename internal::pastix_traits<Derived>::MatrixType _MatrixType;\n    typedef _MatrixType MatrixType;\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef Matrix<Scalar,Dynamic,1> Vector;\n    typedef SparseMatrix<Scalar, ColMajor> ColSpMatrix;\n    enum {\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n    \n  public:\n    \n    PastixBase() : m_initisOk(false), m_analysisIsOk(false), m_factorizationIsOk(false), m_pastixdata(0), m_size(0)\n    {\n      init();\n    }\n    \n    ~PastixBase() \n    {\n      clean();\n    }\n    \n    template<typename Rhs,typename Dest>\n    bool _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &x) const;\n    
\n    /** Returns a reference to the integer vector IPARM of PaStiX parameters\n      * to modify the default parameters. \n      * The statistics related to the different phases of factorization and solve are saved here as well\n      * \\sa analyzePattern() factorize()\n      */\n    Array<StorageIndex,IPARM_SIZE,1>& iparm()\n    {\n      return m_iparm; \n    }\n    \n    /** Return a reference to a particular index parameter of the IPARM vector \n     * \\sa iparm()\n     */\n    \n    int& iparm(int idxparam)\n    {\n      return m_iparm(idxparam);\n    }\n    \n     /** Returns a reference to the double vector DPARM of PaStiX parameters \n      * The statistics related to the different phases of factorization and solve are saved here as well\n      * \\sa analyzePattern() factorize()\n      */\n    Array<double,DPARM_SIZE,1>& dparm()\n    {\n      return m_dparm; \n    }\n    \n    \n    /** Return a reference to a particular index parameter of the DPARM vector \n     * \\sa dparm()\n     */\n    double& dparm(int idxparam)\n    {\n      return m_dparm(idxparam);\n    }\n    \n    inline Index cols() const { return m_size; }\n    inline Index rows() const { return m_size; }\n    \n     /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was succesful,\n      *          \\c NumericalIssue if the PaStiX reports a problem\n      *          \\c InvalidInput if the input matrix is invalid\n      *\n      * \\sa iparm()          \n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"Decomposition is not initialized.\");\n      return m_info;\n    }\n    \n  protected:\n\n    // Initialize the Pastix data structure, check the matrix\n    void init(); \n    \n    // Compute the ordering and the symbolic factorization\n    void analyzePattern(ColSpMatrix& mat);\n    \n    // Compute the numerical factorization\n    void factorize(ColSpMatrix& mat);\n    \n    // 
Free all the data allocated by Pastix\n    void clean()\n    {\n      eigen_assert(m_initisOk && \"The Pastix structure should be allocated first\"); \n      m_iparm(IPARM_START_TASK) = API_TASK_CLEAN;\n      m_iparm(IPARM_END_TASK) = API_TASK_CLEAN;\n      internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0,\n                             m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());\n    }\n    \n    void compute(ColSpMatrix& mat);\n    \n    int m_initisOk; \n    int m_analysisIsOk;\n    int m_factorizationIsOk;\n    mutable ComputationInfo m_info; \n    mutable pastix_data_t *m_pastixdata; // Data structure for pastix\n    mutable int m_comm; // The MPI communicator identifier\n    mutable Array<int,IPARM_SIZE,1> m_iparm; // integer vector for the input parameters\n    mutable Array<double,DPARM_SIZE,1> m_dparm; // Scalar vector for the input parameters\n    mutable Matrix<StorageIndex,Dynamic,1> m_perm;  // Permutation vector\n    mutable Matrix<StorageIndex,Dynamic,1> m_invp;  // Inverse permutation vector\n    mutable int m_size; // Size of the matrix \n}; \n\n /** Initialize the PaStiX data structure. 
\n   *A first call to this function fills iparm and dparm with the default PaStiX parameters\n   * \\sa iparm() dparm()\n   */\ntemplate <class Derived>\nvoid PastixBase<Derived>::init()\n{\n  m_size = 0; \n  m_iparm.setZero(IPARM_SIZE);\n  m_dparm.setZero(DPARM_SIZE);\n  \n  m_iparm(IPARM_MODIFY_PARAMETER) = API_NO;\n  pastix(&m_pastixdata, MPI_COMM_WORLD,\n         0, 0, 0, 0,\n         0, 0, 0, 1, m_iparm.data(), m_dparm.data());\n  \n  m_iparm[IPARM_MATRIX_VERIFICATION] = API_NO;\n  m_iparm[IPARM_VERBOSE]             = API_VERBOSE_NOT;\n  m_iparm[IPARM_ORDERING]            = API_ORDER_SCOTCH;\n  m_iparm[IPARM_INCOMPLETE]          = API_NO;\n  m_iparm[IPARM_OOC_LIMIT]           = 2000;\n  m_iparm[IPARM_RHS_MAKING]          = API_RHS_B;\n  m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO;\n  \n  m_iparm(IPARM_START_TASK) = API_TASK_INIT;\n  m_iparm(IPARM_END_TASK) = API_TASK_INIT;\n  internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 0, 0, 0, (Scalar*)0,\n                         0, 0, 0, 0, m_iparm.data(), m_dparm.data());\n  \n  // Check the returned error\n  if(m_iparm(IPARM_ERROR_NUMBER)) {\n    m_info = InvalidInput;\n    m_initisOk = false;\n  }\n  else { \n    m_info = Success;\n    m_initisOk = true;\n  }\n}\n\ntemplate <class Derived>\nvoid PastixBase<Derived>::compute(ColSpMatrix& mat)\n{\n  eigen_assert(mat.rows() == mat.cols() && \"The input matrix should be squared\");\n  \n  analyzePattern(mat);  \n  factorize(mat);\n  \n  m_iparm(IPARM_MATRIX_VERIFICATION) = API_NO;\n}\n\n\ntemplate <class Derived>\nvoid PastixBase<Derived>::analyzePattern(ColSpMatrix& mat)\n{                         \n  eigen_assert(m_initisOk && \"The initialization of PaSTiX failed\");\n  \n  // clean previous calls\n  if(m_size>0)\n    clean();\n  \n  m_size = internal::convert_index<int>(mat.rows());\n  m_perm.resize(m_size);\n  m_invp.resize(m_size);\n  \n  m_iparm(IPARM_START_TASK) = API_TASK_ORDERING;\n  m_iparm(IPARM_END_TASK) = API_TASK_ANALYSE;\n  
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),\n               mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());\n  \n  // Check the returned error\n  if(m_iparm(IPARM_ERROR_NUMBER))\n  {\n    m_info = NumericalIssue;\n    m_analysisIsOk = false;\n  }\n  else\n  { \n    m_info = Success;\n    m_analysisIsOk = true;\n  }\n}\n\ntemplate <class Derived>\nvoid PastixBase<Derived>::factorize(ColSpMatrix& mat)\n{\n//   if(&m_cpyMat != &mat) m_cpyMat = mat;\n  eigen_assert(m_analysisIsOk && \"The analysis phase should be called before the factorization phase\");\n  m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT;\n  m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT;\n  m_size = internal::convert_index<int>(mat.rows());\n  \n  internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),\n               mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());\n  \n  // Check the returned error\n  if(m_iparm(IPARM_ERROR_NUMBER))\n  {\n    m_info = NumericalIssue;\n    m_factorizationIsOk = false;\n    m_isInitialized = false;\n  }\n  else\n  {\n    m_info = Success;\n    m_factorizationIsOk = true;\n    m_isInitialized = true;\n  }\n}\n\n/* Solve the system */\ntemplate<typename Base>\ntemplate<typename Rhs,typename Dest>\nbool PastixBase<Base>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &x) const\n{\n  eigen_assert(m_isInitialized && \"The matrix should be factorized first\");\n  EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,\n                     THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);\n  int rhs = 1;\n  \n  x = b; /* on return, x is overwritten by the computed solution */\n  \n  for (int i = 0; i < b.cols(); i++){\n    m_iparm[IPARM_START_TASK]          = API_TASK_SOLVE;\n    m_iparm[IPARM_END_TASK]            = API_TASK_REFINE;\n  \n    internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, 
internal::convert_index<int>(x.rows()), 0, 0, 0,\n                           m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), m_dparm.data());\n  }\n  \n  // Check the returned error\n  m_info = m_iparm(IPARM_ERROR_NUMBER)==0 ? Success : NumericalIssue;\n  \n  return m_iparm(IPARM_ERROR_NUMBER)==0;\n}\n\n/** \\ingroup PaStiXSupport_Module\n  * \\class PastixLU\n  * \\brief Sparse direct LU solver based on PaStiX library\n  * \n  * This class is used to solve the linear systems A.X = B with a supernodal LU \n  * factorization in the PaStiX library. The matrix A should be squared and nonsingular\n  * PaStiX requires that the matrix A has a symmetric structural pattern. \n  * This interface can symmetrize the input matrix otherwise. \n  * The vectors or matrices X and B can be either dense or sparse.\n  * \n  * \\tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  * \\tparam IsStrSym Indicates if the input matrix has a symmetric pattern, default is false\n  * NOTE : Note that if the analysis and factorization phase are called separately, \n  * the input matrix will be symmetrized at each call, hence it is advised to \n  * symmetrize the matrix in a end-user program and set \\p IsStrSym to true\n  *\n  * \\implsparsesolverconcept\n  *\n  * \\sa \\ref TutorialSparseSolverConcept, class SparseLU\n  * \n  */\ntemplate<typename _MatrixType, bool IsStrSym>\nclass PastixLU : public PastixBase< PastixLU<_MatrixType> >\n{\n  public:\n    typedef _MatrixType MatrixType;\n    typedef PastixBase<PastixLU<MatrixType> > Base;\n    typedef typename Base::ColSpMatrix ColSpMatrix;\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    \n  public:\n    PastixLU() : Base()\n    {\n      init();\n    }\n    \n    explicit PastixLU(const MatrixType& matrix):Base()\n    {\n      init();\n      compute(matrix);\n    }\n    /** Compute the LU supernodal factorization of \\p matrix. 
\n      * iparm and dparm can be used to tune the PaStiX parameters. \n      * see the PaStiX user's manual\n      * \\sa analyzePattern() factorize()\n      */\n    void compute (const MatrixType& matrix)\n    {\n      m_structureIsUptodate = false;\n      ColSpMatrix temp;\n      grabMatrix(matrix, temp);\n      Base::compute(temp);\n    }\n    /** Compute the LU symbolic factorization of \\p matrix using its sparsity pattern. \n      * Several ordering methods can be used at this step. See the PaStiX user's manual. \n      * The result of this operation can be used with successive matrices having the same pattern as \\p matrix\n      * \\sa factorize()\n      */\n    void analyzePattern(const MatrixType& matrix)\n    {\n      m_structureIsUptodate = false;\n      ColSpMatrix temp;\n      grabMatrix(matrix, temp);\n      Base::analyzePattern(temp);\n    }\n\n    /** Compute the LU supernodal factorization of \\p matrix\n      * WARNING The matrix \\p matrix should have the same structural pattern \n      * as the same used in the analysis phase.\n      * \\sa analyzePattern()\n      */ \n    void factorize(const MatrixType& matrix)\n    {\n      ColSpMatrix temp;\n      grabMatrix(matrix, temp);\n      Base::factorize(temp);\n    }\n  protected:\n    \n    void init()\n    {\n      m_structureIsUptodate = false;\n      m_iparm(IPARM_SYM) = API_SYM_NO;\n      m_iparm(IPARM_FACTORIZATION) = API_FACT_LU;\n    }\n    \n    void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)\n    {\n      if(IsStrSym)\n        out = matrix;\n      else\n      {\n        if(!m_structureIsUptodate)\n        {\n          // update the transposed structure\n          m_transposedStructure = matrix.transpose();\n          \n          // Set the elements of the matrix to zero \n          for (Index j=0; j<m_transposedStructure.outerSize(); ++j) \n            for(typename ColSpMatrix::InnerIterator it(m_transposedStructure, j); it; ++it)\n              it.valueRef() = 0.0;\n\n        
  m_structureIsUptodate = true;\n        }\n        \n        out = m_transposedStructure + matrix;\n      }\n      internal::c_to_fortran_numbering(out);\n    }\n    \n    using Base::m_iparm;\n    using Base::m_dparm;\n    \n    ColSpMatrix m_transposedStructure;\n    bool m_structureIsUptodate;\n};\n\n/** \\ingroup PaStiXSupport_Module\n  * \\class PastixLLT\n  * \\brief A sparse direct supernodal Cholesky (LLT) factorization and solver based on the PaStiX library\n  * \n  * This class is used to solve the linear systems A.X = B via a LL^T supernodal Cholesky factorization\n  * available in the PaStiX library. The matrix A should be symmetric and positive definite\n  * WARNING Selfadjoint complex matrices are not supported in the current version of PaStiX\n  * The vectors or matrices X and B can be either dense or sparse\n  * \n  * \\tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  * \\tparam UpLo The part of the matrix to use : Lower or Upper. The default is Lower as required by PaStiX\n  *\n  * \\implsparsesolverconcept\n  *\n  * \\sa \\ref TutorialSparseSolverConcept, class SimplicialLLT\n  */\ntemplate<typename _MatrixType, int _UpLo>\nclass PastixLLT : public PastixBase< PastixLLT<_MatrixType, _UpLo> >\n{\n  public:\n    typedef _MatrixType MatrixType;\n    typedef PastixBase<PastixLLT<MatrixType, _UpLo> > Base;\n    typedef typename Base::ColSpMatrix ColSpMatrix;\n    \n  public:\n    enum { UpLo = _UpLo };\n    PastixLLT() : Base()\n    {\n      init();\n    }\n    \n    explicit PastixLLT(const MatrixType& matrix):Base()\n    {\n      init();\n      compute(matrix);\n    }\n\n    /** Compute the L factor of the LL^T supernodal factorization of \\p matrix \n      * \\sa analyzePattern() factorize()\n      */\n    void compute (const MatrixType& matrix)\n    {\n      ColSpMatrix temp;\n      grabMatrix(matrix, temp);\n      Base::compute(temp);\n    }\n\n     /** Compute the LL^T symbolic factorization of \\p matrix using 
its sparsity pattern\n      * The result of this operation can be used with successive matrices having the same pattern as \\p matrix\n      * \\sa factorize()\n      */\n    void analyzePattern(const MatrixType& matrix)\n    {\n      ColSpMatrix temp;\n      grabMatrix(matrix, temp);\n      Base::analyzePattern(temp);\n    }\n      /** Compute the LL^T supernodal numerical factorization of \\p matrix \n        * \\sa analyzePattern()\n        */\n    void factorize(const MatrixType& matrix)\n    {\n      ColSpMatrix temp;\n      grabMatrix(matrix, temp);\n      Base::factorize(temp);\n    }\n  protected:\n    using Base::m_iparm;\n    \n    void init()\n    {\n      m_iparm(IPARM_SYM) = API_SYM_YES;\n      m_iparm(IPARM_FACTORIZATION) = API_FACT_LLT;\n    }\n    \n    void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)\n    {\n      out.resize(matrix.rows(), matrix.cols());\n      // Pastix supports only lower, column-major matrices \n      out.template selfadjointView<Lower>() = matrix.template selfadjointView<UpLo>();\n      internal::c_to_fortran_numbering(out);\n    }\n};\n\n/** \\ingroup PaStiXSupport_Module\n  * \\class PastixLDLT\n  * \\brief A sparse direct supernodal Cholesky (LLT) factorization and solver based on the PaStiX library\n  * \n  * This class is used to solve the linear systems A.X = B via a LDL^T supernodal Cholesky factorization\n  * available in the PaStiX library. The matrix A should be symmetric and positive definite\n  * WARNING Selfadjoint complex matrices are not supported in the current version of PaStiX\n  * The vectors or matrices X and B can be either dense or sparse\n  * \n  * \\tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  * \\tparam UpLo The part of the matrix to use : Lower or Upper. 
The default is Lower as required by PaStiX\n  *\n  * \\implsparsesolverconcept\n  *\n  * \\sa \\ref TutorialSparseSolverConcept, class SimplicialLDLT\n  */\ntemplate<typename _MatrixType, int _UpLo>\nclass PastixLDLT : public PastixBase< PastixLDLT<_MatrixType, _UpLo> >\n{\n  public:\n    typedef _MatrixType MatrixType;\n    typedef PastixBase<PastixLDLT<MatrixType, _UpLo> > Base; \n    typedef typename Base::ColSpMatrix ColSpMatrix;\n    \n  public:\n    enum { UpLo = _UpLo };\n    PastixLDLT():Base()\n    {\n      init();\n    }\n    \n    explicit PastixLDLT(const MatrixType& matrix):Base()\n    {\n      init();\n      compute(matrix);\n    }\n\n    /** Compute the L and D factors of the LDL^T factorization of \\p matrix \n      * \\sa analyzePattern() factorize()\n      */\n    void compute (const MatrixType& matrix)\n    {\n      ColSpMatrix temp;\n      grabMatrix(matrix, temp);\n      Base::compute(temp);\n    }\n\n    /** Compute the LDL^T symbolic factorization of \\p matrix using its sparsity pattern\n      * The result of this operation can be used with successive matrices having the same pattern as \\p matrix\n      * \\sa factorize()\n      */\n    void analyzePattern(const MatrixType& matrix)\n    { \n      ColSpMatrix temp;\n      grabMatrix(matrix, temp);\n      Base::analyzePattern(temp);\n    }\n    /** Compute the LDL^T supernodal numerical factorization of \\p matrix \n      * \n      */\n    void factorize(const MatrixType& matrix)\n    {\n      ColSpMatrix temp;\n      grabMatrix(matrix, temp);\n      Base::factorize(temp);\n    }\n\n  protected:\n    using Base::m_iparm;\n    \n    void init()\n    {\n      m_iparm(IPARM_SYM) = API_SYM_YES;\n      m_iparm(IPARM_FACTORIZATION) = API_FACT_LDLT;\n    }\n    \n    void grabMatrix(const MatrixType& matrix, ColSpMatrix& out)\n    {\n      // Pastix supports only lower, column-major matrices \n      out.resize(matrix.rows(), matrix.cols());\n      out.template selfadjointView<Lower>() = 
matrix.template selfadjointView<UpLo>();\n      internal::c_to_fortran_numbering(out);\n    }\n};\n\n} // end namespace Eigen\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/PardisoSupport/PardisoSupport.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to Intel(R) MKL PARDISO\n ********************************************************************************\n*/\n\n#ifndef EIGEN_PARDISOSUPPORT_H\n#define EIGEN_PARDISOSUPPORT_H\n\nnamespace Eigen { \n\ntemplate<typename _MatrixType> class PardisoLU;\ntemplate<typename _MatrixType, int Options=Upper> class PardisoLLT;\ntemplate<typename _MatrixType, int Options=Upper> class PardisoLDLT;\n\nnamespace internal\n{\n  template<typename IndexType>\n  struct pardiso_run_selector\n  {\n    static IndexType run( _MKL_DSS_HANDLE_t pt, IndexType maxfct, IndexType mnum, IndexType type, IndexType phase, IndexType n, void *a,\n                      IndexType *ia, IndexType *ja, IndexType *perm, IndexType nrhs, IndexType *iparm, IndexType msglvl, void *b, void *x)\n    {\n      IndexType error = 0;\n      ::pardiso(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);\n      return error;\n    }\n  };\n  template<>\n  struct pardiso_run_selector<long long int>\n  {\n    typedef long long int IndexType;\n    static IndexType run( _MKL_DSS_HANDLE_t pt, IndexType maxfct, IndexType mnum, IndexType type, IndexType phase, IndexType n, void *a,\n                      IndexType *ia, IndexType *ja, IndexType *perm, IndexType nrhs, IndexType *iparm, IndexType msglvl, void *b, void *x)\n    {\n      IndexType error = 0;\n 
     ::pardiso_64(pt, &maxfct, &mnum, &type, &phase, &n, a, ia, ja, perm, &nrhs, iparm, &msglvl, b, x, &error);\n      return error;\n    }\n  };\n\n  template<class Pardiso> struct pardiso_traits;\n\n  template<typename _MatrixType>\n  struct pardiso_traits< PardisoLU<_MatrixType> >\n  {\n    typedef _MatrixType MatrixType;\n    typedef typename _MatrixType::Scalar Scalar;\n    typedef typename _MatrixType::RealScalar RealScalar;\n    typedef typename _MatrixType::StorageIndex StorageIndex;\n  };\n\n  template<typename _MatrixType, int Options>\n  struct pardiso_traits< PardisoLLT<_MatrixType, Options> >\n  {\n    typedef _MatrixType MatrixType;\n    typedef typename _MatrixType::Scalar Scalar;\n    typedef typename _MatrixType::RealScalar RealScalar;\n    typedef typename _MatrixType::StorageIndex StorageIndex;\n  };\n\n  template<typename _MatrixType, int Options>\n  struct pardiso_traits< PardisoLDLT<_MatrixType, Options> >\n  {\n    typedef _MatrixType MatrixType;\n    typedef typename _MatrixType::Scalar Scalar;\n    typedef typename _MatrixType::RealScalar RealScalar;\n    typedef typename _MatrixType::StorageIndex StorageIndex;    \n  };\n\n} // end namespace internal\n\ntemplate<class Derived>\nclass PardisoImpl : public SparseSolverBase<Derived>\n{\n  protected:\n    typedef SparseSolverBase<Derived> Base;\n    using Base::derived;\n    using Base::m_isInitialized;\n    \n    typedef internal::pardiso_traits<Derived> Traits;\n  public:\n    using Base::_solve_impl;\n    \n    typedef typename Traits::MatrixType MatrixType;\n    typedef typename Traits::Scalar Scalar;\n    typedef typename Traits::RealScalar RealScalar;\n    typedef typename Traits::StorageIndex StorageIndex;\n    typedef SparseMatrix<Scalar,RowMajor,StorageIndex> SparseMatrixType;\n    typedef Matrix<Scalar,Dynamic,1> VectorType;\n    typedef Matrix<StorageIndex, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;\n    typedef Matrix<StorageIndex, MatrixType::RowsAtCompileTime, 1> 
IntColVectorType;\n    typedef Array<StorageIndex,64,1,DontAlign> ParameterType;\n    enum {\n      ScalarIsComplex = NumTraits<Scalar>::IsComplex,\n      ColsAtCompileTime = Dynamic,\n      MaxColsAtCompileTime = Dynamic\n    };\n\n    PardisoImpl()\n    {\n      eigen_assert((sizeof(StorageIndex) >= sizeof(_INTEGER_t) && sizeof(StorageIndex) <= 8) && \"Non-supported index type\");\n      m_iparm.setZero();\n      m_msglvl = 0; // No output\n      m_isInitialized = false;\n    }\n\n    ~PardisoImpl()\n    {\n      pardisoRelease();\n    }\n\n    inline Index cols() const { return m_size; }\n    inline Index rows() const { return m_size; }\n  \n    /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was succesful,\n      *          \\c NumericalIssue if the matrix appears to be negative.\n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"Decomposition is not initialized.\");\n      return m_info;\n    }\n\n    /** \\warning for advanced usage only.\n      * \\returns a reference to the parameter array controlling PARDISO.\n      * See the PARDISO manual to know how to use it. 
*/\n    ParameterType& pardisoParameterArray()\n    {\n      return m_iparm;\n    }\n    \n    /** Performs a symbolic decomposition on the sparcity of \\a matrix.\n      *\n      * This function is particularly useful when solving for several problems having the same structure.\n      * \n      * \\sa factorize()\n      */\n    Derived& analyzePattern(const MatrixType& matrix);\n    \n    /** Performs a numeric decomposition of \\a matrix\n      *\n      * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed.\n      *\n      * \\sa analyzePattern()\n      */\n    Derived& factorize(const MatrixType& matrix);\n\n    Derived& compute(const MatrixType& matrix);\n\n    template<typename Rhs,typename Dest>\n    void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;\n\n  protected:\n    void pardisoRelease()\n    {\n      if(m_isInitialized) // Factorization ran at least once\n      {\n        internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, -1, internal::convert_index<StorageIndex>(m_size),0, 0, 0, m_perm.data(), 0,\n                                                          m_iparm.data(), m_msglvl, NULL, NULL);\n        m_isInitialized = false;\n      }\n    }\n\n    void pardisoInit(int type)\n    {\n      m_type = type;\n      bool symmetric = std::abs(m_type) < 10;\n      m_iparm[0] = 1;   // No solver default\n      m_iparm[1] = 2;   // use Metis for the ordering\n      m_iparm[2] = 0;   // Reserved. Set to zero. 
(??Numbers of processors, value of OMP_NUM_THREADS??)\n      m_iparm[3] = 0;   // No iterative-direct algorithm\n      m_iparm[4] = 0;   // No user fill-in reducing permutation\n      m_iparm[5] = 0;   // Write solution into x, b is left unchanged\n      m_iparm[6] = 0;   // Not in use\n      m_iparm[7] = 2;   // Max numbers of iterative refinement steps\n      m_iparm[8] = 0;   // Not in use\n      m_iparm[9] = 13;  // Perturb the pivot elements with 1E-13\n      m_iparm[10] = symmetric ? 0 : 1; // Use nonsymmetric permutation and scaling MPS\n      m_iparm[11] = 0;  // Not in use\n      m_iparm[12] = symmetric ? 0 : 1;  // Maximum weighted matching algorithm is switched-off (default for symmetric).\n                                        // Try m_iparm[12] = 1 in case of inappropriate accuracy\n      m_iparm[13] = 0;  // Output: Number of perturbed pivots\n      m_iparm[14] = 0;  // Not in use\n      m_iparm[15] = 0;  // Not in use\n      m_iparm[16] = 0;  // Not in use\n      m_iparm[17] = -1; // Output: Number of nonzeros in the factor LU\n      m_iparm[18] = -1; // Output: Mflops for LU factorization\n      m_iparm[19] = 0;  // Output: Numbers of CG Iterations\n      \n      m_iparm[20] = 0;  // 1x1 pivoting\n      m_iparm[26] = 0;  // No matrix checker\n      m_iparm[27] = (sizeof(RealScalar) == 4) ? 
1 : 0;\n      m_iparm[34] = 1;  // C indexing\n      m_iparm[36] = 0;  // CSR\n      m_iparm[59] = 0;  // 0 - In-Core ; 1 - Automatic switch between In-Core and Out-of-Core modes ; 2 - Out-of-Core\n      \n      memset(m_pt, 0, sizeof(m_pt));\n    }\n\n  protected:\n    // cached data to reduce reallocation, etc.\n    \n    void manageErrorCode(Index error) const\n    {\n      switch(error)\n      {\n        case 0:\n          m_info = Success;\n          break;\n        case -4:\n        case -7:\n          m_info = NumericalIssue;\n          break;\n        default:\n          m_info = InvalidInput;\n      }\n    }\n\n    mutable SparseMatrixType m_matrix;\n    mutable ComputationInfo m_info;\n    bool m_analysisIsOk, m_factorizationIsOk;\n    StorageIndex m_type, m_msglvl;\n    mutable void *m_pt[64];\n    mutable ParameterType m_iparm;\n    mutable IntColVectorType m_perm;\n    Index m_size;\n    \n};\n\ntemplate<class Derived>\nDerived& PardisoImpl<Derived>::compute(const MatrixType& a)\n{\n  m_size = a.rows();\n  eigen_assert(a.rows() == a.cols());\n\n  pardisoRelease();\n  m_perm.setZero(m_size);\n  derived().getMatrix(a);\n  \n  Index error;\n  error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 12, internal::convert_index<StorageIndex>(m_size),\n                                                            m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),\n                                                            m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);\n  manageErrorCode(error);\n  m_analysisIsOk = true;\n  m_factorizationIsOk = true;\n  m_isInitialized = true;\n  return derived();\n}\n\ntemplate<class Derived>\nDerived& PardisoImpl<Derived>::analyzePattern(const MatrixType& a)\n{\n  m_size = a.rows();\n  eigen_assert(m_size == a.cols());\n\n  pardisoRelease();\n  m_perm.setZero(m_size);\n  derived().getMatrix(a);\n  \n  Index error;\n  error = 
internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 11, internal::convert_index<StorageIndex>(m_size),\n                                                            m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),\n                                                            m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);\n  \n  manageErrorCode(error);\n  m_analysisIsOk = true;\n  m_factorizationIsOk = false;\n  m_isInitialized = true;\n  return derived();\n}\n\ntemplate<class Derived>\nDerived& PardisoImpl<Derived>::factorize(const MatrixType& a)\n{\n  eigen_assert(m_analysisIsOk && \"You must first call analyzePattern()\");\n  eigen_assert(m_size == a.rows() && m_size == a.cols());\n  \n  derived().getMatrix(a);\n\n  Index error;\n  error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 22, internal::convert_index<StorageIndex>(m_size),\n                                                            m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),\n                                                            m_perm.data(), 0, m_iparm.data(), m_msglvl, NULL, NULL);\n  \n  manageErrorCode(error);\n  m_factorizationIsOk = true;\n  return derived();\n}\n\ntemplate<class Derived>\ntemplate<typename BDerived,typename XDerived>\nvoid PardisoImpl<Derived>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XDerived>& x) const\n{\n  if(m_iparm[0] == 0) // Factorization was not computed\n  {\n    m_info = InvalidInput;\n    return;\n  }\n\n  //Index n = m_matrix.rows();\n  Index nrhs = Index(b.cols());\n  eigen_assert(m_size==b.rows());\n  eigen_assert(((MatrixBase<BDerived>::Flags & RowMajorBit) == 0 || nrhs == 1) && \"Row-major right hand sides are not supported\");\n  eigen_assert(((MatrixBase<XDerived>::Flags & RowMajorBit) == 0 || nrhs == 1) && \"Row-major matrices of unknowns are not supported\");\n  eigen_assert(((nrhs == 1) || b.outerStride() == b.rows()));\n\n\n//  
switch (transposed) {\n//    case SvNoTrans    : m_iparm[11] = 0 ; break;\n//    case SvTranspose  : m_iparm[11] = 2 ; break;\n//    case SvAdjoint    : m_iparm[11] = 1 ; break;\n//    default:\n//      //std::cerr << \"Eigen: transposition  option \\\"\" << transposed << \"\\\" not supported by the PARDISO backend\\n\";\n//      m_iparm[11] = 0;\n//  }\n\n  Scalar* rhs_ptr = const_cast<Scalar*>(b.derived().data());\n  Matrix<Scalar,Dynamic,Dynamic,ColMajor> tmp;\n  \n  // Pardiso cannot solve in-place\n  if(rhs_ptr == x.derived().data())\n  {\n    tmp = b;\n    rhs_ptr = tmp.data();\n  }\n  \n  Index error;\n  error = internal::pardiso_run_selector<StorageIndex>::run(m_pt, 1, 1, m_type, 33, internal::convert_index<StorageIndex>(m_size),\n                                                            m_matrix.valuePtr(), m_matrix.outerIndexPtr(), m_matrix.innerIndexPtr(),\n                                                            m_perm.data(), internal::convert_index<StorageIndex>(nrhs), m_iparm.data(), m_msglvl,\n                                                            rhs_ptr, x.derived().data());\n\n  manageErrorCode(error);\n}\n\n\n/** \\ingroup PardisoSupport_Module\n  * \\class PardisoLU\n  * \\brief A sparse direct LU factorization and solver based on the PARDISO library\n  *\n  * This class allows to solve for A.X = B sparse linear problems via a direct LU factorization\n  * using the Intel MKL PARDISO library. The sparse matrix A must be squared and invertible.\n  * The vectors or matrices X and B can be either dense or sparse.\n  *\n  * By default, it runs in in-core mode. 
To enable PARDISO's out-of-core feature, set:\n  * \\code solver.pardisoParameterArray()[59] = 1; \\endcode\n  *\n  * \\tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  *\n  * \\implsparsesolverconcept\n  *\n  * \\sa \\ref TutorialSparseSolverConcept, class SparseLU\n  */\ntemplate<typename MatrixType>\nclass PardisoLU : public PardisoImpl< PardisoLU<MatrixType> >\n{\n  protected:\n    typedef PardisoImpl<PardisoLU> Base;\n    typedef typename Base::Scalar Scalar;\n    typedef typename Base::RealScalar RealScalar;\n    using Base::pardisoInit;\n    using Base::m_matrix;\n    friend class PardisoImpl< PardisoLU<MatrixType> >;\n\n  public:\n\n    using Base::compute;\n    using Base::solve;\n\n    PardisoLU()\n      : Base()\n    {\n      pardisoInit(Base::ScalarIsComplex ? 13 : 11);\n    }\n\n    explicit PardisoLU(const MatrixType& matrix)\n      : Base()\n    {\n      pardisoInit(Base::ScalarIsComplex ? 13 : 11);\n      compute(matrix);\n    }\n  protected:\n    void getMatrix(const MatrixType& matrix)\n    {\n      m_matrix = matrix;\n      m_matrix.makeCompressed();\n    }\n};\n\n/** \\ingroup PardisoSupport_Module\n  * \\class PardisoLLT\n  * \\brief A sparse direct Cholesky (LLT) factorization and solver based on the PARDISO library\n  *\n  * This class allows to solve for A.X = B sparse linear problems via a LL^T Cholesky factorization\n  * using the Intel MKL PARDISO library. The sparse matrix A must be selfajoint and positive definite.\n  * The vectors or matrices X and B can be either dense or sparse.\n  *\n  * By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set:\n  * \\code solver.pardisoParameterArray()[59] = 1; \\endcode\n  *\n  * \\tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  * \\tparam UpLo can be any bitwise combination of Upper, Lower. 
The default is Upper, meaning only the upper triangular part has to be used.\n  *         Upper|Lower can be used to tell both triangular parts can be used as input.\n  *\n  * \\implsparsesolverconcept\n  *\n  * \\sa \\ref TutorialSparseSolverConcept, class SimplicialLLT\n  */\ntemplate<typename MatrixType, int _UpLo>\nclass PardisoLLT : public PardisoImpl< PardisoLLT<MatrixType,_UpLo> >\n{\n  protected:\n    typedef PardisoImpl< PardisoLLT<MatrixType,_UpLo> > Base;\n    typedef typename Base::Scalar Scalar;\n    typedef typename Base::RealScalar RealScalar;\n    using Base::pardisoInit;\n    using Base::m_matrix;\n    friend class PardisoImpl< PardisoLLT<MatrixType,_UpLo> >;\n\n  public:\n\n    typedef typename Base::StorageIndex StorageIndex;\n    enum { UpLo = _UpLo };\n    using Base::compute;\n\n    PardisoLLT()\n      : Base()\n    {\n      pardisoInit(Base::ScalarIsComplex ? 4 : 2);\n    }\n\n    explicit PardisoLLT(const MatrixType& matrix)\n      : Base()\n    {\n      pardisoInit(Base::ScalarIsComplex ? 4 : 2);\n      compute(matrix);\n    }\n    \n  protected:\n    \n    void getMatrix(const MatrixType& matrix)\n    {\n      // PARDISO supports only upper, row-major matrices\n      PermutationMatrix<Dynamic,Dynamic,StorageIndex> p_null;\n      m_matrix.resize(matrix.rows(), matrix.cols());\n      m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);\n      m_matrix.makeCompressed();\n    }\n};\n\n/** \\ingroup PardisoSupport_Module\n  * \\class PardisoLDLT\n  * \\brief A sparse direct Cholesky (LDLT) factorization and solver based on the PARDISO library\n  *\n  * This class allows to solve for A.X = B sparse linear problems via a LDL^T Cholesky factorization\n  * using the Intel MKL PARDISO library. 
The sparse matrix A is assumed to be selfajoint and positive definite.\n  * For complex matrices, A can also be symmetric only, see the \\a Options template parameter.\n  * The vectors or matrices X and B can be either dense or sparse.\n  *\n  * By default, it runs in in-core mode. To enable PARDISO's out-of-core feature, set:\n  * \\code solver.pardisoParameterArray()[59] = 1; \\endcode\n  *\n  * \\tparam MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  * \\tparam Options can be any bitwise combination of Upper, Lower, and Symmetric. The default is Upper, meaning only the upper triangular part has to be used.\n  *         Symmetric can be used for symmetric, non-selfadjoint complex matrices, the default being to assume a selfadjoint matrix.\n  *         Upper|Lower can be used to tell both triangular parts can be used as input.\n  *\n  * \\implsparsesolverconcept\n  *\n  * \\sa \\ref TutorialSparseSolverConcept, class SimplicialLDLT\n  */\ntemplate<typename MatrixType, int Options>\nclass PardisoLDLT : public PardisoImpl< PardisoLDLT<MatrixType,Options> >\n{\n  protected:\n    typedef PardisoImpl< PardisoLDLT<MatrixType,Options> > Base;\n    typedef typename Base::Scalar Scalar;\n    typedef typename Base::RealScalar RealScalar;\n    using Base::pardisoInit;\n    using Base::m_matrix;\n    friend class PardisoImpl< PardisoLDLT<MatrixType,Options> >;\n\n  public:\n\n    typedef typename Base::StorageIndex StorageIndex;\n    using Base::compute;\n    enum { UpLo = Options&(Upper|Lower) };\n\n    PardisoLDLT()\n      : Base()\n    {\n      pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 6 : -4 ) : -2);\n    }\n\n    explicit PardisoLDLT(const MatrixType& matrix)\n      : Base()\n    {\n      pardisoInit(Base::ScalarIsComplex ? ( bool(Options&Symmetric) ? 
6 : -4 ) : -2);\n      compute(matrix);\n    }\n    \n    void getMatrix(const MatrixType& matrix)\n    {\n      // PARDISO supports only upper, row-major matrices\n      PermutationMatrix<Dynamic,Dynamic,StorageIndex> p_null;\n      m_matrix.resize(matrix.rows(), matrix.cols());\n      m_matrix.template selfadjointView<Upper>() = matrix.template selfadjointView<UpLo>().twistedBy(p_null);\n      m_matrix.makeCompressed();\n    }\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_PARDISOSUPPORT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/QR/ColPivHouseholderQR.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_COLPIVOTINGHOUSEHOLDERQR_H\n#define EIGEN_COLPIVOTINGHOUSEHOLDERQR_H\n\nnamespace Eigen {\n\nnamespace internal {\ntemplate<typename _MatrixType> struct traits<ColPivHouseholderQR<_MatrixType> >\n : traits<_MatrixType>\n{\n  enum { Flags = 0 };\n};\n\n} // end namespace internal\n\n/** \\ingroup QR_Module\n  *\n  * \\class ColPivHouseholderQR\n  *\n  * \\brief Householder rank-revealing QR decomposition of a matrix with column-pivoting\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are computing the QR decomposition\n  *\n  * This class performs a rank-revealing QR decomposition of a matrix \\b A into matrices \\b P, \\b Q and \\b R\n  * such that\n  * \\f[\n  *  \\mathbf{A} \\, \\mathbf{P} = \\mathbf{Q} \\, \\mathbf{R}\n  * \\f]\n  * by using Householder transformations. Here, \\b P is a permutation matrix, \\b Q a unitary matrix and \\b R an\n  * upper triangular matrix.\n  *\n  * This decomposition performs column pivoting in order to be rank-revealing and improve\n  * numerical stability. 
It is slower than HouseholderQR, and faster than FullPivHouseholderQR.\n  *\n  * This class supports the \\link InplaceDecomposition inplace decomposition \\endlink mechanism.\n  * \n  * \\sa MatrixBase::colPivHouseholderQr()\n  */\ntemplate<typename _MatrixType> class ColPivHouseholderQR\n{\n  public:\n\n    typedef _MatrixType MatrixType;\n    enum {\n      RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n    // FIXME should be int\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;\n    typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;\n    typedef typename internal::plain_row_type<MatrixType, Index>::type IntRowVectorType;\n    typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;\n    typedef typename internal::plain_row_type<MatrixType, RealScalar>::type RealRowVectorType;\n    typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename HCoeffsType::ConjugateReturnType>::type> HouseholderSequenceType;\n    typedef typename MatrixType::PlainObject PlainObject;\n\n  private:\n\n    typedef typename PermutationType::StorageIndex PermIndexType;\n\n  public:\n\n    /**\n    * \\brief Default Constructor.\n    *\n    * The default constructor is useful in cases in which the user intends to\n    * perform decompositions via ColPivHouseholderQR::compute(const MatrixType&).\n    */\n    ColPivHouseholderQR()\n      : m_qr(),\n        m_hCoeffs(),\n        m_colsPermutation(),\n        m_colsTranspositions(),\n        m_temp(),\n        m_colNormsUpdated(),\n        m_colNormsDirect(),\n        
m_isInitialized(false),\n        m_usePrescribedThreshold(false) {}\n\n    /** \\brief Default Constructor with memory preallocation\n      *\n      * Like the default constructor but with preallocation of the internal data\n      * according to the specified problem \\a size.\n      * \\sa ColPivHouseholderQR()\n      */\n    ColPivHouseholderQR(Index rows, Index cols)\n      : m_qr(rows, cols),\n        m_hCoeffs((std::min)(rows,cols)),\n        m_colsPermutation(PermIndexType(cols)),\n        m_colsTranspositions(cols),\n        m_temp(cols),\n        m_colNormsUpdated(cols),\n        m_colNormsDirect(cols),\n        m_isInitialized(false),\n        m_usePrescribedThreshold(false) {}\n\n    /** \\brief Constructs a QR factorization from a given matrix\n      *\n      * This constructor computes the QR factorization of the matrix \\a matrix by calling\n      * the method compute(). It is a short cut for:\n      *\n      * \\code\n      * ColPivHouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());\n      * qr.compute(matrix);\n      * \\endcode\n      *\n      * \\sa compute()\n      */\n    template<typename InputType>\n    explicit ColPivHouseholderQR(const EigenBase<InputType>& matrix)\n      : m_qr(matrix.rows(), matrix.cols()),\n        m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),\n        m_colsPermutation(PermIndexType(matrix.cols())),\n        m_colsTranspositions(matrix.cols()),\n        m_temp(matrix.cols()),\n        m_colNormsUpdated(matrix.cols()),\n        m_colNormsDirect(matrix.cols()),\n        m_isInitialized(false),\n        m_usePrescribedThreshold(false)\n    {\n      compute(matrix.derived());\n    }\n\n    /** \\brief Constructs a QR factorization from a given matrix\n      *\n      * This overloaded constructor is provided for \\link InplaceDecomposition inplace decomposition \\endlink when \\c MatrixType is a Eigen::Ref.\n      *\n      * \\sa ColPivHouseholderQR(const EigenBase&)\n      */\n    template<typename InputType>\n  
  explicit ColPivHouseholderQR(EigenBase<InputType>& matrix)\n      : m_qr(matrix.derived()),\n        m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),\n        m_colsPermutation(PermIndexType(matrix.cols())),\n        m_colsTranspositions(matrix.cols()),\n        m_temp(matrix.cols()),\n        m_colNormsUpdated(matrix.cols()),\n        m_colNormsDirect(matrix.cols()),\n        m_isInitialized(false),\n        m_usePrescribedThreshold(false)\n    {\n      computeInPlace();\n    }\n\n    /** This method finds a solution x to the equation Ax=b, where A is the matrix of which\n      * *this is the QR decomposition, if any exists.\n      *\n      * \\param b the right-hand-side of the equation to solve.\n      *\n      * \\returns a solution.\n      *\n      * \\note_about_checking_solutions\n      *\n      * \\note_about_arbitrary_choice_of_solution\n      *\n      * Example: \\include ColPivHouseholderQR_solve.cpp\n      * Output: \\verbinclude ColPivHouseholderQR_solve.out\n      */\n    template<typename Rhs>\n    inline const Solve<ColPivHouseholderQR, Rhs>\n    solve(const MatrixBase<Rhs>& b) const\n    {\n      eigen_assert(m_isInitialized && \"ColPivHouseholderQR is not initialized.\");\n      return Solve<ColPivHouseholderQR, Rhs>(*this, b.derived());\n    }\n\n    HouseholderSequenceType householderQ() const;\n    HouseholderSequenceType matrixQ() const\n    {\n      return householderQ();\n    }\n\n    /** \\returns a reference to the matrix where the Householder QR decomposition is stored\n      */\n    const MatrixType& matrixQR() const\n    {\n      eigen_assert(m_isInitialized && \"ColPivHouseholderQR is not initialized.\");\n      return m_qr;\n    }\n\n    /** \\returns a reference to the matrix where the result Householder QR is stored\n     * \\warning The strict lower part of this matrix contains internal values.\n     * Only the upper triangular part should be referenced. 
To get it, use\n     * \\code matrixR().template triangularView<Upper>() \\endcode\n     * For rank-deficient matrices, use\n     * \\code\n     * matrixR().topLeftCorner(rank(), rank()).template triangularView<Upper>()\n     * \\endcode\n     */\n    const MatrixType& matrixR() const\n    {\n      eigen_assert(m_isInitialized && \"ColPivHouseholderQR is not initialized.\");\n      return m_qr;\n    }\n\n    template<typename InputType>\n    ColPivHouseholderQR& compute(const EigenBase<InputType>& matrix);\n\n    /** \\returns a const reference to the column permutation matrix */\n    const PermutationType& colsPermutation() const\n    {\n      eigen_assert(m_isInitialized && \"ColPivHouseholderQR is not initialized.\");\n      return m_colsPermutation;\n    }\n\n    /** \\returns the absolute value of the determinant of the matrix of which\n      * *this is the QR decomposition. It has only linear complexity\n      * (that is, O(n) where n is the dimension of the square matrix)\n      * as the QR decomposition has already been computed.\n      *\n      * \\note This is only for square matrices.\n      *\n      * \\warning a determinant can be very big or small, so for matrices\n      * of large enough dimension, there is a risk of overflow/underflow.\n      * One way to work around that is to use logAbsDeterminant() instead.\n      *\n      * \\sa logAbsDeterminant(), MatrixBase::determinant()\n      */\n    typename MatrixType::RealScalar absDeterminant() const;\n\n    /** \\returns the natural log of the absolute value of the determinant of the matrix of which\n      * *this is the QR decomposition. 
It has only linear complexity\n      * (that is, O(n) where n is the dimension of the square matrix)\n      * as the QR decomposition has already been computed.\n      *\n      * \\note This is only for square matrices.\n      *\n      * \\note This method is useful to work around the risk of overflow/underflow that's inherent\n      * to determinant computation.\n      *\n      * \\sa absDeterminant(), MatrixBase::determinant()\n      */\n    typename MatrixType::RealScalar logAbsDeterminant() const;\n\n    /** \\returns the rank of the matrix of which *this is the QR decomposition.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline Index rank() const\n    {\n      using std::abs;\n      eigen_assert(m_isInitialized && \"ColPivHouseholderQR is not initialized.\");\n      RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();\n      Index result = 0;\n      for(Index i = 0; i < m_nonzero_pivots; ++i)\n        result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold);\n      return result;\n    }\n\n    /** \\returns the dimension of the kernel of the matrix of which *this is the QR decomposition.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline Index dimensionOfKernel() const\n    {\n      eigen_assert(m_isInitialized && \"ColPivHouseholderQR is not initialized.\");\n      return cols() - rank();\n    }\n\n    /** \\returns true if the matrix of which *this is the QR decomposition represents an injective\n      *          linear map, i.e. 
has trivial kernel; false otherwise.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline bool isInjective() const\n    {\n      eigen_assert(m_isInitialized && \"ColPivHouseholderQR is not initialized.\");\n      return rank() == cols();\n    }\n\n    /** \\returns true if the matrix of which *this is the QR decomposition represents a surjective\n      *          linear map; false otherwise.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline bool isSurjective() const\n    {\n      eigen_assert(m_isInitialized && \"ColPivHouseholderQR is not initialized.\");\n      return rank() == rows();\n    }\n\n    /** \\returns true if the matrix of which *this is the QR decomposition is invertible.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline bool isInvertible() const\n    {\n      eigen_assert(m_isInitialized && \"ColPivHouseholderQR is not initialized.\");\n      return isInjective() && isSurjective();\n    }\n\n    /** \\returns the inverse of the matrix of which *this is the QR decomposition.\n      *\n      * \\note If this matrix is not invertible, the returned matrix has undefined coefficients.\n      *       Use isInvertible() to first determine whether this matrix is invertible.\n      */\n    inline const Inverse<ColPivHouseholderQR> inverse() const\n    {\n      eigen_assert(m_isInitialized && \"ColPivHouseholderQR is not initialized.\");\n      return 
Inverse<ColPivHouseholderQR>(*this);\n    }\n\n    inline Index rows() const { return m_qr.rows(); }\n    inline Index cols() const { return m_qr.cols(); }\n\n    /** \\returns a const reference to the vector of Householder coefficients used to represent the factor \\c Q.\n      *\n      * For advanced uses only.\n      */\n    const HCoeffsType& hCoeffs() const { return m_hCoeffs; }\n\n    /** Allows to prescribe a threshold to be used by certain methods, such as rank(),\n      * who need to determine when pivots are to be considered nonzero. This is not used for the\n      * QR decomposition itself.\n      *\n      * When it needs to get the threshold value, Eigen calls threshold(). By default, this\n      * uses a formula to automatically determine a reasonable threshold.\n      * Once you have called the present method setThreshold(const RealScalar&),\n      * your value is used instead.\n      *\n      * \\param threshold The new value to use as the threshold.\n      *\n      * A pivot will be considered nonzero if its absolute value is strictly greater than\n      *  \\f$ \\vert pivot \\vert \\leqslant threshold \\times \\vert maxpivot \\vert \\f$\n      * where maxpivot is the biggest pivot.\n      *\n      * If you want to come back to the default behavior, call setThreshold(Default_t)\n      */\n    ColPivHouseholderQR& setThreshold(const RealScalar& threshold)\n    {\n      m_usePrescribedThreshold = true;\n      m_prescribedThreshold = threshold;\n      return *this;\n    }\n\n    /** Allows to come back to the default behavior, letting Eigen use its default formula for\n      * determining the threshold.\n      *\n      * You should pass the special object Eigen::Default as parameter here.\n      * \\code qr.setThreshold(Eigen::Default); \\endcode\n      *\n      * See the documentation of setThreshold(const RealScalar&).\n      */\n    ColPivHouseholderQR& setThreshold(Default_t)\n    {\n      m_usePrescribedThreshold = false;\n      return *this;\n    
}\n\n    /** Returns the threshold that will be used by certain methods such as rank().\n      *\n      * See the documentation of setThreshold(const RealScalar&).\n      */\n    RealScalar threshold() const\n    {\n      eigen_assert(m_isInitialized || m_usePrescribedThreshold);\n      return m_usePrescribedThreshold ? m_prescribedThreshold\n      // this formula comes from experimenting (see \"LU precision tuning\" thread on the list)\n      // and turns out to be identical to Higham's formula used already in LDLt.\n                                      : NumTraits<Scalar>::epsilon() * RealScalar(m_qr.diagonalSize());\n    }\n\n    /** \\returns the number of nonzero pivots in the QR decomposition.\n      * Here nonzero is meant in the exact sense, not in a fuzzy sense.\n      * So that notion isn't really intrinsically interesting, but it is\n      * still useful when implementing algorithms.\n      *\n      * \\sa rank()\n      */\n    inline Index nonzeroPivots() const\n    {\n      eigen_assert(m_isInitialized && \"ColPivHouseholderQR is not initialized.\");\n      return m_nonzero_pivots;\n    }\n\n    /** \\returns the absolute value of the biggest pivot, i.e. the biggest\n      *          diagonal coefficient of R.\n      */\n    RealScalar maxPivot() const { return m_maxpivot; }\n\n    /** \\brief Reports whether the QR factorization was succesful.\n      *\n      * \\note This function always returns \\c Success. 
It is provided for compatibility\n      * with other factorization routines.\n      * \\returns \\c Success\n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"Decomposition is not initialized.\");\n      return Success;\n    }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    template<typename RhsType, typename DstType>\n    void _solve_impl(const RhsType &rhs, DstType &dst) const;\n    #endif\n\n  protected:\n\n    friend class CompleteOrthogonalDecomposition<MatrixType>;\n\n    static void check_template_parameters()\n    {\n      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);\n    }\n\n    void computeInPlace();\n\n    MatrixType m_qr;\n    HCoeffsType m_hCoeffs;\n    PermutationType m_colsPermutation;\n    IntRowVectorType m_colsTranspositions;\n    RowVectorType m_temp;\n    RealRowVectorType m_colNormsUpdated;\n    RealRowVectorType m_colNormsDirect;\n    bool m_isInitialized, m_usePrescribedThreshold;\n    RealScalar m_prescribedThreshold, m_maxpivot;\n    Index m_nonzero_pivots;\n    Index m_det_pq;\n};\n\ntemplate<typename MatrixType>\ntypename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::absDeterminant() const\n{\n  using std::abs;\n  eigen_assert(m_isInitialized && \"ColPivHouseholderQR is not initialized.\");\n  eigen_assert(m_qr.rows() == m_qr.cols() && \"You can't take the determinant of a non-square matrix!\");\n  return abs(m_qr.diagonal().prod());\n}\n\ntemplate<typename MatrixType>\ntypename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::logAbsDeterminant() const\n{\n  eigen_assert(m_isInitialized && \"ColPivHouseholderQR is not initialized.\");\n  eigen_assert(m_qr.rows() == m_qr.cols() && \"You can't take the determinant of a non-square matrix!\");\n  return m_qr.diagonal().cwiseAbs().array().log().sum();\n}\n\n/** Performs the QR factorization of the given matrix \\a matrix. 
The result of\n  * the factorization is stored into \\c *this, and a reference to \\c *this\n  * is returned.\n  *\n  * \\sa class ColPivHouseholderQR, ColPivHouseholderQR(const MatrixType&)\n  */\ntemplate<typename MatrixType>\ntemplate<typename InputType>\nColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const EigenBase<InputType>& matrix)\n{\n  m_qr = matrix.derived();\n  computeInPlace();\n  return *this;\n}\n\ntemplate<typename MatrixType>\nvoid ColPivHouseholderQR<MatrixType>::computeInPlace()\n{\n  check_template_parameters();\n\n  // the column permutation is stored as int indices, so just to be sure:\n  eigen_assert(m_qr.cols()<=NumTraits<int>::highest());\n\n  using std::abs;\n\n  Index rows = m_qr.rows();\n  Index cols = m_qr.cols();\n  Index size = m_qr.diagonalSize();\n\n  m_hCoeffs.resize(size);\n\n  m_temp.resize(cols);\n\n  m_colsTranspositions.resize(m_qr.cols());\n  Index number_of_transpositions = 0;\n\n  m_colNormsUpdated.resize(cols);\n  m_colNormsDirect.resize(cols);\n  for (Index k = 0; k < cols; ++k) {\n    // colNormsDirect(k) caches the most recent directly computed norm of\n    // column k.\n    m_colNormsDirect.coeffRef(k) = m_qr.col(k).norm();\n    m_colNormsUpdated.coeffRef(k) = m_colNormsDirect.coeffRef(k);\n  }\n\n  RealScalar threshold_helper =  numext::abs2<Scalar>(m_colNormsUpdated.maxCoeff() * NumTraits<Scalar>::epsilon()) / RealScalar(rows);\n  RealScalar norm_downdate_threshold = numext::sqrt(NumTraits<Scalar>::epsilon());\n\n  m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)\n  m_maxpivot = RealScalar(0);\n\n  for(Index k = 0; k < size; ++k)\n  {\n    // first, we look up in our table m_colNormsUpdated which column has the biggest norm\n    Index biggest_col_index;\n    RealScalar biggest_col_sq_norm = numext::abs2(m_colNormsUpdated.tail(cols-k).maxCoeff(&biggest_col_index));\n    biggest_col_index += k;\n\n    // Track the number of meaningful 
pivots but do not stop the decomposition to make\n    // sure that the initial matrix is properly reproduced. See bug 941.\n    if(m_nonzero_pivots==size && biggest_col_sq_norm < threshold_helper * RealScalar(rows-k))\n      m_nonzero_pivots = k;\n\n    // apply the transposition to the columns\n    m_colsTranspositions.coeffRef(k) = biggest_col_index;\n    if(k != biggest_col_index) {\n      m_qr.col(k).swap(m_qr.col(biggest_col_index));\n      std::swap(m_colNormsUpdated.coeffRef(k), m_colNormsUpdated.coeffRef(biggest_col_index));\n      std::swap(m_colNormsDirect.coeffRef(k), m_colNormsDirect.coeffRef(biggest_col_index));\n      ++number_of_transpositions;\n    }\n\n    // generate the householder vector, store it below the diagonal\n    RealScalar beta;\n    m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);\n\n    // apply the householder transformation to the diagonal coefficient\n    m_qr.coeffRef(k,k) = beta;\n\n    // remember the maximum absolute value of diagonal coefficients\n    if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta);\n\n    // apply the householder transformation\n    m_qr.bottomRightCorner(rows-k, cols-k-1)\n        .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1));\n\n    // update our table of norms of the columns\n    for (Index j = k + 1; j < cols; ++j) {\n      // The following implements the stable norm downgrade step discussed in\n      // http://www.netlib.org/lapack/lawnspdf/lawn176.pdf\n      // and used in LAPACK routines xGEQPF and xGEQP3.\n      // See lines 278-297 in http://www.netlib.org/lapack/explore-html/dc/df4/sgeqpf_8f_source.html\n      if (m_colNormsUpdated.coeffRef(j) != 0) {\n        RealScalar temp = abs(m_qr.coeffRef(k, j)) / m_colNormsUpdated.coeffRef(j);\n        temp = (RealScalar(1) + temp) * (RealScalar(1) - temp);\n        temp = temp < 0 ? 
0 : temp;\n        RealScalar temp2 = temp * numext::abs2<Scalar>(m_colNormsUpdated.coeffRef(j) /\n                                                       m_colNormsDirect.coeffRef(j));\n        if (temp2 <= norm_downdate_threshold) {\n          // The updated norm has become too inaccurate so re-compute the column\n          // norm directly.\n          m_colNormsDirect.coeffRef(j) = m_qr.col(j).tail(rows - k - 1).norm();\n          m_colNormsUpdated.coeffRef(j) = m_colNormsDirect.coeffRef(j);\n        } else {\n          m_colNormsUpdated.coeffRef(j) *= numext::sqrt(temp);\n        }\n      }\n    }\n  }\n\n  m_colsPermutation.setIdentity(PermIndexType(cols));\n  for(PermIndexType k = 0; k < size/*m_nonzero_pivots*/; ++k)\n    m_colsPermutation.applyTranspositionOnTheRight(k, PermIndexType(m_colsTranspositions.coeff(k)));\n\n  m_det_pq = (number_of_transpositions%2) ? -1 : 1;\n  m_isInitialized = true;\n}\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate<typename _MatrixType>\ntemplate<typename RhsType, typename DstType>\nvoid ColPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const\n{\n  eigen_assert(rhs.rows() == rows());\n\n  const Index nonzero_pivots = nonzeroPivots();\n\n  if(nonzero_pivots == 0)\n  {\n    dst.setZero();\n    return;\n  }\n\n  typename RhsType::PlainObject c(rhs);\n\n  // Note that the matrix Q = H_0^* H_1^*... 
so its inverse is Q^* = (H_0 H_1 ...)^T\n  c.applyOnTheLeft(householderSequence(m_qr, m_hCoeffs)\n                    .setLength(nonzero_pivots)\n                    .transpose()\n    );\n\n  m_qr.topLeftCorner(nonzero_pivots, nonzero_pivots)\n      .template triangularView<Upper>()\n      .solveInPlace(c.topRows(nonzero_pivots));\n\n  for(Index i = 0; i < nonzero_pivots; ++i) dst.row(m_colsPermutation.indices().coeff(i)) = c.row(i);\n  for(Index i = nonzero_pivots; i < cols(); ++i) dst.row(m_colsPermutation.indices().coeff(i)).setZero();\n}\n#endif\n\nnamespace internal {\n\ntemplate<typename DstXprType, typename MatrixType>\nstruct Assignment<DstXprType, Inverse<ColPivHouseholderQR<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename ColPivHouseholderQR<MatrixType>::Scalar>, Dense2Dense>\n{\n  typedef ColPivHouseholderQR<MatrixType> QrType;\n  typedef Inverse<QrType> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename QrType::Scalar> &)\n  {\n    dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));\n  }\n};\n\n} // end namespace internal\n\n/** \\returns the matrix Q as a sequence of householder transformations.\n  * You can extract the meaningful part only by using:\n  * \\code qr.householderQ().setLength(qr.nonzeroPivots()) \\endcode*/\ntemplate<typename MatrixType>\ntypename ColPivHouseholderQR<MatrixType>::HouseholderSequenceType ColPivHouseholderQR<MatrixType>\n  ::householderQ() const\n{\n  eigen_assert(m_isInitialized && \"ColPivHouseholderQR is not initialized.\");\n  return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate());\n}\n\n/** \\return the column-pivoting Householder QR decomposition of \\c *this.\n  *\n  * \\sa class ColPivHouseholderQR\n  */\ntemplate<typename Derived>\nconst ColPivHouseholderQR<typename MatrixBase<Derived>::PlainObject>\nMatrixBase<Derived>::colPivHouseholderQr() const\n{\n  return 
ColPivHouseholderQR<PlainObject>(eval());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_COLPIVOTINGHOUSEHOLDERQR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/QR/ColPivHouseholderQR_LAPACKE.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to LAPACKe\n *    Householder QR decomposition of a matrix with column pivoting based on\n *    LAPACKE_?geqp3 function.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_COLPIVOTINGHOUSEHOLDERQR_LAPACKE_H\n#define EIGEN_COLPIVOTINGHOUSEHOLDERQR_LAPACKE_H\n\nnamespace Eigen { \n\n/** \\internal Specialization for the data types supported by LAPACKe */\n\n#define EIGEN_LAPACKE_QR_COLPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW) \\\ntemplate<> template<typename InputType> inline \\\nColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> >& \\\nColPivHouseholderQR<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> >::compute( \\\n              const EigenBase<InputType>& matrix) \\\n\\\n{ \\\n  using std::abs; \\\n  typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \\\n  typedef MatrixType::RealScalar RealScalar; \\\n  Index rows = matrix.rows();\\\n  Index cols = matrix.cols();\\\n\\\n  m_qr = matrix;\\\n  Index size = m_qr.diagonalSize();\\\n  m_hCoeffs.resize(size);\\\n\\\n  m_colsTranspositions.resize(cols);\\\n  /*Index number_of_transpositions = 0;*/ \\\n\\\n  m_nonzero_pivots = 0; \\\n  m_maxpivot = RealScalar(0);\\\n  m_colsPermutation.resize(cols); \\\n  
m_colsPermutation.indices().setZero(); \\\n\\\n  lapack_int lda = internal::convert_index<lapack_int,Index>(m_qr.outerStride()); \\\n  lapack_int matrix_order = LAPACKE_COLROW; \\\n  LAPACKE_##LAPACKE_PREFIX##geqp3( matrix_order, internal::convert_index<lapack_int,Index>(rows), internal::convert_index<lapack_int,Index>(cols), \\\n                              (LAPACKE_TYPE*)m_qr.data(), lda, (lapack_int*)m_colsPermutation.indices().data(), (LAPACKE_TYPE*)m_hCoeffs.data()); \\\n  m_isInitialized = true; \\\n  m_maxpivot=m_qr.diagonal().cwiseAbs().maxCoeff(); \\\n  m_hCoeffs.adjointInPlace(); \\\n  RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold(); \\\n  lapack_int *perm = m_colsPermutation.indices().data(); \\\n  for(Index i=0;i<size;i++) { \\\n    m_nonzero_pivots += (abs(m_qr.coeff(i,i)) > premultiplied_threshold);\\\n  } \\\n  for(Index i=0;i<cols;i++) perm[i]--;\\\n\\\n  /*m_det_pq = (number_of_transpositions%2) ? -1 : 1;  // TODO: It's not needed now; fix upon availability in Eigen */ \\\n\\\n  return *this; \\\n}\n\nEIGEN_LAPACKE_QR_COLPIV(double,   double,        d, ColMajor, LAPACK_COL_MAJOR)\nEIGEN_LAPACKE_QR_COLPIV(float,    float,         s, ColMajor, LAPACK_COL_MAJOR)\nEIGEN_LAPACKE_QR_COLPIV(dcomplex, lapack_complex_double, z, ColMajor, LAPACK_COL_MAJOR)\nEIGEN_LAPACKE_QR_COLPIV(scomplex, lapack_complex_float,  c, ColMajor, LAPACK_COL_MAJOR)\n\nEIGEN_LAPACKE_QR_COLPIV(double,   double,        d, RowMajor, LAPACK_ROW_MAJOR)\nEIGEN_LAPACKE_QR_COLPIV(float,    float,         s, RowMajor, LAPACK_ROW_MAJOR)\nEIGEN_LAPACKE_QR_COLPIV(dcomplex, lapack_complex_double, z, RowMajor, LAPACK_ROW_MAJOR)\nEIGEN_LAPACKE_QR_COLPIV(scomplex, lapack_complex_float,  c, RowMajor, LAPACK_ROW_MAJOR)\n\n} // end namespace Eigen\n\n#endif // EIGEN_COLPIVOTINGHOUSEHOLDERQR_LAPACKE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/QR/CompleteOrthogonalDecomposition.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2016 Rasmus Munk Larsen <rmlarsen@google.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_COMPLETEORTHOGONALDECOMPOSITION_H\n#define EIGEN_COMPLETEORTHOGONALDECOMPOSITION_H\n\nnamespace Eigen {\n\nnamespace internal {\ntemplate <typename _MatrixType>\nstruct traits<CompleteOrthogonalDecomposition<_MatrixType> >\n    : traits<_MatrixType> {\n  enum { Flags = 0 };\n};\n\n}  // end namespace internal\n\n/** \\ingroup QR_Module\n  *\n  * \\class CompleteOrthogonalDecomposition\n  *\n  * \\brief Complete orthogonal decomposition (COD) of a matrix.\n  *\n  * \\param MatrixType the type of the matrix of which we are computing the COD.\n  *\n  * This class performs a rank-revealing complete orthogonal decomposition of a\n  * matrix  \\b A into matrices \\b P, \\b Q, \\b T, and \\b Z such that\n  * \\f[\n  *  \\mathbf{A} \\, \\mathbf{P} = \\mathbf{Q} \\,\n  *                     \\begin{bmatrix} \\mathbf{T} &  \\mathbf{0} \\\\\n  *                                     \\mathbf{0} & \\mathbf{0} \\end{bmatrix} \\, \\mathbf{Z}\n  * \\f]\n  * by using Householder transformations. Here, \\b P is a permutation matrix,\n  * \\b Q and \\b Z are unitary matrices and \\b T an upper triangular matrix of\n  * size rank-by-rank. 
\\b A may be rank deficient.\n  *\n  * This class supports the \\link InplaceDecomposition inplace decomposition \\endlink mechanism.\n  * \n  * \\sa MatrixBase::completeOrthogonalDecomposition()\n  */\ntemplate <typename _MatrixType>\nclass CompleteOrthogonalDecomposition {\n public:\n  typedef _MatrixType MatrixType;\n  enum {\n    RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n    ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n  };\n  typedef typename MatrixType::Scalar Scalar;\n  typedef typename MatrixType::RealScalar RealScalar;\n  typedef typename MatrixType::StorageIndex StorageIndex;\n  typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;\n  typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime>\n      PermutationType;\n  typedef typename internal::plain_row_type<MatrixType, Index>::type\n      IntRowVectorType;\n  typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;\n  typedef typename internal::plain_row_type<MatrixType, RealScalar>::type\n      RealRowVectorType;\n  typedef HouseholderSequence<\n      MatrixType, typename internal::remove_all<\n                      typename HCoeffsType::ConjugateReturnType>::type>\n      HouseholderSequenceType;\n  typedef typename MatrixType::PlainObject PlainObject;\n\n private:\n  typedef typename PermutationType::Index PermIndexType;\n\n public:\n  /**\n   * \\brief Default Constructor.\n   *\n   * The default constructor is useful in cases in which the user intends to\n   * perform decompositions via\n   * \\c CompleteOrthogonalDecomposition::compute(const* MatrixType&).\n   */\n  CompleteOrthogonalDecomposition() : m_cpqr(), m_zCoeffs(), m_temp() {}\n\n  /** \\brief Default Constructor with memory preallocation\n   *\n   * Like the default constructor but with preallocation of the internal data\n   * according to the specified 
problem \\a size.\n   * \\sa CompleteOrthogonalDecomposition()\n   */\n  CompleteOrthogonalDecomposition(Index rows, Index cols)\n      : m_cpqr(rows, cols), m_zCoeffs((std::min)(rows, cols)), m_temp(cols) {}\n\n  /** \\brief Constructs a complete orthogonal decomposition from a given\n   * matrix.\n   *\n   * This constructor computes the complete orthogonal decomposition of the\n   * matrix \\a matrix by calling the method compute(). The default\n   * threshold for rank determination will be used. It is a short cut for:\n   *\n   * \\code\n   * CompleteOrthogonalDecomposition<MatrixType> cod(matrix.rows(),\n   *                                                 matrix.cols());\n   * cod.setThreshold(Default);\n   * cod.compute(matrix);\n   * \\endcode\n   *\n   * \\sa compute()\n   */\n  template <typename InputType>\n  explicit CompleteOrthogonalDecomposition(const EigenBase<InputType>& matrix)\n      : m_cpqr(matrix.rows(), matrix.cols()),\n        m_zCoeffs((std::min)(matrix.rows(), matrix.cols())),\n        m_temp(matrix.cols())\n  {\n    compute(matrix.derived());\n  }\n\n  /** \\brief Constructs a complete orthogonal decomposition from a given matrix\n    *\n    * This overloaded constructor is provided for \\link InplaceDecomposition inplace decomposition \\endlink when \\c MatrixType is a Eigen::Ref.\n    *\n    * \\sa CompleteOrthogonalDecomposition(const EigenBase&)\n    */\n  template<typename InputType>\n  explicit CompleteOrthogonalDecomposition(EigenBase<InputType>& matrix)\n    : m_cpqr(matrix.derived()),\n      m_zCoeffs((std::min)(matrix.rows(), matrix.cols())),\n      m_temp(matrix.cols())\n  {\n    computeInPlace();\n  }\n\n\n  /** This method computes the minimum-norm solution X to a least squares\n   * problem \\f[\\mathrm{minimize} \\|A X - B\\|, \\f] where \\b A is the matrix of\n   * which \\c *this is the complete orthogonal decomposition.\n   *\n   * \\param b the right-hand sides of the problem to solve.\n   *\n   * \\returns a 
solution.\n   *\n   */\n  template <typename Rhs>\n  inline const Solve<CompleteOrthogonalDecomposition, Rhs> solve(\n      const MatrixBase<Rhs>& b) const {\n    eigen_assert(m_cpqr.m_isInitialized &&\n                 \"CompleteOrthogonalDecomposition is not initialized.\");\n    return Solve<CompleteOrthogonalDecomposition, Rhs>(*this, b.derived());\n  }\n\n  HouseholderSequenceType householderQ(void) const;\n  HouseholderSequenceType matrixQ(void) const { return m_cpqr.householderQ(); }\n\n  /** \\returns the matrix \\b Z.\n   */\n  MatrixType matrixZ() const {\n    MatrixType Z = MatrixType::Identity(m_cpqr.cols(), m_cpqr.cols());\n    applyZAdjointOnTheLeftInPlace(Z);\n    return Z.adjoint();\n  }\n\n  /** \\returns a reference to the matrix where the complete orthogonal\n   * decomposition is stored\n   */\n  const MatrixType& matrixQTZ() const { return m_cpqr.matrixQR(); }\n\n  /** \\returns a reference to the matrix where the complete orthogonal\n   * decomposition is stored.\n   * \\warning The strict lower part and \\code cols() - rank() \\endcode right\n   * columns of this matrix contains internal values.\n   * Only the upper triangular part should be referenced. 
To get it, use\n   * \\code matrixT().template triangularView<Upper>() \\endcode\n   * For rank-deficient matrices, use\n   * \\code\n   * matrixR().topLeftCorner(rank(), rank()).template triangularView<Upper>()\n   * \\endcode\n   */\n  const MatrixType& matrixT() const { return m_cpqr.matrixQR(); }\n\n  template <typename InputType>\n  CompleteOrthogonalDecomposition& compute(const EigenBase<InputType>& matrix) {\n    // Compute the column pivoted QR factorization A P = Q R.\n    m_cpqr.compute(matrix);\n    computeInPlace();\n    return *this;\n  }\n\n  /** \\returns a const reference to the column permutation matrix */\n  const PermutationType& colsPermutation() const {\n    return m_cpqr.colsPermutation();\n  }\n\n  /** \\returns the absolute value of the determinant of the matrix of which\n   * *this is the complete orthogonal decomposition. It has only linear\n   * complexity (that is, O(n) where n is the dimension of the square matrix)\n   * as the complete orthogonal decomposition has already been computed.\n   *\n   * \\note This is only for square matrices.\n   *\n   * \\warning a determinant can be very big or small, so for matrices\n   * of large enough dimension, there is a risk of overflow/underflow.\n   * One way to work around that is to use logAbsDeterminant() instead.\n   *\n   * \\sa logAbsDeterminant(), MatrixBase::determinant()\n   */\n  typename MatrixType::RealScalar absDeterminant() const;\n\n  /** \\returns the natural log of the absolute value of the determinant of the\n   * matrix of which *this is the complete orthogonal decomposition. 
It has\n   * only linear complexity (that is, O(n) where n is the dimension of the\n   * square matrix) as the complete orthogonal decomposition has already been\n   * computed.\n   *\n   * \\note This is only for square matrices.\n   *\n   * \\note This method is useful to work around the risk of overflow/underflow\n   * that's inherent to determinant computation.\n   *\n   * \\sa absDeterminant(), MatrixBase::determinant()\n   */\n  typename MatrixType::RealScalar logAbsDeterminant() const;\n\n  /** \\returns the rank of the matrix of which *this is the complete orthogonal\n   * decomposition.\n   *\n   * \\note This method has to determine which pivots should be considered\n   * nonzero. For that, it uses the threshold value that you can control by\n   * calling setThreshold(const RealScalar&).\n   */\n  inline Index rank() const { return m_cpqr.rank(); }\n\n  /** \\returns the dimension of the kernel of the matrix of which *this is the\n   * complete orthogonal decomposition.\n   *\n   * \\note This method has to determine which pivots should be considered\n   * nonzero. For that, it uses the threshold value that you can control by\n   * calling setThreshold(const RealScalar&).\n   */\n  inline Index dimensionOfKernel() const { return m_cpqr.dimensionOfKernel(); }\n\n  /** \\returns true if the matrix of which *this is the decomposition represents\n   * an injective linear map, i.e. has trivial kernel; false otherwise.\n   *\n   * \\note This method has to determine which pivots should be considered\n   * nonzero. For that, it uses the threshold value that you can control by\n   * calling setThreshold(const RealScalar&).\n   */\n  inline bool isInjective() const { return m_cpqr.isInjective(); }\n\n  /** \\returns true if the matrix of which *this is the decomposition represents\n   * a surjective linear map; false otherwise.\n   *\n   * \\note This method has to determine which pivots should be considered\n   * nonzero. 
For that, it uses the threshold value that you can control by\n   * calling setThreshold(const RealScalar&).\n   */\n  inline bool isSurjective() const { return m_cpqr.isSurjective(); }\n\n  /** \\returns true if the matrix of which *this is the complete orthogonal\n   * decomposition is invertible.\n   *\n   * \\note This method has to determine which pivots should be considered\n   * nonzero. For that, it uses the threshold value that you can control by\n   * calling setThreshold(const RealScalar&).\n   */\n  inline bool isInvertible() const { return m_cpqr.isInvertible(); }\n\n  /** \\returns the pseudo-inverse of the matrix of which *this is the complete\n   * orthogonal decomposition.\n   * \\warning: Do not compute \\c this->pseudoInverse()*rhs to solve a linear systems.\n   * It is more efficient and numerically stable to call \\c this->solve(rhs).\n   */\n  inline const Inverse<CompleteOrthogonalDecomposition> pseudoInverse() const\n  {\n    return Inverse<CompleteOrthogonalDecomposition>(*this);\n  }\n\n  inline Index rows() const { return m_cpqr.rows(); }\n  inline Index cols() const { return m_cpqr.cols(); }\n\n  /** \\returns a const reference to the vector of Householder coefficients used\n   * to represent the factor \\c Q.\n   *\n   * For advanced uses only.\n   */\n  inline const HCoeffsType& hCoeffs() const { return m_cpqr.hCoeffs(); }\n\n  /** \\returns a const reference to the vector of Householder coefficients\n   * used to represent the factor \\c Z.\n   *\n   * For advanced uses only.\n   */\n  const HCoeffsType& zCoeffs() const { return m_zCoeffs; }\n\n  /** Allows to prescribe a threshold to be used by certain methods, such as\n   * rank(), who need to determine when pivots are to be considered nonzero.\n   * Most be called before calling compute().\n   *\n   * When it needs to get the threshold value, Eigen calls threshold(). By\n   * default, this uses a formula to automatically determine a reasonable\n   * threshold. 
Once you have called the present method\n   * setThreshold(const RealScalar&), your value is used instead.\n   *\n   * \\param threshold The new value to use as the threshold.\n   *\n   * A pivot will be considered nonzero if its absolute value is strictly\n   * greater than\n   *  \\f$ \\vert pivot \\vert \\leqslant threshold \\times \\vert maxpivot \\vert \\f$\n   * where maxpivot is the biggest pivot.\n   *\n   * If you want to come back to the default behavior, call\n   * setThreshold(Default_t)\n   */\n  CompleteOrthogonalDecomposition& setThreshold(const RealScalar& threshold) {\n    m_cpqr.setThreshold(threshold);\n    return *this;\n  }\n\n  /** Allows to come back to the default behavior, letting Eigen use its default\n   * formula for determining the threshold.\n   *\n   * You should pass the special object Eigen::Default as parameter here.\n   * \\code qr.setThreshold(Eigen::Default); \\endcode\n   *\n   * See the documentation of setThreshold(const RealScalar&).\n   */\n  CompleteOrthogonalDecomposition& setThreshold(Default_t) {\n    m_cpqr.setThreshold(Default);\n    return *this;\n  }\n\n  /** Returns the threshold that will be used by certain methods such as rank().\n   *\n   * See the documentation of setThreshold(const RealScalar&).\n   */\n  RealScalar threshold() const { return m_cpqr.threshold(); }\n\n  /** \\returns the number of nonzero pivots in the complete orthogonal\n   * decomposition. Here nonzero is meant in the exact sense, not in a\n   * fuzzy sense. So that notion isn't really intrinsically interesting,\n   * but it is still useful when implementing algorithms.\n   *\n   * \\sa rank()\n   */\n  inline Index nonzeroPivots() const { return m_cpqr.nonzeroPivots(); }\n\n  /** \\returns the absolute value of the biggest pivot, i.e. 
the biggest\n   *          diagonal coefficient of R.\n   */\n  inline RealScalar maxPivot() const { return m_cpqr.maxPivot(); }\n\n  /** \\brief Reports whether the complete orthogonal decomposition was\n   * succesful.\n   *\n   * \\note This function always returns \\c Success. It is provided for\n   * compatibility\n   * with other factorization routines.\n   * \\returns \\c Success\n   */\n  ComputationInfo info() const {\n    eigen_assert(m_cpqr.m_isInitialized && \"Decomposition is not initialized.\");\n    return Success;\n  }\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n  template <typename RhsType, typename DstType>\n  void _solve_impl(const RhsType& rhs, DstType& dst) const;\n#endif\n\n protected:\n  static void check_template_parameters() {\n    EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);\n  }\n\n  void computeInPlace();\n\n  /** Overwrites \\b rhs with \\f$ \\mathbf{Z}^* * \\mathbf{rhs} \\f$.\n   */\n  template <typename Rhs>\n  void applyZAdjointOnTheLeftInPlace(Rhs& rhs) const;\n\n  ColPivHouseholderQR<MatrixType> m_cpqr;\n  HCoeffsType m_zCoeffs;\n  RowVectorType m_temp;\n};\n\ntemplate <typename MatrixType>\ntypename MatrixType::RealScalar\nCompleteOrthogonalDecomposition<MatrixType>::absDeterminant() const {\n  return m_cpqr.absDeterminant();\n}\n\ntemplate <typename MatrixType>\ntypename MatrixType::RealScalar\nCompleteOrthogonalDecomposition<MatrixType>::logAbsDeterminant() const {\n  return m_cpqr.logAbsDeterminant();\n}\n\n/** Performs the complete orthogonal decomposition of the given matrix \\a\n * matrix. 
The result of the factorization is stored into \\c *this, and a\n * reference to \\c *this is returned.\n *\n * \\sa class CompleteOrthogonalDecomposition,\n * CompleteOrthogonalDecomposition(const MatrixType&)\n */\ntemplate <typename MatrixType>\nvoid CompleteOrthogonalDecomposition<MatrixType>::computeInPlace()\n{\n  check_template_parameters();\n\n  // the column permutation is stored as int indices, so just to be sure:\n  eigen_assert(m_cpqr.cols() <= NumTraits<int>::highest());\n\n  const Index rank = m_cpqr.rank();\n  const Index cols = m_cpqr.cols();\n  const Index rows = m_cpqr.rows();\n  m_zCoeffs.resize((std::min)(rows, cols));\n  m_temp.resize(cols);\n\n  if (rank < cols) {\n    // We have reduced the (permuted) matrix to the form\n    //   [R11 R12]\n    //   [ 0  R22]\n    // where R11 is r-by-r (r = rank) upper triangular, R12 is\n    // r-by-(n-r), and R22 is empty or the norm of R22 is negligible.\n    // We now compute the complete orthogonal decomposition by applying\n    // Householder transformations from the right to the upper trapezoidal\n    // matrix X = [R11 R12] to zero out R12 and obtain the factorization\n    // [R11 R12] = [T11 0] * Z, where T11 is r-by-r upper triangular and\n    // Z = Z(0) * Z(1) ... Z(r-1) is an n-by-n orthogonal matrix.\n    // We store the data representing Z in R12 and m_zCoeffs.\n    for (Index k = rank - 1; k >= 0; --k) {\n      if (k != rank - 1) {\n        // Given the API for Householder reflectors, it is more convenient if\n        // we swap the leading parts of columns k and r-1 (zero-based) to form\n        // the matrix X_k = [X(0:k, k), X(0:k, r:n)]\n        m_cpqr.m_qr.col(k).head(k + 1).swap(\n            m_cpqr.m_qr.col(rank - 1).head(k + 1));\n      }\n      // Construct Householder reflector Z(k) to zero out the last row of X_k,\n      // i.e. 
choose Z(k) such that\n      // [X(k, k), X(k, r:n)] * Z(k) = [beta, 0, .., 0].\n      RealScalar beta;\n      m_cpqr.m_qr.row(k)\n          .tail(cols - rank + 1)\n          .makeHouseholderInPlace(m_zCoeffs(k), beta);\n      m_cpqr.m_qr(k, rank - 1) = beta;\n      if (k > 0) {\n        // Apply Z(k) to the first k rows of X_k\n        m_cpqr.m_qr.topRightCorner(k, cols - rank + 1)\n            .applyHouseholderOnTheRight(\n                m_cpqr.m_qr.row(k).tail(cols - rank).transpose(), m_zCoeffs(k),\n                &m_temp(0));\n      }\n      if (k != rank - 1) {\n        // Swap X(0:k,k) back to its proper location.\n        m_cpqr.m_qr.col(k).head(k + 1).swap(\n            m_cpqr.m_qr.col(rank - 1).head(k + 1));\n      }\n    }\n  }\n}\n\ntemplate <typename MatrixType>\ntemplate <typename Rhs>\nvoid CompleteOrthogonalDecomposition<MatrixType>::applyZAdjointOnTheLeftInPlace(\n    Rhs& rhs) const {\n  const Index cols = this->cols();\n  const Index nrhs = rhs.cols();\n  const Index rank = this->rank();\n  Matrix<typename MatrixType::Scalar, Dynamic, 1> temp((std::max)(cols, nrhs));\n  for (Index k = 0; k < rank; ++k) {\n    if (k != rank - 1) {\n      rhs.row(k).swap(rhs.row(rank - 1));\n    }\n    rhs.middleRows(rank - 1, cols - rank + 1)\n        .applyHouseholderOnTheLeft(\n            matrixQTZ().row(k).tail(cols - rank).adjoint(), zCoeffs()(k),\n            &temp(0));\n    if (k != rank - 1) {\n      rhs.row(k).swap(rhs.row(rank - 1));\n    }\n  }\n}\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate <typename _MatrixType>\ntemplate <typename RhsType, typename DstType>\nvoid CompleteOrthogonalDecomposition<_MatrixType>::_solve_impl(\n    const RhsType& rhs, DstType& dst) const {\n  eigen_assert(rhs.rows() == this->rows());\n\n  const Index rank = this->rank();\n  if (rank == 0) {\n    dst.setZero();\n    return;\n  }\n\n  // Compute c = Q^* * rhs\n  // Note that the matrix Q = H_0^* H_1^*... 
so its inverse is\n  // Q^* = (H_0 H_1 ...)^T\n  typename RhsType::PlainObject c(rhs);\n  c.applyOnTheLeft(\n      householderSequence(matrixQTZ(), hCoeffs()).setLength(rank).transpose());\n\n  // Solve T z = c(1:rank, :)\n  dst.topRows(rank) = matrixT()\n                          .topLeftCorner(rank, rank)\n                          .template triangularView<Upper>()\n                          .solve(c.topRows(rank));\n\n  const Index cols = this->cols();\n  if (rank < cols) {\n    // Compute y = Z^* * [ z ]\n    //                   [ 0 ]\n    dst.bottomRows(cols - rank).setZero();\n    applyZAdjointOnTheLeftInPlace(dst);\n  }\n\n  // Undo permutation to get x = P^{-1} * y.\n  dst = colsPermutation() * dst;\n}\n#endif\n\nnamespace internal {\n\ntemplate<typename DstXprType, typename MatrixType>\nstruct Assignment<DstXprType, Inverse<CompleteOrthogonalDecomposition<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename CompleteOrthogonalDecomposition<MatrixType>::Scalar>, Dense2Dense>\n{\n  typedef CompleteOrthogonalDecomposition<MatrixType> CodType;\n  typedef Inverse<CodType> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename CodType::Scalar> &)\n  {\n    dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.rows()));\n  }\n};\n\n} // end namespace internal\n\n/** \\returns the matrix Q as a sequence of householder transformations */\ntemplate <typename MatrixType>\ntypename CompleteOrthogonalDecomposition<MatrixType>::HouseholderSequenceType\nCompleteOrthogonalDecomposition<MatrixType>::householderQ() const {\n  return m_cpqr.householderQ();\n}\n\n/** \\return the complete orthogonal decomposition of \\c *this.\n  *\n  * \\sa class CompleteOrthogonalDecomposition\n  */\ntemplate <typename Derived>\nconst CompleteOrthogonalDecomposition<typename MatrixBase<Derived>::PlainObject>\nMatrixBase<Derived>::completeOrthogonalDecomposition() const {\n  
return CompleteOrthogonalDecomposition<PlainObject>(eval());\n}\n\n}  // end namespace Eigen\n\n#endif  // EIGEN_COMPLETEORTHOGONALDECOMPOSITION_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/QR/FullPivHouseholderQR.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H\n#define EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename _MatrixType> struct traits<FullPivHouseholderQR<_MatrixType> >\n : traits<_MatrixType>\n{\n  enum { Flags = 0 };\n};\n\ntemplate<typename MatrixType> struct FullPivHouseholderQRMatrixQReturnType;\n\ntemplate<typename MatrixType>\nstruct traits<FullPivHouseholderQRMatrixQReturnType<MatrixType> >\n{\n  typedef typename MatrixType::PlainObject ReturnType;\n};\n\n} // end namespace internal\n\n/** \\ingroup QR_Module\n  *\n  * \\class FullPivHouseholderQR\n  *\n  * \\brief Householder rank-revealing QR decomposition of a matrix with full pivoting\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are computing the QR decomposition\n  *\n  * This class performs a rank-revealing QR decomposition of a matrix \\b A into matrices \\b P, \\b P', \\b Q and \\b R\n  * such that \n  * \\f[\n  *  \\mathbf{P} \\, \\mathbf{A} \\, \\mathbf{P}' = \\mathbf{Q} \\, \\mathbf{R}\n  * \\f]\n  * by using Householder transformations. Here, \\b P and \\b P' are permutation matrices, \\b Q a unitary matrix \n  * and \\b R an upper triangular matrix.\n  *\n  * This decomposition performs a very prudent full pivoting in order to be rank-revealing and achieve optimal\n  * numerical stability. 
The trade-off is that it is slower than HouseholderQR and ColPivHouseholderQR.\n  *\n  * This class supports the \\link InplaceDecomposition inplace decomposition \\endlink mechanism.\n  * \n  * \\sa MatrixBase::fullPivHouseholderQr()\n  */\ntemplate<typename _MatrixType> class FullPivHouseholderQR\n{\n  public:\n\n    typedef _MatrixType MatrixType;\n    enum {\n      RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n    // FIXME should be int\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef internal::FullPivHouseholderQRMatrixQReturnType<MatrixType> MatrixQReturnType;\n    typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;\n    typedef Matrix<StorageIndex, 1,\n                   EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime,RowsAtCompileTime), RowMajor, 1,\n                   EIGEN_SIZE_MIN_PREFER_FIXED(MaxColsAtCompileTime,MaxRowsAtCompileTime)> IntDiagSizeVectorType;\n    typedef PermutationMatrix<ColsAtCompileTime, MaxColsAtCompileTime> PermutationType;\n    typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;\n    typedef typename internal::plain_col_type<MatrixType>::type ColVectorType;\n    typedef typename MatrixType::PlainObject PlainObject;\n\n    /** \\brief Default Constructor.\n      *\n      * The default constructor is useful in cases in which the user intends to\n      * perform decompositions via FullPivHouseholderQR::compute(const MatrixType&).\n      */\n    FullPivHouseholderQR()\n      : m_qr(),\n        m_hCoeffs(),\n        m_rows_transpositions(),\n        m_cols_transpositions(),\n        m_cols_permutation(),\n        m_temp(),\n        m_isInitialized(false),\n        
m_usePrescribedThreshold(false) {}\n\n    /** \\brief Default Constructor with memory preallocation\n      *\n      * Like the default constructor but with preallocation of the internal data\n      * according to the specified problem \\a size.\n      * \\sa FullPivHouseholderQR()\n      */\n    FullPivHouseholderQR(Index rows, Index cols)\n      : m_qr(rows, cols),\n        m_hCoeffs((std::min)(rows,cols)),\n        m_rows_transpositions((std::min)(rows,cols)),\n        m_cols_transpositions((std::min)(rows,cols)),\n        m_cols_permutation(cols),\n        m_temp(cols),\n        m_isInitialized(false),\n        m_usePrescribedThreshold(false) {}\n\n    /** \\brief Constructs a QR factorization from a given matrix\n      *\n      * This constructor computes the QR factorization of the matrix \\a matrix by calling\n      * the method compute(). It is a short cut for:\n      * \n      * \\code\n      * FullPivHouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());\n      * qr.compute(matrix);\n      * \\endcode\n      * \n      * \\sa compute()\n      */\n    template<typename InputType>\n    explicit FullPivHouseholderQR(const EigenBase<InputType>& matrix)\n      : m_qr(matrix.rows(), matrix.cols()),\n        m_hCoeffs((std::min)(matrix.rows(), matrix.cols())),\n        m_rows_transpositions((std::min)(matrix.rows(), matrix.cols())),\n        m_cols_transpositions((std::min)(matrix.rows(), matrix.cols())),\n        m_cols_permutation(matrix.cols()),\n        m_temp(matrix.cols()),\n        m_isInitialized(false),\n        m_usePrescribedThreshold(false)\n    {\n      compute(matrix.derived());\n    }\n\n    /** \\brief Constructs a QR factorization from a given matrix\n      *\n      * This overloaded constructor is provided for \\link InplaceDecomposition inplace decomposition \\endlink when \\c MatrixType is a Eigen::Ref.\n      *\n      * \\sa FullPivHouseholderQR(const EigenBase&)\n      */\n    template<typename InputType>\n    explicit 
FullPivHouseholderQR(EigenBase<InputType>& matrix)\n      : m_qr(matrix.derived()),\n        m_hCoeffs((std::min)(matrix.rows(), matrix.cols())),\n        m_rows_transpositions((std::min)(matrix.rows(), matrix.cols())),\n        m_cols_transpositions((std::min)(matrix.rows(), matrix.cols())),\n        m_cols_permutation(matrix.cols()),\n        m_temp(matrix.cols()),\n        m_isInitialized(false),\n        m_usePrescribedThreshold(false)\n    {\n      computeInPlace();\n    }\n\n    /** This method finds a solution x to the equation Ax=b, where A is the matrix of which\n      * \\c *this is the QR decomposition.\n      *\n      * \\param b the right-hand-side of the equation to solve.\n      *\n      * \\returns the exact or least-square solution if the rank is greater or equal to the number of columns of A,\n      * and an arbitrary solution otherwise.\n      *\n      * \\note_about_checking_solutions\n      *\n      * \\note_about_arbitrary_choice_of_solution\n      *\n      * Example: \\include FullPivHouseholderQR_solve.cpp\n      * Output: \\verbinclude FullPivHouseholderQR_solve.out\n      */\n    template<typename Rhs>\n    inline const Solve<FullPivHouseholderQR, Rhs>\n    solve(const MatrixBase<Rhs>& b) const\n    {\n      eigen_assert(m_isInitialized && \"FullPivHouseholderQR is not initialized.\");\n      return Solve<FullPivHouseholderQR, Rhs>(*this, b.derived());\n    }\n\n    /** \\returns Expression object representing the matrix Q\n      */\n    MatrixQReturnType matrixQ(void) const;\n\n    /** \\returns a reference to the matrix where the Householder QR decomposition is stored\n      */\n    const MatrixType& matrixQR() const\n    {\n      eigen_assert(m_isInitialized && \"FullPivHouseholderQR is not initialized.\");\n      return m_qr;\n    }\n\n    template<typename InputType>\n    FullPivHouseholderQR& compute(const EigenBase<InputType>& matrix);\n\n    /** \\returns a const reference to the column permutation matrix */\n    const 
PermutationType& colsPermutation() const\n    {\n      eigen_assert(m_isInitialized && \"FullPivHouseholderQR is not initialized.\");\n      return m_cols_permutation;\n    }\n\n    /** \\returns a const reference to the vector of indices representing the rows transpositions */\n    const IntDiagSizeVectorType& rowsTranspositions() const\n    {\n      eigen_assert(m_isInitialized && \"FullPivHouseholderQR is not initialized.\");\n      return m_rows_transpositions;\n    }\n\n    /** \\returns the absolute value of the determinant of the matrix of which\n      * *this is the QR decomposition. It has only linear complexity\n      * (that is, O(n) where n is the dimension of the square matrix)\n      * as the QR decomposition has already been computed.\n      *\n      * \\note This is only for square matrices.\n      *\n      * \\warning a determinant can be very big or small, so for matrices\n      * of large enough dimension, there is a risk of overflow/underflow.\n      * One way to work around that is to use logAbsDeterminant() instead.\n      *\n      * \\sa logAbsDeterminant(), MatrixBase::determinant()\n      */\n    typename MatrixType::RealScalar absDeterminant() const;\n\n    /** \\returns the natural log of the absolute value of the determinant of the matrix of which\n      * *this is the QR decomposition. 
It has only linear complexity\n      * (that is, O(n) where n is the dimension of the square matrix)\n      * as the QR decomposition has already been computed.\n      *\n      * \\note This is only for square matrices.\n      *\n      * \\note This method is useful to work around the risk of overflow/underflow that's inherent\n      * to determinant computation.\n      *\n      * \\sa absDeterminant(), MatrixBase::determinant()\n      */\n    typename MatrixType::RealScalar logAbsDeterminant() const;\n\n    /** \\returns the rank of the matrix of which *this is the QR decomposition.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline Index rank() const\n    {\n      using std::abs;\n      eigen_assert(m_isInitialized && \"FullPivHouseholderQR is not initialized.\");\n      RealScalar premultiplied_threshold = abs(m_maxpivot) * threshold();\n      Index result = 0;\n      for(Index i = 0; i < m_nonzero_pivots; ++i)\n        result += (abs(m_qr.coeff(i,i)) > premultiplied_threshold);\n      return result;\n    }\n\n    /** \\returns the dimension of the kernel of the matrix of which *this is the QR decomposition.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline Index dimensionOfKernel() const\n    {\n      eigen_assert(m_isInitialized && \"FullPivHouseholderQR is not initialized.\");\n      return cols() - rank();\n    }\n\n    /** \\returns true if the matrix of which *this is the QR decomposition represents an injective\n      *          linear map, i.e. 
has trivial kernel; false otherwise.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline bool isInjective() const\n    {\n      eigen_assert(m_isInitialized && \"FullPivHouseholderQR is not initialized.\");\n      return rank() == cols();\n    }\n\n    /** \\returns true if the matrix of which *this is the QR decomposition represents a surjective\n      *          linear map; false otherwise.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline bool isSurjective() const\n    {\n      eigen_assert(m_isInitialized && \"FullPivHouseholderQR is not initialized.\");\n      return rank() == rows();\n    }\n\n    /** \\returns true if the matrix of which *this is the QR decomposition is invertible.\n      *\n      * \\note This method has to determine which pivots should be considered nonzero.\n      *       For that, it uses the threshold value that you can control by calling\n      *       setThreshold(const RealScalar&).\n      */\n    inline bool isInvertible() const\n    {\n      eigen_assert(m_isInitialized && \"FullPivHouseholderQR is not initialized.\");\n      return isInjective() && isSurjective();\n    }\n\n    /** \\returns the inverse of the matrix of which *this is the QR decomposition.\n      *\n      * \\note If this matrix is not invertible, the returned matrix has undefined coefficients.\n      *       Use isInvertible() to first determine whether this matrix is invertible.\n      */\n    inline const Inverse<FullPivHouseholderQR> inverse() const\n    {\n      eigen_assert(m_isInitialized && \"FullPivHouseholderQR is not initialized.\");\n      return 
Inverse<FullPivHouseholderQR>(*this);\n    }\n\n    inline Index rows() const { return m_qr.rows(); }\n    inline Index cols() const { return m_qr.cols(); }\n    \n    /** \\returns a const reference to the vector of Householder coefficients used to represent the factor \\c Q.\n      * \n      * For advanced uses only.\n      */\n    const HCoeffsType& hCoeffs() const { return m_hCoeffs; }\n\n    /** Allows to prescribe a threshold to be used by certain methods, such as rank(),\n      * who need to determine when pivots are to be considered nonzero. This is not used for the\n      * QR decomposition itself.\n      *\n      * When it needs to get the threshold value, Eigen calls threshold(). By default, this\n      * uses a formula to automatically determine a reasonable threshold.\n      * Once you have called the present method setThreshold(const RealScalar&),\n      * your value is used instead.\n      *\n      * \\param threshold The new value to use as the threshold.\n      *\n      * A pivot will be considered nonzero if its absolute value is strictly greater than\n      *  \\f$ \\vert pivot \\vert \\leqslant threshold \\times \\vert maxpivot \\vert \\f$\n      * where maxpivot is the biggest pivot.\n      *\n      * If you want to come back to the default behavior, call setThreshold(Default_t)\n      */\n    FullPivHouseholderQR& setThreshold(const RealScalar& threshold)\n    {\n      m_usePrescribedThreshold = true;\n      m_prescribedThreshold = threshold;\n      return *this;\n    }\n\n    /** Allows to come back to the default behavior, letting Eigen use its default formula for\n      * determining the threshold.\n      *\n      * You should pass the special object Eigen::Default as parameter here.\n      * \\code qr.setThreshold(Eigen::Default); \\endcode\n      *\n      * See the documentation of setThreshold(const RealScalar&).\n      */\n    FullPivHouseholderQR& setThreshold(Default_t)\n    {\n      m_usePrescribedThreshold = false;\n      return 
*this;\n    }\n\n    /** Returns the threshold that will be used by certain methods such as rank().\n      *\n      * See the documentation of setThreshold(const RealScalar&).\n      */\n    RealScalar threshold() const\n    {\n      eigen_assert(m_isInitialized || m_usePrescribedThreshold);\n      return m_usePrescribedThreshold ? m_prescribedThreshold\n      // this formula comes from experimenting (see \"LU precision tuning\" thread on the list)\n      // and turns out to be identical to Higham's formula used already in LDLt.\n                                      : NumTraits<Scalar>::epsilon() * RealScalar(m_qr.diagonalSize());\n    }\n\n    /** \\returns the number of nonzero pivots in the QR decomposition.\n      * Here nonzero is meant in the exact sense, not in a fuzzy sense.\n      * So that notion isn't really intrinsically interesting, but it is\n      * still useful when implementing algorithms.\n      *\n      * \\sa rank()\n      */\n    inline Index nonzeroPivots() const\n    {\n      eigen_assert(m_isInitialized && \"LU is not initialized.\");\n      return m_nonzero_pivots;\n    }\n\n    /** \\returns the absolute value of the biggest pivot, i.e. 
the biggest\n      *          diagonal coefficient of U.\n      */\n    RealScalar maxPivot() const { return m_maxpivot; }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    template<typename RhsType, typename DstType>\n    void _solve_impl(const RhsType &rhs, DstType &dst) const;\n    #endif\n\n  protected:\n\n    static void check_template_parameters()\n    {\n      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);\n    }\n\n    void computeInPlace();\n\n    MatrixType m_qr;\n    HCoeffsType m_hCoeffs;\n    IntDiagSizeVectorType m_rows_transpositions;\n    IntDiagSizeVectorType m_cols_transpositions;\n    PermutationType m_cols_permutation;\n    RowVectorType m_temp;\n    bool m_isInitialized, m_usePrescribedThreshold;\n    RealScalar m_prescribedThreshold, m_maxpivot;\n    Index m_nonzero_pivots;\n    RealScalar m_precision;\n    Index m_det_pq;\n};\n\ntemplate<typename MatrixType>\ntypename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::absDeterminant() const\n{\n  using std::abs;\n  eigen_assert(m_isInitialized && \"FullPivHouseholderQR is not initialized.\");\n  eigen_assert(m_qr.rows() == m_qr.cols() && \"You can't take the determinant of a non-square matrix!\");\n  return abs(m_qr.diagonal().prod());\n}\n\ntemplate<typename MatrixType>\ntypename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDeterminant() const\n{\n  eigen_assert(m_isInitialized && \"FullPivHouseholderQR is not initialized.\");\n  eigen_assert(m_qr.rows() == m_qr.cols() && \"You can't take the determinant of a non-square matrix!\");\n  return m_qr.diagonal().cwiseAbs().array().log().sum();\n}\n\n/** Performs the QR factorization of the given matrix \\a matrix. 
The result of\n  * the factorization is stored into \\c *this, and a reference to \\c *this\n  * is returned.\n  *\n  * \\sa class FullPivHouseholderQR, FullPivHouseholderQR(const MatrixType&)\n  */\ntemplate<typename MatrixType>\ntemplate<typename InputType>\nFullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const EigenBase<InputType>& matrix)\n{\n  m_qr = matrix.derived();\n  computeInPlace();\n  return *this;\n}\n\ntemplate<typename MatrixType>\nvoid FullPivHouseholderQR<MatrixType>::computeInPlace()\n{\n  check_template_parameters();\n\n  using std::abs;\n  Index rows = m_qr.rows();\n  Index cols = m_qr.cols();\n  Index size = (std::min)(rows,cols);\n\n  \n  m_hCoeffs.resize(size);\n\n  m_temp.resize(cols);\n\n  m_precision = NumTraits<Scalar>::epsilon() * RealScalar(size);\n\n  m_rows_transpositions.resize(size);\n  m_cols_transpositions.resize(size);\n  Index number_of_transpositions = 0;\n\n  RealScalar biggest(0);\n\n  m_nonzero_pivots = size; // the generic case is that in which all pivots are nonzero (invertible case)\n  m_maxpivot = RealScalar(0);\n\n  for (Index k = 0; k < size; ++k)\n  {\n    Index row_of_biggest_in_corner, col_of_biggest_in_corner;\n    typedef internal::scalar_score_coeff_op<Scalar> Scoring;\n    typedef typename Scoring::result_type Score;\n\n    Score score = m_qr.bottomRightCorner(rows-k, cols-k)\n                      .unaryExpr(Scoring())\n                      .maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner);\n    row_of_biggest_in_corner += k;\n    col_of_biggest_in_corner += k;\n    RealScalar biggest_in_corner = internal::abs_knowing_score<Scalar>()(m_qr(row_of_biggest_in_corner, col_of_biggest_in_corner), score);\n    if(k==0) biggest = biggest_in_corner;\n\n    // if the corner is negligible, then we have less than full rank, and we can finish early\n    if(internal::isMuchSmallerThan(biggest_in_corner, biggest, m_precision))\n    {\n      m_nonzero_pivots = k;\n      for(Index i = 
k; i < size; i++)\n      {\n        m_rows_transpositions.coeffRef(i) = i;\n        m_cols_transpositions.coeffRef(i) = i;\n        m_hCoeffs.coeffRef(i) = Scalar(0);\n      }\n      break;\n    }\n\n    m_rows_transpositions.coeffRef(k) = row_of_biggest_in_corner;\n    m_cols_transpositions.coeffRef(k) = col_of_biggest_in_corner;\n    if(k != row_of_biggest_in_corner) {\n      m_qr.row(k).tail(cols-k).swap(m_qr.row(row_of_biggest_in_corner).tail(cols-k));\n      ++number_of_transpositions;\n    }\n    if(k != col_of_biggest_in_corner) {\n      m_qr.col(k).swap(m_qr.col(col_of_biggest_in_corner));\n      ++number_of_transpositions;\n    }\n\n    RealScalar beta;\n    m_qr.col(k).tail(rows-k).makeHouseholderInPlace(m_hCoeffs.coeffRef(k), beta);\n    m_qr.coeffRef(k,k) = beta;\n\n    // remember the maximum absolute value of diagonal coefficients\n    if(abs(beta) > m_maxpivot) m_maxpivot = abs(beta);\n\n    m_qr.bottomRightCorner(rows-k, cols-k-1)\n        .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), m_hCoeffs.coeffRef(k), &m_temp.coeffRef(k+1));\n  }\n\n  m_cols_permutation.setIdentity(cols);\n  for(Index k = 0; k < size; ++k)\n    m_cols_permutation.applyTranspositionOnTheRight(k, m_cols_transpositions.coeff(k));\n\n  m_det_pq = (number_of_transpositions%2) ? -1 : 1;\n  m_isInitialized = true;\n}\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate<typename _MatrixType>\ntemplate<typename RhsType, typename DstType>\nvoid FullPivHouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const\n{\n  eigen_assert(rhs.rows() == rows());\n  const Index l_rank = rank();\n\n  // FIXME introduce nonzeroPivots() and use it here. 
and more generally,\n  // make the same improvements in this dec as in FullPivLU.\n  if(l_rank==0)\n  {\n    dst.setZero();\n    return;\n  }\n\n  typename RhsType::PlainObject c(rhs);\n\n  Matrix<Scalar,1,RhsType::ColsAtCompileTime> temp(rhs.cols());\n  for (Index k = 0; k < l_rank; ++k)\n  {\n    Index remainingSize = rows()-k;\n    c.row(k).swap(c.row(m_rows_transpositions.coeff(k)));\n    c.bottomRightCorner(remainingSize, rhs.cols())\n      .applyHouseholderOnTheLeft(m_qr.col(k).tail(remainingSize-1),\n                               m_hCoeffs.coeff(k), &temp.coeffRef(0));\n  }\n\n  m_qr.topLeftCorner(l_rank, l_rank)\n      .template triangularView<Upper>()\n      .solveInPlace(c.topRows(l_rank));\n\n  for(Index i = 0; i < l_rank; ++i) dst.row(m_cols_permutation.indices().coeff(i)) = c.row(i);\n  for(Index i = l_rank; i < cols(); ++i) dst.row(m_cols_permutation.indices().coeff(i)).setZero();\n}\n#endif\n\nnamespace internal {\n  \ntemplate<typename DstXprType, typename MatrixType>\nstruct Assignment<DstXprType, Inverse<FullPivHouseholderQR<MatrixType> >, internal::assign_op<typename DstXprType::Scalar,typename FullPivHouseholderQR<MatrixType>::Scalar>, Dense2Dense>\n{\n  typedef FullPivHouseholderQR<MatrixType> QrType;\n  typedef Inverse<QrType> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename QrType::Scalar> &)\n  {    \n    dst = src.nestedExpression().solve(MatrixType::Identity(src.rows(), src.cols()));\n  }\n};\n\n/** \\ingroup QR_Module\n  *\n  * \\brief Expression type for return value of FullPivHouseholderQR::matrixQ()\n  *\n  * \\tparam MatrixType type of underlying dense matrix\n  */\ntemplate<typename MatrixType> struct FullPivHouseholderQRMatrixQReturnType\n  : public ReturnByValue<FullPivHouseholderQRMatrixQReturnType<MatrixType> >\n{\npublic:\n  typedef typename FullPivHouseholderQR<MatrixType>::IntDiagSizeVectorType IntDiagSizeVectorType;\n  typedef typename 
internal::plain_diag_type<MatrixType>::type HCoeffsType;\n  typedef Matrix<typename MatrixType::Scalar, 1, MatrixType::RowsAtCompileTime, RowMajor, 1,\n                 MatrixType::MaxRowsAtCompileTime> WorkVectorType;\n\n  FullPivHouseholderQRMatrixQReturnType(const MatrixType&       qr,\n                                        const HCoeffsType&      hCoeffs,\n                                        const IntDiagSizeVectorType& rowsTranspositions)\n    : m_qr(qr),\n      m_hCoeffs(hCoeffs),\n      m_rowsTranspositions(rowsTranspositions)\n  {}\n\n  template <typename ResultType>\n  void evalTo(ResultType& result) const\n  {\n    const Index rows = m_qr.rows();\n    WorkVectorType workspace(rows);\n    evalTo(result, workspace);\n  }\n\n  template <typename ResultType>\n  void evalTo(ResultType& result, WorkVectorType& workspace) const\n  {\n    using numext::conj;\n    // compute the product H'_0 H'_1 ... H'_n-1,\n    // where H_k is the k-th Householder transformation I - h_k v_k v_k'\n    // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...]\n    const Index rows = m_qr.rows();\n    const Index cols = m_qr.cols();\n    const Index size = (std::min)(rows, cols);\n    workspace.resize(rows);\n    result.setIdentity(rows, rows);\n    for (Index k = size-1; k >= 0; k--)\n    {\n      result.block(k, k, rows-k, rows-k)\n            .applyHouseholderOnTheLeft(m_qr.col(k).tail(rows-k-1), conj(m_hCoeffs.coeff(k)), &workspace.coeffRef(k));\n      result.row(k).swap(result.row(m_rowsTranspositions.coeff(k)));\n    }\n  }\n\n  Index rows() const { return m_qr.rows(); }\n  Index cols() const { return m_qr.rows(); }\n\nprotected:\n  typename MatrixType::Nested m_qr;\n  typename HCoeffsType::Nested m_hCoeffs;\n  typename IntDiagSizeVectorType::Nested m_rowsTranspositions;\n};\n\n// template<typename MatrixType>\n// struct evaluator<FullPivHouseholderQRMatrixQReturnType<MatrixType> >\n//  : public 
evaluator<ReturnByValue<FullPivHouseholderQRMatrixQReturnType<MatrixType> > >\n// {};\n\n} // end namespace internal\n\ntemplate<typename MatrixType>\ninline typename FullPivHouseholderQR<MatrixType>::MatrixQReturnType FullPivHouseholderQR<MatrixType>::matrixQ() const\n{\n  eigen_assert(m_isInitialized && \"FullPivHouseholderQR is not initialized.\");\n  return MatrixQReturnType(m_qr, m_hCoeffs, m_rows_transpositions);\n}\n\n/** \\return the full-pivoting Householder QR decomposition of \\c *this.\n  *\n  * \\sa class FullPivHouseholderQR\n  */\ntemplate<typename Derived>\nconst FullPivHouseholderQR<typename MatrixBase<Derived>::PlainObject>\nMatrixBase<Derived>::fullPivHouseholderQr() const\n{\n  return FullPivHouseholderQR<PlainObject>(eval());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/QR/HouseholderQR.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2010 Vincent Lejeune\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_QR_H\n#define EIGEN_QR_H\n\nnamespace Eigen { \n\n/** \\ingroup QR_Module\n  *\n  *\n  * \\class HouseholderQR\n  *\n  * \\brief Householder QR decomposition of a matrix\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are computing the QR decomposition\n  *\n  * This class performs a QR decomposition of a matrix \\b A into matrices \\b Q and \\b R\n  * such that \n  * \\f[\n  *  \\mathbf{A} = \\mathbf{Q} \\, \\mathbf{R}\n  * \\f]\n  * by using Householder transformations. Here, \\b Q a unitary matrix and \\b R an upper triangular matrix.\n  * The result is stored in a compact way compatible with LAPACK.\n  *\n  * Note that no pivoting is performed. 
This is \\b not a rank-revealing decomposition.\n  * If you want that feature, use FullPivHouseholderQR or ColPivHouseholderQR instead.\n  *\n  * This Householder QR decomposition is faster, but less numerically stable and less feature-full than\n  * FullPivHouseholderQR or ColPivHouseholderQR.\n  *\n  * This class supports the \\link InplaceDecomposition inplace decomposition \\endlink mechanism.\n  *\n  * \\sa MatrixBase::householderQr()\n  */\ntemplate<typename _MatrixType> class HouseholderQR\n{\n  public:\n\n    typedef _MatrixType MatrixType;\n    enum {\n      RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n    // FIXME should be int\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, (MatrixType::Flags&RowMajorBit) ? 
RowMajor : ColMajor, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixQType;\n    typedef typename internal::plain_diag_type<MatrixType>::type HCoeffsType;\n    typedef typename internal::plain_row_type<MatrixType>::type RowVectorType;\n    typedef HouseholderSequence<MatrixType,typename internal::remove_all<typename HCoeffsType::ConjugateReturnType>::type> HouseholderSequenceType;\n\n    /**\n      * \\brief Default Constructor.\n      *\n      * The default constructor is useful in cases in which the user intends to\n      * perform decompositions via HouseholderQR::compute(const MatrixType&).\n      */\n    HouseholderQR() : m_qr(), m_hCoeffs(), m_temp(), m_isInitialized(false) {}\n\n    /** \\brief Default Constructor with memory preallocation\n      *\n      * Like the default constructor but with preallocation of the internal data\n      * according to the specified problem \\a size.\n      * \\sa HouseholderQR()\n      */\n    HouseholderQR(Index rows, Index cols)\n      : m_qr(rows, cols),\n        m_hCoeffs((std::min)(rows,cols)),\n        m_temp(cols),\n        m_isInitialized(false) {}\n\n    /** \\brief Constructs a QR factorization from a given matrix\n      *\n      * This constructor computes the QR factorization of the matrix \\a matrix by calling\n      * the method compute(). 
It is a short cut for:\n      * \n      * \\code\n      * HouseholderQR<MatrixType> qr(matrix.rows(), matrix.cols());\n      * qr.compute(matrix);\n      * \\endcode\n      * \n      * \\sa compute()\n      */\n    template<typename InputType>\n    explicit HouseholderQR(const EigenBase<InputType>& matrix)\n      : m_qr(matrix.rows(), matrix.cols()),\n        m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),\n        m_temp(matrix.cols()),\n        m_isInitialized(false)\n    {\n      compute(matrix.derived());\n    }\n\n\n    /** \\brief Constructs a QR factorization from a given matrix\n      *\n      * This overloaded constructor is provided for \\link InplaceDecomposition inplace decomposition \\endlink when\n      * \\c MatrixType is a Eigen::Ref.\n      *\n      * \\sa HouseholderQR(const EigenBase&)\n      */\n    template<typename InputType>\n    explicit HouseholderQR(EigenBase<InputType>& matrix)\n      : m_qr(matrix.derived()),\n        m_hCoeffs((std::min)(matrix.rows(),matrix.cols())),\n        m_temp(matrix.cols()),\n        m_isInitialized(false)\n    {\n      computeInPlace();\n    }\n\n    /** This method finds a solution x to the equation Ax=b, where A is the matrix of which\n      * *this is the QR decomposition, if any exists.\n      *\n      * \\param b the right-hand-side of the equation to solve.\n      *\n      * \\returns a solution.\n      *\n      * \\note_about_checking_solutions\n      *\n      * \\note_about_arbitrary_choice_of_solution\n      *\n      * Example: \\include HouseholderQR_solve.cpp\n      * Output: \\verbinclude HouseholderQR_solve.out\n      */\n    template<typename Rhs>\n    inline const Solve<HouseholderQR, Rhs>\n    solve(const MatrixBase<Rhs>& b) const\n    {\n      eigen_assert(m_isInitialized && \"HouseholderQR is not initialized.\");\n      return Solve<HouseholderQR, Rhs>(*this, b.derived());\n    }\n\n    /** This method returns an expression of the unitary matrix Q as a sequence of Householder 
transformations.\n      *\n      * The returned expression can directly be used to perform matrix products. It can also be assigned to a dense Matrix object.\n      * Here is an example showing how to recover the full or thin matrix Q, as well as how to perform matrix products using operator*:\n      *\n      * Example: \\include HouseholderQR_householderQ.cpp\n      * Output: \\verbinclude HouseholderQR_householderQ.out\n      */\n    HouseholderSequenceType householderQ() const\n    {\n      eigen_assert(m_isInitialized && \"HouseholderQR is not initialized.\");\n      return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate());\n    }\n\n    /** \\returns a reference to the matrix where the Householder QR decomposition is stored\n      * in a LAPACK-compatible way.\n      */\n    const MatrixType& matrixQR() const\n    {\n        eigen_assert(m_isInitialized && \"HouseholderQR is not initialized.\");\n        return m_qr;\n    }\n\n    template<typename InputType>\n    HouseholderQR& compute(const EigenBase<InputType>& matrix) {\n      m_qr = matrix.derived();\n      computeInPlace();\n      return *this;\n    }\n\n    /** \\returns the absolute value of the determinant of the matrix of which\n      * *this is the QR decomposition. It has only linear complexity\n      * (that is, O(n) where n is the dimension of the square matrix)\n      * as the QR decomposition has already been computed.\n      *\n      * \\note This is only for square matrices.\n      *\n      * \\warning a determinant can be very big or small, so for matrices\n      * of large enough dimension, there is a risk of overflow/underflow.\n      * One way to work around that is to use logAbsDeterminant() instead.\n      *\n      * \\sa logAbsDeterminant(), MatrixBase::determinant()\n      */\n    typename MatrixType::RealScalar absDeterminant() const;\n\n    /** \\returns the natural log of the absolute value of the determinant of the matrix of which\n      * *this is the QR decomposition. 
It has only linear complexity\n      * (that is, O(n) where n is the dimension of the square matrix)\n      * as the QR decomposition has already been computed.\n      *\n      * \\note This is only for square matrices.\n      *\n      * \\note This method is useful to work around the risk of overflow/underflow that's inherent\n      * to determinant computation.\n      *\n      * \\sa absDeterminant(), MatrixBase::determinant()\n      */\n    typename MatrixType::RealScalar logAbsDeterminant() const;\n\n    inline Index rows() const { return m_qr.rows(); }\n    inline Index cols() const { return m_qr.cols(); }\n\n    /** \\returns a const reference to the vector of Householder coefficients used to represent the factor \\c Q.\n      * \n      * For advanced uses only.\n      */\n    const HCoeffsType& hCoeffs() const { return m_hCoeffs; }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    template<typename RhsType, typename DstType>\n    void _solve_impl(const RhsType &rhs, DstType &dst) const;\n    #endif\n\n  protected:\n\n    static void check_template_parameters()\n    {\n      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);\n    }\n\n    void computeInPlace();\n\n    MatrixType m_qr;\n    HCoeffsType m_hCoeffs;\n    RowVectorType m_temp;\n    bool m_isInitialized;\n};\n\ntemplate<typename MatrixType>\ntypename MatrixType::RealScalar HouseholderQR<MatrixType>::absDeterminant() const\n{\n  using std::abs;\n  eigen_assert(m_isInitialized && \"HouseholderQR is not initialized.\");\n  eigen_assert(m_qr.rows() == m_qr.cols() && \"You can't take the determinant of a non-square matrix!\");\n  return abs(m_qr.diagonal().prod());\n}\n\ntemplate<typename MatrixType>\ntypename MatrixType::RealScalar HouseholderQR<MatrixType>::logAbsDeterminant() const\n{\n  eigen_assert(m_isInitialized && \"HouseholderQR is not initialized.\");\n  eigen_assert(m_qr.rows() == m_qr.cols() && \"You can't take the determinant of a non-square matrix!\");\n  return 
m_qr.diagonal().cwiseAbs().array().log().sum();\n}\n\nnamespace internal {\n\n/** \\internal */\ntemplate<typename MatrixQR, typename HCoeffs>\nvoid householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename MatrixQR::Scalar* tempData = 0)\n{\n  typedef typename MatrixQR::Scalar Scalar;\n  typedef typename MatrixQR::RealScalar RealScalar;\n  Index rows = mat.rows();\n  Index cols = mat.cols();\n  Index size = (std::min)(rows,cols);\n\n  eigen_assert(hCoeffs.size() == size);\n\n  typedef Matrix<Scalar,MatrixQR::ColsAtCompileTime,1> TempType;\n  TempType tempVector;\n  if(tempData==0)\n  {\n    tempVector.resize(cols);\n    tempData = tempVector.data();\n  }\n\n  for(Index k = 0; k < size; ++k)\n  {\n    Index remainingRows = rows - k;\n    Index remainingCols = cols - k - 1;\n\n    RealScalar beta;\n    mat.col(k).tail(remainingRows).makeHouseholderInPlace(hCoeffs.coeffRef(k), beta);\n    mat.coeffRef(k,k) = beta;\n\n    // apply H to remaining part of m_qr from the left\n    mat.bottomRightCorner(remainingRows, remainingCols)\n        .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), hCoeffs.coeffRef(k), tempData+k+1);\n  }\n}\n\n/** \\internal */\ntemplate<typename MatrixQR, typename HCoeffs,\n  typename MatrixQRScalar = typename MatrixQR::Scalar,\n  bool InnerStrideIsOne = (MatrixQR::InnerStrideAtCompileTime == 1 && HCoeffs::InnerStrideAtCompileTime == 1)>\nstruct householder_qr_inplace_blocked\n{\n  // This is specialized for MKL-supported Scalar types in HouseholderQR_MKL.h\n  static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index maxBlockSize=32,\n      typename MatrixQR::Scalar* tempData = 0)\n  {\n    typedef typename MatrixQR::Scalar Scalar;\n    typedef Block<MatrixQR,Dynamic,Dynamic> BlockType;\n\n    Index rows = mat.rows();\n    Index cols = mat.cols();\n    Index size = (std::min)(rows, cols);\n\n    typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixQR::MaxColsAtCompileTime,1> TempType;\n    TempType tempVector;\n    
if(tempData==0)\n    {\n      tempVector.resize(cols);\n      tempData = tempVector.data();\n    }\n\n    Index blockSize = (std::min)(maxBlockSize,size);\n\n    Index k = 0;\n    for (k = 0; k < size; k += blockSize)\n    {\n      Index bs = (std::min)(size-k,blockSize);  // actual size of the block\n      Index tcols = cols - k - bs;              // trailing columns\n      Index brows = rows-k;                     // rows of the block\n\n      // partition the matrix:\n      //        A00 | A01 | A02\n      // mat  = A10 | A11 | A12\n      //        A20 | A21 | A22\n      // and performs the qr dec of [A11^T A12^T]^T\n      // and update [A21^T A22^T]^T using level 3 operations.\n      // Finally, the algorithm continue on A22\n\n      BlockType A11_21 = mat.block(k,k,brows,bs);\n      Block<HCoeffs,Dynamic,1> hCoeffsSegment = hCoeffs.segment(k,bs);\n\n      householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData);\n\n      if(tcols)\n      {\n        BlockType A21_22 = mat.block(k,k+bs,brows,tcols);\n        apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment, false); // false == backward\n      }\n    }\n  }\n};\n\n} // end namespace internal\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate<typename _MatrixType>\ntemplate<typename RhsType, typename DstType>\nvoid HouseholderQR<_MatrixType>::_solve_impl(const RhsType &rhs, DstType &dst) const\n{\n  const Index rank = (std::min)(rows(), cols());\n  eigen_assert(rhs.rows() == rows());\n\n  typename RhsType::PlainObject c(rhs);\n\n  // Note that the matrix Q = H_0^* H_1^*... 
so its inverse is Q^* = (H_0 H_1 ...)^T\n  c.applyOnTheLeft(householderSequence(\n    m_qr.leftCols(rank),\n    m_hCoeffs.head(rank)).transpose()\n  );\n\n  m_qr.topLeftCorner(rank, rank)\n      .template triangularView<Upper>()\n      .solveInPlace(c.topRows(rank));\n\n  dst.topRows(rank) = c.topRows(rank);\n  dst.bottomRows(cols()-rank).setZero();\n}\n#endif\n\n/** Performs the QR factorization of the given matrix \\a matrix. The result of\n  * the factorization is stored into \\c *this, and a reference to \\c *this\n  * is returned.\n  *\n  * \\sa class HouseholderQR, HouseholderQR(const MatrixType&)\n  */\ntemplate<typename MatrixType>\nvoid HouseholderQR<MatrixType>::computeInPlace()\n{\n  check_template_parameters();\n  \n  Index rows = m_qr.rows();\n  Index cols = m_qr.cols();\n  Index size = (std::min)(rows,cols);\n\n  m_hCoeffs.resize(size);\n\n  m_temp.resize(cols);\n\n  internal::householder_qr_inplace_blocked<MatrixType, HCoeffsType>::run(m_qr, m_hCoeffs, 48, m_temp.data());\n\n  m_isInitialized = true;\n}\n\n/** \\return the Householder QR decomposition of \\c *this.\n  *\n  * \\sa class HouseholderQR\n  */\ntemplate<typename Derived>\nconst HouseholderQR<typename MatrixBase<Derived>::PlainObject>\nMatrixBase<Derived>::householderQr() const\n{\n  return HouseholderQR<PlainObject>(eval());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_QR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/QR/HouseholderQR_LAPACKE.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to LAPACKe\n *    Householder QR decomposition of a matrix w/o pivoting based on\n *    LAPACKE_?geqrf function.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_QR_LAPACKE_H\n#define EIGEN_QR_LAPACKE_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/** \\internal Specialization for the data types supported by LAPACKe */\n\n#define EIGEN_LAPACKE_QR_NOPIV(EIGTYPE, LAPACKE_TYPE, LAPACKE_PREFIX) \\\ntemplate<typename MatrixQR, typename HCoeffs> \\\nstruct householder_qr_inplace_blocked<MatrixQR, HCoeffs, EIGTYPE, true> \\\n{ \\\n  static void run(MatrixQR& mat, HCoeffs& hCoeffs, Index = 32, \\\n      typename MatrixQR::Scalar* = 0) \\\n  { \\\n    lapack_int m = (lapack_int) mat.rows(); \\\n    lapack_int n = (lapack_int) mat.cols(); \\\n    lapack_int lda = (lapack_int) mat.outerStride(); \\\n    lapack_int matrix_order = (MatrixQR::IsRowMajor) ? 
LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \\\n    LAPACKE_##LAPACKE_PREFIX##geqrf( matrix_order, m, n, (LAPACKE_TYPE*)mat.data(), lda, (LAPACKE_TYPE*)hCoeffs.data()); \\\n    hCoeffs.adjointInPlace(); \\\n  } \\\n};\n\nEIGEN_LAPACKE_QR_NOPIV(double, double, d)\nEIGEN_LAPACKE_QR_NOPIV(float, float, s)\nEIGEN_LAPACKE_QR_NOPIV(dcomplex, lapack_complex_double, z)\nEIGEN_LAPACKE_QR_NOPIV(scomplex, lapack_complex_float, c)\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_QR_LAPACKE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>\n// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SUITESPARSEQRSUPPORT_H\n#define EIGEN_SUITESPARSEQRSUPPORT_H\n\nnamespace Eigen {\n  \n  template<typename MatrixType> class SPQR; \n  template<typename SPQRType> struct SPQRMatrixQReturnType; \n  template<typename SPQRType> struct SPQRMatrixQTransposeReturnType; \n  template <typename SPQRType, typename Derived> struct SPQR_QProduct;\n  namespace internal {\n    template <typename SPQRType> struct traits<SPQRMatrixQReturnType<SPQRType> >\n    {\n      typedef typename SPQRType::MatrixType ReturnType;\n    };\n    template <typename SPQRType> struct traits<SPQRMatrixQTransposeReturnType<SPQRType> >\n    {\n      typedef typename SPQRType::MatrixType ReturnType;\n    };\n    template <typename SPQRType, typename Derived> struct traits<SPQR_QProduct<SPQRType, Derived> >\n    {\n      typedef typename Derived::PlainObject ReturnType;\n    };\n  } // End namespace internal\n  \n/**\n  * \\ingroup SPQRSupport_Module\n  * \\class SPQR\n  * \\brief Sparse QR factorization based on SuiteSparseQR library\n  *\n  * This class is used to perform a multithreaded and multifrontal rank-revealing QR decomposition\n  * of sparse matrices. The result is then used to solve linear leasts_square systems.\n  * Clearly, a QR factorization is returned such that A*P = Q*R where :\n  *\n  * P is the column permutation. 
Use colsPermutation() to get it.\n  *\n  * Q is the orthogonal matrix represented as Householder reflectors.\n  * Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose.\n  * You can then apply it to a vector.\n  *\n  * R is the sparse triangular factor. Use matrixQR() to get it as SparseMatrix.\n  * NOTE : The Index type of R is always SuiteSparse_long. You can get it with SPQR::Index\n  *\n  * \\tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<>\n  *\n  * \\implsparsesolverconcept\n  *\n  *\n  */\ntemplate<typename _MatrixType>\nclass SPQR : public SparseSolverBase<SPQR<_MatrixType> >\n{\n  protected:\n    typedef SparseSolverBase<SPQR<_MatrixType> > Base;\n    using Base::m_isInitialized;\n  public:\n    typedef typename _MatrixType::Scalar Scalar;\n    typedef typename _MatrixType::RealScalar RealScalar;\n    typedef SuiteSparse_long StorageIndex ;\n    typedef SparseMatrix<Scalar, ColMajor, StorageIndex> MatrixType;\n    typedef Map<PermutationMatrix<Dynamic, Dynamic, StorageIndex> > PermutationType;\n    enum {\n      ColsAtCompileTime = Dynamic,\n      MaxColsAtCompileTime = Dynamic\n    };\n  public:\n    SPQR() \n      : m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)\n    { \n      cholmod_l_start(&m_cc);\n    }\n    \n    explicit SPQR(const _MatrixType& matrix)\n    : m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)\n    {\n      cholmod_l_start(&m_cc);\n      compute(matrix);\n    }\n    \n    ~SPQR()\n    {\n      SPQR_free();\n      cholmod_l_finish(&m_cc);\n    }\n    void SPQR_free()\n    {\n      cholmod_l_free_sparse(&m_H, &m_cc);\n      cholmod_l_free_sparse(&m_cR, &m_cc);\n      cholmod_l_free_dense(&m_HTau, &m_cc);\n      std::free(m_E);\n      std::free(m_HPinv);\n    }\n\n    void compute(const 
_MatrixType& matrix)\n    {\n      if(m_isInitialized) SPQR_free();\n\n      MatrixType mat(matrix);\n      \n      /* Compute the default threshold as in MatLab, see:\n       * Tim Davis, \"Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing\n       * Sparse QR Factorization, ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3 \n       */\n      RealScalar pivotThreshold = m_tolerance;\n      if(m_useDefaultThreshold) \n      {\n        RealScalar max2Norm = 0.0;\n        for (int j = 0; j < mat.cols(); j++) max2Norm = numext::maxi(max2Norm, mat.col(j).norm());\n        if(max2Norm==RealScalar(0))\n          max2Norm = RealScalar(1);\n        pivotThreshold = 20 * (mat.rows() + mat.cols()) * max2Norm * NumTraits<RealScalar>::epsilon();\n      }\n      cholmod_sparse A; \n      A = viewAsCholmod(mat);\n      m_rows = matrix.rows();\n      Index col = matrix.cols();\n      m_rank = SuiteSparseQR<Scalar>(m_ordering, pivotThreshold, col, &A, \n                             &m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc);\n\n      if (!m_cR)\n      {\n        m_info = NumericalIssue;\n        m_isInitialized = false;\n        return;\n      }\n      m_info = Success;\n      m_isInitialized = true;\n      m_isRUpToDate = false;\n    }\n    /** \n     * Get the number of rows of the input matrix and the Q matrix\n     */\n    inline Index rows() const {return m_rows; }\n    \n    /** \n     * Get the number of columns of the input matrix. 
\n     */\n    inline Index cols() const { return m_cR->ncol; }\n    \n    template<typename Rhs, typename Dest>\n    void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const\n    {\n      eigen_assert(m_isInitialized && \" The QR factorization should be computed first, call compute()\");\n      eigen_assert(b.cols()==1 && \"This method is for vectors only\");\n\n      //Compute Q^T * b\n      typename Dest::PlainObject y, y2;\n      y = matrixQ().transpose() * b;\n      \n      // Solves with the triangular matrix R\n      Index rk = this->rank();\n      y2 = y;\n      y.resize((std::max)(cols(),Index(y.rows())),y.cols());\n      y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y2.topRows(rk));\n\n      // Apply the column permutation \n      // colsPermutation() performs a copy of the permutation,\n      // so let's apply it manually:\n      for(Index i = 0; i < rk; ++i) dest.row(m_E[i]) = y.row(i);\n      for(Index i = rk; i < cols(); ++i) dest.row(m_E[i]).setZero();\n      \n//       y.bottomRows(y.rows()-rk).setZero();\n//       dest = colsPermutation() * y.topRows(cols());\n      \n      m_info = Success;\n    }\n    \n    /** \\returns the sparse triangular factor R. 
It is a sparse matrix\n     */\n    const MatrixType matrixR() const\n    {\n      eigen_assert(m_isInitialized && \" The QR factorization should be computed first, call compute()\");\n      if(!m_isRUpToDate) {\n        m_R = viewAsEigen<Scalar,ColMajor, typename MatrixType::StorageIndex>(*m_cR);\n        m_isRUpToDate = true;\n      }\n      return m_R;\n    }\n    /// Get an expression of the matrix Q\n    SPQRMatrixQReturnType<SPQR> matrixQ() const\n    {\n      return SPQRMatrixQReturnType<SPQR>(*this);\n    }\n    /// Get the permutation that was applied to columns of A\n    PermutationType colsPermutation() const\n    { \n      eigen_assert(m_isInitialized && \"Decomposition is not initialized.\");\n      return PermutationType(m_E, m_cR->ncol);\n    }\n    /**\n     * Gets the rank of the matrix. \n     * It should be equal to matrixQR().cols if the matrix is full-rank\n     */\n    Index rank() const\n    {\n      eigen_assert(m_isInitialized && \"Decomposition is not initialized.\");\n      return m_cc.SPQR_istat[4];\n    }\n    /// Set the fill-reducing ordering method to be used\n    void setSPQROrdering(int ord) { m_ordering = ord;}\n    /// Set the tolerance tol to treat columns with 2-norm < =tol as zero\n    void setPivotThreshold(const RealScalar& tol)\n    {\n      m_useDefaultThreshold = false;\n      m_tolerance = tol;\n    }\n    \n    /** \\returns a pointer to the SPQR workspace */\n    cholmod_common *cholmodCommon() const { return &m_cc; }\n    \n    \n    /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was succesful,\n      *          \\c NumericalIssue if the sparse QR can not be computed\n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"Decomposition is not initialized.\");\n      return m_info;\n    }\n  protected:\n    bool m_analysisIsOk;\n    bool m_factorizationIsOk;\n    mutable bool m_isRUpToDate;\n    mutable 
ComputationInfo m_info;\n    int m_ordering; // Ordering method to use, see SPQR's manual\n    int m_allow_tol; // Allow to use some tolerance during numerical factorization.\n    RealScalar m_tolerance; // treat columns with 2-norm below this tolerance as zero\n    mutable cholmod_sparse *m_cR; // The sparse R factor in cholmod format\n    mutable MatrixType m_R; // The sparse matrix R in Eigen format\n    mutable StorageIndex *m_E; // The permutation applied to columns\n    mutable cholmod_sparse *m_H;  //The householder vectors\n    mutable StorageIndex *m_HPinv; // The row permutation of H\n    mutable cholmod_dense *m_HTau; // The Householder coefficients\n    mutable Index m_rank; // The rank of the matrix\n    mutable cholmod_common m_cc; // Workspace and parameters\n    bool m_useDefaultThreshold;     // Use default threshold\n    Index m_rows;\n    template<typename ,typename > friend struct SPQR_QProduct;\n};\n\ntemplate <typename SPQRType, typename Derived>\nstruct SPQR_QProduct : ReturnByValue<SPQR_QProduct<SPQRType,Derived> >\n{\n  typedef typename SPQRType::Scalar Scalar;\n  typedef typename SPQRType::StorageIndex StorageIndex;\n  //Define the constructor to get reference to argument types\n  SPQR_QProduct(const SPQRType& spqr, const Derived& other, bool transpose) : m_spqr(spqr),m_other(other),m_transpose(transpose) {}\n  \n  inline Index rows() const { return m_transpose ? m_spqr.rows() : m_spqr.cols(); }\n  inline Index cols() const { return m_other.cols(); }\n  // Assign to a vector\n  template<typename ResType>\n  void evalTo(ResType& res) const\n  {\n    cholmod_dense y_cd;\n    cholmod_dense *x_cd; \n    int method = m_transpose ? 
SPQR_QTX : SPQR_QX; \n    cholmod_common *cc = m_spqr.cholmodCommon();\n    y_cd = viewAsCholmod(m_other.const_cast_derived());\n    x_cd = SuiteSparseQR_qmult<Scalar>(method, m_spqr.m_H, m_spqr.m_HTau, m_spqr.m_HPinv, &y_cd, cc);\n    res = Matrix<Scalar,ResType::RowsAtCompileTime,ResType::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x), x_cd->nrow, x_cd->ncol);\n    cholmod_l_free_dense(&x_cd, cc);\n  }\n  const SPQRType& m_spqr; \n  const Derived& m_other; \n  bool m_transpose; \n  \n};\ntemplate<typename SPQRType>\nstruct SPQRMatrixQReturnType{\n  \n  SPQRMatrixQReturnType(const SPQRType& spqr) : m_spqr(spqr) {}\n  template<typename Derived>\n  SPQR_QProduct<SPQRType, Derived> operator*(const MatrixBase<Derived>& other)\n  {\n    return SPQR_QProduct<SPQRType,Derived>(m_spqr,other.derived(),false);\n  }\n  SPQRMatrixQTransposeReturnType<SPQRType> adjoint() const\n  {\n    return SPQRMatrixQTransposeReturnType<SPQRType>(m_spqr);\n  }\n  // To use for operations with the transpose of Q\n  SPQRMatrixQTransposeReturnType<SPQRType> transpose() const\n  {\n    return SPQRMatrixQTransposeReturnType<SPQRType>(m_spqr);\n  }\n  const SPQRType& m_spqr;\n};\n\ntemplate<typename SPQRType>\nstruct SPQRMatrixQTransposeReturnType{\n  SPQRMatrixQTransposeReturnType(const SPQRType& spqr) : m_spqr(spqr) {}\n  template<typename Derived>\n  SPQR_QProduct<SPQRType,Derived> operator*(const MatrixBase<Derived>& other)\n  {\n    return SPQR_QProduct<SPQRType,Derived>(m_spqr,other.derived(), true);\n  }\n  const SPQRType& m_spqr;\n};\n\n}// End namespace Eigen\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SVD/BDCSVD.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n// \n// We used the \"A Divide-And-Conquer Algorithm for the Bidiagonal SVD\"\n// research report written by Ming Gu and Stanley C.Eisenstat\n// The code variable names correspond to the names they used in their \n// report\n//\n// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>\n// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>\n// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>\n// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>\n// Copyright (C) 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>\n// Copyright (C) 2014-2016 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_BDCSVD_H\n#define EIGEN_BDCSVD_H\n// #define EIGEN_BDCSVD_DEBUG_VERBOSE\n// #define EIGEN_BDCSVD_SANITY_CHECKS\n\nnamespace Eigen {\n\n#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE\nIOFormat bdcsvdfmt(8, 0, \", \", \"\\n\", \"  [\", \"]\");\n#endif\n  \ntemplate<typename _MatrixType> class BDCSVD;\n\nnamespace internal {\n\ntemplate<typename _MatrixType> \nstruct traits<BDCSVD<_MatrixType> >\n{\n  typedef _MatrixType MatrixType;\n};  \n\n} // end namespace internal\n  \n  \n/** \\ingroup SVD_Module\n *\n *\n * \\class BDCSVD\n *\n * \\brief class Bidiagonal Divide and Conquer SVD\n *\n * \\tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition\n *\n * This class first reduces the input matrix to bi-diagonal form using class UpperBidiagonalization,\n * and then performs a divide-and-conquer diagonalization. 
Small blocks are diagonalized using class JacobiSVD.\n * You can control the switching size with the setSwitchSize() method, default is 16.\n * For small matrice (<16), it is thus preferable to directly use JacobiSVD. For larger ones, BDCSVD is highly\n * recommended and can several order of magnitude faster.\n *\n * \\warning this algorithm is unlikely to provide accurate result when compiled with unsafe math optimizations.\n * For instance, this concerns Intel's compiler (ICC), which perfroms such optimization by default unless\n * you compile with the \\c -fp-model \\c precise option. Likewise, the \\c -ffast-math option of GCC or clang will\n * significantly degrade the accuracy.\n *\n * \\sa class JacobiSVD\n */\ntemplate<typename _MatrixType> \nclass BDCSVD : public SVDBase<BDCSVD<_MatrixType> >\n{\n  typedef SVDBase<BDCSVD> Base;\n    \npublic:\n  using Base::rows;\n  using Base::cols;\n  using Base::computeU;\n  using Base::computeV;\n  \n  typedef _MatrixType MatrixType;\n  typedef typename MatrixType::Scalar Scalar;\n  typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;\n  enum {\n    RowsAtCompileTime = MatrixType::RowsAtCompileTime, \n    ColsAtCompileTime = MatrixType::ColsAtCompileTime, \n    DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime), \n    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, \n    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, \n    MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime), \n    MatrixOptions = MatrixType::Options\n  };\n\n  typedef typename Base::MatrixUType MatrixUType;\n  typedef typename Base::MatrixVType MatrixVType;\n  typedef typename Base::SingularValuesType SingularValuesType;\n  \n  typedef Matrix<Scalar, Dynamic, Dynamic, ColMajor> MatrixX;\n  typedef Matrix<RealScalar, Dynamic, Dynamic, ColMajor> MatrixXr;\n  typedef Matrix<RealScalar, Dynamic, 1> VectorType;\n  typedef 
Array<RealScalar, Dynamic, 1> ArrayXr;\n  typedef Array<Index,1,Dynamic> ArrayXi;\n  typedef Ref<ArrayXr> ArrayRef;\n  typedef Ref<ArrayXi> IndicesRef;\n\n  /** \\brief Default Constructor.\n   *\n   * The default constructor is useful in cases in which the user intends to\n   * perform decompositions via BDCSVD::compute(const MatrixType&).\n   */\n  BDCSVD() : m_algoswap(16), m_numIters(0)\n  {}\n\n\n  /** \\brief Default Constructor with memory preallocation\n   *\n   * Like the default constructor but with preallocation of the internal data\n   * according to the specified problem size.\n   * \\sa BDCSVD()\n   */\n  BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)\n    : m_algoswap(16), m_numIters(0)\n  {\n    allocate(rows, cols, computationOptions);\n  }\n\n  /** \\brief Constructor performing the decomposition of given matrix.\n   *\n   * \\param matrix the matrix to decompose\n   * \\param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.\n   *                           By default, none is computed. This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, \n   *                           #ComputeFullV, #ComputeThinV.\n   *\n   * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not\n   * available with the (non - default) FullPivHouseholderQR preconditioner.\n   */\n  BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)\n    : m_algoswap(16), m_numIters(0)\n  {\n    compute(matrix, computationOptions);\n  }\n\n  ~BDCSVD() \n  {\n  }\n  \n  /** \\brief Method performing the decomposition of given matrix using custom options.\n   *\n   * \\param matrix the matrix to decompose\n   * \\param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.\n   *                           By default, none is computed. 
This is a bit - field, the possible bits are #ComputeFullU, #ComputeThinU, \n   *                           #ComputeFullV, #ComputeThinV.\n   *\n   * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not\n   * available with the (non - default) FullPivHouseholderQR preconditioner.\n   */\n  BDCSVD& compute(const MatrixType& matrix, unsigned int computationOptions);\n\n  /** \\brief Method performing the decomposition of given matrix using current options.\n   *\n   * \\param matrix the matrix to decompose\n   *\n   * This method uses the current \\a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).\n   */\n  BDCSVD& compute(const MatrixType& matrix)\n  {\n    return compute(matrix, this->m_computationOptions);\n  }\n\n  void setSwitchSize(int s) \n  {\n    eigen_assert(s>3 && \"BDCSVD the size of the algo switch has to be greater than 3\");\n    m_algoswap = s;\n  }\n \nprivate:\n  void allocate(Index rows, Index cols, unsigned int computationOptions);\n  void divide(Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift);\n  void computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V);\n  void computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, VectorType& singVals, ArrayRef shifts, ArrayRef mus);\n  void perturbCol0(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat);\n  void computeSingVecs(const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef& perm, const VectorType& singVals, const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V);\n  void deflation43(Index firstCol, Index shift, Index i, Index size);\n  void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);\n  void 
deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);\n  template<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>\n  void copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naivev);\n  void structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1);\n  static RealScalar secularEq(RealScalar x, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const ArrayRef& diagShifted, RealScalar shift);\n\nprotected:\n  MatrixXr m_naiveU, m_naiveV;\n  MatrixXr m_computed;\n  Index m_nRec;\n  ArrayXr m_workspace;\n  ArrayXi m_workspaceI;\n  int m_algoswap;\n  bool m_isTranspose, m_compU, m_compV;\n  \n  using Base::m_singularValues;\n  using Base::m_diagSize;\n  using Base::m_computeFullU;\n  using Base::m_computeFullV;\n  using Base::m_computeThinU;\n  using Base::m_computeThinV;\n  using Base::m_matrixU;\n  using Base::m_matrixV;\n  using Base::m_isInitialized;\n  using Base::m_nonzeroSingularValues;\n\npublic:  \n  int m_numIters;\n}; //end class BDCSVD\n\n\n// Method to allocate and initialize matrix and attributes\ntemplate<typename MatrixType>\nvoid BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)\n{\n  m_isTranspose = (cols > rows);\n\n  if (Base::allocate(rows, cols, computationOptions))\n    return;\n  \n  m_computed = MatrixXr::Zero(m_diagSize + 1, m_diagSize );\n  m_compU = computeV();\n  m_compV = computeU();\n  if (m_isTranspose)\n    std::swap(m_compU, m_compV);\n  \n  if (m_compU) m_naiveU = MatrixXr::Zero(m_diagSize + 1, m_diagSize + 1 );\n  else         m_naiveU = MatrixXr::Zero(2, m_diagSize + 1 );\n  \n  if (m_compV) m_naiveV = MatrixXr::Zero(m_diagSize, m_diagSize);\n  \n  m_workspace.resize((m_diagSize+1)*(m_diagSize+1)*3);\n  m_workspaceI.resize(3*m_diagSize);\n}// end allocate\n\ntemplate<typename MatrixType>\nBDCSVD<MatrixType>& 
BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions) \n{\n#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE\n  std::cout << \"\\n\\n\\n======================================================================================================================\\n\\n\\n\";\n#endif\n  allocate(matrix.rows(), matrix.cols(), computationOptions);\n  using std::abs;\n\n  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();\n  \n  //**** step -1 - If the problem is too small, directly falls back to JacobiSVD and return\n  if(matrix.cols() < m_algoswap)\n  {\n    // FIXME this line involves temporaries\n    JacobiSVD<MatrixType> jsvd(matrix,computationOptions);\n    if(computeU()) m_matrixU = jsvd.matrixU();\n    if(computeV()) m_matrixV = jsvd.matrixV();\n    m_singularValues = jsvd.singularValues();\n    m_nonzeroSingularValues = jsvd.nonzeroSingularValues();\n    m_isInitialized = true;\n    return *this;\n  }\n  \n  //**** step 0 - Copy the input matrix and apply scaling to reduce over/under-flows\n  RealScalar scale = matrix.cwiseAbs().maxCoeff();\n  if(scale==RealScalar(0)) scale = RealScalar(1);\n  MatrixX copy;\n  if (m_isTranspose) copy = matrix.adjoint()/scale;\n  else               copy = matrix/scale;\n  \n  //**** step 1 - Bidiagonalization\n  // FIXME this line involves temporaries\n  internal::UpperBidiagonalization<MatrixX> bid(copy);\n\n  //**** step 2 - Divide & Conquer\n  m_naiveU.setZero();\n  m_naiveV.setZero();\n  // FIXME this line involves a temporary matrix\n  m_computed.topRows(m_diagSize) = bid.bidiagonal().toDenseMatrix().transpose();\n  m_computed.template bottomRows<1>().setZero();\n  divide(0, m_diagSize - 1, 0, 0, 0);\n\n  //**** step 3 - Copy singular values and vectors\n  for (int i=0; i<m_diagSize; i++)\n  {\n    RealScalar a = abs(m_computed.coeff(i, i));\n    m_singularValues.coeffRef(i) = a * scale;\n    if (a<considerZero)\n    {\n      m_nonzeroSingularValues = i;\n      
m_singularValues.tail(m_diagSize - i - 1).setZero();\n      break;\n    }\n    else if (i == m_diagSize - 1)\n    {\n      m_nonzeroSingularValues = i + 1;\n      break;\n    }\n  }\n\n#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE\n//   std::cout << \"m_naiveU\\n\" << m_naiveU << \"\\n\\n\";\n//   std::cout << \"m_naiveV\\n\" << m_naiveV << \"\\n\\n\";\n#endif\n  if(m_isTranspose) copyUV(bid.householderV(), bid.householderU(), m_naiveV, m_naiveU);\n  else              copyUV(bid.householderU(), bid.householderV(), m_naiveU, m_naiveV);\n\n  m_isInitialized = true;\n  return *this;\n}// end compute\n\n\ntemplate<typename MatrixType>\ntemplate<typename HouseholderU, typename HouseholderV, typename NaiveU, typename NaiveV>\nvoid BDCSVD<MatrixType>::copyUV(const HouseholderU &householderU, const HouseholderV &householderV, const NaiveU &naiveU, const NaiveV &naiveV)\n{\n  // Note exchange of U and V: m_matrixU is set from m_naiveV and vice versa\n  if (computeU())\n  {\n    Index Ucols = m_computeThinU ? m_diagSize : householderU.cols();\n    m_matrixU = MatrixX::Identity(householderU.cols(), Ucols);\n    m_matrixU.topLeftCorner(m_diagSize, m_diagSize) = naiveV.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);\n    householderU.applyThisOnTheLeft(m_matrixU); // FIXME this line involves a temporary buffer\n  }\n  if (computeV())\n  {\n    Index Vcols = m_computeThinV ? m_diagSize : householderV.cols();\n    m_matrixV = MatrixX::Identity(householderV.cols(), Vcols);\n    m_matrixV.topLeftCorner(m_diagSize, m_diagSize) = naiveU.template cast<Scalar>().topLeftCorner(m_diagSize, m_diagSize);\n    householderV.applyThisOnTheLeft(m_matrixV); // FIXME this line involves a temporary buffer\n  }\n}\n\n/** \\internal\n  * Performs A = A * B exploiting the special structure of the matrix A. 
Splitting A as:\n  *  A = [A1]\n  *      [A2]\n  * such that A1.rows()==n1, then we assume that at least half of the columns of A1 and A2 are zeros.\n  * We can thus pack them prior to the the matrix product. However, this is only worth the effort if the matrix is large\n  * enough.\n  */\ntemplate<typename MatrixType>\nvoid BDCSVD<MatrixType>::structured_update(Block<MatrixXr,Dynamic,Dynamic> A, const MatrixXr &B, Index n1)\n{\n  Index n = A.rows();\n  if(n>100)\n  {\n    // If the matrices are large enough, let's exploit the sparse structure of A by\n    // splitting it in half (wrt n1), and packing the non-zero columns.\n    Index n2 = n - n1;\n    Map<MatrixXr> A1(m_workspace.data()      , n1, n);\n    Map<MatrixXr> A2(m_workspace.data()+ n1*n, n2, n);\n    Map<MatrixXr> B1(m_workspace.data()+  n*n, n,  n);\n    Map<MatrixXr> B2(m_workspace.data()+2*n*n, n,  n);\n    Index k1=0, k2=0;\n    for(Index j=0; j<n; ++j)\n    {\n      if( (A.col(j).head(n1).array()!=0).any() )\n      {\n        A1.col(k1) = A.col(j).head(n1);\n        B1.row(k1) = B.row(j);\n        ++k1;\n      }\n      if( (A.col(j).tail(n2).array()!=0).any() )\n      {\n        A2.col(k2) = A.col(j).tail(n2);\n        B2.row(k2) = B.row(j);\n        ++k2;\n      }\n    }\n  \n    A.topRows(n1).noalias()    = A1.leftCols(k1) * B1.topRows(k1);\n    A.bottomRows(n2).noalias() = A2.leftCols(k2) * B2.topRows(k2);\n  }\n  else\n  {\n    Map<MatrixXr,Aligned> tmp(m_workspace.data(),n,n);\n    tmp.noalias() = A*B;\n    A = tmp;\n  }\n}\n\n// The divide algorithm is done \"in place\", we are always working on subsets of the same matrix. 
The divide methods takes as argument the \n// place of the submatrix we are currently working on.\n\n//@param firstCol : The Index of the first column of the submatrix of m_computed and for m_naiveU;\n//@param lastCol : The Index of the last column of the submatrix of m_computed and for m_naiveU; \n// lastCol + 1 - firstCol is the size of the submatrix.\n//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper section 1 for more information on W)\n//@param firstRowW : Same as firstRowW with the column.\n//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. Why? Because! We actually want the last column of the U submatrix \n// to become the first column (*coeff) and to shift all the other columns to the right. There are more details on the reference paper.\ntemplate<typename MatrixType>\nvoid BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW, Index firstColW, Index shift)\n{\n  // requires rows = cols + 1;\n  using std::pow;\n  using std::sqrt;\n  using std::abs;\n  const Index n = lastCol - firstCol + 1;\n  const Index k = n/2;\n  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();\n  RealScalar alphaK;\n  RealScalar betaK; \n  RealScalar r0; \n  RealScalar lambda, phi, c0, s0;\n  VectorType l, f;\n  // We use the other algorithm which is more efficient for small \n  // matrices.\n  if (n < m_algoswap)\n  {\n    // FIXME this line involves temporaries\n    JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n), ComputeFullU | (m_compV ? 
ComputeFullV : 0));\n    if (m_compU)\n      m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() = b.matrixU();\n    else \n    {\n      m_naiveU.row(0).segment(firstCol, n + 1).real() = b.matrixU().row(0);\n      m_naiveU.row(1).segment(firstCol, n + 1).real() = b.matrixU().row(n);\n    }\n    if (m_compV) m_naiveV.block(firstRowW, firstColW, n, n).real() = b.matrixV();\n    m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();\n    m_computed.diagonal().segment(firstCol + shift, n) = b.singularValues().head(n);\n    return;\n  }\n  // We use the divide and conquer algorithm\n  alphaK =  m_computed(firstCol + k, firstCol + k);\n  betaK = m_computed(firstCol + k + 1, firstCol + k);\n  // The divide must be done in that order in order to have good results. Divide change the data inside the submatrices\n  // and the divide of the right submatrice reads one column of the left submatrice. That's why we need to treat the \n  // right submatrix before the left one. \n  divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);\n  divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);\n\n  if (m_compU)\n  {\n    lambda = m_naiveU(firstCol + k, firstCol + k);\n    phi = m_naiveU(firstCol + k + 1, lastCol + 1);\n  } \n  else \n  {\n    lambda = m_naiveU(1, firstCol + k);\n    phi = m_naiveU(0, lastCol + 1);\n  }\n  r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda)) + abs(betaK * phi) * abs(betaK * phi));\n  if (m_compU)\n  {\n    l = m_naiveU.row(firstCol + k).segment(firstCol, k);\n    f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);\n  } \n  else \n  {\n    l = m_naiveU.row(1).segment(firstCol, k);\n    f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);\n  }\n  if (m_compV) m_naiveV(firstRowW+k, firstColW) = 1;\n  if (r0<considerZero)\n  {\n    c0 = 1;\n    s0 = 0;\n  }\n  else\n  {\n    c0 = alphaK * lambda / r0;\n    s0 = betaK * phi / r0;\n  }\n  \n#ifdef 
EIGEN_BDCSVD_SANITY_CHECKS\n  assert(m_naiveU.allFinite());\n  assert(m_naiveV.allFinite());\n  assert(m_computed.allFinite());\n#endif\n  \n  if (m_compU)\n  {\n    MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));     \n    // we shiftW Q1 to the right\n    for (Index i = firstCol + k - 1; i >= firstCol; i--) \n      m_naiveU.col(i + 1).segment(firstCol, k + 1) = m_naiveU.col(i).segment(firstCol, k + 1);\n    // we shift q1 at the left with a factor c0\n    m_naiveU.col(firstCol).segment( firstCol, k + 1) = (q1 * c0);\n    // last column = q1 * - s0\n    m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) = (q1 * ( - s0));\n    // first column = q2 * s0\n    m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) = m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) * s0; \n    // q2 *= c0\n    m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0;\n  } \n  else \n  {\n    RealScalar q1 = m_naiveU(0, firstCol + k);\n    // we shift Q1 to the right\n    for (Index i = firstCol + k - 1; i >= firstCol; i--) \n      m_naiveU(0, i + 1) = m_naiveU(0, i);\n    // we shift q1 at the left with a factor c0\n    m_naiveU(0, firstCol) = (q1 * c0);\n    // last column = q1 * - s0\n    m_naiveU(0, lastCol + 1) = (q1 * ( - s0));\n    // first column = q2 * s0\n    m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0; \n    // q2 *= c0\n    m_naiveU(1, lastCol + 1) *= c0;\n    m_naiveU.row(1).segment(firstCol + 1, k).setZero();\n    m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();\n  }\n  \n#ifdef EIGEN_BDCSVD_SANITY_CHECKS\n  assert(m_naiveU.allFinite());\n  assert(m_naiveV.allFinite());\n  assert(m_computed.allFinite());\n#endif\n  \n  m_computed(firstCol + shift, firstCol + shift) = r0;\n  m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) = alphaK * l.transpose().real();\n  m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) = betaK * f.transpose().real();\n\n#ifdef 
EIGEN_BDCSVD_DEBUG_VERBOSE\n  ArrayXr tmp1 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();\n#endif\n  // Second part: try to deflate singular values in combined matrix\n  deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);\n#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE\n  ArrayXr tmp2 = (m_computed.block(firstCol+shift, firstCol+shift, n, n)).jacobiSvd().singularValues();\n  std::cout << \"\\n\\nj1 = \" << tmp1.transpose().format(bdcsvdfmt) << \"\\n\";\n  std::cout << \"j2 = \" << tmp2.transpose().format(bdcsvdfmt) << \"\\n\\n\";\n  std::cout << \"err:      \" << ((tmp1-tmp2).abs()>1e-12*tmp2.abs()).transpose() << \"\\n\";\n  static int count = 0;\n  std::cout << \"# \" << ++count << \"\\n\\n\";\n  assert((tmp1-tmp2).matrix().norm() < 1e-14*tmp2.matrix().norm());\n//   assert(count<681);\n//   assert(((tmp1-tmp2).abs()<1e-13*tmp2.abs()).all());\n#endif\n  \n  // Third part: compute SVD of combined matrix\n  MatrixXr UofSVD, VofSVD;\n  VectorType singVals;\n  computeSVDofM(firstCol + shift, n, UofSVD, singVals, VofSVD);\n  \n#ifdef EIGEN_BDCSVD_SANITY_CHECKS\n  assert(UofSVD.allFinite());\n  assert(VofSVD.allFinite());\n#endif\n  \n  if (m_compU)\n    structured_update(m_naiveU.block(firstCol, firstCol, n + 1, n + 1), UofSVD, (n+2)/2);\n  else\n  {\n    Map<Matrix<RealScalar,2,Dynamic>,Aligned> tmp(m_workspace.data(),2,n+1);\n    tmp.noalias() = m_naiveU.middleCols(firstCol, n+1) * UofSVD;\n    m_naiveU.middleCols(firstCol, n + 1) = tmp;\n  }\n  \n  if (m_compV)  structured_update(m_naiveV.block(firstRowW, firstColW, n, n), VofSVD, (n+1)/2);\n  \n#ifdef EIGEN_BDCSVD_SANITY_CHECKS\n  assert(m_naiveU.allFinite());\n  assert(m_naiveV.allFinite());\n  assert(m_computed.allFinite());\n#endif\n  \n  m_computed.block(firstCol + shift, firstCol + shift, n, n).setZero();\n  m_computed.block(firstCol + shift, firstCol + shift, n, n).diagonal() = singVals;\n}// end divide\n\n// Compute SVD of m_computed.block(firstCol, firstCol, n + 1, 
n); this block only has non-zeros in\n// the first column and on the diagonal and has undergone deflation, so diagonal is in increasing\n// order except for possibly the (0,0) entry. The computed SVD is stored U, singVals and V, except\n// that if m_compV is false, then V is not computed. Singular values are sorted in decreasing order.\n//\n// TODO Opportunities for optimization: better root finding algo, better stopping criterion, better\n// handling of round-off errors, be consistent in ordering\n// For instance, to solve the secular equation using FMM, see http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf\ntemplate <typename MatrixType>\nvoid BDCSVD<MatrixType>::computeSVDofM(Index firstCol, Index n, MatrixXr& U, VectorType& singVals, MatrixXr& V)\n{\n  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();\n  using std::abs;\n  ArrayRef col0 = m_computed.col(firstCol).segment(firstCol, n);\n  m_workspace.head(n) =  m_computed.block(firstCol, firstCol, n, n).diagonal();\n  ArrayRef diag = m_workspace.head(n);\n  diag(0) = 0;\n\n  // Allocate space for singular values and vectors\n  singVals.resize(n);\n  U.resize(n+1, n+1);\n  if (m_compV) V.resize(n, n);\n\n#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE\n  if (col0.hasNaN() || diag.hasNaN())\n    std::cout << \"\\n\\nHAS NAN\\n\\n\";\n#endif\n  \n  // Many singular values might have been deflated, the zero ones have been moved to the end,\n  // but others are interleaved and we must ignore them at this stage.\n  // To this end, let's compute a permutation skipping them:\n  Index actual_n = n;\n  while(actual_n>1 && diag(actual_n-1)==0) --actual_n;\n  Index m = 0; // size of the deflated problem\n  for(Index k=0;k<actual_n;++k)\n    if(abs(col0(k))>considerZero)\n      m_workspaceI(m++) = k;\n  Map<ArrayXi> perm(m_workspaceI.data(),m);\n  \n  Map<ArrayXr> shifts(m_workspace.data()+1*n, n);\n  Map<ArrayXr> mus(m_workspace.data()+2*n, n);\n  Map<ArrayXr> 
zhat(m_workspace.data()+3*n, n);\n\n#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE\n  std::cout << \"computeSVDofM using:\\n\";\n  std::cout << \"  z: \" << col0.transpose() << \"\\n\";\n  std::cout << \"  d: \" << diag.transpose() << \"\\n\";\n#endif\n  \n  // Compute singVals, shifts, and mus\n  computeSingVals(col0, diag, perm, singVals, shifts, mus);\n  \n#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE\n  std::cout << \"  j:        \" << (m_computed.block(firstCol, firstCol, n, n)).jacobiSvd().singularValues().transpose().reverse() << \"\\n\\n\";\n  std::cout << \"  sing-val: \" << singVals.transpose() << \"\\n\";\n  std::cout << \"  mu:       \" << mus.transpose() << \"\\n\";\n  std::cout << \"  shift:    \" << shifts.transpose() << \"\\n\";\n  \n  {\n    Index actual_n = n;\n    while(actual_n>1 && abs(col0(actual_n-1))<considerZero) --actual_n;\n    std::cout << \"\\n\\n    mus:    \" << mus.head(actual_n).transpose() << \"\\n\\n\";\n    std::cout << \"    check1 (expect0) : \" << ((singVals.array()-(shifts+mus)) / singVals.array()).head(actual_n).transpose() << \"\\n\\n\";\n    std::cout << \"    check2 (>0)      : \" << ((singVals.array()-diag) / singVals.array()).head(actual_n).transpose() << \"\\n\\n\";\n    std::cout << \"    check3 (>0)      : \" << ((diag.segment(1,actual_n-1)-singVals.head(actual_n-1).array()) / singVals.head(actual_n-1).array()).transpose() << \"\\n\\n\\n\";\n    std::cout << \"    check4 (>0)      : \" << ((singVals.segment(1,actual_n-1)-singVals.head(actual_n-1))).transpose() << \"\\n\\n\\n\";\n  }\n#endif\n  \n#ifdef EIGEN_BDCSVD_SANITY_CHECKS\n  assert(singVals.allFinite());\n  assert(mus.allFinite());\n  assert(shifts.allFinite());\n#endif\n  \n  // Compute zhat\n  perturbCol0(col0, diag, perm, singVals, shifts, mus, zhat);\n#ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE\n  std::cout << \"  zhat: \" << zhat.transpose() << \"\\n\";\n#endif\n  \n#ifdef EIGEN_BDCSVD_SANITY_CHECKS\n  assert(zhat.allFinite());\n#endif\n  \n  computeSingVecs(zhat, diag, perm, singVals, 
shifts, mus, U, V);\n  \n#ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE\n  std::cout << \"U^T U: \" << (U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() << \"\\n\";\n  std::cout << \"V^T V: \" << (V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() << \"\\n\";\n#endif\n  \n#ifdef EIGEN_BDCSVD_SANITY_CHECKS\n  assert(U.allFinite());\n  assert(V.allFinite());\n  assert((U.transpose() * U - MatrixXr(MatrixXr::Identity(U.cols(),U.cols()))).norm() < 1e-14 * n);\n  assert((V.transpose() * V - MatrixXr(MatrixXr::Identity(V.cols(),V.cols()))).norm() < 1e-14 * n);\n  assert(m_naiveU.allFinite());\n  assert(m_naiveV.allFinite());\n  assert(m_computed.allFinite());\n#endif\n  \n  // Because of deflation, the singular values might not be completely sorted.\n  // Fortunately, reordering them is a O(n) problem\n  for(Index i=0; i<actual_n-1; ++i)\n  {\n    if(singVals(i)>singVals(i+1))\n    {\n      using std::swap;\n      swap(singVals(i),singVals(i+1));\n      U.col(i).swap(U.col(i+1));\n      if(m_compV) V.col(i).swap(V.col(i+1));\n    }\n  }\n  \n  // Reverse order so that singular values in increased order\n  // Because of deflation, the zeros singular-values are already at the end\n  singVals.head(actual_n).reverseInPlace();\n  U.leftCols(actual_n).rowwise().reverseInPlace();\n  if (m_compV) V.leftCols(actual_n).rowwise().reverseInPlace();\n  \n#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE\n  JacobiSVD<MatrixXr> jsvd(m_computed.block(firstCol, firstCol, n, n) );\n  std::cout << \"  * j:        \" << jsvd.singularValues().transpose() << \"\\n\\n\";\n  std::cout << \"  * sing-val: \" << singVals.transpose() << \"\\n\";\n//   std::cout << \"  * err:      \" << ((jsvd.singularValues()-singVals)>1e-13*singVals.norm()).transpose() << \"\\n\";\n#endif\n}\n\ntemplate <typename MatrixType>\ntypename BDCSVD<MatrixType>::RealScalar BDCSVD<MatrixType>::secularEq(RealScalar mu, const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const 
ArrayRef& diagShifted, RealScalar shift)\n{\n  Index m = perm.size();\n  RealScalar res = 1;\n  for(Index i=0; i<m; ++i)\n  {\n    Index j = perm(i);\n    res += numext::abs2(col0(j)) / ((diagShifted(j) - mu) * (diag(j) + shift + mu));\n  }\n  return res;\n\n}\n\ntemplate <typename MatrixType>\nvoid BDCSVD<MatrixType>::computeSingVals(const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm,\n                                         VectorType& singVals, ArrayRef shifts, ArrayRef mus)\n{\n  using std::abs;\n  using std::swap;\n\n  Index n = col0.size();\n  Index actual_n = n;\n  while(actual_n>1 && col0(actual_n-1)==0) --actual_n;\n\n  for (Index k = 0; k < n; ++k)\n  {\n    if (col0(k) == 0 || actual_n==1)\n    {\n      // if col0(k) == 0, then entry is deflated, so singular value is on diagonal\n      // if actual_n==1, then the deflated problem is already diagonalized\n      singVals(k) = k==0 ? col0(0) : diag(k);\n      mus(k) = 0;\n      shifts(k) = k==0 ? col0(0) : diag(k);\n      continue;\n    } \n\n    // otherwise, use secular equation to find singular value\n    RealScalar left = diag(k);\n    RealScalar right; // was: = (k != actual_n-1) ? 
diag(k+1) : (diag(actual_n-1) + col0.matrix().norm());\n    if(k==actual_n-1)\n      right = (diag(actual_n-1) + col0.matrix().norm());\n    else\n    {\n      // Skip deflated singular values\n      Index l = k+1;\n      while(col0(l)==0) { ++l; eigen_internal_assert(l<actual_n); }\n      right = diag(l);\n    }\n\n    // first decide whether it's closer to the left end or the right end\n    RealScalar mid = left + (right-left) / 2;\n    RealScalar fMid = secularEq(mid, col0, diag, perm, diag, 0);\n#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE\n    std::cout << right-left << \"\\n\";\n    std::cout << \"fMid = \" << fMid << \" \" << secularEq(mid-left, col0, diag, perm, diag-left, left) << \" \" << secularEq(mid-right, col0, diag, perm, diag-right, right)   << \"\\n\";\n    std::cout << \"     = \" << secularEq(0.1*(left+right), col0, diag, perm, diag, 0)\n              << \" \"       << secularEq(0.2*(left+right), col0, diag, perm, diag, 0)\n              << \" \"       << secularEq(0.3*(left+right), col0, diag, perm, diag, 0)\n              << \" \"       << secularEq(0.4*(left+right), col0, diag, perm, diag, 0)\n              << \" \"       << secularEq(0.49*(left+right), col0, diag, perm, diag, 0)\n              << \" \"       << secularEq(0.5*(left+right), col0, diag, perm, diag, 0)\n              << \" \"       << secularEq(0.51*(left+right), col0, diag, perm, diag, 0)\n              << \" \"       << secularEq(0.6*(left+right), col0, diag, perm, diag, 0)\n              << \" \"       << secularEq(0.7*(left+right), col0, diag, perm, diag, 0)\n              << \" \"       << secularEq(0.8*(left+right), col0, diag, perm, diag, 0)\n              << \" \"       << secularEq(0.9*(left+right), col0, diag, perm, diag, 0) << \"\\n\";\n#endif\n    RealScalar shift = (k == actual_n-1 || fMid > 0) ? 
left : right;\n    \n    // measure everything relative to shift\n    Map<ArrayXr> diagShifted(m_workspace.data()+4*n, n);\n    diagShifted = diag - shift;\n    \n    // initial guess\n    RealScalar muPrev, muCur;\n    if (shift == left)\n    {\n      muPrev = (right - left) * RealScalar(0.1);\n      if (k == actual_n-1) muCur = right - left;\n      else                 muCur = (right - left) * RealScalar(0.5);\n    }\n    else\n    {\n      muPrev = -(right - left) * RealScalar(0.1);\n      muCur = -(right - left) * RealScalar(0.5);\n    }\n\n    RealScalar fPrev = secularEq(muPrev, col0, diag, perm, diagShifted, shift);\n    RealScalar fCur = secularEq(muCur, col0, diag, perm, diagShifted, shift);\n    if (abs(fPrev) < abs(fCur))\n    {\n      swap(fPrev, fCur);\n      swap(muPrev, muCur);\n    }\n\n    // rational interpolation: fit a function of the form a / mu + b through the two previous\n    // iterates and use its zero to compute the next iterate\n    bool useBisection = fPrev*fCur>0;\n    while (fCur!=0 && abs(muCur - muPrev) > 8 * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(muCur), abs(muPrev)) && abs(fCur - fPrev)>NumTraits<RealScalar>::epsilon() && !useBisection)\n    {\n      ++m_numIters;\n\n      // Find a and b such that the function f(mu) = a / mu + b matches the current and previous samples.\n      RealScalar a = (fCur - fPrev) / (1/muCur - 1/muPrev);\n      RealScalar b = fCur - a / muCur;\n      // And find mu such that f(mu)==0:\n      RealScalar muZero = -a/b;\n      RealScalar fZero = secularEq(muZero, col0, diag, perm, diagShifted, shift);\n      \n      muPrev = muCur;\n      fPrev = fCur;\n      muCur = muZero;\n      fCur = fZero;\n      \n      \n      if (shift == left  && (muCur < 0 || muCur > right - left)) useBisection = true;\n      if (shift == right && (muCur < -(right - left) || muCur > 0)) useBisection = true;\n      if (abs(fCur)>abs(fPrev)) useBisection = true;\n    }\n\n    // fall back on bisection 
method if rational interpolation did not work\n    if (useBisection)\n    {\n#ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE\n      std::cout << \"useBisection for k = \" << k << \", actual_n = \" << actual_n << \"\\n\";\n#endif\n      RealScalar leftShifted, rightShifted;\n      if (shift == left)\n      {\n        leftShifted = (std::numeric_limits<RealScalar>::min)();\n        // I don't understand why the case k==0 would be special there:\n        // if (k == 0) rightShifted = right - left; else \n        rightShifted = (k==actual_n-1) ? right : ((right - left) * RealScalar(0.6)); // theoretically we can take 0.5, but let's be safe\n      }\n      else\n      {\n        leftShifted = -(right - left) * RealScalar(0.6);\n        rightShifted = -(std::numeric_limits<RealScalar>::min)();\n      }\n      \n      RealScalar fLeft = secularEq(leftShifted, col0, diag, perm, diagShifted, shift);\n\n#if defined EIGEN_INTERNAL_DEBUGGING || defined EIGEN_BDCSVD_DEBUG_VERBOSE\n      RealScalar fRight = secularEq(rightShifted, col0, diag, perm, diagShifted, shift);\n#endif\n\n#ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE\n      if(!(fLeft * fRight<0))\n      {\n        std::cout << \"fLeft: \" << leftShifted << \" - \" << diagShifted.head(10).transpose()  << \"\\n ; \" << bool(left==shift) << \" \" << (left-shift) << \"\\n\";\n        std::cout << k << \" : \" <<  fLeft << \" * \" << fRight << \" == \" << fLeft * fRight << \"  ;  \" << left << \" - \" << right << \" -> \" <<  leftShifted << \" \" << rightShifted << \"   shift=\" << shift << \"\\n\";\n      }\n#endif\n      eigen_internal_assert(fLeft * fRight < 0);\n      \n      while (rightShifted - leftShifted > 2 * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(abs(leftShifted), abs(rightShifted)))\n      {\n        RealScalar midShifted = (leftShifted + rightShifted) / 2;\n        fMid = secularEq(midShifted, col0, diag, perm, diagShifted, shift);\n        if (fLeft * fMid < 0)\n        {\n          rightShifted = midShifted;\n    
    }\n        else\n        {\n          leftShifted = midShifted;\n          fLeft = fMid;\n        }\n      }\n\n      muCur = (leftShifted + rightShifted) / 2;\n    }\n      \n    singVals[k] = shift + muCur;\n    shifts[k] = shift;\n    mus[k] = muCur;\n\n    // perturb singular value slightly if it equals diagonal entry to avoid division by zero later\n    // (deflation is supposed to avoid this from happening)\n    // - this does no seem to be necessary anymore -\n//     if (singVals[k] == left) singVals[k] *= 1 + NumTraits<RealScalar>::epsilon();\n//     if (singVals[k] == right) singVals[k] *= 1 - NumTraits<RealScalar>::epsilon();\n  }\n}\n\n\n// zhat is perturbation of col0 for which singular vectors can be computed stably (see Section 3.1)\ntemplate <typename MatrixType>\nvoid BDCSVD<MatrixType>::perturbCol0\n   (const ArrayRef& col0, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,\n    const ArrayRef& shifts, const ArrayRef& mus, ArrayRef zhat)\n{\n  using std::sqrt;\n  Index n = col0.size();\n  Index m = perm.size();\n  if(m==0)\n  {\n    zhat.setZero();\n    return;\n  }\n  Index last = perm(m-1);\n  // The offset permits to skip deflated entries while computing zhat\n  for (Index k = 0; k < n; ++k)\n  {\n    if (col0(k) == 0) // deflated\n      zhat(k) = 0;\n    else\n    {\n      // see equation (3.6)\n      RealScalar dk = diag(k);\n      RealScalar prod = (singVals(last) + dk) * (mus(last) + (shifts(last) - dk));\n\n      for(Index l = 0; l<m; ++l)\n      {\n        Index i = perm(l);\n        if(i!=k)\n        {\n          Index j = i<k ? 
i : perm(l-1);\n          prod *= ((singVals(j)+dk) / ((diag(i)+dk))) * ((mus(j)+(shifts(j)-dk)) / ((diag(i)-dk)));\n#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE\n          if(i!=k && std::abs(((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) - 1) > 0.9 )\n            std::cout << \"     \" << ((singVals(j)+dk)*(mus(j)+(shifts(j)-dk)))/((diag(i)+dk)*(diag(i)-dk)) << \" == (\" << (singVals(j)+dk) << \" * \" << (mus(j)+(shifts(j)-dk))\n                       << \") / (\" << (diag(i)+dk) << \" * \" << (diag(i)-dk) << \")\\n\";\n#endif\n        }\n      }\n#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE\n      std::cout << \"zhat(\" << k << \") =  sqrt( \" << prod << \")  ;  \" << (singVals(last) + dk) << \" * \" << mus(last) + shifts(last) << \" - \" << dk << \"\\n\";\n#endif\n      RealScalar tmp = sqrt(prod);\n      zhat(k) = col0(k) > 0 ? tmp : -tmp;\n    }\n  }\n}\n\n// compute singular vectors\ntemplate <typename MatrixType>\nvoid BDCSVD<MatrixType>::computeSingVecs\n   (const ArrayRef& zhat, const ArrayRef& diag, const IndicesRef &perm, const VectorType& singVals,\n    const ArrayRef& shifts, const ArrayRef& mus, MatrixXr& U, MatrixXr& V)\n{\n  Index n = zhat.size();\n  Index m = perm.size();\n  \n  for (Index k = 0; k < n; ++k)\n  {\n    if (zhat(k) == 0)\n    {\n      U.col(k) = VectorType::Unit(n+1, k);\n      if (m_compV) V.col(k) = VectorType::Unit(n, k);\n    }\n    else\n    {\n      U.col(k).setZero();\n      for(Index l=0;l<m;++l)\n      {\n        Index i = perm(l);\n        U(i,k) = zhat(i)/(((diag(i) - shifts(k)) - mus(k)) )/( (diag(i) + singVals[k]));\n      }\n      U(n,k) = 0;      \n      U.col(k).normalize();\n    \n      if (m_compV)\n      {\n        V.col(k).setZero();\n        for(Index l=1;l<m;++l)\n        {\n          Index i = perm(l);\n          V(i,k) = diag(i) * zhat(i) / (((diag(i) - shifts(k)) - mus(k)) )/( (diag(i) + singVals[k]));\n        }\n        V(0,k) = -1;\n        V.col(k).normalize();\n      }\n    }\n  }\n  U.col(n) = 
VectorType::Unit(n+1, n);\n}\n\n\n// page 12_13\n// i >= 1, di almost null and zi non null.\n// We use a rotation to zero out zi applied to the left of M\ntemplate <typename MatrixType>\nvoid BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index size)\n{\n  using std::abs;\n  using std::sqrt;\n  using std::pow;\n  Index start = firstCol + shift;\n  RealScalar c = m_computed(start, start);\n  RealScalar s = m_computed(start+i, start);\n  RealScalar r = sqrt(numext::abs2(c) + numext::abs2(s));\n  if (r == 0)\n  {\n    m_computed(start+i, start+i) = 0;\n    return;\n  }\n  m_computed(start,start) = r;  \n  m_computed(start+i, start) = 0;\n  m_computed(start+i, start+i) = 0;\n  \n  JacobiRotation<RealScalar> J(c/r,-s/r);\n  if (m_compU)  m_naiveU.middleRows(firstCol, size+1).applyOnTheRight(firstCol, firstCol+i, J);\n  else          m_naiveU.applyOnTheRight(firstCol, firstCol+i, J);\n}// end deflation 43\n\n\n// page 13\n// i,j >= 1, i!=j and |di - dj| < epsilon * norm2(M)\n// We apply two rotations to have zj = 0;\n// TODO deflation44 is still broken and not properly tested\ntemplate <typename MatrixType>\nvoid BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size)\n{\n  using std::abs;\n  using std::sqrt;\n  using std::conj;\n  using std::pow;\n  RealScalar c = m_computed(firstColm+i, firstColm);\n  RealScalar s = m_computed(firstColm+j, firstColm);\n  RealScalar r = sqrt(numext::abs2(c) + numext::abs2(s));\n#ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE\n  std::cout << \"deflation 4.4: \" << i << \",\" << j << \" -> \" << c << \" \" << s << \" \" << r << \" ; \"\n    << m_computed(firstColm + i-1, firstColm)  << \" \"\n    << m_computed(firstColm + i, firstColm)  << \" \"\n    << m_computed(firstColm + i+1, firstColm) << \" \"\n    << m_computed(firstColm + i+2, firstColm) << \"\\n\";\n  std::cout << m_computed(firstColm + i-1, firstColm + i-1)  << \" \"\n    << 
m_computed(firstColm + i, firstColm+i)  << \" \"\n    << m_computed(firstColm + i+1, firstColm+i+1) << \" \"\n    << m_computed(firstColm + i+2, firstColm+i+2) << \"\\n\";\n#endif\n  if (r==0)\n  {\n    m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);\n    return;\n  }\n  c/=r;\n  s/=r;\n  m_computed(firstColm + i, firstColm) = r;  \n  m_computed(firstColm + j, firstColm + j) = m_computed(firstColm + i, firstColm + i);\n  m_computed(firstColm + j, firstColm) = 0;\n\n  JacobiRotation<RealScalar> J(c,-s);\n  if (m_compU)  m_naiveU.middleRows(firstColu, size+1).applyOnTheRight(firstColu + i, firstColu + j, J);\n  else          m_naiveU.applyOnTheRight(firstColu+i, firstColu+j, J);\n  if (m_compV)  m_naiveV.middleRows(firstRowW, size).applyOnTheRight(firstColW + i, firstColW + j, J);\n}// end deflation 44\n\n\n// acts on block from (firstCol+shift, firstCol+shift) to (lastCol+shift, lastCol+shift) [inclusive]\ntemplate <typename MatrixType>\nvoid BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift)\n{\n  using std::sqrt;\n  using std::abs;\n  const Index length = lastCol + 1 - firstCol;\n  \n  Block<MatrixXr,Dynamic,1> col0(m_computed, firstCol+shift, firstCol+shift, length, 1);\n  Diagonal<MatrixXr> fulldiag(m_computed);\n  VectorBlock<Diagonal<MatrixXr>,Dynamic> diag(fulldiag, firstCol+shift, length);\n  \n  const RealScalar considerZero = (std::numeric_limits<RealScalar>::min)();\n  RealScalar maxDiag = diag.tail((std::max)(Index(1),length-1)).cwiseAbs().maxCoeff();\n  RealScalar epsilon_strict = numext::maxi<RealScalar>(considerZero,NumTraits<RealScalar>::epsilon() * maxDiag);\n  RealScalar epsilon_coarse = 8 * NumTraits<RealScalar>::epsilon() * numext::maxi<RealScalar>(col0.cwiseAbs().maxCoeff(), maxDiag);\n  \n#ifdef EIGEN_BDCSVD_SANITY_CHECKS\n  assert(m_naiveU.allFinite());\n  assert(m_naiveV.allFinite());\n  assert(m_computed.allFinite());\n#endif\n\n#ifdef  
EIGEN_BDCSVD_DEBUG_VERBOSE  \n  std::cout << \"\\ndeflate:\" << diag.head(k+1).transpose() << \"  |  \" << diag.segment(k+1,length-k-1).transpose() << \"\\n\";\n#endif\n  \n  //condition 4.1\n  if (diag(0) < epsilon_coarse)\n  { \n#ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE\n    std::cout << \"deflation 4.1, because \" << diag(0) << \" < \" << epsilon_coarse << \"\\n\";\n#endif\n    diag(0) = epsilon_coarse;\n  }\n\n  //condition 4.2\n  for (Index i=1;i<length;++i)\n    if (abs(col0(i)) < epsilon_strict)\n    {\n#ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE\n      std::cout << \"deflation 4.2, set z(\" << i << \") to zero because \" << abs(col0(i)) << \" < \" << epsilon_strict << \"  (diag(\" << i << \")=\" << diag(i) << \")\\n\";\n#endif\n      col0(i) = 0;\n    }\n\n  //condition 4.3\n  for (Index i=1;i<length; i++)\n    if (diag(i) < epsilon_coarse)\n    {\n#ifdef  EIGEN_BDCSVD_DEBUG_VERBOSE\n      std::cout << \"deflation 4.3, cancel z(\" << i << \")=\" << col0(i) << \" because diag(\" << i << \")=\" << diag(i) << \" < \" << epsilon_coarse << \"\\n\";\n#endif\n      deflation43(firstCol, shift, i, length);\n    }\n\n#ifdef EIGEN_BDCSVD_SANITY_CHECKS\n  assert(m_naiveU.allFinite());\n  assert(m_naiveV.allFinite());\n  assert(m_computed.allFinite());\n#endif\n#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE\n  std::cout << \"to be sorted: \" << diag.transpose() << \"\\n\\n\";\n#endif\n  {\n    // Check for total deflation\n    // If we have a total deflation, then we have to consider col0(0)==diag(0) as a singular value during sorting\n    bool total_deflation = (col0.tail(length-1).array()<considerZero).all();\n    \n    // Sort the diagonal entries, since diag(1:k-1) and diag(k:length) are already sorted, let's do a sorted merge.\n    // First, compute the respective permutation.\n    Index *permutation = m_workspaceI.data();\n    {\n      permutation[0] = 0;\n      Index p = 1;\n      \n      // Move deflated diagonal entries at the end.\n      for(Index i=1; i<length; ++i)\n        
if(abs(diag(i))<considerZero)\n          permutation[p++] = i;\n        \n      Index i=1, j=k+1;\n      for( ; p < length; ++p)\n      {\n             if (i > k)             permutation[p] = j++;\n        else if (j >= length)       permutation[p] = i++;\n        else if (diag(i) < diag(j)) permutation[p] = j++;\n        else                        permutation[p] = i++;\n      }\n    }\n    \n    // If we have a total deflation, then we have to insert diag(0) at the right place\n    if(total_deflation)\n    {\n      for(Index i=1; i<length; ++i)\n      {\n        Index pi = permutation[i];\n        if(abs(diag(pi))<considerZero || diag(0)<diag(pi))\n          permutation[i-1] = permutation[i];\n        else\n        {\n          permutation[i-1] = 0;\n          break;\n        }\n      }\n    }\n    \n    // Current index of each col, and current column of each index\n    Index *realInd = m_workspaceI.data()+length;\n    Index *realCol = m_workspaceI.data()+2*length;\n    \n    for(int pos = 0; pos< length; pos++)\n    {\n      realCol[pos] = pos;\n      realInd[pos] = pos;\n    }\n    \n    for(Index i = total_deflation?0:1; i < length; i++)\n    {\n      const Index pi = permutation[length - (total_deflation ? 
i+1 : i)];\n      const Index J = realCol[pi];\n      \n      using std::swap;\n      // swap diagonal and first column entries:\n      swap(diag(i), diag(J));\n      if(i!=0 && J!=0) swap(col0(i), col0(J));\n\n      // change columns\n      if (m_compU) m_naiveU.col(firstCol+i).segment(firstCol, length + 1).swap(m_naiveU.col(firstCol+J).segment(firstCol, length + 1));\n      else         m_naiveU.col(firstCol+i).segment(0, 2)                .swap(m_naiveU.col(firstCol+J).segment(0, 2));\n      if (m_compV) m_naiveV.col(firstColW + i).segment(firstRowW, length).swap(m_naiveV.col(firstColW + J).segment(firstRowW, length));\n\n      //update real pos\n      const Index realI = realInd[i];\n      realCol[realI] = J;\n      realCol[pi] = i;\n      realInd[J] = realI;\n      realInd[i] = pi;\n    }\n  }\n#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE\n  std::cout << \"sorted: \" << diag.transpose().format(bdcsvdfmt) << \"\\n\";\n  std::cout << \"      : \" << col0.transpose() << \"\\n\\n\";\n#endif\n    \n  //condition 4.4\n  {\n    Index i = length-1;\n    while(i>0 && (abs(diag(i))<considerZero || abs(col0(i))<considerZero)) --i;\n    for(; i>1;--i)\n       if( (diag(i) - diag(i-1)) < NumTraits<RealScalar>::epsilon()*maxDiag )\n      {\n#ifdef EIGEN_BDCSVD_DEBUG_VERBOSE\n        std::cout << \"deflation 4.4 with i = \" << i << \" because \" << (diag(i) - diag(i-1)) << \" < \" << NumTraits<RealScalar>::epsilon()*diag(i) << \"\\n\";\n#endif\n        eigen_internal_assert(abs(diag(i) - diag(i-1))<epsilon_coarse && \" diagonal entries are not properly sorted\");\n        deflation44(firstCol, firstCol + shift, firstRowW, firstColW, i-1, i, length);\n      }\n  }\n  \n#ifdef EIGEN_BDCSVD_SANITY_CHECKS\n  for(Index j=2;j<length;++j)\n    assert(diag(j-1)<=diag(j) || abs(diag(j))<considerZero);\n#endif\n  \n#ifdef EIGEN_BDCSVD_SANITY_CHECKS\n  assert(m_naiveU.allFinite());\n  assert(m_naiveV.allFinite());\n  assert(m_computed.allFinite());\n#endif\n}//end deflation\n\n#ifndef 
__CUDACC__\n/** \\svd_module\n  *\n  * \\return the singular value decomposition of \\c *this computed by Divide & Conquer algorithm\n  *\n  * \\sa class BDCSVD\n  */\ntemplate<typename Derived>\nBDCSVD<typename MatrixBase<Derived>::PlainObject>\nMatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const\n{\n  return BDCSVD<PlainObject>(*this, computationOptions);\n}\n#endif\n\n} // end namespace Eigen\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SVD/JacobiSVD.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2013-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_JACOBISVD_H\n#define EIGEN_JACOBISVD_H\n\nnamespace Eigen { \n\nnamespace internal {\n// forward declaration (needed by ICC)\n// the empty body is required by MSVC\ntemplate<typename MatrixType, int QRPreconditioner,\n         bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>\nstruct svd_precondition_2x2_block_to_be_real {};\n\n/*** QR preconditioners (R-SVD)\n ***\n *** Their role is to reduce the problem of computing the SVD to the case of a square matrix.\n *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for\n *** JacobiSVD which by itself is only able to work on square matrices.\n ***/\n\nenum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };\n\ntemplate<typename MatrixType, int QRPreconditioner, int Case>\nstruct qr_preconditioner_should_do_anything\n{\n  enum { a = MatrixType::RowsAtCompileTime != Dynamic &&\n             MatrixType::ColsAtCompileTime != Dynamic &&\n             MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,\n         b = MatrixType::RowsAtCompileTime != Dynamic &&\n             MatrixType::ColsAtCompileTime != Dynamic &&\n             MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,\n         ret = !( (QRPreconditioner == NoQRPreconditioner) ||\n                  (Case == PreconditionIfMoreColsThanRows && bool(a)) ||\n                  (Case == PreconditionIfMoreRowsThanCols && bool(b)) )\n  };\n};\n\ntemplate<typename MatrixType, int 
QRPreconditioner, int Case,\n         bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret\n> struct qr_preconditioner_impl {};\n\ntemplate<typename MatrixType, int QRPreconditioner, int Case>\nclass qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>\n{\npublic:\n  void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}\n  bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)\n  {\n    return false;\n  }\n};\n\n/*** preconditioner using FullPivHouseholderQR ***/\n\ntemplate<typename MatrixType>\nclass qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>\n{\npublic:\n  typedef typename MatrixType::Scalar Scalar;\n  enum\n  {\n    RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime\n  };\n  typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;\n\n  void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)\n  {\n    if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())\n    {\n      m_qr.~QRType();\n      ::new (&m_qr) QRType(svd.rows(), svd.cols());\n    }\n    if (svd.m_computeFullU) m_workspace.resize(svd.rows());\n  }\n\n  bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)\n  {\n    if(matrix.rows() > matrix.cols())\n    {\n      m_qr.compute(matrix);\n      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();\n      if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);\n      if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();\n      return true;\n    }\n    return false;\n  }\nprivate:\n  typedef FullPivHouseholderQR<MatrixType> QRType;\n  QRType m_qr;\n  WorkspaceType m_workspace;\n};\n\ntemplate<typename MatrixType>\nclass qr_preconditioner_impl<MatrixType, 
FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>\n{\npublic:\n  typedef typename MatrixType::Scalar Scalar;\n  enum\n  {\n    RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n    ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,\n    TrOptions = RowsAtCompileTime==1 ? (MatrixType::Options & ~(RowMajor))\n              : ColsAtCompileTime==1 ? (MatrixType::Options |   RowMajor)\n              : MatrixType::Options\n  };\n  typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, TrOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime>\n          TransposeTypeWithSameStorageOrder;\n\n  void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)\n  {\n    if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())\n    {\n      m_qr.~QRType();\n      ::new (&m_qr) QRType(svd.cols(), svd.rows());\n    }\n    m_adjoint.resize(svd.cols(), svd.rows());\n    if (svd.m_computeFullV) m_workspace.resize(svd.cols());\n  }\n\n  bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)\n  {\n    if(matrix.cols() > matrix.rows())\n    {\n      m_adjoint = matrix.adjoint();\n      m_qr.compute(m_adjoint);\n      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();\n      if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);\n      if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();\n      return true;\n    }\n    else return false;\n  }\nprivate:\n  typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;\n  QRType m_qr;\n  TransposeTypeWithSameStorageOrder m_adjoint;\n  typename internal::plain_row_type<MatrixType>::type m_workspace;\n};\n\n/*** preconditioner using ColPivHouseholderQR ***/\n\ntemplate<typename MatrixType>\nclass 
qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>\n{\npublic:\n  void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)\n  {\n    if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())\n    {\n      m_qr.~QRType();\n      ::new (&m_qr) QRType(svd.rows(), svd.cols());\n    }\n    if (svd.m_computeFullU) m_workspace.resize(svd.rows());\n    else if (svd.m_computeThinU) m_workspace.resize(svd.cols());\n  }\n\n  bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)\n  {\n    if(matrix.rows() > matrix.cols())\n    {\n      m_qr.compute(matrix);\n      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();\n      if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);\n      else if(svd.m_computeThinU)\n      {\n        svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());\n        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);\n      }\n      if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();\n      return true;\n    }\n    return false;\n  }\n\nprivate:\n  typedef ColPivHouseholderQR<MatrixType> QRType;\n  QRType m_qr;\n  typename internal::plain_col_type<MatrixType>::type m_workspace;\n};\n\ntemplate<typename MatrixType>\nclass qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>\n{\npublic:\n  typedef typename MatrixType::Scalar Scalar;\n  enum\n  {\n    RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n    ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,\n    TrOptions = RowsAtCompileTime==1 ? (MatrixType::Options & ~(RowMajor))\n              : ColsAtCompileTime==1 ? 
(MatrixType::Options |   RowMajor)\n              : MatrixType::Options\n  };\n\n  typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, TrOptions, MaxColsAtCompileTime, MaxRowsAtCompileTime>\n          TransposeTypeWithSameStorageOrder;\n\n  void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)\n  {\n    if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())\n    {\n      m_qr.~QRType();\n      ::new (&m_qr) QRType(svd.cols(), svd.rows());\n    }\n    if (svd.m_computeFullV) m_workspace.resize(svd.cols());\n    else if (svd.m_computeThinV) m_workspace.resize(svd.rows());\n    m_adjoint.resize(svd.cols(), svd.rows());\n  }\n\n  bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)\n  {\n    if(matrix.cols() > matrix.rows())\n    {\n      m_adjoint = matrix.adjoint();\n      m_qr.compute(m_adjoint);\n\n      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();\n      if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);\n      else if(svd.m_computeThinV)\n      {\n        svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());\n        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);\n      }\n      if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();\n      return true;\n    }\n    else return false;\n  }\n\nprivate:\n  typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;\n  QRType m_qr;\n  TransposeTypeWithSameStorageOrder m_adjoint;\n  typename internal::plain_row_type<MatrixType>::type m_workspace;\n};\n\n/*** preconditioner using HouseholderQR ***/\n\ntemplate<typename MatrixType>\nclass qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>\n{\npublic:\n  void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)\n  {\n    if (svd.rows() != m_qr.rows() || svd.cols() != 
m_qr.cols())\n    {\n      m_qr.~QRType();\n      ::new (&m_qr) QRType(svd.rows(), svd.cols());\n    }\n    if (svd.m_computeFullU) m_workspace.resize(svd.rows());\n    else if (svd.m_computeThinU) m_workspace.resize(svd.cols());\n  }\n\n  bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)\n  {\n    if(matrix.rows() > matrix.cols())\n    {\n      m_qr.compute(matrix);\n      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();\n      if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);\n      else if(svd.m_computeThinU)\n      {\n        svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());\n        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);\n      }\n      if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());\n      return true;\n    }\n    return false;\n  }\nprivate:\n  typedef HouseholderQR<MatrixType> QRType;\n  QRType m_qr;\n  typename internal::plain_col_type<MatrixType>::type m_workspace;\n};\n\ntemplate<typename MatrixType>\nclass qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>\n{\npublic:\n  typedef typename MatrixType::Scalar Scalar;\n  enum\n  {\n    RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n    ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,\n    Options = MatrixType::Options\n  };\n\n  typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>\n          TransposeTypeWithSameStorageOrder;\n\n  void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)\n  {\n    if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())\n    {\n      m_qr.~QRType();\n      ::new (&m_qr) QRType(svd.cols(), svd.rows());\n    }\n    if 
(svd.m_computeFullV) m_workspace.resize(svd.cols());\n    else if (svd.m_computeThinV) m_workspace.resize(svd.rows());\n    m_adjoint.resize(svd.cols(), svd.rows());\n  }\n\n  bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)\n  {\n    if(matrix.cols() > matrix.rows())\n    {\n      m_adjoint = matrix.adjoint();\n      m_qr.compute(m_adjoint);\n\n      svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();\n      if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);\n      else if(svd.m_computeThinV)\n      {\n        svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());\n        m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);\n      }\n      if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());\n      return true;\n    }\n    else return false;\n  }\n\nprivate:\n  typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;\n  QRType m_qr;\n  TransposeTypeWithSameStorageOrder m_adjoint;\n  typename internal::plain_row_type<MatrixType>::type m_workspace;\n};\n\n/*** 2x2 SVD implementation\n ***\n *** JacobiSVD consists in performing a series of 2x2 SVD subproblems\n ***/\n\ntemplate<typename MatrixType, int QRPreconditioner>\nstruct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>\n{\n  typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;\n  typedef typename MatrixType::RealScalar RealScalar;\n  static bool run(typename SVD::WorkMatrixType&, SVD&, Index, Index, RealScalar&) { return true; }\n};\n\ntemplate<typename MatrixType, int QRPreconditioner>\nstruct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>\n{\n  typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;\n  typedef typename MatrixType::Scalar Scalar;\n  typedef typename MatrixType::RealScalar RealScalar;\n  static bool run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index 
p, Index q, RealScalar& maxDiagEntry)\n  {\n    using std::sqrt;\n    using std::abs;\n    Scalar z;\n    JacobiRotation<Scalar> rot;\n    RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));\n\n    const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();\n    const RealScalar precision = NumTraits<Scalar>::epsilon();\n\n    if(n==0)\n    {\n      // make sure first column is zero\n      work_matrix.coeffRef(p,p) = work_matrix.coeffRef(q,p) = Scalar(0);\n\n      if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero)\n      {\n        // work_matrix.coeff(p,q) can be zero if work_matrix.coeff(q,p) is not zero but small enough to underflow when computing n\n        z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);\n        work_matrix.row(p) *= z;\n        if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);\n      }\n      if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero)\n      {\n        z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);\n        work_matrix.row(q) *= z;\n        if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);\n      }\n      // otherwise the second row is already zero, so we have nothing to do.\n    }\n    else\n    {\n      rot.c() = conj(work_matrix.coeff(p,p)) / n;\n      rot.s() = work_matrix.coeff(q,p) / n;\n      work_matrix.applyOnTheLeft(p,q,rot);\n      if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());\n      if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero)\n      {\n        z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);\n        work_matrix.col(q) *= z;\n        if(svd.computeV()) svd.m_matrixV.col(q) *= z;\n      }\n      if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero)\n      {\n        z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);\n        work_matrix.row(q) *= z;\n        if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);\n      }\n    }\n\n    // update largest 
diagonal entry\n    maxDiagEntry = numext::maxi<RealScalar>(maxDiagEntry,numext::maxi<RealScalar>(abs(work_matrix.coeff(p,p)), abs(work_matrix.coeff(q,q))));\n    // and check whether the 2x2 block is already diagonal\n    RealScalar threshold = numext::maxi<RealScalar>(considerAsZero, precision * maxDiagEntry);\n    return abs(work_matrix.coeff(p,q))>threshold || abs(work_matrix.coeff(q,p)) > threshold;\n  }\n};\n\ntemplate<typename _MatrixType, int QRPreconditioner> \nstruct traits<JacobiSVD<_MatrixType,QRPreconditioner> >\n{\n  typedef _MatrixType MatrixType;\n};\n\n} // end namespace internal\n\n/** \\ingroup SVD_Module\n  *\n  *\n  * \\class JacobiSVD\n  *\n  * \\brief Two-sided Jacobi SVD decomposition of a rectangular matrix\n  *\n  * \\tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition\n  * \\tparam QRPreconditioner this optional parameter allows to specify the type of QR decomposition that will be used internally\n  *                        for the R-SVD step for non-square matrices. See discussion of possible values below.\n  *\n  * SVD decomposition consists in decomposing any n-by-p matrix \\a A as a product\n  *   \\f[ A = U S V^* \\f]\n  * where \\a U is a n-by-n unitary, \\a V is a p-by-p unitary, and \\a S is a n-by-p real positive matrix which is zero outside of its main diagonal;\n  * the diagonal entries of S are known as the \\em singular \\em values of \\a A and the columns of \\a U and \\a V are known as the left\n  * and right \\em singular \\em vectors of \\a A respectively.\n  *\n  * Singular values are always sorted in decreasing order.\n  *\n  * This JacobiSVD decomposition computes only the singular values by default. If you want \\a U or \\a V, you need to ask for them explicitly.\n  *\n  * You can ask for only \\em thin \\a U or \\a V to be computed, meaning the following. 
In case of a rectangular n-by-p matrix, letting \\a m be the\n  * smaller value among \\a n and \\a p, there are only \\a m singular vectors; the remaining columns of \\a U and \\a V do not correspond to actual\n  * singular vectors. Asking for \\em thin \\a U or \\a V means asking for only their \\a m first columns to be formed. So \\a U is then a n-by-m matrix,\n  * and \\a V is then a p-by-m matrix. Notice that thin \\a U and \\a V are all you need for (least squares) solving.\n  *\n  * Here's an example demonstrating basic usage:\n  * \\include JacobiSVD_basic.cpp\n  * Output: \\verbinclude JacobiSVD_basic.out\n  *\n  * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than\n  * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \\f$ O(n^2p) \\f$ where \\a n is the smaller dimension and\n  * \\a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.\n  * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.\n  *\n  * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to\n  * terminate in finite (and reasonable) time.\n  *\n  * The possible values for QRPreconditioner are:\n  * \\li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.\n  * \\li FullPivHouseholderQRPreconditioner, is the safest and slowest. It uses full-pivoting QR.\n  *     Contrary to other QRs, it doesn't allow computing thin unitaries.\n  * \\li HouseholderQRPreconditioner is the fastest, and less safe and accurate than the pivoting variants. 
It uses non-pivoting QR.\n  *     This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization\n  *     is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterarive\n  *     process is more reliable than the optimized bidiagonal SVD iterations.\n  * \\li NoQRPreconditioner allows not to use a QR preconditioner at all. This is useful if you know that you will only be computing\n  *     JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in\n  *     faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking\n  *     if QR preconditioning is needed before applying it anyway.\n  *\n  * \\sa MatrixBase::jacobiSvd()\n  */\ntemplate<typename _MatrixType, int QRPreconditioner> class JacobiSVD\n : public SVDBase<JacobiSVD<_MatrixType,QRPreconditioner> >\n{\n    typedef SVDBase<JacobiSVD> Base;\n  public:\n\n    typedef _MatrixType MatrixType;\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;\n    enum {\n      RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),\n      MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,\n      MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),\n      MatrixOptions = MatrixType::Options\n    };\n\n    typedef typename Base::MatrixUType MatrixUType;\n    typedef typename Base::MatrixVType MatrixVType;\n    typedef typename Base::SingularValuesType SingularValuesType;\n    \n    typedef typename 
internal::plain_row_type<MatrixType>::type RowType;\n    typedef typename internal::plain_col_type<MatrixType>::type ColType;\n    typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,\n                   MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>\n            WorkMatrixType;\n\n    /** \\brief Default Constructor.\n      *\n      * The default constructor is useful in cases in which the user intends to\n      * perform decompositions via JacobiSVD::compute(const MatrixType&).\n      */\n    JacobiSVD()\n    {}\n\n\n    /** \\brief Default Constructor with memory preallocation\n      *\n      * Like the default constructor but with preallocation of the internal data\n      * according to the specified problem size.\n      * \\sa JacobiSVD()\n      */\n    JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)\n    {\n      allocate(rows, cols, computationOptions);\n    }\n\n    /** \\brief Constructor performing the decomposition of given matrix.\n     *\n     * \\param matrix the matrix to decompose\n     * \\param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.\n     *                           By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,\n     *                           #ComputeFullV, #ComputeThinV.\n     *\n     * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). 
They also are not\n     * available with the (non-default) FullPivHouseholderQR preconditioner.\n     */\n    explicit JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)\n    {\n      compute(matrix, computationOptions);\n    }\n\n    /** \\brief Method performing the decomposition of given matrix using custom options.\n     *\n     * \\param matrix the matrix to decompose\n     * \\param computationOptions optional parameter allowing to specify if you want full or thin U or V unitaries to be computed.\n     *                           By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,\n     *                           #ComputeFullV, #ComputeThinV.\n     *\n     * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not\n     * available with the (non-default) FullPivHouseholderQR preconditioner.\n     */\n    JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions);\n\n    /** \\brief Method performing the decomposition of given matrix using current options.\n     *\n     * \\param matrix the matrix to decompose\n     *\n     * This method uses the current \\a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).\n     */\n    JacobiSVD& compute(const MatrixType& matrix)\n    {\n      return compute(matrix, m_computationOptions);\n    }\n\n    using Base::computeU;\n    using Base::computeV;\n    using Base::rows;\n    using Base::cols;\n    using Base::rank;\n\n  private:\n    void allocate(Index rows, Index cols, unsigned int computationOptions);\n\n  protected:\n    using Base::m_matrixU;\n    using Base::m_matrixV;\n    using Base::m_singularValues;\n    using Base::m_isInitialized;\n    using Base::m_isAllocated;\n    using Base::m_usePrescribedThreshold;\n    using Base::m_computeFullU;\n    using Base::m_computeThinU;\n    using 
Base::m_computeFullV;\n    using Base::m_computeThinV;\n    using Base::m_computationOptions;\n    using Base::m_nonzeroSingularValues;\n    using Base::m_rows;\n    using Base::m_cols;\n    using Base::m_diagSize;\n    using Base::m_prescribedThreshold;\n    WorkMatrixType m_workMatrix;\n\n    template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>\n    friend struct internal::svd_precondition_2x2_block_to_be_real;\n    template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>\n    friend struct internal::qr_preconditioner_impl;\n\n    internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;\n    internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;\n    MatrixType m_scaledMatrix;\n};\n\ntemplate<typename MatrixType, int QRPreconditioner>\nvoid JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)\n{\n  eigen_assert(rows >= 0 && cols >= 0);\n\n  if (m_isAllocated &&\n      rows == m_rows &&\n      cols == m_cols &&\n      computationOptions == m_computationOptions)\n  {\n    return;\n  }\n\n  m_rows = rows;\n  m_cols = cols;\n  m_isInitialized = false;\n  m_isAllocated = true;\n  m_computationOptions = computationOptions;\n  m_computeFullU = (computationOptions & ComputeFullU) != 0;\n  m_computeThinU = (computationOptions & ComputeThinU) != 0;\n  m_computeFullV = (computationOptions & ComputeFullV) != 0;\n  m_computeThinV = (computationOptions & ComputeThinV) != 0;\n  eigen_assert(!(m_computeFullU && m_computeThinU) && \"JacobiSVD: you can't ask for both full and thin U\");\n  eigen_assert(!(m_computeFullV && m_computeThinV) && \"JacobiSVD: you can't ask for both full and thin V\");\n  eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&\n              \"JacobiSVD: thin U and V 
are only available when your matrix has a dynamic number of columns.\");\n  if (QRPreconditioner == FullPivHouseholderQRPreconditioner)\n  {\n      eigen_assert(!(m_computeThinU || m_computeThinV) &&\n              \"JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. \"\n              \"Use the ColPivHouseholderQR preconditioner instead.\");\n  }\n  m_diagSize = (std::min)(m_rows, m_cols);\n  m_singularValues.resize(m_diagSize);\n  if(RowsAtCompileTime==Dynamic)\n    m_matrixU.resize(m_rows, m_computeFullU ? m_rows\n                            : m_computeThinU ? m_diagSize\n                            : 0);\n  if(ColsAtCompileTime==Dynamic)\n    m_matrixV.resize(m_cols, m_computeFullV ? m_cols\n                            : m_computeThinV ? m_diagSize\n                            : 0);\n  m_workMatrix.resize(m_diagSize, m_diagSize);\n  \n  if(m_cols>m_rows)   m_qr_precond_morecols.allocate(*this);\n  if(m_rows>m_cols)   m_qr_precond_morerows.allocate(*this);\n  if(m_rows!=m_cols)  m_scaledMatrix.resize(rows,cols);\n}\n\ntemplate<typename MatrixType, int QRPreconditioner>\nJacobiSVD<MatrixType, QRPreconditioner>&\nJacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)\n{\n  using std::abs;\n  allocate(matrix.rows(), matrix.cols(), computationOptions);\n\n  // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,\n  // only worsening the precision of U and V as we accumulate more rotations\n  const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();\n\n  // limit for denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)\n  const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();\n\n  // Scaling factor to reduce over/under-flows\n  RealScalar scale = matrix.cwiseAbs().maxCoeff();\n  if(scale==RealScalar(0)) scale = RealScalar(1);\n  \n  
/*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */\n\n  if(m_rows!=m_cols)\n  {\n    m_scaledMatrix = matrix / scale;\n    m_qr_precond_morecols.run(*this, m_scaledMatrix);\n    m_qr_precond_morerows.run(*this, m_scaledMatrix);\n  }\n  else\n  {\n    m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize) / scale;\n    if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);\n    if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize);\n    if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);\n    if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);\n  }\n\n  /*** step 2. The main Jacobi SVD iteration. ***/\n  RealScalar maxDiagEntry = m_workMatrix.cwiseAbs().diagonal().maxCoeff();\n\n  bool finished = false;\n  while(!finished)\n  {\n    finished = true;\n\n    // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix\n\n    for(Index p = 1; p < m_diagSize; ++p)\n    {\n      for(Index q = 0; q < p; ++q)\n      {\n        // if this 2x2 sub-matrix is not diagonal already...\n        // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't\n        // keep us iterating forever. 
Similarly, small denormal numbers are considered zero.\n        RealScalar threshold = numext::maxi<RealScalar>(considerAsZero, precision * maxDiagEntry);\n        if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold)\n        {\n          finished = false;\n          // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal\n          // the complex to real operation returns true if the updated 2x2 block is not already diagonal\n          if(internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q, maxDiagEntry))\n          {\n            JacobiRotation<RealScalar> j_left, j_right;\n            internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);\n\n            // accumulate resulting Jacobi rotations\n            m_workMatrix.applyOnTheLeft(p,q,j_left);\n            if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());\n\n            m_workMatrix.applyOnTheRight(p,q,j_right);\n            if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);\n\n            // keep track of the largest diagonal coefficient\n            maxDiagEntry = numext::maxi<RealScalar>(maxDiagEntry,numext::maxi<RealScalar>(abs(m_workMatrix.coeff(p,p)), abs(m_workMatrix.coeff(q,q))));\n          }\n        }\n      }\n    }\n  }\n\n  /*** step 3. 
The work matrix is now diagonal, so ensure it's positive so its diagonal entries are the singular values ***/\n\n  for(Index i = 0; i < m_diagSize; ++i)\n  {\n    // For a complex matrix, some diagonal coefficients might note have been\n    // treated by svd_precondition_2x2_block_to_be_real, and the imaginary part\n    // of some diagonal entry might not be null.\n    if(NumTraits<Scalar>::IsComplex && abs(numext::imag(m_workMatrix.coeff(i,i)))>considerAsZero)\n    {\n      RealScalar a = abs(m_workMatrix.coeff(i,i));\n      m_singularValues.coeffRef(i) = abs(a);\n      if(computeU()) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;\n    }\n    else\n    {\n      // m_workMatrix.coeff(i,i) is already real, no difficulty:\n      RealScalar a = numext::real(m_workMatrix.coeff(i,i));\n      m_singularValues.coeffRef(i) = abs(a);\n      if(computeU() && (a<RealScalar(0))) m_matrixU.col(i) = -m_matrixU.col(i);\n    }\n  }\n  \n  m_singularValues *= scale;\n\n  /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/\n\n  m_nonzeroSingularValues = m_diagSize;\n  for(Index i = 0; i < m_diagSize; i++)\n  {\n    Index pos;\n    RealScalar maxRemainingSingularValue = m_singularValues.tail(m_diagSize-i).maxCoeff(&pos);\n    if(maxRemainingSingularValue == RealScalar(0))\n    {\n      m_nonzeroSingularValues = i;\n      break;\n    }\n    if(pos)\n    {\n      pos += i;\n      std::swap(m_singularValues.coeffRef(i), m_singularValues.coeffRef(pos));\n      if(computeU()) m_matrixU.col(pos).swap(m_matrixU.col(i));\n      if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));\n    }\n  }\n\n  m_isInitialized = true;\n  return *this;\n}\n\n/** \\svd_module\n  *\n  * \\return the singular value decomposition of \\c *this computed by two-sided\n  * Jacobi transformations.\n  *\n  * \\sa class JacobiSVD\n  */\ntemplate<typename Derived>\nJacobiSVD<typename MatrixBase<Derived>::PlainObject>\nMatrixBase<Derived>::jacobiSvd(unsigned 
int computationOptions) const\n{\n  return JacobiSVD<PlainObject>(*this, computationOptions);\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_JACOBISVD_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SVD/JacobiSVD_LAPACKE.h",
    "content": "/*\n Copyright (c) 2011, Intel Corporation. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n * Neither the name of Intel Corporation nor the names of its contributors may\n   be used to endorse or promote products derived from this software without\n   specific prior written permission.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n ********************************************************************************\n *   Content : Eigen bindings to LAPACKe\n *    Singular Value Decomposition - SVD.\n ********************************************************************************\n*/\n\n#ifndef EIGEN_JACOBISVD_LAPACKE_H\n#define EIGEN_JACOBISVD_LAPACKE_H\n\nnamespace Eigen { \n\n/** \\internal Specialization for the data types supported by LAPACKe */\n\n#define EIGEN_LAPACKE_SVD(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW) \\\ntemplate<> inline \\\nJacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>& \\\nJacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix, unsigned int computationOptions) \\\n{ \\\n  typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \\\n  /*typedef MatrixType::Scalar Scalar;*/ \\\n  /*typedef MatrixType::RealScalar RealScalar;*/ \\\n  allocate(matrix.rows(), matrix.cols(), computationOptions); \\\n\\\n  /*const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();*/ \\\n  m_nonzeroSingularValues = m_diagSize; \\\n\\\n  lapack_int lda = internal::convert_index<lapack_int>(matrix.outerStride()), ldu, ldvt; \\\n  lapack_int matrix_order = LAPACKE_COLROW; \\\n  char jobu, jobvt; \\\n  
LAPACKE_TYPE *u, *vt, dummy; \\\n  jobu  = (m_computeFullU) ? 'A' : (m_computeThinU) ? 'S' : 'N'; \\\n  jobvt = (m_computeFullV) ? 'A' : (m_computeThinV) ? 'S' : 'N'; \\\n  if (computeU()) { \\\n    ldu  = internal::convert_index<lapack_int>(m_matrixU.outerStride()); \\\n    u    = (LAPACKE_TYPE*)m_matrixU.data(); \\\n  } else { ldu=1; u=&dummy; }\\\n  MatrixType localV; \\\n  ldvt = (m_computeFullV) ? internal::convert_index<lapack_int>(m_cols) : (m_computeThinV) ? internal::convert_index<lapack_int>(m_diagSize) : 1; \\\n  if (computeV()) { \\\n    localV.resize(ldvt, m_cols); \\\n    vt   = (LAPACKE_TYPE*)localV.data(); \\\n  } else { ldvt=1; vt=&dummy; }\\\n  Matrix<LAPACKE_RTYPE, Dynamic, Dynamic> superb; superb.resize(m_diagSize, 1); \\\n  MatrixType m_temp; m_temp = matrix; \\\n  LAPACKE_##LAPACKE_PREFIX##gesvd( matrix_order, jobu, jobvt, internal::convert_index<lapack_int>(m_rows), internal::convert_index<lapack_int>(m_cols), (LAPACKE_TYPE*)m_temp.data(), lda, (LAPACKE_RTYPE*)m_singularValues.data(), u, ldu, vt, ldvt, superb.data()); \\\n  if (computeV()) m_matrixV = localV.adjoint(); \\\n /* for(int i=0;i<m_diagSize;i++) if (m_singularValues.coeffRef(i) < precision) { m_nonzeroSingularValues--; m_singularValues.coeffRef(i)=RealScalar(0);}*/ \\\n  m_isInitialized = true; \\\n  return *this; \\\n}\n\nEIGEN_LAPACKE_SVD(double,   double,                double, d, ColMajor, LAPACK_COL_MAJOR)\nEIGEN_LAPACKE_SVD(float,    float,                 float , s, ColMajor, LAPACK_COL_MAJOR)\nEIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, ColMajor, LAPACK_COL_MAJOR)\nEIGEN_LAPACKE_SVD(scomplex, lapack_complex_float,  float , c, ColMajor, LAPACK_COL_MAJOR)\n\nEIGEN_LAPACKE_SVD(double,   double,                double, d, RowMajor, LAPACK_ROW_MAJOR)\nEIGEN_LAPACKE_SVD(float,    float,                 float , s, RowMajor, LAPACK_ROW_MAJOR)\nEIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, RowMajor, LAPACK_ROW_MAJOR)\nEIGEN_LAPACKE_SVD(scomplex, 
lapack_complex_float,  float , c, RowMajor, LAPACK_ROW_MAJOR)\n\n} // end namespace Eigen\n\n#endif // EIGEN_JACOBISVD_LAPACKE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SVD/SVDBase.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>\n// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>\n// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>\n// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SVDBASE_H\n#define EIGEN_SVDBASE_H\n\nnamespace Eigen {\n/** \\ingroup SVD_Module\n *\n *\n * \\class SVDBase\n *\n * \\brief Base class of SVD algorithms\n *\n * \\tparam Derived the type of the actual SVD decomposition\n *\n * SVD decomposition consists in decomposing any n-by-p matrix \\a A as a product\n *   \\f[ A = U S V^* \\f]\n * where \\a U is a n-by-n unitary, \\a V is a p-by-p unitary, and \\a S is a n-by-p real positive matrix which is zero outside of its main diagonal;\n * the diagonal entries of S are known as the \\em singular \\em values of \\a A and the columns of \\a U and \\a V are known as the left\n * and right \\em singular \\em vectors of \\a A respectively.\n *\n * Singular values are always sorted in decreasing order.\n *\n * \n * You can ask for only \\em thin \\a U or \\a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \\a m be the\n * smaller value among \\a n and \\a p, there are only \\a m singular vectors; the remaining columns of \\a U and \\a V do not correspond to actual\n * singular vectors. Asking for \\em thin \\a U or \\a V means asking for only their \\a m first columns to be formed. So \\a U is then a n-by-m matrix,\n * and \\a V is then a p-by-m matrix. 
Notice that thin \\a U and \\a V are all you need for (least squares) solving.\n *  \n * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to\n * terminate in finite (and reasonable) time.\n * \\sa class BDCSVD, class JacobiSVD\n */\ntemplate<typename Derived>\nclass SVDBase\n{\n\npublic:\n  typedef typename internal::traits<Derived>::MatrixType MatrixType;\n  typedef typename MatrixType::Scalar Scalar;\n  typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;\n  typedef typename MatrixType::StorageIndex StorageIndex;\n  typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n  enum {\n    RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n    ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n    DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),\n    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,\n    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,\n    MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),\n    MatrixOptions = MatrixType::Options\n  };\n\n  typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixUType;\n  typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime> MatrixVType;\n  typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;\n  \n  Derived& derived() { return *static_cast<Derived*>(this); }\n  const Derived& derived() const { return *static_cast<const Derived*>(this); }\n\n  /** \\returns the \\a U matrix.\n   *\n   * For the SVD decomposition of a n-by-p matrix, letting \\a m be the minimum of \\a n and \\a p,\n   * the U matrix is n-by-n if you asked for \\link Eigen::ComputeFullU ComputeFullU \\endlink, and is n-by-m if you asked for \\link Eigen::ComputeThinU 
ComputeThinU \\endlink.\n   *\n   * The \\a m first columns of \\a U are the left singular vectors of the matrix being decomposed.\n   *\n   * This method asserts that you asked for \\a U to be computed.\n   */\n  const MatrixUType& matrixU() const\n  {\n    eigen_assert(m_isInitialized && \"SVD is not initialized.\");\n    eigen_assert(computeU() && \"This SVD decomposition didn't compute U. Did you ask for it?\");\n    return m_matrixU;\n  }\n\n  /** \\returns the \\a V matrix.\n   *\n   * For the SVD decomposition of a n-by-p matrix, letting \\a m be the minimum of \\a n and \\a p,\n   * the V matrix is p-by-p if you asked for \\link Eigen::ComputeFullV ComputeFullV \\endlink, and is p-by-m if you asked for \\link Eigen::ComputeThinV ComputeThinV \\endlink.\n   *\n   * The \\a m first columns of \\a V are the right singular vectors of the matrix being decomposed.\n   *\n   * This method asserts that you asked for \\a V to be computed.\n   */\n  const MatrixVType& matrixV() const\n  {\n    eigen_assert(m_isInitialized && \"SVD is not initialized.\");\n    eigen_assert(computeV() && \"This SVD decomposition didn't compute V. Did you ask for it?\");\n    return m_matrixV;\n  }\n\n  /** \\returns the vector of singular values.\n   *\n   * For the SVD decomposition of a n-by-p matrix, letting \\a m be the minimum of \\a n and \\a p, the\n   * returned vector has size \\a m.  
Singular values are always sorted in decreasing order.\n   */\n  const SingularValuesType& singularValues() const\n  {\n    eigen_assert(m_isInitialized && \"SVD is not initialized.\");\n    return m_singularValues;\n  }\n\n  /** \\returns the number of singular values that are not exactly 0 */\n  Index nonzeroSingularValues() const\n  {\n    eigen_assert(m_isInitialized && \"SVD is not initialized.\");\n    return m_nonzeroSingularValues;\n  }\n  \n  /** \\returns the rank of the matrix of which \\c *this is the SVD.\n    *\n    * \\note This method has to determine which singular values should be considered nonzero.\n    *       For that, it uses the threshold value that you can control by calling\n    *       setThreshold(const RealScalar&).\n    */\n  inline Index rank() const\n  {\n    using std::abs;\n    eigen_assert(m_isInitialized && \"JacobiSVD is not initialized.\");\n    if(m_singularValues.size()==0) return 0;\n    RealScalar premultiplied_threshold = numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());\n    Index i = m_nonzeroSingularValues-1;\n    while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;\n    return i+1;\n  }\n  \n  /** Allows to prescribe a threshold to be used by certain methods, such as rank() and solve(),\n    * which need to determine when singular values are to be considered nonzero.\n    * This is not used for the SVD decomposition itself.\n    *\n    * When it needs to get the threshold value, Eigen calls threshold().\n    * The default is \\c NumTraits<Scalar>::epsilon()\n    *\n    * \\param threshold The new value to use as the threshold.\n    *\n    * A singular value will be considered nonzero if its value is strictly greater than\n    *  \\f$ \\vert singular value \\vert \\leqslant threshold \\times \\vert max singular value \\vert \\f$.\n    *\n    * If you want to come back to the default behavior, call setThreshold(Default_t)\n    */\n  Derived& 
setThreshold(const RealScalar& threshold)\n  {\n    m_usePrescribedThreshold = true;\n    m_prescribedThreshold = threshold;\n    return derived();\n  }\n\n  /** Allows to come back to the default behavior, letting Eigen use its default formula for\n    * determining the threshold.\n    *\n    * You should pass the special object Eigen::Default as parameter here.\n    * \\code svd.setThreshold(Eigen::Default); \\endcode\n    *\n    * See the documentation of setThreshold(const RealScalar&).\n    */\n  Derived& setThreshold(Default_t)\n  {\n    m_usePrescribedThreshold = false;\n    return derived();\n  }\n\n  /** Returns the threshold that will be used by certain methods such as rank().\n    *\n    * See the documentation of setThreshold(const RealScalar&).\n    */\n  RealScalar threshold() const\n  {\n    eigen_assert(m_isInitialized || m_usePrescribedThreshold);\n    return m_usePrescribedThreshold ? m_prescribedThreshold\n                                    : (std::max<Index>)(1,m_diagSize)*NumTraits<Scalar>::epsilon();\n  }\n\n  /** \\returns true if \\a U (full or thin) is asked for in this SVD decomposition */\n  inline bool computeU() const { return m_computeFullU || m_computeThinU; }\n  /** \\returns true if \\a V (full or thin) is asked for in this SVD decomposition */\n  inline bool computeV() const { return m_computeFullV || m_computeThinV; }\n\n  inline Index rows() const { return m_rows; }\n  inline Index cols() const { return m_cols; }\n  \n  /** \\returns a (least squares) solution of \\f$ A x = b \\f$ using the current SVD decomposition of A.\n    *\n    * \\param b the right-hand-side of the equation to solve.\n    *\n    * \\note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.\n    *\n    * \\note SVD solving is implicitly least-squares. 
Thus, this method serves both purposes of exact solving and least-squares solving.\n    * In other words, the returned solution is guaranteed to minimize the Euclidean norm \\f$ \\Vert A x - b \\Vert \\f$.\n    */\n  template<typename Rhs>\n  inline const Solve<Derived, Rhs>\n  solve(const MatrixBase<Rhs>& b) const\n  {\n    eigen_assert(m_isInitialized && \"SVD is not initialized.\");\n    eigen_assert(computeU() && computeV() && \"SVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).\");\n    return Solve<Derived, Rhs>(derived(), b.derived());\n  }\n  \n  #ifndef EIGEN_PARSED_BY_DOXYGEN\n  template<typename RhsType, typename DstType>\n  void _solve_impl(const RhsType &rhs, DstType &dst) const;\n  #endif\n\nprotected:\n  \n  static void check_template_parameters()\n  {\n    EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);\n  }\n  \n  // return true if already allocated\n  bool allocate(Index rows, Index cols, unsigned int computationOptions) ;\n\n  MatrixUType m_matrixU;\n  MatrixVType m_matrixV;\n  SingularValuesType m_singularValues;\n  bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;\n  bool m_computeFullU, m_computeThinU;\n  bool m_computeFullV, m_computeThinV;\n  unsigned int m_computationOptions;\n  Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;\n  RealScalar m_prescribedThreshold;\n\n  /** \\brief Default Constructor.\n   *\n   * Default constructor of SVDBase\n   */\n  SVDBase()\n    : m_isInitialized(false),\n      m_isAllocated(false),\n      m_usePrescribedThreshold(false),\n      m_computationOptions(0),\n      m_rows(-1), m_cols(-1), m_diagSize(0)\n  {\n    check_template_parameters();\n  }\n\n\n};\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate<typename Derived>\ntemplate<typename RhsType, typename DstType>\nvoid SVDBase<Derived>::_solve_impl(const RhsType &rhs, DstType &dst) const\n{\n  eigen_assert(rhs.rows() == rows());\n\n  // A = U S V^*\n  // So A^{-1} = V S^{-1} U^*\n\n  Matrix<Scalar, Dynamic, 
RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;\n  Index l_rank = rank();\n  tmp.noalias() =  m_matrixU.leftCols(l_rank).adjoint() * rhs;\n  tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp;\n  dst = m_matrixV.leftCols(l_rank) * tmp;\n}\n#endif\n\ntemplate<typename MatrixType>\nbool SVDBase<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)\n{\n  eigen_assert(rows >= 0 && cols >= 0);\n\n  if (m_isAllocated &&\n      rows == m_rows &&\n      cols == m_cols &&\n      computationOptions == m_computationOptions)\n  {\n    return true;\n  }\n\n  m_rows = rows;\n  m_cols = cols;\n  m_isInitialized = false;\n  m_isAllocated = true;\n  m_computationOptions = computationOptions;\n  m_computeFullU = (computationOptions & ComputeFullU) != 0;\n  m_computeThinU = (computationOptions & ComputeThinU) != 0;\n  m_computeFullV = (computationOptions & ComputeFullV) != 0;\n  m_computeThinV = (computationOptions & ComputeThinV) != 0;\n  eigen_assert(!(m_computeFullU && m_computeThinU) && \"SVDBase: you can't ask for both full and thin U\");\n  eigen_assert(!(m_computeFullV && m_computeThinV) && \"SVDBase: you can't ask for both full and thin V\");\n  eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&\n\t       \"SVDBase: thin U and V are only available when your matrix has a dynamic number of columns.\");\n\n  m_diagSize = (std::min)(m_rows, m_cols);\n  m_singularValues.resize(m_diagSize);\n  if(RowsAtCompileTime==Dynamic)\n    m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0);\n  if(ColsAtCompileTime==Dynamic)\n    m_matrixV.resize(m_cols, m_computeFullV ? m_cols : m_computeThinV ? m_diagSize : 0);\n\n  return false;\n}\n\n}// end namespace\n\n#endif // EIGEN_SVDBASE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SVD/UpperBidiagonalization.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2013-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_BIDIAGONALIZATION_H\n#define EIGEN_BIDIAGONALIZATION_H\n\nnamespace Eigen { \n\nnamespace internal {\n// UpperBidiagonalization will probably be replaced by a Bidiagonalization class, don't want to make it stable API.\n// At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class.\n\ntemplate<typename _MatrixType> class UpperBidiagonalization\n{\n  public:\n\n    typedef _MatrixType MatrixType;\n    enum {\n      RowsAtCompileTime = MatrixType::RowsAtCompileTime,\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      ColsAtCompileTimeMinusOne = internal::decrement_size<ColsAtCompileTime>::ret\n    };\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n    typedef Eigen::Index Index; ///< \\deprecated since Eigen 3.3\n    typedef Matrix<Scalar, 1, ColsAtCompileTime> RowVectorType;\n    typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType;\n    typedef BandMatrix<RealScalar, ColsAtCompileTime, ColsAtCompileTime, 1, 0, RowMajor> BidiagonalType;\n    typedef Matrix<Scalar, ColsAtCompileTime, 1> DiagVectorType;\n    typedef Matrix<Scalar, ColsAtCompileTimeMinusOne, 1> SuperDiagVectorType;\n    typedef HouseholderSequence<\n              const MatrixType,\n              const typename internal::remove_all<typename Diagonal<const MatrixType,0>::ConjugateReturnType>::type\n            > HouseholderUSequenceType;\n    typedef HouseholderSequence<\n              const typename 
internal::remove_all<typename MatrixType::ConjugateReturnType>::type,\n              Diagonal<const MatrixType,1>,\n              OnTheRight\n            > HouseholderVSequenceType;\n    \n    /**\n    * \\brief Default Constructor.\n    *\n    * The default constructor is useful in cases in which the user intends to\n    * perform decompositions via Bidiagonalization::compute(const MatrixType&).\n    */\n    UpperBidiagonalization() : m_householder(), m_bidiagonal(), m_isInitialized(false) {}\n\n    explicit UpperBidiagonalization(const MatrixType& matrix)\n      : m_householder(matrix.rows(), matrix.cols()),\n        m_bidiagonal(matrix.cols(), matrix.cols()),\n        m_isInitialized(false)\n    {\n      compute(matrix);\n    }\n    \n    UpperBidiagonalization& compute(const MatrixType& matrix);\n    UpperBidiagonalization& computeUnblocked(const MatrixType& matrix);\n    \n    const MatrixType& householder() const { return m_householder; }\n    const BidiagonalType& bidiagonal() const { return m_bidiagonal; }\n    \n    const HouseholderUSequenceType householderU() const\n    {\n      eigen_assert(m_isInitialized && \"UpperBidiagonalization is not initialized.\");\n      return HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate());\n    }\n\n    const HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy\n    {\n      eigen_assert(m_isInitialized && \"UpperBidiagonalization is not initialized.\");\n      return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>())\n             .setLength(m_householder.cols()-1)\n             .setShift(1);\n    }\n    \n  protected:\n    MatrixType m_householder;\n    BidiagonalType m_bidiagonal;\n    bool m_isInitialized;\n};\n\n// Standard upper bidiagonalization without fancy optimizations\n// This version should be faster for small matrix size\ntemplate<typename MatrixType>\nvoid 
upperbidiagonalization_inplace_unblocked(MatrixType& mat,\n                                              typename MatrixType::RealScalar *diagonal,\n                                              typename MatrixType::RealScalar *upper_diagonal,\n                                              typename MatrixType::Scalar* tempData = 0)\n{\n  typedef typename MatrixType::Scalar Scalar;\n\n  Index rows = mat.rows();\n  Index cols = mat.cols();\n\n  typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixType::MaxRowsAtCompileTime,1> TempType;\n  TempType tempVector;\n  if(tempData==0)\n  {\n    tempVector.resize(rows);\n    tempData = tempVector.data();\n  }\n\n  for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k)\n  {\n    Index remainingRows = rows - k;\n    Index remainingCols = cols - k - 1;\n\n    // construct left householder transform in-place in A\n    mat.col(k).tail(remainingRows)\n       .makeHouseholderInPlace(mat.coeffRef(k,k), diagonal[k]);\n    // apply householder transform to remaining part of A on the left\n    mat.bottomRightCorner(remainingRows, remainingCols)\n       .applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), mat.coeff(k,k), tempData);\n\n    if(k == cols-1) break;\n\n    // construct right householder transform in-place in mat\n    mat.row(k).tail(remainingCols)\n       .makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]);\n    // apply householder transform to remaining part of mat on the left\n    mat.bottomRightCorner(remainingRows-1, remainingCols)\n       .applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).transpose(), mat.coeff(k,k+1), tempData);\n  }\n}\n\n/** \\internal\n  * Helper routine for the block reduction to upper bidiagonal form.\n  *\n  * Let's partition the matrix A:\n  * \n  *      | A00 A01 |\n  *  A = |         |\n  *      | A10 A11 |\n  *\n  * This function reduces to bidiagonal form the left \\c rows x \\a blockSize vertical panel [A00/A10]\n  * and the \\a blockSize x \\c cols horizontal 
panel [A00 A01] of the matrix \\a A. The bottom-right block A11\n  * is updated using matrix-matrix products:\n  *   A22 -= V * Y^T - X * U^T\n  * where V and U contains the left and right Householder vectors. U and V are stored in A10, and A01\n  * respectively, and the update matrices X and Y are computed during the reduction.\n  * \n  */\ntemplate<typename MatrixType>\nvoid upperbidiagonalization_blocked_helper(MatrixType& A,\n                                           typename MatrixType::RealScalar *diagonal,\n                                           typename MatrixType::RealScalar *upper_diagonal,\n                                           Index bs,\n                                           Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,\n                                                      traits<MatrixType>::Flags & RowMajorBit> > X,\n                                           Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,\n                                                      traits<MatrixType>::Flags & RowMajorBit> > Y)\n{\n  typedef typename MatrixType::Scalar Scalar;\n  enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };\n  typedef InnerStride<int(StorageOrder) == int(ColMajor) ? 1 : Dynamic> ColInnerStride;\n  typedef InnerStride<int(StorageOrder) == int(ColMajor) ? 
Dynamic : 1> RowInnerStride;\n  typedef Ref<Matrix<Scalar, Dynamic, 1>, 0, ColInnerStride>    SubColumnType;\n  typedef Ref<Matrix<Scalar, 1, Dynamic>, 0, RowInnerStride>    SubRowType;\n  typedef Ref<Matrix<Scalar, Dynamic, Dynamic, StorageOrder > > SubMatType;\n  \n  Index brows = A.rows();\n  Index bcols = A.cols();\n\n  Scalar tau_u, tau_u_prev(0), tau_v;\n\n  for(Index k = 0; k < bs; ++k)\n  {\n    Index remainingRows = brows - k;\n    Index remainingCols = bcols - k - 1;\n\n    SubMatType X_k1( X.block(k,0, remainingRows,k) );\n    SubMatType V_k1( A.block(k,0, remainingRows,k) );\n\n    // 1 - update the k-th column of A\n    SubColumnType v_k = A.col(k).tail(remainingRows);\n          v_k -= V_k1 * Y.row(k).head(k).adjoint();\n    if(k) v_k -= X_k1 * A.col(k).head(k);\n    \n    // 2 - construct left Householder transform in-place\n    v_k.makeHouseholderInPlace(tau_v, diagonal[k]);\n       \n    if(k+1<bcols)\n    {\n      SubMatType Y_k  ( Y.block(k+1,0, remainingCols, k+1) );\n      SubMatType U_k1 ( A.block(0,k+1, k,remainingCols) );\n      \n      // this eases the application of Householder transforAions\n      // A(k,k) will store tau_v later\n      A(k,k) = Scalar(1);\n\n      // 3 - Compute y_k^T = tau_v * ( A^T*v_k - Y_k-1*V_k-1^T*v_k - U_k-1*X_k-1^T*v_k )\n      {\n        SubColumnType y_k( Y.col(k).tail(remainingCols) );\n        \n        // let's use the begining of column k of Y as a temporary vector\n        SubColumnType tmp( Y.col(k).head(k) );\n        y_k.noalias()  = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck\n        tmp.noalias()  = V_k1.adjoint()  * v_k;\n        y_k.noalias() -= Y_k.leftCols(k) * tmp;\n        tmp.noalias()  = X_k1.adjoint()  * v_k;\n        y_k.noalias() -= U_k1.adjoint()  * tmp;\n        y_k *= numext::conj(tau_v);\n      }\n\n      // 4 - update k-th row of A (it will become u_k)\n      SubRowType u_k( A.row(k).tail(remainingCols) );\n      u_k = u_k.conjugate();\n      {\n        
u_k -= Y_k * A.row(k).head(k+1).adjoint();\n        if(k) u_k -= U_k1.adjoint() * X.row(k).head(k).adjoint();\n      }\n\n      // 5 - construct right Householder transform in-place\n      u_k.makeHouseholderInPlace(tau_u, upper_diagonal[k]);\n\n      // this eases the application of Householder transformations\n      // A(k,k+1) will store tau_u later\n      A(k,k+1) = Scalar(1);\n\n      // 6 - Compute x_k = tau_u * ( A*u_k - X_k-1*U_k-1^T*u_k - V_k*Y_k^T*u_k )\n      {\n        SubColumnType x_k ( X.col(k).tail(remainingRows-1) );\n        \n        // let's use the begining of column k of X as a temporary vectors\n        // note that tmp0 and tmp1 overlaps\n        SubColumnType tmp0 ( X.col(k).head(k) ),\n                      tmp1 ( X.col(k).head(k+1) );\n                    \n        x_k.noalias()   = A.block(k+1,k+1, remainingRows-1,remainingCols) * u_k.transpose(); // bottleneck\n        tmp0.noalias()  = U_k1 * u_k.transpose();\n        x_k.noalias()  -= X_k1.bottomRows(remainingRows-1) * tmp0;\n        tmp1.noalias()  = Y_k.adjoint() * u_k.transpose();\n        x_k.noalias()  -= A.block(k+1,0, remainingRows-1,k+1) * tmp1;\n        x_k *= numext::conj(tau_u);\n        tau_u = numext::conj(tau_u);\n        u_k = u_k.conjugate();\n      }\n\n      if(k>0) A.coeffRef(k-1,k) = tau_u_prev;\n      tau_u_prev = tau_u;\n    }\n    else\n      A.coeffRef(k-1,k) = tau_u_prev;\n\n    A.coeffRef(k,k) = tau_v;\n  }\n  \n  if(bs<bcols)\n    A.coeffRef(bs-1,bs) = tau_u_prev;\n\n  // update A22\n  if(bcols>bs && brows>bs)\n  {\n    SubMatType A11( A.bottomRightCorner(brows-bs,bcols-bs) );\n    SubMatType A10( A.block(bs,0, brows-bs,bs) );\n    SubMatType A01( A.block(0,bs, bs,bcols-bs) );\n    Scalar tmp = A01(bs-1,0);\n    A01(bs-1,0) = 1;\n    A11.noalias() -= A10 * Y.topLeftCorner(bcols,bs).bottomRows(bcols-bs).adjoint();\n    A11.noalias() -= X.topLeftCorner(brows,bs).bottomRows(brows-bs) * A01;\n    A01(bs-1,0) = tmp;\n  }\n}\n\n/** \\internal\n  *\n  * 
Implementation of a block-bidiagonal reduction.\n  * It is based on the following paper:\n  *   The Design of a Parallel Dense Linear Algebra Software Library: Reduction to Hessenberg, Tridiagonal, and Bidiagonal Form.\n  *   by Jaeyoung Choi, Jack J. Dongarra, David W. Walker. (1995)\n  *   section 3.3\n  */\ntemplate<typename MatrixType, typename BidiagType>\nvoid upperbidiagonalization_inplace_blocked(MatrixType& A, BidiagType& bidiagonal,\n                                            Index maxBlockSize=32,\n                                            typename MatrixType::Scalar* /*tempData*/ = 0)\n{\n  typedef typename MatrixType::Scalar Scalar;\n  typedef Block<MatrixType,Dynamic,Dynamic> BlockType;\n\n  Index rows = A.rows();\n  Index cols = A.cols();\n  Index size = (std::min)(rows, cols);\n\n  // X and Y are work space\n  enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };\n  Matrix<Scalar,\n         MatrixType::RowsAtCompileTime,\n         Dynamic,\n         StorageOrder,\n         MatrixType::MaxRowsAtCompileTime> X(rows,maxBlockSize);\n  Matrix<Scalar,\n         MatrixType::ColsAtCompileTime,\n         Dynamic,\n         StorageOrder,\n         MatrixType::MaxColsAtCompileTime> Y(cols,maxBlockSize);\n  Index blockSize = (std::min)(maxBlockSize,size);\n\n  Index k = 0;\n  for(k = 0; k < size; k += blockSize)\n  {\n    Index bs = (std::min)(size-k,blockSize);  // actual size of the block\n    Index brows = rows - k;                   // rows of the block\n    Index bcols = cols - k;                   // columns of the block\n\n    // partition the matrix A:\n    // \n    //      | A00 A01 A02 |\n    //      |             |\n    // A  = | A10 A11 A12 |\n    //      |             |\n    //      | A20 A21 A22 |\n    //\n    // where A11 is a bs x bs diagonal block,\n    // and let:\n    //      | A11 A12 |\n    //  B = |         |\n    //      | A21 A22 |\n\n    BlockType B = A.block(k,k,brows,bcols);\n    \n    // This stage performs the 
bidiagonalization of A11, A21, A12, and updating of A22.\n    // Finally, the algorithm continue on the updated A22.\n    //\n    // However, if B is too small, or A22 empty, then let's use an unblocked strategy\n    if(k+bs==cols || bcols<48) // somewhat arbitrary threshold\n    {\n      upperbidiagonalization_inplace_unblocked(B,\n                                               &(bidiagonal.template diagonal<0>().coeffRef(k)),\n                                               &(bidiagonal.template diagonal<1>().coeffRef(k)),\n                                               X.data()\n                                              );\n      break; // We're done\n    }\n    else\n    {\n      upperbidiagonalization_blocked_helper<BlockType>( B,\n                                                        &(bidiagonal.template diagonal<0>().coeffRef(k)),\n                                                        &(bidiagonal.template diagonal<1>().coeffRef(k)),\n                                                        bs,\n                                                        X.topLeftCorner(brows,bs),\n                                                        Y.topLeftCorner(bcols,bs)\n                                                      );\n    }\n  }\n}\n\ntemplate<typename _MatrixType>\nUpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::computeUnblocked(const _MatrixType& matrix)\n{\n  Index rows = matrix.rows();\n  Index cols = matrix.cols();\n  EIGEN_ONLY_USED_FOR_DEBUG(cols);\n\n  eigen_assert(rows >= cols && \"UpperBidiagonalization is only for Arices satisfying rows>=cols.\");\n\n  m_householder = matrix;\n\n  ColVectorType temp(rows);\n\n  upperbidiagonalization_inplace_unblocked(m_householder,\n                                           &(m_bidiagonal.template diagonal<0>().coeffRef(0)),\n                                           &(m_bidiagonal.template diagonal<1>().coeffRef(0)),\n                                           temp.data());\n\n  
m_isInitialized = true;\n  return *this;\n}\n\ntemplate<typename _MatrixType>\nUpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix)\n{\n  Index rows = matrix.rows();\n  Index cols = matrix.cols();\n  EIGEN_ONLY_USED_FOR_DEBUG(rows);\n  EIGEN_ONLY_USED_FOR_DEBUG(cols);\n\n  eigen_assert(rows >= cols && \"UpperBidiagonalization is only for Arices satisfying rows>=cols.\");\n\n  m_householder = matrix;\n  upperbidiagonalization_inplace_blocked(m_householder, m_bidiagonal);\n            \n  m_isInitialized = true;\n  return *this;\n}\n\n#if 0\n/** \\return the Householder QR decomposition of \\c *this.\n  *\n  * \\sa class Bidiagonalization\n  */\ntemplate<typename Derived>\nconst UpperBidiagonalization<typename MatrixBase<Derived>::PlainObject>\nMatrixBase<Derived>::bidiagonalization() const\n{\n  return UpperBidiagonalization<PlainObject>(eval());\n}\n#endif\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_BIDIAGONALIZATION_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCholesky/SimplicialCholesky.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SIMPLICIAL_CHOLESKY_H\n#define EIGEN_SIMPLICIAL_CHOLESKY_H\n\nnamespace Eigen { \n\nenum SimplicialCholeskyMode {\n  SimplicialCholeskyLLT,\n  SimplicialCholeskyLDLT\n};\n\nnamespace internal {\n  template<typename CholMatrixType, typename InputMatrixType>\n  struct simplicial_cholesky_grab_input {\n    typedef CholMatrixType const * ConstCholMatrixPtr;\n    static void run(const InputMatrixType& input, ConstCholMatrixPtr &pmat, CholMatrixType &tmp)\n    {\n      tmp = input;\n      pmat = &tmp;\n    }\n  };\n  \n  template<typename MatrixType>\n  struct simplicial_cholesky_grab_input<MatrixType,MatrixType> {\n    typedef MatrixType const * ConstMatrixPtr;\n    static void run(const MatrixType& input, ConstMatrixPtr &pmat, MatrixType &/*tmp*/)\n    {\n      pmat = &input;\n    }\n  };\n} // end namespace internal\n\n/** \\ingroup SparseCholesky_Module\n  * \\brief A base class for direct sparse Cholesky factorizations\n  *\n  * This is a base class for LL^T and LDL^T Cholesky factorizations of sparse matrices that are\n  * selfadjoint and positive definite. 
These factorizations allow for solving A.X = B where\n  * X and B can be either dense or sparse.\n  * \n  * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization\n  * such that the factorized matrix is P A P^-1.\n  *\n  * \\tparam Derived the type of the derived class, that is the actual factorization type.\n  *\n  */\ntemplate<typename Derived>\nclass SimplicialCholeskyBase : public SparseSolverBase<Derived>\n{\n    typedef SparseSolverBase<Derived> Base;\n    using Base::m_isInitialized;\n    \n  public:\n    typedef typename internal::traits<Derived>::MatrixType MatrixType;\n    typedef typename internal::traits<Derived>::OrderingType OrderingType;\n    enum { UpLo = internal::traits<Derived>::UpLo };\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType;\n    typedef CholMatrixType const * ConstCholMatrixPtr;\n    typedef Matrix<Scalar,Dynamic,1> VectorType;\n    typedef Matrix<StorageIndex,Dynamic,1> VectorI;\n\n    enum {\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n\n  public:\n    \n    using Base::derived;\n\n    /** Default constructor */\n    SimplicialCholeskyBase()\n      : m_info(Success), m_shiftOffset(0), m_shiftScale(1)\n    {}\n\n    explicit SimplicialCholeskyBase(const MatrixType& matrix)\n      : m_info(Success), m_shiftOffset(0), m_shiftScale(1)\n    {\n      derived().compute(matrix);\n    }\n\n    ~SimplicialCholeskyBase()\n    {\n    }\n\n    Derived& derived() { return *static_cast<Derived*>(this); }\n    const Derived& derived() const { return *static_cast<const Derived*>(this); }\n    \n    inline Index cols() const { return m_matrix.cols(); }\n    inline Index rows() const { return m_matrix.rows(); }\n    \n    /** \\brief Reports 
whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was succesful,\n      *          \\c NumericalIssue if the matrix.appears to be negative.\n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"Decomposition is not initialized.\");\n      return m_info;\n    }\n    \n    /** \\returns the permutation P\n      * \\sa permutationPinv() */\n    const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& permutationP() const\n    { return m_P; }\n    \n    /** \\returns the inverse P^-1 of the permutation P\n      * \\sa permutationP() */\n    const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& permutationPinv() const\n    { return m_Pinv; }\n\n    /** Sets the shift parameters that will be used to adjust the diagonal coefficients during the numerical factorization.\n      *\n      * During the numerical factorization, the diagonal coefficients are transformed by the following linear model:\\n\n      * \\c d_ii = \\a offset + \\a scale * \\c d_ii\n      *\n      * The default is the identity transformation with \\a offset=0, and \\a scale=1.\n      *\n      * \\returns a reference to \\c *this.\n      */\n    Derived& setShift(const RealScalar& offset, const RealScalar& scale = 1)\n    {\n      m_shiftOffset = offset;\n      m_shiftScale = scale;\n      return derived();\n    }\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** \\internal */\n    template<typename Stream>\n    void dumpMemory(Stream& s)\n    {\n      int total = 0;\n      s << \"  L:        \" << ((total+=(m_matrix.cols()+1) * sizeof(int) + m_matrix.nonZeros()*(sizeof(int)+sizeof(Scalar))) >> 20) << \"Mb\" << \"\\n\";\n      s << \"  diag:     \" << ((total+=m_diag.size() * sizeof(Scalar)) >> 20) << \"Mb\" << \"\\n\";\n      s << \"  tree:     \" << ((total+=m_parent.size() * sizeof(int)) >> 20) << \"Mb\" << \"\\n\";\n      s << \"  nonzeros: \" << ((total+=m_nonZerosPerCol.size() * sizeof(int)) >> 20) << \"Mb\" << 
\"\\n\";\n      s << \"  perm:     \" << ((total+=m_P.size() * sizeof(int)) >> 20) << \"Mb\" << \"\\n\";\n      s << \"  perm^-1:  \" << ((total+=m_Pinv.size() * sizeof(int)) >> 20) << \"Mb\" << \"\\n\";\n      s << \"  TOTAL:    \" << (total>> 20) << \"Mb\" << \"\\n\";\n    }\n\n    /** \\internal */\n    template<typename Rhs,typename Dest>\n    void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const\n    {\n      eigen_assert(m_factorizationIsOk && \"The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()\");\n      eigen_assert(m_matrix.rows()==b.rows());\n\n      if(m_info!=Success)\n        return;\n\n      if(m_P.size()>0)\n        dest = m_P * b;\n      else\n        dest = b;\n\n      if(m_matrix.nonZeros()>0) // otherwise L==I\n        derived().matrixL().solveInPlace(dest);\n\n      if(m_diag.size()>0)\n        dest = m_diag.asDiagonal().inverse() * dest;\n\n      if (m_matrix.nonZeros()>0) // otherwise U==I\n        derived().matrixU().solveInPlace(dest);\n\n      if(m_P.size()>0)\n        dest = m_Pinv * dest;\n    }\n    \n    template<typename Rhs,typename Dest>\n    void _solve_impl(const SparseMatrixBase<Rhs> &b, SparseMatrixBase<Dest> &dest) const\n    {\n      internal::solve_sparse_through_dense_panels(derived(), b, dest);\n    }\n\n#endif // EIGEN_PARSED_BY_DOXYGEN\n\n  protected:\n    \n    /** Computes the sparse Cholesky decomposition of \\a matrix */\n    template<bool DoLDLT>\n    void compute(const MatrixType& matrix)\n    {\n      eigen_assert(matrix.rows()==matrix.cols());\n      Index size = matrix.cols();\n      CholMatrixType tmp(size,size);\n      ConstCholMatrixPtr pmat;\n      ordering(matrix, pmat, tmp);\n      analyzePattern_preordered(*pmat, DoLDLT);\n      factorize_preordered<DoLDLT>(*pmat);\n    }\n    \n    template<bool DoLDLT>\n    void factorize(const MatrixType& a)\n    {\n      eigen_assert(a.rows()==a.cols());\n      Index size = a.cols();\n  
    CholMatrixType tmp(size,size);\n      ConstCholMatrixPtr pmat;\n      \n      if(m_P.size()==0 && (UpLo&Upper)==Upper)\n      {\n        // If there is no ordering, try to directly use the input matrix without any copy\n        internal::simplicial_cholesky_grab_input<CholMatrixType,MatrixType>::run(a, pmat, tmp);\n      }\n      else\n      {\n        tmp.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_P);\n        pmat = &tmp;\n      }\n      \n      factorize_preordered<DoLDLT>(*pmat);\n    }\n\n    template<bool DoLDLT>\n    void factorize_preordered(const CholMatrixType& a);\n\n    void analyzePattern(const MatrixType& a, bool doLDLT)\n    {\n      eigen_assert(a.rows()==a.cols());\n      Index size = a.cols();\n      CholMatrixType tmp(size,size);\n      ConstCholMatrixPtr pmat;\n      ordering(a, pmat, tmp);\n      analyzePattern_preordered(*pmat,doLDLT);\n    }\n    void analyzePattern_preordered(const CholMatrixType& a, bool doLDLT);\n    \n    void ordering(const MatrixType& a, ConstCholMatrixPtr &pmat, CholMatrixType& ap);\n\n    /** keeps off-diagonal entries; drops diagonal entries */\n    struct keep_diag {\n      inline bool operator() (const Index& row, const Index& col, const Scalar&) const\n      {\n        return row!=col;\n      }\n    };\n\n    mutable ComputationInfo m_info;\n    bool m_factorizationIsOk;\n    bool m_analysisIsOk;\n    \n    CholMatrixType m_matrix;\n    VectorType m_diag;                                // the diagonal coefficients (LDLT mode)\n    VectorI m_parent;                                 // elimination tree\n    VectorI m_nonZerosPerCol;\n    PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_P;     // the permutation\n    PermutationMatrix<Dynamic,Dynamic,StorageIndex> m_Pinv;  // the inverse permutation\n\n    RealScalar m_shiftOffset;\n    RealScalar m_shiftScale;\n};\n\ntemplate<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename 
_MatrixType::StorageIndex> > class SimplicialLLT;\ntemplate<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::StorageIndex> > class SimplicialLDLT;\ntemplate<typename _MatrixType, int _UpLo = Lower, typename _Ordering = AMDOrdering<typename _MatrixType::StorageIndex> > class SimplicialCholesky;\n\nnamespace internal {\n\ntemplate<typename _MatrixType, int _UpLo, typename _Ordering> struct traits<SimplicialLLT<_MatrixType,_UpLo,_Ordering> >\n{\n  typedef _MatrixType MatrixType;\n  typedef _Ordering OrderingType;\n  enum { UpLo = _UpLo };\n  typedef typename MatrixType::Scalar                         Scalar;\n  typedef typename MatrixType::StorageIndex                   StorageIndex;\n  typedef SparseMatrix<Scalar, ColMajor, StorageIndex>        CholMatrixType;\n  typedef TriangularView<const CholMatrixType, Eigen::Lower>  MatrixL;\n  typedef TriangularView<const typename CholMatrixType::AdjointReturnType, Eigen::Upper>   MatrixU;\n  static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }\n  static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }\n};\n\ntemplate<typename _MatrixType,int _UpLo, typename _Ordering> struct traits<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >\n{\n  typedef _MatrixType MatrixType;\n  typedef _Ordering OrderingType;\n  enum { UpLo = _UpLo };\n  typedef typename MatrixType::Scalar                             Scalar;\n  typedef typename MatrixType::StorageIndex                       StorageIndex;\n  typedef SparseMatrix<Scalar, ColMajor, StorageIndex>            CholMatrixType;\n  typedef TriangularView<const CholMatrixType, Eigen::UnitLower>  MatrixL;\n  typedef TriangularView<const typename CholMatrixType::AdjointReturnType, Eigen::UnitUpper> MatrixU;\n  static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }\n  static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }\n};\n\ntemplate<typename _MatrixType, int _UpLo, 
typename _Ordering> struct traits<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >\n{\n  typedef _MatrixType MatrixType;\n  typedef _Ordering OrderingType;\n  enum { UpLo = _UpLo };\n};\n\n}\n\n/** \\ingroup SparseCholesky_Module\n  * \\class SimplicialLLT\n  * \\brief A direct sparse LLT Cholesky factorizations\n  *\n  * This class provides a LL^T Cholesky factorizations of sparse matrices that are\n  * selfadjoint and positive definite. The factorization allows for solving A.X = B where\n  * X and B can be either dense or sparse.\n  * \n  * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization\n  * such that the factorized matrix is P A P^-1.\n  *\n  * \\tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  * \\tparam _UpLo the triangular part that will be used for the computations. It can be Lower\n  *               or Upper. Default is Lower.\n  * \\tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. 
Default is AMDOrdering<>\n  *\n  * \\implsparsesolverconcept\n  *\n  * \\sa class SimplicialLDLT, class AMDOrdering, class NaturalOrdering\n  */\ntemplate<typename _MatrixType, int _UpLo, typename _Ordering>\n    class SimplicialLLT : public SimplicialCholeskyBase<SimplicialLLT<_MatrixType,_UpLo,_Ordering> >\n{\npublic:\n    typedef _MatrixType MatrixType;\n    enum { UpLo = _UpLo };\n    typedef SimplicialCholeskyBase<SimplicialLLT> Base;\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef SparseMatrix<Scalar,ColMajor,Index> CholMatrixType;\n    typedef Matrix<Scalar,Dynamic,1> VectorType;\n    typedef internal::traits<SimplicialLLT> Traits;\n    typedef typename Traits::MatrixL  MatrixL;\n    typedef typename Traits::MatrixU  MatrixU;\npublic:\n    /** Default constructor */\n    SimplicialLLT() : Base() {}\n    /** Constructs and performs the LLT factorization of \\a matrix */\n    explicit SimplicialLLT(const MatrixType& matrix)\n        : Base(matrix) {}\n\n    /** \\returns an expression of the factor L */\n    inline const MatrixL matrixL() const {\n        eigen_assert(Base::m_factorizationIsOk && \"Simplicial LLT not factorized\");\n        return Traits::getL(Base::m_matrix);\n    }\n\n    /** \\returns an expression of the factor U (= L^*) */\n    inline const MatrixU matrixU() const {\n        eigen_assert(Base::m_factorizationIsOk && \"Simplicial LLT not factorized\");\n        return Traits::getU(Base::m_matrix);\n    }\n    \n    /** Computes the sparse Cholesky decomposition of \\a matrix */\n    SimplicialLLT& compute(const MatrixType& matrix)\n    {\n      Base::template compute<false>(matrix);\n      return *this;\n    }\n\n    /** Performs a symbolic decomposition on the sparcity of \\a matrix.\n      *\n      * This function is particularly useful when solving for several problems having the same structure.\n      *\n  
    * \\sa factorize()\n      */\n    void analyzePattern(const MatrixType& a)\n    {\n      Base::analyzePattern(a, false);\n    }\n\n    /** Performs a numeric decomposition of \\a matrix\n      *\n      * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed.\n      *\n      * \\sa analyzePattern()\n      */\n    void factorize(const MatrixType& a)\n    {\n      Base::template factorize<false>(a);\n    }\n\n    /** \\returns the determinant of the underlying matrix from the current factorization */\n    Scalar determinant() const\n    {\n      Scalar detL = Base::m_matrix.diagonal().prod();\n      return numext::abs2(detL);\n    }\n};\n\n/** \\ingroup SparseCholesky_Module\n  * \\class SimplicialLDLT\n  * \\brief A direct sparse LDLT Cholesky factorizations without square root.\n  *\n  * This class provides a LDL^T Cholesky factorizations without square root of sparse matrices that are\n  * selfadjoint and positive definite. The factorization allows for solving A.X = B where\n  * X and B can be either dense or sparse.\n  * \n  * In order to reduce the fill-in, a symmetric permutation P is applied prior to the factorization\n  * such that the factorized matrix is P A P^-1.\n  *\n  * \\tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  * \\tparam _UpLo the triangular part that will be used for the computations. It can be Lower\n  *               or Upper. Default is Lower.\n  * \\tparam _Ordering The ordering method to use, either AMDOrdering<> or NaturalOrdering<>. 
Default is AMDOrdering<>\n  *\n  * \\implsparsesolverconcept\n  *\n  * \\sa class SimplicialLLT, class AMDOrdering, class NaturalOrdering\n  */\ntemplate<typename _MatrixType, int _UpLo, typename _Ordering>\n    class SimplicialLDLT : public SimplicialCholeskyBase<SimplicialLDLT<_MatrixType,_UpLo,_Ordering> >\n{\npublic:\n    typedef _MatrixType MatrixType;\n    enum { UpLo = _UpLo };\n    typedef SimplicialCholeskyBase<SimplicialLDLT> Base;\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType;\n    typedef Matrix<Scalar,Dynamic,1> VectorType;\n    typedef internal::traits<SimplicialLDLT> Traits;\n    typedef typename Traits::MatrixL  MatrixL;\n    typedef typename Traits::MatrixU  MatrixU;\npublic:\n    /** Default constructor */\n    SimplicialLDLT() : Base() {}\n\n    /** Constructs and performs the LLT factorization of \\a matrix */\n    explicit SimplicialLDLT(const MatrixType& matrix)\n        : Base(matrix) {}\n\n    /** \\returns a vector expression of the diagonal D */\n    inline const VectorType vectorD() const {\n        eigen_assert(Base::m_factorizationIsOk && \"Simplicial LDLT not factorized\");\n        return Base::m_diag;\n    }\n    /** \\returns an expression of the factor L */\n    inline const MatrixL matrixL() const {\n        eigen_assert(Base::m_factorizationIsOk && \"Simplicial LDLT not factorized\");\n        return Traits::getL(Base::m_matrix);\n    }\n\n    /** \\returns an expression of the factor U (= L^*) */\n    inline const MatrixU matrixU() const {\n        eigen_assert(Base::m_factorizationIsOk && \"Simplicial LDLT not factorized\");\n        return Traits::getU(Base::m_matrix);\n    }\n\n    /** Computes the sparse Cholesky decomposition of \\a matrix */\n    SimplicialLDLT& compute(const MatrixType& matrix)\n    {\n      Base::template 
compute<true>(matrix);\n      return *this;\n    }\n    \n    /** Performs a symbolic decomposition on the sparcity of \\a matrix.\n      *\n      * This function is particularly useful when solving for several problems having the same structure.\n      *\n      * \\sa factorize()\n      */\n    void analyzePattern(const MatrixType& a)\n    {\n      Base::analyzePattern(a, true);\n    }\n\n    /** Performs a numeric decomposition of \\a matrix\n      *\n      * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed.\n      *\n      * \\sa analyzePattern()\n      */\n    void factorize(const MatrixType& a)\n    {\n      Base::template factorize<true>(a);\n    }\n\n    /** \\returns the determinant of the underlying matrix from the current factorization */\n    Scalar determinant() const\n    {\n      return Base::m_diag.prod();\n    }\n};\n\n/** \\deprecated use SimplicialLDLT or class SimplicialLLT\n  * \\ingroup SparseCholesky_Module\n  * \\class SimplicialCholesky\n  *\n  * \\sa class SimplicialLDLT, class SimplicialLLT\n  */\ntemplate<typename _MatrixType, int _UpLo, typename _Ordering>\n    class SimplicialCholesky : public SimplicialCholeskyBase<SimplicialCholesky<_MatrixType,_UpLo,_Ordering> >\n{\npublic:\n    typedef _MatrixType MatrixType;\n    enum { UpLo = _UpLo };\n    typedef SimplicialCholeskyBase<SimplicialCholesky> Base;\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> CholMatrixType;\n    typedef Matrix<Scalar,Dynamic,1> VectorType;\n    typedef internal::traits<SimplicialCholesky> Traits;\n    typedef internal::traits<SimplicialLDLT<MatrixType,UpLo> > LDLTTraits;\n    typedef internal::traits<SimplicialLLT<MatrixType,UpLo>  > LLTTraits;\n  public:\n    SimplicialCholesky() : Base(), m_LDLT(true) {}\n\n    explicit 
SimplicialCholesky(const MatrixType& matrix)\n      : Base(), m_LDLT(true)\n    {\n      compute(matrix);\n    }\n\n    SimplicialCholesky& setMode(SimplicialCholeskyMode mode)\n    {\n      switch(mode)\n      {\n      case SimplicialCholeskyLLT:\n        m_LDLT = false;\n        break;\n      case SimplicialCholeskyLDLT:\n        m_LDLT = true;\n        break;\n      default:\n        break;\n      }\n\n      return *this;\n    }\n\n    inline const VectorType vectorD() const {\n        eigen_assert(Base::m_factorizationIsOk && \"Simplicial Cholesky not factorized\");\n        return Base::m_diag;\n    }\n    inline const CholMatrixType rawMatrix() const {\n        eigen_assert(Base::m_factorizationIsOk && \"Simplicial Cholesky not factorized\");\n        return Base::m_matrix;\n    }\n    \n    /** Computes the sparse Cholesky decomposition of \\a matrix */\n    SimplicialCholesky& compute(const MatrixType& matrix)\n    {\n      if(m_LDLT)\n        Base::template compute<true>(matrix);\n      else\n        Base::template compute<false>(matrix);\n      return *this;\n    }\n\n    /** Performs a symbolic decomposition on the sparcity of \\a matrix.\n      *\n      * This function is particularly useful when solving for several problems having the same structure.\n      *\n      * \\sa factorize()\n      */\n    void analyzePattern(const MatrixType& a)\n    {\n      Base::analyzePattern(a, m_LDLT);\n    }\n\n    /** Performs a numeric decomposition of \\a matrix\n      *\n      * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed.\n      *\n      * \\sa analyzePattern()\n      */\n    void factorize(const MatrixType& a)\n    {\n      if(m_LDLT)\n        Base::template factorize<true>(a);\n      else\n        Base::template factorize<false>(a);\n    }\n\n    /** \\internal */\n    template<typename Rhs,typename Dest>\n    void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const\n    {\n  
     eigen_assert(Base::m_factorizationIsOk && \"The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()\");\n      eigen_assert(Base::m_matrix.rows()==b.rows());\n\n      if(Base::m_info!=Success)\n        return;\n\n      if(Base::m_P.size()>0)\n        dest = Base::m_P * b;\n      else\n        dest = b;\n\n      if(Base::m_matrix.nonZeros()>0) // otherwise L==I\n      {\n        if(m_LDLT)\n          LDLTTraits::getL(Base::m_matrix).solveInPlace(dest);\n        else\n          LLTTraits::getL(Base::m_matrix).solveInPlace(dest);\n      }\n\n      if(Base::m_diag.size()>0)\n        dest = Base::m_diag.asDiagonal().inverse() * dest;\n\n      if (Base::m_matrix.nonZeros()>0) // otherwise U==I\n      {\n        if(m_LDLT)\n          LDLTTraits::getU(Base::m_matrix).solveInPlace(dest);\n        else\n          LLTTraits::getU(Base::m_matrix).solveInPlace(dest);\n      }\n\n      if(Base::m_P.size()>0)\n        dest = Base::m_Pinv * dest;\n    }\n    \n    /** \\internal */\n    template<typename Rhs,typename Dest>\n    void _solve_impl(const SparseMatrixBase<Rhs> &b, SparseMatrixBase<Dest> &dest) const\n    {\n      internal::solve_sparse_through_dense_panels(*this, b, dest);\n    }\n    \n    Scalar determinant() const\n    {\n      if(m_LDLT)\n      {\n        return Base::m_diag.prod();\n      }\n      else\n      {\n        Scalar detL = Diagonal<const CholMatrixType>(Base::m_matrix).prod();\n        return numext::abs2(detL);\n      }\n    }\n    \n  protected:\n    bool m_LDLT;\n};\n\ntemplate<typename Derived>\nvoid SimplicialCholeskyBase<Derived>::ordering(const MatrixType& a, ConstCholMatrixPtr &pmat, CholMatrixType& ap)\n{\n  eigen_assert(a.rows()==a.cols());\n  const Index size = a.rows();\n  pmat = &ap;\n  // Note that ordering methods compute the inverse permutation\n  if(!internal::is_same<OrderingType,NaturalOrdering<Index> >::value)\n  {\n    {\n      CholMatrixType C;\n      C = a.template 
selfadjointView<UpLo>();\n      \n      OrderingType ordering;\n      ordering(C,m_Pinv);\n    }\n\n    if(m_Pinv.size()>0) m_P = m_Pinv.inverse();\n    else                m_P.resize(0);\n    \n    ap.resize(size,size);\n    ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>().twistedBy(m_P);\n  }\n  else\n  {\n    m_Pinv.resize(0);\n    m_P.resize(0);\n    if(int(UpLo)==int(Lower) || MatrixType::IsRowMajor)\n    {\n      // we have to transpose the lower part to to the upper one\n      ap.resize(size,size);\n      ap.template selfadjointView<Upper>() = a.template selfadjointView<UpLo>();\n    }\n    else\n      internal::simplicial_cholesky_grab_input<CholMatrixType,MatrixType>::run(a, pmat, ap);\n  }  \n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SIMPLICIAL_CHOLESKY_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCholesky/SimplicialCholesky_impl.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2012 Gael Guennebaud <gael.guennebaud@inria.fr>\n\n/*\n\nNOTE: thes functions vave been adapted from the LDL library:\n\nLDL Copyright (c) 2005 by Timothy A. Davis.  All Rights Reserved.\n\nLDL License:\n\n    Your use or distribution of LDL or any modified version of\n    LDL implies that you agree to this License.\n\n    This library is free software; you can redistribute it and/or\n    modify it under the terms of the GNU Lesser General Public\n    License as published by the Free Software Foundation; either\n    version 2.1 of the License, or (at your option) any later version.\n\n    This library is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n    Lesser General Public License for more details.\n\n    You should have received a copy of the GNU Lesser General Public\n    License along with this library; if not, write to the Free Software\n    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301\n    USA\n\n    Permission is hereby granted to use or copy this program under the\n    terms of the GNU LGPL, provided that the Copyright, this License,\n    and the Availability of the original version is retained on all copies.\n    User documentation of any code that uses this code or any modified\n    version of this code must cite the Copyright, this License, the\n    Availability note, and \"Used by permission.\" Permission to modify\n    the code and to distribute modified code is granted, provided the\n    Copyright, this License, and the Availability note are retained,\n    and a notice that the code was modified is included.\n */\n\n#include \"../Core/util/NonMPL2.h\"\n\n#ifndef EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H\n#define EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H\n\nnamespace Eigen 
{\n\ntemplate<typename Derived>\nvoid SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT)\n{\n  const StorageIndex size = StorageIndex(ap.rows());\n  m_matrix.resize(size, size);\n  m_parent.resize(size);\n  m_nonZerosPerCol.resize(size);\n\n  ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0);\n\n  for(StorageIndex k = 0; k < size; ++k)\n  {\n    /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */\n    m_parent[k] = -1;             /* parent of k is not yet known */\n    tags[k] = k;                  /* mark node k as visited */\n    m_nonZerosPerCol[k] = 0;      /* count of nonzeros in column k of L */\n    for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)\n    {\n      StorageIndex i = it.index();\n      if(i < k)\n      {\n        /* follow path from i to root of etree, stop at flagged node */\n        for(; tags[i] != k; i = m_parent[i])\n        {\n          /* find parent of i if not yet determined */\n          if (m_parent[i] == -1)\n            m_parent[i] = k;\n          m_nonZerosPerCol[i]++;        /* L (k,i) is nonzero */\n          tags[i] = k;                  /* mark i as visited */\n        }\n      }\n    }\n  }\n\n  /* construct Lp index array from m_nonZerosPerCol column counts */\n  StorageIndex* Lp = m_matrix.outerIndexPtr();\n  Lp[0] = 0;\n  for(StorageIndex k = 0; k < size; ++k)\n    Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 
0 : 1);\n\n  m_matrix.resizeNonZeros(Lp[size]);\n\n  m_isInitialized     = true;\n  m_info              = Success;\n  m_analysisIsOk      = true;\n  m_factorizationIsOk = false;\n}\n\n\ntemplate<typename Derived>\ntemplate<bool DoLDLT>\nvoid SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType& ap)\n{\n  using std::sqrt;\n\n  eigen_assert(m_analysisIsOk && \"You must first call analyzePattern()\");\n  eigen_assert(ap.rows()==ap.cols());\n  eigen_assert(m_parent.size()==ap.rows());\n  eigen_assert(m_nonZerosPerCol.size()==ap.rows());\n\n  const StorageIndex size = StorageIndex(ap.rows());\n  const StorageIndex* Lp = m_matrix.outerIndexPtr();\n  StorageIndex* Li = m_matrix.innerIndexPtr();\n  Scalar* Lx = m_matrix.valuePtr();\n\n  ei_declare_aligned_stack_constructed_variable(Scalar, y, size, 0);\n  ei_declare_aligned_stack_constructed_variable(StorageIndex,  pattern, size, 0);\n  ei_declare_aligned_stack_constructed_variable(StorageIndex,  tags, size, 0);\n\n  bool ok = true;\n  m_diag.resize(DoLDLT ? 
size : 0);\n\n  for(StorageIndex k = 0; k < size; ++k)\n  {\n    // compute nonzero pattern of kth row of L, in topological order\n    y[k] = 0.0;                     // Y(0:k) is now all zero\n    StorageIndex top = size;               // stack for pattern is empty\n    tags[k] = k;                    // mark node k as visited\n    m_nonZerosPerCol[k] = 0;        // count of nonzeros in column k of L\n    for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)\n    {\n      StorageIndex i = it.index();\n      if(i <= k)\n      {\n        y[i] += numext::conj(it.value());            /* scatter A(i,k) into Y (sum duplicates) */\n        Index len;\n        for(len = 0; tags[i] != k; i = m_parent[i])\n        {\n          pattern[len++] = i;     /* L(k,i) is nonzero */\n          tags[i] = k;            /* mark i as visited */\n        }\n        while(len > 0)\n          pattern[--top] = pattern[--len];\n      }\n    }\n\n    /* compute numerical values kth row of L (a sparse triangular solve) */\n\n    RealScalar d = numext::real(y[k]) * m_shiftScale + m_shiftOffset;    // get D(k,k), apply the shift function, and clear Y(k)\n    y[k] = 0.0;\n    for(; top < size; ++top)\n    {\n      Index i = pattern[top];       /* pattern[top:n-1] is pattern of L(:,k) */\n      Scalar yi = y[i];             /* get and clear Y(i) */\n      y[i] = 0.0;\n\n      /* the nonzero entry L(k,i) */\n      Scalar l_ki;\n      if(DoLDLT)\n        l_ki = yi / m_diag[i];\n      else\n        yi = l_ki = yi / Lx[Lp[i]];\n\n      Index p2 = Lp[i] + m_nonZerosPerCol[i];\n      Index p;\n      for(p = Lp[i] + (DoLDLT ? 
0 : 1); p < p2; ++p)\n        y[Li[p]] -= numext::conj(Lx[p]) * yi;\n      d -= numext::real(l_ki * numext::conj(yi));\n      Li[p] = k;                          /* store L(k,i) in column form of L */\n      Lx[p] = l_ki;\n      ++m_nonZerosPerCol[i];              /* increment count of nonzeros in col i */\n    }\n    if(DoLDLT)\n    {\n      m_diag[k] = d;\n      if(d == RealScalar(0))\n      {\n        ok = false;                         /* failure, D(k,k) is zero */\n        break;\n      }\n    }\n    else\n    {\n      Index p = Lp[k] + m_nonZerosPerCol[k]++;\n      Li[p] = k ;                /* store L(k,k) = sqrt (d) in column k */\n      if(d <= RealScalar(0)) {\n        ok = false;              /* failure, matrix is not positive definite */\n        break;\n      }\n      Lx[p] = sqrt(d) ;\n    }\n  }\n\n  m_info = ok ? Success : NumericalIssue;\n  m_factorizationIsOk = true;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SIMPLICIAL_CHOLESKY_IMPL_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/AmbiVector.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_AMBIVECTOR_H\n#define EIGEN_AMBIVECTOR_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/** \\internal\n  * Hybrid sparse/dense vector class designed for intensive read-write operations.\n  *\n  * See BasicSparseLLT and SparseProduct for usage examples.\n  */\ntemplate<typename _Scalar, typename _StorageIndex>\nclass AmbiVector\n{\n  public:\n    typedef _Scalar Scalar;\n    typedef _StorageIndex StorageIndex;\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n\n    explicit AmbiVector(Index size)\n      : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)\n    {\n      resize(size);\n    }\n\n    void init(double estimatedDensity);\n    void init(int mode);\n\n    Index nonZeros() const;\n\n    /** Specifies a sub-vector to work on */\n    void setBounds(Index start, Index end) { m_start = convert_index(start); m_end = convert_index(end); }\n\n    void setZero();\n\n    void restart();\n    Scalar& coeffRef(Index i);\n    Scalar& coeff(Index i);\n\n    class Iterator;\n\n    ~AmbiVector() { delete[] m_buffer; }\n\n    void resize(Index size)\n    {\n      if (m_allocatedSize < size)\n        reallocate(size);\n      m_size = convert_index(size);\n    }\n\n    StorageIndex size() const { return m_size; }\n\n  protected:\n    StorageIndex convert_index(Index idx)\n    {\n      return internal::convert_index<StorageIndex>(idx);\n    }\n\n    void reallocate(Index size)\n    {\n      // if the size of the matrix is not too large, let's allocate a bit more than needed such\n      // that we can handle dense vector even in 
sparse mode.\n      delete[] m_buffer;\n      if (size<1000)\n      {\n        Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar);\n        m_allocatedElements = convert_index((allocSize*sizeof(Scalar))/sizeof(ListEl));\n        m_buffer = new Scalar[allocSize];\n      }\n      else\n      {\n        m_allocatedElements = convert_index((size*sizeof(Scalar))/sizeof(ListEl));\n        m_buffer = new Scalar[size];\n      }\n      m_size = convert_index(size);\n      m_start = 0;\n      m_end = m_size;\n    }\n\n    void reallocateSparse()\n    {\n      Index copyElements = m_allocatedElements;\n      m_allocatedElements = (std::min)(StorageIndex(m_allocatedElements*1.5),m_size);\n      Index allocSize = m_allocatedElements * sizeof(ListEl);\n      allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar);\n      Scalar* newBuffer = new Scalar[allocSize];\n      memcpy(newBuffer,  m_buffer,  copyElements * sizeof(ListEl));\n      delete[] m_buffer;\n      m_buffer = newBuffer;\n    }\n\n  protected:\n    // element type of the linked list\n    struct ListEl\n    {\n      StorageIndex next;\n      StorageIndex index;\n      Scalar value;\n    };\n\n    // used to store data in both mode\n    Scalar* m_buffer;\n    Scalar m_zero;\n    StorageIndex m_size;\n    StorageIndex m_start;\n    StorageIndex m_end;\n    StorageIndex m_allocatedSize;\n    StorageIndex m_allocatedElements;\n    StorageIndex m_mode;\n\n    // linked list mode\n    StorageIndex m_llStart;\n    StorageIndex m_llCurrent;\n    StorageIndex m_llSize;\n};\n\n/** \\returns the number of non zeros in the current sub vector */\ntemplate<typename _Scalar,typename _StorageIndex>\nIndex AmbiVector<_Scalar,_StorageIndex>::nonZeros() const\n{\n  if (m_mode==IsSparse)\n    return m_llSize;\n  else\n    return m_end - m_start;\n}\n\ntemplate<typename _Scalar,typename _StorageIndex>\nvoid AmbiVector<_Scalar,_StorageIndex>::init(double estimatedDensity)\n{\n  if (estimatedDensity>0.1)\n   
 init(IsDense);\n  else\n    init(IsSparse);\n}\n\ntemplate<typename _Scalar,typename _StorageIndex>\nvoid AmbiVector<_Scalar,_StorageIndex>::init(int mode)\n{\n  m_mode = mode;\n  if (m_mode==IsSparse)\n  {\n    m_llSize = 0;\n    m_llStart = -1;\n  }\n}\n\n/** Must be called whenever we might perform a write access\n  * with an index smaller than the previous one.\n  *\n  * Don't worry, this function is extremely cheap.\n  */\ntemplate<typename _Scalar,typename _StorageIndex>\nvoid AmbiVector<_Scalar,_StorageIndex>::restart()\n{\n  m_llCurrent = m_llStart;\n}\n\n/** Set all coefficients of current subvector to zero */\ntemplate<typename _Scalar,typename _StorageIndex>\nvoid AmbiVector<_Scalar,_StorageIndex>::setZero()\n{\n  if (m_mode==IsDense)\n  {\n    for (Index i=m_start; i<m_end; ++i)\n      m_buffer[i] = Scalar(0);\n  }\n  else\n  {\n    eigen_assert(m_mode==IsSparse);\n    m_llSize = 0;\n    m_llStart = -1;\n  }\n}\n\ntemplate<typename _Scalar,typename _StorageIndex>\n_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i)\n{\n  if (m_mode==IsDense)\n    return m_buffer[i];\n  else\n  {\n    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);\n    // TODO factorize the following code to reduce code generation\n    eigen_assert(m_mode==IsSparse);\n    if (m_llSize==0)\n    {\n      // this is the first element\n      m_llStart = 0;\n      m_llCurrent = 0;\n      ++m_llSize;\n      llElements[0].value = Scalar(0);\n      llElements[0].index = convert_index(i);\n      llElements[0].next = -1;\n      return llElements[0].value;\n    }\n    else if (i<llElements[m_llStart].index)\n    {\n      // this is going to be the new first element of the list\n      ListEl& el = llElements[m_llSize];\n      el.value = Scalar(0);\n      el.index = convert_index(i);\n      el.next = m_llStart;\n      m_llStart = m_llSize;\n      ++m_llSize;\n      m_llCurrent = m_llStart;\n      return el.value;\n    }\n    else\n    {\n      StorageIndex nextel 
= llElements[m_llCurrent].next;\n      eigen_assert(i>=llElements[m_llCurrent].index && \"you must call restart() before inserting an element with lower or equal index\");\n      while (nextel >= 0 && llElements[nextel].index<=i)\n      {\n        m_llCurrent = nextel;\n        nextel = llElements[nextel].next;\n      }\n\n      if (llElements[m_llCurrent].index==i)\n      {\n        // the coefficient already exists and we found it !\n        return llElements[m_llCurrent].value;\n      }\n      else\n      {\n        if (m_llSize>=m_allocatedElements)\n        {\n          reallocateSparse();\n          llElements = reinterpret_cast<ListEl*>(m_buffer);\n        }\n        eigen_internal_assert(m_llSize<m_allocatedElements && \"internal error: overflow in sparse mode\");\n        // let's insert a new coefficient\n        ListEl& el = llElements[m_llSize];\n        el.value = Scalar(0);\n        el.index = convert_index(i);\n        el.next = llElements[m_llCurrent].next;\n        llElements[m_llCurrent].next = m_llSize;\n        ++m_llSize;\n        return el.value;\n      }\n    }\n  }\n}\n\ntemplate<typename _Scalar,typename _StorageIndex>\n_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeff(Index i)\n{\n  if (m_mode==IsDense)\n    return m_buffer[i];\n  else\n  {\n    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);\n    eigen_assert(m_mode==IsSparse);\n    if ((m_llSize==0) || (i<llElements[m_llStart].index))\n    {\n      return m_zero;\n    }\n    else\n    {\n      Index elid = m_llStart;\n      while (elid >= 0 && llElements[elid].index<i)\n        elid = llElements[elid].next;\n\n      if (llElements[elid].index==i)\n        return llElements[m_llCurrent].value;\n      else\n        return m_zero;\n    }\n  }\n}\n\n/** Iterator over the nonzero coefficients */\ntemplate<typename _Scalar,typename _StorageIndex>\nclass AmbiVector<_Scalar,_StorageIndex>::Iterator\n{\n  public:\n    typedef _Scalar Scalar;\n    typedef typename 
NumTraits<Scalar>::Real RealScalar;\n\n    /** Default constructor\n      * \\param vec the vector on which we iterate\n      * \\param epsilon the minimal value used to prune zero coefficients.\n      * In practice, all coefficients having a magnitude smaller than \\a epsilon\n      * are skipped.\n      */\n    explicit Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)\n      : m_vector(vec)\n    {\n      using std::abs;\n      m_epsilon = epsilon;\n      m_isDense = m_vector.m_mode==IsDense;\n      if (m_isDense)\n      {\n        m_currentEl = 0;   // this is to avoid a compilation warning\n        m_cachedValue = 0; // this is to avoid a compilation warning\n        m_cachedIndex = m_vector.m_start-1;\n        ++(*this);\n      }\n      else\n      {\n        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);\n        m_currentEl = m_vector.m_llStart;\n        while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon)\n          m_currentEl = llElements[m_currentEl].next;\n        if (m_currentEl<0)\n        {\n          m_cachedValue = 0; // this is to avoid a compilation warning\n          m_cachedIndex = -1;\n        }\n        else\n        {\n          m_cachedIndex = llElements[m_currentEl].index;\n          m_cachedValue = llElements[m_currentEl].value;\n        }\n      }\n    }\n\n    StorageIndex index() const { return m_cachedIndex; }\n    Scalar value() const { return m_cachedValue; }\n\n    operator bool() const { return m_cachedIndex>=0; }\n\n    Iterator& operator++()\n    {\n      using std::abs;\n      if (m_isDense)\n      {\n        do {\n          ++m_cachedIndex;\n        } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<=m_epsilon);\n        if (m_cachedIndex<m_vector.m_end)\n          m_cachedValue = m_vector.m_buffer[m_cachedIndex];\n        else\n          m_cachedIndex=-1;\n      }\n      else\n      {\n        ListEl* EIGEN_RESTRICT llElements = 
reinterpret_cast<ListEl*>(m_vector.m_buffer);\n        do {\n          m_currentEl = llElements[m_currentEl].next;\n        } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon);\n        if (m_currentEl<0)\n        {\n          m_cachedIndex = -1;\n        }\n        else\n        {\n          m_cachedIndex = llElements[m_currentEl].index;\n          m_cachedValue = llElements[m_currentEl].value;\n        }\n      }\n      return *this;\n    }\n\n  protected:\n    const AmbiVector& m_vector; // the target vector\n    StorageIndex m_currentEl;   // the current element in sparse/linked-list mode\n    RealScalar m_epsilon;       // epsilon used to prune zero coefficients\n    StorageIndex m_cachedIndex; // current coordinate\n    Scalar m_cachedValue;       // current value\n    bool m_isDense;             // mode of the vector\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_AMBIVECTOR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/CompressedStorage.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_COMPRESSED_STORAGE_H\n#define EIGEN_COMPRESSED_STORAGE_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/** \\internal\n  * Stores a sparse set of values as a list of values and a list of indices.\n  *\n  */\ntemplate<typename _Scalar,typename _StorageIndex>\nclass CompressedStorage\n{\n  public:\n\n    typedef _Scalar Scalar;\n    typedef _StorageIndex StorageIndex;\n\n  protected:\n\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n\n  public:\n\n    CompressedStorage()\n      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)\n    {}\n\n    explicit CompressedStorage(Index size)\n      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)\n    {\n      resize(size);\n    }\n\n    CompressedStorage(const CompressedStorage& other)\n      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)\n    {\n      *this = other;\n    }\n\n    CompressedStorage& operator=(const CompressedStorage& other)\n    {\n      resize(other.size());\n      if(other.size()>0)\n      {\n        internal::smart_copy(other.m_values,  other.m_values  + m_size, m_values);\n        internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices);\n      }\n      return *this;\n    }\n\n    void swap(CompressedStorage& other)\n    {\n      std::swap(m_values, other.m_values);\n      std::swap(m_indices, other.m_indices);\n      std::swap(m_size, other.m_size);\n      std::swap(m_allocatedSize, other.m_allocatedSize);\n    }\n\n    ~CompressedStorage()\n    {\n      delete[] m_values;\n      delete[] m_indices;\n    }\n\n    void reserve(Index size)\n    {\n 
     Index newAllocatedSize = m_size + size;\n      if (newAllocatedSize > m_allocatedSize)\n        reallocate(newAllocatedSize);\n    }\n\n    void squeeze()\n    {\n      if (m_allocatedSize>m_size)\n        reallocate(m_size);\n    }\n\n    void resize(Index size, double reserveSizeFactor = 0)\n    {\n      if (m_allocatedSize<size)\n      {\n        Index realloc_size = (std::min<Index>)(NumTraits<StorageIndex>::highest(),  size + Index(reserveSizeFactor*double(size)));\n        if(realloc_size<size)\n          internal::throw_std_bad_alloc();\n        reallocate(realloc_size);\n      }\n      m_size = size;\n    }\n\n    void append(const Scalar& v, Index i)\n    {\n      Index id = m_size;\n      resize(m_size+1, 1);\n      m_values[id] = v;\n      m_indices[id] = internal::convert_index<StorageIndex>(i);\n    }\n\n    inline Index size() const { return m_size; }\n    inline Index allocatedSize() const { return m_allocatedSize; }\n    inline void clear() { m_size = 0; }\n\n    const Scalar* valuePtr() const { return m_values; }\n    Scalar* valuePtr() { return m_values; }\n    const StorageIndex* indexPtr() const { return m_indices; }\n    StorageIndex* indexPtr() { return m_indices; }\n\n    inline Scalar& value(Index i) { eigen_internal_assert(m_values!=0); return m_values[i]; }\n    inline const Scalar& value(Index i) const { eigen_internal_assert(m_values!=0); return m_values[i]; }\n\n    inline StorageIndex& index(Index i) { eigen_internal_assert(m_indices!=0); return m_indices[i]; }\n    inline const StorageIndex& index(Index i) const { eigen_internal_assert(m_indices!=0); return m_indices[i]; }\n\n    /** \\returns the largest \\c k such that for all \\c j in [0,k) index[\\c j]\\<\\a key */\n    inline Index searchLowerIndex(Index key) const\n    {\n      return searchLowerIndex(0, m_size, key);\n    }\n\n    /** \\returns the largest \\c k in [start,end) such that for all \\c j in [start,k) index[\\c j]\\<\\a key */\n    inline Index 
searchLowerIndex(Index start, Index end, Index key) const\n    {\n      while(end>start)\n      {\n        Index mid = (end+start)>>1;\n        if (m_indices[mid]<key)\n          start = mid+1;\n        else\n          end = mid;\n      }\n      return start;\n    }\n\n    /** \\returns the stored value at index \\a key\n      * If the value does not exist, then the value \\a defaultValue is returned without any insertion. */\n    inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const\n    {\n      if (m_size==0)\n        return defaultValue;\n      else if (key==m_indices[m_size-1])\n        return m_values[m_size-1];\n      // ^^  optimization: let's first check if it is the last coefficient\n      // (very common in high level algorithms)\n      const Index id = searchLowerIndex(0,m_size-1,key);\n      return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;\n    }\n\n    /** Like at(), but the search is performed in the range [start,end) */\n    inline Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue = Scalar(0)) const\n    {\n      if (start>=end)\n        return defaultValue;\n      else if (end>start && key==m_indices[end-1])\n        return m_values[end-1];\n      // ^^  optimization: let's first check if it is the last coefficient\n      // (very common in high level algorithms)\n      const Index id = searchLowerIndex(start,end-1,key);\n      return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;\n    }\n\n    /** \\returns a reference to the value at index \\a key\n      * If the value does not exist, then the value \\a defaultValue is inserted\n      * such that the keys are sorted. 
*/\n    inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0))\n    {\n      Index id = searchLowerIndex(0,m_size,key);\n      if (id>=m_size || m_indices[id]!=key)\n      {\n        if (m_allocatedSize<m_size+1)\n        {\n          m_allocatedSize = 2*(m_size+1);\n          internal::scoped_array<Scalar> newValues(m_allocatedSize);\n          internal::scoped_array<StorageIndex> newIndices(m_allocatedSize);\n\n          // copy first chunk\n          internal::smart_copy(m_values,  m_values +id, newValues.ptr());\n          internal::smart_copy(m_indices, m_indices+id, newIndices.ptr());\n\n          // copy the rest\n          if(m_size>id)\n          {\n            internal::smart_copy(m_values +id,  m_values +m_size, newValues.ptr() +id+1);\n            internal::smart_copy(m_indices+id,  m_indices+m_size, newIndices.ptr()+id+1);\n          }\n          std::swap(m_values,newValues.ptr());\n          std::swap(m_indices,newIndices.ptr());\n        }\n        else if(m_size>id)\n        {\n          internal::smart_memmove(m_values +id, m_values +m_size, m_values +id+1);\n          internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1);\n        }\n        m_size++;\n        m_indices[id] = internal::convert_index<StorageIndex>(key);\n        m_values[id] = defaultValue;\n      }\n      return m_values[id];\n    }\n\n    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())\n    {\n      Index k = 0;\n      Index n = size();\n      for (Index i=0; i<n; ++i)\n      {\n        if (!internal::isMuchSmallerThan(value(i), reference, epsilon))\n        {\n          value(k) = value(i);\n          index(k) = index(i);\n          ++k;\n        }\n      }\n      resize(k,0);\n    }\n\n  protected:\n\n    inline void reallocate(Index size)\n    {\n      #ifdef EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN\n        EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN\n      
#endif\n      eigen_internal_assert(size!=m_allocatedSize);\n      internal::scoped_array<Scalar> newValues(size);\n      internal::scoped_array<StorageIndex> newIndices(size);\n      Index copySize = (std::min)(size, m_size);\n      if (copySize>0) {\n        internal::smart_copy(m_values, m_values+copySize, newValues.ptr());\n        internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr());\n      }\n      std::swap(m_values,newValues.ptr());\n      std::swap(m_indices,newIndices.ptr());\n      m_allocatedSize = size;\n    }\n\n  protected:\n    Scalar* m_values;\n    StorageIndex* m_indices;\n    Index m_size;\n    Index m_allocatedSize;\n\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_COMPRESSED_STORAGE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H\n#define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstatic void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false)\n{\n  typedef typename remove_all<Lhs>::type::Scalar Scalar;\n\n  // make sure to call innerSize/outerSize since we fake the storage order.\n  Index rows = lhs.innerSize();\n  Index cols = rhs.outerSize();\n  eigen_assert(lhs.outerSize() == rhs.innerSize());\n  \n  ei_declare_aligned_stack_constructed_variable(bool,   mask,     rows, 0);\n  ei_declare_aligned_stack_constructed_variable(Scalar, values,   rows, 0);\n  ei_declare_aligned_stack_constructed_variable(Index,  indices,  rows, 0);\n  \n  std::memset(mask,0,sizeof(bool)*rows);\n\n  evaluator<Lhs> lhsEval(lhs);\n  evaluator<Rhs> rhsEval(rhs);\n  \n  // estimate the number of non zero entries\n  // given a rhs column containing Y non zeros, we assume that the respective Y columns\n  // of the lhs differs in average of one non zeros, thus the number of non zeros for\n  // the product of a rhs column with the lhs is X+Y where X is the average number of non zero\n  // per column of the lhs.\n  // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)\n  Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();\n\n  res.setZero();\n  res.reserve(Index(estimated_nnz_prod));\n  // we compute each column of the result, one after the other\n  for (Index j=0; j<cols; ++j)\n  {\n\n 
   res.startVec(j);\n    Index nnz = 0;\n    for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)\n    {\n      Scalar y = rhsIt.value();\n      Index k = rhsIt.index();\n      for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)\n      {\n        Index i = lhsIt.index();\n        Scalar x = lhsIt.value();\n        if(!mask[i])\n        {\n          mask[i] = true;\n          values[i] = x * y;\n          indices[nnz] = i;\n          ++nnz;\n        }\n        else\n          values[i] += x * y;\n      }\n    }\n    if(!sortedInsertion)\n    {\n      // unordered insertion\n      for(Index k=0; k<nnz; ++k)\n      {\n        Index i = indices[k];\n        res.insertBackByOuterInnerUnordered(j,i) = values[i];\n        mask[i] = false;\n      }\n    }\n    else\n    {\n      // alternative ordered insertion code:\n      const Index t200 = rows/11; // 11 == (log2(200)*1.39)\n      const Index t = (rows*100)/139;\n\n      // FIXME reserve nnz non zeros\n      // FIXME implement faster sorting algorithms for very small nnz\n      // if the result is sparse enough => use a quick sort\n      // otherwise => loop through the entire vector\n      // In order to avoid to perform an expensive log2 when the\n      // result is clearly very sparse we use a linear bound up to 200.\n      if((nnz<200 && nnz<t200) || nnz * numext::log2(int(nnz)) < t)\n      {\n        if(nnz>1) std::sort(indices,indices+nnz);\n        for(Index k=0; k<nnz; ++k)\n        {\n          Index i = indices[k];\n          res.insertBackByOuterInner(j,i) = values[i];\n          mask[i] = false;\n        }\n      }\n      else\n      {\n        // dense path\n        for(Index i=0; i<rows; ++i)\n        {\n          if(mask[i])\n          {\n            mask[i] = false;\n            res.insertBackByOuterInner(j,i) = values[i];\n          }\n        }\n      }\n    }\n  }\n  res.finalize();\n}\n\n\n} // end namespace internal\n\nnamespace internal 
{\n\ntemplate<typename Lhs, typename Rhs, typename ResultType,\n  int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,\n  int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,\n  int ResStorageOrder = (traits<ResultType>::Flags&RowMajorBit) ? RowMajor : ColMajor>\nstruct conservative_sparse_sparse_product_selector;\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>\n{\n  typedef typename remove_all<Lhs>::type LhsCleaned;\n  typedef typename LhsCleaned::Scalar Scalar;\n\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)\n  {\n    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;\n    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrixAux;\n    typedef typename sparse_eval<ColMajorMatrixAux,ResultType::RowsAtCompileTime,ResultType::ColsAtCompileTime,ColMajorMatrixAux::Flags>::type ColMajorMatrix;\n    \n    // If the result is tall and thin (in the extreme case a column vector)\n    // then it is faster to sort the coefficients inplace instead of transposing twice.\n    // FIXME, the following heuristic is probably not very good.\n    if(lhs.rows()>rhs.cols())\n    {\n      ColMajorMatrix resCol(lhs.rows(),rhs.cols());\n      // perform sorted insertion\n      internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol, true);\n      res = resCol.markAsRValue();\n    }\n    else\n    {\n      ColMajorMatrixAux resCol(lhs.rows(),rhs.cols());\n      // ressort to transpose to sort the entries\n      internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrixAux>(lhs, rhs, resCol, false);\n      RowMajorMatrix resRow(resCol);\n      res = resRow.markAsRValue();\n    }\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename 
ResultType>\nstruct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>\n{\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)\n  {\n     typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;\n     RowMajorMatrix rhsRow = rhs;\n     RowMajorMatrix resRow(lhs.rows(), rhs.cols());\n     internal::conservative_sparse_sparse_product_impl<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);\n     res = resRow;\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>\n{\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)\n  {\n    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;\n    RowMajorMatrix lhsRow = lhs;\n    RowMajorMatrix resRow(lhs.rows(), rhs.cols());\n    internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);\n    res = resRow;\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>\n{\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)\n  {\n    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;\n    RowMajorMatrix resRow(lhs.rows(), rhs.cols());\n    internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);\n    res = resRow;\n  }\n};\n\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>\n{\n  typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;\n\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)\n  {\n    typedef SparseMatrix<typename 
ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;\n    ColMajorMatrix resCol(lhs.rows(), rhs.cols());\n    internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);\n    res = resCol;\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>\n{\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)\n  {\n    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;\n    ColMajorMatrix lhsCol = lhs;\n    ColMajorMatrix resCol(lhs.rows(), rhs.cols());\n    internal::conservative_sparse_sparse_product_impl<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);\n    res = resCol;\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>\n{\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)\n  {\n    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;\n    ColMajorMatrix rhsCol = rhs;\n    ColMajorMatrix resCol(lhs.rows(), rhs.cols());\n    internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);\n    res = resCol;\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>\n{\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)\n  {\n    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;\n    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;\n    RowMajorMatrix resRow(lhs.rows(),rhs.cols());\n    
internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);\n    // sort the non zeros:\n    ColMajorMatrix resCol(resRow);\n    res = resCol;\n  }\n};\n\n} // end namespace internal\n\n\nnamespace internal {\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstatic void sparse_sparse_to_dense_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)\n{\n  typedef typename remove_all<Lhs>::type::Scalar Scalar;\n  Index cols = rhs.outerSize();\n  eigen_assert(lhs.outerSize() == rhs.innerSize());\n\n  evaluator<Lhs> lhsEval(lhs);\n  evaluator<Rhs> rhsEval(rhs);\n\n  for (Index j=0; j<cols; ++j)\n  {\n    for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)\n    {\n      Scalar y = rhsIt.value();\n      Index k = rhsIt.index();\n      for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)\n      {\n        Index i = lhsIt.index();\n        Scalar x = lhsIt.value();\n        res.coeffRef(i,j) += x * y;\n      }\n    }\n  }\n}\n\n\n} // end namespace internal\n\nnamespace internal {\n\ntemplate<typename Lhs, typename Rhs, typename ResultType,\n  int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,\n  int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? 
RowMajor : ColMajor>\nstruct sparse_sparse_to_dense_product_selector;\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor>\n{\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)\n  {\n    internal::sparse_sparse_to_dense_product_impl<Lhs,Rhs,ResultType>(lhs, rhs, res);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor>\n{\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)\n  {\n    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;\n    ColMajorMatrix lhsCol(lhs);\n    internal::sparse_sparse_to_dense_product_impl<ColMajorMatrix,Rhs,ResultType>(lhsCol, rhs, res);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor>\n{\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)\n  {\n    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;\n    ColMajorMatrix rhsCol(rhs);\n    internal::sparse_sparse_to_dense_product_impl<Lhs,ColMajorMatrix,ResultType>(lhs, rhsCol, res);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor>\n{\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)\n  {\n    Transpose<ResultType> trRes(res);\n    internal::sparse_sparse_to_dense_product_impl<Rhs,Lhs,Transpose<ResultType> >(rhs, lhs, trRes);\n  }\n};\n\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/MappedSparseMatrix.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_MAPPED_SPARSEMATRIX_H\n#define EIGEN_MAPPED_SPARSEMATRIX_H\n\nnamespace Eigen {\n\n/** \\deprecated Use Map<SparseMatrix<> >\n  * \\class MappedSparseMatrix\n  *\n  * \\brief Sparse matrix\n  *\n  * \\param _Scalar the scalar type, i.e. the type of the coefficients\n  *\n  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.\n  *\n  */\nnamespace internal {\ntemplate<typename _Scalar, int _Flags, typename _StorageIndex>\nstruct traits<MappedSparseMatrix<_Scalar, _Flags, _StorageIndex> > : traits<SparseMatrix<_Scalar, _Flags, _StorageIndex> >\n{};\n} // end namespace internal\n\ntemplate<typename _Scalar, int _Flags, typename _StorageIndex>\nclass MappedSparseMatrix\n  : public Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> >\n{\n    typedef Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> > Base;\n\n  public:\n    \n    typedef typename Base::StorageIndex StorageIndex;\n    typedef typename Base::Scalar Scalar;\n\n    inline MappedSparseMatrix(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZeroPtr = 0)\n      : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZeroPtr)\n    {}\n\n    /** Empty destructor */\n    inline ~MappedSparseMatrix() {}\n};\n\nnamespace internal {\n\ntemplate<typename _Scalar, int _Options, typename _StorageIndex>\nstruct evaluator<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> >\n  : evaluator<SparseCompressedBase<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> > >\n{\n  typedef 
MappedSparseMatrix<_Scalar,_Options,_StorageIndex> XprType;\n  typedef evaluator<SparseCompressedBase<XprType> > Base;\n  \n  evaluator() : Base() {}\n  explicit evaluator(const XprType &mat) : Base(mat) {}\n};\n\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_MAPPED_SPARSEMATRIX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseAssign.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSEASSIGN_H\n#define EIGEN_SPARSEASSIGN_H\n\nnamespace Eigen { \n\ntemplate<typename Derived>    \ntemplate<typename OtherDerived>\nDerived& SparseMatrixBase<Derived>::operator=(const EigenBase<OtherDerived> &other)\n{\n  internal::call_assignment_no_alias(derived(), other.derived());\n  return derived();\n}\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nDerived& SparseMatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)\n{\n  // TODO use the evaluator mechanism\n  other.evalTo(derived());\n  return derived();\n}\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\ninline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other)\n{\n  // by default sparse evaluation do not alias, so we can safely bypass the generic call_assignment routine\n  internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar,typename OtherDerived::Scalar> >\n          ::run(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());\n  return derived();\n}\n\ntemplate<typename Derived>\ninline Derived& SparseMatrixBase<Derived>::operator=(const Derived& other)\n{\n  internal::call_assignment_no_alias(derived(), other.derived());\n  return derived();\n}\n\nnamespace internal {\n\ntemplate<>\nstruct storage_kind_to_evaluator_kind<Sparse> {\n  typedef IteratorBased Kind;\n};\n\ntemplate<>\nstruct storage_kind_to_shape<Sparse> {\n  typedef SparseShape Shape;\n};\n\nstruct Sparse2Sparse {};\nstruct Sparse2Dense  {};\n\ntemplate<> struct AssignmentKind<SparseShape, 
SparseShape>           { typedef Sparse2Sparse Kind; };\ntemplate<> struct AssignmentKind<SparseShape, SparseTriangularShape> { typedef Sparse2Sparse Kind; };\ntemplate<> struct AssignmentKind<DenseShape,  SparseShape>           { typedef Sparse2Dense  Kind; };\ntemplate<> struct AssignmentKind<DenseShape,  SparseTriangularShape> { typedef Sparse2Dense  Kind; };\n\n\ntemplate<typename DstXprType, typename SrcXprType>\nvoid assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)\n{\n  typedef typename DstXprType::Scalar Scalar;\n  typedef internal::evaluator<DstXprType> DstEvaluatorType;\n  typedef internal::evaluator<SrcXprType> SrcEvaluatorType;\n\n  SrcEvaluatorType srcEvaluator(src);\n\n  const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);\n  const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols();\n  if ((!transpose) && src.isRValue())\n  {\n    // eval without temporary\n    dst.resize(src.rows(), src.cols());\n    dst.setZero();\n    dst.reserve((std::max)(src.rows(),src.cols())*2);\n    for (Index j=0; j<outerEvaluationSize; ++j)\n    {\n      dst.startVec(j);\n      for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)\n      {\n        Scalar v = it.value();\n        dst.insertBackByOuterInner(j,it.index()) = v;\n      }\n    }\n    dst.finalize();\n  }\n  else\n  {\n    // eval through a temporary\n    eigen_assert(( ((internal::traits<DstXprType>::SupportedAccessPatterns & OuterRandomAccessPattern)==OuterRandomAccessPattern) ||\n              (!((DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit)))) &&\n              \"the transpose operation is supposed to be handled in SparseMatrix::operator=\");\n\n    enum { Flip = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit) };\n\n    \n    DstXprType temp(src.rows(), src.cols());\n\n    
temp.reserve((std::max)(src.rows(),src.cols())*2);\n    for (Index j=0; j<outerEvaluationSize; ++j)\n    {\n      temp.startVec(j);\n      for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)\n      {\n        Scalar v = it.value();\n        temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;\n      }\n    }\n    temp.finalize();\n\n    dst = temp.markAsRValue();\n  }\n}\n\n// Generic Sparse to Sparse assignment\ntemplate< typename DstXprType, typename SrcXprType, typename Functor>\nstruct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse>\n{\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)\n  {\n    assign_sparse_to_sparse(dst.derived(), src.derived());\n  }\n};\n\n// Generic Sparse to Dense assignment\ntemplate< typename DstXprType, typename SrcXprType, typename Functor>\nstruct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense>\n{\n  static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)\n  {\n    if(internal::is_same<Functor,internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> >::value)\n      dst.setZero();\n    \n    internal::evaluator<SrcXprType> srcEval(src);\n    resize_if_allowed(dst, src, func);\n    internal::evaluator<DstXprType> dstEval(dst);\n    \n    const Index outerEvaluationSize = (internal::evaluator<SrcXprType>::Flags&RowMajorBit) ? 
src.rows() : src.cols();\n    for (Index j=0; j<outerEvaluationSize; ++j)\n      for (typename internal::evaluator<SrcXprType>::InnerIterator i(srcEval,j); i; ++i)\n        func.assignCoeff(dstEval.coeffRef(i.row(),i.col()), i.value());\n  }\n};\n\n// Specialization for \"dst = dec.solve(rhs)\"\n// NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error\ntemplate<typename DstXprType, typename DecType, typename RhsType, typename Scalar>\nstruct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Sparse2Sparse>\n{\n  typedef Solve<DecType,RhsType> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n\n    src.dec()._solve_impl(src.rhs(), dst);\n  }\n};\n\nstruct Diagonal2Sparse {};\n\ntemplate<> struct AssignmentKind<SparseShape,DiagonalShape> { typedef Diagonal2Sparse Kind; };\n\ntemplate< typename DstXprType, typename SrcXprType, typename Functor>\nstruct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>\n{\n  typedef typename DstXprType::StorageIndex StorageIndex;\n  typedef typename DstXprType::Scalar Scalar;\n  typedef Array<StorageIndex,Dynamic,1> ArrayXI;\n  typedef Array<Scalar,Dynamic,1> ArrayXS;\n  template<int Options>\n  static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n\n    Index size = src.diagonal().size();\n    dst.makeCompressed();\n    dst.resizeNonZeros(size);\n    Map<ArrayXI>(dst.innerIndexPtr(), size).setLinSpaced(0,StorageIndex(size)-1);\n    
Map<ArrayXI>(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size));\n    Map<ArrayXS>(dst.valuePtr(), size) = src.diagonal();\n  }\n  \n  template<typename DstDerived>\n  static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)\n  {\n    dst.diagonal() = src.diagonal();\n  }\n  \n  static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)\n  { dst.diagonal() += src.diagonal(); }\n  \n  static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)\n  { dst.diagonal() -= src.diagonal(); }\n};\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSEASSIGN_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseBlock.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSE_BLOCK_H\n#define EIGEN_SPARSE_BLOCK_H\n\nnamespace Eigen {\n\n// Subset of columns or rows\ntemplate<typename XprType, int BlockRows, int BlockCols>\nclass BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>\n  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >\n{\n    typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;\n    typedef Block<XprType, BlockRows, BlockCols, true> BlockType;\npublic:\n    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };\nprotected:\n    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };\n    typedef SparseMatrixBase<BlockType> Base;\n    using Base::convert_index;\npublic:\n    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)\n\n    inline BlockImpl(XprType& xpr, Index i)\n      : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)\n    {}\n\n    inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)\n      : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))\n    {}\n\n    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }\n    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? 
m_matrix.cols() : m_outerSize.value(); }\n\n    Index nonZeros() const\n    {\n      typedef internal::evaluator<XprType> EvaluatorType;\n      EvaluatorType matEval(m_matrix);\n      Index nnz = 0;\n      Index end = m_outerStart + m_outerSize.value();\n      for(Index j=m_outerStart; j<end; ++j)\n        for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)\n          ++nnz;\n      return nnz;\n    }\n\n    inline const Scalar coeff(Index row, Index col) const\n    {\n      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 :  m_outerStart));\n    }\n\n    inline const Scalar coeff(Index index) const\n    {\n      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index :  m_outerStart);\n    }\n\n    inline const XprType& nestedExpression() const { return m_matrix; }\n    inline XprType& nestedExpression() { return m_matrix; }\n    Index startRow() const { return IsRowMajor ? m_outerStart : 0; }\n    Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }\n    Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }\n    Index blockCols() const { return IsRowMajor ? 
m_matrix.cols() : m_outerSize.value(); }\n\n  protected:\n\n    typename internal::ref_selector<XprType>::non_const_type m_matrix;\n    Index m_outerStart;\n    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;\n\n  protected:\n    // Disable assignment with clear error message.\n    // Note that simply removing operator= yields compilation errors with ICC+MSVC\n    template<typename T>\n    BlockImpl& operator=(const T&)\n    {\n      EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);\n      return *this;\n    }\n};\n\n\n/***************************************************************************\n* specialization for SparseMatrix\n***************************************************************************/\n\nnamespace internal {\n\ntemplate<typename SparseMatrixType, int BlockRows, int BlockCols>\nclass sparse_matrix_block_impl\n  : public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >\n{\n    typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;\n    typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;\n    typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base;\n    using Base::convert_index;\npublic:\n    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };\n    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)\nprotected:\n    typedef typename Base::IndexVector IndexVector;\n    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };\npublic:\n\n    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i)\n      : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)\n    {}\n\n    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)\n      : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? 
blockRows : blockCols))\n    {}\n\n    template<typename OtherDerived>\n    inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)\n    {\n      typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;\n      _NestedMatrixType& matrix = m_matrix;\n      // This assignment is slow if this vector set is not empty\n      // and/or it is not at the end of the nonzeros of the underlying matrix.\n\n      // 1 - eval to a temporary to avoid transposition and/or aliasing issues\n      Ref<const SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> > tmp(other.derived());\n      eigen_internal_assert(tmp.outerSize()==m_outerSize.value());\n\n      // 2 - let's check whether there is enough allocated memory\n      Index nnz           = tmp.nonZeros();\n      Index start         = m_outerStart==0 ? 0 : m_matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block\n      Index end           = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block\n      Index block_size    = end - start;                                                // available room in the current block\n      Index tail_size     = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;\n\n      Index free_size     = m_matrix.isCompressed()\n                          ? 
Index(matrix.data().allocatedSize()) + block_size\n                          : block_size;\n\n      Index tmp_start = tmp.outerIndexPtr()[0];\n\n      bool update_trailing_pointers = false;\n      if(nnz>free_size)\n      {\n        // realloc manually to reduce copies\n        typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);\n\n        internal::smart_copy(m_matrix.valuePtr(),       m_matrix.valuePtr() + start,      newdata.valuePtr());\n        internal::smart_copy(m_matrix.innerIndexPtr(),  m_matrix.innerIndexPtr() + start, newdata.indexPtr());\n\n        internal::smart_copy(tmp.valuePtr() + tmp_start,      tmp.valuePtr() + tmp_start + nnz,       newdata.valuePtr() + start);\n        internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz,  newdata.indexPtr() + start);\n\n        internal::smart_copy(matrix.valuePtr()+end,       matrix.valuePtr()+end + tail_size,      newdata.valuePtr()+start+nnz);\n        internal::smart_copy(matrix.innerIndexPtr()+end,  matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);\n\n        newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);\n\n        matrix.data().swap(newdata);\n\n        update_trailing_pointers = true;\n      }\n      else\n      {\n        if(m_matrix.isCompressed())\n        {\n          // no need to realloc, simply copy the tail at its respective position and insert tmp\n          matrix.data().resize(start + nnz + tail_size);\n\n          internal::smart_memmove(matrix.valuePtr()+end,      matrix.valuePtr() + end+tail_size,      matrix.valuePtr() + start+nnz);\n          internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz);\n\n          update_trailing_pointers = true;\n        }\n\n        internal::smart_copy(tmp.valuePtr() + tmp_start,      tmp.valuePtr() + tmp_start + nnz,       matrix.valuePtr() + 
start);\n        internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz,  matrix.innerIndexPtr() + start);\n      }\n\n      // update outer index pointers and innerNonZeros\n      if(IsVectorAtCompileTime)\n      {\n        if(!m_matrix.isCompressed())\n          matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz);\n        matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start);\n      }\n      else\n      {\n        StorageIndex p = StorageIndex(start);\n        for(Index k=0; k<m_outerSize.value(); ++k)\n        {\n          StorageIndex nnz_k = internal::convert_index<StorageIndex>(tmp.innerVector(k).nonZeros());\n          if(!m_matrix.isCompressed())\n            matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k;\n          matrix.outerIndexPtr()[m_outerStart+k] = p;\n          p += nnz_k;\n        }\n      }\n\n      if(update_trailing_pointers)\n      {\n        StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);\n        for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)\n        {\n          matrix.outerIndexPtr()[k] += offset;\n        }\n      }\n\n      return derived();\n    }\n\n    inline BlockType& operator=(const BlockType& other)\n    {\n      return operator=<BlockType>(other);\n    }\n\n    inline const Scalar* valuePtr() const\n    { return m_matrix.valuePtr(); }\n    inline Scalar* valuePtr()\n    { return m_matrix.valuePtr(); }\n\n    inline const StorageIndex* innerIndexPtr() const\n    { return m_matrix.innerIndexPtr(); }\n    inline StorageIndex* innerIndexPtr()\n    { return m_matrix.innerIndexPtr(); }\n\n    inline const StorageIndex* outerIndexPtr() const\n    { return m_matrix.outerIndexPtr() + m_outerStart; }\n    inline StorageIndex* outerIndexPtr()\n    { return m_matrix.outerIndexPtr() + m_outerStart; }\n\n    inline const StorageIndex* innerNonZeroPtr() const\n    { return isCompressed() ? 
0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }\n    inline StorageIndex* innerNonZeroPtr()\n    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }\n\n    bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }\n\n    inline Scalar& coeffRef(Index row, Index col)\n    {\n      return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 :  m_outerStart));\n    }\n\n    inline const Scalar coeff(Index row, Index col) const\n    {\n      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 :  m_outerStart));\n    }\n\n    inline const Scalar coeff(Index index) const\n    {\n      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index :  m_outerStart);\n    }\n\n    const Scalar& lastCoeff() const\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl);\n      eigen_assert(Base::nonZeros()>0);\n      if(m_matrix.isCompressed())\n        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];\n      else\n        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];\n    }\n\n    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }\n    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }\n\n    inline const SparseMatrixType& nestedExpression() const { return m_matrix; }\n    inline SparseMatrixType& nestedExpression() { return m_matrix; }\n    Index startRow() const { return IsRowMajor ? m_outerStart : 0; }\n    Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }\n    Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }\n    Index blockCols() const { return IsRowMajor ? 
m_matrix.cols() : m_outerSize.value(); }\n\n  protected:\n\n    typename internal::ref_selector<SparseMatrixType>::non_const_type m_matrix;\n    Index m_outerStart;\n    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;\n\n};\n\n} // namespace internal\n\ntemplate<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>\nclass BlockImpl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>\n  : public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>\n{\npublic:\n  typedef _StorageIndex StorageIndex;\n  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;\n  typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;\n  inline BlockImpl(SparseMatrixType& xpr, Index i)\n    : Base(xpr, i)\n  {}\n\n  inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)\n    : Base(xpr, startRow, startCol, blockRows, blockCols)\n  {}\n\n  using Base::operator=;\n};\n\ntemplate<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>\nclass BlockImpl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>\n  : public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>\n{\npublic:\n  typedef _StorageIndex StorageIndex;\n  typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;\n  typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;\n  inline BlockImpl(SparseMatrixType& xpr, Index i)\n    : Base(xpr, i)\n  {}\n\n  inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)\n    : Base(xpr, startRow, startCol, blockRows, blockCols)\n  {}\n\n  using Base::operator=;\nprivate:\n  template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr, 
Index i);\n  template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr);\n};\n\n//----------\n\n/** \\returns the \\a outer -th column (resp. row) of the matrix \\c *this if \\c *this\n  * is col-major (resp. row-major).\n  */\ntemplate<typename Derived>\ntypename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)\n{ return InnerVectorReturnType(derived(), outer); }\n\n/** \\returns the \\a outer -th column (resp. row) of the matrix \\c *this if \\c *this\n  * is col-major (resp. row-major). Read-only.\n  */\ntemplate<typename Derived>\nconst typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const\n{ return ConstInnerVectorReturnType(derived(), outer); }\n\n/** \\returns the \\a outer -th column (resp. row) of the matrix \\c *this if \\c *this\n  * is col-major (resp. row-major).\n  */\ntemplate<typename Derived>\ntypename SparseMatrixBase<Derived>::InnerVectorsReturnType\nSparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)\n{\n  return Block<Derived,Dynamic,Dynamic,true>(derived(),\n                                             IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,\n                                             IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);\n\n}\n\n/** \\returns the \\a outer -th column (resp. row) of the matrix \\c *this if \\c *this\n  * is col-major (resp. row-major). Read-only.\n  */\ntemplate<typename Derived>\nconst typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType\nSparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const\n{\n  return Block<const Derived,Dynamic,Dynamic,true>(derived(),\n                                                  IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,\n                                                  IsRowMajor ? outerSize : rows(), IsRowMajor ? 
cols() : outerSize);\n\n}\n\n/** Generic implementation of sparse Block expression.\n  * Real-only.\n  */\ntemplate<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>\nclass BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>\n  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator\n{\n    typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;\n    typedef SparseMatrixBase<BlockType> Base;\n    using Base::convert_index;\npublic:\n    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };\n    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)\n\n    typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;\n\n    /** Column or Row constructor\n      */\n    inline BlockImpl(XprType& xpr, Index i)\n      : m_matrix(xpr),\n        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),\n        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),\n        m_blockRows(BlockRows==1 ? 1 : xpr.rows()),\n        m_blockCols(BlockCols==1 ? 
1 : xpr.cols())\n    {}\n\n    /** Dynamic-size constructor\n      */\n    inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)\n      : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))\n    {}\n\n    inline Index rows() const { return m_blockRows.value(); }\n    inline Index cols() const { return m_blockCols.value(); }\n\n    inline Scalar& coeffRef(Index row, Index col)\n    {\n      return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value());\n    }\n\n    inline const Scalar coeff(Index row, Index col) const\n    {\n      return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());\n    }\n\n    inline Scalar& coeffRef(Index index)\n    {\n      return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),\n                               m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));\n    }\n\n    inline const Scalar coeff(Index index) const\n    {\n      return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),\n                            m_startCol.value() + (RowsAtCompileTime == 1 ? 
index : 0));\n    }\n\n    inline const XprType& nestedExpression() const { return m_matrix; }\n    inline XprType& nestedExpression() { return m_matrix; }\n    Index startRow() const { return m_startRow.value(); }\n    Index startCol() const { return m_startCol.value(); }\n    Index blockRows() const { return m_blockRows.value(); }\n    Index blockCols() const { return m_blockCols.value(); }\n\n  protected:\n//     friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;\n    friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;\n\n    Index nonZeros() const { return Dynamic; }\n\n    typename internal::ref_selector<XprType>::non_const_type m_matrix;\n    const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;\n    const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;\n    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;\n    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;\n\n  protected:\n    // Disable assignment with clear error message.\n    // Note that simply removing operator= yields compilation errors with ICC+MSVC\n    template<typename T>\n    BlockImpl& operator=(const T&)\n    {\n      EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);\n      return *this;\n    }\n\n};\n\nnamespace internal {\n\ntemplate<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>\nstruct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased >\n : public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> >\n{\n    class InnerVectorInnerIterator;\n    class OuterVectorInnerIterator;\n  public:\n    typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType;\n    typedef typename XprType::StorageIndex StorageIndex;\n    typedef typename 
XprType::Scalar Scalar;\n\n    enum {\n      IsRowMajor = XprType::IsRowMajor,\n\n      OuterVector =  (BlockCols==1 && ArgType::IsRowMajor)\n                    | // FIXME | instead of || to please GCC 4.4.0 stupid warning \"suggest parentheses around &&\".\n                      // revert to || as soon as not needed anymore.\n                     (BlockRows==1 && !ArgType::IsRowMajor),\n\n      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,\n      Flags = XprType::Flags\n    };\n\n    typedef typename internal::conditional<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator>::type InnerIterator;\n\n    explicit unary_evaluator(const XprType& op)\n      : m_argImpl(op.nestedExpression()), m_block(op)\n    {}\n\n    inline Index nonZerosEstimate() const {\n      Index nnz = m_block.nonZeros();\n      if(nnz<0)\n        return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size();\n      return nnz;\n    }\n\n  protected:\n    typedef typename evaluator<ArgType>::InnerIterator EvalIterator;\n\n    evaluator<ArgType> m_argImpl;\n    const XprType &m_block;\n};\n\ntemplate<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>\nclass unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator\n : public EvalIterator\n{\n  enum { IsRowMajor = unary_evaluator::IsRowMajor };\n  const XprType& m_block;\n  Index m_end;\npublic:\n\n  EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)\n    : EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),\n      m_block(aEval.m_block),\n      m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())\n  {\n    while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? 
m_block.startCol() : m_block.startRow())) )\n      EvalIterator::operator++();\n  }\n\n  inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(IsRowMajor ? m_block.startCol() : m_block.startRow()); }\n  inline Index outer()  const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }\n  inline Index row()    const { return EvalIterator::row()   - m_block.startRow(); }\n  inline Index col()    const { return EvalIterator::col()   - m_block.startCol(); }\n\n  inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }\n};\n\ntemplate<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>\nclass unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator\n{\n  enum { IsRowMajor = unary_evaluator::IsRowMajor };\n  const unary_evaluator& m_eval;\n  Index m_outerPos;\n  const Index m_innerIndex;\n  Index m_end;\n  EvalIterator m_it;\npublic:\n\n  EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)\n    : m_eval(aEval),\n      m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),\n      m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),\n      m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),\n      m_it(m_eval.m_argImpl, m_outerPos)\n  {\n    EIGEN_UNUSED_VARIABLE(outer);\n    eigen_assert(outer==0);\n\n    while(m_it && m_it.index() < m_innerIndex) ++m_it;\n    if((!m_it) || (m_it.index()!=m_innerIndex))\n      ++(*this);\n  }\n\n  inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }\n  inline Index outer()  const { return 0; }\n  inline Index row()    const { return IsRowMajor ? 
0 : index(); }\n  inline Index col()    const { return IsRowMajor ? index() : 0; }\n\n  inline Scalar value() const { return m_it.value(); }\n  inline Scalar& valueRef() { return m_it.valueRef(); }\n\n  inline OuterVectorInnerIterator& operator++()\n  {\n    // search next non-zero entry\n    while(++m_outerPos<m_end)\n    {\n      // Restart iterator at the next inner-vector:\n      m_it.~EvalIterator();\n      ::new (&m_it) EvalIterator(m_eval.m_argImpl, m_outerPos);\n      // search for the key m_innerIndex in the current outer-vector\n      while(m_it && m_it.index() < m_innerIndex) ++m_it;\n      if(m_it && m_it.index()==m_innerIndex) break;\n    }\n    return *this;\n  }\n\n  inline operator bool() const { return m_outerPos < m_end; }\n};\n\ntemplate<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>\nstruct unary_evaluator<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>\n  : evaluator<SparseCompressedBase<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >\n{\n  typedef Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;\n  typedef evaluator<SparseCompressedBase<XprType> > Base;\n  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}\n};\n\ntemplate<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>\nstruct unary_evaluator<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>\n  : evaluator<SparseCompressedBase<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >\n{\n  typedef Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;\n  typedef evaluator<SparseCompressedBase<XprType> > Base;\n  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}\n};\n\n} // end namespace internal\n\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSE_BLOCK_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseColEtree.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\n/* \n \n * NOTE: This file is the modified version of sp_coletree.c file in SuperLU \n \n * -- SuperLU routine (version 3.1) --\n * Univ. of California Berkeley, Xerox Palo Alto Research Center,\n * and Lawrence Berkeley National Lab.\n * August 1, 2008\n *\n * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.\n *\n * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY\n * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.\n *\n * Permission is hereby granted to use or copy this program for any\n * purpose, provided the above notices are retained on all copies.\n * Permission to modify the code and to distribute modified code is\n * granted, provided the above notices are retained, and a notice that\n * the code was modified is included with the above copyright notice.\n */\n#ifndef SPARSE_COLETREE_H\n#define SPARSE_COLETREE_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n/** Find the root of the tree/set containing the vertex i : Use Path halving */ \ntemplate<typename Index, typename IndexVector>\nIndex etree_find (Index i, IndexVector& pp)\n{\n  Index p = pp(i); // Parent \n  Index gp = pp(p); // Grand parent \n  while (gp != p) \n  {\n    pp(i) = gp; // Parent pointer on find path is changed to former grand parent\n    i = gp; \n    p = pp(i);\n    gp = pp(p);\n  }\n  return p; \n}\n\n/** Compute the column elimination tree of a sparse matrix\n  * \\param mat The matrix in column-major format. 
\n  * \\param parent The elimination tree\n  * \\param firstRowElt The column index of the first element in each row\n  * \\param perm The permutation to apply to the column of \\b mat\n  */\ntemplate <typename MatrixType, typename IndexVector>\nint coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::StorageIndex *perm=0)\n{\n  typedef typename MatrixType::StorageIndex StorageIndex;\n  StorageIndex nc = convert_index<StorageIndex>(mat.cols()); // Number of columns\n  StorageIndex m = convert_index<StorageIndex>(mat.rows());\n  StorageIndex diagSize = (std::min)(nc,m);\n  IndexVector root(nc); // root of subtree of etree \n  root.setZero();\n  IndexVector pp(nc); // disjoint sets \n  pp.setZero(); // Initialize disjoint sets \n  parent.resize(mat.cols());\n  //Compute first nonzero column in each row \n  firstRowElt.resize(m);\n  firstRowElt.setConstant(nc);\n  firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1);\n  bool found_diag;\n  for (StorageIndex col = 0; col < nc; col++)\n  {\n    StorageIndex pcol = col;\n    if(perm) pcol  = perm[col];\n    for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it)\n    { \n      Index row = it.row();\n      firstRowElt(row) = (std::min)(firstRowElt(row), col);\n    }\n  }\n  /* Compute etree by Liu's algorithm for symmetric matrices,\n          except use (firstRowElt[r],c) in place of an edge (r,c) of A.\n    Thus each row clique in A'*A is replaced by a star\n    centered at its first vertex, which has the same fill. 
*/\n  StorageIndex rset, cset, rroot;\n  for (StorageIndex col = 0; col < nc; col++) \n  {\n    found_diag = col>=m;\n    pp(col) = col; \n    cset = col; \n    root(cset) = col; \n    parent(col) = nc; \n    /* The diagonal element is treated here even if it does not exist in the matrix\n     * hence the loop is executed once more */ \n    StorageIndex pcol = col;\n    if(perm) pcol  = perm[col];\n    for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it)\n    { //  A sequence of interleaved find and union is performed \n      Index i = col;\n      if(it) i = it.index();\n      if (i == col) found_diag = true;\n      \n      StorageIndex row = firstRowElt(i);\n      if (row >= col) continue; \n      rset = internal::etree_find(row, pp); // Find the name of the set containing row\n      rroot = root(rset);\n      if (rroot != col) \n      {\n        parent(rroot) = col; \n        pp(cset) = rset; \n        cset = rset; \n        root(cset) = col; \n      }\n    }\n  }\n  return 0;  \n}\n\n/** \n  * Depth-first search from vertex n.  
No recursion.\n  * This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France.\n*/\ntemplate <typename IndexVector>\nvoid nr_etdfs (typename IndexVector::Scalar n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, typename IndexVector::Scalar postnum)\n{\n  typedef typename IndexVector::Scalar StorageIndex;\n  StorageIndex current = n, first, next;\n  while (postnum != n) \n  {\n    // No kid for the current node\n    first = first_kid(current);\n    \n    // no kid for the current node\n    if (first == -1) \n    {\n      // Numbering this node because it has no kid \n      post(current) = postnum++;\n      \n      // looking for the next kid \n      next = next_kid(current); \n      while (next == -1) \n      {\n        // No more kids : back to the parent node\n        current = parent(current); \n        // numbering the parent node \n        post(current) = postnum++;\n        \n        // Get the next kid \n        next = next_kid(current); \n      }\n      // stopping criterion \n      if (postnum == n+1) return; \n      \n      // Updating current node \n      current = next; \n    }\n    else \n    {\n      current = first; \n    }\n  }\n}\n\n\n/**\n  * \\brief Post order a tree \n  * \\param n the number of nodes\n  * \\param parent Input tree\n  * \\param post postordered tree\n  */\ntemplate <typename IndexVector>\nvoid treePostorder(typename IndexVector::Scalar n, IndexVector& parent, IndexVector& post)\n{\n  typedef typename IndexVector::Scalar StorageIndex;\n  IndexVector first_kid, next_kid; // Linked list of children \n  StorageIndex postnum; \n  // Allocate storage for working arrays and results \n  first_kid.resize(n+1); \n  next_kid.setZero(n+1);\n  post.setZero(n+1);\n  \n  // Set up structure describing children\n  first_kid.setConstant(-1); \n  for (StorageIndex v = n-1; v >= 0; v--) \n  {\n    StorageIndex dad = parent(v);\n    next_kid(v) = first_kid(dad); \n    first_kid(dad) = v; \n  
}\n  \n  // Depth-first search from dummy root vertex #n\n  postnum = 0; \n  internal::nr_etdfs(n, parent, first_kid, next_kid, post, postnum);\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // SPARSE_COLETREE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseCompressedBase.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSE_COMPRESSED_BASE_H\n#define EIGEN_SPARSE_COMPRESSED_BASE_H\n\nnamespace Eigen { \n\ntemplate<typename Derived> class SparseCompressedBase;\n  \nnamespace internal {\n\ntemplate<typename Derived>\nstruct traits<SparseCompressedBase<Derived> > : traits<Derived>\n{};\n\n} // end namespace internal\n\n/** \\ingroup SparseCore_Module\n  * \\class SparseCompressedBase\n  * \\brief Common base class for sparse [compressed]-{row|column}-storage format.\n  *\n  * This class defines the common interface for all derived classes implementing the compressed sparse storage format, such as:\n  *  - SparseMatrix\n  *  - Ref<SparseMatrixType,Options>\n  *  - Map<SparseMatrixType>\n  *\n  */\ntemplate<typename Derived>\nclass SparseCompressedBase\n  : public SparseMatrixBase<Derived>\n{\n  public:\n    typedef SparseMatrixBase<Derived> Base;\n    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseCompressedBase)\n    using Base::operator=;\n    using Base::IsRowMajor;\n    \n    class InnerIterator;\n    class ReverseInnerIterator;\n    \n  protected:\n    typedef typename Base::IndexVector IndexVector;\n    Eigen::Map<IndexVector> innerNonZeros() { return Eigen::Map<IndexVector>(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); }\n    const  Eigen::Map<const IndexVector> innerNonZeros() const { return Eigen::Map<const IndexVector>(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); }\n        \n  public:\n    \n    /** \\returns the number of non zero coefficients */\n    inline Index nonZeros() const\n    {\n      if(Derived::IsVectorAtCompileTime && outerIndexPtr()==0)\n        
return derived().nonZeros();\n      else if(isCompressed())\n        return outerIndexPtr()[derived().outerSize()]-outerIndexPtr()[0];\n      else if(derived().outerSize()==0)\n        return 0;\n      else\n        return innerNonZeros().sum();\n    }\n    \n    /** \\returns a const pointer to the array of values.\n      * This function is aimed at interoperability with other libraries.\n      * \\sa innerIndexPtr(), outerIndexPtr() */\n    inline const Scalar* valuePtr() const { return derived().valuePtr(); }\n    /** \\returns a non-const pointer to the array of values.\n      * This function is aimed at interoperability with other libraries.\n      * \\sa innerIndexPtr(), outerIndexPtr() */\n    inline Scalar* valuePtr() { return derived().valuePtr(); }\n\n    /** \\returns a const pointer to the array of inner indices.\n      * This function is aimed at interoperability with other libraries.\n      * \\sa valuePtr(), outerIndexPtr() */\n    inline const StorageIndex* innerIndexPtr() const { return derived().innerIndexPtr(); }\n    /** \\returns a non-const pointer to the array of inner indices.\n      * This function is aimed at interoperability with other libraries.\n      * \\sa valuePtr(), outerIndexPtr() */\n    inline StorageIndex* innerIndexPtr() { return derived().innerIndexPtr(); }\n\n    /** \\returns a const pointer to the array of the starting positions of the inner vectors.\n      * This function is aimed at interoperability with other libraries.\n      * \\warning it returns the null pointer 0 for SparseVector\n      * \\sa valuePtr(), innerIndexPtr() */\n    inline const StorageIndex* outerIndexPtr() const { return derived().outerIndexPtr(); }\n    /** \\returns a non-const pointer to the array of the starting positions of the inner vectors.\n      * This function is aimed at interoperability with other libraries.\n      * \\warning it returns the null pointer 0 for SparseVector\n      * \\sa valuePtr(), innerIndexPtr() */\n    inline 
StorageIndex* outerIndexPtr() { return derived().outerIndexPtr(); }\n\n    /** \\returns a const pointer to the array of the number of non zeros of the inner vectors.\n      * This function is aimed at interoperability with other libraries.\n      * \\warning it returns the null pointer 0 in compressed mode */\n    inline const StorageIndex* innerNonZeroPtr() const { return derived().innerNonZeroPtr(); }\n    /** \\returns a non-const pointer to the array of the number of non zeros of the inner vectors.\n      * This function is aimed at interoperability with other libraries.\n      * \\warning it returns the null pointer 0 in compressed mode */\n    inline StorageIndex* innerNonZeroPtr() { return derived().innerNonZeroPtr(); }\n    \n    /** \\returns whether \\c *this is in compressed form. */\n    inline bool isCompressed() const { return innerNonZeroPtr()==0; }\n\n    /** \\returns a read-only view of the stored coefficients as a 1D array expression.\n      *\n      * \\warning this method is for \\b compressed \\b storage \\b only, and it will trigger an assertion otherwise.\n      *\n      * \\sa valuePtr(), isCompressed() */\n    const Map<const Array<Scalar,Dynamic,1> > coeffs() const { eigen_assert(isCompressed()); return Array<Scalar,Dynamic,1>::Map(valuePtr(),nonZeros()); }\n\n    /** \\returns a read-write view of the stored coefficients as a 1D array expression\n      *\n      * \\warning this method is for \\b compressed \\b storage \\b only, and it will trigger an assertion otherwise.\n      *\n      * Here is an example:\n      * \\include SparseMatrix_coeffs.cpp\n      * and the output is:\n      * \\include SparseMatrix_coeffs.out\n      *\n      * \\sa valuePtr(), isCompressed() */\n    Map<Array<Scalar,Dynamic,1> > coeffs() { eigen_assert(isCompressed()); return Array<Scalar,Dynamic,1>::Map(valuePtr(),nonZeros()); }\n\n  protected:\n    /** Default constructor. Do nothing. 
*/\n    SparseCompressedBase() {}\n  private:\n    template<typename OtherDerived> explicit SparseCompressedBase(const SparseCompressedBase<OtherDerived>&);\n};\n\ntemplate<typename Derived>\nclass SparseCompressedBase<Derived>::InnerIterator\n{\n  public:\n    InnerIterator()\n      : m_values(0), m_indices(0), m_outer(0), m_id(0), m_end(0)\n    {}\n\n    InnerIterator(const InnerIterator& other)\n      : m_values(other.m_values), m_indices(other.m_indices), m_outer(other.m_outer), m_id(other.m_id), m_end(other.m_end)\n    {}\n\n    InnerIterator& operator=(const InnerIterator& other)\n    {\n      m_values = other.m_values;\n      m_indices = other.m_indices;\n      const_cast<OuterType&>(m_outer).setValue(other.m_outer.value());\n      m_id = other.m_id;\n      m_end = other.m_end;\n      return *this;\n    }\n\n    InnerIterator(const SparseCompressedBase& mat, Index outer)\n      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer)\n    {\n      if(Derived::IsVectorAtCompileTime && mat.outerIndexPtr()==0)\n      {\n        m_id = 0;\n        m_end = mat.nonZeros();\n      }\n      else\n      {\n        m_id = mat.outerIndexPtr()[outer];\n        if(mat.isCompressed())\n          m_end = mat.outerIndexPtr()[outer+1];\n        else\n          m_end = m_id + mat.innerNonZeroPtr()[outer];\n      }\n    }\n\n    explicit InnerIterator(const SparseCompressedBase& mat)\n      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(0), m_id(0), m_end(mat.nonZeros())\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);\n    }\n\n    explicit InnerIterator(const internal::CompressedStorage<Scalar,StorageIndex>& data)\n      : m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_id(0), m_end(data.size())\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);\n    }\n\n    inline InnerIterator& operator++() { m_id++; return *this; }\n    inline InnerIterator& operator+=(Index i) { m_id += i ; return *this; }\n\n    
inline InnerIterator operator+(Index i) \n    { \n        InnerIterator result = *this;\n        result += i;\n        return result;\n    }\n\n    inline const Scalar& value() const { return m_values[m_id]; }\n    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }\n\n    inline StorageIndex index() const { return m_indices[m_id]; }\n    inline Index outer() const { return m_outer.value(); }\n    inline Index row() const { return IsRowMajor ? m_outer.value() : index(); }\n    inline Index col() const { return IsRowMajor ? index() : m_outer.value(); }\n\n    inline operator bool() const { return (m_id < m_end); }\n\n  protected:\n    const Scalar* m_values;\n    const StorageIndex* m_indices;\n    typedef internal::variable_if_dynamic<Index,Derived::IsVectorAtCompileTime?0:Dynamic> OuterType;\n    const OuterType m_outer;\n    Index m_id;\n    Index m_end;\n  private:\n    // If you get here, then you're not using the right InnerIterator type, e.g.:\n    //   SparseMatrix<double,RowMajor> A;\n    //   SparseMatrix<double>::InnerIterator it(A,0);\n    template<typename T> InnerIterator(const SparseMatrixBase<T>&, Index outer);\n};\n\ntemplate<typename Derived>\nclass SparseCompressedBase<Derived>::ReverseInnerIterator\n{\n  public:\n    ReverseInnerIterator(const SparseCompressedBase& mat, Index outer)\n      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer)\n    {\n      if(Derived::IsVectorAtCompileTime && mat.outerIndexPtr()==0)\n      {\n        m_start = 0;\n        m_id = mat.nonZeros();\n      }\n      else\n      {\n        m_start = mat.outerIndexPtr()[outer];\n        if(mat.isCompressed())\n          m_id = mat.outerIndexPtr()[outer+1];\n        else\n          m_id = m_start + mat.innerNonZeroPtr()[outer];\n      }\n    }\n\n    explicit ReverseInnerIterator(const SparseCompressedBase& mat)\n      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(0), m_start(0), m_id(mat.nonZeros())\n    
{\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);\n    }\n\n    explicit ReverseInnerIterator(const internal::CompressedStorage<Scalar,StorageIndex>& data)\n      : m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_start(0), m_id(data.size())\n    {\n      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);\n    }\n\n    inline ReverseInnerIterator& operator--() { --m_id; return *this; }\n    inline ReverseInnerIterator& operator-=(Index i) { m_id -= i; return *this; }\n\n    inline ReverseInnerIterator operator-(Index i) \n    {\n        ReverseInnerIterator result = *this;\n        result -= i;\n        return result;\n    }\n\n    inline const Scalar& value() const { return m_values[m_id-1]; }\n    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }\n\n    inline StorageIndex index() const { return m_indices[m_id-1]; }\n    inline Index outer() const { return m_outer.value(); }\n    inline Index row() const { return IsRowMajor ? m_outer.value() : index(); }\n    inline Index col() const { return IsRowMajor ? 
index() : m_outer.value(); }\n\n    inline operator bool() const { return (m_id > m_start); }\n\n  protected:\n    const Scalar* m_values;\n    const StorageIndex* m_indices;\n    typedef internal::variable_if_dynamic<Index,Derived::IsVectorAtCompileTime?0:Dynamic> OuterType;\n    const OuterType m_outer;\n    Index m_start;\n    Index m_id;\n};\n\nnamespace internal {\n\ntemplate<typename Derived>\nstruct evaluator<SparseCompressedBase<Derived> >\n  : evaluator_base<Derived>\n{\n  typedef typename Derived::Scalar Scalar;\n  typedef typename Derived::InnerIterator InnerIterator;\n  \n  enum {\n    CoeffReadCost = NumTraits<Scalar>::ReadCost,\n    Flags = Derived::Flags\n  };\n  \n  evaluator() : m_matrix(0), m_zero(0)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n  explicit evaluator(const Derived &mat) : m_matrix(&mat), m_zero(0)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n  \n  inline Index nonZerosEstimate() const {\n    return m_matrix->nonZeros();\n  }\n  \n  operator Derived&() { return m_matrix->const_cast_derived(); }\n  operator const Derived&() const { return *m_matrix; }\n  \n  typedef typename DenseCoeffsBase<Derived,ReadOnlyAccessors>::CoeffReturnType CoeffReturnType;\n  const Scalar& coeff(Index row, Index col) const\n  {\n    Index p = find(row,col);\n\n    if(p==Dynamic)\n      return m_zero;\n    else\n      return m_matrix->const_cast_derived().valuePtr()[p];\n  }\n\n  Scalar& coeffRef(Index row, Index col)\n  {\n    Index p = find(row,col);\n    eigen_assert(p!=Dynamic && \"written coefficient does not exist\");\n    return m_matrix->const_cast_derived().valuePtr()[p];\n  }\n\nprotected:\n\n  Index find(Index row, Index col) const\n  {\n    eigen_internal_assert(row>=0 && row<m_matrix->rows() && col>=0 && col<m_matrix->cols());\n\n    const Index outer = Derived::IsRowMajor ? row : col;\n    const Index inner = Derived::IsRowMajor ? 
col : row;\n\n    Index start = m_matrix->outerIndexPtr()[outer];\n    Index end = m_matrix->isCompressed() ? m_matrix->outerIndexPtr()[outer+1] : m_matrix->outerIndexPtr()[outer] + m_matrix->innerNonZeroPtr()[outer];\n    eigen_assert(end>=start && \"you are using a non finalized sparse matrix or written coefficient does not exist\");\n    const Index p = std::lower_bound(m_matrix->innerIndexPtr()+start, m_matrix->innerIndexPtr()+end,inner) - m_matrix->innerIndexPtr();\n\n    return ((p<end) && (m_matrix->innerIndexPtr()[p]==inner)) ? p : Dynamic;\n  }\n\n  const Derived *m_matrix;\n  const Scalar m_zero;\n};\n\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSE_COMPRESSED_BASE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseCwiseBinaryOp.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H\n#define EIGEN_SPARSE_CWISE_BINARY_OP_H\n\nnamespace Eigen { \n\n// Here we have to handle 3 cases:\n//  1 - sparse op dense\n//  2 - dense op sparse\n//  3 - sparse op sparse\n// We also need to implement a 4th iterator for:\n//  4 - dense op dense\n// Finally, we also need to distinguish between the product and other operations :\n//                configuration      returned mode\n//  1 - sparse op dense    product      sparse\n//                         generic      dense\n//  2 - dense op sparse    product      sparse\n//                         generic      dense\n//  3 - sparse op sparse   product      sparse\n//                         generic      sparse\n//  4 - dense op dense     product      dense\n//                         generic      dense\n//\n// TODO to ease compiler job, we could specialize product/quotient with a scalar\n//      and fallback to cwise-unary evaluator using bind1st_op and bind2nd_op.\n\ntemplate<typename BinaryOp, typename Lhs, typename Rhs>\nclass CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>\n  : public SparseMatrixBase<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >\n{\n  public:\n    typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;\n    typedef SparseMatrixBase<Derived> Base;\n    EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)\n    CwiseBinaryOpImpl()\n    {\n      EIGEN_STATIC_ASSERT((\n                (!internal::is_same<typename internal::traits<Lhs>::StorageKind,\n                                    typename internal::traits<Rhs>::StorageKind>::value)\n            ||  ((internal::evaluator<Lhs>::Flags&RowMajorBit) == 
(internal::evaluator<Rhs>::Flags&RowMajorBit))),\n            THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH);\n    }\n};\n\nnamespace internal {\n\n  \n// Generic \"sparse OP sparse\"\ntemplate<typename XprType> struct binary_sparse_evaluator;\n\ntemplate<typename BinaryOp, typename Lhs, typename Rhs>\nstruct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IteratorBased, IteratorBased>\n  : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >\n{\nprotected:\n  typedef typename evaluator<Lhs>::InnerIterator  LhsIterator;\n  typedef typename evaluator<Rhs>::InnerIterator  RhsIterator;\n  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;\n  typedef typename traits<XprType>::Scalar Scalar;\n  typedef typename XprType::StorageIndex StorageIndex;\npublic:\n\n  class InnerIterator\n  {\n  public:\n    \n    EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)\n      : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor)\n    {\n      this->operator++();\n    }\n\n    EIGEN_STRONG_INLINE InnerIterator& operator++()\n    {\n      if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))\n      {\n        m_id = m_lhsIter.index();\n        m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());\n        ++m_lhsIter;\n        ++m_rhsIter;\n      }\n      else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index())))\n      {\n        m_id = m_lhsIter.index();\n        m_value = m_functor(m_lhsIter.value(), Scalar(0));\n        ++m_lhsIter;\n      }\n      else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index())))\n      {\n        m_id = m_rhsIter.index();\n        m_value = m_functor(Scalar(0), m_rhsIter.value());\n        ++m_rhsIter;\n      }\n      else\n      {\n        m_value = 0; // this is to avoid a compilation warning\n        m_id = -1;\n      }\n      return *this;\n    }\n\n    EIGEN_STRONG_INLINE Scalar value() const { return m_value; }\n\n    
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }\n    EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }\n    EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }\n    EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }\n\n    EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }\n\n  protected:\n    LhsIterator m_lhsIter;\n    RhsIterator m_rhsIter;\n    const BinaryOp& m_functor;\n    Scalar m_value;\n    StorageIndex m_id;\n  };\n  \n  \n  enum {\n    CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,\n    Flags = XprType::Flags\n  };\n  \n  explicit binary_evaluator(const XprType& xpr)\n    : m_functor(xpr.functor()),\n      m_lhsImpl(xpr.lhs()), \n      m_rhsImpl(xpr.rhs())  \n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n  \n  inline Index nonZerosEstimate() const {\n    return m_lhsImpl.nonZerosEstimate() + m_rhsImpl.nonZerosEstimate();\n  }\n\nprotected:\n  const BinaryOp m_functor;\n  evaluator<Lhs> m_lhsImpl;\n  evaluator<Rhs> m_rhsImpl;\n};\n\n// dense op sparse\ntemplate<typename BinaryOp, typename Lhs, typename Rhs>\nstruct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IteratorBased>\n  : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >\n{\nprotected:\n  typedef typename evaluator<Rhs>::InnerIterator  RhsIterator;\n  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;\n  typedef typename traits<XprType>::Scalar Scalar;\n  typedef typename XprType::StorageIndex StorageIndex;\npublic:\n\n  class InnerIterator\n  {\n    enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };\n  public:\n\n    EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)\n      : m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), 
m_functor(aEval.m_functor), m_value(0), m_id(-1), m_innerSize(aEval.m_expr.rhs().innerSize())\n    {\n      this->operator++();\n    }\n\n    EIGEN_STRONG_INLINE InnerIterator& operator++()\n    {\n      ++m_id;\n      if(m_id<m_innerSize)\n      {\n        Scalar lhsVal = m_lhsEval.coeff(IsRowMajor?m_rhsIter.outer():m_id,\n                                        IsRowMajor?m_id:m_rhsIter.outer());\n        if(m_rhsIter && m_rhsIter.index()==m_id)\n        {\n          m_value = m_functor(lhsVal, m_rhsIter.value());\n          ++m_rhsIter;\n        }\n        else\n          m_value = m_functor(lhsVal, Scalar(0));\n      }\n\n      return *this;\n    }\n\n    EIGEN_STRONG_INLINE Scalar value() const { eigen_internal_assert(m_id<m_innerSize); return m_value; }\n\n    EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }\n    EIGEN_STRONG_INLINE Index outer() const { return m_rhsIter.outer(); }\n    EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_rhsIter.outer() : m_id; }\n    EIGEN_STRONG_INLINE Index col() const { return IsRowMajor ? 
m_id : m_rhsIter.outer(); }\n\n    EIGEN_STRONG_INLINE operator bool() const { return m_id<m_innerSize; }\n\n  protected:\n    const evaluator<Lhs> &m_lhsEval;\n    RhsIterator m_rhsIter;\n    const BinaryOp& m_functor;\n    Scalar m_value;\n    StorageIndex m_id;\n    StorageIndex m_innerSize;\n  };\n\n\n  enum {\n    CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,\n    Flags = XprType::Flags\n  };\n\n  explicit binary_evaluator(const XprType& xpr)\n    : m_functor(xpr.functor()),\n      m_lhsImpl(xpr.lhs()),\n      m_rhsImpl(xpr.rhs()),\n      m_expr(xpr)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n\n  inline Index nonZerosEstimate() const {\n    return m_expr.size();\n  }\n\nprotected:\n  const BinaryOp m_functor;\n  evaluator<Lhs> m_lhsImpl;\n  evaluator<Rhs> m_rhsImpl;\n  const XprType &m_expr;\n};\n\n// sparse op dense\ntemplate<typename BinaryOp, typename Lhs, typename Rhs>\nstruct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IteratorBased, IndexBased>\n  : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >\n{\nprotected:\n  typedef typename evaluator<Lhs>::InnerIterator  LhsIterator;\n  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;\n  typedef typename traits<XprType>::Scalar Scalar;\n  typedef typename XprType::StorageIndex StorageIndex;\npublic:\n\n  class InnerIterator\n  {\n    enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };\n  public:\n\n    EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)\n      : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_value(0), m_id(-1), m_innerSize(aEval.m_expr.lhs().innerSize())\n    {\n      this->operator++();\n    }\n\n    EIGEN_STRONG_INLINE InnerIterator& operator++()\n    {\n      ++m_id;\n      if(m_id<m_innerSize)\n      {\n        Scalar rhsVal = 
m_rhsEval.coeff(IsRowMajor?m_lhsIter.outer():m_id,\n                                        IsRowMajor?m_id:m_lhsIter.outer());\n        if(m_lhsIter && m_lhsIter.index()==m_id)\n        {\n          m_value = m_functor(m_lhsIter.value(), rhsVal);\n          ++m_lhsIter;\n        }\n        else\n          m_value = m_functor(Scalar(0),rhsVal);\n      }\n\n      return *this;\n    }\n\n    EIGEN_STRONG_INLINE Scalar value() const { eigen_internal_assert(m_id<m_innerSize); return m_value; }\n\n    EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }\n    EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }\n    EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_lhsIter.outer() : m_id; }\n    EIGEN_STRONG_INLINE Index col() const { return IsRowMajor ? m_id : m_lhsIter.outer(); }\n\n    EIGEN_STRONG_INLINE operator bool() const { return m_id<m_innerSize; }\n\n  protected:\n    LhsIterator m_lhsIter;\n    const evaluator<Rhs> &m_rhsEval;\n    const BinaryOp& m_functor;\n    Scalar m_value;\n    StorageIndex m_id;\n    StorageIndex m_innerSize;\n  };\n\n\n  enum {\n    CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,\n    Flags = XprType::Flags\n  };\n\n  explicit binary_evaluator(const XprType& xpr)\n    : m_functor(xpr.functor()),\n      m_lhsImpl(xpr.lhs()),\n      m_rhsImpl(xpr.rhs()),\n      m_expr(xpr)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n\n  inline Index nonZerosEstimate() const {\n    return m_expr.size();\n  }\n\nprotected:\n  const BinaryOp m_functor;\n  evaluator<Lhs> m_lhsImpl;\n  evaluator<Rhs> m_rhsImpl;\n  const XprType &m_expr;\n};\n\ntemplate<typename T,\n         typename LhsKind   = typename evaluator_traits<typename T::Lhs>::Kind,\n         typename RhsKind   = typename evaluator_traits<typename T::Rhs>::Kind,\n         typename LhsScalar = typename 
traits<typename T::Lhs>::Scalar,\n         typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct sparse_conjunction_evaluator;\n\n// \"sparse .* sparse\"\ntemplate<typename T1, typename T2, typename Lhs, typename Rhs>\nstruct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IteratorBased, IteratorBased>\n  : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >\n{\n  typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;\n  typedef sparse_conjunction_evaluator<XprType> Base;\n  explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}\n};\n// \"dense .* sparse\"\ntemplate<typename T1, typename T2, typename Lhs, typename Rhs>\nstruct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IndexBased, IteratorBased>\n  : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >\n{\n  typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;\n  typedef sparse_conjunction_evaluator<XprType> Base;\n  explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}\n};\n// \"sparse .* dense\"\ntemplate<typename T1, typename T2, typename Lhs, typename Rhs>\nstruct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IteratorBased, IndexBased>\n  : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >\n{\n  typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;\n  typedef sparse_conjunction_evaluator<XprType> Base;\n  explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}\n};\n\n// \"sparse ./ dense\"\ntemplate<typename T1, typename T2, typename Lhs, typename Rhs>\nstruct binary_evaluator<CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs>, IteratorBased, IndexBased>\n  : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs> >\n{\n  typedef CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs> XprType;\n  typedef sparse_conjunction_evaluator<XprType> Base;\n  explicit 
binary_evaluator(const XprType& xpr) : Base(xpr) {}\n};\n\n// \"sparse && sparse\"\ntemplate<typename Lhs, typename Rhs>\nstruct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IteratorBased, IteratorBased>\n  : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >\n{\n  typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;\n  typedef sparse_conjunction_evaluator<XprType> Base;\n  explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}\n};\n// \"dense && sparse\"\ntemplate<typename Lhs, typename Rhs>\nstruct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IndexBased, IteratorBased>\n  : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >\n{\n  typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;\n  typedef sparse_conjunction_evaluator<XprType> Base;\n  explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}\n};\n// \"sparse && dense\"\ntemplate<typename Lhs, typename Rhs>\nstruct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IteratorBased, IndexBased>\n  : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >\n{\n  typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;\n  typedef sparse_conjunction_evaluator<XprType> Base;\n  explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}\n};\n\n// \"sparse ^ sparse\"\ntemplate<typename XprType>\nstruct sparse_conjunction_evaluator<XprType, IteratorBased, IteratorBased>\n  : evaluator_base<XprType>\n{\nprotected:\n  typedef typename XprType::Functor BinaryOp;\n  typedef typename XprType::Lhs LhsArg;\n  typedef typename XprType::Rhs RhsArg;\n  typedef typename evaluator<LhsArg>::InnerIterator  LhsIterator;\n  typedef typename evaluator<RhsArg>::InnerIterator  RhsIterator;\n  typedef typename XprType::StorageIndex StorageIndex;\n  typedef typename traits<XprType>::Scalar Scalar;\npublic:\n\n  class InnerIterator\n  {\n  public:\n    \n    
EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)\n      : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor)\n    {\n      while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))\n      {\n        if (m_lhsIter.index() < m_rhsIter.index())\n          ++m_lhsIter;\n        else\n          ++m_rhsIter;\n      }\n    }\n\n    EIGEN_STRONG_INLINE InnerIterator& operator++()\n    {\n      ++m_lhsIter;\n      ++m_rhsIter;\n      while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))\n      {\n        if (m_lhsIter.index() < m_rhsIter.index())\n          ++m_lhsIter;\n        else\n          ++m_rhsIter;\n      }\n      return *this;\n    }\n    \n    EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }\n\n    EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); }\n    EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }\n    EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }\n    EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }\n\n    EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }\n\n  protected:\n    LhsIterator m_lhsIter;\n    RhsIterator m_rhsIter;\n    const BinaryOp& m_functor;\n  };\n  \n  \n  enum {\n    CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,\n    Flags = XprType::Flags\n  };\n  \n  explicit sparse_conjunction_evaluator(const XprType& xpr)\n    : m_functor(xpr.functor()),\n      m_lhsImpl(xpr.lhs()), \n      m_rhsImpl(xpr.rhs())  \n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n  \n  inline Index nonZerosEstimate() const {\n    return (std::min)(m_lhsImpl.nonZerosEstimate(), m_rhsImpl.nonZerosEstimate());\n  }\n\nprotected:\n  const 
BinaryOp m_functor;\n  evaluator<LhsArg> m_lhsImpl;\n  evaluator<RhsArg> m_rhsImpl;\n};\n\n// \"dense ^ sparse\"\ntemplate<typename XprType>\nstruct sparse_conjunction_evaluator<XprType, IndexBased, IteratorBased>\n  : evaluator_base<XprType>\n{\nprotected:\n  typedef typename XprType::Functor BinaryOp;\n  typedef typename XprType::Lhs LhsArg;\n  typedef typename XprType::Rhs RhsArg;\n  typedef evaluator<LhsArg> LhsEvaluator;\n  typedef typename evaluator<RhsArg>::InnerIterator  RhsIterator;\n  typedef typename XprType::StorageIndex StorageIndex;\n  typedef typename traits<XprType>::Scalar Scalar;\npublic:\n\n  class InnerIterator\n  {\n    enum { IsRowMajor = (int(RhsArg::Flags)&RowMajorBit)==RowMajorBit };\n\n  public:\n    \n    EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)\n      : m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_outer(outer)\n    {}\n\n    EIGEN_STRONG_INLINE InnerIterator& operator++()\n    {\n      ++m_rhsIter;\n      return *this;\n    }\n\n    EIGEN_STRONG_INLINE Scalar value() const\n    { return m_functor(m_lhsEval.coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }\n\n    EIGEN_STRONG_INLINE StorageIndex index() const { return m_rhsIter.index(); }\n    EIGEN_STRONG_INLINE Index outer() const { return m_rhsIter.outer(); }\n    EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }\n    EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }\n\n    EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }\n    \n  protected:\n    const LhsEvaluator &m_lhsEval;\n    RhsIterator m_rhsIter;\n    const BinaryOp& m_functor;\n    const Index m_outer;\n  };\n  \n  \n  enum {\n    CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,\n    Flags = XprType::Flags\n  };\n  \n  explicit sparse_conjunction_evaluator(const 
XprType& xpr)\n    : m_functor(xpr.functor()),\n      m_lhsImpl(xpr.lhs()), \n      m_rhsImpl(xpr.rhs())  \n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n  \n  inline Index nonZerosEstimate() const {\n    return m_rhsImpl.nonZerosEstimate();\n  }\n\nprotected:\n  const BinaryOp m_functor;\n  evaluator<LhsArg> m_lhsImpl;\n  evaluator<RhsArg> m_rhsImpl;\n};\n\n// \"sparse ^ dense\"\ntemplate<typename XprType>\nstruct sparse_conjunction_evaluator<XprType, IteratorBased, IndexBased>\n  : evaluator_base<XprType>\n{\nprotected:\n  typedef typename XprType::Functor BinaryOp;\n  typedef typename XprType::Lhs LhsArg;\n  typedef typename XprType::Rhs RhsArg;\n  typedef typename evaluator<LhsArg>::InnerIterator LhsIterator;\n  typedef evaluator<RhsArg> RhsEvaluator;\n  typedef typename XprType::StorageIndex StorageIndex;\n  typedef typename traits<XprType>::Scalar Scalar;\npublic:\n\n  class InnerIterator\n  {\n    enum { IsRowMajor = (int(LhsArg::Flags)&RowMajorBit)==RowMajorBit };\n\n  public:\n    \n    EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)\n      : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_outer(outer)\n    {}\n\n    EIGEN_STRONG_INLINE InnerIterator& operator++()\n    {\n      ++m_lhsIter;\n      return *this;\n    }\n\n    EIGEN_STRONG_INLINE Scalar value() const\n    { return m_functor(m_lhsIter.value(),\n                       m_rhsEval.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }\n\n    EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); }\n    EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }\n    EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }\n    EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }\n\n    EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; 
}\n    \n  protected:\n    LhsIterator m_lhsIter;\n    const evaluator<RhsArg> &m_rhsEval;\n    const BinaryOp& m_functor;\n    const Index m_outer;\n  };\n  \n  \n  enum {\n    CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,\n    Flags = XprType::Flags\n  };\n  \n  explicit sparse_conjunction_evaluator(const XprType& xpr)\n    : m_functor(xpr.functor()),\n      m_lhsImpl(xpr.lhs()), \n      m_rhsImpl(xpr.rhs())  \n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n  \n  inline Index nonZerosEstimate() const {\n    return m_lhsImpl.nonZerosEstimate();\n  }\n\nprotected:\n  const BinaryOp m_functor;\n  evaluator<LhsArg> m_lhsImpl;\n  evaluator<RhsArg> m_rhsImpl;\n};\n\n}\n\n/***************************************************************************\n* Implementation of SparseMatrixBase and SparseCwise functions/operators\n***************************************************************************/\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nDerived& SparseMatrixBase<Derived>::operator+=(const EigenBase<OtherDerived> &other)\n{\n  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());\n  return derived();\n}\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nDerived& SparseMatrixBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)\n{\n  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());\n  return derived();\n}\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_STRONG_INLINE Derived &\nSparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)\n{\n  return derived() = derived() - other.derived();\n}\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_STRONG_INLINE Derived 
&\nSparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)\n{\n  return derived() = derived() + other.derived();\n}\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nDerived& SparseMatrixBase<Derived>::operator+=(const DiagonalBase<OtherDerived>& other)\n{\n  call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());\n  return derived();\n}\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nDerived& SparseMatrixBase<Derived>::operator-=(const DiagonalBase<OtherDerived>& other)\n{\n  call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());\n  return derived();\n}\n    \ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nEIGEN_STRONG_INLINE const typename SparseMatrixBase<Derived>::template CwiseProductDenseReturnType<OtherDerived>::Type\nSparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const\n{\n  return typename CwiseProductDenseReturnType<OtherDerived>::Type(derived(), other.derived());\n}\n\ntemplate<typename DenseDerived, typename SparseDerived>\nEIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>\noperator+(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)\n{\n  return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());\n}\n\ntemplate<typename SparseDerived, typename DenseDerived>\nEIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>\noperator+(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)\n{\n  return CwiseBinaryOp<internal::scalar_sum_op<typename 
SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());\n}\n\ntemplate<typename DenseDerived, typename SparseDerived>\nEIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>\noperator-(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)\n{\n  return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());\n}\n\ntemplate<typename SparseDerived, typename DenseDerived>\nEIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>\noperator-(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)\n{\n  return CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseCwiseUnaryOp.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H\n#define EIGEN_SPARSE_CWISE_UNARY_OP_H\n\nnamespace Eigen { \n\nnamespace internal {\n  \ntemplate<typename UnaryOp, typename ArgType>\nstruct unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>\n  : public evaluator_base<CwiseUnaryOp<UnaryOp,ArgType> >\n{\n  public:\n    typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;\n\n    class InnerIterator;\n    \n    enum {\n      CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,\n      Flags = XprType::Flags\n    };\n    \n    explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression())\n    {\n      EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);\n      EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n    }\n    \n    inline Index nonZerosEstimate() const {\n      return m_argImpl.nonZerosEstimate();\n    }\n\n  protected:\n    typedef typename evaluator<ArgType>::InnerIterator        EvalIterator;\n    \n    const UnaryOp m_functor;\n    evaluator<ArgType> m_argImpl;\n};\n\ntemplate<typename UnaryOp, typename ArgType>\nclass unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::InnerIterator\n    : public unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator\n{\n    typedef typename XprType::Scalar Scalar;\n    typedef typename unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator Base;\n  public:\n\n    EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)\n      : Base(unaryOp.m_argImpl,outer), 
m_functor(unaryOp.m_functor)\n    {}\n\n    EIGEN_STRONG_INLINE InnerIterator& operator++()\n    { Base::operator++(); return *this; }\n\n    EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }\n\n  protected:\n    const UnaryOp m_functor;\n  private:\n    Scalar& valueRef();\n};\n\ntemplate<typename ViewOp, typename ArgType>\nstruct unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>\n  : public evaluator_base<CwiseUnaryView<ViewOp,ArgType> >\n{\n  public:\n    typedef CwiseUnaryView<ViewOp, ArgType> XprType;\n\n    class InnerIterator;\n    \n    enum {\n      CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<ViewOp>::Cost,\n      Flags = XprType::Flags\n    };\n    \n    explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression())\n    {\n      EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<ViewOp>::Cost);\n      EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n    }\n\n  protected:\n    typedef typename evaluator<ArgType>::InnerIterator        EvalIterator;\n    \n    const ViewOp m_functor;\n    evaluator<ArgType> m_argImpl;\n};\n\ntemplate<typename ViewOp, typename ArgType>\nclass unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::InnerIterator\n    : public unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator\n{\n    typedef typename XprType::Scalar Scalar;\n    typedef typename unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator Base;\n  public:\n\n    EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)\n      : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)\n    {}\n\n    EIGEN_STRONG_INLINE InnerIterator& operator++()\n    { Base::operator++(); return *this; }\n\n    EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }\n    EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(Base::valueRef()); }\n\n  protected:\n    const ViewOp 
m_functor;\n};\n\n} // end namespace internal\n\ntemplate<typename Derived>\nEIGEN_STRONG_INLINE Derived&\nSparseMatrixBase<Derived>::operator*=(const Scalar& other)\n{\n  typedef typename internal::evaluator<Derived>::InnerIterator EvalIterator;\n  internal::evaluator<Derived> thisEval(derived());\n  for (Index j=0; j<outerSize(); ++j)\n    for (EvalIterator i(thisEval,j); i; ++i)\n      i.valueRef() *= other;\n  return derived();\n}\n\ntemplate<typename Derived>\nEIGEN_STRONG_INLINE Derived&\nSparseMatrixBase<Derived>::operator/=(const Scalar& other)\n{\n  typedef typename internal::evaluator<Derived>::InnerIterator EvalIterator;\n  internal::evaluator<Derived> thisEval(derived());\n  for (Index j=0; j<outerSize(); ++j)\n    for (EvalIterator i(thisEval,j); i; ++i)\n      i.valueRef() /= other;\n  return derived();\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSE_CWISE_UNARY_OP_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseDenseProduct.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSEDENSEPRODUCT_H\n#define EIGEN_SPARSEDENSEPRODUCT_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };\ntemplate <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };\n\ntemplate<typename SparseLhsType, typename DenseRhsType, typename DenseResType,\n         typename AlphaType,\n         int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,\n         bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>\nstruct sparse_time_dense_product_impl;\n\ntemplate<typename SparseLhsType, typename DenseRhsType, typename DenseResType>\nstruct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>\n{\n  typedef typename internal::remove_all<SparseLhsType>::type Lhs;\n  typedef typename internal::remove_all<DenseRhsType>::type Rhs;\n  typedef typename internal::remove_all<DenseResType>::type Res;\n  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;\n  typedef evaluator<Lhs> LhsEval;\n  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)\n  {\n    LhsEval lhsEval(lhs);\n    \n    Index n = lhs.outerSize();\n#ifdef EIGEN_HAS_OPENMP\n    Eigen::initParallel();\n    Index threads = Eigen::nbThreads();\n#endif\n    \n    for(Index c=0; c<rhs.cols(); ++c)\n    {\n#ifdef EIGEN_HAS_OPENMP\n      // This 20000 threshold has 
been found experimentally on 2D and 3D Poisson problems.\n      // It basically represents the minimal amount of work to be done to be worth it.\n      if(threads>1 && lhsEval.nonZerosEstimate() > 20000)\n      {\n        #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)\n        for(Index i=0; i<n; ++i)\n          processRow(lhsEval,rhs,res,alpha,i,c);\n      }\n      else\n#endif\n      {\n        for(Index i=0; i<n; ++i)\n          processRow(lhsEval,rhs,res,alpha,i,c);\n      }\n    }\n  }\n  \n  static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)\n  {\n    typename Res::Scalar tmp(0);\n    for(LhsInnerIterator it(lhsEval,i); it ;++it)\n      tmp += it.value() * rhs.coeff(it.index(),col);\n    res.coeffRef(i,col) += alpha * tmp;\n  }\n  \n};\n\n// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?\n// -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators\n// template<typename T1, typename T2/*, int _Options, typename _StrideType*/>\n// struct ScalarBinaryOpTraits<T1, Ref<T2/*, _Options, _StrideType*/> >\n// {\n//   enum {\n//     Defined = 1\n//   };\n//   typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;\n// };\n\ntemplate<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>\nstruct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>\n{\n  typedef typename internal::remove_all<SparseLhsType>::type Lhs;\n  typedef typename internal::remove_all<DenseRhsType>::type Rhs;\n  typedef typename internal::remove_all<DenseResType>::type Res;\n  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;\n  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const 
AlphaType& alpha)\n  {\n    evaluator<Lhs> lhsEval(lhs);\n    for(Index c=0; c<rhs.cols(); ++c)\n    {\n      for(Index j=0; j<lhs.outerSize(); ++j)\n      {\n//        typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);\n        typename ScalarBinaryOpTraits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));\n        for(LhsInnerIterator it(lhsEval,j); it ;++it)\n          res.coeffRef(it.index(),c) += it.value() * rhs_j;\n      }\n    }\n  }\n};\n\ntemplate<typename SparseLhsType, typename DenseRhsType, typename DenseResType>\nstruct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>\n{\n  typedef typename internal::remove_all<SparseLhsType>::type Lhs;\n  typedef typename internal::remove_all<DenseRhsType>::type Rhs;\n  typedef typename internal::remove_all<DenseResType>::type Res;\n  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;\n  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)\n  {\n    evaluator<Lhs> lhsEval(lhs);\n    for(Index j=0; j<lhs.outerSize(); ++j)\n    {\n      typename Res::RowXpr res_j(res.row(j));\n      for(LhsInnerIterator it(lhsEval,j); it ;++it)\n        res_j += (alpha*it.value()) * rhs.row(it.index());\n    }\n  }\n};\n\ntemplate<typename SparseLhsType, typename DenseRhsType, typename DenseResType>\nstruct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>\n{\n  typedef typename internal::remove_all<SparseLhsType>::type Lhs;\n  typedef typename internal::remove_all<DenseRhsType>::type Rhs;\n  typedef typename internal::remove_all<DenseResType>::type Res;\n  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;\n  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)\n  {\n    evaluator<Lhs> lhsEval(lhs);\n    
for(Index j=0; j<lhs.outerSize(); ++j)\n    {\n      typename Rhs::ConstRowXpr rhs_j(rhs.row(j));\n      for(LhsInnerIterator it(lhsEval,j); it ;++it)\n        res.row(it.index()) += (alpha*it.value()) * rhs_j;\n    }\n  }\n};\n\ntemplate<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>\ninline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)\n{\n  sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);\n}\n\n} // end namespace internal\n\nnamespace internal {\n\ntemplate<typename Lhs, typename Rhs, int ProductType>\nstruct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>\n : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >\n{\n  typedef typename Product<Lhs,Rhs>::Scalar Scalar;\n  \n  template<typename Dest>\n  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)\n  {\n    typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? 1 : Rhs::ColsAtCompileTime>::type LhsNested;\n    typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==0) ? 
1 : Dynamic>::type RhsNested;\n    LhsNested lhsNested(lhs);\n    RhsNested rhsNested(rhs);\n    internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductType>\nstruct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>\n  : generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>\n{};\n\ntemplate<typename Lhs, typename Rhs, int ProductType>\nstruct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>\n  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >\n{\n  typedef typename Product<Lhs,Rhs>::Scalar Scalar;\n  \n  template<typename Dst>\n  static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)\n  {\n    typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? Dynamic : 1>::type LhsNested;\n    typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==RowMajorBit) ? 
1 : Lhs::RowsAtCompileTime>::type RhsNested;\n    LhsNested lhsNested(lhs);\n    RhsNested rhsNested(rhs);\n    \n    // transpose everything\n    Transpose<Dst> dstT(dst);\n    internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductType>\nstruct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>\n  : generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>\n{};\n\ntemplate<typename LhsT, typename RhsT, bool NeedToTranspose>\nstruct sparse_dense_outer_product_evaluator\n{\nprotected:\n  typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;\n  typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;\n  typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;\n  \n  // if the actual left-hand side is a dense vector,\n  // then build a sparse-view so that we can seamlessly iterate over it.\n  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,\n            Lhs1, SparseView<Lhs1> >::type ActualLhs;\n  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,\n            Lhs1 const&, SparseView<Lhs1> >::type LhsArg;\n            \n  typedef evaluator<ActualLhs> LhsEval;\n  typedef evaluator<ActualRhs> RhsEval;\n  typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;\n  typedef typename ProdXprType::Scalar Scalar;\n  \npublic:\n  enum {\n    Flags = NeedToTranspose ? 
RowMajorBit : 0,\n    CoeffReadCost = HugeCost\n  };\n  \n  class InnerIterator : public LhsIterator\n  {\n  public:\n    InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)\n      : LhsIterator(xprEval.m_lhsXprImpl, 0),\n        m_outer(outer),\n        m_empty(false),\n        m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))\n    {}\n    \n    EIGEN_STRONG_INLINE Index outer() const { return m_outer; }\n    EIGEN_STRONG_INLINE Index row()   const { return NeedToTranspose ? m_outer : LhsIterator::index(); }\n    EIGEN_STRONG_INLINE Index col()   const { return NeedToTranspose ? LhsIterator::index() : m_outer; }\n\n    EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }\n    EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }\n    \n  protected:\n    Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const\n    {\n      return rhs.coeff(outer);\n    }\n    \n    Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())\n    {\n      typename RhsEval::InnerIterator it(rhs, outer);\n      if (it && it.index()==0 && it.value()!=Scalar(0))\n        return it.value();\n      m_empty = true;\n      return Scalar(0);\n    }\n    \n    Index m_outer;\n    bool m_empty;\n    Scalar m_factor;\n  };\n  \n  sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)\n     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n  \n  // transpose case\n  sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)\n     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n    \nprotected:\n  const LhsArg m_lhs;\n  evaluator<ActualLhs> m_lhsXprImpl;\n  evaluator<ActualRhs> m_rhsXprImpl;\n};\n\n// sparse * dense outer product\ntemplate<typename Lhs, typename 
Rhs>\nstruct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape>\n  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>\n{\n  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;\n  \n  typedef Product<Lhs, Rhs> XprType;\n  typedef typename XprType::PlainObject PlainObject;\n\n  explicit product_evaluator(const XprType& xpr)\n    : Base(xpr.lhs(), xpr.rhs())\n  {}\n  \n};\n\ntemplate<typename Lhs, typename Rhs>\nstruct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape>\n  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>\n{\n  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;\n  \n  typedef Product<Lhs, Rhs> XprType;\n  typedef typename XprType::PlainObject PlainObject;\n\n  explicit product_evaluator(const XprType& xpr)\n    : Base(xpr.lhs(), xpr.rhs())\n  {}\n  \n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSEDENSEPRODUCT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseDiagonalProduct.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H\n#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H\n\nnamespace Eigen { \n\n// The product of a diagonal matrix with a sparse matrix can be easily\n// implemented using expression template.\n// We have two consider very different cases:\n// 1 - diag * row-major sparse\n//     => each inner vector <=> scalar * sparse vector product\n//     => so we can reuse CwiseUnaryOp::InnerIterator\n// 2 - diag * col-major sparse\n//     => each inner vector <=> densevector * sparse vector cwise product\n//     => again, we can reuse specialization of CwiseBinaryOp::InnerIterator\n//        for that particular case\n// The two other cases are symmetric.\n\nnamespace internal {\n\nenum {\n  SDP_AsScalarProduct,\n  SDP_AsCwiseProduct\n};\n  \ntemplate<typename SparseXprType, typename DiagonalCoeffType, int SDP_Tag>\nstruct sparse_diagonal_product_evaluator;\n\ntemplate<typename Lhs, typename Rhs, int ProductTag>\nstruct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, DiagonalShape, SparseShape>\n  : public sparse_diagonal_product_evaluator<Rhs, typename Lhs::DiagonalVectorType, Rhs::Flags&RowMajorBit?SDP_AsScalarProduct:SDP_AsCwiseProduct>\n{\n  typedef Product<Lhs, Rhs, DefaultProduct> XprType;\n  enum { CoeffReadCost = HugeCost, Flags = Rhs::Flags&RowMajorBit, Alignment = 0 }; // FIXME CoeffReadCost & Flags\n  \n  typedef sparse_diagonal_product_evaluator<Rhs, typename Lhs::DiagonalVectorType, Rhs::Flags&RowMajorBit?SDP_AsScalarProduct:SDP_AsCwiseProduct> Base;\n  explicit product_evaluator(const XprType& xpr) : Base(xpr.rhs(), 
xpr.lhs().diagonal()) {}\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductTag>\nstruct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, SparseShape, DiagonalShape>\n  : public sparse_diagonal_product_evaluator<Lhs, Transpose<const typename Rhs::DiagonalVectorType>, Lhs::Flags&RowMajorBit?SDP_AsCwiseProduct:SDP_AsScalarProduct>\n{\n  typedef Product<Lhs, Rhs, DefaultProduct> XprType;\n  enum { CoeffReadCost = HugeCost, Flags = Lhs::Flags&RowMajorBit, Alignment = 0 }; // FIXME CoeffReadCost & Flags\n  \n  typedef sparse_diagonal_product_evaluator<Lhs, Transpose<const typename Rhs::DiagonalVectorType>, Lhs::Flags&RowMajorBit?SDP_AsCwiseProduct:SDP_AsScalarProduct> Base;\n  explicit product_evaluator(const XprType& xpr) : Base(xpr.lhs(), xpr.rhs().diagonal().transpose()) {}\n};\n\ntemplate<typename SparseXprType, typename DiagonalCoeffType>\nstruct sparse_diagonal_product_evaluator<SparseXprType, DiagonalCoeffType, SDP_AsScalarProduct>\n{\nprotected:\n  typedef typename evaluator<SparseXprType>::InnerIterator SparseXprInnerIterator;\n  typedef typename SparseXprType::Scalar Scalar;\n  \npublic:\n  class InnerIterator : public SparseXprInnerIterator\n  {\n  public:\n    InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer)\n      : SparseXprInnerIterator(xprEval.m_sparseXprImpl, outer),\n        m_coeff(xprEval.m_diagCoeffImpl.coeff(outer))\n    {}\n    \n    EIGEN_STRONG_INLINE Scalar value() const { return m_coeff * SparseXprInnerIterator::value(); }\n  protected:\n    typename DiagonalCoeffType::Scalar m_coeff;\n  };\n  \n  sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagonalCoeffType &diagCoeff)\n    : m_sparseXprImpl(sparseXpr), m_diagCoeffImpl(diagCoeff)\n  {}\n\n  Index nonZerosEstimate() const { return m_sparseXprImpl.nonZerosEstimate(); }\n    \nprotected:\n  evaluator<SparseXprType> m_sparseXprImpl;\n  evaluator<DiagonalCoeffType> m_diagCoeffImpl;\n};\n\n\ntemplate<typename SparseXprType, 
typename DiagCoeffType>\nstruct sparse_diagonal_product_evaluator<SparseXprType, DiagCoeffType, SDP_AsCwiseProduct>\n{\n  typedef typename SparseXprType::Scalar Scalar;\n  typedef typename SparseXprType::StorageIndex StorageIndex;\n  \n  typedef typename nested_eval<DiagCoeffType,SparseXprType::IsRowMajor ? SparseXprType::RowsAtCompileTime\n                                                                       : SparseXprType::ColsAtCompileTime>::type DiagCoeffNested;\n  \n  class InnerIterator\n  {\n    typedef typename evaluator<SparseXprType>::InnerIterator SparseXprIter;\n  public:\n    InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer)\n      : m_sparseIter(xprEval.m_sparseXprEval, outer), m_diagCoeffNested(xprEval.m_diagCoeffNested)\n    {}\n    \n    inline Scalar value() const { return m_sparseIter.value() * m_diagCoeffNested.coeff(index()); }\n    inline StorageIndex index() const  { return m_sparseIter.index(); }\n    inline Index outer() const  { return m_sparseIter.outer(); }\n    inline Index col() const    { return SparseXprType::IsRowMajor ? m_sparseIter.index() : m_sparseIter.outer(); }\n    inline Index row() const    { return SparseXprType::IsRowMajor ? m_sparseIter.outer() : m_sparseIter.index(); }\n    \n    EIGEN_STRONG_INLINE InnerIterator& operator++() { ++m_sparseIter; return *this; }\n    inline operator bool() const  { return m_sparseIter; }\n    \n  protected:\n    SparseXprIter m_sparseIter;\n    DiagCoeffNested m_diagCoeffNested;\n  };\n  \n  sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagCoeffType &diagCoeff)\n    : m_sparseXprEval(sparseXpr), m_diagCoeffNested(diagCoeff)\n  {}\n\n  Index nonZerosEstimate() const { return m_sparseXprEval.nonZerosEstimate(); }\n    \nprotected:\n  evaluator<SparseXprType> m_sparseXprEval;\n  DiagCoeffNested m_diagCoeffNested;\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseDot.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSE_DOT_H\n#define EIGEN_SPARSE_DOT_H\n\nnamespace Eigen { \n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\ntypename internal::traits<Derived>::Scalar\nSparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)\n  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)\n  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),\n    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)\n\n  eigen_assert(size() == other.size());\n  eigen_assert(other.size()>0 && \"you are using a non initialized vector\");\n\n  internal::evaluator<Derived> thisEval(derived());\n  typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0);\n  Scalar res(0);\n  while (i)\n  {\n    res += numext::conj(i.value()) * other.coeff(i.index());\n    ++i;\n  }\n  return res;\n}\n\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\ntypename internal::traits<Derived>::Scalar\nSparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)\n  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)\n  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),\n    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)\n\n  eigen_assert(size() == 
other.size());\n\n  internal::evaluator<Derived> thisEval(derived());\n  typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0);\n  \n  internal::evaluator<OtherDerived>  otherEval(other.derived());\n  typename internal::evaluator<OtherDerived>::InnerIterator j(otherEval, 0);\n\n  Scalar res(0);\n  while (i && j)\n  {\n    if (i.index()==j.index())\n    {\n      res += numext::conj(i.value()) * j.value();\n      ++i; ++j;\n    }\n    else if (i.index()<j.index())\n      ++i;\n    else\n      ++j;\n  }\n  return res;\n}\n\ntemplate<typename Derived>\ninline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real\nSparseMatrixBase<Derived>::squaredNorm() const\n{\n  return numext::real((*this).cwiseAbs2().sum());\n}\n\ntemplate<typename Derived>\ninline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real\nSparseMatrixBase<Derived>::norm() const\n{\n  using std::sqrt;\n  return sqrt(squaredNorm());\n}\n\ntemplate<typename Derived>\ninline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real\nSparseMatrixBase<Derived>::blueNorm() const\n{\n  return internal::blueNorm_impl(*this);\n}\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSE_DOT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseFuzzy.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSE_FUZZY_H\n#define EIGEN_SPARSE_FUZZY_H\n\nnamespace Eigen {\n  \ntemplate<typename Derived>\ntemplate<typename OtherDerived>\nbool SparseMatrixBase<Derived>::isApprox(const SparseMatrixBase<OtherDerived>& other, const RealScalar &prec) const\n{\n  const typename internal::nested_eval<Derived,2,PlainObject>::type actualA(derived());\n  typename internal::conditional<bool(IsRowMajor)==bool(OtherDerived::IsRowMajor),\n    const typename internal::nested_eval<OtherDerived,2,PlainObject>::type,\n    const PlainObject>::type actualB(other.derived());\n\n  return (actualA - actualB).squaredNorm() <= prec * prec * numext::mini(actualA.squaredNorm(), actualB.squaredNorm());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSE_FUZZY_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseMap.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSE_MAP_H\n#define EIGEN_SPARSE_MAP_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>\nstruct traits<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >\n  : public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >\n{\n  typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;\n  typedef traits<PlainObjectType> TraitsBase;\n  enum {\n    Flags = TraitsBase::Flags & (~NestByRefBit)\n  };\n};\n\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>\nstruct traits<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >\n  : public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >\n{\n  typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;\n  typedef traits<PlainObjectType> TraitsBase;\n  enum {\n    Flags = TraitsBase::Flags & (~ (NestByRefBit | LvalueBit))\n  };\n};\n\n} // end namespace internal\n\ntemplate<typename Derived,\n         int Level = internal::accessors_level<Derived>::has_write_access ? 
WriteAccessors : ReadOnlyAccessors\n> class SparseMapBase;\n\n/** \\ingroup SparseCore_Module\n  * class SparseMapBase\n  * \\brief Common base class for Map and Ref instance of sparse matrix and vector.\n  */\ntemplate<typename Derived>\nclass SparseMapBase<Derived,ReadOnlyAccessors>\n  : public SparseCompressedBase<Derived>\n{\n  public:\n    typedef SparseCompressedBase<Derived> Base;\n    typedef typename Base::Scalar Scalar;\n    typedef typename Base::StorageIndex StorageIndex;\n    enum { IsRowMajor = Base::IsRowMajor };\n    using Base::operator=;\n  protected:\n    \n    typedef typename internal::conditional<\n                         bool(internal::is_lvalue<Derived>::value),\n                         Scalar *, const Scalar *>::type ScalarPointer;\n    typedef typename internal::conditional<\n                         bool(internal::is_lvalue<Derived>::value),\n                         StorageIndex *, const StorageIndex *>::type IndexPointer;\n\n    Index   m_outerSize;\n    Index   m_innerSize;\n    Array<StorageIndex,2,1>  m_zero_nnz;\n    IndexPointer  m_outerIndex;\n    IndexPointer  m_innerIndices;\n    ScalarPointer m_values;\n    IndexPointer  m_innerNonZeros;\n\n  public:\n\n    /** \\copydoc SparseMatrixBase::rows() */\n    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }\n    /** \\copydoc SparseMatrixBase::cols() */\n    inline Index cols() const { return IsRowMajor ? 
m_innerSize : m_outerSize; }\n    /** \\copydoc SparseMatrixBase::innerSize() */\n    inline Index innerSize() const { return m_innerSize; }\n    /** \\copydoc SparseMatrixBase::outerSize() */\n    inline Index outerSize() const { return m_outerSize; }\n    /** \\copydoc SparseCompressedBase::nonZeros */\n    inline Index nonZeros() const { return m_zero_nnz[1]; }\n    \n    /** \\copydoc SparseCompressedBase::isCompressed */\n    bool isCompressed() const { return m_innerNonZeros==0; }\n\n    //----------------------------------------\n    // direct access interface\n    /** \\copydoc SparseMatrix::valuePtr */\n    inline const Scalar* valuePtr() const { return m_values; }\n    /** \\copydoc SparseMatrix::innerIndexPtr */\n    inline const StorageIndex* innerIndexPtr() const { return m_innerIndices; }\n    /** \\copydoc SparseMatrix::outerIndexPtr */\n    inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }\n    /** \\copydoc SparseMatrix::innerNonZeroPtr */\n    inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }\n    //----------------------------------------\n\n    /** \\copydoc SparseMatrix::coeff */\n    inline Scalar coeff(Index row, Index col) const\n    {\n      const Index outer = IsRowMajor ? row : col;\n      const Index inner = IsRowMajor ? col : row;\n\n      Index start = m_outerIndex[outer];\n      Index end = isCompressed() ? m_outerIndex[outer+1] : start + m_innerNonZeros[outer];\n      if (start==end)\n        return Scalar(0);\n      else if (end>0 && inner==m_innerIndices[end-1])\n        return m_values[end-1];\n      // ^^  optimization: let's first check if it is the last coefficient\n      // (very common in high level algorithms)\n\n      const StorageIndex* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);\n      const Index id = r-&m_innerIndices[0];\n      return ((*r==inner) && (id<end)) ? 
m_values[id] : Scalar(0);\n    }\n\n    inline SparseMapBase(Index rows, Index cols, Index nnz, IndexPointer outerIndexPtr, IndexPointer innerIndexPtr,\n                              ScalarPointer valuePtr, IndexPointer innerNonZerosPtr = 0)\n      : m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_zero_nnz(0,internal::convert_index<StorageIndex>(nnz)), m_outerIndex(outerIndexPtr),\n        m_innerIndices(innerIndexPtr), m_values(valuePtr), m_innerNonZeros(innerNonZerosPtr)\n    {}\n\n    // for vectors\n    inline SparseMapBase(Index size, Index nnz, IndexPointer innerIndexPtr, ScalarPointer valuePtr)\n      : m_outerSize(1), m_innerSize(size), m_zero_nnz(0,internal::convert_index<StorageIndex>(nnz)), m_outerIndex(m_zero_nnz.data()),\n        m_innerIndices(innerIndexPtr), m_values(valuePtr), m_innerNonZeros(0)\n    {}\n\n    /** Empty destructor */\n    inline ~SparseMapBase() {}\n\n  protected:\n    inline SparseMapBase() {}\n};\n\n/** \\ingroup SparseCore_Module\n  * class SparseMapBase\n  * \\brief Common base class for writable Map and Ref instance of sparse matrix and vector.\n  */\ntemplate<typename Derived>\nclass SparseMapBase<Derived,WriteAccessors>\n  : public SparseMapBase<Derived,ReadOnlyAccessors>\n{\n    typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;\n    \n  public:\n    typedef SparseMapBase<Derived, ReadOnlyAccessors> Base;\n    typedef typename Base::Scalar Scalar;\n    typedef typename Base::StorageIndex StorageIndex;\n    enum { IsRowMajor = Base::IsRowMajor };\n    \n    using Base::operator=;\n\n  public:\n    \n    //----------------------------------------\n    // direct access interface\n    using Base::valuePtr;\n    using Base::innerIndexPtr;\n    using Base::outerIndexPtr;\n    using Base::innerNonZeroPtr;\n    /** \\copydoc SparseMatrix::valuePtr */\n    inline Scalar* valuePtr()              { return Base::m_values; }\n    /** \\copydoc SparseMatrix::innerIndexPtr */\n    inline StorageIndex* 
innerIndexPtr()   { return Base::m_innerIndices; }\n    /** \\copydoc SparseMatrix::outerIndexPtr */\n    inline StorageIndex* outerIndexPtr()   { return Base::m_outerIndex; }\n    /** \\copydoc SparseMatrix::innerNonZeroPtr */\n    inline StorageIndex* innerNonZeroPtr() { return Base::m_innerNonZeros; }\n    //----------------------------------------\n\n    /** \\copydoc SparseMatrix::coeffRef */\n    inline Scalar& coeffRef(Index row, Index col)\n    {\n      const Index outer = IsRowMajor ? row : col;\n      const Index inner = IsRowMajor ? col : row;\n\n      Index start = Base::m_outerIndex[outer];\n      Index end = Base::isCompressed() ? Base::m_outerIndex[outer+1] : start + Base::m_innerNonZeros[outer];\n      eigen_assert(end>=start && \"you probably called coeffRef on a non finalized matrix\");\n      eigen_assert(end>start && \"coeffRef cannot be called on a zero coefficient\");\n      StorageIndex* r = std::lower_bound(&Base::m_innerIndices[start],&Base::m_innerIndices[end],inner);\n      const Index id = r - &Base::m_innerIndices[0];\n      eigen_assert((*r==inner) && (id<end) && \"coeffRef cannot be called on a zero coefficient\");\n      return const_cast<Scalar*>(Base::m_values)[id];\n    }\n    \n    inline SparseMapBase(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr,\n                         Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0)\n      : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)\n    {}\n\n    // for vectors\n    inline SparseMapBase(Index size, Index nnz, StorageIndex* innerIndexPtr, Scalar* valuePtr)\n      : Base(size, nnz, innerIndexPtr, valuePtr)\n    {}\n\n    /** Empty destructor */\n    inline ~SparseMapBase() {}\n\n  protected:\n    inline SparseMapBase() {}\n};\n\n/** \\ingroup SparseCore_Module\n  *\n  * \\brief Specialization of class Map for SparseMatrix-like storage.\n  *\n  * \\tparam SparseMatrixType the equivalent sparse matrix type 
of the referenced data, it must be a template instance of class SparseMatrix.\n  *\n  * \\sa class Map, class SparseMatrix, class Ref<SparseMatrixType,Options>\n  */\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>\nclass Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>\n  : public SparseMapBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >\n#else\ntemplate<typename SparseMatrixType>\nclass Map<SparseMatrixType>\n  : public SparseMapBase<Derived,WriteAccessors>\n#endif\n{\n  public:\n    typedef SparseMapBase<Map> Base;\n    EIGEN_SPARSE_PUBLIC_INTERFACE(Map)\n    enum { IsRowMajor = Base::IsRowMajor };\n\n  public:\n\n    /** Constructs a read-write Map to a sparse matrix of size \\a rows x \\a cols, containing \\a nnz non-zero coefficients,\n      * stored as a sparse format as defined by the pointers \\a outerIndexPtr, \\a innerIndexPtr, and \\a valuePtr.\n      * If the optional parameter \\a innerNonZerosPtr is the null pointer, then a standard compressed format is assumed.\n      *\n      * This constructor is available only if \\c SparseMatrixType is non-const.\n      *\n      * More details on the expected storage schemes are given in the \\ref TutorialSparse \"manual pages\".\n      */\n    inline Map(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr,\n               StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0)\n      : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)\n    {}\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** Empty destructor */\n    inline ~Map() {}\n};\n\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>\nclass Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>\n  : public SparseMapBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> 
>\n{\n  public:\n    typedef SparseMapBase<Map> Base;\n    EIGEN_SPARSE_PUBLIC_INTERFACE(Map)\n    enum { IsRowMajor = Base::IsRowMajor };\n\n  public:\n#endif\n    /** This is the const version of the above constructor.\n      *\n      * This constructor is available only if \\c SparseMatrixType is const, e.g.:\n      * \\code Map<const SparseMatrix<double> >  \\endcode\n      */\n    inline Map(Index rows, Index cols, Index nnz, const StorageIndex* outerIndexPtr,\n               const StorageIndex* innerIndexPtr, const Scalar* valuePtr, const StorageIndex* innerNonZerosPtr = 0)\n      : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)\n    {}\n\n    /** Empty destructor */\n    inline ~Map() {}\n};\n\nnamespace internal {\n\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>\nstruct evaluator<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >\n  : evaluator<SparseCompressedBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >\n{\n  typedef evaluator<SparseCompressedBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;\n  typedef Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;  \n  evaluator() : Base() {}\n  explicit evaluator(const XprType &mat) : Base(mat) {}\n};\n\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>\nstruct evaluator<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >\n  : evaluator<SparseCompressedBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >\n{\n  typedef evaluator<SparseCompressedBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;\n  typedef Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;  \n  evaluator() : Base() {}\n  explicit evaluator(const XprType &mat) : Base(mat) 
{}\n};\n\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSE_MAP_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseMatrix.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSEMATRIX_H\n#define EIGEN_SPARSEMATRIX_H\n\nnamespace Eigen { \n\n/** \\ingroup SparseCore_Module\n  *\n  * \\class SparseMatrix\n  *\n  * \\brief A versatible sparse matrix representation\n  *\n  * This class implements a more versatile variants of the common \\em compressed row/column storage format.\n  * Each colmun's (resp. row) non zeros are stored as a pair of value with associated row (resp. colmiun) index.\n  * All the non zeros are stored in a single large buffer. Unlike the \\em compressed format, there might be extra\n  * space inbetween the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero\n  * can be done with limited memory reallocation and copies.\n  *\n  * A call to the function makeCompressed() turns the matrix into the standard \\em compressed format\n  * compatible with many library.\n  *\n  * More details on this storage sceheme are given in the \\ref TutorialSparse \"manual pages\".\n  *\n  * \\tparam _Scalar the scalar type, i.e. the type of the coefficients\n  * \\tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility\n  *                 is ColMajor or RowMajor. The default is 0 which means column-major.\n  * \\tparam _StorageIndex the type of the indices. It has to be a \\b signed type (e.g., short, int, std::ptrdiff_t). 
Default is \\c int.\n  *\n  * \\warning In %Eigen 3.2, the undocumented type \\c SparseMatrix::Index was improperly defined as the storage index type (e.g., int),\n  *          whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index.\n  *          Codes making use of \\c SparseMatrix::Index, might thus likely have to be changed to use \\c SparseMatrix::StorageIndex instead.\n  *\n  * This class can be extended with the help of the plugin mechanism described on the page\n  * \\ref TopicCustomizing_Plugins by defining the preprocessor symbol \\c EIGEN_SPARSEMATRIX_PLUGIN.\n  */\n\nnamespace internal {\ntemplate<typename _Scalar, int _Options, typename _StorageIndex>\nstruct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >\n{\n  typedef _Scalar Scalar;\n  typedef _StorageIndex StorageIndex;\n  typedef Sparse StorageKind;\n  typedef MatrixXpr XprKind;\n  enum {\n    RowsAtCompileTime = Dynamic,\n    ColsAtCompileTime = Dynamic,\n    MaxRowsAtCompileTime = Dynamic,\n    MaxColsAtCompileTime = Dynamic,\n    Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,\n    SupportedAccessPatterns = InnerRandomAccessPattern\n  };\n};\n\ntemplate<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>\nstruct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >\n{\n  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;\n  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;\n  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;\n\n  typedef _Scalar Scalar;\n  typedef Dense StorageKind;\n  typedef _StorageIndex StorageIndex;\n  typedef MatrixXpr XprKind;\n\n  enum {\n    RowsAtCompileTime = Dynamic,\n    ColsAtCompileTime = 1,\n    MaxRowsAtCompileTime = Dynamic,\n    MaxColsAtCompileTime = 1,\n    Flags = LvalueBit\n  };\n};\n\ntemplate<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>\nstruct traits<Diagonal<const 
SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >\n : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >\n{\n  enum {\n    Flags = 0\n  };\n};\n\n} // end namespace internal\n\ntemplate<typename _Scalar, int _Options, typename _StorageIndex>\nclass SparseMatrix\n  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >\n{\n    typedef SparseCompressedBase<SparseMatrix> Base;\n    using Base::convert_index;\n    friend class SparseVector<_Scalar,0,_StorageIndex>;\n  public:\n    using Base::isCompressed;\n    using Base::nonZeros;\n    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)\n    using Base::operator+=;\n    using Base::operator-=;\n\n    typedef MappedSparseMatrix<Scalar,Flags> Map;\n    typedef Diagonal<SparseMatrix> DiagonalReturnType;\n    typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;\n    typedef typename Base::InnerIterator InnerIterator;\n    typedef typename Base::ReverseInnerIterator ReverseInnerIterator;\n    \n\n    using Base::IsRowMajor;\n    typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;\n    enum {\n      Options = _Options\n    };\n\n    typedef typename Base::IndexVector IndexVector;\n    typedef typename Base::ScalarVector ScalarVector;\n  protected:\n    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;\n\n    Index m_outerSize;\n    Index m_innerSize;\n    StorageIndex* m_outerIndex;\n    StorageIndex* m_innerNonZeros;     // optional, if null then the data is compressed\n    Storage m_data;\n\n  public:\n    \n    /** \\returns the number of rows of the matrix */\n    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }\n    /** \\returns the number of columns of the matrix */\n    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }\n\n    /** \\returns the number of rows (resp. columns) of the matrix if the storage order column major (resp. 
row major) */\n    inline Index innerSize() const { return m_innerSize; }\n    /** \\returns the number of columns (resp. rows) of the matrix if the storage order column major (resp. row major) */\n    inline Index outerSize() const { return m_outerSize; }\n    \n    /** \\returns a const pointer to the array of values.\n      * This function is aimed at interoperability with other libraries.\n      * \\sa innerIndexPtr(), outerIndexPtr() */\n    inline const Scalar* valuePtr() const { return m_data.valuePtr(); }\n    /** \\returns a non-const pointer to the array of values.\n      * This function is aimed at interoperability with other libraries.\n      * \\sa innerIndexPtr(), outerIndexPtr() */\n    inline Scalar* valuePtr() { return m_data.valuePtr(); }\n\n    /** \\returns a const pointer to the array of inner indices.\n      * This function is aimed at interoperability with other libraries.\n      * \\sa valuePtr(), outerIndexPtr() */\n    inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }\n    /** \\returns a non-const pointer to the array of inner indices.\n      * This function is aimed at interoperability with other libraries.\n      * \\sa valuePtr(), outerIndexPtr() */\n    inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }\n\n    /** \\returns a const pointer to the array of the starting positions of the inner vectors.\n      * This function is aimed at interoperability with other libraries.\n      * \\sa valuePtr(), innerIndexPtr() */\n    inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }\n    /** \\returns a non-const pointer to the array of the starting positions of the inner vectors.\n      * This function is aimed at interoperability with other libraries.\n      * \\sa valuePtr(), innerIndexPtr() */\n    inline StorageIndex* outerIndexPtr() { return m_outerIndex; }\n\n    /** \\returns a const pointer to the array of the number of non zeros of the inner vectors.\n      * This 
function is aimed at interoperability with other libraries.\n      * \\warning it returns the null pointer 0 in compressed mode */\n    inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }\n    /** \\returns a non-const pointer to the array of the number of non zeros of the inner vectors.\n      * This function is aimed at interoperability with other libraries.\n      * \\warning it returns the null pointer 0 in compressed mode */\n    inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }\n\n    /** \\internal */\n    inline Storage& data() { return m_data; }\n    /** \\internal */\n    inline const Storage& data() const { return m_data; }\n\n    /** \\returns the value of the matrix at position \\a i, \\a j\n      * This function returns Scalar(0) if the element is an explicit \\em zero */\n    inline Scalar coeff(Index row, Index col) const\n    {\n      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());\n      \n      const Index outer = IsRowMajor ? row : col;\n      const Index inner = IsRowMajor ? col : row;\n      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];\n      return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));\n    }\n\n    /** \\returns a non-const reference to the value of the matrix at position \\a i, \\a j\n      *\n      * If the element does not exist then it is inserted via the insert(Index,Index) function\n      * which itself turns the matrix into a non compressed form if that was not the case.\n      *\n      * This is a O(log(nnz_j)) operation (binary search) plus the cost of insert(Index,Index)\n      * function if the element does not already exist.\n      */\n    inline Scalar& coeffRef(Index row, Index col)\n    {\n      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());\n      \n      const Index outer = IsRowMajor ? row : col;\n      const Index inner = IsRowMajor ? 
col : row;\n\n      Index start = m_outerIndex[outer];\n      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];\n      eigen_assert(end>=start && \"you probably called coeffRef on a non finalized matrix\");\n      if(end<=start)\n        return insert(row,col);\n      const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));\n      if((p<end) && (m_data.index(p)==inner))\n        return m_data.value(p);\n      else\n        return insert(row,col);\n    }\n\n    /** \\returns a reference to a novel non zero coefficient with coordinates \\a row x \\a col.\n      * The non zero coefficient must \\b not already exist.\n      *\n      * If the matrix \\c *this is in compressed mode, then \\c *this is turned into uncompressed\n      * mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier.\n      * In this case, the insertion procedure is optimized for a \\e sequential insertion mode where elements are assumed to be\n      * inserted by increasing outer-indices.\n      * \n      * If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first\n      * call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.\n      *\n      * Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1)\n      * if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.\n      *\n      */\n    Scalar& insert(Index row, Index col);\n\n  public:\n\n    /** Removes all non zeros but keep allocated memory\n      *\n      * This function does not free the currently allocated memory. 
To release as much as memory as possible,\n      * call \\code mat.data().squeeze(); \\endcode after resizing it.\n      * \n      * \\sa resize(Index,Index), data()\n      */\n    inline void setZero()\n    {\n      m_data.clear();\n      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));\n      if(m_innerNonZeros)\n        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));\n    }\n\n    /** Preallocates \\a reserveSize non zeros.\n      *\n      * Precondition: the matrix must be in compressed mode. */\n    inline void reserve(Index reserveSize)\n    {\n      eigen_assert(isCompressed() && \"This function does not make sense in non compressed mode.\");\n      m_data.reserve(reserveSize);\n    }\n    \n    #ifdef EIGEN_PARSED_BY_DOXYGEN\n    /** Preallocates \\a reserveSize[\\c j] non zeros for each column (resp. row) \\c j.\n      *\n      * This function turns the matrix in non-compressed mode.\n      * \n      * The type \\c SizesType must expose the following interface:\n        \\code\n        typedef value_type;\n        const value_type& operator[](i) const;\n        \\endcode\n      * for \\c i in the [0,this->outerSize()[ range.\n      * Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc.\n      */\n    template<class SizesType>\n    inline void reserve(const SizesType& reserveSizes);\n    #else\n    template<class SizesType>\n    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =\n    #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename\n        typename\n    #endif\n        SizesType::value_type())\n    {\n      EIGEN_UNUSED_VARIABLE(enableif);\n      reserveInnerVectors(reserveSizes);\n    }\n    #endif // EIGEN_PARSED_BY_DOXYGEN\n  protected:\n    template<class SizesType>\n    inline void reserveInnerVectors(const SizesType& reserveSizes)\n    {\n      if(isCompressed())\n      {\n        Index 
totalReserveSize = 0;\n        // turn the matrix into non-compressed mode\n        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));\n        if (!m_innerNonZeros) internal::throw_std_bad_alloc();\n        \n        // temporarily use m_innerSizes to hold the new starting points.\n        StorageIndex* newOuterIndex = m_innerNonZeros;\n        \n        StorageIndex count = 0;\n        for(Index j=0; j<m_outerSize; ++j)\n        {\n          newOuterIndex[j] = count;\n          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);\n          totalReserveSize += reserveSizes[j];\n        }\n        m_data.reserve(totalReserveSize);\n        StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];\n        for(Index j=m_outerSize-1; j>=0; --j)\n        {\n          StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];\n          for(Index i=innerNNZ-1; i>=0; --i)\n          {\n            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);\n            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);\n          }\n          previousOuterIndex = m_outerIndex[j];\n          m_outerIndex[j] = newOuterIndex[j];\n          m_innerNonZeros[j] = innerNNZ;\n        }\n        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];\n        \n        m_data.resize(m_outerIndex[m_outerSize]);\n      }\n      else\n      {\n        StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));\n        if (!newOuterIndex) internal::throw_std_bad_alloc();\n        \n        StorageIndex count = 0;\n        for(Index j=0; j<m_outerSize; ++j)\n        {\n          newOuterIndex[j] = count;\n          StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];\n          StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], 
alreadyReserved);\n          count += toReserve + m_innerNonZeros[j];\n        }\n        newOuterIndex[m_outerSize] = count;\n        \n        m_data.resize(count);\n        for(Index j=m_outerSize-1; j>=0; --j)\n        {\n          Index offset = newOuterIndex[j] - m_outerIndex[j];\n          if(offset>0)\n          {\n            StorageIndex innerNNZ = m_innerNonZeros[j];\n            for(Index i=innerNNZ-1; i>=0; --i)\n            {\n              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);\n              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);\n            }\n          }\n        }\n        \n        std::swap(m_outerIndex, newOuterIndex);\n        std::free(newOuterIndex);\n      }\n      \n    }\n  public:\n\n    //--- low level purely coherent filling ---\n\n    /** \\internal\n      * \\returns a reference to the non zero coefficient at position \\a row, \\a col assuming that:\n      * - the nonzero does not already exist\n      * - the new coefficient is the last one according to the storage order\n      *\n      * Before filling a given inner vector you must call the statVec(Index) function.\n      *\n      * After an insertion session, you should call the finalize() function.\n      *\n      * \\sa insert, insertBackByOuterInner, startVec */\n    inline Scalar& insertBack(Index row, Index col)\n    {\n      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);\n    }\n\n    /** \\internal\n      * \\sa insertBack, startVec */\n    inline Scalar& insertBackByOuterInner(Index outer, Index inner)\n    {\n      eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && \"Invalid ordered insertion (invalid outer index)\");\n      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && \"Invalid ordered insertion (invalid inner index)\");\n      Index p = m_outerIndex[outer+1];\n      ++m_outerIndex[outer+1];\n      
m_data.append(Scalar(0), inner);\n      return m_data.value(p);\n    }\n\n    /** \\internal\n      * \\warning use it only if you know what you are doing */\n    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)\n    {\n      Index p = m_outerIndex[outer+1];\n      ++m_outerIndex[outer+1];\n      m_data.append(Scalar(0), inner);\n      return m_data.value(p);\n    }\n\n    /** \\internal\n      * \\sa insertBack, insertBackByOuterInner */\n    inline void startVec(Index outer)\n    {\n      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && \"You must call startVec for each inner vector sequentially\");\n      eigen_assert(m_outerIndex[outer+1]==0 && \"You must call startVec for each inner vector sequentially\");\n      m_outerIndex[outer+1] = m_outerIndex[outer];\n    }\n\n    /** \\internal\n      * Must be called after inserting a set of non zero entries using the low level compressed API.\n      */\n    inline void finalize()\n    {\n      if(isCompressed())\n      {\n        StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());\n        Index i = m_outerSize;\n        // find the last filled column\n        while (i>=0 && m_outerIndex[i]==0)\n          --i;\n        ++i;\n        while (i<=m_outerSize)\n        {\n          m_outerIndex[i] = size;\n          ++i;\n        }\n      }\n    }\n\n    //---\n\n    template<typename InputIterators>\n    void setFromTriplets(const InputIterators& begin, const InputIterators& end);\n\n    template<typename InputIterators,typename DupFunctor>\n    void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);\n\n    void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }\n\n    template<typename DupFunctor>\n    void collapseDuplicates(DupFunctor dup_func = DupFunctor());\n\n    //---\n    \n    /** \\internal\n      * same as insert(Index,Index) except that the indices are given relative to the 
storage order */\n    Scalar& insertByOuterInner(Index j, Index i)\n    {\n      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);\n    }\n\n    /** Turns the matrix into the \\em compressed format.\n      */\n    void makeCompressed()\n    {\n      if(isCompressed())\n        return;\n      \n      eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);\n      \n      Index oldStart = m_outerIndex[1];\n      m_outerIndex[1] = m_innerNonZeros[0];\n      for(Index j=1; j<m_outerSize; ++j)\n      {\n        Index nextOldStart = m_outerIndex[j+1];\n        Index offset = oldStart - m_outerIndex[j];\n        if(offset>0)\n        {\n          for(Index k=0; k<m_innerNonZeros[j]; ++k)\n          {\n            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);\n            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);\n          }\n        }\n        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];\n        oldStart = nextOldStart;\n      }\n      std::free(m_innerNonZeros);\n      m_innerNonZeros = 0;\n      m_data.resize(m_outerIndex[m_outerSize]);\n      m_data.squeeze();\n    }\n\n    /** Turns the matrix into the uncompressed mode */\n    void uncompress()\n    {\n      if(m_innerNonZeros != 0)\n        return; \n      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));\n      for (Index i = 0; i < m_outerSize; i++)\n      {\n        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; \n      }\n    }\n    \n    /** Suppresses all nonzeros which are \\b much \\b smaller \\b than \\a reference under the tolerence \\a epsilon */\n    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())\n    {\n      prune(default_prunning_func(reference,epsilon));\n    }\n    \n    /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \\a keep.\n      * The functor type \\a KeepFunc must 
implement the following function:\n      * \\code\n      * bool operator() (const Index& row, const Index& col, const Scalar& value) const;\n      * \\endcode\n      * \\sa prune(Scalar,RealScalar)\n      */\n    template<typename KeepFunc>\n    void prune(const KeepFunc& keep = KeepFunc())\n    {\n      // TODO optimize the uncompressed mode to avoid moving and allocating the data twice\n      makeCompressed();\n\n      StorageIndex k = 0;\n      for(Index j=0; j<m_outerSize; ++j)\n      {\n        Index previousStart = m_outerIndex[j];\n        m_outerIndex[j] = k;\n        Index end = m_outerIndex[j+1];\n        for(Index i=previousStart; i<end; ++i)\n        {\n          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))\n          {\n            m_data.value(k) = m_data.value(i);\n            m_data.index(k) = m_data.index(i);\n            ++k;\n          }\n        }\n      }\n      m_outerIndex[m_outerSize] = k;\n      m_data.resize(k,0);\n    }\n\n    /** Resizes the matrix to a \\a rows x \\a cols matrix leaving old values untouched.\n      *\n      * If the sizes of the matrix are decreased, then the matrix is turned to \\b uncompressed-mode\n      * and the storage of the out of bounds coefficients is kept and reserved.\n      * Call makeCompressed() to pack the entries and squeeze extra memory.\n      *\n      * \\sa reserve(), setZero(), makeCompressed()\n      */\n    void conservativeResize(Index rows, Index cols) \n    {\n      // No change\n      if (this->rows() == rows && this->cols() == cols) return;\n      \n      // If one dimension is null, then there is nothing to be preserved\n      if(rows==0 || cols==0) return resize(rows,cols);\n\n      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();\n      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();\n      StorageIndex newInnerSize = convert_index(IsRowMajor ? 
cols : rows);\n\n      // Deals with inner non zeros\n      if (m_innerNonZeros)\n      {\n        // Resize m_innerNonZeros\n        StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));\n        if (!newInnerNonZeros) internal::throw_std_bad_alloc();\n        m_innerNonZeros = newInnerNonZeros;\n        \n        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)          \n          m_innerNonZeros[i] = 0;\n      } \n      else if (innerChange < 0) \n      {\n        // Inner size decreased: allocate a new m_innerNonZeros\n        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));\n        if (!m_innerNonZeros) internal::throw_std_bad_alloc();\n        for(Index i = 0; i < m_outerSize; i++)\n          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];\n      }\n      \n      // Change the m_innerNonZeros in case of a decrease of inner size\n      if (m_innerNonZeros && innerChange < 0)\n      {\n        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)\n        {\n          StorageIndex &n = m_innerNonZeros[i];\n          StorageIndex start = m_outerIndex[i];\n          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n; \n        }\n      }\n      \n      m_innerSize = newInnerSize;\n\n      // Re-allocate outer index structure if necessary\n      if (outerChange == 0)\n        return;\n          \n      StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));\n      if (!newOuterIndex) internal::throw_std_bad_alloc();\n      m_outerIndex = newOuterIndex;\n      if (outerChange > 0)\n      {\n        StorageIndex last = m_outerSize == 0 ? 
0 : m_outerIndex[m_outerSize];\n        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)          \n          m_outerIndex[i] = last; \n      }\n      m_outerSize += outerChange;\n    }\n    \n    /** Resizes the matrix to a \\a rows x \\a cols matrix and initializes it to zero.\n      * \n      * This function does not free the currently allocated memory. To release as much as memory as possible,\n      * call \\code mat.data().squeeze(); \\endcode after resizing it.\n      * \n      * \\sa reserve(), setZero()\n      */\n    void resize(Index rows, Index cols)\n    {\n      const Index outerSize = IsRowMajor ? rows : cols;\n      m_innerSize = IsRowMajor ? cols : rows;\n      m_data.clear();\n      if (m_outerSize != outerSize || m_outerSize==0)\n      {\n        std::free(m_outerIndex);\n        m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));\n        if (!m_outerIndex) internal::throw_std_bad_alloc();\n        \n        m_outerSize = outerSize;\n      }\n      if(m_innerNonZeros)\n      {\n        std::free(m_innerNonZeros);\n        m_innerNonZeros = 0;\n      }\n      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));\n    }\n\n    /** \\internal\n      * Resize the nonzero vector to \\a size */\n    void resizeNonZeros(Index size)\n    {\n      m_data.resize(size);\n    }\n\n    /** \\returns a const expression of the diagonal coefficients. 
*/\n    const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }\n    \n    /** \\returns a read-write expression of the diagonal coefficients.\n      * \\warning If the diagonal entries are written, then all diagonal\n      * entries \\b must already exist, otherwise an assertion will be raised.\n      */\n    DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }\n\n    /** Default constructor yielding an empty \\c 0 \\c x \\c 0 matrix */\n    inline SparseMatrix()\n      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)\n    {\n      check_template_parameters();\n      resize(0, 0);\n    }\n\n    /** Constructs a \\a rows \\c x \\a cols empty matrix */\n    inline SparseMatrix(Index rows, Index cols)\n      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)\n    {\n      check_template_parameters();\n      resize(rows, cols);\n    }\n\n    /** Constructs a sparse matrix from the sparse expression \\a other */\n    template<typename OtherDerived>\n    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)\n      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)\n    {\n      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),\n        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)\n      check_template_parameters();\n      const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);\n      if (needToTranspose)\n        *this = other.derived();\n      else\n      {\n        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN\n          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN\n        #endif\n        internal::call_assignment_no_alias(*this, other.derived());\n      }\n    }\n    \n    /** Constructs a sparse matrix from the sparse selfadjoint view \\a other */\n    template<typename OtherDerived, unsigned int 
UpLo>\n    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)\n      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)\n    {\n      check_template_parameters();\n      Base::operator=(other);\n    }\n\n    /** Copy constructor (it performs a deep copy) */\n    inline SparseMatrix(const SparseMatrix& other)\n      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)\n    {\n      check_template_parameters();\n      *this = other.derived();\n    }\n\n    /** \\brief Copy constructor with in-place evaluation */\n    template<typename OtherDerived>\n    SparseMatrix(const ReturnByValue<OtherDerived>& other)\n      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)\n    {\n      check_template_parameters();\n      initAssignment(other);\n      other.evalTo(*this);\n    }\n    \n    /** \\brief Copy constructor with in-place evaluation */\n    template<typename OtherDerived>\n    explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)\n      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)\n    {\n      check_template_parameters();\n      *this = other.derived();\n    }\n\n    /** Swaps the content of two sparse matrices of the same type.\n      * This is a fast operation that simply swaps the underlying pointers and parameters. */\n    inline void swap(SparseMatrix& other)\n    {\n      //EIGEN_DBG_SPARSE(std::cout << \"SparseMatrix:: swap\\n\");\n      std::swap(m_outerIndex, other.m_outerIndex);\n      std::swap(m_innerSize, other.m_innerSize);\n      std::swap(m_outerSize, other.m_outerSize);\n      std::swap(m_innerNonZeros, other.m_innerNonZeros);\n      m_data.swap(other.m_data);\n    }\n\n    /** Sets *this to the identity matrix.\n      * This function also turns the matrix into compressed mode, and drop any reserved memory. 
*/\n    inline void setIdentity()\n    {\n      eigen_assert(rows() == cols() && \"ONLY FOR SQUARED MATRICES\");\n      this->m_data.resize(rows());\n      Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));\n      Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();\n      Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));\n      std::free(m_innerNonZeros);\n      m_innerNonZeros = 0;\n    }\n    inline SparseMatrix& operator=(const SparseMatrix& other)\n    {\n      if (other.isRValue())\n      {\n        swap(other.const_cast_derived());\n      }\n      else if(this!=&other)\n      {\n        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN\n          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN\n        #endif\n        initAssignment(other);\n        if(other.isCompressed())\n        {\n          internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);\n          m_data = other.m_data;\n        }\n        else\n        {\n          Base::operator=(other);\n        }\n      }\n      return *this;\n    }\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    template<typename OtherDerived>\n    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)\n    { return Base::operator=(other.derived()); }\n#endif // EIGEN_PARSED_BY_DOXYGEN\n\n    template<typename OtherDerived>\n    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);\n\n    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)\n    {\n      EIGEN_DBG_SPARSE(\n        s << \"Nonzero entries:\\n\";\n        if(m.isCompressed())\n        {\n          for (Index i=0; i<m.nonZeros(); ++i)\n            s << \"(\" << m.m_data.value(i) << \",\" << m.m_data.index(i) << \") \";\n        }\n        else\n        {\n          for (Index i=0; i<m.outerSize(); ++i)\n          {\n            Index p = m.m_outerIndex[i];\n            
Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];\n            Index k=p;\n            for (; k<pe; ++k) {\n              s << \"(\" << m.m_data.value(k) << \",\" << m.m_data.index(k) << \") \";\n            }\n            for (; k<m.m_outerIndex[i+1]; ++k) {\n              s << \"(_,_) \";\n            }\n          }\n        }\n        s << std::endl;\n        s << std::endl;\n        s << \"Outer pointers:\\n\";\n        for (Index i=0; i<m.outerSize(); ++i) {\n          s << m.m_outerIndex[i] << \" \";\n        }\n        s << \" $\" << std::endl;\n        if(!m.isCompressed())\n        {\n          s << \"Inner non zeros:\\n\";\n          for (Index i=0; i<m.outerSize(); ++i) {\n            s << m.m_innerNonZeros[i] << \" \";\n          }\n          s << \" $\" << std::endl;\n        }\n        s << std::endl;\n      );\n      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);\n      return s;\n    }\n\n    /** Destructor */\n    inline ~SparseMatrix()\n    {\n      std::free(m_outerIndex);\n      std::free(m_innerNonZeros);\n    }\n\n    /** Overloaded for performance */\n    Scalar sum() const;\n    \n#   ifdef EIGEN_SPARSEMATRIX_PLUGIN\n#     include EIGEN_SPARSEMATRIX_PLUGIN\n#   endif\n\nprotected:\n\n    template<typename Other>\n    void initAssignment(const Other& other)\n    {\n      resize(other.rows(), other.cols());\n      if(m_innerNonZeros)\n      {\n        std::free(m_innerNonZeros);\n        m_innerNonZeros = 0;\n      }\n    }\n\n    /** \\internal\n      * \\sa insert(Index,Index) */\n    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);\n\n    /** \\internal\n      * A vector object that is equal to 0 everywhere but v at the position i */\n    class SingletonVector\n    {\n        StorageIndex m_index;\n        StorageIndex m_value;\n      public:\n        typedef StorageIndex value_type;\n        SingletonVector(Index i, Index v)\n          : m_index(convert_index(i)), m_value(convert_index(v))\n        {}\n\n     
   StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }\n    };\n\n    /** \\internal\n      * \\sa insert(Index,Index) */\n    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);\n\npublic:\n    /** \\internal\n      * \\sa insert(Index,Index) */\n    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)\n    {\n      const Index outer = IsRowMajor ? row : col;\n      const Index inner = IsRowMajor ? col : row;\n\n      eigen_assert(!isCompressed());\n      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));\n\n      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;\n      m_data.index(p) = convert_index(inner);\n      return (m_data.value(p) = 0);\n    }\n\nprivate:\n  static void check_template_parameters()\n  {\n    EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);\n    EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);\n  }\n\n  struct default_prunning_func {\n    default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}\n    inline bool operator() (const Index&, const Index&, const Scalar& value) const\n    {\n      return !internal::isMuchSmallerThan(value, reference, epsilon);\n    }\n    Scalar reference;\n    RealScalar epsilon;\n  };\n};\n\nnamespace internal {\n\ntemplate<typename InputIterator, typename SparseMatrixType, typename DupFunctor>\nvoid set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)\n{\n  enum { IsRowMajor = SparseMatrixType::IsRowMajor };\n  typedef typename SparseMatrixType::Scalar Scalar;\n  typedef typename SparseMatrixType::StorageIndex StorageIndex;\n  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());\n\n  if(begin!=end)\n  {\n    // pass 1: count the nnz per inner-vector\n    typename 
SparseMatrixType::IndexVector wi(trMat.outerSize());\n    wi.setZero();\n    for(InputIterator it(begin); it!=end; ++it)\n    {\n      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());\n      wi(IsRowMajor ? it->col() : it->row())++;\n    }\n\n    // pass 2: insert all the elements into trMat\n    trMat.reserve(wi);\n    for(InputIterator it(begin); it!=end; ++it)\n      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();\n\n    // pass 3:\n    trMat.collapseDuplicates(dup_func);\n  }\n\n  // pass 4: transposed copy -> implicit sorting\n  mat = trMat;\n}\n\n}\n\n\n/** Fill the matrix \\c *this with the list of \\em triplets defined by the iterator range \\a begin - \\a end.\n  *\n  * A \\em triplet is a tuple (i,j,value) defining a non-zero element.\n  * The input list of triplets does not have to be sorted, and can contains duplicated elements.\n  * In any case, the result is a \\b sorted and \\b compressed sparse matrix where the duplicates have been summed up.\n  * This is a \\em O(n) operation, with \\em n the number of triplet elements.\n  * The initial contents of \\c *this is destroyed.\n  * The matrix \\c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,\n  * or the resize(Index,Index) method. 
The sizes are not extracted from the triplet list.\n  *\n  * The \\a InputIterators value_type must provide the following interface:\n  * \\code\n  * Scalar value() const; // the value\n  * Scalar row() const;   // the row index i\n  * Scalar col() const;   // the column index j\n  * \\endcode\n  * See for instance the Eigen::Triplet template class.\n  *\n  * Here is a typical usage example:\n  * \\code\n    typedef Triplet<double> T;\n    std::vector<T> tripletList;\n    triplets.reserve(estimation_of_entries);\n    for(...)\n    {\n      // ...\n      tripletList.push_back(T(i,j,v_ij));\n    }\n    SparseMatrixType m(rows,cols);\n    m.setFromTriplets(tripletList.begin(), tripletList.end());\n    // m is ready to go!\n  * \\endcode\n  *\n  * \\warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define\n  * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather\n  * be explicitely stored into a std::vector for instance.\n  */\ntemplate<typename Scalar, int _Options, typename _StorageIndex>\ntemplate<typename InputIterators>\nvoid SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)\n{\n  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());\n}\n\n/** The same as setFromTriplets but when duplicates are met the functor \\a dup_func is applied:\n  * \\code\n  * value = dup_func(OldValue, NewValue)\n  * \\endcode \n  * Here is a C++11 example keeping the latest entry only:\n  * \\code\n  * mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });\n  * \\endcode\n  */\ntemplate<typename Scalar, int _Options, typename _StorageIndex>\ntemplate<typename InputIterators,typename DupFunctor>\nvoid 
SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)\n{\n  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);\n}\n\n/** \\internal */\ntemplate<typename Scalar, int _Options, typename _StorageIndex>\ntemplate<typename DupFunctor>\nvoid SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)\n{\n  eigen_assert(!isCompressed());\n  // TODO, in practice we should be able to use m_innerNonZeros for that task\n  IndexVector wi(innerSize());\n  wi.fill(-1);\n  StorageIndex count = 0;\n  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers\n  for(Index j=0; j<outerSize(); ++j)\n  {\n    StorageIndex start   = count;\n    Index oldEnd  = m_outerIndex[j]+m_innerNonZeros[j];\n    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)\n    {\n      Index i = m_data.index(k);\n      if(wi(i)>=start)\n      {\n        // we already meet this entry => accumulate it\n        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));\n      }\n      else\n      {\n        m_data.value(count) = m_data.value(k);\n        m_data.index(count) = m_data.index(k);\n        wi(i) = count;\n        ++count;\n      }\n    }\n    m_outerIndex[j] = start;\n  }\n  m_outerIndex[m_outerSize] = count;\n\n  // turn the matrix into compressed form\n  std::free(m_innerNonZeros);\n  m_innerNonZeros = 0;\n  m_data.resize(m_outerIndex[m_outerSize]);\n}\n\ntemplate<typename Scalar, int _Options, typename _StorageIndex>\ntemplate<typename OtherDerived>\nEIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)\n{\n  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),\n        
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)\n\n  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN\n    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN\n  #endif\n      \n  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);\n  if (needToTranspose)\n  {\n    #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN\n      EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN\n    #endif\n    // two passes algorithm:\n    //  1 - compute the number of coeffs per dest inner vector\n    //  2 - do the actual copy/eval\n    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed\n    typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;\n    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;\n    typedef internal::evaluator<_OtherCopy> OtherCopyEval;\n    OtherCopy otherCopy(other.derived());\n    OtherCopyEval otherCopyEval(otherCopy);\n\n    SparseMatrix dest(other.rows(),other.cols());\n    Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();\n\n    // pass 1\n    // FIXME the above copy could be merged with that pass\n    for (Index j=0; j<otherCopy.outerSize(); ++j)\n      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)\n        ++dest.m_outerIndex[it.index()];\n\n    // prefix sum\n    StorageIndex count = 0;\n    IndexVector positions(dest.outerSize());\n    for (Index j=0; j<dest.outerSize(); ++j)\n    {\n      StorageIndex tmp = dest.m_outerIndex[j];\n      dest.m_outerIndex[j] = count;\n      positions[j] = count;\n      count += tmp;\n    }\n    dest.m_outerIndex[dest.outerSize()] = count;\n    // alloc\n    dest.m_data.resize(count);\n    // pass 2\n    for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)\n    {\n      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)\n      {\n        
Index pos = positions[it.index()]++;\n        dest.m_data.index(pos) = j;\n        dest.m_data.value(pos) = it.value();\n      }\n    }\n    this->swap(dest);\n    return *this;\n  }\n  else\n  {\n    if(other.isRValue())\n    {\n      initAssignment(other.derived());\n    }\n    // there is no special optimization\n    return Base::operator=(other.derived());\n  }\n}\n\ntemplate<typename _Scalar, int _Options, typename _StorageIndex>\ntypename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)\n{\n  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());\n  \n  const Index outer = IsRowMajor ? row : col;\n  const Index inner = IsRowMajor ? col : row;\n  \n  if(isCompressed())\n  {\n    if(nonZeros()==0)\n    {\n      // reserve space if not already done\n      if(m_data.allocatedSize()==0)\n        m_data.reserve(2*m_innerSize);\n      \n      // turn the matrix into non-compressed mode\n      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));\n      if(!m_innerNonZeros) internal::throw_std_bad_alloc();\n      \n      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));\n      \n      // pack all inner-vectors to the end of the pre-allocated space\n      // and allocate the entire free-space to the first inner-vector\n      StorageIndex end = convert_index(m_data.allocatedSize());\n      for(Index j=1; j<=m_outerSize; ++j)\n        m_outerIndex[j] = end;\n    }\n    else\n    {\n      // turn the matrix into non-compressed mode\n      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));\n      if(!m_innerNonZeros) internal::throw_std_bad_alloc();\n      for(Index j=0; j<m_outerSize; ++j)\n        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];\n    }\n  }\n  \n  // check whether we can do a fast \"push back\" insertion\n  Index data_end = m_data.allocatedSize();\n  \n  // First 
case: we are filling a new inner vector which is packed at the end.\n  // We assume that all remaining inner-vectors are also empty and packed to the end.\n  if(m_outerIndex[outer]==data_end)\n  {\n    eigen_internal_assert(m_innerNonZeros[outer]==0);\n    \n    // pack previous empty inner-vectors to end of the used-space\n    // and allocate the entire free-space to the current inner-vector.\n    StorageIndex p = convert_index(m_data.size());\n    Index j = outer;\n    while(j>=0 && m_innerNonZeros[j]==0)\n      m_outerIndex[j--] = p;\n    \n    // push back the new element\n    ++m_innerNonZeros[outer];\n    m_data.append(Scalar(0), inner);\n    \n    // check for reallocation\n    if(data_end != m_data.allocatedSize())\n    {\n      // m_data has been reallocated\n      //  -> move remaining inner-vectors back to the end of the free-space\n      //     so that the entire free-space is allocated to the current inner-vector.\n      eigen_internal_assert(data_end < m_data.allocatedSize());\n      StorageIndex new_end = convert_index(m_data.allocatedSize());\n      for(Index k=outer+1; k<=m_outerSize; ++k)\n        if(m_outerIndex[k]==data_end)\n          m_outerIndex[k] = new_end;\n    }\n    return m_data.value(p);\n  }\n  \n  // Second case: the next inner-vector is packed to the end\n  // and the current inner-vector end match the used-space.\n  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())\n  {\n    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);\n    \n    // add space for the new element\n    ++m_innerNonZeros[outer];\n    m_data.resize(m_data.size()+1);\n    \n    // check for reallocation\n    if(data_end != m_data.allocatedSize())\n    {\n      // m_data has been reallocated\n      //  -> move remaining inner-vectors back to the end of the free-space\n      //     so that the entire free-space is allocated to the current inner-vector.\n      eigen_internal_assert(data_end < 
m_data.allocatedSize());\n      StorageIndex new_end = convert_index(m_data.allocatedSize());\n      for(Index k=outer+1; k<=m_outerSize; ++k)\n        if(m_outerIndex[k]==data_end)\n          m_outerIndex[k] = new_end;\n    }\n    \n    // and insert it at the right position (sorted insertion)\n    Index startId = m_outerIndex[outer];\n    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;\n    while ( (p > startId) && (m_data.index(p-1) > inner) )\n    {\n      m_data.index(p) = m_data.index(p-1);\n      m_data.value(p) = m_data.value(p-1);\n      --p;\n    }\n    \n    m_data.index(p) = convert_index(inner);\n    return (m_data.value(p) = 0);\n  }\n  \n  if(m_data.size() != m_data.allocatedSize())\n  {\n    // make sure the matrix is compatible to random un-compressed insertion:\n    m_data.resize(m_data.allocatedSize());\n    this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));\n  }\n  \n  return insertUncompressed(row,col);\n}\n    \ntemplate<typename _Scalar, int _Options, typename _StorageIndex>\nEIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)\n{\n  eigen_assert(!isCompressed());\n\n  const Index outer = IsRowMajor ? row : col;\n  const StorageIndex inner = convert_index(IsRowMajor ? 
col : row);\n\n  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];\n  StorageIndex innerNNZ = m_innerNonZeros[outer];\n  if(innerNNZ>=room)\n  {\n    // this inner vector is full, we need to reallocate the whole buffer :(\n    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));\n  }\n\n  Index startId = m_outerIndex[outer];\n  Index p = startId + m_innerNonZeros[outer];\n  while ( (p > startId) && (m_data.index(p-1) > inner) )\n  {\n    m_data.index(p) = m_data.index(p-1);\n    m_data.value(p) = m_data.value(p-1);\n    --p;\n  }\n  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && \"you cannot insert an element that already exists, you must call coeffRef to this end\");\n\n  m_innerNonZeros[outer]++;\n\n  m_data.index(p) = inner;\n  return (m_data.value(p) = 0);\n}\n\ntemplate<typename _Scalar, int _Options, typename _StorageIndex>\nEIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)\n{\n  eigen_assert(isCompressed());\n\n  const Index outer = IsRowMajor ? row : col;\n  const Index inner = IsRowMajor ? col : row;\n\n  Index previousOuter = outer;\n  if (m_outerIndex[outer+1]==0)\n  {\n    // we start a new inner vector\n    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)\n    {\n      m_outerIndex[previousOuter] = convert_index(m_data.size());\n      --previousOuter;\n    }\n    m_outerIndex[outer+1] = m_outerIndex[outer];\n  }\n\n  // here we have to handle the tricky case where the outerIndex array\n  // starts with: [ 0 0 0 0 0 1 ...] 
and we are inserted in, e.g.,\n  // the 2nd inner vector...\n  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))\n                && (std::size_t(m_outerIndex[outer+1]) == m_data.size());\n\n  std::size_t startId = m_outerIndex[outer];\n  // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)\n  std::size_t p = m_outerIndex[outer+1];\n  ++m_outerIndex[outer+1];\n\n  double reallocRatio = 1;\n  if (m_data.allocatedSize()<=m_data.size())\n  {\n    // if there is no preallocated memory, let's reserve a minimum of 32 elements\n    if (m_data.size()==0)\n    {\n      m_data.reserve(32);\n    }\n    else\n    {\n      // we need to reallocate the data, to reduce multiple reallocations\n      // we use a smart resize algorithm based on the current filling ratio\n      // in addition, we use double to avoid integers overflows\n      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);\n      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());\n      // furthermore we bound the realloc ratio to:\n      //   1) reduce multiple minor realloc when the matrix is almost filled\n      //   2) avoid to allocate too much memory when the matrix is almost empty\n      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);\n    }\n  }\n  m_data.resize(m_data.size()+1,reallocRatio);\n\n  if (!isLastVec)\n  {\n    if (previousOuter==-1)\n    {\n      // oops wrong guess.\n      // let's correct the outer offsets\n      for (Index k=0; k<=(outer+1); ++k)\n        m_outerIndex[k] = 0;\n      Index k=outer+1;\n      while(m_outerIndex[k]==0)\n        m_outerIndex[k++] = 1;\n      while (k<=m_outerSize && m_outerIndex[k]!=0)\n        m_outerIndex[k++]++;\n      p = 0;\n      --k;\n      k = m_outerIndex[k]-1;\n      while (k>0)\n      {\n        m_data.index(k) = m_data.index(k-1);\n        m_data.value(k) = m_data.value(k-1);\n        k--;\n      }\n    }\n    else\n    {\n      // we are not inserting into the 
last inner vec\n      // update outer indices:\n      Index j = outer+2;\n      while (j<=m_outerSize && m_outerIndex[j]!=0)\n        m_outerIndex[j++]++;\n      --j;\n      // shift data of last vecs:\n      Index k = m_outerIndex[j]-1;\n      while (k>=Index(p))\n      {\n        m_data.index(k) = m_data.index(k-1);\n        m_data.value(k) = m_data.value(k-1);\n        k--;\n      }\n    }\n  }\n\n  while ( (p > startId) && (m_data.index(p-1) > inner) )\n  {\n    m_data.index(p) = m_data.index(p-1);\n    m_data.value(p) = m_data.value(p-1);\n    --p;\n  }\n\n  m_data.index(p) = inner;\n  return (m_data.value(p) = 0);\n}\n\nnamespace internal {\n\ntemplate<typename _Scalar, int _Options, typename _StorageIndex>\nstruct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >\n  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >\n{\n  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;\n  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;\n  evaluator() : Base() {}\n  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}\n};\n\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSEMATRIX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseMatrixBase.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSEMATRIXBASE_H\n#define EIGEN_SPARSEMATRIXBASE_H\n\nnamespace Eigen { \n\n/** \\ingroup SparseCore_Module\n  *\n  * \\class SparseMatrixBase\n  *\n  * \\brief Base class of any sparse matrices or sparse expressions\n  *\n  * \\tparam Derived is the derived type, e.g. a sparse matrix type, or an expression, etc.\n  *\n  * This class can be extended with the help of the plugin mechanism described on the page\n  * \\ref TopicCustomizing_Plugins by defining the preprocessor symbol \\c EIGEN_SPARSEMATRIXBASE_PLUGIN.\n  */\ntemplate<typename Derived> class SparseMatrixBase\n  : public EigenBase<Derived>\n{\n  public:\n\n    typedef typename internal::traits<Derived>::Scalar Scalar;\n    \n    /** The numeric type of the expression' coefficients, e.g. float, double, int or std::complex<float>, etc.\n      *\n      * It is an alias for the Scalar type */\n    typedef Scalar value_type;\n    \n    typedef typename internal::packet_traits<Scalar>::type PacketScalar;\n    typedef typename internal::traits<Derived>::StorageKind StorageKind;\n\n    /** The integer type used to \\b store indices within a SparseMatrix.\n      * For a \\c SparseMatrix<Scalar,Options,IndexType> it an alias of the third template parameter \\c IndexType. 
*/\n    typedef typename internal::traits<Derived>::StorageIndex StorageIndex;\n\n    typedef typename internal::add_const_on_value_type_if_arithmetic<\n                         typename internal::packet_traits<Scalar>::type\n                     >::type PacketReturnType;\n\n    typedef SparseMatrixBase StorageBaseType;\n\n    typedef Matrix<StorageIndex,Dynamic,1> IndexVector;\n    typedef Matrix<Scalar,Dynamic,1> ScalarVector;\n    \n    template<typename OtherDerived>\n    Derived& operator=(const EigenBase<OtherDerived> &other);\n\n    enum {\n\n      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,\n        /**< The number of rows at compile-time. This is just a copy of the value provided\n          * by the \\a Derived type. If a value is not known at compile-time,\n          * it is set to the \\a Dynamic constant.\n          * \\sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */\n\n      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,\n        /**< The number of columns at compile-time. This is just a copy of the value provided\n          * by the \\a Derived type. If a value is not known at compile-time,\n          * it is set to the \\a Dynamic constant.\n          * \\sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */\n\n\n      SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,\n                                                   internal::traits<Derived>::ColsAtCompileTime>::ret),\n        /**< This is equal to the number of coefficients, i.e. the number of\n          * rows times the number of columns, or to \\a Dynamic if this is not\n          * known at compile-time. 
\\sa RowsAtCompileTime, ColsAtCompileTime */\n\n      MaxRowsAtCompileTime = RowsAtCompileTime,\n      MaxColsAtCompileTime = ColsAtCompileTime,\n\n      MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,\n                                                      MaxColsAtCompileTime>::ret),\n\n      IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,\n        /**< This is set to true if either the number of rows or the number of\n          * columns is known at compile-time to be equal to 1. Indeed, in that case,\n          * we are dealing with a column-vector (if there is only one column) or with\n          * a row-vector (if there is only one row). */\n\n      Flags = internal::traits<Derived>::Flags,\n        /**< This stores expression \\ref flags flags which may or may not be inherited by new expressions\n          * constructed from this one. See the \\ref flags \"list of flags\".\n          */\n\n      IsRowMajor = Flags&RowMajorBit ? 1 : 0,\n      \n      InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)\n                             : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),\n\n      #ifndef EIGEN_PARSED_BY_DOXYGEN\n      _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC\n      #endif\n    };\n\n    /** \\internal the return type of MatrixBase::adjoint() */\n    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,\n                        CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,\n                        Transpose<const Derived>\n                     >::type AdjointReturnType;\n    typedef Transpose<Derived> TransposeReturnType;\n    typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;\n\n    // FIXME storage order do not match evaluator storage order\n    typedef SparseMatrix<Scalar, Flags&RowMajorBit ? 
RowMajor : ColMajor, StorageIndex> PlainObject;\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** This is the \"real scalar\" type; if the \\a Scalar type is already real numbers\n      * (e.g. int, float or double) then \\a RealScalar is just the same as \\a Scalar. If\n      * \\a Scalar is \\a std::complex<T> then RealScalar is \\a T.\n      *\n      * \\sa class NumTraits\n      */\n    typedef typename NumTraits<Scalar>::Real RealScalar;\n\n    /** \\internal the return type of coeff()\n      */\n    typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType;\n\n    /** \\internal Represents a matrix with all coefficients equal to one another*/\n    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Matrix<Scalar,Dynamic,Dynamic> > ConstantReturnType;\n\n    /** type of the equivalent dense matrix */\n    typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType;\n    /** type of the equivalent square matrix */\n    typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),\n                          EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;\n\n    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }\n    inline Derived& derived() { return *static_cast<Derived*>(this); }\n    inline Derived& const_cast_derived() const\n    { return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }\n\n    typedef EigenBase<Derived> Base;\n\n#endif // not EIGEN_PARSED_BY_DOXYGEN\n\n#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase\n#ifdef EIGEN_PARSED_BY_DOXYGEN\n#define EIGEN_DOC_UNARY_ADDONS(METHOD,OP)           /** <p>This method does not change the sparsity of \\c *this: the OP is applied to explicitly stored coefficients only. \\sa SparseCompressedBase::coeffs() </p> */\n#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL      /** <p> \\warning This method returns a read-only expression for any sparse matrices. 
\\sa \\ref TutorialSparse_SubMatrices \"Sparse block operations\" </p> */\n#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND) /** <p> \\warning This method returns a read-write expression for COND sparse matrices only. Otherwise, the returned expression is read-only. \\sa \\ref TutorialSparse_SubMatrices \"Sparse block operations\" </p> */\n#else\n#define EIGEN_DOC_UNARY_ADDONS(X,Y)\n#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND)\n#endif\n#   include \"../plugins/CommonCwiseUnaryOps.h\"\n#   include \"../plugins/CommonCwiseBinaryOps.h\"\n#   include \"../plugins/MatrixCwiseUnaryOps.h\"\n#   include \"../plugins/MatrixCwiseBinaryOps.h\"\n#   include \"../plugins/BlockMethods.h\"\n#   ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN\n#     include EIGEN_SPARSEMATRIXBASE_PLUGIN\n#   endif\n#undef EIGEN_CURRENT_STORAGE_BASE_CLASS\n#undef EIGEN_DOC_UNARY_ADDONS\n#undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n#undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF\n\n    /** \\returns the number of rows. \\sa cols() */\n    inline Index rows() const { return derived().rows(); }\n    /** \\returns the number of columns. \\sa rows() */\n    inline Index cols() const { return derived().cols(); }\n    /** \\returns the number of coefficients, which is \\a rows()*cols().\n      * \\sa rows(), cols(). */\n    inline Index size() const { return rows() * cols(); }\n    /** \\returns true if either the number of rows or the number of columns is equal to 1.\n      * In other words, this function returns\n      * \\code rows()==1 || cols()==1 \\endcode\n      * \\sa rows(), cols(), IsVectorAtCompileTime. */\n    inline bool isVector() const { return rows()==1 || cols()==1; }\n    /** \\returns the size of the storage major dimension,\n      * i.e., the number of columns for a columns major matrix, and the number of rows otherwise */\n    Index outerSize() const { return (int(Flags)&RowMajorBit) ? 
this->rows() : this->cols(); }\n    /** \\returns the size of the inner dimension according to the storage order,\n      * i.e., the number of rows for a columns major matrix, and the number of cols otherwise */\n    Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }\n\n    bool isRValue() const { return m_isRValue; }\n    Derived& markAsRValue() { m_isRValue = true; return derived(); }\n\n    SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ }\n\n    \n    template<typename OtherDerived>\n    Derived& operator=(const ReturnByValue<OtherDerived>& other);\n\n    template<typename OtherDerived>\n    inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other);\n\n    inline Derived& operator=(const Derived& other);\n\n  protected:\n\n    template<typename OtherDerived>\n    inline Derived& assign(const OtherDerived& other);\n\n    template<typename OtherDerived>\n    inline void assignGeneric(const OtherDerived& other);\n\n  public:\n\n    friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)\n    {\n      typedef typename Derived::Nested Nested;\n      typedef typename internal::remove_all<Nested>::type NestedCleaned;\n\n      if (Flags&RowMajorBit)\n      {\n        Nested nm(m.derived());\n        internal::evaluator<NestedCleaned> thisEval(nm);\n        for (Index row=0; row<nm.outerSize(); ++row)\n        {\n          Index col = 0;\n          for (typename internal::evaluator<NestedCleaned>::InnerIterator it(thisEval, row); it; ++it)\n          {\n            for ( ; col<it.index(); ++col)\n              s << \"0 \";\n            s << it.value() << \" \";\n            ++col;\n          }\n          for ( ; col<m.cols(); ++col)\n            s << \"0 \";\n          s << std::endl;\n        }\n      }\n      else\n      {\n        Nested nm(m.derived());\n        internal::evaluator<NestedCleaned> thisEval(nm);\n        if (m.cols() == 1) {\n          Index row = 0;\n         
 for (typename internal::evaluator<NestedCleaned>::InnerIterator it(thisEval, 0); it; ++it)\n          {\n            for ( ; row<it.index(); ++row)\n              s << \"0\" << std::endl;\n            s << it.value() << std::endl;\n            ++row;\n          }\n          for ( ; row<m.rows(); ++row)\n            s << \"0\" << std::endl;\n        }\n        else\n        {\n          SparseMatrix<Scalar, RowMajorBit, StorageIndex> trans = m;\n          s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit, StorageIndex> >&>(trans);\n        }\n      }\n      return s;\n    }\n\n    template<typename OtherDerived>\n    Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);\n    template<typename OtherDerived>\n    Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);\n    \n    template<typename OtherDerived>\n    Derived& operator+=(const DiagonalBase<OtherDerived>& other);\n    template<typename OtherDerived>\n    Derived& operator-=(const DiagonalBase<OtherDerived>& other);\n\n    template<typename OtherDerived>\n    Derived& operator+=(const EigenBase<OtherDerived> &other);\n    template<typename OtherDerived>\n    Derived& operator-=(const EigenBase<OtherDerived> &other);\n\n    Derived& operator*=(const Scalar& other);\n    Derived& operator/=(const Scalar& other);\n\n    template<typename OtherDerived> struct CwiseProductDenseReturnType {\n      typedef CwiseBinaryOp<internal::scalar_product_op<typename ScalarBinaryOpTraits<\n                                                          typename internal::traits<Derived>::Scalar,\n                                                          typename internal::traits<OtherDerived>::Scalar\n                                                        >::ReturnType>,\n                            const Derived,\n                            const OtherDerived\n                          > Type;\n    };\n\n    template<typename OtherDerived>\n    EIGEN_STRONG_INLINE const typename 
CwiseProductDenseReturnType<OtherDerived>::Type\n    cwiseProduct(const MatrixBase<OtherDerived> &other) const;\n\n    // sparse * diagonal\n    template<typename OtherDerived>\n    const Product<Derived,OtherDerived>\n    operator*(const DiagonalBase<OtherDerived> &other) const\n    { return Product<Derived,OtherDerived>(derived(), other.derived()); }\n\n    // diagonal * sparse\n    template<typename OtherDerived> friend\n    const Product<OtherDerived,Derived>\n    operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)\n    { return Product<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }\n    \n    // sparse * sparse\n    template<typename OtherDerived>\n    const Product<Derived,OtherDerived,AliasFreeProduct>\n    operator*(const SparseMatrixBase<OtherDerived> &other) const;\n    \n    // sparse * dense\n    template<typename OtherDerived>\n    const Product<Derived,OtherDerived>\n    operator*(const MatrixBase<OtherDerived> &other) const\n    { return Product<Derived,OtherDerived>(derived(), other.derived()); }\n    \n    // dense * sparse\n    template<typename OtherDerived> friend\n    const Product<OtherDerived,Derived>\n    operator*(const MatrixBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)\n    { return Product<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }\n    \n     /** \\returns an expression of P H P^-1 where H is the matrix represented by \\c *this */\n    SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const\n    {\n      return SparseSymmetricPermutationProduct<Derived,Upper|Lower>(derived(), perm);\n    }\n\n    template<typename OtherDerived>\n    Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);\n\n    template<int Mode>\n    inline const TriangularView<const Derived, Mode> triangularView() const;\n    \n    template<unsigned int UpLo> struct SelfAdjointViewReturnType { typedef 
SparseSelfAdjointView<Derived, UpLo> Type; };\n    template<unsigned int UpLo> struct ConstSelfAdjointViewReturnType { typedef const SparseSelfAdjointView<const Derived, UpLo> Type; };\n\n    template<unsigned int UpLo> inline \n    typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;\n    template<unsigned int UpLo> inline\n    typename SelfAdjointViewReturnType<UpLo>::Type selfadjointView();\n\n    template<typename OtherDerived> Scalar dot(const MatrixBase<OtherDerived>& other) const;\n    template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;\n    RealScalar squaredNorm() const;\n    RealScalar norm()  const;\n    RealScalar blueNorm() const;\n\n    TransposeReturnType transpose() { return TransposeReturnType(derived()); }\n    const ConstTransposeReturnType transpose() const { return ConstTransposeReturnType(derived()); }\n    const AdjointReturnType adjoint() const { return AdjointReturnType(transpose()); }\n\n    // inner-vector\n    typedef Block<Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true>       InnerVectorReturnType;\n    typedef Block<const Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> ConstInnerVectorReturnType;\n    InnerVectorReturnType innerVector(Index outer);\n    const ConstInnerVectorReturnType innerVector(Index outer) const;\n\n    // set of inner-vectors\n    typedef Block<Derived,Dynamic,Dynamic,true> InnerVectorsReturnType;\n    typedef Block<const Derived,Dynamic,Dynamic,true> ConstInnerVectorsReturnType;\n    InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize);\n    const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const;\n\n    DenseMatrixType toDense() const\n    {\n      return DenseMatrixType(derived());\n    }\n\n    template<typename OtherDerived>\n    bool isApprox(const SparseMatrixBase<OtherDerived>& other,\n                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;\n\n 
   template<typename OtherDerived>\n    bool isApprox(const MatrixBase<OtherDerived>& other,\n                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const\n    { return toDense().isApprox(other,prec); }\n\n    /** \\returns the matrix or vector obtained by evaluating this expression.\n      *\n      * Notice that in the case of a plain matrix or vector (not an expression) this function just returns\n      * a const reference, in order to avoid a useless copy.\n      */\n    inline const typename internal::eval<Derived>::type eval() const\n    { return typename internal::eval<Derived>::type(derived()); }\n\n    Scalar sum() const;\n    \n    inline const SparseView<Derived>\n    pruned(const Scalar& reference = Scalar(0), const RealScalar& epsilon = NumTraits<Scalar>::dummy_precision()) const;\n\n  protected:\n\n    bool m_isRValue;\n\n    static inline StorageIndex convert_index(const Index idx) {\n      return internal::convert_index<StorageIndex>(idx);\n    }\n  private:\n    template<typename Dest> void evalTo(Dest &) const;\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSEMATRIXBASE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparsePermutation.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSE_PERMUTATION_H\n#define EIGEN_SPARSE_PERMUTATION_H\n\n// This file implements sparse * permutation products\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename ExpressionType, int Side, bool Transposed>\nstruct permutation_matrix_product<ExpressionType, Side, Transposed, SparseShape>\n{\n    typedef typename nested_eval<ExpressionType, 1>::type MatrixType;\n    typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;\n\n    typedef typename MatrixTypeCleaned::Scalar Scalar;\n    typedef typename MatrixTypeCleaned::StorageIndex StorageIndex;\n\n    enum {\n      SrcStorageOrder = MatrixTypeCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,\n      MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight\n    };\n    \n    typedef typename internal::conditional<MoveOuter,\n        SparseMatrix<Scalar,SrcStorageOrder,StorageIndex>,\n        SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,StorageIndex> >::type ReturnType;\n\n    template<typename Dest,typename PermutationType>\n    static inline void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr)\n    {\n      MatrixType mat(xpr);\n      if(MoveOuter)\n      {\n        SparseMatrix<Scalar,SrcStorageOrder,StorageIndex> tmp(mat.rows(), mat.cols());\n        Matrix<StorageIndex,Dynamic,1> sizes(mat.outerSize());\n        for(Index j=0; j<mat.outerSize(); ++j)\n        {\n          Index jp = perm.indices().coeff(j);\n          sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = StorageIndex(mat.innerVector(((Side==OnTheRight) ^ Transposed) ? 
jp : j).nonZeros());\n        }\n        tmp.reserve(sizes);\n        for(Index j=0; j<mat.outerSize(); ++j)\n        {\n          Index jp = perm.indices().coeff(j);\n          Index jsrc = ((Side==OnTheRight) ^ Transposed) ? jp : j;\n          Index jdst = ((Side==OnTheLeft) ^ Transposed) ? jp : j;\n          for(typename MatrixTypeCleaned::InnerIterator it(mat,jsrc); it; ++it)\n            tmp.insertByOuterInner(jdst,it.index()) = it.value();\n        }\n        dst = tmp;\n      }\n      else\n      {\n        SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,StorageIndex> tmp(mat.rows(), mat.cols());\n        Matrix<StorageIndex,Dynamic,1> sizes(tmp.outerSize());\n        sizes.setZero();\n        PermutationMatrix<Dynamic,Dynamic,StorageIndex> perm_cpy;\n        if((Side==OnTheLeft) ^ Transposed)\n          perm_cpy = perm;\n        else\n          perm_cpy = perm.transpose();\n\n        for(Index j=0; j<mat.outerSize(); ++j)\n          for(typename MatrixTypeCleaned::InnerIterator it(mat,j); it; ++it)\n            sizes[perm_cpy.indices().coeff(it.index())]++;\n        tmp.reserve(sizes);\n        for(Index j=0; j<mat.outerSize(); ++j)\n          for(typename MatrixTypeCleaned::InnerIterator it(mat,j); it; ++it)\n            tmp.insertByOuterInner(perm_cpy.indices().coeff(it.index()),j) = it.value();\n        dst = tmp;\n      }\n    }\n};\n\n}\n\nnamespace internal {\n\ntemplate <int ProductTag> struct product_promote_storage_type<Sparse,             PermutationStorage, ProductTag> { typedef Sparse ret; };\ntemplate <int ProductTag> struct product_promote_storage_type<PermutationStorage, Sparse,             ProductTag> { typedef Sparse ret; };\n\n// TODO, the following two overloads are only needed to define the right temporary type through \n// typename traits<permutation_sparse_matrix_product<Rhs,Lhs,OnTheRight,false> >::ReturnType\n// whereas it should be correctly handled by traits<Product<> >::PlainObject\n\ntemplate<typename Lhs, 
typename Rhs, int ProductTag>\nstruct product_evaluator<Product<Lhs, Rhs, AliasFreeProduct>, ProductTag, PermutationShape, SparseShape>\n  : public evaluator<typename permutation_matrix_product<Rhs,OnTheLeft,false,SparseShape>::ReturnType>\n{\n  typedef Product<Lhs, Rhs, AliasFreeProduct> XprType;\n  typedef typename permutation_matrix_product<Rhs,OnTheLeft,false,SparseShape>::ReturnType PlainObject;\n  typedef evaluator<PlainObject> Base;\n\n  enum {\n    Flags = Base::Flags | EvalBeforeNestingBit\n  };\n\n  explicit product_evaluator(const XprType& xpr)\n    : m_result(xpr.rows(), xpr.cols())\n  {\n    ::new (static_cast<Base*>(this)) Base(m_result);\n    generic_product_impl<Lhs, Rhs, PermutationShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());\n  }\n\nprotected:\n  PlainObject m_result;\n};\n\ntemplate<typename Lhs, typename Rhs, int ProductTag>\nstruct product_evaluator<Product<Lhs, Rhs, AliasFreeProduct>, ProductTag, SparseShape, PermutationShape >\n  : public evaluator<typename permutation_matrix_product<Lhs,OnTheRight,false,SparseShape>::ReturnType>\n{\n  typedef Product<Lhs, Rhs, AliasFreeProduct> XprType;\n  typedef typename permutation_matrix_product<Lhs,OnTheRight,false,SparseShape>::ReturnType PlainObject;\n  typedef evaluator<PlainObject> Base;\n\n  enum {\n    Flags = Base::Flags | EvalBeforeNestingBit\n  };\n\n  explicit product_evaluator(const XprType& xpr)\n    : m_result(xpr.rows(), xpr.cols())\n  {\n    ::new (static_cast<Base*>(this)) Base(m_result);\n    generic_product_impl<Lhs, Rhs, SparseShape, PermutationShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());\n  }\n\nprotected:\n  PlainObject m_result;\n};\n\n} // end namespace internal\n\n/** \\returns the matrix with the permutation applied to the columns\n  */\ntemplate<typename SparseDerived, typename PermDerived>\ninline const Product<SparseDerived, PermDerived, AliasFreeProduct>\noperator*(const SparseMatrixBase<SparseDerived>& matrix, const 
PermutationBase<PermDerived>& perm)\n{ return Product<SparseDerived, PermDerived, AliasFreeProduct>(matrix.derived(), perm.derived()); }\n\n/** \\returns the matrix with the permutation applied to the rows\n  */\ntemplate<typename SparseDerived, typename PermDerived>\ninline const Product<PermDerived, SparseDerived, AliasFreeProduct>\noperator*( const PermutationBase<PermDerived>& perm, const SparseMatrixBase<SparseDerived>& matrix)\n{ return  Product<PermDerived, SparseDerived, AliasFreeProduct>(perm.derived(), matrix.derived()); }\n\n\n/** \\returns the matrix with the inverse permutation applied to the columns.\n  */\ntemplate<typename SparseDerived, typename PermutationType>\ninline const Product<SparseDerived, Inverse<PermutationType>, AliasFreeProduct>\noperator*(const SparseMatrixBase<SparseDerived>& matrix, const InverseImpl<PermutationType, PermutationStorage>& tperm)\n{\n  return Product<SparseDerived, Inverse<PermutationType>, AliasFreeProduct>(matrix.derived(), tperm.derived());\n}\n\n/** \\returns the matrix with the inverse permutation applied to the rows.\n  */\ntemplate<typename SparseDerived, typename PermutationType>\ninline const Product<Inverse<PermutationType>, SparseDerived, AliasFreeProduct>\noperator*(const InverseImpl<PermutationType,PermutationStorage>& tperm, const SparseMatrixBase<SparseDerived>& matrix)\n{\n  return Product<Inverse<PermutationType>, SparseDerived, AliasFreeProduct>(tperm.derived(), matrix.derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSE_PERMUTATION_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseProduct.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSEPRODUCT_H\n#define EIGEN_SPARSEPRODUCT_H\n\nnamespace Eigen { \n\n/** \\returns an expression of the product of two sparse matrices.\n  * By default a conservative product preserving the symbolic non zeros is performed.\n  * The automatic pruning of the small values can be achieved by calling the pruned() function\n  * in which case a totally different product algorithm is employed:\n  * \\code\n  * C = (A*B).pruned();             // supress numerical zeros (exact)\n  * C = (A*B).pruned(ref);\n  * C = (A*B).pruned(ref,epsilon);\n  * \\endcode\n  * where \\c ref is a meaningful non zero reference value.\n  * */\ntemplate<typename Derived>\ntemplate<typename OtherDerived>\ninline const Product<Derived,OtherDerived,AliasFreeProduct>\nSparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const\n{\n  return Product<Derived,OtherDerived,AliasFreeProduct>(derived(), other.derived());\n}\n\nnamespace internal {\n\n// sparse * sparse\ntemplate<typename Lhs, typename Rhs, int ProductType>\nstruct generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>\n{\n  template<typename Dest>\n  static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)\n  {\n    evalTo(dst, lhs, rhs, typename evaluator_traits<Dest>::Shape());\n  }\n\n  // dense += sparse * sparse\n  template<typename Dest,typename ActualLhs>\n  static void addTo(Dest& dst, const ActualLhs& lhs, const Rhs& rhs, typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type* = 0)\n  {\n    typedef typename 
nested_eval<ActualLhs,Dynamic>::type LhsNested;\n    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;\n    LhsNested lhsNested(lhs);\n    RhsNested rhsNested(rhs);\n    internal::sparse_sparse_to_dense_product_selector<typename remove_all<LhsNested>::type,\n                                                      typename remove_all<RhsNested>::type, Dest>::run(lhsNested,rhsNested,dst);\n  }\n\n  // dense -= sparse * sparse\n  template<typename Dest>\n  static void subTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type* = 0)\n  {\n    addTo(dst, -lhs, rhs);\n  }\n\nprotected:\n\n  // sparse = sparse * sparse\n  template<typename Dest>\n  static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, SparseShape)\n  {\n    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;\n    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;\n    LhsNested lhsNested(lhs);\n    RhsNested rhsNested(rhs);\n    internal::conservative_sparse_sparse_product_selector<typename remove_all<LhsNested>::type,\n                                                          typename remove_all<RhsNested>::type, Dest>::run(lhsNested,rhsNested,dst);\n  }\n\n  // dense = sparse * sparse\n  template<typename Dest>\n  static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, DenseShape)\n  {\n    dst.setZero();\n    addTo(dst, lhs, rhs);\n  }\n};\n\n// sparse * sparse-triangular\ntemplate<typename Lhs, typename Rhs, int ProductType>\nstruct generic_product_impl<Lhs, Rhs, SparseShape, SparseTriangularShape, ProductType>\n : public generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>\n{};\n\n// sparse-triangular * sparse\ntemplate<typename Lhs, typename Rhs, int ProductType>\nstruct generic_product_impl<Lhs, Rhs, SparseTriangularShape, SparseShape, ProductType>\n : public generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>\n{};\n\n// dense = sparse-product 
(can be sparse*sparse, sparse*perm, etc.)\ntemplate< typename DstXprType, typename Lhs, typename Rhs>\nstruct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>\n{\n  typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)\n  {\n    Index dstRows = src.rows();\n    Index dstCols = src.cols();\n    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))\n      dst.resize(dstRows, dstCols);\n    \n    generic_product_impl<Lhs, Rhs>::evalTo(dst,src.lhs(),src.rhs());\n  }\n};\n\n// dense += sparse-product (can be sparse*sparse, sparse*perm, etc.)\ntemplate< typename DstXprType, typename Lhs, typename Rhs>\nstruct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::add_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>\n{\n  typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)\n  {\n    generic_product_impl<Lhs, Rhs>::addTo(dst,src.lhs(),src.rhs());\n  }\n};\n\n// dense -= sparse-product (can be sparse*sparse, sparse*perm, etc.)\ntemplate< typename DstXprType, typename Lhs, typename Rhs>\nstruct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::sub_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>\n{\n  typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)\n  {\n    generic_product_impl<Lhs, Rhs>::subTo(dst,src.lhs(),src.rhs());\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, int 
Options>\nstruct unary_evaluator<SparseView<Product<Lhs, Rhs, Options> >, IteratorBased>\n : public evaluator<typename Product<Lhs, Rhs, DefaultProduct>::PlainObject>\n{\n  typedef SparseView<Product<Lhs, Rhs, Options> > XprType;\n  typedef typename XprType::PlainObject PlainObject;\n  typedef evaluator<PlainObject> Base;\n\n  explicit unary_evaluator(const XprType& xpr)\n    : m_result(xpr.rows(), xpr.cols())\n  {\n    using std::abs;\n    ::new (static_cast<Base*>(this)) Base(m_result);\n    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;\n    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;\n    LhsNested lhsNested(xpr.nestedExpression().lhs());\n    RhsNested rhsNested(xpr.nestedExpression().rhs());\n\n    internal::sparse_sparse_product_with_pruning_selector<typename remove_all<LhsNested>::type,\n                                                          typename remove_all<RhsNested>::type, PlainObject>::run(lhsNested,rhsNested,m_result,\n                                                                                                                  abs(xpr.reference())*xpr.epsilon());\n  }\n\nprotected:\n  PlainObject m_result;\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSEPRODUCT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseRedux.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSEREDUX_H\n#define EIGEN_SPARSEREDUX_H\n\nnamespace Eigen { \n\ntemplate<typename Derived>\ntypename internal::traits<Derived>::Scalar\nSparseMatrixBase<Derived>::sum() const\n{\n  eigen_assert(rows()>0 && cols()>0 && \"you are using a non initialized matrix\");\n  Scalar res(0);\n  internal::evaluator<Derived> thisEval(derived());\n  for (Index j=0; j<outerSize(); ++j)\n    for (typename internal::evaluator<Derived>::InnerIterator iter(thisEval,j); iter; ++iter)\n      res += iter.value();\n  return res;\n}\n\ntemplate<typename _Scalar, int _Options, typename _Index>\ntypename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar\nSparseMatrix<_Scalar,_Options,_Index>::sum() const\n{\n  eigen_assert(rows()>0 && cols()>0 && \"you are using a non initialized matrix\");\n  if(this->isCompressed())\n    return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();\n  else\n    return Base::sum();\n}\n\ntemplate<typename _Scalar, int _Options, typename _Index>\ntypename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar\nSparseVector<_Scalar,_Options,_Index>::sum() const\n{\n  eigen_assert(rows()>0 && cols()>0 && \"you are using a non initialized matrix\");\n  return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSEREDUX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseRef.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSE_REF_H\n#define EIGEN_SPARSE_REF_H\n\nnamespace Eigen {\n\nenum {\n  StandardCompressedFormat = 2 /**< used by Ref<SparseMatrix> to specify whether the input storage must be in standard compressed form */\n};\n  \nnamespace internal {\n\ntemplate<typename Derived> class SparseRefBase;\n\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>\nstruct traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >\n  : public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >\n{\n  typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;\n  enum {\n    Options = _Options,\n    Flags = traits<PlainObjectType>::Flags | CompressedAccessBit | NestByRefBit\n  };\n\n  template<typename Derived> struct match {\n    enum {\n      StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),\n      MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && StorageOrderMatch\n    };\n    typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;\n  };\n  \n};\n\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>\nstruct traits<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >\n  : public traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >\n{\n  enum {\n    Flags = (traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >::Flags | 
CompressedAccessBit | NestByRefBit) & ~LvalueBit\n  };\n};\n\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>\nstruct traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >\n  : public traits<SparseVector<MatScalar,MatOptions,MatIndex> >\n{\n  typedef SparseVector<MatScalar,MatOptions,MatIndex> PlainObjectType;\n  enum {\n    Options = _Options,\n    Flags = traits<PlainObjectType>::Flags | CompressedAccessBit | NestByRefBit\n  };\n\n  template<typename Derived> struct match {\n    enum {\n      MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && Derived::IsVectorAtCompileTime\n    };\n    typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;\n  };\n\n};\n\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>\nstruct traits<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >\n  : public traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >\n{\n  enum {\n    Flags = (traits<SparseVector<MatScalar,MatOptions,MatIndex> >::Flags | CompressedAccessBit | NestByRefBit) & ~LvalueBit\n  };\n};\n\ntemplate<typename Derived>\nstruct traits<SparseRefBase<Derived> > : public traits<Derived> {};\n\ntemplate<typename Derived> class SparseRefBase\n  : public SparseMapBase<Derived>\n{\npublic:\n\n  typedef SparseMapBase<Derived> Base;\n  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseRefBase)\n\n  SparseRefBase()\n    : Base(RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime, 0, 0, 0, 0, 0)\n  {}\n  \nprotected:\n\n  template<typename Expression>\n  void construct(Expression& expr)\n  {\n    if(expr.outerIndexPtr()==0)\n      ::new (static_cast<Base*>(this)) Base(expr.size(), expr.nonZeros(), expr.innerIndexPtr(), expr.valuePtr());\n    else\n      ::new (static_cast<Base*>(this)) Base(expr.rows(), 
expr.cols(), expr.nonZeros(), expr.outerIndexPtr(), expr.innerIndexPtr(), expr.valuePtr(), expr.innerNonZeroPtr());\n  }\n};\n\n} // namespace internal\n\n\n/** \n  * \\ingroup SparseCore_Module\n  *\n  * \\brief A sparse matrix expression referencing an existing sparse expression\n  *\n  * \\tparam SparseMatrixType the equivalent sparse matrix type of the referenced data, it must be a template instance of class SparseMatrix.\n  * \\tparam Options specifies whether the a standard compressed format is required \\c Options is  \\c #StandardCompressedFormat, or \\c 0.\n  *                The default is \\c 0.\n  *\n  * \\sa class Ref\n  */\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>\nclass Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType >\n  : public internal::SparseRefBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType > >\n#else\ntemplate<typename SparseMatrixType, int Options>\nclass Ref<SparseMatrixType, Options>\n  : public SparseMapBase<Derived,WriteAccessors> // yes, that's weird to use Derived here, but that works!\n#endif\n{\n    typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;\n    typedef internal::traits<Ref> Traits;\n    template<int OtherOptions>\n    inline Ref(const SparseMatrix<MatScalar,OtherOptions,MatIndex>& expr);\n    template<int OtherOptions>\n    inline Ref(const MappedSparseMatrix<MatScalar,OtherOptions,MatIndex>& expr);\n  public:\n\n    typedef internal::SparseRefBase<Ref> Base;\n    EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)\n\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    template<int OtherOptions>\n    inline Ref(SparseMatrix<MatScalar,OtherOptions,MatIndex>& expr)\n    {\n      EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseMatrix<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);\n      eigen_assert( ((Options & int(StandardCompressedFormat))==0) || 
(expr.isCompressed()) );\n      Base::construct(expr.derived());\n    }\n    \n    template<int OtherOptions>\n    inline Ref(MappedSparseMatrix<MatScalar,OtherOptions,MatIndex>& expr)\n    {\n      EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseMatrix<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);\n      eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );\n      Base::construct(expr.derived());\n    }\n    \n    template<typename Derived>\n    inline Ref(const SparseCompressedBase<Derived>& expr)\n    #else\n    /** Implicit constructor from any sparse expression (2D matrix or 1D vector) */\n    template<typename Derived>\n    inline Ref(SparseCompressedBase<Derived>& expr)\n    #endif\n    {\n      EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);\n      EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);\n      eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );\n      Base::construct(expr.const_cast_derived());\n    }\n};\n\n// this is the const ref version\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>\nclass Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>\n  : public internal::SparseRefBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >\n{\n    typedef SparseMatrix<MatScalar,MatOptions,MatIndex> TPlainObjectType;\n    typedef internal::traits<Ref> Traits;\n  public:\n\n    typedef internal::SparseRefBase<Ref> Base;\n    EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)\n\n    template<typename Derived>\n    inline Ref(const SparseMatrixBase<Derived>& expr) : m_hasCopy(false)\n    {\n      construct(expr.derived(), typename Traits::template match<Derived>::type());\n    }\n\n    inline Ref(const Ref& other) : Base(other), 
m_hasCopy(false) {\n      // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy\n    }\n\n    template<typename OtherRef>\n    inline Ref(const RefBase<OtherRef>& other) : m_hasCopy(false) {\n      construct(other.derived(), typename Traits::template match<OtherRef>::type());\n    }\n\n    ~Ref() {\n      if(m_hasCopy) {\n        TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);\n        obj->~TPlainObjectType();\n      }\n    }\n\n  protected:\n\n    template<typename Expression>\n    void construct(const Expression& expr,internal::true_type)\n    {\n      if((Options & int(StandardCompressedFormat)) && (!expr.isCompressed()))\n      {\n        TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);\n        ::new (obj) TPlainObjectType(expr);\n        m_hasCopy = true;\n        Base::construct(*obj);\n      }\n      else\n      {\n        Base::construct(expr);\n      }\n    }\n\n    template<typename Expression>\n    void construct(const Expression& expr, internal::false_type)\n    {\n      TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);\n      ::new (obj) TPlainObjectType(expr);\n      m_hasCopy = true;\n      Base::construct(*obj);\n    }\n\n  protected:\n    char m_object_bytes[sizeof(TPlainObjectType)];\n    bool m_hasCopy;\n};\n\n\n\n/**\n  * \\ingroup SparseCore_Module\n  *\n  * \\brief A sparse vector expression referencing an existing sparse vector expression\n  *\n  * \\tparam SparseVectorType the equivalent sparse vector type of the referenced data, it must be a template instance of class SparseVector.\n  *\n  * \\sa class Ref\n  */\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>\nclass Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType >\n  : public internal::SparseRefBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType > 
>\n#else\ntemplate<typename SparseVectorType>\nclass Ref<SparseVectorType>\n  : public SparseMapBase<Derived,WriteAccessors>\n#endif\n{\n    typedef SparseVector<MatScalar,MatOptions,MatIndex> PlainObjectType;\n    typedef internal::traits<Ref> Traits;\n    template<int OtherOptions>\n    inline Ref(const SparseVector<MatScalar,OtherOptions,MatIndex>& expr);\n  public:\n\n    typedef internal::SparseRefBase<Ref> Base;\n    EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    template<int OtherOptions>\n    inline Ref(SparseVector<MatScalar,OtherOptions,MatIndex>& expr)\n    {\n      EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseVector<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);\n      Base::construct(expr.derived());\n    }\n\n    template<typename Derived>\n    inline Ref(const SparseCompressedBase<Derived>& expr)\n    #else\n    /** Implicit constructor from any 1D sparse vector expression */\n    template<typename Derived>\n    inline Ref(SparseCompressedBase<Derived>& expr)\n    #endif\n    {\n      EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);\n      EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);\n      Base::construct(expr.const_cast_derived());\n    }\n};\n\n// this is the const ref version\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>\nclass Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType>\n  : public internal::SparseRefBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >\n{\n    typedef SparseVector<MatScalar,MatOptions,MatIndex> TPlainObjectType;\n    typedef internal::traits<Ref> Traits;\n  public:\n\n    typedef internal::SparseRefBase<Ref> Base;\n    EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)\n\n    template<typename Derived>\n    inline 
Ref(const SparseMatrixBase<Derived>& expr) : m_hasCopy(false)\n    {\n      construct(expr.derived(), typename Traits::template match<Derived>::type());\n    }\n\n    inline Ref(const Ref& other) : Base(other), m_hasCopy(false) {\n      // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy\n    }\n\n    template<typename OtherRef>\n    inline Ref(const RefBase<OtherRef>& other) : m_hasCopy(false) {\n      construct(other.derived(), typename Traits::template match<OtherRef>::type());\n    }\n\n    ~Ref() {\n      if(m_hasCopy) {\n        TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);\n        obj->~TPlainObjectType();\n      }\n    }\n\n  protected:\n\n    template<typename Expression>\n    void construct(const Expression& expr,internal::true_type)\n    {\n      Base::construct(expr);\n    }\n\n    template<typename Expression>\n    void construct(const Expression& expr, internal::false_type)\n    {\n      TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);\n      ::new (obj) TPlainObjectType(expr);\n      m_hasCopy = true;\n      Base::construct(*obj);\n    }\n\n  protected:\n    char m_object_bytes[sizeof(TPlainObjectType)];\n    bool m_hasCopy;\n};\n\nnamespace internal {\n\n// FIXME shall we introduce a general evaluator_ref that we can specialize for any sparse object once, and thus remove this copy-pasta thing...\n\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>\nstruct evaluator<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >\n  : evaluator<SparseCompressedBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >\n{\n  typedef evaluator<SparseCompressedBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;\n  typedef Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;  \n  evaluator() : Base() {}\n  explicit 
evaluator(const XprType &mat) : Base(mat) {}\n};\n\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>\nstruct evaluator<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >\n  : evaluator<SparseCompressedBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >\n{\n  typedef evaluator<SparseCompressedBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;\n  typedef Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;  \n  evaluator() : Base() {}\n  explicit evaluator(const XprType &mat) : Base(mat) {}\n};\n\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>\nstruct evaluator<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >\n  : evaluator<SparseCompressedBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >\n{\n  typedef evaluator<SparseCompressedBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;\n  typedef Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;\n  evaluator() : Base() {}\n  explicit evaluator(const XprType &mat) : Base(mat) {}\n};\n\ntemplate<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>\nstruct evaluator<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >\n  : evaluator<SparseCompressedBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >\n{\n  typedef evaluator<SparseCompressedBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;\n  typedef Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;\n  evaluator() : Base() {}\n  explicit evaluator(const XprType &mat) : Base(mat) {}\n};\n\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSE_REF_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseSelfAdjointView.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H\n#define EIGEN_SPARSE_SELFADJOINTVIEW_H\n\nnamespace Eigen { \n  \n/** \\ingroup SparseCore_Module\n  * \\class SparseSelfAdjointView\n  *\n  * \\brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.\n  *\n  * \\param MatrixType the type of the dense matrix storing the coefficients\n  * \\param Mode can be either \\c #Lower or \\c #Upper\n  *\n  * This class is an expression of a sefladjoint matrix from a triangular part of a matrix\n  * with given dense storage of the coefficients. It is the return type of MatrixBase::selfadjointView()\n  * and most of the time this is the only way that it is used.\n  *\n  * \\sa SparseMatrixBase::selfadjointView()\n  */\nnamespace internal {\n  \ntemplate<typename MatrixType, unsigned int Mode>\nstruct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {\n};\n\ntemplate<int SrcMode,int DstMode,typename MatrixType,int DestOrder>\nvoid permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);\n\ntemplate<int Mode,typename MatrixType,int DestOrder>\nvoid permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);\n\n}\n\ntemplate<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView\n  : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >\n{\n  public:\n    \n    enum {\n    
  Mode = _Mode,\n      RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,\n      ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime\n    };\n\n    typedef EigenBase<SparseSelfAdjointView> Base;\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef Matrix<StorageIndex,Dynamic,1> VectorI;\n    typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;\n    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;\n    \n    explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)\n    {\n      eigen_assert(rows()==cols() && \"SelfAdjointView is only for squared matrices\");\n    }\n\n    inline Index rows() const { return m_matrix.rows(); }\n    inline Index cols() const { return m_matrix.cols(); }\n\n    /** \\internal \\returns a reference to the nested matrix */\n    const _MatrixTypeNested& matrix() const { return m_matrix; }\n    typename internal::remove_reference<MatrixTypeNested>::type& matrix() { return m_matrix; }\n\n    /** \\returns an expression of the matrix product between a sparse self-adjoint matrix \\c *this and a sparse matrix \\a rhs.\n      *\n      * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product.\n      * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product.\n      */\n    template<typename OtherDerived>\n    Product<SparseSelfAdjointView, OtherDerived>\n    operator*(const SparseMatrixBase<OtherDerived>& rhs) const\n    {\n      return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());\n    }\n\n    /** \\returns an expression of the matrix product between a sparse matrix \\a lhs and a sparse self-adjoint matrix \\a rhs.\n      *\n      * Note that there is no algorithmic advantage of performing 
such a product compared to a general sparse-sparse matrix product.\n      * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product.\n      */\n    template<typename OtherDerived> friend\n    Product<OtherDerived, SparseSelfAdjointView>\n    operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)\n    {\n      return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);\n    }\n    \n    /** Efficient sparse self-adjoint matrix times dense vector/matrix product */\n    template<typename OtherDerived>\n    Product<SparseSelfAdjointView,OtherDerived>\n    operator*(const MatrixBase<OtherDerived>& rhs) const\n    {\n      return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());\n    }\n\n    /** Efficient dense vector/matrix times sparse self-adjoint matrix product */\n    template<typename OtherDerived> friend\n    Product<OtherDerived,SparseSelfAdjointView>\n    operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)\n    {\n      return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);\n    }\n\n    /** Perform a symmetric rank K update of the selfadjoint matrix \\c *this:\n      * \\f$ this = this + \\alpha ( u u^* ) \\f$ where \\a u is a vector or matrix.\n      *\n      * \\returns a reference to \\c *this\n      *\n      * To perform \\f$ this = this + \\alpha ( u^* u ) \\f$ you can simply\n      * call this function with u.adjoint().\n      */\n    template<typename DerivedU>\n    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));\n    \n    /** \\returns an expression of P H P^-1 */\n    // TODO implement twists in a more evaluator friendly fashion\n    SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const\n    {\n      return 
SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);\n    }\n\n    template<typename SrcMatrixType,int SrcMode>\n    SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)\n    {\n      internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);\n      return *this;\n    }\n\n    SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)\n    {\n      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;\n      return *this = src.twistedBy(pnull);\n    }\n\n    template<typename SrcMatrixType,unsigned int SrcMode>\n    SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)\n    {\n      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;\n      return *this = src.twistedBy(pnull);\n    }\n    \n    void resize(Index rows, Index cols)\n    {\n      EIGEN_ONLY_USED_FOR_DEBUG(rows);\n      EIGEN_ONLY_USED_FOR_DEBUG(cols);\n      eigen_assert(rows == this->rows() && cols == this->cols()\n                && \"SparseSelfadjointView::resize() does not actually allow to resize.\");\n    }\n    \n  protected:\n\n    MatrixTypeNested m_matrix;\n    //mutable VectorI m_countPerRow;\n    //mutable VectorI m_countPerCol;\n  private:\n    template<typename Dest> void evalTo(Dest &) const;\n};\n\n/***************************************************************************\n* Implementation of SparseMatrixBase methods\n***************************************************************************/\n\ntemplate<typename Derived>\ntemplate<unsigned int UpLo>\ntypename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const\n{\n  return SparseSelfAdjointView<const Derived, UpLo>(derived());\n}\n\ntemplate<typename Derived>\ntemplate<unsigned int UpLo>\ntypename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type 
SparseMatrixBase<Derived>::selfadjointView()\n{\n  return SparseSelfAdjointView<Derived, UpLo>(derived());\n}\n\n/***************************************************************************\n* Implementation of SparseSelfAdjointView methods\n***************************************************************************/\n\ntemplate<typename MatrixType, unsigned int Mode>\ntemplate<typename DerivedU>\nSparseSelfAdjointView<MatrixType,Mode>&\nSparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)\n{\n  SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();\n  if(alpha==Scalar(0))\n    m_matrix = tmp.template triangularView<Mode>();\n  else\n    m_matrix += alpha * tmp.template triangularView<Mode>();\n\n  return *this;\n}\n\nnamespace internal {\n  \n// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>\n//      in the future selfadjoint-ness should be defined by the expression traits\n//      such that Transpose<SelfAdjointView<.,.> > is valid. 
(currently TriangularBase::transpose() is overloaded to make it work)\ntemplate<typename MatrixType, unsigned int Mode>\nstruct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >\n{\n  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;\n  typedef SparseSelfAdjointShape Shape;\n};\n\nstruct SparseSelfAdjoint2Sparse {};\n\ntemplate<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };\ntemplate<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };\n\ntemplate< typename DstXprType, typename SrcXprType, typename Functor>\nstruct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>\n{\n  typedef typename DstXprType::StorageIndex StorageIndex;\n  typedef internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> AssignOpType;\n\n  template<typename DestScalar,int StorageOrder>\n  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignOpType&/*func*/)\n  {\n    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);\n  }\n\n  // FIXME: the handling of += and -= in sparse matrices should be cleanup so that next two overloads could be reduced to:\n  template<typename DestScalar,int StorageOrder,typename AssignFunc>\n  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignFunc& func)\n  {\n    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());\n    run(tmp, src, AssignOpType());\n    call_assignment_no_alias_no_transpose(dst, tmp, func);\n  }\n\n  template<typename DestScalar,int StorageOrder>\n  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,\n                  const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)\n  {\n    
SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());\n    run(tmp, src, AssignOpType());\n    dst += tmp;\n  }\n\n  template<typename DestScalar,int StorageOrder>\n  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,\n                  const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)\n  {\n    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());\n    run(tmp, src, AssignOpType());\n    dst -= tmp;\n  }\n  \n  template<typename DestScalar>\n  static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const AssignOpType&/*func*/)\n  {\n    // TODO directly evaluate into dst;\n    SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());\n    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);\n    dst = tmp;\n  }\n};\n\n} // end namespace internal\n\n/***************************************************************************\n* Implementation of sparse self-adjoint time dense matrix\n***************************************************************************/\n\nnamespace internal {\n\ntemplate<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>\ninline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)\n{\n  EIGEN_ONLY_USED_FOR_DEBUG(alpha);\n  \n  typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;\n  typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;\n  typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;\n  typedef typename LhsEval::InnerIterator LhsIterator;\n  typedef typename SparseLhsType::Scalar LhsScalar;\n  \n  enum {\n    LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,\n    
ProcessFirstHalf =\n              ((Mode&(Upper|Lower))==(Upper|Lower))\n          || ( (Mode&Upper) && !LhsIsRowMajor)\n          || ( (Mode&Lower) && LhsIsRowMajor),\n    ProcessSecondHalf = !ProcessFirstHalf\n  };\n  \n  SparseLhsTypeNested lhs_nested(lhs);\n  LhsEval lhsEval(lhs_nested);\n\n  // work on one column at once\n  for (Index k=0; k<rhs.cols(); ++k)\n  {\n    for (Index j=0; j<lhs.outerSize(); ++j)\n    {\n      LhsIterator i(lhsEval,j);\n      // handle diagonal coeff\n      if (ProcessSecondHalf)\n      {\n        while (i && i.index()<j) ++i;\n        if(i && i.index()==j)\n        {\n          res(j,k) += alpha * i.value() * rhs(j,k);\n          ++i;\n        }\n      }\n\n      // premultiplied rhs for scatters\n      typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));\n      // accumulator for partial scalar product\n      typename DenseResType::Scalar res_j(0);\n      for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)\n      {\n        LhsScalar lhs_ij = i.value();\n        if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);\n        res_j += lhs_ij * rhs(i.index(),k);\n        res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;\n      }\n      res(j,k) += alpha * res_j;\n\n      // handle diagonal coeff\n      if (ProcessFirstHalf && i && (i.index()==j))\n        res(j,k) += alpha * i.value() * rhs(j,k);\n    }\n  }\n}\n\n\ntemplate<typename LhsView, typename Rhs, int ProductType>\nstruct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>\n: generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >\n{\n  template<typename Dest>\n  static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)\n  {\n    typedef typename LhsView::_MatrixTypeNested Lhs;\n    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;\n    typedef typename 
nested_eval<Rhs,Dynamic>::type RhsNested;\n    LhsNested lhsNested(lhsView.matrix());\n    RhsNested rhsNested(rhs);\n    \n    internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);\n  }\n};\n\ntemplate<typename Lhs, typename RhsView, int ProductType>\nstruct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>\n: generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >\n{\n  template<typename Dest>\n  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)\n  {\n    typedef typename RhsView::_MatrixTypeNested Rhs;\n    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;\n    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;\n    LhsNested lhsNested(lhs);\n    RhsNested rhsNested(rhsView.matrix());\n    \n    // transpose everything\n    Transpose<Dest> dstT(dst);\n    internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);\n  }\n};\n\n// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix\n// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore\n\ntemplate<typename LhsView, typename Rhs, int ProductTag>\nstruct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>\n  : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>\n{\n  typedef Product<LhsView, Rhs, DefaultProduct> XprType;\n  typedef typename XprType::PlainObject PlainObject;\n  typedef evaluator<PlainObject> Base;\n\n  product_evaluator(const XprType& xpr)\n    : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())\n  {\n    ::new (static_cast<Base*>(this)) Base(m_result);\n    generic_product_impl<typename Rhs::PlainObject, 
Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());\n  }\n  \nprotected:\n  typename Rhs::PlainObject m_lhs;\n  PlainObject m_result;\n};\n\ntemplate<typename Lhs, typename RhsView, int ProductTag>\nstruct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>\n  : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>\n{\n  typedef Product<Lhs, RhsView, DefaultProduct> XprType;\n  typedef typename XprType::PlainObject PlainObject;\n  typedef evaluator<PlainObject> Base;\n\n  product_evaluator(const XprType& xpr)\n    : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())\n  {\n    ::new (static_cast<Base*>(this)) Base(m_result);\n    generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);\n  }\n  \nprotected:\n  typename Lhs::PlainObject m_rhs;\n  PlainObject m_result;\n};\n\n} // namespace internal\n\n/***************************************************************************\n* Implementation of symmetric copies and permutations\n***************************************************************************/\nnamespace internal {\n\ntemplate<int Mode,typename MatrixType,int DestOrder>\nvoid permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)\n{\n  typedef typename MatrixType::StorageIndex StorageIndex;\n  typedef typename MatrixType::Scalar Scalar;\n  typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;\n  typedef Matrix<StorageIndex,Dynamic,1> VectorI;\n  typedef evaluator<MatrixType> MatEval;\n  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;\n  \n  MatEval matEval(mat);\n  Dest& dest(_dest.derived());\n  enum {\n    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)\n  };\n  \n  Index size = 
mat.rows();\n  VectorI count;\n  count.resize(size);\n  count.setZero();\n  dest.resize(size,size);\n  for(Index j = 0; j<size; ++j)\n  {\n    Index jp = perm ? perm[j] : j;\n    for(MatIterator it(matEval,j); it; ++it)\n    {\n      Index i = it.index();\n      Index r = it.row();\n      Index c = it.col();\n      Index ip = perm ? perm[i] : i;\n      if(Mode==(Upper|Lower))\n        count[StorageOrderMatch ? jp : ip]++;\n      else if(r==c)\n        count[ip]++;\n      else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))\n      {\n        count[ip]++;\n        count[jp]++;\n      }\n    }\n  }\n  Index nnz = count.sum();\n  \n  // reserve space\n  dest.resizeNonZeros(nnz);\n  dest.outerIndexPtr()[0] = 0;\n  for(Index j=0; j<size; ++j)\n    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];\n  for(Index j=0; j<size; ++j)\n    count[j] = dest.outerIndexPtr()[j];\n  \n  // copy data\n  for(StorageIndex j = 0; j<size; ++j)\n  {\n    for(MatIterator it(matEval,j); it; ++it)\n    {\n      StorageIndex i = internal::convert_index<StorageIndex>(it.index());\n      Index r = it.row();\n      Index c = it.col();\n      \n      StorageIndex jp = perm ? perm[j] : j;\n      StorageIndex ip = perm ? perm[i] : i;\n      \n      if(Mode==(Upper|Lower))\n      {\n        Index k = count[StorageOrderMatch ? jp : ip]++;\n        dest.innerIndexPtr()[k] = StorageOrderMatch ? 
ip : jp;\n        dest.valuePtr()[k] = it.value();\n      }\n      else if(r==c)\n      {\n        Index k = count[ip]++;\n        dest.innerIndexPtr()[k] = ip;\n        dest.valuePtr()[k] = it.value();\n      }\n      else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))\n      {\n        if(!StorageOrderMatch)\n          std::swap(ip,jp);\n        Index k = count[jp]++;\n        dest.innerIndexPtr()[k] = ip;\n        dest.valuePtr()[k] = it.value();\n        k = count[ip]++;\n        dest.innerIndexPtr()[k] = jp;\n        dest.valuePtr()[k] = numext::conj(it.value());\n      }\n    }\n  }\n}\n\ntemplate<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>\nvoid permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)\n{\n  typedef typename MatrixType::StorageIndex StorageIndex;\n  typedef typename MatrixType::Scalar Scalar;\n  SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());\n  typedef Matrix<StorageIndex,Dynamic,1> VectorI;\n  typedef evaluator<MatrixType> MatEval;\n  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;\n\n  enum {\n    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,\n    StorageOrderMatch = int(SrcOrder) == int(DstOrder),\n    DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,\n    SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode\n  };\n\n  MatEval matEval(mat);\n  \n  Index size = mat.rows();\n  VectorI count(size);\n  count.setZero();\n  dest.resize(size,size);\n  for(StorageIndex j = 0; j<size; ++j)\n  {\n    StorageIndex jp = perm ? perm[j] : j;\n    for(MatIterator it(matEval,j); it; ++it)\n    {\n      StorageIndex i = it.index();\n      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))\n        continue;\n                  \n      StorageIndex ip = perm ? 
perm[i] : i;\n      count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;\n    }\n  }\n  dest.outerIndexPtr()[0] = 0;\n  for(Index j=0; j<size; ++j)\n    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];\n  dest.resizeNonZeros(dest.outerIndexPtr()[size]);\n  for(Index j=0; j<size; ++j)\n    count[j] = dest.outerIndexPtr()[j];\n  \n  for(StorageIndex j = 0; j<size; ++j)\n  {\n    \n    for(MatIterator it(matEval,j); it; ++it)\n    {\n      StorageIndex i = it.index();\n      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))\n        continue;\n                  \n      StorageIndex jp = perm ? perm[j] : j;\n      StorageIndex ip = perm? perm[i] : i;\n      \n      Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;\n      dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);\n      \n      if(!StorageOrderMatch) std::swap(ip,jp);\n      if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))\n        dest.valuePtr()[k] = numext::conj(it.value());\n      else\n        dest.valuePtr()[k] = it.value();\n    }\n  }\n}\n\n}\n\n// TODO implement twists in a more evaluator friendly fashion\n\nnamespace internal {\n\ntemplate<typename MatrixType, int Mode>\nstruct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {\n};\n\n}\n\ntemplate<typename MatrixType,int Mode>\nclass SparseSymmetricPermutationProduct\n  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >\n{\n  public:\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    enum {\n      RowsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::RowsAtCompileTime,\n      ColsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::ColsAtCompileTime\n    };\n  protected:\n    typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> 
Perm;\n  public:\n    typedef Matrix<StorageIndex,Dynamic,1> VectorI;\n    typedef typename MatrixType::Nested MatrixTypeNested;\n    typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;\n    \n    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)\n      : m_matrix(mat), m_perm(perm)\n    {}\n    \n    inline Index rows() const { return m_matrix.rows(); }\n    inline Index cols() const { return m_matrix.cols(); }\n        \n    const NestedExpression& matrix() const { return m_matrix; }\n    const Perm& perm() const { return m_perm; }\n    \n  protected:\n    MatrixTypeNested m_matrix;\n    const Perm& m_perm;\n\n};\n\nnamespace internal {\n  \ntemplate<typename DstXprType, typename MatrixType, int Mode, typename Scalar>\nstruct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>\n{\n  typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;\n  typedef typename DstXprType::StorageIndex DstIndex;\n  template<int Options>\n  static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)\n  {\n    // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());\n    SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;\n    internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());\n    dst = tmp;\n  }\n  \n  template<typename DestType,unsigned int DestMode>\n  static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)\n  {\n    internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseSolverBase.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSESOLVERBASE_H\n#define EIGEN_SPARSESOLVERBASE_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n  /** \\internal\n  * Helper functions to solve with a sparse right-hand-side and result.\n  * The rhs is decomposed into small vertical panels which are solved through dense temporaries.\n  */\ntemplate<typename Decomposition, typename Rhs, typename Dest>\ntypename enable_if<Rhs::ColsAtCompileTime!=1 && Dest::ColsAtCompileTime!=1>::type\nsolve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest)\n{\n  EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);\n  typedef typename Dest::Scalar DestScalar;\n  // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.\n  static const Index NbColsAtOnce = 4;\n  Index rhsCols = rhs.cols();\n  Index size = rhs.rows();\n  // the temporary matrices do not need more columns than NbColsAtOnce:\n  Index tmpCols = (std::min)(rhsCols, NbColsAtOnce); \n  Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,tmpCols);\n  Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,tmpCols);\n  for(Index k=0; k<rhsCols; k+=NbColsAtOnce)\n  {\n    Index actualCols = std::min<Index>(rhsCols-k, NbColsAtOnce);\n    tmp.leftCols(actualCols) = rhs.middleCols(k,actualCols);\n    tmpX.leftCols(actualCols) = dec.solve(tmp.leftCols(actualCols));\n    dest.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView();\n  }\n}\n\n// Overload for vector as rhs\ntemplate<typename Decomposition, typename Rhs, typename Dest>\ntypename 
enable_if<Rhs::ColsAtCompileTime==1 || Dest::ColsAtCompileTime==1>::type\nsolve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest)\n{\n  typedef typename Dest::Scalar DestScalar;\n  Index size = rhs.rows();\n  Eigen::Matrix<DestScalar,Dynamic,1> rhs_dense(rhs);\n  Eigen::Matrix<DestScalar,Dynamic,1> dest_dense(size);\n  dest_dense = dec.solve(rhs_dense);\n  dest = dest_dense.sparseView();\n}\n\n} // end namespace internal\n\n/** \\class SparseSolverBase\n  * \\ingroup SparseCore_Module\n  * \\brief A base class for sparse solvers\n  *\n  * \\tparam Derived the actual type of the solver.\n  *\n  */\ntemplate<typename Derived>\nclass SparseSolverBase : internal::noncopyable\n{\n  public:\n\n    /** Default constructor */\n    SparseSolverBase()\n      : m_isInitialized(false)\n    {}\n\n    ~SparseSolverBase()\n    {}\n\n    Derived& derived() { return *static_cast<Derived*>(this); }\n    const Derived& derived() const { return *static_cast<const Derived*>(this); }\n    \n    /** \\returns an expression of the solution x of \\f$ A x = b \\f$ using the current decomposition of A.\n      *\n      * \\sa compute()\n      */\n    template<typename Rhs>\n    inline const Solve<Derived, Rhs>\n    solve(const MatrixBase<Rhs>& b) const\n    {\n      eigen_assert(m_isInitialized && \"Solver is not initialized.\");\n      eigen_assert(derived().rows()==b.rows() && \"solve(): invalid number of rows of the right hand side matrix b\");\n      return Solve<Derived, Rhs>(derived(), b.derived());\n    }\n    \n    /** \\returns an expression of the solution x of \\f$ A x = b \\f$ using the current decomposition of A.\n      *\n      * \\sa compute()\n      */\n    template<typename Rhs>\n    inline const Solve<Derived, Rhs>\n    solve(const SparseMatrixBase<Rhs>& b) const\n    {\n      eigen_assert(m_isInitialized && \"Solver is not initialized.\");\n      eigen_assert(derived().rows()==b.rows() && \"solve(): invalid number of rows of the right hand 
side matrix b\");\n      return Solve<Derived, Rhs>(derived(), b.derived());\n    }\n    \n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** \\internal default implementation of solving with a sparse rhs */\n    template<typename Rhs,typename Dest>\n    void _solve_impl(const SparseMatrixBase<Rhs> &b, SparseMatrixBase<Dest> &dest) const\n    {\n      internal::solve_sparse_through_dense_panels(derived(), b.derived(), dest.derived());\n    }\n    #endif // EIGEN_PARSED_BY_DOXYGEN\n\n  protected:\n    \n    mutable bool m_isInitialized;\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSESOLVERBASE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseSparseProductWithPruning.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H\n#define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n\n// perform a pseudo in-place sparse * sparse product assuming all matrices are col major\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstatic void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, const typename ResultType::RealScalar& tolerance)\n{\n  // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);\n\n  typedef typename remove_all<Lhs>::type::Scalar Scalar;\n  typedef typename remove_all<Lhs>::type::StorageIndex StorageIndex;\n\n  // make sure to call innerSize/outerSize since we fake the storage order.\n  Index rows = lhs.innerSize();\n  Index cols = rhs.outerSize();\n  //Index size = lhs.outerSize();\n  eigen_assert(lhs.outerSize() == rhs.innerSize());\n\n  // allocate a temporary buffer\n  AmbiVector<Scalar,StorageIndex> tempVector(rows);\n\n  // mimics a resizeByInnerOuter:\n  if(ResultType::IsRowMajor)\n    res.resize(cols, rows);\n  else\n    res.resize(rows, cols);\n  \n  evaluator<Lhs> lhsEval(lhs);\n  evaluator<Rhs> rhsEval(rhs);\n  \n  // estimate the number of non zero entries\n  // given a rhs column containing Y non zeros, we assume that the respective Y columns\n  // of the lhs differs in average of one non zeros, thus the number of non zeros for\n  // the product of a rhs column with the lhs is X+Y where X is the average number of non zero\n  // per column of the lhs.\n  // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)\n  Index 
estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();\n\n  res.reserve(estimated_nnz_prod);\n  double ratioColRes = double(estimated_nnz_prod)/(double(lhs.rows())*double(rhs.cols()));\n  for (Index j=0; j<cols; ++j)\n  {\n    // FIXME:\n    //double ratioColRes = (double(rhs.innerVector(j).nonZeros()) + double(lhs.nonZeros())/double(lhs.cols()))/double(lhs.rows());\n    // let's do a more accurate determination of the nnz ratio for the current column j of res\n    tempVector.init(ratioColRes);\n    tempVector.setZero();\n    for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)\n    {\n      // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())\n      tempVector.restart();\n      Scalar x = rhsIt.value();\n      for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, rhsIt.index()); lhsIt; ++lhsIt)\n      {\n        tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;\n      }\n    }\n    res.startVec(j);\n    for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector,tolerance); it; ++it)\n      res.insertBackByOuterInner(j,it.index()) = it.value();\n  }\n  res.finalize();\n}\n\ntemplate<typename Lhs, typename Rhs, typename ResultType,\n  int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,\n  int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,\n  int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>\nstruct sparse_sparse_product_with_pruning_selector;\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>\n{\n  typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;\n  typedef typename ResultType::RealScalar RealScalar;\n\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)\n  {\n    typename remove_all<ResultType>::type _res(res.rows(), res.cols());\n    
internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance);\n    res.swap(_res);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>\n{\n  typedef typename ResultType::RealScalar RealScalar;\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)\n  {\n    // we need a col-major matrix to hold the result\n    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> SparseTemporaryType;\n    SparseTemporaryType _res(res.rows(), res.cols());\n    internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);\n    res = _res;\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>\n{\n  typedef typename ResultType::RealScalar RealScalar;\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)\n  {\n    // let's transpose the product to get a column x column product\n    typename remove_all<ResultType>::type _res(res.rows(), res.cols());\n    internal::sparse_sparse_product_with_pruning_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res, tolerance);\n    res.swap(_res);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>\n{\n  typedef typename ResultType::RealScalar RealScalar;\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)\n  {\n    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;\n    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;\n    ColMajorMatrixLhs colLhs(lhs);\n    
ColMajorMatrixRhs colRhs(rhs);\n    internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,ColMajorMatrixRhs,ResultType>(colLhs, colRhs, res, tolerance);\n\n    // let's transpose the product to get a column x column product\n//     typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;\n//     SparseTemporaryType _res(res.cols(), res.rows());\n//     sparse_sparse_product_with_pruning_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);\n//     res = _res.transpose();\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>\n{\n  typedef typename ResultType::RealScalar RealScalar;\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)\n  {\n    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixLhs;\n    RowMajorMatrixLhs rowLhs(lhs);\n    sparse_sparse_product_with_pruning_selector<RowMajorMatrixLhs,Rhs,ResultType,RowMajor,RowMajor>(rowLhs,rhs,res,tolerance);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>\n{\n  typedef typename ResultType::RealScalar RealScalar;\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)\n  {\n    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixRhs;\n    RowMajorMatrixRhs rowRhs(rhs);\n    sparse_sparse_product_with_pruning_selector<Lhs,RowMajorMatrixRhs,ResultType,RowMajor,RowMajor,RowMajor>(lhs,rowRhs,res,tolerance);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>\n{\n  typedef typename ResultType::RealScalar RealScalar;\n  static void run(const Lhs& lhs, const Rhs& rhs, 
ResultType& res, const RealScalar& tolerance)\n  {\n    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;\n    ColMajorMatrixRhs colRhs(rhs);\n    internal::sparse_sparse_product_with_pruning_impl<Lhs,ColMajorMatrixRhs,ResultType>(lhs, colRhs, res, tolerance);\n  }\n};\n\ntemplate<typename Lhs, typename Rhs, typename ResultType>\nstruct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>\n{\n  typedef typename ResultType::RealScalar RealScalar;\n  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)\n  {\n    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;\n    ColMajorMatrixLhs colLhs(lhs);\n    internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,Rhs,ResultType>(colLhs, rhs, res, tolerance);\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseTranspose.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSETRANSPOSE_H\n#define EIGEN_SPARSETRANSPOSE_H\n\nnamespace Eigen { \n\nnamespace internal {\n  template<typename MatrixType,int CompressedAccess=int(MatrixType::Flags&CompressedAccessBit)>\n  class SparseTransposeImpl\n    : public SparseMatrixBase<Transpose<MatrixType> >\n  {};\n  \n  template<typename MatrixType>\n  class SparseTransposeImpl<MatrixType,CompressedAccessBit>\n    : public SparseCompressedBase<Transpose<MatrixType> >\n  {\n    typedef SparseCompressedBase<Transpose<MatrixType> > Base;\n  public:\n    using Base::derived;\n    typedef typename Base::Scalar Scalar;\n    typedef typename Base::StorageIndex StorageIndex;\n\n    inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }\n    \n    inline const Scalar* valuePtr() const { return derived().nestedExpression().valuePtr(); }\n    inline const StorageIndex* innerIndexPtr() const { return derived().nestedExpression().innerIndexPtr(); }\n    inline const StorageIndex* outerIndexPtr() const { return derived().nestedExpression().outerIndexPtr(); }\n    inline const StorageIndex* innerNonZeroPtr() const { return derived().nestedExpression().innerNonZeroPtr(); }\n\n    inline Scalar* valuePtr() { return derived().nestedExpression().valuePtr(); }\n    inline StorageIndex* innerIndexPtr() { return derived().nestedExpression().innerIndexPtr(); }\n    inline StorageIndex* outerIndexPtr() { return derived().nestedExpression().outerIndexPtr(); }\n    inline StorageIndex* innerNonZeroPtr() { return derived().nestedExpression().innerNonZeroPtr(); }\n  };\n}\n  \ntemplate<typename 
MatrixType> class TransposeImpl<MatrixType,Sparse>\n  : public internal::SparseTransposeImpl<MatrixType>\n{\n  protected:\n    typedef internal::SparseTransposeImpl<MatrixType> Base;\n};\n\nnamespace internal {\n  \ntemplate<typename ArgType>\nstruct unary_evaluator<Transpose<ArgType>, IteratorBased>\n  : public evaluator_base<Transpose<ArgType> >\n{\n    typedef typename evaluator<ArgType>::InnerIterator        EvalIterator;\n  public:\n    typedef Transpose<ArgType> XprType;\n    \n    inline Index nonZerosEstimate() const {\n      return m_argImpl.nonZerosEstimate();\n    }\n\n    class InnerIterator : public EvalIterator\n    {\n    public:\n      EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)\n        : EvalIterator(unaryOp.m_argImpl,outer)\n      {}\n      \n      Index row() const { return EvalIterator::col(); }\n      Index col() const { return EvalIterator::row(); }\n    };\n    \n    enum {\n      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,\n      Flags = XprType::Flags\n    };\n    \n    explicit unary_evaluator(const XprType& op) :m_argImpl(op.nestedExpression()) {}\n\n  protected:\n    evaluator<ArgType> m_argImpl;\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSETRANSPOSE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseTriangularView.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H\n#define EIGEN_SPARSE_TRIANGULARVIEW_H\n\nnamespace Eigen {\n\n/** \\ingroup SparseCore_Module\n  *\n  * \\brief Base class for a triangular part in a \\b sparse matrix\n  *\n  * This class is an abstract base class of class TriangularView, and objects of type TriangularViewImpl cannot be instantiated.\n  * It extends class TriangularView with additional methods which are available for sparse expressions only.\n  *\n  * \\sa class TriangularView, SparseMatrixBase::triangularView()\n  */\ntemplate<typename MatrixType, unsigned int Mode> class TriangularViewImpl<MatrixType,Mode,Sparse>\n  : public SparseMatrixBase<TriangularView<MatrixType,Mode> >\n{\n    enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit))\n                    || ((Mode&Upper) &&  (MatrixType::Flags&RowMajorBit)),\n           SkipLast = !SkipFirst,\n           SkipDiag = (Mode&ZeroDiag) ? 1 : 0,\n           HasUnitDiag = (Mode&UnitDiag) ? 
1 : 0\n    };\n    \n    typedef TriangularView<MatrixType,Mode> TriangularViewType;\n    \n  protected:\n    // dummy solve function to make TriangularView happy.\n    void solve() const;\n\n    typedef SparseMatrixBase<TriangularViewType> Base;\n  public:\n    \n    EIGEN_SPARSE_PUBLIC_INTERFACE(TriangularViewType)\n    \n    typedef typename MatrixType::Nested MatrixTypeNested;\n    typedef typename internal::remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;\n    typedef typename internal::remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;\n\n    template<typename RhsType, typename DstType>\n    EIGEN_DEVICE_FUNC\n    EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const {\n      if(!(internal::is_same<RhsType,DstType>::value && internal::extract_data(dst) == internal::extract_data(rhs)))\n        dst = rhs;\n      this->solveInPlace(dst);\n    }\n\n    /** Applies the inverse of \\c *this to the dense vector or matrix \\a other, \"in-place\" */\n    template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;\n\n    /** Applies the inverse of \\c *this to the sparse vector or matrix \\a other, \"in-place\" */\n    template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;\n  \n};\n\nnamespace internal {\n\ntemplate<typename ArgType, unsigned int Mode>\nstruct unary_evaluator<TriangularView<ArgType,Mode>, IteratorBased>\n : evaluator_base<TriangularView<ArgType,Mode> >\n{\n  typedef TriangularView<ArgType,Mode> XprType;\n  \nprotected:\n  \n  typedef typename XprType::Scalar Scalar;\n  typedef typename XprType::StorageIndex StorageIndex;\n  typedef typename evaluator<ArgType>::InnerIterator EvalIterator;\n  \n  enum { SkipFirst = ((Mode&Lower) && !(ArgType::Flags&RowMajorBit))\n                    || ((Mode&Upper) &&  (ArgType::Flags&RowMajorBit)),\n         SkipLast = !SkipFirst,\n         SkipDiag = (Mode&ZeroDiag) ? 
1 : 0,\n         HasUnitDiag = (Mode&UnitDiag) ? 1 : 0\n  };\n  \npublic:\n  \n  enum {\n    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,\n    Flags = XprType::Flags\n  };\n    \n  explicit unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()), m_arg(xpr.nestedExpression()) {}\n  \n  inline Index nonZerosEstimate() const {\n    return m_argImpl.nonZerosEstimate();\n  }\n  \n  class InnerIterator : public EvalIterator\n  {\n      typedef EvalIterator Base;\n    public:\n\n      EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& xprEval, Index outer)\n        : Base(xprEval.m_argImpl,outer), m_returnOne(false), m_containsDiag(Base::outer()<xprEval.m_arg.innerSize())\n      {\n        if(SkipFirst)\n        {\n          while((*this) && ((HasUnitDiag||SkipDiag)  ? this->index()<=outer : this->index()<outer))\n            Base::operator++();\n          if(HasUnitDiag)\n            m_returnOne = m_containsDiag;\n        }\n        else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))\n        {\n          if((!SkipFirst) && Base::operator bool())\n            Base::operator++();\n          m_returnOne = m_containsDiag;\n        }\n      }\n\n      EIGEN_STRONG_INLINE InnerIterator& operator++()\n      {\n        if(HasUnitDiag && m_returnOne)\n          m_returnOne = false;\n        else\n        {\n          Base::operator++();\n          if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer()))\n          {\n            if((!SkipFirst) && Base::operator bool())\n              Base::operator++();\n            m_returnOne = m_containsDiag;\n          }\n        }\n        return *this;\n      }\n      \n      EIGEN_STRONG_INLINE operator bool() const\n      {\n        if(HasUnitDiag && m_returnOne)\n          return true;\n        if(SkipFirst) return  Base::operator bool();\n        else\n        {\n          if (SkipDiag) return (Base::operator bool() && this->index() < 
this->outer());\n          else return (Base::operator bool() && this->index() <= this->outer());\n        }\n      }\n\n//       inline Index row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); }\n//       inline Index col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); }\n      inline StorageIndex index() const\n      {\n        if(HasUnitDiag && m_returnOne)  return internal::convert_index<StorageIndex>(Base::outer());\n        else                            return Base::index();\n      }\n      inline Scalar value() const\n      {\n        if(HasUnitDiag && m_returnOne)  return Scalar(1);\n        else                            return Base::value();\n      }\n\n    protected:\n      bool m_returnOne;\n      bool m_containsDiag;\n    private:\n      Scalar& valueRef();\n  };\n  \nprotected:\n  evaluator<ArgType> m_argImpl;\n  const ArgType& m_arg;\n};\n\n} // end namespace internal\n\ntemplate<typename Derived>\ntemplate<int Mode>\ninline const TriangularView<const Derived, Mode>\nSparseMatrixBase<Derived>::triangularView() const\n{\n  return TriangularView<const Derived, Mode>(derived());\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSE_TRIANGULARVIEW_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseUtil.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSEUTIL_H\n#define EIGEN_SPARSEUTIL_H\n\nnamespace Eigen { \n\n#ifdef NDEBUG\n#define EIGEN_DBG_SPARSE(X)\n#else\n#define EIGEN_DBG_SPARSE(X) X\n#endif\n\n#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \\\ntemplate<typename OtherDerived> \\\nEIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase<OtherDerived>& other) \\\n{ \\\n  return Base::operator Op(other.derived()); \\\n} \\\nEIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \\\n{ \\\n  return Base::operator Op(other); \\\n}\n\n#define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \\\ntemplate<typename Other> \\\nEIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \\\n{ \\\n  return Base::operator Op(scalar); \\\n}\n\n#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \\\nEIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =)\n\n\n#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \\\n  EIGEN_GENERIC_PUBLIC_INTERFACE(Derived)\n\n  \nconst int CoherentAccessPattern     = 0x1;\nconst int InnerRandomAccessPattern  = 0x2 | CoherentAccessPattern;\nconst int OuterRandomAccessPattern  = 0x4 | CoherentAccessPattern;\nconst int RandomAccessPattern       = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;\n\ntemplate<typename _Scalar, int _Flags = 0, typename _StorageIndex = int>  class SparseMatrix;\ntemplate<typename _Scalar, int _Flags = 0, typename _StorageIndex = int>  class DynamicSparseMatrix;\ntemplate<typename _Scalar, int _Flags = 0, typename _StorageIndex = int>  class SparseVector;\ntemplate<typename _Scalar, int _Flags = 0, 
typename _StorageIndex = int>  class MappedSparseMatrix;\n\ntemplate<typename MatrixType, unsigned int UpLo>  class SparseSelfAdjointView;\ntemplate<typename Lhs, typename Rhs>              class SparseDiagonalProduct;\ntemplate<typename MatrixType> class SparseView;\n\ntemplate<typename Lhs, typename Rhs>        class SparseSparseProduct;\ntemplate<typename Lhs, typename Rhs>        class SparseTimeDenseProduct;\ntemplate<typename Lhs, typename Rhs>        class DenseTimeSparseProduct;\ntemplate<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct;\n\ntemplate<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;\ntemplate<typename Lhs, typename Rhs,\n         int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct DenseSparseProductReturnType;\n         \ntemplate<typename Lhs, typename Rhs,\n         int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct SparseDenseProductReturnType;\ntemplate<typename MatrixType,int UpLo> class SparseSymmetricPermutationProduct;\n\nnamespace internal {\n\ntemplate<typename T,int Rows,int Cols,int Flags> struct sparse_eval;\n\ntemplate<typename T> struct eval<T,Sparse>\n  : sparse_eval<T, traits<T>::RowsAtCompileTime,traits<T>::ColsAtCompileTime,traits<T>::Flags>\n{};\n\ntemplate<typename T,int Cols,int Flags> struct sparse_eval<T,1,Cols,Flags> {\n    typedef typename traits<T>::Scalar _Scalar;\n    typedef typename traits<T>::StorageIndex _StorageIndex;\n  public:\n    typedef SparseVector<_Scalar, RowMajor, _StorageIndex> type;\n};\n\ntemplate<typename T,int Rows,int Flags> struct sparse_eval<T,Rows,1,Flags> {\n    typedef typename traits<T>::Scalar _Scalar;\n    typedef typename traits<T>::StorageIndex _StorageIndex;\n  public:\n    typedef SparseVector<_Scalar, ColMajor, _StorageIndex> type;\n};\n\n// TODO this seems almost identical to 
plain_matrix_type<T, Sparse>\ntemplate<typename T,int Rows,int Cols,int Flags> struct sparse_eval {\n    typedef typename traits<T>::Scalar _Scalar;\n    typedef typename traits<T>::StorageIndex _StorageIndex;\n    enum { _Options = ((Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };\n  public:\n    typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type;\n};\n\ntemplate<typename T,int Flags> struct sparse_eval<T,1,1,Flags> {\n    typedef typename traits<T>::Scalar _Scalar;\n  public:\n    typedef Matrix<_Scalar, 1, 1> type;\n};\n\ntemplate<typename T> struct plain_matrix_type<T,Sparse>\n{\n  typedef typename traits<T>::Scalar _Scalar;\n  typedef typename traits<T>::StorageIndex _StorageIndex;\n  enum { _Options = ((evaluator<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };\n  public:\n    typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type;\n};\n\ntemplate<typename T>\nstruct plain_object_eval<T,Sparse>\n  : sparse_eval<T, traits<T>::RowsAtCompileTime,traits<T>::ColsAtCompileTime, evaluator<T>::Flags>\n{};\n\ntemplate<typename Decomposition, typename RhsType>\nstruct solve_traits<Decomposition,RhsType,Sparse>\n{\n  typedef typename sparse_eval<RhsType, RhsType::RowsAtCompileTime, RhsType::ColsAtCompileTime,traits<RhsType>::Flags>::type PlainObject;\n};\n\ntemplate<typename Derived>\nstruct generic_xpr_base<Derived, MatrixXpr, Sparse>\n{\n  typedef SparseMatrixBase<Derived> type;\n};\n\nstruct SparseTriangularShape  { static std::string debugName() { return \"SparseTriangularShape\"; } };\nstruct SparseSelfAdjointShape { static std::string debugName() { return \"SparseSelfAdjointShape\"; } };\n\ntemplate<> struct glue_shapes<SparseShape,SelfAdjointShape> { typedef SparseSelfAdjointShape type;  };\ntemplate<> struct glue_shapes<SparseShape,TriangularShape > { typedef SparseTriangularShape  type;  };\n\n} // end namespace internal\n\n/** \\ingroup SparseCore_Module\n  *\n  * \\class Triplet\n  *\n  * \\brief A small structure to hold 
a non zero as a triplet (i,j,value).\n  *\n  * \\sa SparseMatrix::setFromTriplets()\n  */\ntemplate<typename Scalar, typename StorageIndex=typename SparseMatrix<Scalar>::StorageIndex >\nclass Triplet\n{\npublic:\n  Triplet() : m_row(0), m_col(0), m_value(0) {}\n\n  Triplet(const StorageIndex& i, const StorageIndex& j, const Scalar& v = Scalar(0))\n    : m_row(i), m_col(j), m_value(v)\n  {}\n\n  /** \\returns the row index of the element */\n  const StorageIndex& row() const { return m_row; }\n\n  /** \\returns the column index of the element */\n  const StorageIndex& col() const { return m_col; }\n\n  /** \\returns the value of the element */\n  const Scalar& value() const { return m_value; }\nprotected:\n  StorageIndex m_row, m_col;\n  Scalar m_value;\n};\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSEUTIL_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseVector.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSEVECTOR_H\n#define EIGEN_SPARSEVECTOR_H\n\nnamespace Eigen { \n\n/** \\ingroup SparseCore_Module\n  * \\class SparseVector\n  *\n  * \\brief a sparse vector class\n  *\n  * \\tparam _Scalar the scalar type, i.e. the type of the coefficients\n  *\n  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.\n  *\n  * This class can be extended with the help of the plugin mechanism described on the page\n  * \\ref TopicCustomizing_Plugins by defining the preprocessor symbol \\c EIGEN_SPARSEVECTOR_PLUGIN.\n  */\n\nnamespace internal {\ntemplate<typename _Scalar, int _Options, typename _StorageIndex>\nstruct traits<SparseVector<_Scalar, _Options, _StorageIndex> >\n{\n  typedef _Scalar Scalar;\n  typedef _StorageIndex StorageIndex;\n  typedef Sparse StorageKind;\n  typedef MatrixXpr XprKind;\n  enum {\n    IsColVector = (_Options & RowMajorBit) ? 0 : 1,\n\n    RowsAtCompileTime = IsColVector ? Dynamic : 1,\n    ColsAtCompileTime = IsColVector ? 1 : Dynamic,\n    MaxRowsAtCompileTime = RowsAtCompileTime,\n    MaxColsAtCompileTime = ColsAtCompileTime,\n    Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit) | CompressedAccessBit,\n    SupportedAccessPatterns = InnerRandomAccessPattern\n  };\n};\n\n// Sparse-Vector-Assignment kinds:\nenum {\n  SVA_RuntimeSwitch,\n  SVA_Inner,\n  SVA_Outer\n};\n\ntemplate< typename Dest, typename Src,\n          int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch\n                             : Src::InnerSizeAtCompileTime==1 ? 
SVA_Outer\n                             : SVA_Inner>\nstruct sparse_vector_assign_selector;\n\n}\n\ntemplate<typename _Scalar, int _Options, typename _StorageIndex>\nclass SparseVector\n  : public SparseCompressedBase<SparseVector<_Scalar, _Options, _StorageIndex> >\n{\n    typedef SparseCompressedBase<SparseVector> Base;\n    using Base::convert_index;\n  public:\n    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)\n    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)\n    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)\n    \n    typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;\n    enum { IsColVector = internal::traits<SparseVector>::IsColVector };\n    \n    enum {\n      Options = _Options\n    };\n    \n    EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }\n    EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }\n    EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }\n    EIGEN_STRONG_INLINE Index outerSize() const { return 1; }\n\n    EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return m_data.valuePtr(); }\n    EIGEN_STRONG_INLINE Scalar* valuePtr() { return m_data.valuePtr(); }\n\n    EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }\n    EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }\n\n    inline const StorageIndex* outerIndexPtr() const { return 0; }\n    inline StorageIndex* outerIndexPtr() { return 0; }\n    inline const StorageIndex* innerNonZeroPtr() const { return 0; }\n    inline StorageIndex* innerNonZeroPtr() { return 0; }\n    \n    /** \\internal */\n    inline Storage& data() { return m_data; }\n    /** \\internal */\n    inline const Storage& data() const { return m_data; }\n\n    inline Scalar coeff(Index row, Index col) const\n    {\n      eigen_assert(IsColVector ? 
(col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));\n      return coeff(IsColVector ? row : col);\n    }\n    inline Scalar coeff(Index i) const\n    {\n      eigen_assert(i>=0 && i<m_size);\n      return m_data.at(StorageIndex(i));\n    }\n\n    inline Scalar& coeffRef(Index row, Index col)\n    {\n      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));\n      return coeffRef(IsColVector ? row : col);\n    }\n\n    /** \\returns a reference to the coefficient value at given index \\a i\n      * This operation involes a log(rho*size) binary search. If the coefficient does not\n      * exist yet, then a sorted insertion into a sequential buffer is performed.\n      *\n      * This insertion might be very costly if the number of nonzeros above \\a i is large.\n      */\n    inline Scalar& coeffRef(Index i)\n    {\n      eigen_assert(i>=0 && i<m_size);\n\n      return m_data.atWithInsertion(StorageIndex(i));\n    }\n\n  public:\n\n    typedef typename Base::InnerIterator InnerIterator;\n    typedef typename Base::ReverseInnerIterator ReverseInnerIterator;\n\n    inline void setZero() { m_data.clear(); }\n\n    /** \\returns the number of non zero coefficients */\n    inline Index nonZeros() const  { return m_data.size(); }\n\n    inline void startVec(Index outer)\n    {\n      EIGEN_UNUSED_VARIABLE(outer);\n      eigen_assert(outer==0);\n    }\n\n    inline Scalar& insertBackByOuterInner(Index outer, Index inner)\n    {\n      EIGEN_UNUSED_VARIABLE(outer);\n      eigen_assert(outer==0);\n      return insertBack(inner);\n    }\n    inline Scalar& insertBack(Index i)\n    {\n      m_data.append(0, i);\n      return m_data.value(m_data.size()-1);\n    }\n    \n    Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)\n    {\n      EIGEN_UNUSED_VARIABLE(outer);\n      eigen_assert(outer==0);\n      return insertBackUnordered(inner);\n    }\n    inline Scalar& insertBackUnordered(Index i)\n    {\n 
     m_data.append(0, i);\n      return m_data.value(m_data.size()-1);\n    }\n\n    inline Scalar& insert(Index row, Index col)\n    {\n      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));\n      \n      Index inner = IsColVector ? row : col;\n      Index outer = IsColVector ? col : row;\n      EIGEN_ONLY_USED_FOR_DEBUG(outer);\n      eigen_assert(outer==0);\n      return insert(inner);\n    }\n    Scalar& insert(Index i)\n    {\n      eigen_assert(i>=0 && i<m_size);\n      \n      Index startId = 0;\n      Index p = Index(m_data.size()) - 1;\n      // TODO smart realloc\n      m_data.resize(p+2,1);\n\n      while ( (p >= startId) && (m_data.index(p) > i) )\n      {\n        m_data.index(p+1) = m_data.index(p);\n        m_data.value(p+1) = m_data.value(p);\n        --p;\n      }\n      m_data.index(p+1) = convert_index(i);\n      m_data.value(p+1) = 0;\n      return m_data.value(p+1);\n    }\n\n    /**\n      */\n    inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }\n\n\n    inline void finalize() {}\n\n    /** \\copydoc SparseMatrix::prune(const Scalar&,const RealScalar&) */\n    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())\n    {\n      m_data.prune(reference,epsilon);\n    }\n\n    /** Resizes the sparse vector to \\a rows x \\a cols\n      *\n      * This method is provided for compatibility with matrices.\n      * For a column vector, \\a cols must be equal to 1.\n      * For a row vector, \\a rows must be equal to 1.\n      *\n      * \\sa resize(Index)\n      */\n    void resize(Index rows, Index cols)\n    {\n      eigen_assert((IsColVector ? cols : rows)==1 && \"Outer dimension must equal 1\");\n      resize(IsColVector ? 
rows : cols);\n    }\n\n    /** Resizes the sparse vector to \\a newSize\n      * This method deletes all entries, thus leaving an empty sparse vector\n      *\n      * \\sa  conservativeResize(), setZero() */\n    void resize(Index newSize)\n    {\n      m_size = newSize;\n      m_data.clear();\n    }\n\n    /** Resizes the sparse vector to \\a newSize, while leaving old values untouched.\n      *\n      * If the size of the vector is decreased, then the storage of the out-of bounds coefficients is kept and reserved.\n      * Call .data().squeeze() to free extra memory.\n      *\n      * \\sa reserve(), setZero()\n      */\n    void conservativeResize(Index newSize)\n    {\n      if (newSize < m_size)\n      {\n        Index i = 0;\n        while (i<m_data.size() && m_data.index(i)<newSize) ++i;\n        m_data.resize(i);\n      }\n      m_size = newSize;\n    }\n\n    void resizeNonZeros(Index size) { m_data.resize(size); }\n\n    inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); }\n\n    explicit inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); }\n\n    inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); }\n\n    template<typename OtherDerived>\n    inline SparseVector(const SparseMatrixBase<OtherDerived>& other)\n      : m_size(0)\n    {\n      #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN\n        EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN\n      #endif\n      check_template_parameters();\n      *this = other.derived();\n    }\n\n    inline SparseVector(const SparseVector& other)\n      : Base(other), m_size(0)\n    {\n      check_template_parameters();\n      *this = other.derived();\n    }\n\n    /** Swaps the values of \\c *this and \\a other.\n      * Overloaded for performance: this version performs a \\em shallow swap by swaping pointers and attributes only.\n      * \\sa SparseMatrixBase::swap()\n      */\n    inline void swap(SparseVector& 
other)\n    {\n      std::swap(m_size, other.m_size);\n      m_data.swap(other.m_data);\n    }\n\n    template<int OtherOptions>\n    inline void swap(SparseMatrix<Scalar,OtherOptions,StorageIndex>& other)\n    {\n      eigen_assert(other.outerSize()==1);\n      std::swap(m_size, other.m_innerSize);\n      m_data.swap(other.m_data);\n    }\n\n    inline SparseVector& operator=(const SparseVector& other)\n    {\n      if (other.isRValue())\n      {\n        swap(other.const_cast_derived());\n      }\n      else\n      {\n        resize(other.size());\n        m_data = other.m_data;\n      }\n      return *this;\n    }\n\n    template<typename OtherDerived>\n    inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)\n    {\n      SparseVector tmp(other.size());\n      internal::sparse_vector_assign_selector<SparseVector,OtherDerived>::run(tmp,other.derived());\n      this->swap(tmp);\n      return *this;\n    }\n\n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    template<typename Lhs, typename Rhs>\n    inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)\n    {\n      return Base::operator=(product);\n    }\n    #endif\n\n    friend std::ostream & operator << (std::ostream & s, const SparseVector& m)\n    {\n      for (Index i=0; i<m.nonZeros(); ++i)\n        s << \"(\" << m.m_data.value(i) << \",\" << m.m_data.index(i) << \") \";\n      s << std::endl;\n      return s;\n    }\n\n    /** Destructor */\n    inline ~SparseVector() {}\n\n    /** Overloaded for performance */\n    Scalar sum() const;\n\n  public:\n\n    /** \\internal \\deprecated use setZero() and reserve() */\n    EIGEN_DEPRECATED void startFill(Index reserve)\n    {\n      setZero();\n      m_data.reserve(reserve);\n    }\n\n    /** \\internal \\deprecated use insertBack(Index,Index) */\n    EIGEN_DEPRECATED Scalar& fill(Index r, Index c)\n    {\n      eigen_assert(r==0 || c==0);\n      return fill(IsColVector ? 
r : c);\n    }\n\n    /** \\internal \\deprecated use insertBack(Index) */\n    EIGEN_DEPRECATED Scalar& fill(Index i)\n    {\n      m_data.append(0, i);\n      return m_data.value(m_data.size()-1);\n    }\n\n    /** \\internal \\deprecated use insert(Index,Index) */\n    EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)\n    {\n      eigen_assert(r==0 || c==0);\n      return fillrand(IsColVector ? r : c);\n    }\n\n    /** \\internal \\deprecated use insert(Index) */\n    EIGEN_DEPRECATED Scalar& fillrand(Index i)\n    {\n      return insert(i);\n    }\n\n    /** \\internal \\deprecated use finalize() */\n    EIGEN_DEPRECATED void endFill() {}\n    \n    // These two functions were here in the 3.1 release, so let's keep them in case some code rely on them.\n    /** \\internal \\deprecated use data() */\n    EIGEN_DEPRECATED Storage& _data() { return m_data; }\n    /** \\internal \\deprecated use data() */\n    EIGEN_DEPRECATED const Storage& _data() const { return m_data; }\n    \n#   ifdef EIGEN_SPARSEVECTOR_PLUGIN\n#     include EIGEN_SPARSEVECTOR_PLUGIN\n#   endif\n\nprotected:\n  \n    static void check_template_parameters()\n    {\n      EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);\n      EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);\n    }\n    \n    Storage m_data;\n    Index m_size;\n};\n\nnamespace internal {\n\ntemplate<typename _Scalar, int _Options, typename _Index>\nstruct evaluator<SparseVector<_Scalar,_Options,_Index> >\n  : evaluator_base<SparseVector<_Scalar,_Options,_Index> >\n{\n  typedef SparseVector<_Scalar,_Options,_Index> SparseVectorType;\n  typedef evaluator_base<SparseVectorType> Base;\n  typedef typename SparseVectorType::InnerIterator InnerIterator;\n  typedef typename SparseVectorType::ReverseInnerIterator ReverseInnerIterator;\n  \n  enum {\n    CoeffReadCost = NumTraits<_Scalar>::ReadCost,\n    Flags = SparseVectorType::Flags\n  
};\n\n  evaluator() : Base() {}\n  \n  explicit evaluator(const SparseVectorType &mat) : m_matrix(&mat)\n  {\n    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);\n  }\n  \n  inline Index nonZerosEstimate() const {\n    return m_matrix->nonZeros();\n  }\n  \n  operator SparseVectorType&() { return m_matrix->const_cast_derived(); }\n  operator const SparseVectorType&() const { return *m_matrix; }\n  \n  const SparseVectorType *m_matrix;\n};\n\ntemplate< typename Dest, typename Src>\nstruct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {\n  static void run(Dest& dst, const Src& src) {\n    eigen_internal_assert(src.innerSize()==src.size());\n    typedef internal::evaluator<Src> SrcEvaluatorType;\n    SrcEvaluatorType srcEval(src);\n    for(typename SrcEvaluatorType::InnerIterator it(srcEval, 0); it; ++it)\n      dst.insert(it.index()) = it.value();\n  }\n};\n\ntemplate< typename Dest, typename Src>\nstruct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {\n  static void run(Dest& dst, const Src& src) {\n    eigen_internal_assert(src.outerSize()==src.size());\n    typedef internal::evaluator<Src> SrcEvaluatorType;\n    SrcEvaluatorType srcEval(src);\n    for(Index i=0; i<src.size(); ++i)\n    {\n      typename SrcEvaluatorType::InnerIterator it(srcEval, i);\n      if(it)\n        dst.insert(i) = it.value();\n    }\n  }\n};\n\ntemplate< typename Dest, typename Src>\nstruct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> {\n  static void run(Dest& dst, const Src& src) {\n    if(src.outerSize()==1)  sparse_vector_assign_selector<Dest,Src,SVA_Inner>::run(dst, src);\n    else                    sparse_vector_assign_selector<Dest,Src,SVA_Outer>::run(dst, src);\n  }\n};\n\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSEVECTOR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/SparseView.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2010 Daniel Lowengrub <lowdanie@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSEVIEW_H\n#define EIGEN_SPARSEVIEW_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename MatrixType>\nstruct traits<SparseView<MatrixType> > : traits<MatrixType>\n{\n  typedef typename MatrixType::StorageIndex StorageIndex;\n  typedef Sparse StorageKind;\n  enum {\n    Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)\n  };\n};\n\n} // end namespace internal\n\n/** \\ingroup SparseCore_Module\n  * \\class SparseView\n  *\n  * \\brief Expression of a dense or sparse matrix with zero or too small values removed\n  *\n  * \\tparam MatrixType the type of the object of which we are removing the small entries\n  *\n  * This class represents an expression of a given dense or sparse matrix with\n  * entries smaller than \\c reference * \\c epsilon are removed.\n  * It is the return type of MatrixBase::sparseView() and SparseMatrixBase::pruned()\n  * and most of the time this is the only way it is used.\n  *\n  * \\sa MatrixBase::sparseView(), SparseMatrixBase::pruned()\n  */\ntemplate<typename MatrixType>\nclass SparseView : public SparseMatrixBase<SparseView<MatrixType> >\n{\n  typedef typename MatrixType::Nested MatrixTypeNested;\n  typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;\n  typedef SparseMatrixBase<SparseView > Base;\npublic:\n  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)\n  typedef typename internal::remove_all<MatrixType>::type NestedExpression;\n\n  explicit SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0),\n                   
   const RealScalar &epsilon = NumTraits<Scalar>::dummy_precision())\n    : m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {}\n\n  inline Index rows() const { return m_matrix.rows(); }\n  inline Index cols() const { return m_matrix.cols(); }\n\n  inline Index innerSize() const { return m_matrix.innerSize(); }\n  inline Index outerSize() const { return m_matrix.outerSize(); }\n  \n  /** \\returns the nested expression */\n  const typename internal::remove_all<MatrixTypeNested>::type&\n  nestedExpression() const { return m_matrix; }\n  \n  Scalar reference() const { return m_reference; }\n  RealScalar epsilon() const { return m_epsilon; }\n  \nprotected:\n  MatrixTypeNested m_matrix;\n  Scalar m_reference;\n  RealScalar m_epsilon;\n};\n\nnamespace internal {\n\n// TODO find a way to unify the two following variants\n// This is tricky because implementing an inner iterator on top of an IndexBased evaluator is\n// not easy because the evaluators do not expose the sizes of the underlying expression.\n  \ntemplate<typename ArgType>\nstruct unary_evaluator<SparseView<ArgType>, IteratorBased>\n  : public evaluator_base<SparseView<ArgType> >\n{\n    typedef typename evaluator<ArgType>::InnerIterator EvalIterator;\n  public:\n    typedef SparseView<ArgType> XprType;\n    \n    class InnerIterator : public EvalIterator\n    {\n        typedef typename XprType::Scalar Scalar;\n      public:\n\n        EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer)\n          : EvalIterator(sve.m_argImpl,outer), m_view(sve.m_view)\n        {\n          incrementToNonZero();\n        }\n\n        EIGEN_STRONG_INLINE InnerIterator& operator++()\n        {\n          EvalIterator::operator++();\n          incrementToNonZero();\n          return *this;\n        }\n\n        using EvalIterator::value;\n\n      protected:\n        const XprType &m_view;\n\n      private:\n        void incrementToNonZero()\n        {\n          while((bool(*this)) && 
internal::isMuchSmallerThan(value(), m_view.reference(), m_view.epsilon()))\n          {\n            EvalIterator::operator++();\n          }\n        }\n    };\n    \n    enum {\n      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,\n      Flags = XprType::Flags\n    };\n    \n    explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {}\n\n  protected:\n    evaluator<ArgType> m_argImpl;\n    const XprType &m_view;\n};\n\ntemplate<typename ArgType>\nstruct unary_evaluator<SparseView<ArgType>, IndexBased>\n  : public evaluator_base<SparseView<ArgType> >\n{\n  public:\n    typedef SparseView<ArgType> XprType;\n  protected:\n    enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit };\n    typedef typename XprType::Scalar Scalar;\n    typedef typename XprType::StorageIndex StorageIndex;\n  public:\n    \n    class InnerIterator\n    {\n      public:\n\n        EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer)\n          : m_sve(sve), m_inner(0), m_outer(outer), m_end(sve.m_view.innerSize())\n        {\n          incrementToNonZero();\n        }\n\n        EIGEN_STRONG_INLINE InnerIterator& operator++()\n        {\n          m_inner++;\n          incrementToNonZero();\n          return *this;\n        }\n\n        EIGEN_STRONG_INLINE Scalar value() const\n        {\n          return (IsRowMajor) ? m_sve.m_argImpl.coeff(m_outer, m_inner)\n                              : m_sve.m_argImpl.coeff(m_inner, m_outer);\n        }\n\n        EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; }\n        inline Index row() const { return IsRowMajor ? m_outer : index(); }\n        inline Index col() const { return IsRowMajor ? 
index() : m_outer; }\n\n        EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }\n\n      protected:\n        const unary_evaluator &m_sve;\n        Index m_inner;\n        const Index m_outer;\n        const Index m_end;\n\n      private:\n        void incrementToNonZero()\n        {\n          while((bool(*this)) && internal::isMuchSmallerThan(value(), m_sve.m_view.reference(), m_sve.m_view.epsilon()))\n          {\n            m_inner++;\n          }\n        }\n    };\n    \n    enum {\n      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,\n      Flags = XprType::Flags\n    };\n    \n    explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {}\n\n  protected:\n    evaluator<ArgType> m_argImpl;\n    const XprType &m_view;\n};\n\n} // end namespace internal\n\n/** \\ingroup SparseCore_Module\n  *\n  * \\returns a sparse expression of the dense expression \\c *this with values smaller than\n  * \\a reference * \\a epsilon removed.\n  *\n  * This method is typically used when prototyping to convert a quickly assembled dense Matrix \\c D to a SparseMatrix \\c S:\n  * \\code\n  * MatrixXd D(n,m);\n  * SparseMatrix<double> S;\n  * S = D.sparseView();             // suppress numerical zeros (exact)\n  * S = D.sparseView(reference);\n  * S = D.sparseView(reference,epsilon);\n  * \\endcode\n  * where \\a reference is a meaningful non zero reference value,\n  * and \\a epsilon is a tolerance factor defaulting to NumTraits<Scalar>::dummy_precision().\n  *\n  * \\sa SparseMatrixBase::pruned(), class SparseView */\ntemplate<typename Derived>\nconst SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& reference,\n                                                          const typename NumTraits<Scalar>::Real& epsilon) const\n{\n  return SparseView<Derived>(derived(), reference, epsilon);\n}\n\n/** \\returns an expression of \\c *this with values smaller than\n  * \\a reference * \\a 
epsilon removed.\n  *\n  * This method is typically used in conjunction with the product of two sparse matrices\n  * to automatically prune the smallest values as follows:\n  * \\code\n  * C = (A*B).pruned();             // suppress numerical zeros (exact)\n  * C = (A*B).pruned(ref);\n  * C = (A*B).pruned(ref,epsilon);\n  * \\endcode\n  * where \\c ref is a meaningful non zero reference value.\n  * */\ntemplate<typename Derived>\nconst SparseView<Derived>\nSparseMatrixBase<Derived>::pruned(const Scalar& reference,\n                                  const RealScalar& epsilon) const\n{\n  return SparseView<Derived>(derived(), reference, epsilon);\n}\n\n} // end namespace Eigen\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseCore/TriangularSolver.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSETRIANGULARSOLVER_H\n#define EIGEN_SPARSETRIANGULARSOLVER_H\n\nnamespace Eigen { \n\nnamespace internal {\n\ntemplate<typename Lhs, typename Rhs, int Mode,\n  int UpLo = (Mode & Lower)\n           ? Lower\n           : (Mode & Upper)\n           ? Upper\n           : -1,\n  int StorageOrder = int(traits<Lhs>::Flags) & RowMajorBit>\nstruct sparse_solve_triangular_selector;\n\n// forward substitution, row-major\ntemplate<typename Lhs, typename Rhs, int Mode>\nstruct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>\n{\n  typedef typename Rhs::Scalar Scalar;\n  typedef evaluator<Lhs> LhsEval;\n  typedef typename evaluator<Lhs>::InnerIterator LhsIterator;\n  static void run(const Lhs& lhs, Rhs& other)\n  {\n    LhsEval lhsEval(lhs);\n    for(Index col=0 ; col<other.cols() ; ++col)\n    {\n      for(Index i=0; i<lhs.rows(); ++i)\n      {\n        Scalar tmp = other.coeff(i,col);\n        Scalar lastVal(0);\n        Index lastIndex = 0;\n        for(LhsIterator it(lhsEval, i); it; ++it)\n        {\n          lastVal = it.value();\n          lastIndex = it.index();\n          if(lastIndex==i)\n            break;\n          tmp -= lastVal * other.coeff(lastIndex,col);\n        }\n        if (Mode & UnitDiag)\n          other.coeffRef(i,col) = tmp;\n        else\n        {\n          eigen_assert(lastIndex==i);\n          other.coeffRef(i,col) = tmp/lastVal;\n        }\n      }\n    }\n  }\n};\n\n// backward substitution, row-major\ntemplate<typename Lhs, typename Rhs, int Mode>\nstruct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>\n{\n  typedef 
typename Rhs::Scalar Scalar;\n  typedef evaluator<Lhs> LhsEval;\n  typedef typename evaluator<Lhs>::InnerIterator LhsIterator;\n  static void run(const Lhs& lhs, Rhs& other)\n  {\n    LhsEval lhsEval(lhs);\n    for(Index col=0 ; col<other.cols() ; ++col)\n    {\n      for(Index i=lhs.rows()-1 ; i>=0 ; --i)\n      {\n        Scalar tmp = other.coeff(i,col);\n        Scalar l_ii(0);\n        LhsIterator it(lhsEval, i);\n        while(it && it.index()<i)\n          ++it;\n        if(!(Mode & UnitDiag))\n        {\n          eigen_assert(it && it.index()==i);\n          l_ii = it.value();\n          ++it;\n        }\n        else if (it && it.index() == i)\n          ++it;\n        for(; it; ++it)\n        {\n          tmp -= it.value() * other.coeff(it.index(),col);\n        }\n\n        if (Mode & UnitDiag)  other.coeffRef(i,col) = tmp;\n        else                  other.coeffRef(i,col) = tmp/l_ii;\n      }\n    }\n  }\n};\n\n// forward substitution, col-major\ntemplate<typename Lhs, typename Rhs, int Mode>\nstruct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>\n{\n  typedef typename Rhs::Scalar Scalar;\n  typedef evaluator<Lhs> LhsEval;\n  typedef typename evaluator<Lhs>::InnerIterator LhsIterator;\n  static void run(const Lhs& lhs, Rhs& other)\n  {\n    LhsEval lhsEval(lhs);\n    for(Index col=0 ; col<other.cols() ; ++col)\n    {\n      for(Index i=0; i<lhs.cols(); ++i)\n      {\n        Scalar& tmp = other.coeffRef(i,col);\n        if (tmp!=Scalar(0)) // optimization when other is actually sparse\n        {\n          LhsIterator it(lhsEval, i);\n          while(it && it.index()<i)\n            ++it;\n          if(!(Mode & UnitDiag))\n          {\n            eigen_assert(it && it.index()==i);\n            tmp /= it.value();\n          }\n          if (it && it.index()==i)\n            ++it;\n          for(; it; ++it)\n            other.coeffRef(it.index(), col) -= tmp * it.value();\n        }\n      }\n    }\n  }\n};\n\n// backward substitution, 
col-major\ntemplate<typename Lhs, typename Rhs, int Mode>\nstruct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>\n{\n  typedef typename Rhs::Scalar Scalar;\n  typedef evaluator<Lhs> LhsEval;\n  typedef typename evaluator<Lhs>::InnerIterator LhsIterator;\n  static void run(const Lhs& lhs, Rhs& other)\n  {\n    LhsEval lhsEval(lhs);\n    for(Index col=0 ; col<other.cols() ; ++col)\n    {\n      for(Index i=lhs.cols()-1; i>=0; --i)\n      {\n        Scalar& tmp = other.coeffRef(i,col);\n        if (tmp!=Scalar(0)) // optimization when other is actually sparse\n        {\n          if(!(Mode & UnitDiag))\n          {\n            // TODO replace this by a binary search. make sure the binary search is safe for partially sorted elements\n            LhsIterator it(lhsEval, i);\n            while(it && it.index()!=i)\n              ++it;\n            eigen_assert(it && it.index()==i);\n            other.coeffRef(i,col) /= it.value();\n          }\n          LhsIterator it(lhsEval, i);\n          for(; it && it.index()<i; ++it)\n            other.coeffRef(it.index(), col) -= tmp * it.value();\n        }\n      }\n    }\n  }\n};\n\n} // end namespace internal\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n\ntemplate<typename ExpressionType,unsigned int Mode>\ntemplate<typename OtherDerived>\nvoid TriangularViewImpl<ExpressionType,Mode,Sparse>::solveInPlace(MatrixBase<OtherDerived>& other) const\n{\n  eigen_assert(derived().cols() == derived().rows() && derived().cols() == other.rows());\n  eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));\n\n  enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };\n\n  typedef typename internal::conditional<copy,\n    typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;\n  OtherCopy otherCopy(other.derived());\n\n  internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, 
Mode>::run(derived().nestedExpression(), otherCopy);\n\n  if (copy)\n    other = otherCopy;\n}\n#endif\n\n// pure sparse path\n\nnamespace internal {\n\ntemplate<typename Lhs, typename Rhs, int Mode,\n  int UpLo = (Mode & Lower)\n           ? Lower\n           : (Mode & Upper)\n           ? Upper\n           : -1,\n  int StorageOrder = int(Lhs::Flags) & (RowMajorBit)>\nstruct sparse_solve_triangular_sparse_selector;\n\n// forward substitution, col-major\ntemplate<typename Lhs, typename Rhs, int Mode, int UpLo>\nstruct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>\n{\n  typedef typename Rhs::Scalar Scalar;\n  typedef typename promote_index_type<typename traits<Lhs>::StorageIndex,\n                                      typename traits<Rhs>::StorageIndex>::type StorageIndex;\n  static void run(const Lhs& lhs, Rhs& other)\n  {\n    const bool IsLower = (UpLo==Lower);\n    AmbiVector<Scalar,StorageIndex> tempVector(other.rows()*2);\n    tempVector.setBounds(0,other.rows());\n\n    Rhs res(other.rows(), other.cols());\n    res.reserve(other.nonZeros());\n\n    for(Index col=0 ; col<other.cols() ; ++col)\n    {\n      // FIXME estimate number of non zeros\n      tempVector.init(.99/*float(other.col(col).nonZeros())/float(other.rows())*/);\n      tempVector.setZero();\n      tempVector.restart();\n      for (typename Rhs::InnerIterator rhsIt(other, col); rhsIt; ++rhsIt)\n      {\n        tempVector.coeffRef(rhsIt.index()) = rhsIt.value();\n      }\n\n      for(Index i=IsLower?0:lhs.cols()-1;\n          IsLower?i<lhs.cols():i>=0;\n          i+=IsLower?1:-1)\n      {\n        tempVector.restart();\n        Scalar& ci = tempVector.coeffRef(i);\n        if (ci!=Scalar(0))\n        {\n          // find\n          typename Lhs::InnerIterator it(lhs, i);\n          if(!(Mode & UnitDiag))\n          {\n            if (IsLower)\n            {\n              eigen_assert(it.index()==i);\n              ci /= it.value();\n            }\n            else\n         
     ci /= lhs.coeff(i,i);\n          }\n          tempVector.restart();\n          if (IsLower)\n          {\n            if (it.index()==i)\n              ++it;\n            for(; it; ++it)\n              tempVector.coeffRef(it.index()) -= ci * it.value();\n          }\n          else\n          {\n            for(; it && it.index()<i; ++it)\n              tempVector.coeffRef(it.index()) -= ci * it.value();\n          }\n        }\n      }\n\n\n      Index count = 0;\n      // FIXME compute a reference value to filter zeros\n      for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector/*,1e-12*/); it; ++it)\n      {\n        ++ count;\n//         std::cerr << \"fill \" << it.index() << \", \" << col << \"\\n\";\n//         std::cout << it.value() << \"  \";\n        // FIXME use insertBack\n        res.insert(it.index(), col) = it.value();\n      }\n//       std::cout << \"tempVector.nonZeros() == \" << int(count) << \" / \" << (other.rows()) << \"\\n\";\n    }\n    res.finalize();\n    other = res.markAsRValue();\n  }\n};\n\n} // end namespace internal\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate<typename ExpressionType,unsigned int Mode>\ntemplate<typename OtherDerived>\nvoid TriangularViewImpl<ExpressionType,Mode,Sparse>::solveInPlace(SparseMatrixBase<OtherDerived>& other) const\n{\n  eigen_assert(derived().cols() == derived().rows() && derived().cols() == other.rows());\n  eigen_assert( (!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));\n\n//   enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };\n\n//   typedef typename internal::conditional<copy,\n//     typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;\n//   OtherCopy otherCopy(other.derived());\n\n  internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(derived().nestedExpression(), other.derived());\n\n//   if (copy)\n//     other = otherCopy;\n}\n#endif\n\n} // end namespace 
Eigen\n\n#endif // EIGEN_SPARSETRIANGULARSOLVER_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n// Copyright (C) 2012-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\n#ifndef EIGEN_SPARSE_LU_H\n#define EIGEN_SPARSE_LU_H\n\nnamespace Eigen {\n\ntemplate <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename _MatrixType::StorageIndex> > class SparseLU;\ntemplate <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;\ntemplate <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixUReturnType;\n\n/** \\ingroup SparseLU_Module\n  * \\class SparseLU\n  * \n  * \\brief Sparse supernodal LU factorization for general matrices\n  * \n  * This class implements the supernodal LU factorization for general matrices.\n  * It uses the main techniques from the sequential SuperLU package \n  * (http://crd-legacy.lbl.gov/~xiaoye/SuperLU/). It handles transparently real \n  * and complex arithmetics with single and double precision, depending on the \n  * scalar type of your input matrix. \n  * The code has been optimized to provide BLAS-3 operations during supernode-panel updates. \n  * It benefits directly from the built-in high-performant Eigen BLAS routines. \n  * Moreover, when the size of a supernode is very small, the BLAS calls are avoided to \n  * enable a better optimization from the compiler. For best performance, \n  * you should compile it with NDEBUG flag to avoid the numerous bounds checking on vectors. \n  * \n  * An important parameter of this class is the ordering method. 
It is used to reorder the columns \n  * (and eventually the rows) of the matrix to reduce the number of new elements that are created during \n  * numerical factorization. The cheapest method available is COLAMD. \n  * See  \\link OrderingMethods_Module the OrderingMethods module \\endlink for the list of \n  * built-in and external ordering methods. \n  *\n  * Simple example with key steps \n  * \\code\n  * VectorXd x(n), b(n);\n  * SparseMatrix<double, ColMajor> A;\n  * SparseLU<SparseMatrix<scalar, ColMajor>, COLAMDOrdering<Index> >   solver;\n  * // fill A and b;\n  * // Compute the ordering permutation vector from the structural pattern of A\n  * solver.analyzePattern(A); \n  * // Compute the numerical factorization \n  * solver.factorize(A); \n  * //Use the factors to solve the linear system \n  * x = solver.solve(b); \n  * \\endcode\n  * \n  * \\warning The input matrix A should be in a \\b compressed and \\b column-major form.\n  * Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix.\n  * \n  * \\note Unlike the initial SuperLU implementation, there is no step to equilibrate the matrix. \n  * For badly scaled matrices, this step can be useful to reduce the pivoting during factorization. \n  * If this is the case for your matrices, you can try the basic scaling method at\n  *  \"unsupported/Eigen/src/IterativeSolvers/Scaling.h\"\n  * \n  * \\tparam _MatrixType The type of the sparse matrix. It must be a column-major SparseMatrix<>\n  * \\tparam _OrderingType The ordering method to use, either AMD, COLAMD or METIS. 
Default is COLMAD\n  *\n  * \\implsparsesolverconcept\n  * \n  * \\sa \\ref TutorialSparseSolverConcept\n  * \\sa \\ref OrderingMethods_Module\n  */\ntemplate <typename _MatrixType, typename _OrderingType>\nclass SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >, public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::StorageIndex>\n{\n  protected:\n    typedef SparseSolverBase<SparseLU<_MatrixType,_OrderingType> > APIBase;\n    using APIBase::m_isInitialized;\n  public:\n    using APIBase::_solve_impl;\n    \n    typedef _MatrixType MatrixType; \n    typedef _OrderingType OrderingType;\n    typedef typename MatrixType::Scalar Scalar; \n    typedef typename MatrixType::RealScalar RealScalar; \n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> NCMatrix;\n    typedef internal::MappedSuperNodalMatrix<Scalar, StorageIndex> SCMatrix;\n    typedef Matrix<Scalar,Dynamic,1> ScalarVector;\n    typedef Matrix<StorageIndex,Dynamic,1> IndexVector;\n    typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;\n    typedef internal::SparseLUImpl<Scalar, StorageIndex> Base;\n\n    enum {\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n    \n  public:\n    SparseLU():m_lastError(\"\"),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)\n    {\n      initperfvalues(); \n    }\n    explicit SparseLU(const MatrixType& matrix)\n      : m_lastError(\"\"),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)\n    {\n      initperfvalues(); \n      compute(matrix);\n    }\n    \n    ~SparseLU()\n    {\n      // Free all explicit dynamic pointers \n    }\n    \n    void analyzePattern (const MatrixType& matrix);\n    void factorize (const MatrixType& matrix);\n    void simplicialfactorize(const MatrixType& matrix);\n    
\n    /**\n      * Compute the symbolic and numeric factorization of the input sparse matrix.\n      * The input matrix should be in column-major storage. \n      */\n    void compute (const MatrixType& matrix)\n    {\n      // Analyze \n      analyzePattern(matrix); \n      //Factorize\n      factorize(matrix);\n    } \n    \n    inline Index rows() const { return m_mat.rows(); }\n    inline Index cols() const { return m_mat.cols(); }\n    /** Indicate that the pattern of the input matrix is symmetric */\n    void isSymmetric(bool sym)\n    {\n      m_symmetricmode = sym;\n    }\n    \n    /** \\returns an expression of the matrix L, internally stored as supernodes\n      * The only operation available with this expression is the triangular solve\n      * \\code\n      * y = b; matrixL().solveInPlace(y);\n      * \\endcode\n      */\n    SparseLUMatrixLReturnType<SCMatrix> matrixL() const\n    {\n      return SparseLUMatrixLReturnType<SCMatrix>(m_Lstore);\n    }\n    /** \\returns an expression of the matrix U,\n      * The only operation available with this expression is the triangular solve\n      * \\code\n      * y = b; matrixU().solveInPlace(y);\n      * \\endcode\n      */\n    SparseLUMatrixUReturnType<SCMatrix,MappedSparseMatrix<Scalar,ColMajor,StorageIndex> > matrixU() const\n    {\n      return SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,StorageIndex> >(m_Lstore, m_Ustore);\n    }\n\n    /**\n      * \\returns a reference to the row matrix permutation \\f$ P_r \\f$ such that \\f$P_r A P_c^T = L U\\f$\n      * \\sa colsPermutation()\n      */\n    inline const PermutationType& rowsPermutation() const\n    {\n      return m_perm_r;\n    }\n    /**\n      * \\returns a reference to the column matrix permutation\\f$ P_c^T \\f$ such that \\f$P_r A P_c^T = L U\\f$\n      * \\sa rowsPermutation()\n      */\n    inline const PermutationType& colsPermutation() const\n    {\n      return m_perm_c;\n    }\n    /** Set the threshold used 
for a diagonal entry to be an acceptable pivot. */\n    void setPivotThreshold(const RealScalar& thresh)\n    {\n      m_diagpivotthresh = thresh; \n    }\n\n#ifdef EIGEN_PARSED_BY_DOXYGEN\n    /** \\returns the solution X of \\f$ A X = B \\f$ using the current decomposition of A.\n      *\n      * \\warning the destination matrix X in X = this->solve(B) must be colmun-major.\n      *\n      * \\sa compute()\n      */\n    template<typename Rhs>\n    inline const Solve<SparseLU, Rhs> solve(const MatrixBase<Rhs>& B) const;\n#endif // EIGEN_PARSED_BY_DOXYGEN\n    \n    /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was succesful,\n      *          \\c NumericalIssue if the LU factorization reports a problem, zero diagonal for instance\n      *          \\c InvalidInput if the input matrix is invalid\n      *\n      * \\sa iparm()          \n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"Decomposition is not initialized.\");\n      return m_info;\n    }\n    \n    /**\n      * \\returns A string describing the type of error\n      */\n    std::string lastErrorMessage() const\n    {\n      return m_lastError; \n    }\n\n    template<typename Rhs, typename Dest>\n    bool _solve_impl(const MatrixBase<Rhs> &B, MatrixBase<Dest> &X_base) const\n    {\n      Dest& X(X_base.derived());\n      eigen_assert(m_factorizationIsOk && \"The matrix should be factorized first\");\n      EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,\n                        THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);\n      \n      // Permute the right hand side to form X = Pr*B\n      // on return, X is overwritten by the computed solution\n      X.resize(B.rows(),B.cols());\n\n      // this ugly const_cast_derived() helps to detect aliasing when applying the permutations\n      for(Index j = 0; j < B.cols(); ++j)\n        X.col(j) = rowsPermutation() * 
B.const_cast_derived().col(j);\n      \n      //Forward substitution with L\n      this->matrixL().solveInPlace(X);\n      this->matrixU().solveInPlace(X);\n      \n      // Permute back the solution \n      for (Index j = 0; j < B.cols(); ++j)\n        X.col(j) = colsPermutation().inverse() * X.col(j);\n      \n      return true; \n    }\n    \n    /**\n      * \\returns the absolute value of the determinant of the matrix of which\n      * *this is the QR decomposition.\n      *\n      * \\warning a determinant can be very big or small, so for matrices\n      * of large enough dimension, there is a risk of overflow/underflow.\n      * One way to work around that is to use logAbsDeterminant() instead.\n      *\n      * \\sa logAbsDeterminant(), signDeterminant()\n      */\n    Scalar absDeterminant()\n    {\n      using std::abs;\n      eigen_assert(m_factorizationIsOk && \"The matrix should be factorized first.\");\n      // Initialize with the determinant of the row matrix\n      Scalar det = Scalar(1.);\n      // Note that the diagonal blocks of U are stored in supernodes,\n      // which are available in the  L part :)\n      for (Index j = 0; j < this->cols(); ++j)\n      {\n        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)\n        {\n          if(it.index() == j)\n          {\n            det *= abs(it.value());\n            break;\n          }\n        }\n      }\n      return det;\n    }\n\n    /** \\returns the natural log of the absolute value of the determinant of the matrix\n      * of which **this is the QR decomposition\n      *\n      * \\note This method is useful to work around the risk of overflow/underflow that's\n      * inherent to the determinant computation.\n      *\n      * \\sa absDeterminant(), signDeterminant()\n      */\n    Scalar logAbsDeterminant() const\n    {\n      using std::log;\n      using std::abs;\n\n      eigen_assert(m_factorizationIsOk && \"The matrix should be factorized first.\");\n      Scalar 
det = Scalar(0.);\n      for (Index j = 0; j < this->cols(); ++j)\n      {\n        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)\n        {\n          if(it.row() < j) continue;\n          if(it.row() == j)\n          {\n            det += log(abs(it.value()));\n            break;\n          }\n        }\n      }\n      return det;\n    }\n\n    /** \\returns A number representing the sign of the determinant\n      *\n      * \\sa absDeterminant(), logAbsDeterminant()\n      */\n    Scalar signDeterminant()\n    {\n      eigen_assert(m_factorizationIsOk && \"The matrix should be factorized first.\");\n      // Initialize with the determinant of the row matrix\n      Index det = 1;\n      // Note that the diagonal blocks of U are stored in supernodes,\n      // which are available in the  L part :)\n      for (Index j = 0; j < this->cols(); ++j)\n      {\n        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)\n        {\n          if(it.index() == j)\n          {\n            if(it.value()<0)\n              det = -det;\n            else if(it.value()==0)\n              return 0;\n            break;\n          }\n        }\n      }\n      return det * m_detPermR * m_detPermC;\n    }\n    \n    /** \\returns The determinant of the matrix.\n      *\n      * \\sa absDeterminant(), logAbsDeterminant()\n      */\n    Scalar determinant()\n    {\n      eigen_assert(m_factorizationIsOk && \"The matrix should be factorized first.\");\n      // Initialize with the determinant of the row matrix\n      Scalar det = Scalar(1.);\n      // Note that the diagonal blocks of U are stored in supernodes,\n      // which are available in the  L part :)\n      for (Index j = 0; j < this->cols(); ++j)\n      {\n        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)\n        {\n          if(it.index() == j)\n          {\n            det *= it.value();\n            break;\n          }\n        }\n      }\n      return (m_detPermR * 
m_detPermC) > 0 ? det : -det;\n    }\n\n  protected:\n    // Functions \n    void initperfvalues()\n    {\n      m_perfv.panel_size = 16;\n      m_perfv.relax = 1; \n      m_perfv.maxsuper = 128; \n      m_perfv.rowblk = 16; \n      m_perfv.colblk = 8; \n      m_perfv.fillfactor = 20;  \n    }\n      \n    // Variables \n    mutable ComputationInfo m_info;\n    bool m_factorizationIsOk;\n    bool m_analysisIsOk;\n    std::string m_lastError;\n    NCMatrix m_mat; // The input (permuted ) matrix \n    SCMatrix m_Lstore; // The lower triangular matrix (supernodal)\n    MappedSparseMatrix<Scalar,ColMajor,StorageIndex> m_Ustore; // The upper triangular matrix\n    PermutationType m_perm_c; // Column permutation \n    PermutationType m_perm_r ; // Row permutation\n    IndexVector m_etree; // Column elimination tree \n    \n    typename Base::GlobalLU_t m_glu; \n                               \n    // SparseLU options \n    bool m_symmetricmode;\n    // values for performance \n    internal::perfvalues m_perfv;\n    RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot\n    Index m_nnzL, m_nnzU; // Nonzeros in L and U factors\n    Index m_detPermR, m_detPermC; // Determinants of the permutation matrices\n  private:\n    // Disable copy constructor \n    SparseLU (const SparseLU& );\n  \n}; // End class SparseLU\n\n\n\n// Functions needed by the anaysis phase\n/** \n  * Compute the column permutation to minimize the fill-in\n  * \n  *  - Apply this permutation to the input matrix - \n  * \n  *  - Compute the column elimination tree on the permuted matrix \n  * \n  *  - Postorder the elimination tree and the column permutation\n  * \n  */\ntemplate <typename MatrixType, typename OrderingType>\nvoid SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)\n{\n  \n  //TODO  It is possible as in SuperLU to compute row and columns scaling vectors to equilibrate the matrix mat.\n  \n  // Firstly, copy the 
whole input matrix. \n  m_mat = mat;\n  \n  // Compute fill-in ordering\n  OrderingType ord; \n  ord(m_mat,m_perm_c);\n  \n  // Apply the permutation to the column of the input  matrix\n  if (m_perm_c.size())\n  {\n    m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers. FIXME : This vector is filled but not subsequently used.  \n    // Then, permute only the column pointers\n    ei_declare_aligned_stack_constructed_variable(StorageIndex,outerIndexPtr,mat.cols()+1,mat.isCompressed()?const_cast<StorageIndex*>(mat.outerIndexPtr()):0);\n    \n    // If the input matrix 'mat' is uncompressed, then the outer-indices do not match the ones of m_mat, and a copy is thus needed.\n    if(!mat.isCompressed()) \n      IndexVector::Map(outerIndexPtr, mat.cols()+1) = IndexVector::Map(m_mat.outerIndexPtr(),mat.cols()+1);\n    \n    // Apply the permutation and compute the nnz per column.\n    for (Index i = 0; i < mat.cols(); i++)\n    {\n      m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];\n      m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];\n    }\n  }\n  \n  // Compute the column elimination tree of the permuted matrix \n  IndexVector firstRowElt;\n  internal::coletree(m_mat, m_etree,firstRowElt); \n     \n  // In symmetric mode, do not do postorder here\n  if (!m_symmetricmode) {\n    IndexVector post, iwork; \n    // Post order etree\n    internal::treePostorder(StorageIndex(m_mat.cols()), m_etree, post); \n      \n   \n    // Renumber etree in postorder \n    Index m = m_mat.cols(); \n    iwork.resize(m+1);\n    for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i));\n    m_etree = iwork;\n    \n    // Postmultiply A*Pc by post, i.e reorder the matrix according to the postorder of the etree\n    PermutationType post_perm(m); \n    for (Index i = 0; i < m; i++) \n      post_perm.indices()(i) = post(i); \n        \n    // Combine the two permutations : postorder 
the permutation for future use\n    if(m_perm_c.size()) {\n      m_perm_c = post_perm * m_perm_c;\n    }\n    \n  } // end postordering \n  \n  m_analysisIsOk = true; \n}\n\n// Functions needed by the numerical factorization phase\n\n\n/** \n  *  - Numerical factorization \n  *  - Interleaved with the symbolic factorization \n  * On exit,  info is \n  * \n  *    = 0: successful factorization\n  * \n  *    > 0: if info = i, and i is\n  * \n  *       <= A->ncol: U(i,i) is exactly zero. The factorization has\n  *          been completed, but the factor U is exactly singular,\n  *          and division by zero will occur if it is used to solve a\n  *          system of equations.\n  * \n  *       > A->ncol: number of bytes allocated when memory allocation\n  *         failure occurred, plus A->ncol. If lwork = -1, it is\n  *         the estimated amount of space needed, plus A->ncol.  \n  */\ntemplate <typename MatrixType, typename OrderingType>\nvoid SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)\n{\n  using internal::emptyIdxLU;\n  eigen_assert(m_analysisIsOk && \"analyzePattern() should be called first\"); \n  eigen_assert((matrix.rows() == matrix.cols()) && \"Only for squared matrices\");\n  \n  typedef typename IndexVector::Scalar StorageIndex; \n  \n  m_isInitialized = true;\n  \n  \n  // Apply the column permutation computed in analyzepattern()\n  //   m_mat = matrix * m_perm_c.inverse(); \n  m_mat = matrix;\n  if (m_perm_c.size()) \n  {\n    m_mat.uncompress(); //NOTE: The effect of this command is only to create the InnerNonzeros pointers.\n    //Then, permute only the column pointers\n    const StorageIndex * outerIndexPtr;\n    if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr();\n    else\n    {\n      StorageIndex* outerIndexPtr_t = new StorageIndex[matrix.cols()+1];\n      for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];\n      outerIndexPtr = outerIndexPtr_t;\n    }\n    for 
(Index i = 0; i < matrix.cols(); i++)\n    {\n      m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];\n      m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];\n    }\n    if(!matrix.isCompressed()) delete[] outerIndexPtr;\n  } \n  else \n  { //FIXME This should not be needed if the empty permutation is handled transparently\n    m_perm_c.resize(matrix.cols());\n    for(StorageIndex i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;\n  }\n  \n  Index m = m_mat.rows();\n  Index n = m_mat.cols();\n  Index nnz = m_mat.nonZeros();\n  Index maxpanel = m_perfv.panel_size * m;\n  // Allocate working storage common to the factor routines\n  Index lwork = 0;\n  Index info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu); \n  if (info) \n  {\n    m_lastError = \"UNABLE TO ALLOCATE WORKING MEMORY\\n\\n\" ;\n    m_factorizationIsOk = false;\n    return ; \n  }\n  \n  // Set up pointers for integer working arrays \n  IndexVector segrep(m); segrep.setZero();\n  IndexVector parent(m); parent.setZero();\n  IndexVector xplore(m); xplore.setZero();\n  IndexVector repfnz(maxpanel);\n  IndexVector panel_lsub(maxpanel);\n  IndexVector xprune(n); xprune.setZero();\n  IndexVector marker(m*internal::LUNoMarker); marker.setZero();\n  \n  repfnz.setConstant(-1); \n  panel_lsub.setConstant(-1);\n  \n  // Set up pointers for scalar working arrays \n  ScalarVector dense; \n  dense.setZero(maxpanel);\n  ScalarVector tempv; \n  tempv.setZero(internal::LUnumTempV(m, m_perfv.panel_size, m_perfv.maxsuper, /*m_perfv.rowblk*/m) );\n  \n  // Compute the inverse of perm_c\n  PermutationType iperm_c(m_perm_c.inverse()); \n  \n  // Identify initial relaxed snodes\n  IndexVector relax_end(n);\n  if ( m_symmetricmode == true ) \n    Base::heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);\n  else\n    Base::relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);\n  \n  \n  m_perm_r.resize(m); \n  
m_perm_r.indices().setConstant(-1);\n  marker.setConstant(-1);\n  m_detPermR = 1; // Record the determinant of the row permutation\n  \n  m_glu.supno(0) = emptyIdxLU; m_glu.xsup.setConstant(0);\n  m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0);\n  \n  // Work on one 'panel' at a time. A panel is one of the following :\n  //  (a) a relaxed supernode at the bottom of the etree, or\n  //  (b) panel_size contiguous columns, <panel_size> defined by the user\n  Index jcol; \n  IndexVector panel_histo(n);\n  Index pivrow; // Pivotal row number in the original row matrix\n  Index nseg1; // Number of segments in U-column above panel row jcol\n  Index nseg; // Number of segments in each U-column \n  Index irep; \n  Index i, k, jj; \n  for (jcol = 0; jcol < n; )\n  {\n    // Adjust panel size so that a panel won't overlap with the next relaxed snode. \n    Index panel_size = m_perfv.panel_size; // upper bound on panel width\n    for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++)\n    {\n      if (relax_end(k) != emptyIdxLU) \n      {\n        panel_size = k - jcol; \n        break; \n      }\n    }\n    if (k == n) \n      panel_size = n - jcol; \n      \n    // Symbolic outer factorization on a panel of columns \n    Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu); \n    \n    // Numeric sup-panel updates in topological order \n    Base::panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu); \n    \n    // Sparse LU within the panel, and below the panel diagonal \n    for ( jj = jcol; jj< jcol + panel_size; jj++) \n    {\n      k = (jj - jcol) * m; // Column index for w-wide arrays \n      \n      nseg = nseg1; // begin after all the panel segments\n      //Depth-first-search for the current column\n      VectorBlock<IndexVector> panel_lsubk(panel_lsub, k, m);\n      VectorBlock<IndexVector> repfnz_k(repfnz, k, m); \n   
   info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu); \n      if ( info ) \n      {\n        m_lastError =  \"UNABLE TO EXPAND MEMORY IN COLUMN_DFS() \";\n        m_info = NumericalIssue; \n        m_factorizationIsOk = false; \n        return; \n      }\n      // Numeric updates to this column \n      VectorBlock<ScalarVector> dense_k(dense, k, m); \n      VectorBlock<IndexVector> segrep_k(segrep, nseg1, m-nseg1); \n      info = Base::column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu); \n      if ( info ) \n      {\n        m_lastError = \"UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() \";\n        m_info = NumericalIssue; \n        m_factorizationIsOk = false; \n        return; \n      }\n      \n      // Copy the U-segments to ucol(*)\n      info = Base::copy_to_ucol(jj, nseg, segrep, repfnz_k ,m_perm_r.indices(), dense_k, m_glu); \n      if ( info ) \n      {\n        m_lastError = \"UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() \";\n        m_info = NumericalIssue; \n        m_factorizationIsOk = false; \n        return; \n      }\n      \n      // Form the L-segment \n      info = Base::pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);\n      if ( info ) \n      {\n        m_lastError = \"THE MATRIX IS STRUCTURALLY SINGULAR ... 
ZERO COLUMN AT \";\n        std::ostringstream returnInfo;\n        returnInfo << info; \n        m_lastError += returnInfo.str();\n        m_info = NumericalIssue; \n        m_factorizationIsOk = false; \n        return; \n      }\n      \n      // Update the determinant of the row permutation matrix\n      // FIXME: the following test is not correct, we should probably take iperm_c into account and pivrow is not directly the row pivot.\n      if (pivrow != jj) m_detPermR = -m_detPermR;\n\n      // Prune columns (0:jj-1) using column jj\n      Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu); \n      \n      // Reset repfnz for this column \n      for (i = 0; i < nseg; i++)\n      {\n        irep = segrep(i); \n        repfnz_k(irep) = emptyIdxLU; \n      }\n    } // end SparseLU within the panel  \n    jcol += panel_size;  // Move to the next panel\n  } // end for -- end elimination \n  \n  m_detPermR = m_perm_r.determinant();\n  m_detPermC = m_perm_c.determinant();\n  \n  // Count the number of nonzeros in factors \n  Base::countnz(n, m_nnzL, m_nnzU, m_glu); \n  // Apply permutation  to the L subscripts \n  Base::fixupL(n, m_perm_r.indices(), m_glu);\n  \n  // Create supernode matrix L \n  m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup); \n  // Create the column major upper sparse matrix  U; \n  new (&m_Ustore) MappedSparseMatrix<Scalar, ColMajor, StorageIndex> ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() );\n  \n  m_info = Success;\n  m_factorizationIsOk = true;\n}\n\ntemplate<typename MappedSupernodalType>\nstruct SparseLUMatrixLReturnType : internal::no_assignment_operator\n{\n  typedef typename MappedSupernodalType::Scalar Scalar;\n  explicit SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)\n  { }\n  Index rows() { return m_mapL.rows(); }\n  Index cols() { return m_mapL.cols(); }\n  template<typename Dest>\n  void 
solveInPlace( MatrixBase<Dest> &X) const\n  {\n    m_mapL.solveInPlace(X);\n  }\n  const MappedSupernodalType& m_mapL;\n};\n\ntemplate<typename MatrixLType, typename MatrixUType>\nstruct SparseLUMatrixUReturnType : internal::no_assignment_operator\n{\n  typedef typename MatrixLType::Scalar Scalar;\n  SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)\n  : m_mapL(mapL),m_mapU(mapU)\n  { }\n  Index rows() { return m_mapL.rows(); }\n  Index cols() { return m_mapL.cols(); }\n\n  template<typename Dest>   void solveInPlace(MatrixBase<Dest> &X) const\n  {\n    Index nrhs = X.cols();\n    Index n    = X.rows();\n    // Backward solve with U\n    for (Index k = m_mapL.nsuper(); k >= 0; k--)\n    {\n      Index fsupc = m_mapL.supToCol()[k];\n      Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension\n      Index nsupc = m_mapL.supToCol()[k+1] - fsupc;\n      Index luptr = m_mapL.colIndexPtr()[fsupc];\n\n      if (nsupc == 1)\n      {\n        for (Index j = 0; j < nrhs; j++)\n        {\n          X(fsupc, j) /= m_mapL.valuePtr()[luptr];\n        }\n      }\n      else\n      {\n        Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );\n        Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );\n        U = A.template triangularView<Upper>().solve(U);\n      }\n\n      for (Index j = 0; j < nrhs; ++j)\n      {\n        for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)\n        {\n          typename MatrixUType::InnerIterator it(m_mapU, jcol);\n          for ( ; it; ++it)\n          {\n            Index irow = it.index();\n            X(irow, j) -= X(jcol, j) * it.value();\n          }\n        }\n      }\n    } // End For U-solve\n  }\n  const MatrixLType& m_mapL;\n  const MatrixUType& m_mapU;\n};\n\n} // End namespace Eigen \n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLUImpl.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n#ifndef SPARSELU_IMPL_H\n#define SPARSELU_IMPL_H\n\nnamespace Eigen {\nnamespace internal {\n  \n/** \\ingroup SparseLU_Module\n  * \\class SparseLUImpl\n  * Base class for sparseLU\n  */\ntemplate <typename Scalar, typename StorageIndex>\nclass SparseLUImpl\n{\n  public:\n    typedef Matrix<Scalar,Dynamic,1> ScalarVector;\n    typedef Matrix<StorageIndex,Dynamic,1> IndexVector; \n    typedef Matrix<Scalar,Dynamic,Dynamic,ColMajor> ScalarMatrix;\n    typedef Map<ScalarMatrix, 0,  OuterStride<> > MappedMatrixBlock;\n    typedef typename ScalarVector::RealScalar RealScalar; \n    typedef Ref<Matrix<Scalar,Dynamic,1> > BlockScalarVector;\n    typedef Ref<Matrix<StorageIndex,Dynamic,1> > BlockIndexVector;\n    typedef LU_GlobalLU_t<IndexVector, ScalarVector> GlobalLU_t; \n    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> MatrixType; \n    \n  protected:\n     template <typename VectorType>\n     Index expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions);\n     Index memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size,  GlobalLU_t& glu); \n     template <typename VectorType>\n     Index memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions);\n     void heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end); \n     void relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end); \n     Index snode_dfs(const Index jcol, const Index 
kcol,const MatrixType& mat,  IndexVector& xprune, IndexVector& marker, GlobalLU_t& glu); \n     Index snode_bmod (const Index jcol, const Index fsupc, ScalarVector& dense, GlobalLU_t& glu);\n     Index pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu);\n     template <typename Traits>\n     void dfs_kernel(const StorageIndex jj, IndexVector& perm_r,\n                    Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,\n                    Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,\n                    IndexVector& xplore, GlobalLU_t& glu, Index& nextl_col, Index krow, Traits& traits);\n     void panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);\n    \n     void panel_bmod(const Index m, const Index w, const Index jcol, const Index nseg, ScalarVector& dense, ScalarVector& tempv, IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu);\n     Index column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg,  BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu);\n     Index column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv, BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu); \n     Index copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep, BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu); \n     void pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg, const 
IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu);\n     void countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu); \n     void fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu); \n     \n     template<typename , typename >\n     friend struct column_dfs_traits;\n}; \n\n} // end namespace internal\n} // namespace Eigen\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_Memory.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n/* \n \n * NOTE: This file is the modified version of [s,d,c,z]memory.c files in SuperLU \n \n * -- SuperLU routine (version 3.1) --\n * Univ. of California Berkeley, Xerox Palo Alto Research Center,\n * and Lawrence Berkeley National Lab.\n * August 1, 2008\n *\n * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.\n *\n * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY\n * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.\n *\n * Permission is hereby granted to use or copy this program for any\n * purpose, provided the above notices are retained on all copies.\n * Permission to modify the code and to distribute modified code is\n * granted, provided the above notices are retained, and a notice that\n * the code was modified is included with the above copyright notice.\n */\n\n#ifndef EIGEN_SPARSELU_MEMORY\n#define EIGEN_SPARSELU_MEMORY\n\nnamespace Eigen {\nnamespace internal {\n  \nenum { LUNoMarker = 3 };\nenum {emptyIdxLU = -1};\ninline Index LUnumTempV(Index& m, Index& w, Index& t, Index& b)\n{\n  return (std::max)(m, (t+b)*w);\n}\n\ntemplate< typename Scalar>\ninline Index LUTempSpace(Index&m, Index& w)\n{\n  return (2*w + 4 + LUNoMarker) * m * sizeof(Index) + (w + 1) * m * sizeof(Scalar);\n}\n\n\n\n\n/** \n  * Expand the existing storage to accomodate more fill-ins\n  * \\param vec Valid pointer to the vector to allocate or expand\n  * \\param[in,out] length  At input, contain the current length of the vector that is to be increased. 
At output, length of the newly allocated vector\n  * \\param[in] nbElts Current number of elements in the factors\n  * \\param keep_prev  1: use length  and do not expand the vector; 0: compute new_len and expand\n  * \\param[in,out] num_expansions Number of times the memory has been expanded\n  */\ntemplate <typename Scalar, typename StorageIndex>\ntemplate <typename VectorType>\nIndex  SparseLUImpl<Scalar,StorageIndex>::expand(VectorType& vec, Index& length, Index nbElts, Index keep_prev, Index& num_expansions) \n{\n  \n  float alpha = 1.5; // Ratio of the memory increase \n  Index new_len; // New size of the allocated memory\n  \n  if(num_expansions == 0 || keep_prev) \n    new_len = length ; // First time allocate requested\n  else \n    new_len = (std::max)(length+1,Index(alpha * length));\n  \n  VectorType old_vec; // Temporary vector to hold the previous values   \n  if (nbElts > 0 )\n    old_vec = vec.segment(0,nbElts); \n  \n  //Allocate or expand the current vector\n#ifdef EIGEN_EXCEPTIONS\n  try\n#endif\n  {\n    vec.resize(new_len); \n  }\n#ifdef EIGEN_EXCEPTIONS\n  catch(std::bad_alloc& )\n#else\n  if(!vec.size())\n#endif\n  {\n    if (!num_expansions)\n    {\n      // First time to allocate from LUMemInit()\n      // Let LUMemInit() deals with it.\n      return -1;\n    }\n    if (keep_prev)\n    {\n      // In this case, the memory length should not not be reduced\n      return new_len;\n    }\n    else \n    {\n      // Reduce the size and increase again \n      Index tries = 0; // Number of attempts\n      do \n      {\n        alpha = (alpha + 1)/2;\n        new_len = (std::max)(length+1,Index(alpha * length));\n#ifdef EIGEN_EXCEPTIONS\n        try\n#endif\n        {\n          vec.resize(new_len); \n        }\n#ifdef EIGEN_EXCEPTIONS\n        catch(std::bad_alloc& )\n#else\n        if (!vec.size())\n#endif\n        {\n          tries += 1; \n          if ( tries > 10) return new_len; \n        }\n      } while (!vec.size());\n    }\n  }\n  //Copy 
the previous values to the newly allocated space \n  if (nbElts > 0)\n    vec.segment(0, nbElts) = old_vec;   \n   \n  \n  length  = new_len;\n  if(num_expansions) ++num_expansions;\n  return 0; \n}\n\n/**\n * \\brief  Allocate various working space for the numerical factorization phase.\n * \\param m number of rows of the input matrix \n * \\param n number of columns \n * \\param annz number of initial nonzeros in the matrix \n * \\param lwork  if lwork=-1, this routine returns an estimated size of the required memory\n * \\param glu persistent data to facilitate multiple factors : will be deleted later ??\n * \\param fillratio estimated ratio of fill in the factors\n * \\param panel_size Size of a panel\n * \\return an estimated size of the required memory if lwork = -1; otherwise, return the size of actually allocated memory when allocation failed, and 0 on success\n * \\note Unlike SuperLU, this routine does not support successive factorization with the same pattern and the same row permutation\n */\ntemplate <typename Scalar, typename StorageIndex>\nIndex SparseLUImpl<Scalar,StorageIndex>::memInit(Index m, Index n, Index annz, Index lwork, Index fillratio, Index panel_size,  GlobalLU_t& glu)\n{\n  Index& num_expansions = glu.num_expansions; //No memory expansions so far\n  num_expansions = 0;\n  glu.nzumax = glu.nzlumax = (std::min)(fillratio * (annz+1) / n, m) * n; // estimated number of nonzeros in U \n  glu.nzlmax = (std::max)(Index(4), fillratio) * (annz+1) / 4; // estimated  nnz in L factor\n  // Return the estimated size to the user if necessary\n  Index tempSpace;\n  tempSpace = (2*panel_size + 4 + LUNoMarker) * m * sizeof(Index) + (panel_size + 1) * m * sizeof(Scalar);\n  if (lwork == emptyIdxLU) \n  {\n    Index estimated_size;\n    estimated_size = (5 * n + 5) * sizeof(Index)  + tempSpace\n                    + (glu.nzlmax + glu.nzumax) * sizeof(Index) + (glu.nzlumax+glu.nzumax) *  sizeof(Scalar) + n; \n    return estimated_size;\n  }\n  \n  // Setup 
the required space \n  \n  // First allocate Integer pointers for L\\U factors\n  glu.xsup.resize(n+1);\n  glu.supno.resize(n+1);\n  glu.xlsub.resize(n+1);\n  glu.xlusup.resize(n+1);\n  glu.xusub.resize(n+1);\n\n  // Reserve memory for L/U factors\n  do \n  {\n    if(     (expand<ScalarVector>(glu.lusup, glu.nzlumax, 0, 0, num_expansions)<0)\n        ||  (expand<ScalarVector>(glu.ucol,  glu.nzumax,  0, 0, num_expansions)<0)\n        ||  (expand<IndexVector> (glu.lsub,  glu.nzlmax,  0, 0, num_expansions)<0)\n        ||  (expand<IndexVector> (glu.usub,  glu.nzumax,  0, 1, num_expansions)<0) )\n    {\n      //Reduce the estimated size and retry\n      glu.nzlumax /= 2;\n      glu.nzumax /= 2;\n      glu.nzlmax /= 2;\n      if (glu.nzlumax < annz ) return glu.nzlumax; \n    }\n  } while (!glu.lusup.size() || !glu.ucol.size() || !glu.lsub.size() || !glu.usub.size());\n  \n  ++num_expansions;\n  return 0;\n  \n} // end LuMemInit\n\n/** \n * \\brief Expand the existing storage \n * \\param vec vector to expand \n * \\param[in,out] maxlen On input, previous size of vec (Number of elements to copy ). on output, new size\n * \\param nbElts current number of elements in the vector.\n * \\param memtype Type of the element to expand\n * \\param num_expansions Number of expansions \n * \\return 0 on success, > 0 size of the memory allocated so far\n */\ntemplate <typename Scalar, typename StorageIndex>\ntemplate <typename VectorType>\nIndex SparseLUImpl<Scalar,StorageIndex>::memXpand(VectorType& vec, Index& maxlen, Index nbElts, MemType memtype, Index& num_expansions)\n{\n  Index failed_size; \n  if (memtype == USUB)\n     failed_size = this->expand<VectorType>(vec, maxlen, nbElts, 1, num_expansions);\n  else\n    failed_size = this->expand<VectorType>(vec, maxlen, nbElts, 0, num_expansions);\n\n  if (failed_size)\n    return failed_size; \n  \n  return 0 ;  \n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n#endif // EIGEN_SPARSELU_MEMORY\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_Structs.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n/* \n * NOTE: This file comes from a partly modified version of files slu_[s,d,c,z]defs.h\n * -- SuperLU routine (version 4.1) --\n * Univ. of California Berkeley, Xerox Palo Alto Research Center,\n * and Lawrence Berkeley National Lab.\n * November, 2010\n * \n * Global data structures used in LU factorization -\n * \n *   nsuper: #supernodes = nsuper + 1, numbered [0, nsuper].\n *   (xsup,supno): supno[i] is the supernode no to which i belongs;\n *  xsup(s) points to the beginning of the s-th supernode.\n *  e.g.   supno 0 1 2 2 3 3 3 4 4 4 4 4   (n=12)\n *          xsup 0 1 2 4 7 12\n *  Note: dfs will be performed on supernode rep. relative to the new \n *        row pivoting ordering\n *\n *   (xlsub,lsub): lsub[*] contains the compressed subscript of\n *  rectangular supernodes; xlsub[j] points to the starting\n *  location of the j-th column in lsub[*]. Note that xlsub \n *  is indexed by column.\n *  Storage: original row subscripts\n *\n *      During the course of sparse LU factorization, we also use\n *  (xlsub,lsub) for the purpose of symmetric pruning. For each\n *  supernode {s,s+1,...,t=s+r} with first column s and last\n *  column t, the subscript set\n *    lsub[j], j=xlsub[s], .., xlsub[s+1]-1\n *  is the structure of column s (i.e. structure of this supernode).\n *  It is used for the storage of numerical values.\n *  Furthermore,\n *    lsub[j], j=xlsub[t], .., xlsub[t+1]-1\n *  is the structure of the last column t of this supernode.\n *  It is for the purpose of symmetric pruning. 
Therefore, the\n *  structural subscripts can be rearranged without making physical\n *  interchanges among the numerical values.\n *\n *  However, if the supernode has only one column, then we\n *  only keep one set of subscripts. For any subscript interchange\n *  performed, similar interchange must be done on the numerical\n *  values.\n *\n *  The last column structures (for pruning) will be removed\n *  after the numercial LU factorization phase.\n *\n *   (xlusup,lusup): lusup[*] contains the numerical values of the\n *  rectangular supernodes; xlusup[j] points to the starting\n *  location of the j-th column in storage vector lusup[*]\n *  Note: xlusup is indexed by column.\n *  Each rectangular supernode is stored by column-major\n *  scheme, consistent with Fortran 2-dim array storage.\n *\n *   (xusub,ucol,usub): ucol[*] stores the numerical values of\n *  U-columns outside the rectangular supernodes. The row\n *  subscript of nonzero ucol[k] is stored in usub[k].\n *  xusub[i] points to the starting location of column i in ucol.\n *  Storage: new row subscripts; that is subscripts of PA.\n */\n\n#ifndef EIGEN_LU_STRUCTS\n#define EIGEN_LU_STRUCTS\nnamespace Eigen {\nnamespace internal {\n  \ntypedef enum {LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} MemType; \n\ntemplate <typename IndexVector, typename ScalarVector>\nstruct LU_GlobalLU_t {\n  typedef typename IndexVector::Scalar StorageIndex; \n  IndexVector xsup; //First supernode column ... xsup(s) points to the beginning of the s-th supernode\n  IndexVector supno; // Supernode number corresponding to this column (column to supernode mapping)\n  ScalarVector  lusup; // nonzero values of L ordered by columns \n  IndexVector lsub; // Compressed row indices of L rectangular supernodes. 
\n  IndexVector xlusup; // pointers to the beginning of each column in lusup\n  IndexVector xlsub; // pointers to the beginning of each column in lsub\n  Index   nzlmax; // Current max size of lsub\n  Index   nzlumax; // Current max size of lusup\n  ScalarVector  ucol; // nonzero values of U ordered by columns \n  IndexVector usub; // row indices of U columns in ucol\n  IndexVector xusub; // Pointers to the beginning of each column of U in ucol \n  Index   nzumax; // Current max size of ucol\n  Index   n; // Number of columns in the matrix  \n  Index   num_expansions; \n};\n\n// Values to set for performance\nstruct perfvalues {\n  Index panel_size; // a panel consists of at most <panel_size> consecutive columns\n  Index relax; // To control degree of relaxing supernodes. If the number of nodes (columns) \n                // in a subtree of the elimination tree is less than relax, this subtree is considered \n                // as one supernode regardless of the row structures of those columns\n  Index maxsuper; // The maximum size for a supernode in complete LU\n  Index rowblk; // The minimum row dimension for 2-D blocking to be used;\n  Index colblk; // The minimum column dimension for 2-D blocking to be used;\n  Index fillfactor; // The estimated fills factors for L and U, compared with A\n}; \n\n} // end namespace internal\n\n} // end namespace Eigen\n#endif // EIGEN_LU_STRUCTS\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_SupernodalMatrix.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSELU_SUPERNODAL_MATRIX_H\n#define EIGEN_SPARSELU_SUPERNODAL_MATRIX_H\n\nnamespace Eigen {\nnamespace internal {\n\n/** \\ingroup SparseLU_Module\n * \\brief a class to manipulate the L supernodal factor from the SparseLU factorization\n * \n * This class  contain the data to easily store \n * and manipulate the supernodes during the factorization and solution phase of Sparse LU. \n * Only the lower triangular matrix has supernodes.\n * \n * NOTE : This class corresponds to the SCformat structure in SuperLU\n * \n */\n/* TODO\n * InnerIterator as for sparsematrix \n * SuperInnerIterator to iterate through all supernodes \n * Function for triangular solve\n */\ntemplate <typename _Scalar, typename _StorageIndex>\nclass MappedSuperNodalMatrix\n{\n  public:\n    typedef _Scalar Scalar; \n    typedef _StorageIndex StorageIndex;\n    typedef Matrix<StorageIndex,Dynamic,1> IndexVector;\n    typedef Matrix<Scalar,Dynamic,1> ScalarVector;\n  public:\n    MappedSuperNodalMatrix()\n    {\n      \n    }\n    MappedSuperNodalMatrix(Index m, Index n,  ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,\n             IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )\n    {\n      setInfos(m, n, nzval, nzval_colptr, rowind, rowind_colptr, col_to_sup, sup_to_col);\n    }\n    \n    ~MappedSuperNodalMatrix()\n    {\n      \n    }\n    /**\n     * Set appropriate pointers for the lower triangular supernodal matrix\n     * These infos are available at the end of the 
numerical factorization\n     * FIXME This class will be modified such that it can be use in the course \n     * of the factorization.\n     */\n    void setInfos(Index m, Index n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,\n             IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )\n    {\n      m_row = m;\n      m_col = n; \n      m_nzval = nzval.data(); \n      m_nzval_colptr = nzval_colptr.data(); \n      m_rowind = rowind.data(); \n      m_rowind_colptr = rowind_colptr.data(); \n      m_nsuper = col_to_sup(n); \n      m_col_to_sup = col_to_sup.data(); \n      m_sup_to_col = sup_to_col.data(); \n    }\n    \n    /**\n     * Number of rows\n     */\n    Index rows() { return m_row; }\n    \n    /**\n     * Number of columns\n     */\n    Index cols() { return m_col; }\n    \n    /**\n     * Return the array of nonzero values packed by column\n     * \n     * The size is nnz\n     */\n    Scalar* valuePtr() {  return m_nzval; }\n    \n    const Scalar* valuePtr() const \n    {\n      return m_nzval; \n    }\n    /**\n     * Return the pointers to the beginning of each column in \\ref valuePtr()\n     */\n    StorageIndex* colIndexPtr()\n    {\n      return m_nzval_colptr; \n    }\n    \n    const StorageIndex* colIndexPtr() const\n    {\n      return m_nzval_colptr; \n    }\n    \n    /**\n     * Return the array of compressed row indices of all supernodes\n     */\n    StorageIndex* rowIndex()  { return m_rowind; }\n    \n    const StorageIndex* rowIndex() const\n    {\n      return m_rowind; \n    }\n    \n    /**\n     * Return the location in \\em rowvaluePtr() which starts each column\n     */\n    StorageIndex* rowIndexPtr() { return m_rowind_colptr; }\n    \n    const StorageIndex* rowIndexPtr() const\n    {\n      return m_rowind_colptr; \n    }\n    \n    /** \n     * Return the array of column-to-supernode mapping \n     */\n    StorageIndex* colToSup()  { return m_col_to_sup; }\n    \n    const 
StorageIndex* colToSup() const\n    {\n      return m_col_to_sup;       \n    }\n    /**\n     * Return the array of supernode-to-column mapping\n     */\n    StorageIndex* supToCol() { return m_sup_to_col; }\n    \n    const StorageIndex* supToCol() const\n    {\n      return m_sup_to_col;\n    }\n    \n    /**\n     * Return the number of supernodes\n     */\n    Index nsuper() const\n    {\n      return m_nsuper; \n    }\n    \n    class InnerIterator; \n    template<typename Dest>\n    void solveInPlace( MatrixBase<Dest>&X) const;\n    \n      \n      \n    \n  protected:\n    Index m_row; // Number of rows\n    Index m_col; // Number of columns\n    Index m_nsuper; // Number of supernodes\n    Scalar* m_nzval; //array of nonzero values packed by column\n    StorageIndex* m_nzval_colptr; //nzval_colptr[j] Stores the location in nzval[] which starts column j\n    StorageIndex* m_rowind; // Array of compressed row indices of rectangular supernodes\n    StorageIndex* m_rowind_colptr; //rowind_colptr[j] stores the location in rowind[] which starts column j\n    StorageIndex* m_col_to_sup; // col_to_sup[j] is the supernode number to which column j belongs\n    StorageIndex* m_sup_to_col; //sup_to_col[s] points to the starting column of the s-th supernode\n    \n  private :\n};\n\n/**\n  * \\brief InnerIterator class to iterate over nonzero values of the current column in the supernodal matrix L\n  * \n  */\ntemplate<typename Scalar, typename StorageIndex>\nclass MappedSuperNodalMatrix<Scalar,StorageIndex>::InnerIterator\n{\n  public:\n     InnerIterator(const MappedSuperNodalMatrix& mat, Index outer)\n      : m_matrix(mat),\n        m_outer(outer),\n        m_supno(mat.colToSup()[outer]),\n        m_idval(mat.colIndexPtr()[outer]),\n        m_startidval(m_idval),\n        m_endidval(mat.colIndexPtr()[outer+1]),\n        m_idrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]]),\n        m_endidrow(mat.rowIndexPtr()[mat.supToCol()[mat.colToSup()[outer]]+1])\n 
   {}\n    inline InnerIterator& operator++()\n    { \n      m_idval++; \n      m_idrow++;\n      return *this;\n    }\n    inline Scalar value() const { return m_matrix.valuePtr()[m_idval]; }\n    \n    inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_idval]); }\n    \n    inline Index index() const { return m_matrix.rowIndex()[m_idrow]; }\n    inline Index row() const { return index(); }\n    inline Index col() const { return m_outer; }\n    \n    inline Index supIndex() const { return m_supno; }\n    \n    inline operator bool() const \n    { \n      return ( (m_idval < m_endidval) && (m_idval >= m_startidval)\n                && (m_idrow < m_endidrow) );\n    }\n    \n  protected:\n    const MappedSuperNodalMatrix& m_matrix; // Supernodal lower triangular matrix \n    const Index m_outer;                    // Current column \n    const Index m_supno;                    // Current SuperNode number\n    Index m_idval;                          // Index to browse the values in the current column\n    const Index m_startidval;               // Start of the column value\n    const Index m_endidval;                 // End of the column value\n    Index m_idrow;                          // Index to browse the row indices \n    Index m_endidrow;                       // End index of row indices of the current column\n};\n\n/**\n * \\brief Solve with the supernode triangular matrix\n * \n */\ntemplate<typename Scalar, typename Index_>\ntemplate<typename Dest>\nvoid MappedSuperNodalMatrix<Scalar,Index_>::solveInPlace( MatrixBase<Dest>&X) const\n{\n    /* Explicit type conversion as the Index type of MatrixBase<Dest> may be wider than Index */\n//    eigen_assert(X.rows() <= NumTraits<Index>::highest());\n//    eigen_assert(X.cols() <= NumTraits<Index>::highest());\n    Index n    = int(X.rows());\n    Index nrhs = Index(X.cols());\n    const Scalar * Lval = valuePtr();                 // Nonzero values \n    
Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor> work(n, nrhs);     // working vector\n    work.setZero();\n    for (Index k = 0; k <= nsuper(); k ++)\n    {\n      Index fsupc = supToCol()[k];                    // First column of the current supernode \n      Index istart = rowIndexPtr()[fsupc];            // Pointer index to the subscript of the current column\n      Index nsupr = rowIndexPtr()[fsupc+1] - istart;  // Number of rows in the current supernode\n      Index nsupc = supToCol()[k+1] - fsupc;          // Number of columns in the current supernode\n      Index nrow = nsupr - nsupc;                     // Number of rows in the non-diagonal part of the supernode\n      Index irow;                                     //Current index row\n      \n      if (nsupc == 1 )\n      {\n        for (Index j = 0; j < nrhs; j++)\n        {\n          InnerIterator it(*this, fsupc);\n          ++it; // Skip the diagonal element\n          for (; it; ++it)\n          {\n            irow = it.row();\n            X(irow, j) -= X(fsupc, j) * it.value();\n          }\n        }\n      }\n      else\n      {\n        // The supernode has more than one column \n        Index luptr = colIndexPtr()[fsupc]; \n        Index lda = colIndexPtr()[fsupc+1] - luptr;\n        \n        // Triangular solve \n        Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(lda) );\n        Map< Matrix<Scalar,Dynamic,Dest::ColsAtCompileTime, ColMajor>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );\n        U = A.template triangularView<UnitLower>().solve(U); \n        \n        // Matrix-vector product \n        new (&A) Map<const Matrix<Scalar,Dynamic,Dynamic, ColMajor>, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );\n        work.topRows(nrow).noalias() = A * U;\n        \n        //Begin Scatter \n        for (Index j = 0; j < nrhs; j++)\n        {\n          
Index iptr = istart + nsupc; \n          for (Index i = 0; i < nrow; i++)\n          {\n            irow = rowIndex()[iptr]; \n            X(irow, j) -= work(i, j); // Scatter operation\n            work(i, j) = Scalar(0); \n            iptr++;\n          }\n        }\n      }\n    } \n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_SPARSELU_MATRIX_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_Utils.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\n#ifndef EIGEN_SPARSELU_UTILS_H\n#define EIGEN_SPARSELU_UTILS_H\n\nnamespace Eigen {\nnamespace internal {\n\n/**\n * \\brief Count Nonzero elements in the factors\n */\ntemplate <typename Scalar, typename StorageIndex>\nvoid SparseLUImpl<Scalar,StorageIndex>::countnz(const Index n, Index& nnzL, Index& nnzU, GlobalLU_t& glu)\n{\n nnzL = 0; \n nnzU = (glu.xusub)(n); \n Index nsuper = (glu.supno)(n); \n Index jlen; \n Index i, j, fsupc;\n if (n <= 0 ) return; \n // For each supernode\n for (i = 0; i <= nsuper; i++)\n {\n   fsupc = glu.xsup(i); \n   jlen = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); \n   \n   for (j = fsupc; j < glu.xsup(i+1); j++)\n   {\n     nnzL += jlen; \n     nnzU += j - fsupc + 1; \n     jlen--; \n   }\n }\n}\n\n/**\n * \\brief Fix up the data storage lsub for L-subscripts. 
\n * \n * It removes the subscripts sets for structural pruning, \n * and applies permutation to the remaining subscripts\n * \n */\ntemplate <typename Scalar, typename StorageIndex>\nvoid SparseLUImpl<Scalar,StorageIndex>::fixupL(const Index n, const IndexVector& perm_r, GlobalLU_t& glu)\n{\n  Index fsupc, i, j, k, jstart; \n  \n  StorageIndex nextl = 0; \n  Index nsuper = (glu.supno)(n); \n  \n  // For each supernode \n  for (i = 0; i <= nsuper; i++)\n  {\n    fsupc = glu.xsup(i); \n    jstart = glu.xlsub(fsupc); \n    glu.xlsub(fsupc) = nextl; \n    for (j = jstart; j < glu.xlsub(fsupc + 1); j++)\n    {\n      glu.lsub(nextl) = perm_r(glu.lsub(j)); // Now indexed into P*A\n      nextl++;\n    }\n    for (k = fsupc+1; k < glu.xsup(i+1); k++)\n      glu.xlsub(k) = nextl; // other columns in supernode i\n  }\n  \n  glu.xlsub(n) = nextl; \n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n#endif // EIGEN_SPARSELU_UTILS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_column_bmod.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n/* \n \n * NOTE: This file is the modified version of xcolumn_bmod.c file in SuperLU \n \n * -- SuperLU routine (version 3.0) --\n * Univ. of California Berkeley, Xerox Palo Alto Research Center,\n * and Lawrence Berkeley National Lab.\n * October 15, 2003\n *\n * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.\n *\n * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY\n * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.\n *\n * Permission is hereby granted to use or copy this program for any\n * purpose, provided the above notices are retained on all copies.\n * Permission to modify the code and to distribute modified code is\n * granted, provided the above notices are retained, and a notice that\n * the code was modified is included with the above copyright notice.\n */\n#ifndef SPARSELU_COLUMN_BMOD_H\n#define SPARSELU_COLUMN_BMOD_H\n\nnamespace Eigen {\n\nnamespace internal {\n/**\n * \\brief Performs numeric block updates (sup-col) in topological order\n * \n * \\param jcol current column to update\n * \\param nseg Number of segments in the U part\n * \\param dense Store the full representation of the column\n * \\param tempv working array \n * \\param segrep segment representative ...\n * \\param repfnz ??? First nonzero column in each row ???  ...\n * \\param fpanelc First column in the current panel\n * \\param glu Global LU data. 
\n * \\return 0 - successful return \n *         > 0 - number of bytes allocated when run out of space\n * \n */\ntemplate <typename Scalar, typename StorageIndex>\nIndex SparseLUImpl<Scalar,StorageIndex>::column_bmod(const Index jcol, const Index nseg, BlockScalarVector dense, ScalarVector& tempv,\n                                                     BlockIndexVector segrep, BlockIndexVector repfnz, Index fpanelc, GlobalLU_t& glu)\n{\n  Index  jsupno, k, ksub, krep, ksupno; \n  Index lptr, nrow, isub, irow, nextlu, new_next, ufirst; \n  Index fsupc, nsupc, nsupr, luptr, kfnz, no_zeros; \n  /* krep = representative of current k-th supernode\n    * fsupc =  first supernodal column\n    * nsupc = number of columns in a supernode\n    * nsupr = number of rows in a supernode\n    * luptr = location of supernodal LU-block in storage\n    * kfnz = first nonz in the k-th supernodal segment\n    * no_zeros = no lf leading zeros in a supernodal U-segment\n    */\n  \n  jsupno = glu.supno(jcol);\n  // For each nonzero supernode segment of U[*,j] in topological order \n  k = nseg - 1; \n  Index d_fsupc; // distance between the first column of the current panel and the \n               // first column of the current snode\n  Index fst_col; // First column within small LU update\n  Index segsize; \n  for (ksub = 0; ksub < nseg; ksub++)\n  {\n    krep = segrep(k); k--; \n    ksupno = glu.supno(krep); \n    if (jsupno != ksupno )\n    {\n      // outside the rectangular supernode \n      fsupc = glu.xsup(ksupno); \n      fst_col = (std::max)(fsupc, fpanelc); \n      \n      // Distance from the current supernode to the current panel; \n      // d_fsupc = 0 if fsupc > fpanelc\n      d_fsupc = fst_col - fsupc; \n      \n      luptr = glu.xlusup(fst_col) + d_fsupc; \n      lptr = glu.xlsub(fsupc) + d_fsupc; \n      \n      kfnz = repfnz(krep); \n      kfnz = (std::max)(kfnz, fpanelc); \n      \n      segsize = krep - kfnz + 1; \n      nsupc = krep - fst_col + 1; \n      nsupr = 
glu.xlsub(fsupc+1) - glu.xlsub(fsupc); \n      nrow = nsupr - d_fsupc - nsupc;\n      Index lda = glu.xlusup(fst_col+1) - glu.xlusup(fst_col);\n      \n      \n      // Perform a triangular solver and block update, \n      // then scatter the result of sup-col update to dense\n      no_zeros = kfnz - fst_col; \n      if(segsize==1)\n        LU_kernel_bmod<1>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);\n      else\n        LU_kernel_bmod<Dynamic>::run(segsize, dense, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);\n    } // end if jsupno \n  } // end for each segment\n  \n  // Process the supernodal portion of  L\\U[*,j]\n  nextlu = glu.xlusup(jcol); \n  fsupc = glu.xsup(jsupno);\n  \n  // copy the SPA dense into L\\U[*,j]\n  Index mem; \n  new_next = nextlu + glu.xlsub(fsupc + 1) - glu.xlsub(fsupc); \n  Index offset = internal::first_multiple<Index>(new_next, internal::packet_traits<Scalar>::size) - new_next;\n  if(offset)\n    new_next += offset;\n  while (new_next > glu.nzlumax )\n  {\n    mem = memXpand<ScalarVector>(glu.lusup, glu.nzlumax, nextlu, LUSUP, glu.num_expansions);  \n    if (mem) return mem; \n  }\n  \n  for (isub = glu.xlsub(fsupc); isub < glu.xlsub(fsupc+1); isub++)\n  {\n    irow = glu.lsub(isub);\n    glu.lusup(nextlu) = dense(irow);\n    dense(irow) = Scalar(0.0); \n    ++nextlu; \n  }\n  \n  if(offset)\n  {\n    glu.lusup.segment(nextlu,offset).setZero();\n    nextlu += offset;\n  }\n  glu.xlusup(jcol + 1) = StorageIndex(nextlu);  // close L\\U(*,jcol); \n  \n  /* For more updates within the panel (also within the current supernode),\n   * should start from the first column of the panel, or the first column\n   * of the supernode, whichever is bigger. 
There are two cases:\n   *  1) fsupc < fpanelc, then fst_col <-- fpanelc\n   *  2) fsupc >= fpanelc, then fst_col <-- fsupc\n   */\n  fst_col = (std::max)(fsupc, fpanelc); \n  \n  if (fst_col  < jcol)\n  {\n    // Distance between the current supernode and the current panel\n    // d_fsupc = 0 if fsupc >= fpanelc\n    d_fsupc = fst_col - fsupc; \n    \n    lptr = glu.xlsub(fsupc) + d_fsupc; \n    luptr = glu.xlusup(fst_col) + d_fsupc; \n    nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); // leading dimension\n    nsupc = jcol - fst_col; // excluding jcol \n    nrow = nsupr - d_fsupc - nsupc; \n    \n    // points to the beginning of jcol in snode L\\U(jsupno) \n    ufirst = glu.xlusup(jcol) + d_fsupc; \n    Index lda = glu.xlusup(jcol+1) - glu.xlusup(jcol);\n    MappedMatrixBlock A( &(glu.lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(lda) );\n    VectorBlock<ScalarVector> u(glu.lusup, ufirst, nsupc); \n    u = A.template triangularView<UnitLower>().solve(u); \n    \n    new (&A) MappedMatrixBlock ( &(glu.lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(lda) );\n    VectorBlock<ScalarVector> l(glu.lusup, ufirst+nsupc, nrow); \n    l.noalias() -= A * u;\n    \n  } // End if fst_col\n  return 0; \n}\n\n} // end namespace internal\n} // end namespace Eigen\n\n#endif // SPARSELU_COLUMN_BMOD_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_column_dfs.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n/* \n \n * NOTE: This file is the modified version of [s,d,c,z]column_dfs.c file in SuperLU \n \n * -- SuperLU routine (version 2.0) --\n * Univ. of California Berkeley, Xerox Palo Alto Research Center,\n * and Lawrence Berkeley National Lab.\n * November 15, 1997\n *\n * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.\n *\n * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY\n * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.\n *\n * Permission is hereby granted to use or copy this program for any\n * purpose, provided the above notices are retained on all copies.\n * Permission to modify the code and to distribute modified code is\n * granted, provided the above notices are retained, and a notice that\n * the code was modified is included with the above copyright notice.\n */\n#ifndef SPARSELU_COLUMN_DFS_H\n#define SPARSELU_COLUMN_DFS_H\n\ntemplate <typename Scalar, typename StorageIndex> class SparseLUImpl;\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<typename IndexVector, typename ScalarVector>\nstruct column_dfs_traits : no_assignment_operator\n{\n  typedef typename ScalarVector::Scalar Scalar;\n  typedef typename IndexVector::Scalar StorageIndex;\n  column_dfs_traits(Index jcol, Index& jsuper, typename SparseLUImpl<Scalar, StorageIndex>::GlobalLU_t& glu, SparseLUImpl<Scalar, StorageIndex>& luImpl)\n   : m_jcol(jcol), m_jsuper_ref(jsuper), m_glu(glu), m_luImpl(luImpl)\n {}\n  bool update_segrep(Index /*krep*/, Index /*jj*/)\n  {\n    return true;\n  }\n  void mem_expand(IndexVector& lsub, Index& nextl, Index chmark)\n  {\n    if 
(nextl >= m_glu.nzlmax)\n      m_luImpl.memXpand(lsub, m_glu.nzlmax, nextl, LSUB, m_glu.num_expansions); \n    if (chmark != (m_jcol-1)) m_jsuper_ref = emptyIdxLU;\n  }\n  enum { ExpandMem = true };\n  \n  Index m_jcol;\n  Index& m_jsuper_ref;\n  typename SparseLUImpl<Scalar, StorageIndex>::GlobalLU_t& m_glu;\n  SparseLUImpl<Scalar, StorageIndex>& m_luImpl;\n};\n\n\n/**\n * \\brief Performs a symbolic factorization on column jcol and decide the supernode boundary\n * \n * A supernode representative is the last column of a supernode.\n * The nonzeros in U[*,j] are segments that end at supernodes representatives. \n * The routine returns a list of the supernodal representatives \n * in topological order of the dfs that generates them. \n * The location of the first nonzero in each supernodal segment \n * (supernodal entry location) is also returned. \n * \n * \\param m number of rows in the matrix\n * \\param jcol Current column \n * \\param perm_r Row permutation\n * \\param maxsuper  Maximum number of column allowed in a supernode\n * \\param [in,out] nseg Number of segments in current U[*,j] - new segments appended\n * \\param lsub_col defines the rhs vector to start the dfs\n * \\param [in,out] segrep Segment representatives - new segments appended \n * \\param repfnz  First nonzero location in each row\n * \\param xprune \n * \\param marker  marker[i] == jj, if i was visited during dfs of current column jj;\n * \\param parent\n * \\param xplore working array\n * \\param glu global LU data \n * \\return 0 success\n *         > 0 number of bytes allocated when run out of space\n * \n */\ntemplate <typename Scalar, typename StorageIndex>\nIndex SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index jcol, IndexVector& perm_r, Index maxsuper, Index& nseg,\n                                                    BlockIndexVector lsub_col, IndexVector& segrep, BlockIndexVector repfnz, IndexVector& xprune,\n                                                  
  IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)\n{\n  \n  Index jsuper = glu.supno(jcol); \n  Index nextl = glu.xlsub(jcol); \n  VectorBlock<IndexVector> marker2(marker, 2*m, m); \n  \n  \n  column_dfs_traits<IndexVector, ScalarVector> traits(jcol, jsuper, glu, *this);\n  \n  // For each nonzero in A(*,jcol) do dfs \n  for (Index k = 0; ((k < m) ? lsub_col[k] != emptyIdxLU : false) ; k++)\n  {\n    Index krow = lsub_col(k); \n    lsub_col(k) = emptyIdxLU; \n    Index kmark = marker2(krow); \n    \n    // krow was visited before, go to the next nonz; \n    if (kmark == jcol) continue;\n    \n    dfs_kernel(StorageIndex(jcol), perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent,\n                   xplore, glu, nextl, krow, traits);\n  } // for each nonzero ... \n  \n  Index fsupc;\n  StorageIndex nsuper = glu.supno(jcol);\n  StorageIndex jcolp1 = StorageIndex(jcol) + 1;\n  Index jcolm1 = jcol - 1;\n  \n  // check to see if j belongs in the same supernode as j-1\n  if ( jcol == 0 )\n  { // Do nothing for column 0 \n    nsuper = glu.supno(0) = 0 ;\n  }\n  else \n  {\n    fsupc = glu.xsup(nsuper); \n    StorageIndex jptr = glu.xlsub(jcol); // Not yet compressed\n    StorageIndex jm1ptr = glu.xlsub(jcolm1); \n    \n    // Use supernodes of type T2 : see SuperLU paper\n    if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = emptyIdxLU;\n    \n    // Make sure the number of columns in a supernode doesn't\n    // exceed threshold\n    if ( (jcol - fsupc) >= maxsuper) jsuper = emptyIdxLU; \n    \n    /* If jcol starts a new supernode, reclaim storage space in\n     * glu.lsub from previous supernode. Note we only store \n     * the subscript set of the first and last columns of \n     * a supernode. 
(first for num values, last for pruning)\n     */\n    if (jsuper == emptyIdxLU)\n    { // starts a new supernode \n      if ( (fsupc < jcolm1-1) ) \n      { // >= 3 columns in nsuper\n        StorageIndex ito = glu.xlsub(fsupc+1);\n        glu.xlsub(jcolm1) = ito; \n        StorageIndex istop = ito + jptr - jm1ptr; \n        xprune(jcolm1) = istop; // intialize xprune(jcol-1)\n        glu.xlsub(jcol) = istop; \n        \n        for (StorageIndex ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito)\n          glu.lsub(ito) = glu.lsub(ifrom); \n        nextl = ito;  // = istop + length(jcol)\n      }\n      nsuper++; \n      glu.supno(jcol) = nsuper; \n    } // if a new supernode \n  } // end else:  jcol > 0\n  \n  // Tidy up the pointers before exit\n  glu.xsup(nsuper+1) = jcolp1; \n  glu.supno(jcolp1) = nsuper; \n  xprune(jcol) = StorageIndex(nextl);  // Intialize upper bound for pruning\n  glu.xlsub(jcolp1) = StorageIndex(nextl); \n  \n  return 0; \n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_copy_to_ucol.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n/* \n \n * NOTE: This file is the modified version of [s,d,c,z]copy_to_ucol.c file in SuperLU \n \n * -- SuperLU routine (version 2.0) --\n * Univ. of California Berkeley, Xerox Palo Alto Research Center,\n * and Lawrence Berkeley National Lab.\n * November 15, 1997\n *\n * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.\n *\n * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY\n * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.\n *\n * Permission is hereby granted to use or copy this program for any\n * purpose, provided the above notices are retained on all copies.\n * Permission to modify the code and to distribute modified code is\n * granted, provided the above notices are retained, and a notice that\n * the code was modified is included with the above copyright notice.\n */\n#ifndef SPARSELU_COPY_TO_UCOL_H\n#define SPARSELU_COPY_TO_UCOL_H\n\nnamespace Eigen {\nnamespace internal {\n\n/**\n * \\brief Performs numeric block updates (sup-col) in topological order\n * \n * \\param jcol current column to update\n * \\param nseg Number of segments in the U part\n * \\param segrep segment representative ...\n * \\param repfnz First nonzero column in each row  ...\n * \\param perm_r Row permutation \n * \\param dense Store the full representation of the column\n * \\param glu Global LU data. 
\n * \\return 0 - successful return \n *         > 0 - number of bytes allocated when run out of space\n * \n */\ntemplate <typename Scalar, typename StorageIndex>\nIndex SparseLUImpl<Scalar,StorageIndex>::copy_to_ucol(const Index jcol, const Index nseg, IndexVector& segrep,\n                                                      BlockIndexVector repfnz ,IndexVector& perm_r, BlockScalarVector dense, GlobalLU_t& glu)\n{  \n  Index ksub, krep, ksupno; \n    \n  Index jsupno = glu.supno(jcol);\n  \n  // For each nonzero supernode segment of U[*,j] in topological order \n  Index k = nseg - 1, i; \n  StorageIndex nextu = glu.xusub(jcol); \n  Index kfnz, isub, segsize; \n  Index new_next,irow; \n  Index fsupc, mem; \n  for (ksub = 0; ksub < nseg; ksub++)\n  {\n    krep = segrep(k); k--; \n    ksupno = glu.supno(krep); \n    if (jsupno != ksupno ) // should go into ucol(); \n    {\n      kfnz = repfnz(krep); \n      if (kfnz != emptyIdxLU)\n      { // Nonzero U-segment \n        fsupc = glu.xsup(ksupno); \n        isub = glu.xlsub(fsupc) + kfnz - fsupc; \n        segsize = krep - kfnz + 1; \n        new_next = nextu + segsize; \n        while (new_next > glu.nzumax) \n        {\n          mem = memXpand<ScalarVector>(glu.ucol, glu.nzumax, nextu, UCOL, glu.num_expansions); \n          if (mem) return mem; \n          mem = memXpand<IndexVector>(glu.usub, glu.nzumax, nextu, USUB, glu.num_expansions); \n          if (mem) return mem; \n          \n        }\n        \n        for (i = 0; i < segsize; i++)\n        {\n          irow = glu.lsub(isub); \n          glu.usub(nextu) = perm_r(irow); // Unlike the L part, the U part is stored in its final order\n          glu.ucol(nextu) = dense(irow); \n          dense(irow) = Scalar(0.0); \n          nextu++;\n          isub++;\n        }\n        \n      } // end nonzero U-segment \n      \n    } // end if jsupno \n    \n  } // end for each segment\n  glu.xusub(jcol + 1) = nextu; // close U(*,jcol)\n  return 0; \n}\n\n} // 
namespace internal\n} // end namespace Eigen\n\n#endif // SPARSELU_COPY_TO_UCOL_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_gemm_kernel.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSELU_GEMM_KERNEL_H\n#define EIGEN_SPARSELU_GEMM_KERNEL_H\n\nnamespace Eigen {\n\nnamespace internal {\n\n\n/** \\internal\n  * A general matrix-matrix product kernel optimized for the SparseLU factorization.\n  *  - A, B, and C must be column major\n  *  - lda and ldc must be multiples of the respective packet size\n  *  - C must have the same alignment as A\n  */\ntemplate<typename Scalar>\nEIGEN_DONT_INLINE\nvoid sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const Scalar* B, Index ldb, Scalar* C, Index ldc)\n{\n  using namespace Eigen::internal;\n  \n  typedef typename packet_traits<Scalar>::type Packet;\n  enum {\n    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,\n    PacketSize = packet_traits<Scalar>::size,\n    PM = 8,                             // peeling in M\n    RN = 2,                             // register blocking\n    RK = NumberOfRegisters>=16 ? 
4 : 2, // register blocking\n    BM = 4096/sizeof(Scalar),           // number of rows of A-C per chunk\n    SM = PM*PacketSize                  // step along M\n  };\n  Index d_end = (d/RK)*RK;    // number of columns of A (rows of B) suitable for full register blocking\n  Index n_end = (n/RN)*RN;    // number of columns of B-C suitable for processing RN columns at once\n  Index i0 = internal::first_default_aligned(A,m);\n  \n  eigen_internal_assert(((lda%PacketSize)==0) && ((ldc%PacketSize)==0) && (i0==internal::first_default_aligned(C,m)));\n  \n  // handle the non aligned rows of A and C without any optimization:\n  for(Index i=0; i<i0; ++i)\n  {\n    for(Index j=0; j<n; ++j)\n    {\n      Scalar c = C[i+j*ldc];\n      for(Index k=0; k<d; ++k)\n        c += B[k+j*ldb] * A[i+k*lda];\n      C[i+j*ldc] = c;\n    }\n  }\n  // process the remaining rows per chunk of BM rows\n  for(Index ib=i0; ib<m; ib+=BM)\n  {\n    Index actual_b = std::min<Index>(BM, m-ib);                 // actual number of rows\n    Index actual_b_end1 = (actual_b/SM)*SM;                   // actual number of rows suitable for peeling\n    Index actual_b_end2 = (actual_b/PacketSize)*PacketSize;   // actual number of rows suitable for vectorization\n    \n    // Let's process two columns of B-C at once\n    for(Index j=0; j<n_end; j+=RN)\n    {\n      const Scalar* Bc0 = B+(j+0)*ldb;\n      const Scalar* Bc1 = B+(j+1)*ldb;\n      \n      for(Index k=0; k<d_end; k+=RK)\n      {\n        \n        // load and expand a RN x RK block of B\n        Packet b00, b10, b20, b30, b01, b11, b21, b31;\n                  { b00 = pset1<Packet>(Bc0[0]); }\n                  { b10 = pset1<Packet>(Bc0[1]); }\n        if(RK==4) { b20 = pset1<Packet>(Bc0[2]); }\n        if(RK==4) { b30 = pset1<Packet>(Bc0[3]); }\n                  { b01 = pset1<Packet>(Bc1[0]); }\n                  { b11 = pset1<Packet>(Bc1[1]); }\n        if(RK==4) { b21 = pset1<Packet>(Bc1[2]); }\n        if(RK==4) { b31 = 
pset1<Packet>(Bc1[3]); }\n        \n        Packet a0, a1, a2, a3, c0, c1, t0, t1;\n        \n        const Scalar* A0 = A+ib+(k+0)*lda;\n        const Scalar* A1 = A+ib+(k+1)*lda;\n        const Scalar* A2 = A+ib+(k+2)*lda;\n        const Scalar* A3 = A+ib+(k+3)*lda;\n        \n        Scalar* C0 = C+ib+(j+0)*ldc;\n        Scalar* C1 = C+ib+(j+1)*ldc;\n        \n                  a0 = pload<Packet>(A0);\n                  a1 = pload<Packet>(A1);\n        if(RK==4)\n        {\n          a2 = pload<Packet>(A2);\n          a3 = pload<Packet>(A3);\n        }\n        else\n        {\n          // workaround \"may be used uninitialized in this function\" warning\n          a2 = a3 = a0;\n        }\n        \n#define KMADD(c, a, b, tmp) {tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);}\n#define WORK(I)  \\\n                     c0 = pload<Packet>(C0+i+(I)*PacketSize);    \\\n                     c1 = pload<Packet>(C1+i+(I)*PacketSize);    \\\n                     KMADD(c0, a0, b00, t0)                      \\\n                     KMADD(c1, a0, b01, t1)                      \\\n                     a0 = pload<Packet>(A0+i+(I+1)*PacketSize);  \\\n                     KMADD(c0, a1, b10, t0)                      \\\n                     KMADD(c1, a1, b11, t1)                      \\\n                     a1 = pload<Packet>(A1+i+(I+1)*PacketSize);  \\\n          if(RK==4){ KMADD(c0, a2, b20, t0)                     }\\\n          if(RK==4){ KMADD(c1, a2, b21, t1)                     }\\\n          if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); }\\\n          if(RK==4){ KMADD(c0, a3, b30, t0)                     }\\\n          if(RK==4){ KMADD(c1, a3, b31, t1)                     }\\\n          if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\\\n                     pstore(C0+i+(I)*PacketSize, c0);            \\\n                     pstore(C1+i+(I)*PacketSize, c1)\n        \n        // process rows of A' - C' with aggressive vectorization and peeling \n        
for(Index i=0; i<actual_b_end1; i+=PacketSize*8)\n        {\n          EIGEN_ASM_COMMENT(\"SPARSELU_GEMML_KERNEL1\");\n                    prefetch((A0+i+(5)*PacketSize));\n                    prefetch((A1+i+(5)*PacketSize));\n          if(RK==4) prefetch((A2+i+(5)*PacketSize));\n          if(RK==4) prefetch((A3+i+(5)*PacketSize));\n\n          WORK(0);\n          WORK(1);\n          WORK(2);\n          WORK(3);\n          WORK(4);\n          WORK(5);\n          WORK(6);\n          WORK(7);\n        }\n        // process the remaining rows with vectorization only\n        for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)\n        {\n          WORK(0);\n        }\n#undef WORK\n        // process the remaining rows without vectorization\n        for(Index i=actual_b_end2; i<actual_b; ++i)\n        {\n          if(RK==4)\n          {\n            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];\n            C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1]+A2[i]*Bc1[2]+A3[i]*Bc1[3];\n          }\n          else\n          {\n            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];\n            C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1];\n          }\n        }\n        \n        Bc0 += RK;\n        Bc1 += RK;\n      } // peeled loop on k\n    } // peeled loop on the columns j\n    // process the last column (we now perform a matrix-vector product)\n    if((n-n_end)>0)\n    {\n      const Scalar* Bc0 = B+(n-1)*ldb;\n      \n      for(Index k=0; k<d_end; k+=RK)\n      {\n        \n        // load and expand a 1 x RK block of B\n        Packet b00, b10, b20, b30;\n                  b00 = pset1<Packet>(Bc0[0]);\n                  b10 = pset1<Packet>(Bc0[1]);\n        if(RK==4) b20 = pset1<Packet>(Bc0[2]);\n        if(RK==4) b30 = pset1<Packet>(Bc0[3]);\n        \n        Packet a0, a1, a2, a3, c0, t0/*, t1*/;\n        \n        const Scalar* A0 = A+ib+(k+0)*lda;\n        const Scalar* A1 = A+ib+(k+1)*lda;\n        const Scalar* A2 = A+ib+(k+2)*lda;\n        const Scalar* A3 = 
A+ib+(k+3)*lda;\n        \n        Scalar* C0 = C+ib+(n_end)*ldc;\n        \n                  a0 = pload<Packet>(A0);\n                  a1 = pload<Packet>(A1);\n        if(RK==4)\n        {\n          a2 = pload<Packet>(A2);\n          a3 = pload<Packet>(A3);\n        }\n        else\n        {\n          // workaround \"may be used uninitialized in this function\" warning\n          a2 = a3 = a0;\n        }\n        \n#define WORK(I) \\\n                   c0 = pload<Packet>(C0+i+(I)*PacketSize);     \\\n                   KMADD(c0, a0, b00, t0)                       \\\n                   a0 = pload<Packet>(A0+i+(I+1)*PacketSize);   \\\n                   KMADD(c0, a1, b10, t0)                       \\\n                   a1 = pload<Packet>(A1+i+(I+1)*PacketSize);   \\\n        if(RK==4){ KMADD(c0, a2, b20, t0)                      }\\\n        if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize);  }\\\n        if(RK==4){ KMADD(c0, a3, b30, t0)                      }\\\n        if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize);  }\\\n                   pstore(C0+i+(I)*PacketSize, c0);\n        \n        // agressive vectorization and peeling\n        for(Index i=0; i<actual_b_end1; i+=PacketSize*8)\n        {\n          EIGEN_ASM_COMMENT(\"SPARSELU_GEMML_KERNEL2\");\n          WORK(0);\n          WORK(1);\n          WORK(2);\n          WORK(3);\n          WORK(4);\n          WORK(5);\n          WORK(6);\n          WORK(7);\n        }\n        // vectorization only\n        for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)\n        {\n          WORK(0);\n        }\n        // remaining scalars\n        for(Index i=actual_b_end2; i<actual_b; ++i)\n        {\n          if(RK==4) \n            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];\n          else\n            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];\n        }\n        \n        Bc0 += RK;\n#undef WORK\n      }\n    }\n    \n    // process the last columns of A, corresponding to the 
last rows of B\n    Index rd = d-d_end;\n    if(rd>0)\n    {\n      for(Index j=0; j<n; ++j)\n      {\n        enum {\n          Alignment = PacketSize>1 ? Aligned : 0\n        };\n        typedef Map<Matrix<Scalar,Dynamic,1>, Alignment > MapVector;\n        typedef Map<const Matrix<Scalar,Dynamic,1>, Alignment > ConstMapVector;\n        if(rd==1)       MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b);\n        \n        else if(rd==2)  MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)\n                                                        + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b);\n        \n        else            MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)\n                                                        + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b)\n                                                        + B[2+d_end+j*ldb] * ConstMapVector(A+(d_end+2)*lda+ib, actual_b);\n      }\n    }\n  \n  } // blocking on the rows of A and C\n}\n#undef KMADD\n\n} // namespace internal\n\n} // namespace Eigen\n\n#endif // EIGEN_SPARSELU_GEMM_KERNEL_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_heap_relax_snode.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n/* This file is a modified version of heap_relax_snode.c file in SuperLU\n * -- SuperLU routine (version 3.0) --\n * Univ. of California Berkeley, Xerox Palo Alto Research Center,\n * and Lawrence Berkeley National Lab.\n * October 15, 2003\n *\n * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.\n *\n * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY\n * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.\n *\n * Permission is hereby granted to use or copy this program for any\n * purpose, provided the above notices are retained on all copies.\n * Permission to modify the code and to distribute modified code is\n * granted, provided the above notices are retained, and a notice that\n * the code was modified is included with the above copyright notice.\n */\n\n#ifndef SPARSELU_HEAP_RELAX_SNODE_H\n#define SPARSELU_HEAP_RELAX_SNODE_H\n\nnamespace Eigen {\nnamespace internal {\n\n/** \n * \\brief Identify the initial relaxed supernodes\n * \n * This routine applied to a symmetric elimination tree. 
\n * It assumes that the matrix has been reordered according to the postorder of the etree\n * \\param n The number of columns\n * \\param et elimination tree \n * \\param relax_columns Maximum number of columns allowed in a relaxed snode \n * \\param descendants Number of descendants of each node in the etree\n * \\param relax_end last column in a supernode\n */\ntemplate <typename Scalar, typename StorageIndex>\nvoid SparseLUImpl<Scalar,StorageIndex>::heap_relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end)\n{\n  \n  // The etree may not be postordered, but its heap ordered  \n  IndexVector post;\n  internal::treePostorder(StorageIndex(n), et, post); // Post order etree\n  IndexVector inv_post(n+1); \n  for (StorageIndex i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()???\n  \n  // Renumber etree in postorder \n  IndexVector iwork(n);\n  IndexVector et_save(n+1);\n  for (Index i = 0; i < n; ++i)\n  {\n    iwork(post(i)) = post(et(i));\n  }\n  et_save = et; // Save the original etree\n  et = iwork; \n  \n  // compute the number of descendants of each node in the etree\n  relax_end.setConstant(emptyIdxLU);\n  Index j, parent; \n  descendants.setZero();\n  for (j = 0; j < n; j++) \n  {\n    parent = et(j);\n    if (parent != n) // not the dummy root\n      descendants(parent) += descendants(j) + 1;\n  }\n  // Identify the relaxed supernodes by postorder traversal of the etree\n  Index snode_start; // beginning of a snode \n  StorageIndex k;\n  Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree \n  Index nsuper_et = 0; // Number of relaxed snodes in the original etree \n  StorageIndex l; \n  for (j = 0; j < n; )\n  {\n    parent = et(j);\n    snode_start = j; \n    while ( parent != n && descendants(parent) < relax_columns ) \n    {\n      j = parent; \n      parent = et(j);\n    }\n    // Found a supernode in postordered etree, j is the last column 
\n    ++nsuper_et_post;\n    k = StorageIndex(n);\n    for (Index i = snode_start; i <= j; ++i)\n      k = (std::min)(k, inv_post(i));\n    l = inv_post(j);\n    if ( (l - k) == (j - snode_start) )  // Same number of columns in the snode\n    {\n      // This is also a supernode in the original etree\n      relax_end(k) = l; // Record last column \n      ++nsuper_et; \n    }\n    else \n    {\n      for (Index i = snode_start; i <= j; ++i) \n      {\n        l = inv_post(i);\n        if (descendants(i) == 0) \n        {\n          relax_end(l) = l;\n          ++nsuper_et;\n        }\n      }\n    }\n    j++;\n    // Search for a new leaf\n    while (descendants(j) != 0 && j < n) j++;\n  } // End postorder traversal of the etree\n  \n  // Recover the original etree\n  et = et_save; \n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n#endif // SPARSELU_HEAP_RELAX_SNODE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_kernel_bmod.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef SPARSELU_KERNEL_BMOD_H\n#define SPARSELU_KERNEL_BMOD_H\n\nnamespace Eigen {\nnamespace internal {\n  \ntemplate <int SegSizeAtCompileTime> struct LU_kernel_bmod\n{\n  /** \\internal\n    * \\brief Performs numeric block updates from a given supernode to a single column\n    *\n    * \\param segsize Size of the segment (and blocks ) to use for updates\n    * \\param[in,out] dense Packed values of the original matrix\n    * \\param tempv temporary vector to use for updates\n    * \\param lusup array containing the supernodes\n    * \\param lda Leading dimension in the supernode\n    * \\param nrow Number of rows in the rectangular part of the supernode\n    * \\param lsub compressed row subscripts of supernodes\n    * \\param lptr pointer to the first column of the current supernode in lsub\n    * \\param no_zeros Number of nonzeros elements before the diagonal part of the supernode\n    */\n  template <typename BlockScalarVector, typename ScalarVector, typename IndexVector>\n  static EIGEN_DONT_INLINE void run(const Index segsize, BlockScalarVector& dense, ScalarVector& tempv, ScalarVector& lusup, Index& luptr, const Index lda,\n                                    const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros);\n};\n\ntemplate <int SegSizeAtCompileTime>\ntemplate <typename BlockScalarVector, typename ScalarVector, typename IndexVector>\nEIGEN_DONT_INLINE void LU_kernel_bmod<SegSizeAtCompileTime>::run(const Index segsize, BlockScalarVector& dense, ScalarVector& tempv, 
ScalarVector& lusup, Index& luptr, const Index lda,\n                                                                  const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros)\n{\n  typedef typename ScalarVector::Scalar Scalar;\n  // First, copy U[*,j] segment from dense(*) to tempv(*)\n  // The result of triangular solve is in tempv[*]; \n    // The result of matric-vector update is in dense[*]\n  Index isub = lptr + no_zeros; \n  Index i;\n  Index irow;\n  for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++)\n  {\n    irow = lsub(isub); \n    tempv(i) = dense(irow); \n    ++isub; \n  }\n  // Dense triangular solve -- start effective triangle\n  luptr += lda * no_zeros + no_zeros; \n  // Form Eigen matrix and vector \n  Map<Matrix<Scalar,SegSizeAtCompileTime,SegSizeAtCompileTime, ColMajor>, 0, OuterStride<> > A( &(lusup.data()[luptr]), segsize, segsize, OuterStride<>(lda) );\n  Map<Matrix<Scalar,SegSizeAtCompileTime,1> > u(tempv.data(), segsize);\n  \n  u = A.template triangularView<UnitLower>().solve(u); \n  \n  // Dense matrix-vector product y <-- B*x \n  luptr += segsize;\n  const Index PacketSize = internal::packet_traits<Scalar>::size;\n  Index ldl = internal::first_multiple(nrow, PacketSize);\n  Map<Matrix<Scalar,Dynamic,SegSizeAtCompileTime, ColMajor>, 0, OuterStride<> > B( &(lusup.data()[luptr]), nrow, segsize, OuterStride<>(lda) );\n  Index aligned_offset = internal::first_default_aligned(tempv.data()+segsize, PacketSize);\n  Index aligned_with_B_offset = (PacketSize-internal::first_default_aligned(B.data(), PacketSize))%PacketSize;\n  Map<Matrix<Scalar,Dynamic,1>, 0, OuterStride<> > l(tempv.data()+segsize+aligned_offset+aligned_with_B_offset, nrow, OuterStride<>(ldl) );\n  \n  l.setZero();\n  internal::sparselu_gemm<Scalar>(l.rows(), l.cols(), B.cols(), B.data(), B.outerStride(), u.data(), u.outerStride(), l.data(), l.outerStride());\n  \n  // Scatter tempv[] into SPA dense[] as a temporary storage \n  
isub = lptr + no_zeros;\n  for (i = 0; i < ((SegSizeAtCompileTime==Dynamic)?segsize:SegSizeAtCompileTime); i++)\n  {\n    irow = lsub(isub++); \n    dense(irow) = tempv(i);\n  }\n  \n  // Scatter l into SPA dense[]\n  for (i = 0; i < nrow; i++)\n  {\n    irow = lsub(isub++); \n    dense(irow) -= l(i);\n  } \n}\n\ntemplate <> struct LU_kernel_bmod<1>\n{\n  template <typename BlockScalarVector, typename ScalarVector, typename IndexVector>\n  static EIGEN_DONT_INLINE void run(const Index /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr,\n                                    const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros);\n};\n\n\ntemplate <typename BlockScalarVector, typename ScalarVector, typename IndexVector>\nEIGEN_DONT_INLINE void LU_kernel_bmod<1>::run(const Index /*segsize*/, BlockScalarVector& dense, ScalarVector& /*tempv*/, ScalarVector& lusup, Index& luptr,\n                                              const Index lda, const Index nrow, IndexVector& lsub, const Index lptr, const Index no_zeros)\n{\n  typedef typename ScalarVector::Scalar Scalar;\n  typedef typename IndexVector::Scalar StorageIndex;\n  Scalar f = dense(lsub(lptr + no_zeros));\n  luptr += lda * no_zeros + no_zeros + 1;\n  const Scalar* a(lusup.data() + luptr);\n  const StorageIndex*  irow(lsub.data()+lptr + no_zeros + 1);\n  Index i = 0;\n  for (; i+1 < nrow; i+=2)\n  {\n    Index i0 = *(irow++);\n    Index i1 = *(irow++);\n    Scalar a0 = *(a++);\n    Scalar a1 = *(a++);\n    Scalar d0 = dense.coeff(i0);\n    Scalar d1 = dense.coeff(i1);\n    d0 -= f*a0;\n    d1 -= f*a1;\n    dense.coeffRef(i0) = d0;\n    dense.coeffRef(i1) = d1;\n  }\n  if(i<nrow)\n    dense.coeffRef(*(irow++)) -= f * *(a++);\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n#endif // SPARSELU_KERNEL_BMOD_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_panel_bmod.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n/* \n \n * NOTE: This file is the modified version of [s,d,c,z]panel_bmod.c file in SuperLU \n \n * -- SuperLU routine (version 3.0) --\n * Univ. of California Berkeley, Xerox Palo Alto Research Center,\n * and Lawrence Berkeley National Lab.\n * October 15, 2003\n *\n * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.\n *\n * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY\n * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.\n *\n * Permission is hereby granted to use or copy this program for any\n * purpose, provided the above notices are retained on all copies.\n * Permission to modify the code and to distribute modified code is\n * granted, provided the above notices are retained, and a notice that\n * the code was modified is included with the above copyright notice.\n */\n#ifndef SPARSELU_PANEL_BMOD_H\n#define SPARSELU_PANEL_BMOD_H\n\nnamespace Eigen {\nnamespace internal {\n\n/**\n * \\brief Performs numeric block updates (sup-panel) in topological order.\n * \n * Before entering this routine, the original nonzeros in the panel\n * were already copied i nto the spa[m,w]\n * \n * \\param m number of rows in the matrix\n * \\param w Panel size\n * \\param jcol Starting  column of the panel\n * \\param nseg Number of segments in the U part\n * \\param dense Store the full representation of the panel \n * \\param tempv working array \n * \\param segrep segment representative... first row in the segment\n * \\param repfnz First nonzero rows\n * \\param glu Global LU data. 
\n * \n * \n */\ntemplate <typename Scalar, typename StorageIndex>\nvoid SparseLUImpl<Scalar,StorageIndex>::panel_bmod(const Index m, const Index w, const Index jcol, \n                                            const Index nseg, ScalarVector& dense, ScalarVector& tempv,\n                                            IndexVector& segrep, IndexVector& repfnz, GlobalLU_t& glu)\n{\n  \n  Index ksub,jj,nextl_col; \n  Index fsupc, nsupc, nsupr, nrow; \n  Index krep, kfnz; \n  Index lptr; // points to the row subscripts of a supernode \n  Index luptr; // ...\n  Index segsize,no_zeros ; \n  // For each nonz supernode segment of U[*,j] in topological order\n  Index k = nseg - 1; \n  const Index PacketSize = internal::packet_traits<Scalar>::size;\n  \n  for (ksub = 0; ksub < nseg; ksub++)\n  { // For each updating supernode\n    /* krep = representative of current k-th supernode\n     * fsupc =  first supernodal column\n     * nsupc = number of columns in a supernode\n     * nsupr = number of rows in a supernode\n     */\n    krep = segrep(k); k--; \n    fsupc = glu.xsup(glu.supno(krep)); \n    nsupc = krep - fsupc + 1; \n    nsupr = glu.xlsub(fsupc+1) - glu.xlsub(fsupc); \n    nrow = nsupr - nsupc; \n    lptr = glu.xlsub(fsupc); \n    \n    // loop over the panel columns to detect the actual number of columns and rows\n    Index u_rows = 0;\n    Index u_cols = 0;\n    for (jj = jcol; jj < jcol + w; jj++)\n    {\n      nextl_col = (jj-jcol) * m; \n      VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row\n      \n      kfnz = repfnz_col(krep); \n      if ( kfnz == emptyIdxLU ) \n        continue; // skip any zero segment\n      \n      segsize = krep - kfnz + 1;\n      u_cols++;\n      u_rows = (std::max)(segsize,u_rows);\n    }\n    \n    if(nsupc >= 2)\n    { \n      Index ldu = internal::first_multiple<Index>(u_rows, PacketSize);\n      Map<ScalarMatrix, Aligned,  OuterStride<> > U(tempv.data(), u_rows, u_cols, 
OuterStride<>(ldu));\n      \n      // gather U\n      Index u_col = 0;\n      for (jj = jcol; jj < jcol + w; jj++)\n      {\n        nextl_col = (jj-jcol) * m; \n        VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row\n        VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here\n        \n        kfnz = repfnz_col(krep); \n        if ( kfnz == emptyIdxLU ) \n          continue; // skip any zero segment\n        \n        segsize = krep - kfnz + 1;\n        luptr = glu.xlusup(fsupc);    \n        no_zeros = kfnz - fsupc; \n        \n        Index isub = lptr + no_zeros;\n        Index off = u_rows-segsize;\n        for (Index i = 0; i < off; i++) U(i,u_col) = 0;\n        for (Index i = 0; i < segsize; i++)\n        {\n          Index irow = glu.lsub(isub); \n          U(i+off,u_col) = dense_col(irow); \n          ++isub; \n        }\n        u_col++;\n      }\n      // solve U = A^-1 U\n      luptr = glu.xlusup(fsupc);\n      Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc);\n      no_zeros = (krep - u_rows + 1) - fsupc;\n      luptr += lda * no_zeros + no_zeros;\n      MappedMatrixBlock A(glu.lusup.data()+luptr, u_rows, u_rows, OuterStride<>(lda) );\n      U = A.template triangularView<UnitLower>().solve(U);\n      \n      // update\n      luptr += u_rows;\n      MappedMatrixBlock B(glu.lusup.data()+luptr, nrow, u_rows, OuterStride<>(lda) );\n      eigen_assert(tempv.size()>w*ldu + nrow*w + 1);\n      \n      Index ldl = internal::first_multiple<Index>(nrow, PacketSize);\n      Index offset = (PacketSize-internal::first_default_aligned(B.data(), PacketSize)) % PacketSize;\n      MappedMatrixBlock L(tempv.data()+w*ldu+offset, nrow, u_cols, OuterStride<>(ldl));\n      \n      L.setZero();\n      internal::sparselu_gemm<Scalar>(L.rows(), L.cols(), B.cols(), B.data(), B.outerStride(), U.data(), U.outerStride(), L.data(), L.outerStride());\n      \n      
// scatter U and L\n      u_col = 0;\n      for (jj = jcol; jj < jcol + w; jj++)\n      {\n        nextl_col = (jj-jcol) * m; \n        VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row\n        VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here\n        \n        kfnz = repfnz_col(krep); \n        if ( kfnz == emptyIdxLU ) \n          continue; // skip any zero segment\n        \n        segsize = krep - kfnz + 1;\n        no_zeros = kfnz - fsupc; \n        Index isub = lptr + no_zeros;\n        \n        Index off = u_rows-segsize;\n        for (Index i = 0; i < segsize; i++)\n        {\n          Index irow = glu.lsub(isub++); \n          dense_col(irow) = U.coeff(i+off,u_col);\n          U.coeffRef(i+off,u_col) = 0;\n        }\n        \n        // Scatter l into SPA dense[]\n        for (Index i = 0; i < nrow; i++)\n        {\n          Index irow = glu.lsub(isub++); \n          dense_col(irow) -= L.coeff(i,u_col);\n          L.coeffRef(i,u_col) = 0;\n        }\n        u_col++;\n      }\n    }\n    else // level 2 only\n    {\n      // Sequence through each column in the panel\n      for (jj = jcol; jj < jcol + w; jj++)\n      {\n        nextl_col = (jj-jcol) * m; \n        VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row\n        VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here\n        \n        kfnz = repfnz_col(krep); \n        if ( kfnz == emptyIdxLU ) \n          continue; // skip any zero segment\n        \n        segsize = krep - kfnz + 1;\n        luptr = glu.xlusup(fsupc);\n        \n        Index lda = glu.xlusup(fsupc+1)-glu.xlusup(fsupc);// nsupr\n        \n        // Perform a trianglar solve and block update, \n        // then scatter the result of sup-col update to dense[]\n        no_zeros = kfnz - fsupc; \n              
if(segsize==1)  LU_kernel_bmod<1>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);\n        else  if(segsize==2)  LU_kernel_bmod<2>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);\n        else  if(segsize==3)  LU_kernel_bmod<3>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros);\n        else                  LU_kernel_bmod<Dynamic>::run(segsize, dense_col, tempv, glu.lusup, luptr, lda, nrow, glu.lsub, lptr, no_zeros); \n      } // End for each column in the panel \n    }\n    \n  } // End for each updating supernode\n} // end panel bmod\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // SPARSELU_PANEL_BMOD_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_panel_dfs.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n/* \n \n * NOTE: This file is the modified version of [s,d,c,z]panel_dfs.c file in SuperLU \n \n * -- SuperLU routine (version 2.0) --\n * Univ. of California Berkeley, Xerox Palo Alto Research Center,\n * and Lawrence Berkeley National Lab.\n * November 15, 1997\n *\n * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.\n *\n * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY\n * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.\n *\n * Permission is hereby granted to use or copy this program for any\n * purpose, provided the above notices are retained on all copies.\n * Permission to modify the code and to distribute modified code is\n * granted, provided the above notices are retained, and a notice that\n * the code was modified is included with the above copyright notice.\n */\n#ifndef SPARSELU_PANEL_DFS_H\n#define SPARSELU_PANEL_DFS_H\n\nnamespace Eigen {\n\nnamespace internal {\n  \ntemplate<typename IndexVector>\nstruct panel_dfs_traits\n{\n  typedef typename IndexVector::Scalar StorageIndex;\n  panel_dfs_traits(Index jcol, StorageIndex* marker)\n    : m_jcol(jcol), m_marker(marker)\n  {}\n  bool update_segrep(Index krep, StorageIndex jj)\n  {\n    if(m_marker[krep]<m_jcol)\n    {\n      m_marker[krep] = jj; \n      return true;\n    }\n    return false;\n  }\n  void mem_expand(IndexVector& /*glu.lsub*/, Index /*nextl*/, Index /*chmark*/) {}\n  enum { ExpandMem = false };\n  Index m_jcol;\n  StorageIndex* m_marker;\n};\n\n\ntemplate <typename Scalar, typename StorageIndex>\ntemplate <typename Traits>\nvoid 
SparseLUImpl<Scalar,StorageIndex>::dfs_kernel(const StorageIndex jj, IndexVector& perm_r,\n                   Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,\n                   Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,\n                   IndexVector& xplore, GlobalLU_t& glu,\n                   Index& nextl_col, Index krow, Traits& traits\n                  )\n{\n  \n  StorageIndex kmark = marker(krow);\n      \n  // For each unmarked krow of jj\n  marker(krow) = jj; \n  StorageIndex kperm = perm_r(krow); \n  if (kperm == emptyIdxLU ) {\n    // krow is in L : place it in structure of L(*, jj)\n    panel_lsub(nextl_col++) = StorageIndex(krow);  // krow is indexed into A\n    \n    traits.mem_expand(panel_lsub, nextl_col, kmark);\n  }\n  else \n  {\n    // krow is in U : if its supernode-representative krep\n    // has been explored, update repfnz(*)\n    // krep = supernode representative of the current row\n    StorageIndex krep = glu.xsup(glu.supno(kperm)+1) - 1; \n    // First nonzero element in the current column:\n    StorageIndex myfnz = repfnz_col(krep); \n    \n    if (myfnz != emptyIdxLU )\n    {\n      // Representative visited before\n      if (myfnz > kperm ) repfnz_col(krep) = kperm; \n      \n    }\n    else \n    {\n      // Otherwise, perform dfs starting at krep\n      StorageIndex oldrep = emptyIdxLU; \n      parent(krep) = oldrep; \n      repfnz_col(krep) = kperm; \n      StorageIndex xdfs =  glu.xlsub(krep); \n      Index maxdfs = xprune(krep); \n      \n      StorageIndex kpar;\n      do \n      {\n        // For each unmarked kchild of krep\n        while (xdfs < maxdfs) \n        {\n          StorageIndex kchild = glu.lsub(xdfs); \n          xdfs++; \n          StorageIndex chmark = marker(kchild); \n          \n          if (chmark != jj ) \n          {\n            marker(kchild) = jj; \n            StorageIndex chperm = perm_r(kchild); \n            \n            if (chperm 
== emptyIdxLU) \n            {\n              // case kchild is in L: place it in L(*, j)\n              panel_lsub(nextl_col++) = kchild;\n              traits.mem_expand(panel_lsub, nextl_col, chmark);\n            }\n            else\n            {\n              // case kchild is in U :\n              // chrep = its supernode-rep. If its rep has been explored, \n              // update its repfnz(*)\n              StorageIndex chrep = glu.xsup(glu.supno(chperm)+1) - 1; \n              myfnz = repfnz_col(chrep); \n              \n              if (myfnz != emptyIdxLU) \n              { // Visited before \n                if (myfnz > chperm) \n                  repfnz_col(chrep) = chperm; \n              }\n              else \n              { // Cont. dfs at snode-rep of kchild\n                xplore(krep) = xdfs; \n                oldrep = krep; \n                krep = chrep; // Go deeper down G(L)\n                parent(krep) = oldrep; \n                repfnz_col(krep) = chperm; \n                xdfs = glu.xlsub(krep); \n                maxdfs = xprune(krep); \n                \n              } // end if myfnz != -1\n            } // end if chperm == -1 \n                \n          } // end if chmark !=jj\n        } // end while xdfs < maxdfs\n        \n        // krow has no more unexplored nbrs :\n        //    Place snode-rep krep in postorder DFS, if this \n        //    segment is seen for the first time. 
(Note that \n        //    \"repfnz(krep)\" may change later.)\n        //    Baktrack dfs to its parent\n        if(traits.update_segrep(krep,jj))\n        //if (marker1(krep) < jcol )\n        {\n          segrep(nseg) = krep; \n          ++nseg; \n          //marker1(krep) = jj; \n        }\n        \n        kpar = parent(krep); // Pop recursion, mimic recursion \n        if (kpar == emptyIdxLU) \n          break; // dfs done \n        krep = kpar; \n        xdfs = xplore(krep); \n        maxdfs = xprune(krep); \n\n      } while (kpar != emptyIdxLU); // Do until empty stack \n      \n    } // end if (myfnz = -1)\n\n  } // end if (kperm == -1)   \n}\n\n/**\n * \\brief Performs a symbolic factorization on a panel of columns [jcol, jcol+w)\n * \n * A supernode representative is the last column of a supernode.\n * The nonzeros in U[*,j] are segments that end at supernodes representatives\n * \n * The routine returns a list of the supernodal representatives \n * in topological order of the dfs that generates them. This list is \n * a superset of the topological order of each individual column within \n * the panel.\n * The location of the first nonzero in each supernodal segment \n * (supernodal entry location) is also returned. Each column has \n * a separate list for this purpose. 
\n * \n * Two markers arrays are used for dfs :\n *    marker[i] == jj, if i was visited during dfs of current column jj;\n *    marker1[i] >= jcol, if i was visited by earlier columns in this panel; \n * \n * \\param[in] m number of rows in the matrix\n * \\param[in] w Panel size\n * \\param[in] jcol Starting  column of the panel\n * \\param[in] A Input matrix in column-major storage\n * \\param[in] perm_r Row permutation\n * \\param[out] nseg Number of U segments\n * \\param[out] dense Accumulate the column vectors of the panel\n * \\param[out] panel_lsub Subscripts of the row in the panel \n * \\param[out] segrep Segment representative i.e first nonzero row of each segment\n * \\param[out] repfnz First nonzero location in each row\n * \\param[out] xprune The pruned elimination tree\n * \\param[out] marker work vector\n * \\param  parent The elimination tree\n * \\param xplore work vector\n * \\param glu The global data structure\n * \n */\n\ntemplate <typename Scalar, typename StorageIndex>\nvoid SparseLUImpl<Scalar,StorageIndex>::panel_dfs(const Index m, const Index w, const Index jcol, MatrixType& A, IndexVector& perm_r, Index& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, GlobalLU_t& glu)\n{\n  Index nextl_col; // Next available position in panel_lsub[*,jj] \n  \n  // Initialize pointers \n  VectorBlock<IndexVector> marker1(marker, m, m); \n  nseg = 0; \n  \n  panel_dfs_traits<IndexVector> traits(jcol, marker1.data());\n  \n  // For each column in the panel \n  for (StorageIndex jj = StorageIndex(jcol); jj < jcol + w; jj++) \n  {\n    nextl_col = (jj - jcol) * m; \n    \n    VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero location in each row\n    VectorBlock<ScalarVector> dense_col(dense,nextl_col, m); // Accumulate a column vector here\n    \n    \n    // For each nnz in A[*, jj] do depth first 
search\n    for (typename MatrixType::InnerIterator it(A, jj); it; ++it)\n    {\n      Index krow = it.row(); \n      dense_col(krow) = it.value();\n      \n      StorageIndex kmark = marker(krow); \n      if (kmark == jj) \n        continue; // krow visited before, go to the next nonzero\n      \n      dfs_kernel(jj, perm_r, nseg, panel_lsub, segrep, repfnz_col, xprune, marker, parent,\n                   xplore, glu, nextl_col, krow, traits);\n    }// end for nonzeros in column jj\n    \n  } // end for column jj\n}\n\n} // end namespace internal\n} // end namespace Eigen\n\n#endif // SPARSELU_PANEL_DFS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_pivotL.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n/* \n \n * NOTE: This file is the modified version of xpivotL.c file in SuperLU \n \n * -- SuperLU routine (version 3.0) --\n * Univ. of California Berkeley, Xerox Palo Alto Research Center,\n * and Lawrence Berkeley National Lab.\n * October 15, 2003\n *\n * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.\n *\n * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY\n * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.\n *\n * Permission is hereby granted to use or copy this program for any\n * purpose, provided the above notices are retained on all copies.\n * Permission to modify the code and to distribute modified code is\n * granted, provided the above notices are retained, and a notice that\n * the code was modified is included with the above copyright notice.\n */\n#ifndef SPARSELU_PIVOTL_H\n#define SPARSELU_PIVOTL_H\n\nnamespace Eigen {\nnamespace internal {\n  \n/**\n * \\brief Performs the numerical pivotin on the current column of L, and the CDIV operation.\n * \n * Pivot policy :\n * (1) Compute thresh = u * max_(i>=j) abs(A_ij);\n * (2) IF user specifies pivot row k and abs(A_kj) >= thresh THEN\n *           pivot row = k;\n *       ELSE IF abs(A_jj) >= thresh THEN\n *           pivot row = j;\n *       ELSE\n *           pivot row = m;\n * \n *   Note: If you absolutely want to use a given pivot order, then set u=0.0.\n * \n * \\param jcol The current column of L\n * \\param diagpivotthresh diagonal pivoting threshold\n * \\param[in,out] perm_r Row permutation (threshold pivoting)\n * \\param[in] iperm_c column permutation - used to finf 
diagonal of Pc*A*Pc'\n * \\param[out] pivrow  The pivot row\n * \\param glu Global LU data\n * \\return 0 if success, i > 0 if U(i,i) is exactly zero \n * \n */\ntemplate <typename Scalar, typename StorageIndex>\nIndex SparseLUImpl<Scalar,StorageIndex>::pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu)\n{\n  \n  Index fsupc = (glu.xsup)((glu.supno)(jcol)); // First column in the supernode containing the column jcol\n  Index nsupc = jcol - fsupc; // Number of columns in the supernode portion, excluding jcol; nsupc >=0\n  Index lptr = glu.xlsub(fsupc); // pointer to the starting location of the row subscripts for this supernode portion\n  Index nsupr = glu.xlsub(fsupc+1) - lptr; // Number of rows in the supernode\n  Index lda = glu.xlusup(fsupc+1) - glu.xlusup(fsupc); // leading dimension\n  Scalar* lu_sup_ptr = &(glu.lusup.data()[glu.xlusup(fsupc)]); // Start of the current supernode\n  Scalar* lu_col_ptr = &(glu.lusup.data()[glu.xlusup(jcol)]); // Start of jcol in the supernode\n  StorageIndex* lsub_ptr = &(glu.lsub.data()[lptr]); // Start of row indices of the supernode\n  \n  // Determine the largest abs numerical value for partial pivoting \n  Index diagind = iperm_c(jcol); // diagonal index \n  RealScalar pivmax(-1.0);\n  Index pivptr = nsupc; \n  Index diag = emptyIdxLU; \n  RealScalar rtemp;\n  Index isub, icol, itemp, k; \n  for (isub = nsupc; isub < nsupr; ++isub) {\n    using std::abs;\n    rtemp = abs(lu_col_ptr[isub]);\n    if (rtemp > pivmax) {\n      pivmax = rtemp; \n      pivptr = isub;\n    } \n    if (lsub_ptr[isub] == diagind) diag = isub;\n  }\n  \n  // Test for singularity\n  if ( pivmax <= RealScalar(0.0) ) {\n    // if pivmax == -1, the column is structurally empty, otherwise it is only numerically zero\n    pivrow = pivmax < RealScalar(0.0) ? 
diagind : lsub_ptr[pivptr];\n    perm_r(pivrow) = StorageIndex(jcol);\n    return (jcol+1);\n  }\n  \n  RealScalar thresh = diagpivotthresh * pivmax; \n  \n  // Choose appropriate pivotal element \n  \n  {\n    // Test if the diagonal element can be used as a pivot (given the threshold value)\n    if (diag >= 0 ) \n    {\n      // Diagonal element exists\n      using std::abs;\n      rtemp = abs(lu_col_ptr[diag]);\n      if (rtemp != RealScalar(0.0) && rtemp >= thresh) pivptr = diag;\n    }\n    pivrow = lsub_ptr[pivptr];\n  }\n  \n  // Record pivot row\n  perm_r(pivrow) = StorageIndex(jcol);\n  // Interchange row subscripts\n  if (pivptr != nsupc )\n  {\n    std::swap( lsub_ptr[pivptr], lsub_ptr[nsupc] );\n    // Interchange numerical values as well, for the two rows in the whole snode\n    // such that L is indexed the same way as A\n    for (icol = 0; icol <= nsupc; icol++)\n    {\n      itemp = pivptr + icol * lda; \n      std::swap(lu_sup_ptr[itemp], lu_sup_ptr[nsupc + icol * lda]);\n    }\n  }\n  // cdiv operations\n  Scalar temp = Scalar(1.0) / lu_col_ptr[nsupc];\n  for (k = nsupc+1; k < nsupr; k++)\n    lu_col_ptr[k] *= temp; \n  return 0;\n}\n\n} // end namespace internal\n} // end namespace Eigen\n\n#endif // SPARSELU_PIVOTL_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_pruneL.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n/* \n \n * NOTE: This file is the modified version of [s,d,c,z]pruneL.c file in SuperLU \n \n * -- SuperLU routine (version 2.0) --\n * Univ. of California Berkeley, Xerox Palo Alto Research Center,\n * and Lawrence Berkeley National Lab.\n * November 15, 1997\n *\n * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.\n *\n * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY\n * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.\n *\n * Permission is hereby granted to use or copy this program for any\n * purpose, provided the above notices are retained on all copies.\n * Permission to modify the code and to distribute modified code is\n * granted, provided the above notices are retained, and a notice that\n * the code was modified is included with the above copyright notice.\n */\n#ifndef SPARSELU_PRUNEL_H\n#define SPARSELU_PRUNEL_H\n\nnamespace Eigen {\nnamespace internal {\n\n/**\n * \\brief Prunes the L-structure.\n *\n * It prunes the L-structure  of supernodes whose L-structure contains the current pivot row \"pivrow\"\n * \n * \n * \\param jcol The current column of L\n * \\param[in] perm_r Row permutation\n * \\param[out] pivrow  The pivot row\n * \\param nseg Number of segments\n * \\param segrep \n * \\param repfnz\n * \\param[out] xprune \n * \\param glu Global LU data\n * \n */\ntemplate <typename Scalar, typename StorageIndex>\nvoid SparseLUImpl<Scalar,StorageIndex>::pruneL(const Index jcol, const IndexVector& perm_r, const Index pivrow, const Index nseg,\n                                               const IndexVector& segrep, 
BlockIndexVector repfnz, IndexVector& xprune, GlobalLU_t& glu)\n{\n  // For each supernode-rep irep in U(*,j]\n  Index jsupno = glu.supno(jcol); \n  Index i,irep,irep1; \n  bool movnum, do_prune = false; \n  Index kmin = 0, kmax = 0, minloc, maxloc,krow; \n  for (i = 0; i < nseg; i++)\n  {\n    irep = segrep(i); \n    irep1 = irep + 1; \n    do_prune = false; \n    \n    // Don't prune with a zero U-segment \n    if (repfnz(irep) == emptyIdxLU) continue; \n    \n    // If a snode overlaps with the next panel, then the U-segment\n    // is fragmented into two parts -- irep and irep1. We should let \n    // pruning occur at the rep-column in irep1s snode. \n    if (glu.supno(irep) == glu.supno(irep1) ) continue; // don't prune \n    \n    // If it has not been pruned & it has a nonz in row L(pivrow,i)\n    if (glu.supno(irep) != jsupno )\n    {\n      if ( xprune (irep) >= glu.xlsub(irep1) )\n      {\n        kmin = glu.xlsub(irep);\n        kmax = glu.xlsub(irep1) - 1; \n        for (krow = kmin; krow <= kmax; krow++)\n        {\n          if (glu.lsub(krow) == pivrow) \n          {\n            do_prune = true; \n            break; \n          }\n        }\n      }\n      \n      if (do_prune) \n      {\n        // do a quicksort-type partition\n        // movnum=true means that the num values have to be exchanged\n        movnum = false; \n        if (irep == glu.xsup(glu.supno(irep)) ) // Snode of size 1 \n          movnum = true; \n        \n        while (kmin <= kmax)\n        {\n          if (perm_r(glu.lsub(kmax)) == emptyIdxLU)\n            kmax--; \n          else if ( perm_r(glu.lsub(kmin)) != emptyIdxLU)\n            kmin++;\n          else \n          {\n            // kmin below pivrow (not yet pivoted), and kmax\n            // above pivrow: interchange the two suscripts\n            std::swap(glu.lsub(kmin), glu.lsub(kmax)); \n            \n            // If the supernode has only one column, then we \n            // only keep one set of subscripts. 
For any subscript\n            // intercnahge performed, similar interchange must be \n            // done on the numerical values. \n            if (movnum) \n            {\n              minloc = glu.xlusup(irep) + ( kmin - glu.xlsub(irep) ); \n              maxloc = glu.xlusup(irep) + ( kmax - glu.xlsub(irep) ); \n              std::swap(glu.lusup(minloc), glu.lusup(maxloc)); \n            }\n            kmin++;\n            kmax--;\n          }\n        } // end while \n        \n        xprune(irep) = StorageIndex(kmin);  //Pruning \n      } // end if do_prune \n    } // end pruning \n  } // End for each U-segment\n}\n\n} // end namespace internal\n} // end namespace Eigen\n\n#endif // SPARSELU_PRUNEL_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseLU/SparseLU_relax_snode.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n/* This file is a modified version of heap_relax_snode.c file in SuperLU\n * -- SuperLU routine (version 3.0) --\n * Univ. of California Berkeley, Xerox Palo Alto Research Center,\n * and Lawrence Berkeley National Lab.\n * October 15, 2003\n *\n * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.\n *\n * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY\n * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.\n *\n * Permission is hereby granted to use or copy this program for any\n * purpose, provided the above notices are retained on all copies.\n * Permission to modify the code and to distribute modified code is\n * granted, provided the above notices are retained, and a notice that\n * the code was modified is included with the above copyright notice.\n */\n\n#ifndef SPARSELU_RELAX_SNODE_H\n#define SPARSELU_RELAX_SNODE_H\n\nnamespace Eigen {\n\nnamespace internal {\n \n/** \n * \\brief Identify the initial relaxed supernodes\n * \n * This routine is applied to a column elimination tree. 
\n * It assumes that the matrix has been reordered according to the postorder of the etree\n * \\param n  the number of columns\n * \\param et elimination tree \n * \\param relax_columns Maximum number of columns allowed in a relaxed snode \n * \\param descendants Number of descendants of each node in the etree\n * \\param relax_end last column in a supernode\n */\ntemplate <typename Scalar, typename StorageIndex>\nvoid SparseLUImpl<Scalar,StorageIndex>::relax_snode (const Index n, IndexVector& et, const Index relax_columns, IndexVector& descendants, IndexVector& relax_end)\n{\n  \n  // compute the number of descendants of each node in the etree\n  Index parent; \n  relax_end.setConstant(emptyIdxLU);\n  descendants.setZero();\n  for (Index j = 0; j < n; j++) \n  {\n    parent = et(j);\n    if (parent != n) // not the dummy root\n      descendants(parent) += descendants(j) + 1;\n  }\n  // Identify the relaxed supernodes by postorder traversal of the etree\n  Index snode_start; // beginning of a snode \n  for (Index j = 0; j < n; )\n  {\n    parent = et(j);\n    snode_start = j; \n    while ( parent != n && descendants(parent) < relax_columns ) \n    {\n      j = parent; \n      parent = et(j);\n    }\n    // Found a supernode in postordered etree, j is the last column \n    relax_end(snode_start) = StorageIndex(j); // Record last column\n    j++;\n    // Search for a new leaf\n    while (descendants(j) != 0 && j < n) j++;\n  } // End postorder traversal of the etree\n  \n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SparseQR/SparseQR.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2012-2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>\n// Copyright (C) 2012-2014 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SPARSE_QR_H\n#define EIGEN_SPARSE_QR_H\n\nnamespace Eigen {\n\ntemplate<typename MatrixType, typename OrderingType> class SparseQR;\ntemplate<typename SparseQRType> struct SparseQRMatrixQReturnType;\ntemplate<typename SparseQRType> struct SparseQRMatrixQTransposeReturnType;\ntemplate<typename SparseQRType, typename Derived> struct SparseQR_QProduct;\nnamespace internal {\n  template <typename SparseQRType> struct traits<SparseQRMatrixQReturnType<SparseQRType> >\n  {\n    typedef typename SparseQRType::MatrixType ReturnType;\n    typedef typename ReturnType::StorageIndex StorageIndex;\n    typedef typename ReturnType::StorageKind StorageKind;\n    enum {\n      RowsAtCompileTime = Dynamic,\n      ColsAtCompileTime = Dynamic\n    };\n  };\n  template <typename SparseQRType> struct traits<SparseQRMatrixQTransposeReturnType<SparseQRType> >\n  {\n    typedef typename SparseQRType::MatrixType ReturnType;\n  };\n  template <typename SparseQRType, typename Derived> struct traits<SparseQR_QProduct<SparseQRType, Derived> >\n  {\n    typedef typename Derived::PlainObject ReturnType;\n  };\n} // End namespace internal\n\n/**\n  * \\ingroup SparseQR_Module\n  * \\class SparseQR\n  * \\brief Sparse left-looking rank-revealing QR factorization\n  * \n  * This class implements a left-looking rank-revealing QR decomposition \n  * of sparse matrices. When a column has a norm less than a given tolerance\n  * it is implicitly permuted to the end. 
The QR factorization thus obtained is \n  * given by A*P = Q*R where R is upper triangular or trapezoidal. \n  * \n  * P is the column permutation which is the product of the fill-reducing and the\n  * rank-revealing permutations. Use colsPermutation() to get it.\n  * \n  * Q is the orthogonal matrix represented as products of Householder reflectors. \n  * Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose.\n  * You can then apply it to a vector.\n  * \n  * R is the sparse triangular or trapezoidal matrix. The later occurs when A is rank-deficient.\n  * matrixR().topLeftCorner(rank(), rank()) always returns a triangular factor of full rank.\n  * \n  * \\tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<>\n  * \\tparam _OrderingType The fill-reducing ordering method. See the \\link OrderingMethods_Module \n  *  OrderingMethods \\endlink module for the list of built-in and external ordering methods.\n  * \n  * \\implsparsesolverconcept\n  *\n  * \\warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()).\n  * \n  */\ntemplate<typename _MatrixType, typename _OrderingType>\nclass SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >\n{\n  protected:\n    typedef SparseSolverBase<SparseQR<_MatrixType,_OrderingType> > Base;\n    using Base::m_isInitialized;\n  public:\n    using Base::_solve_impl;\n    typedef _MatrixType MatrixType;\n    typedef _OrderingType OrderingType;\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> QRMatrixType;\n    typedef Matrix<StorageIndex, Dynamic, 1> IndexVector;\n    typedef Matrix<Scalar, Dynamic, 1> ScalarVector;\n    typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;\n\n    enum {\n      ColsAtCompileTime = 
MatrixType::ColsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n    \n  public:\n    SparseQR () :  m_analysisIsok(false), m_lastError(\"\"), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)\n    { }\n    \n    /** Construct a QR factorization of the matrix \\a mat.\n      * \n      * \\warning The matrix \\a mat must be in compressed mode (see SparseMatrix::makeCompressed()).\n      * \n      * \\sa compute()\n      */\n    explicit SparseQR(const MatrixType& mat) : m_analysisIsok(false), m_lastError(\"\"), m_useDefaultThreshold(true),m_isQSorted(false),m_isEtreeOk(false)\n    {\n      compute(mat);\n    }\n    \n    /** Computes the QR factorization of the sparse matrix \\a mat.\n      * \n      * \\warning The matrix \\a mat must be in compressed mode (see SparseMatrix::makeCompressed()).\n      * \n      * \\sa analyzePattern(), factorize()\n      */\n    void compute(const MatrixType& mat)\n    {\n      analyzePattern(mat);\n      factorize(mat);\n    }\n    void analyzePattern(const MatrixType& mat);\n    void factorize(const MatrixType& mat);\n    \n    /** \\returns the number of rows of the represented matrix. \n      */\n    inline Index rows() const { return m_pmat.rows(); }\n    \n    /** \\returns the number of columns of the represented matrix. \n      */\n    inline Index cols() const { return m_pmat.cols();}\n    \n    /** \\returns a const reference to the \\b sparse upper triangular matrix R of the QR factorization.\n      * \\warning The entries of the returned matrix are not sorted. This means that using it in algorithms\n      *          expecting sorted entries will fail. This include random coefficient accesses (SpaseMatrix::coeff()),\n      *          and coefficient-wise operations. 
Matrix products and triangular solves are fine though.\n      *\n      * To sort the entries, you can assign it to a row-major matrix, and if a column-major matrix\n      * is required, you can copy it again:\n      * \\code\n      * SparseMatrix<double>          R  = qr.matrixR();  // column-major, not sorted!\n      * SparseMatrix<double,RowMajor> Rr = qr.matrixR();  // row-major, sorted\n      * SparseMatrix<double>          Rc = Rr;            // column-major, sorted\n      * \\endcode\n      */\n    const QRMatrixType& matrixR() const { return m_R; }\n    \n    /** \\returns the number of non linearly dependent columns as determined by the pivoting threshold.\n      *\n      * \\sa setPivotThreshold()\n      */\n    Index rank() const\n    {\n      eigen_assert(m_isInitialized && \"The factorization should be called first, use compute()\");\n      return m_nonzeropivots; \n    }\n    \n    /** \\returns an expression of the matrix Q as products of sparse Householder reflectors.\n    * The common usage of this function is to apply it to a dense matrix or vector\n    * \\code\n    * VectorXd B1, B2;\n    * // Initialize B1\n    * B2 = matrixQ() * B1;\n    * \\endcode\n    *\n    * To get a plain SparseMatrix representation of Q:\n    * \\code\n    * SparseMatrix<double> Q;\n    * Q = SparseQR<SparseMatrix<double> >(A).matrixQ();\n    * \\endcode\n    * Internally, this call simply performs a sparse product between the matrix Q\n    * and a sparse identity matrix. 
However, due to the fact that the sparse\n    * reflectors are stored unsorted, two transpositions are needed to sort\n    * them before performing the product.\n    */\n    SparseQRMatrixQReturnType<SparseQR> matrixQ() const \n    { return SparseQRMatrixQReturnType<SparseQR>(*this); }\n    \n    /** \\returns a const reference to the column permutation P that was applied to A such that A*P = Q*R\n      * It is the combination of the fill-in reducing permutation and numerical column pivoting.\n      */\n    const PermutationType& colsPermutation() const\n    { \n      eigen_assert(m_isInitialized && \"Decomposition is not initialized.\");\n      return m_outputPerm_c;\n    }\n    \n    /** \\returns A string describing the type of error.\n      * This method is provided to ease debugging, not to handle errors.\n      */\n    std::string lastErrorMessage() const { return m_lastError; }\n    \n    /** \\internal */\n    template<typename Rhs, typename Dest>\n    bool _solve_impl(const MatrixBase<Rhs> &B, MatrixBase<Dest> &dest) const\n    {\n      eigen_assert(m_isInitialized && \"The factorization should be called first, use compute()\");\n      eigen_assert(this->rows() == B.rows() && \"SparseQR::solve() : invalid number of rows in the right hand side matrix\");\n\n      Index rank = this->rank();\n      \n      // Compute Q^T * b;\n      typename Dest::PlainObject y, b;\n      y = this->matrixQ().transpose() * B; \n      b = y;\n      \n      // Solve with the triangular matrix R\n      y.resize((std::max<Index>)(cols(),y.rows()),y.cols());\n      y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView<Upper>().solve(b.topRows(rank));\n      y.bottomRows(y.rows()-rank).setZero();\n      \n      // Apply the column permutation\n      if (m_perm_c.size())  dest = colsPermutation() * y.topRows(cols());\n      else                  dest = y.topRows(cols());\n      \n      m_info = Success;\n      return true;\n    }\n\n    /** Sets the 
threshold that is used to determine linearly dependent columns during the factorization.\n      *\n      * In practice, if during the factorization the norm of the column that has to be eliminated is below\n      * this threshold, then the entire column is treated as zero, and it is moved at the end.\n      */\n    void setPivotThreshold(const RealScalar& threshold)\n    {\n      m_useDefaultThreshold = false;\n      m_threshold = threshold;\n    }\n    \n    /** \\returns the solution X of \\f$ A X = B \\f$ using the current decomposition of A.\n      *\n      * \\sa compute()\n      */\n    template<typename Rhs>\n    inline const Solve<SparseQR, Rhs> solve(const MatrixBase<Rhs>& B) const \n    {\n      eigen_assert(m_isInitialized && \"The factorization should be called first, use compute()\");\n      eigen_assert(this->rows() == B.rows() && \"SparseQR::solve() : invalid number of rows in the right hand side matrix\");\n      return Solve<SparseQR, Rhs>(*this, B.derived());\n    }\n    template<typename Rhs>\n    inline const Solve<SparseQR, Rhs> solve(const SparseMatrixBase<Rhs>& B) const\n    {\n          eigen_assert(m_isInitialized && \"The factorization should be called first, use compute()\");\n          eigen_assert(this->rows() == B.rows() && \"SparseQR::solve() : invalid number of rows in the right hand side matrix\");\n          return Solve<SparseQR, Rhs>(*this, B.derived());\n    }\n    \n    /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was successful,\n      *          \\c NumericalIssue if the QR factorization reports a numerical problem\n      *          \\c InvalidInput if the input matrix is invalid\n      *\n      * \\sa iparm()          \n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"Decomposition is not initialized.\");\n      return m_info;\n    }\n\n\n    /** \\internal */\n    inline void _sort_matrix_Q()\n    {\n      
if(this->m_isQSorted) return;\n      // The matrix Q is sorted during the transposition\n      SparseMatrix<Scalar, RowMajor, Index> mQrm(this->m_Q);\n      this->m_Q = mQrm;\n      this->m_isQSorted = true;\n    }\n\n    \n  protected:\n    bool m_analysisIsok;\n    bool m_factorizationIsok;\n    mutable ComputationInfo m_info;\n    std::string m_lastError;\n    QRMatrixType m_pmat;            // Temporary matrix\n    QRMatrixType m_R;               // The triangular factor matrix\n    QRMatrixType m_Q;               // The orthogonal reflectors\n    ScalarVector m_hcoeffs;         // The Householder coefficients\n    PermutationType m_perm_c;       // Fill-reducing  Column  permutation\n    PermutationType m_pivotperm;    // The permutation for rank revealing\n    PermutationType m_outputPerm_c; // The final column permutation\n    RealScalar m_threshold;         // Threshold to determine null Householder reflections\n    bool m_useDefaultThreshold;     // Use default threshold\n    Index m_nonzeropivots;          // Number of non zero pivots found\n    IndexVector m_etree;            // Column elimination tree\n    IndexVector m_firstRowElt;      // First element in each row\n    bool m_isQSorted;               // whether Q is sorted or not\n    bool m_isEtreeOk;               // whether the elimination tree match the initial input matrix\n    \n    template <typename, typename > friend struct SparseQR_QProduct;\n    \n};\n\n/** \\brief Preprocessing step of a QR factorization \n  * \n  * \\warning The matrix \\a mat must be in compressed mode (see SparseMatrix::makeCompressed()).\n  * \n  * In this step, the fill-reducing permutation is computed and applied to the columns of A\n  * and the column elimination tree is computed as well. 
Only the sparsity pattern of \\a mat is exploited.\n  * \n  * \\note In this step it is assumed that there is no empty row in the matrix \\a mat.\n  */\ntemplate <typename MatrixType, typename OrderingType>\nvoid SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)\n{\n  eigen_assert(mat.isCompressed() && \"SparseQR requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to SparseQR\");\n  // Copy to a column major matrix if the input is rowmajor\n  typename internal::conditional<MatrixType::IsRowMajor,QRMatrixType,const MatrixType&>::type matCpy(mat);\n  // Compute the column fill reducing ordering\n  OrderingType ord; \n  ord(matCpy, m_perm_c); \n  Index n = mat.cols();\n  Index m = mat.rows();\n  Index diagSize = (std::min)(m,n);\n  \n  if (!m_perm_c.size())\n  {\n    m_perm_c.resize(n);\n    m_perm_c.indices().setLinSpaced(n, 0,StorageIndex(n-1));\n  }\n  \n  // Compute the column elimination tree of the permuted matrix\n  m_outputPerm_c = m_perm_c.inverse();\n  internal::coletree(matCpy, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());\n  m_isEtreeOk = true;\n  \n  m_R.resize(m, n);\n  m_Q.resize(m, diagSize);\n  \n  // Allocate space for nonzero elements : rough estimation\n  m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree\n  m_Q.reserve(2*mat.nonZeros());\n  m_hcoeffs.resize(diagSize);\n  m_analysisIsok = true;\n}\n\n/** \\brief Performs the numerical QR factorization of the input matrix\n  * \n  * The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with\n  * a matrix having the same sparsity pattern than \\a mat.\n  * \n  * \\param mat The sparse column-major matrix\n  */\ntemplate <typename MatrixType, typename OrderingType>\nvoid SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)\n{\n  using std::abs;\n  \n  eigen_assert(m_analysisIsok && \"analyzePattern() should be called 
before this step\");\n  StorageIndex m = StorageIndex(mat.rows());\n  StorageIndex n = StorageIndex(mat.cols());\n  StorageIndex diagSize = (std::min)(m,n);\n  IndexVector mark((std::max)(m,n)); mark.setConstant(-1);  // Record the visited nodes\n  IndexVector Ridx(n), Qidx(m);                             // Store temporarily the row indexes for the current column of R and Q\n  Index nzcolR, nzcolQ;                                     // Number of nonzero for the current column of R and Q\n  ScalarVector tval(m);                                     // The dense vector used to compute the current column\n  RealScalar pivotThreshold = m_threshold;\n  \n  m_R.setZero();\n  m_Q.setZero();\n  m_pmat = mat;\n  if(!m_isEtreeOk)\n  {\n    m_outputPerm_c = m_perm_c.inverse();\n    internal::coletree(m_pmat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());\n    m_isEtreeOk = true;\n  }\n\n  m_pmat.uncompress(); // To have the innerNonZeroPtr allocated\n  \n  // Apply the fill-in reducing permutation lazily:\n  {\n    // If the input is row major, copy the original column indices,\n    // otherwise directly use the input matrix\n    // \n    IndexVector originalOuterIndicesCpy;\n    const StorageIndex *originalOuterIndices = mat.outerIndexPtr();\n    if(MatrixType::IsRowMajor)\n    {\n      originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1);\n      originalOuterIndices = originalOuterIndicesCpy.data();\n    }\n    \n    for (int i = 0; i < n; i++)\n    {\n      Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i;\n      m_pmat.outerIndexPtr()[p] = originalOuterIndices[i]; \n      m_pmat.innerNonZeroPtr()[p] = originalOuterIndices[i+1] - originalOuterIndices[i]; \n    }\n  }\n  \n  /* Compute the default threshold as in MatLab, see:\n   * Tim Davis, \"Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing\n   * Sparse QR Factorization, ACM Trans. on Math. Soft. 
38(1), 2011, Page 8:3 \n   */\n  if(m_useDefaultThreshold) \n  {\n    RealScalar max2Norm = 0.0;\n    for (int j = 0; j < n; j++) max2Norm = numext::maxi(max2Norm, m_pmat.col(j).norm());\n    if(max2Norm==RealScalar(0))\n      max2Norm = RealScalar(1);\n    pivotThreshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon();\n  }\n  \n  // Initialize the numerical permutation\n  m_pivotperm.setIdentity(n);\n  \n  StorageIndex nonzeroCol = 0; // Record the number of valid pivots\n  m_Q.startVec(0);\n\n  // Left looking rank-revealing QR factorization: compute a column of R and Q at a time\n  for (StorageIndex col = 0; col < n; ++col)\n  {\n    mark.setConstant(-1);\n    m_R.startVec(col);\n    mark(nonzeroCol) = col;\n    Qidx(0) = nonzeroCol;\n    nzcolR = 0; nzcolQ = 1;\n    bool found_diag = nonzeroCol>=m;\n    tval.setZero(); \n    \n    // Symbolic factorization: find the nonzero locations of the column k of the factors R and Q, i.e.,\n    // all the nodes (with indexes lower than rank) reachable through the column elimination tree (etree) rooted at node k.\n    // Note: if the diagonal entry does not exist, then its contribution must be explicitly added,\n    // thus the trick with found_diag that permits to do one more iteration on the diagonal element if this one has not been found.\n    for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp)\n    {\n      StorageIndex curIdx = nonzeroCol;\n      if(itp) curIdx = StorageIndex(itp.row());\n      if(curIdx == nonzeroCol) found_diag = true;\n      \n      // Get the nonzeros indexes of the current column of R\n      StorageIndex st = m_firstRowElt(curIdx); // The traversal of the etree starts here\n      if (st < 0 )\n      {\n        m_lastError = \"Empty row found during numerical factorization\";\n        m_info = InvalidInput;\n        return;\n      }\n\n      // Traverse the etree \n      Index bi = nzcolR;\n      for (; mark(st) != col; st = m_etree(st))\n      {\n  
      Ridx(nzcolR) = st;  // Add this row to the list,\n        mark(st) = col;     // and mark this row as visited\n        nzcolR++;\n      }\n\n      // Reverse the list to get the topological ordering\n      Index nt = nzcolR-bi;\n      for(Index i = 0; i < nt/2; i++) std::swap(Ridx(bi+i), Ridx(nzcolR-i-1));\n       \n      // Copy the current (curIdx,pcol) value of the input matrix\n      if(itp) tval(curIdx) = itp.value();\n      else    tval(curIdx) = Scalar(0);\n      \n      // Compute the pattern of Q(:,k)\n      if(curIdx > nonzeroCol && mark(curIdx) != col ) \n      {\n        Qidx(nzcolQ) = curIdx;  // Add this row to the pattern of Q,\n        mark(curIdx) = col;     // and mark it as visited\n        nzcolQ++;\n      }\n    }\n\n    // Browse all the indexes of R(:,col) in reverse order\n    for (Index i = nzcolR-1; i >= 0; i--)\n    {\n      Index curIdx = Ridx(i);\n      \n      // Apply the curIdx-th householder vector to the current column (temporarily stored into tval)\n      Scalar tdot(0);\n      \n      // First compute q' * tval\n      tdot = m_Q.col(curIdx).dot(tval);\n\n      tdot *= m_hcoeffs(curIdx);\n      \n      // Then update tval = tval - q * tau\n      // FIXME: tval -= tdot * m_Q.col(curIdx) should amount to the same (need to check/add support for efficient \"dense ?= sparse\")\n      for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)\n        tval(itq.row()) -= itq.value() * tdot;\n\n      // Detect fill-in for the current column of Q\n      if(m_etree(Ridx(i)) == nonzeroCol)\n      {\n        for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)\n        {\n          StorageIndex iQ = StorageIndex(itq.row());\n          if (mark(iQ) != col)\n          {\n            Qidx(nzcolQ++) = iQ;  // Add this row to the pattern of Q,\n            mark(iQ) = col;       // and mark it as visited\n          }\n        }\n      }\n    } // End update current column\n    \n    Scalar tau = 
RealScalar(0);\n    RealScalar beta = 0;\n    \n    if(nonzeroCol < diagSize)\n    {\n      // Compute the Householder reflection that eliminate the current column\n      // FIXME this step should call the Householder module.\n      Scalar c0 = nzcolQ ? tval(Qidx(0)) : Scalar(0);\n      \n      // First, the squared norm of Q((col+1):m, col)\n      RealScalar sqrNorm = 0.;\n      for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq)));\n      if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0))\n      {\n        beta = numext::real(c0);\n        tval(Qidx(0)) = 1;\n      }\n      else\n      {\n        using std::sqrt;\n        beta = sqrt(numext::abs2(c0) + sqrNorm);\n        if(numext::real(c0) >= RealScalar(0))\n          beta = -beta;\n        tval(Qidx(0)) = 1;\n        for (Index itq = 1; itq < nzcolQ; ++itq)\n          tval(Qidx(itq)) /= (c0 - beta);\n        tau = numext::conj((beta-c0) / beta);\n          \n      }\n    }\n\n    // Insert values in R\n    for (Index  i = nzcolR-1; i >= 0; i--)\n    {\n      Index curIdx = Ridx(i);\n      if(curIdx < nonzeroCol) \n      {\n        m_R.insertBackByOuterInnerUnordered(col, curIdx) = tval(curIdx);\n        tval(curIdx) = Scalar(0.);\n      }\n    }\n\n    if(nonzeroCol < diagSize && abs(beta) >= pivotThreshold)\n    {\n      m_R.insertBackByOuterInner(col, nonzeroCol) = beta;\n      // The householder coefficient\n      m_hcoeffs(nonzeroCol) = tau;\n      // Record the householder reflections\n      for (Index itq = 0; itq < nzcolQ; ++itq)\n      {\n        Index iQ = Qidx(itq);\n        m_Q.insertBackByOuterInnerUnordered(nonzeroCol,iQ) = tval(iQ);\n        tval(iQ) = Scalar(0.);\n      }\n      nonzeroCol++;\n      if(nonzeroCol<diagSize)\n        m_Q.startVec(nonzeroCol);\n    }\n    else\n    {\n      // Zero pivot found: move implicitly this column to the end\n      for (Index j = nonzeroCol; j < n-1; j++) \n        std::swap(m_pivotperm.indices()(j), 
m_pivotperm.indices()[j+1]);\n      \n      // Recompute the column elimination tree\n      internal::coletree(m_pmat, m_etree, m_firstRowElt, m_pivotperm.indices().data());\n      m_isEtreeOk = false;\n    }\n  }\n  \n  m_hcoeffs.tail(diagSize-nonzeroCol).setZero();\n  \n  // Finalize the column pointers of the sparse matrices R and Q\n  m_Q.finalize();\n  m_Q.makeCompressed();\n  m_R.finalize();\n  m_R.makeCompressed();\n  m_isQSorted = false;\n\n  m_nonzeropivots = nonzeroCol;\n  \n  if(nonzeroCol<n)\n  {\n    // Permute the triangular factor to put the 'dead' columns to the end\n    QRMatrixType tempR(m_R);\n    m_R = tempR * m_pivotperm;\n    \n    // Update the column permutation\n    m_outputPerm_c = m_outputPerm_c * m_pivotperm;\n  }\n  \n  m_isInitialized = true; \n  m_factorizationIsok = true;\n  m_info = Success;\n}\n\ntemplate <typename SparseQRType, typename Derived>\nstruct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived> >\n{\n  typedef typename SparseQRType::QRMatrixType MatrixType;\n  typedef typename SparseQRType::Scalar Scalar;\n  // Get the references \n  SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) : \n  m_qr(qr),m_other(other),m_transpose(transpose) {}\n  inline Index rows() const { return m_transpose ? 
m_qr.rows() : m_qr.cols(); }\n  inline Index cols() const { return m_other.cols(); }\n  \n  // Assign to a vector\n  template<typename DesType>\n  void evalTo(DesType& res) const\n  {\n    Index m = m_qr.rows();\n    Index n = m_qr.cols();\n    Index diagSize = (std::min)(m,n);\n    res = m_other;\n    if (m_transpose)\n    {\n      eigen_assert(m_qr.m_Q.rows() == m_other.rows() && \"Non conforming object sizes\");\n      //Compute res = Q' * other column by column\n      for(Index j = 0; j < res.cols(); j++){\n        for (Index k = 0; k < diagSize; k++)\n        {\n          Scalar tau = Scalar(0);\n          tau = m_qr.m_Q.col(k).dot(res.col(j));\n          if(tau==Scalar(0)) continue;\n          tau = tau * m_qr.m_hcoeffs(k);\n          res.col(j) -= tau * m_qr.m_Q.col(k);\n        }\n      }\n    }\n    else\n    {\n      eigen_assert(m_qr.m_Q.rows() == m_other.rows() && \"Non conforming object sizes\");\n      // Compute res = Q * other column by column\n      for(Index j = 0; j < res.cols(); j++)\n      {\n        for (Index k = diagSize-1; k >=0; k--)\n        {\n          Scalar tau = Scalar(0);\n          tau = m_qr.m_Q.col(k).dot(res.col(j));\n          if(tau==Scalar(0)) continue;\n          tau = tau * m_qr.m_hcoeffs(k);\n          res.col(j) -= tau * m_qr.m_Q.col(k);\n        }\n      }\n    }\n  }\n  \n  const SparseQRType& m_qr;\n  const Derived& m_other;\n  bool m_transpose;\n};\n\ntemplate<typename SparseQRType>\nstruct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<SparseQRType> >\n{  \n  typedef typename SparseQRType::Scalar Scalar;\n  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;\n  enum {\n    RowsAtCompileTime = Dynamic,\n    ColsAtCompileTime = Dynamic\n  };\n  explicit SparseQRMatrixQReturnType(const SparseQRType& qr) : m_qr(qr) {}\n  template<typename Derived>\n  SparseQR_QProduct<SparseQRType, Derived> operator*(const MatrixBase<Derived>& other)\n  {\n    return 
SparseQR_QProduct<SparseQRType,Derived>(m_qr,other.derived(),false);\n  }\n  SparseQRMatrixQTransposeReturnType<SparseQRType> adjoint() const\n  {\n    return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);\n  }\n  inline Index rows() const { return m_qr.rows(); }\n  inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); }\n  // To use for operations with the transpose of Q\n  SparseQRMatrixQTransposeReturnType<SparseQRType> transpose() const\n  {\n    return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);\n  }\n  const SparseQRType& m_qr;\n};\n\ntemplate<typename SparseQRType>\nstruct SparseQRMatrixQTransposeReturnType\n{\n  explicit SparseQRMatrixQTransposeReturnType(const SparseQRType& qr) : m_qr(qr) {}\n  template<typename Derived>\n  SparseQR_QProduct<SparseQRType,Derived> operator*(const MatrixBase<Derived>& other)\n  {\n    return SparseQR_QProduct<SparseQRType,Derived>(m_qr,other.derived(), true);\n  }\n  const SparseQRType& m_qr;\n};\n\nnamespace internal {\n  \ntemplate<typename SparseQRType>\nstruct evaluator_traits<SparseQRMatrixQReturnType<SparseQRType> >\n{\n  typedef typename SparseQRType::MatrixType MatrixType;\n  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;\n  typedef SparseShape Shape;\n};\n\ntemplate< typename DstXprType, typename SparseQRType>\nstruct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar,typename DstXprType::Scalar>, Sparse2Sparse>\n{\n  typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType;\n  typedef typename DstXprType::Scalar Scalar;\n  typedef typename DstXprType::StorageIndex StorageIndex;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/)\n  {\n    typename DstXprType::PlainObject idMat(src.m_qr.rows(), src.m_qr.rows());\n    idMat.setIdentity();\n    // Sort the sparse householder reflectors if needed\n    
const_cast<SparseQRType *>(&src.m_qr)->_sort_matrix_Q();\n    dst = SparseQR_QProduct<SparseQRType, DstXprType>(src.m_qr, idMat, false);\n  }\n};\n\ntemplate< typename DstXprType, typename SparseQRType>\nstruct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar,typename DstXprType::Scalar>, Sparse2Dense>\n{\n  typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType;\n  typedef typename DstXprType::Scalar Scalar;\n  typedef typename DstXprType::StorageIndex StorageIndex;\n  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/)\n  {\n    dst = src.m_qr.matrixQ() * DstXprType::Identity(src.m_qr.rows(), src.m_qr.rows());\n  }\n};\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/StlSupport/StdDeque.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_STDDEQUE_H\n#define EIGEN_STDDEQUE_H\n\n#include \"details.h\"\n\n/**\n * This section contains a convenience MACRO which allows an easy specialization of\n * std::deque such that for data types with alignment issues the correct allocator\n * is used automatically.\n */\n#define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...) \\\nnamespace std \\\n{ \\\n  template<> \\\n  class deque<__VA_ARGS__, std::allocator<__VA_ARGS__> >           \\\n    : public deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \\\n  { \\\n    typedef deque<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > deque_base; \\\n  public: \\\n    typedef __VA_ARGS__ value_type; \\\n    typedef deque_base::allocator_type allocator_type; \\\n    typedef deque_base::size_type size_type;  \\\n    typedef deque_base::iterator iterator;  \\\n    explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {}  \\\n    template<typename InputIterator> \\\n    deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : deque_base(first, last, a) {} \\\n    deque(const deque& c) : deque_base(c) {}  \\\n    explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \\\n    deque(iterator start, iterator end) : deque_base(start, end) {}  \\\n    deque& operator=(const deque& x) {  \\\n      deque_base::operator=(x);  \\\n      return *this;  \\\n    } \\\n  }; \\\n}\n\n// check whether we really need the std::deque specialization\n#if !EIGEN_HAS_CXX11_CONTAINERS && 
!(defined(_GLIBCXX_DEQUE) && (!EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we already have: std::deque::resize(size_type,const T&). */\n\nnamespace std {\n\n#define EIGEN_STD_DEQUE_SPECIALIZATION_BODY \\\n  public:  \\\n    typedef T value_type; \\\n    typedef typename deque_base::allocator_type allocator_type; \\\n    typedef typename deque_base::size_type size_type;  \\\n    typedef typename deque_base::iterator iterator;  \\\n    typedef typename deque_base::const_iterator const_iterator;  \\\n    explicit deque(const allocator_type& a = allocator_type()) : deque_base(a) {}  \\\n    template<typename InputIterator> \\\n    deque(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \\\n    : deque_base(first, last, a) {} \\\n    deque(const deque& c) : deque_base(c) {}  \\\n    explicit deque(size_type num, const value_type& val = value_type()) : deque_base(num, val) {} \\\n    deque(iterator start, iterator end) : deque_base(start, end) {}  \\\n    deque& operator=(const deque& x) {  \\\n      deque_base::operator=(x);  \\\n      return *this;  \\\n    }\n\n  template<typename T>\n  class deque<T,EIGEN_ALIGNED_ALLOCATOR<T> >\n    : public deque<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),\n                   Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> >\n{\n  typedef deque<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),\n                Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > deque_base;\n  EIGEN_STD_DEQUE_SPECIALIZATION_BODY\n\n  void resize(size_type new_size)\n  { resize(new_size, T()); }\n\n#if defined(_DEQUE_)\n  // workaround MSVC std::deque implementation\n  void resize(size_type new_size, const value_type& x)\n  {\n    if (deque_base::size() < new_size)\n      deque_base::_Insert_n(deque_base::end(), new_size - deque_base::size(), x);\n    else if (new_size < deque_base::size())\n      deque_base::erase(deque_base::begin() + new_size, deque_base::end());\n  }\n 
 void push_back(const value_type& x)\n  { deque_base::push_back(x); } \n  void push_front(const value_type& x)\n  { deque_base::push_front(x); }\n  using deque_base::insert;  \n  iterator insert(const_iterator position, const value_type& x)\n  { return deque_base::insert(position,x); }\n  void insert(const_iterator position, size_type new_size, const value_type& x)\n  { deque_base::insert(position, new_size, x); }\n#elif defined(_GLIBCXX_DEQUE) && EIGEN_GNUC_AT_LEAST(4,2)\n  // workaround GCC std::deque implementation\n  void resize(size_type new_size, const value_type& x)\n  {\n    if (new_size < deque_base::size())\n      deque_base::_M_erase_at_end(this->_M_impl._M_start + new_size);\n    else\n      deque_base::insert(deque_base::end(), new_size - deque_base::size(), x);\n  }\n#else\n  // either GCC 4.1 or non-GCC\n  // default implementation which should always work.\n  void resize(size_type new_size, const value_type& x)\n  {\n    if (new_size < deque_base::size())\n      deque_base::erase(deque_base::begin() + new_size, deque_base::end());\n    else if (new_size > deque_base::size())\n      deque_base::insert(deque_base::end(), new_size - deque_base::size(), x);\n  }\n#endif\n  };\n}\n\n#endif // check whether specialization is actually required\n\n#endif // EIGEN_STDDEQUE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/StlSupport/StdList.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_STDLIST_H\n#define EIGEN_STDLIST_H\n\n#include \"details.h\"\n\n/**\n * This section contains a convenience MACRO which allows an easy specialization of\n * std::list such that for data types with alignment issues the correct allocator\n * is used automatically.\n */\n#define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...) \\\nnamespace std \\\n{ \\\n  template<> \\\n  class list<__VA_ARGS__, std::allocator<__VA_ARGS__> >           \\\n    : public list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \\\n  { \\\n    typedef list<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > list_base; \\\n  public: \\\n    typedef __VA_ARGS__ value_type; \\\n    typedef list_base::allocator_type allocator_type; \\\n    typedef list_base::size_type size_type;  \\\n    typedef list_base::iterator iterator;  \\\n    explicit list(const allocator_type& a = allocator_type()) : list_base(a) {}  \\\n    template<typename InputIterator> \\\n    list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : list_base(first, last, a) {} \\\n    list(const list& c) : list_base(c) {}  \\\n    explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \\\n    list(iterator start, iterator end) : list_base(start, end) {}  \\\n    list& operator=(const list& x) {  \\\n      list_base::operator=(x);  \\\n      return *this;  \\\n    } \\\n  }; \\\n}\n\n// check whether we really need the std::list specialization\n#if !EIGEN_HAS_CXX11_CONTAINERS && !(defined(_GLIBCXX_LIST) && (!EIGEN_GNUC_AT_LEAST(4,1))) /* Note that before gcc-4.1 we 
already have: std::list::resize(size_type,const T&). */\n\nnamespace std\n{\n\n#define EIGEN_STD_LIST_SPECIALIZATION_BODY \\\n  public:  \\\n    typedef T value_type; \\\n    typedef typename list_base::allocator_type allocator_type; \\\n    typedef typename list_base::size_type size_type;  \\\n    typedef typename list_base::iterator iterator;  \\\n    typedef typename list_base::const_iterator const_iterator;  \\\n    explicit list(const allocator_type& a = allocator_type()) : list_base(a) {}  \\\n    template<typename InputIterator> \\\n    list(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \\\n    : list_base(first, last, a) {} \\\n    list(const list& c) : list_base(c) {}  \\\n    explicit list(size_type num, const value_type& val = value_type()) : list_base(num, val) {} \\\n    list(iterator start, iterator end) : list_base(start, end) {}  \\\n    list& operator=(const list& x) {  \\\n    list_base::operator=(x);  \\\n    return *this; \\\n  }\n\n  template<typename T>\n  class list<T,EIGEN_ALIGNED_ALLOCATOR<T> >\n    : public list<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),\n                  Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> >\n  {\n    typedef list<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),\n                 Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > list_base;\n    EIGEN_STD_LIST_SPECIALIZATION_BODY\n\n    void resize(size_type new_size)\n    { resize(new_size, T()); }\n\n    void resize(size_type new_size, const value_type& x)\n    {\n      if (list_base::size() < new_size)\n        list_base::insert(list_base::end(), new_size - list_base::size(), x);\n      else\n        while (new_size < list_base::size()) list_base::pop_back();\n    }\n\n#if defined(_LIST_)\n    // workaround MSVC std::list implementation\n    void push_back(const value_type& x)\n    { list_base::push_back(x); } \n    using list_base::insert;  \n    iterator insert(const_iterator 
position, const value_type& x)\n    { return list_base::insert(position,x); }\n    void insert(const_iterator position, size_type new_size, const value_type& x)\n    { list_base::insert(position, new_size, x); }\n#endif\n  };\n}\n\n#endif // check whether specialization is actually required\n\n#endif // EIGEN_STDLIST_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/StlSupport/StdVector.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_STDVECTOR_H\n#define EIGEN_STDVECTOR_H\n\n#include \"details.h\"\n\n/**\n * This section contains a convenience MACRO which allows an easy specialization of\n * std::vector such that for data types with alignment issues the correct allocator\n * is used automatically.\n */\n#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) \\\nnamespace std \\\n{ \\\n  template<> \\\n  class vector<__VA_ARGS__, std::allocator<__VA_ARGS__> >  \\\n    : public vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > \\\n  { \\\n    typedef vector<__VA_ARGS__, EIGEN_ALIGNED_ALLOCATOR<__VA_ARGS__> > vector_base; \\\n  public: \\\n    typedef __VA_ARGS__ value_type; \\\n    typedef vector_base::allocator_type allocator_type; \\\n    typedef vector_base::size_type size_type;  \\\n    typedef vector_base::iterator iterator;  \\\n    explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {}  \\\n    template<typename InputIterator> \\\n    vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) : vector_base(first, last, a) {} \\\n    vector(const vector& c) : vector_base(c) {}  \\\n    explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \\\n    vector(iterator start, iterator end) : vector_base(start, end) {}  \\\n    vector& operator=(const vector& x) {  \\\n      vector_base::operator=(x);  \\\n      return *this;  \\\n    } \\\n  }; \\\n}\n\n// Don't specialize if containers are implemented according to C++11\n#if 
!EIGEN_HAS_CXX11_CONTAINERS\n\nnamespace std {\n\n#define EIGEN_STD_VECTOR_SPECIALIZATION_BODY \\\n  public:  \\\n    typedef T value_type; \\\n    typedef typename vector_base::allocator_type allocator_type; \\\n    typedef typename vector_base::size_type size_type;  \\\n    typedef typename vector_base::iterator iterator;  \\\n    typedef typename vector_base::const_iterator const_iterator;  \\\n    explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {}  \\\n    template<typename InputIterator> \\\n    vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \\\n    : vector_base(first, last, a) {} \\\n    vector(const vector& c) : vector_base(c) {}  \\\n    explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \\\n    vector(iterator start, iterator end) : vector_base(start, end) {}  \\\n    vector& operator=(const vector& x) {  \\\n      vector_base::operator=(x);  \\\n      return *this;  \\\n    }\n\n  template<typename T>\n  class vector<T,EIGEN_ALIGNED_ALLOCATOR<T> >\n    : public vector<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),\n                    Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> >\n{\n  typedef vector<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T),\n                 Eigen::aligned_allocator_indirection<EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T)> > vector_base;\n  EIGEN_STD_VECTOR_SPECIALIZATION_BODY\n\n  void resize(size_type new_size)\n  { resize(new_size, T()); }\n\n#if defined(_VECTOR_)\n  // workaround MSVC std::vector implementation\n  void resize(size_type new_size, const value_type& x)\n  {\n    if (vector_base::size() < new_size)\n      vector_base::_Insert_n(vector_base::end(), new_size - vector_base::size(), x);\n    else if (new_size < vector_base::size())\n      vector_base::erase(vector_base::begin() + new_size, vector_base::end());\n  }\n  void push_back(const value_type& x)\n  { vector_base::push_back(x); } \n  using 
vector_base::insert;  \n  iterator insert(const_iterator position, const value_type& x)\n  { return vector_base::insert(position,x); }\n  void insert(const_iterator position, size_type new_size, const value_type& x)\n  { vector_base::insert(position, new_size, x); }\n#elif defined(_GLIBCXX_VECTOR) && (!(EIGEN_GNUC_AT_LEAST(4,1)))\n  /* Note that before gcc-4.1 we already have: std::vector::resize(size_type,const T&).\n   * However, this specialization is still needed to make the above EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION trick to work. */\n  void resize(size_type new_size, const value_type& x)\n  {\n    vector_base::resize(new_size,x);\n  }\n#elif defined(_GLIBCXX_VECTOR) && EIGEN_GNUC_AT_LEAST(4,2)\n  // workaround GCC std::vector implementation\n  void resize(size_type new_size, const value_type& x)\n  {\n    if (new_size < vector_base::size())\n      vector_base::_M_erase_at_end(this->_M_impl._M_start + new_size);\n    else\n      vector_base::insert(vector_base::end(), new_size - vector_base::size(), x);\n  }\n#else\n  // either GCC 4.1 or non-GCC\n  // default implementation which should always work.\n  void resize(size_type new_size, const value_type& x)\n  {\n    if (new_size < vector_base::size())\n      vector_base::erase(vector_base::begin() + new_size, vector_base::end());\n    else if (new_size > vector_base::size())\n      vector_base::insert(vector_base::end(), new_size - vector_base::size(), x);\n  }\n#endif\n  };\n}\n#endif // !EIGEN_HAS_CXX11_CONTAINERS\n\n\n#endif // EIGEN_STDVECTOR_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/StlSupport/details.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2009 Hauke Heibel <hauke.heibel@googlemail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_STL_DETAILS_H\n#define EIGEN_STL_DETAILS_H\n\n#ifndef EIGEN_ALIGNED_ALLOCATOR\n  #define EIGEN_ALIGNED_ALLOCATOR Eigen::aligned_allocator\n#endif\n\nnamespace Eigen {\n\n  // This one is needed to prevent reimplementing the whole std::vector.\n  template <class T>\n  class aligned_allocator_indirection : public EIGEN_ALIGNED_ALLOCATOR<T>\n  {\n  public:\n    typedef std::size_t     size_type;\n    typedef std::ptrdiff_t  difference_type;\n    typedef T*              pointer;\n    typedef const T*        const_pointer;\n    typedef T&              reference;\n    typedef const T&        const_reference;\n    typedef T               value_type;\n\n    template<class U>\n    struct rebind\n    {\n      typedef aligned_allocator_indirection<U> other;\n    };\n\n    aligned_allocator_indirection() {}\n    aligned_allocator_indirection(const aligned_allocator_indirection& ) : EIGEN_ALIGNED_ALLOCATOR<T>() {}\n    aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<T>& ) {}\n    template<class U>\n    aligned_allocator_indirection(const aligned_allocator_indirection<U>& ) {}\n    template<class U>\n    aligned_allocator_indirection(const EIGEN_ALIGNED_ALLOCATOR<U>& ) {}\n    ~aligned_allocator_indirection() {}\n  };\n\n#if EIGEN_COMP_MSVC\n\n  // sometimes, MSVC detects, at compile time, that the argument x\n  // in std::vector::resize(size_t s,T x) won't be aligned and generate an error\n  // even if this function is never called. 
Whence this little wrapper.\n#define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) \\\n  typename Eigen::internal::conditional< \\\n    Eigen::internal::is_arithmetic<T>::value, \\\n    T, \\\n    Eigen::internal::workaround_msvc_stl_support<T> \\\n  >::type\n\n  namespace internal {\n  template<typename T> struct workaround_msvc_stl_support : public T\n  {\n    inline workaround_msvc_stl_support() : T() {}\n    inline workaround_msvc_stl_support(const T& other) : T(other) {}\n    inline operator T& () { return *static_cast<T*>(this); }\n    inline operator const T& () const { return *static_cast<const T*>(this); }\n    template<typename OtherT>\n    inline T& operator=(const OtherT& other)\n    { T::operator=(other); return *this; }\n    inline workaround_msvc_stl_support& operator=(const workaround_msvc_stl_support& other)\n    { T::operator=(other); return *this; }\n  };\n  }\n\n#else\n\n#define EIGEN_WORKAROUND_MSVC_STL_SUPPORT(T) T\n\n#endif\n\n}\n\n#endif // EIGEN_STL_DETAILS_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/SuperLUSupport/SuperLUSupport.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_SUPERLUSUPPORT_H\n#define EIGEN_SUPERLUSUPPORT_H\n\nnamespace Eigen {\n\n#if defined(SUPERLU_MAJOR_VERSION) && (SUPERLU_MAJOR_VERSION >= 5)\n#define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE)\t\t\\\n    extern \"C\" {                                                                                          \\\n      extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *,                  \\\n                                char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *,           \\\n                                void *, int, SuperMatrix *, SuperMatrix *,                                \\\n                                FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *,                       \\\n                                GlobalLU_t *, mem_usage_t *, SuperLUStat_t *, int *);                     \\\n    }                                                                                                     \\\n    inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A,                                \\\n         int *perm_c, int *perm_r, int *etree, char *equed,                                               \\\n         FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L,                                                      \\\n         SuperMatrix *U, void *work, int lwork,                                                           \\\n         SuperMatrix *B, SuperMatrix *X,                                                                  \\\n         FLOATTYPE *recip_pivot_growth,                                        
                           \\\n         FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr,                                              \\\n         SuperLUStat_t *stats, int *info, KEYTYPE) {                                                      \\\n    mem_usage_t mem_usage;                                                                                \\\n    GlobalLU_t gLU;                                                                                       \\\n    PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L,                                      \\\n         U, work, lwork, B, X, recip_pivot_growth, rcond,                                                 \\\n         ferr, berr, &gLU, &mem_usage, stats, info);                                                      \\\n    return mem_usage.for_lu; /* bytes used by the factor storage */                                       \\\n  }\n#else // version < 5.0\n#define DECL_GSSVX(PREFIX,FLOATTYPE,KEYTYPE)\t\t\\\n    extern \"C\" {                                                                                          \\\n      extern void PREFIX##gssvx(superlu_options_t *, SuperMatrix *, int *, int *, int *,                  \\\n                                char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *,           \\\n                                void *, int, SuperMatrix *, SuperMatrix *,                                \\\n                                FLOATTYPE *, FLOATTYPE *, FLOATTYPE *, FLOATTYPE *,                       \\\n                                mem_usage_t *, SuperLUStat_t *, int *);                                   \\\n    }                                                                                                     \\\n    inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A,                                \\\n         int *perm_c, int *perm_r, int *etree, char *equed,                                               \\\n         
FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L,                                                      \\\n         SuperMatrix *U, void *work, int lwork,                                                           \\\n         SuperMatrix *B, SuperMatrix *X,                                                                  \\\n         FLOATTYPE *recip_pivot_growth,                                                                   \\\n         FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr,                                              \\\n         SuperLUStat_t *stats, int *info, KEYTYPE) {                                                      \\\n    mem_usage_t mem_usage;                                                                                \\\n    PREFIX##gssvx(options, A, perm_c, perm_r, etree, equed, R, C, L,                                      \\\n         U, work, lwork, B, X, recip_pivot_growth, rcond,                                                 \\\n         ferr, berr, &mem_usage, stats, info);                                                            \\\n    return mem_usage.for_lu; /* bytes used by the factor storage */                                       \\\n  }\n#endif\n\nDECL_GSSVX(s,float,float)\nDECL_GSSVX(c,float,std::complex<float>)\nDECL_GSSVX(d,double,double)\nDECL_GSSVX(z,double,std::complex<double>)\n\n#ifdef MILU_ALPHA\n#define EIGEN_SUPERLU_HAS_ILU\n#endif\n\n#ifdef EIGEN_SUPERLU_HAS_ILU\n\n// similarly for the incomplete factorization using gsisx\n#define DECL_GSISX(PREFIX,FLOATTYPE,KEYTYPE)                                                    \\\n    extern \"C\" {                                                                                \\\n      extern void PREFIX##gsisx(superlu_options_t *, SuperMatrix *, int *, int *, int *,        \\\n                         char *, FLOATTYPE *, FLOATTYPE *, SuperMatrix *, SuperMatrix *,        \\\n                         void *, int, SuperMatrix *, SuperMatrix *, FLOATTYPE *, 
FLOATTYPE *,   \\\n                         mem_usage_t *, SuperLUStat_t *, int *);                        \\\n    }                                                                                           \\\n    inline float SuperLU_gsisx(superlu_options_t *options, SuperMatrix *A,                      \\\n         int *perm_c, int *perm_r, int *etree, char *equed,                                     \\\n         FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L,                                            \\\n         SuperMatrix *U, void *work, int lwork,                                                 \\\n         SuperMatrix *B, SuperMatrix *X,                                                        \\\n         FLOATTYPE *recip_pivot_growth,                                                         \\\n         FLOATTYPE *rcond,                                                                      \\\n         SuperLUStat_t *stats, int *info, KEYTYPE) {                                            \\\n    mem_usage_t mem_usage;                                                              \\\n    PREFIX##gsisx(options, A, perm_c, perm_r, etree, equed, R, C, L,                            \\\n         U, work, lwork, B, X, recip_pivot_growth, rcond,                                       \\\n         &mem_usage, stats, info);                                                              \\\n    return mem_usage.for_lu; /* bytes used by the factor storage */                             \\\n  }\n\nDECL_GSISX(s,float,float)\nDECL_GSISX(c,float,std::complex<float>)\nDECL_GSISX(d,double,double)\nDECL_GSISX(z,double,std::complex<double>)\n\n#endif\n\ntemplate<typename MatrixType>\nstruct SluMatrixMapHelper;\n\n/** \\internal\n  *\n  * A wrapper class for SuperLU matrices. It supports only compressed sparse matrices\n  * and dense matrices. 
Supernodal and other fancy format are not supported by this wrapper.\n  *\n  * This wrapper class mainly aims to avoids the need of dynamic allocation of the storage structure.\n  */\nstruct SluMatrix : SuperMatrix\n{\n  SluMatrix()\n  {\n    Store = &storage;\n  }\n\n  SluMatrix(const SluMatrix& other)\n    : SuperMatrix(other)\n  {\n    Store = &storage;\n    storage = other.storage;\n  }\n\n  SluMatrix& operator=(const SluMatrix& other)\n  {\n    SuperMatrix::operator=(static_cast<const SuperMatrix&>(other));\n    Store = &storage;\n    storage = other.storage;\n    return *this;\n  }\n\n  struct\n  {\n    union {int nnz;int lda;};\n    void *values;\n    int *innerInd;\n    int *outerInd;\n  } storage;\n\n  void setStorageType(Stype_t t)\n  {\n    Stype = t;\n    if (t==SLU_NC || t==SLU_NR || t==SLU_DN)\n      Store = &storage;\n    else\n    {\n      eigen_assert(false && \"storage type not supported\");\n      Store = 0;\n    }\n  }\n\n  template<typename Scalar>\n  void setScalarType()\n  {\n    if (internal::is_same<Scalar,float>::value)\n      Dtype = SLU_S;\n    else if (internal::is_same<Scalar,double>::value)\n      Dtype = SLU_D;\n    else if (internal::is_same<Scalar,std::complex<float> >::value)\n      Dtype = SLU_C;\n    else if (internal::is_same<Scalar,std::complex<double> >::value)\n      Dtype = SLU_Z;\n    else\n    {\n      eigen_assert(false && \"Scalar type not supported by SuperLU\");\n    }\n  }\n\n  template<typename MatrixType>\n  static SluMatrix Map(MatrixBase<MatrixType>& _mat)\n  {\n    MatrixType& mat(_mat.derived());\n    eigen_assert( ((MatrixType::Flags&RowMajorBit)!=RowMajorBit) && \"row-major dense matrices are not supported by SuperLU\");\n    SluMatrix res;\n    res.setStorageType(SLU_DN);\n    res.setScalarType<typename MatrixType::Scalar>();\n    res.Mtype     = SLU_GE;\n\n    res.nrow      = internal::convert_index<int>(mat.rows());\n    res.ncol      = internal::convert_index<int>(mat.cols());\n\n    res.storage.lda       
= internal::convert_index<int>(MatrixType::IsVectorAtCompileTime ? mat.size() : mat.outerStride());\n    res.storage.values    = (void*)(mat.data());\n    return res;\n  }\n\n  template<typename MatrixType>\n  static SluMatrix Map(SparseMatrixBase<MatrixType>& a_mat)\n  {\n    MatrixType &mat(a_mat.derived());\n    SluMatrix res;\n    if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)\n    {\n      res.setStorageType(SLU_NR);\n      res.nrow      = internal::convert_index<int>(mat.cols());\n      res.ncol      = internal::convert_index<int>(mat.rows());\n    }\n    else\n    {\n      res.setStorageType(SLU_NC);\n      res.nrow      = internal::convert_index<int>(mat.rows());\n      res.ncol      = internal::convert_index<int>(mat.cols());\n    }\n\n    res.Mtype       = SLU_GE;\n\n    res.storage.nnz       = internal::convert_index<int>(mat.nonZeros());\n    res.storage.values    = mat.valuePtr();\n    res.storage.innerInd  = mat.innerIndexPtr();\n    res.storage.outerInd  = mat.outerIndexPtr();\n\n    res.setScalarType<typename MatrixType::Scalar>();\n\n    // FIXME the following is not very accurate\n    if (MatrixType::Flags & Upper)\n      res.Mtype = SLU_TRU;\n    if (MatrixType::Flags & Lower)\n      res.Mtype = SLU_TRL;\n\n    eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && \"SelfAdjoint matrix shape not supported by SuperLU\");\n\n    return res;\n  }\n};\n\ntemplate<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>\nstruct SluMatrixMapHelper<Matrix<Scalar,Rows,Cols,Options,MRows,MCols> >\n{\n  typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;\n  static void run(MatrixType& mat, SluMatrix& res)\n  {\n    eigen_assert( ((Options&RowMajor)!=RowMajor) && \"row-major dense matrices is not supported by SuperLU\");\n    res.setStorageType(SLU_DN);\n    res.setScalarType<Scalar>();\n    res.Mtype     = SLU_GE;\n\n    res.nrow      = mat.rows();\n    res.ncol      = mat.cols();\n\n    res.storage.lda       = 
mat.outerStride();\n    res.storage.values    = mat.data();\n  }\n};\n\ntemplate<typename Derived>\nstruct SluMatrixMapHelper<SparseMatrixBase<Derived> >\n{\n  typedef Derived MatrixType;\n  static void run(MatrixType& mat, SluMatrix& res)\n  {\n    if ((MatrixType::Flags&RowMajorBit)==RowMajorBit)\n    {\n      res.setStorageType(SLU_NR);\n      res.nrow      = mat.cols();\n      res.ncol      = mat.rows();\n    }\n    else\n    {\n      res.setStorageType(SLU_NC);\n      res.nrow      = mat.rows();\n      res.ncol      = mat.cols();\n    }\n\n    res.Mtype       = SLU_GE;\n\n    res.storage.nnz       = mat.nonZeros();\n    res.storage.values    = mat.valuePtr();\n    res.storage.innerInd  = mat.innerIndexPtr();\n    res.storage.outerInd  = mat.outerIndexPtr();\n\n    res.setScalarType<typename MatrixType::Scalar>();\n\n    // FIXME the following is not very accurate\n    if (MatrixType::Flags & Upper)\n      res.Mtype = SLU_TRU;\n    if (MatrixType::Flags & Lower)\n      res.Mtype = SLU_TRL;\n\n    eigen_assert(((MatrixType::Flags & SelfAdjoint)==0) && \"SelfAdjoint matrix shape not supported by SuperLU\");\n  }\n};\n\nnamespace internal {\n\ntemplate<typename MatrixType>\nSluMatrix asSluMatrix(MatrixType& mat)\n{\n  return SluMatrix::Map(mat);\n}\n\n/** View a Super LU matrix as an Eigen expression */\ntemplate<typename Scalar, int Flags, typename Index>\nMappedSparseMatrix<Scalar,Flags,Index> map_superlu(SluMatrix& sluMat)\n{\n  eigen_assert((Flags&RowMajor)==RowMajor && sluMat.Stype == SLU_NR\n         || (Flags&ColMajor)==ColMajor && sluMat.Stype == SLU_NC);\n\n  Index outerSize = (Flags&RowMajor)==RowMajor ? 
sluMat.ncol : sluMat.nrow;\n\n  return MappedSparseMatrix<Scalar,Flags,Index>(\n    sluMat.nrow, sluMat.ncol, sluMat.storage.outerInd[outerSize],\n    sluMat.storage.outerInd, sluMat.storage.innerInd, reinterpret_cast<Scalar*>(sluMat.storage.values) );\n}\n\n} // end namespace internal\n\n/** \\ingroup SuperLUSupport_Module\n  * \\class SuperLUBase\n  * \\brief The base class for the direct and incomplete LU factorization of SuperLU\n  */\ntemplate<typename _MatrixType, typename Derived>\nclass SuperLUBase : public SparseSolverBase<Derived>\n{\n  protected:\n    typedef SparseSolverBase<Derived> Base;\n    using Base::derived;\n    using Base::m_isInitialized;\n  public:\n    typedef _MatrixType MatrixType;\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef Matrix<Scalar,Dynamic,1> Vector;\n    typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;\n    typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;    \n    typedef Map<PermutationMatrix<Dynamic,Dynamic,int> > PermutationMap;\n    typedef SparseMatrix<Scalar> LUMatrixType;\n    enum {\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n\n  public:\n\n    SuperLUBase() {}\n\n    ~SuperLUBase()\n    {\n      clearFactors();\n    }\n    \n    inline Index rows() const { return m_matrix.rows(); }\n    inline Index cols() const { return m_matrix.cols(); }\n    \n    /** \\returns a reference to the Super LU option object to configure the  Super LU algorithms. 
*/\n    inline superlu_options_t& options() { return m_sluOptions; }\n    \n    /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was succesful,\n      *          \\c NumericalIssue if the matrix.appears to be negative.\n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"Decomposition is not initialized.\");\n      return m_info;\n    }\n\n    /** Computes the sparse Cholesky decomposition of \\a matrix */\n    void compute(const MatrixType& matrix)\n    {\n      derived().analyzePattern(matrix);\n      derived().factorize(matrix);\n    }\n\n    /** Performs a symbolic decomposition on the sparcity of \\a matrix.\n      *\n      * This function is particularly useful when solving for several problems having the same structure.\n      * \n      * \\sa factorize()\n      */\n    void analyzePattern(const MatrixType& /*matrix*/)\n    {\n      m_isInitialized = true;\n      m_info = Success;\n      m_analysisIsOk = true;\n      m_factorizationIsOk = false;\n    }\n    \n    template<typename Stream>\n    void dumpMemory(Stream& /*s*/)\n    {}\n    \n  protected:\n    \n    void initFactorization(const MatrixType& a)\n    {\n      set_default_options(&this->m_sluOptions);\n      \n      const Index size = a.rows();\n      m_matrix = a;\n\n      m_sluA = internal::asSluMatrix(m_matrix);\n      clearFactors();\n\n      m_p.resize(size);\n      m_q.resize(size);\n      m_sluRscale.resize(size);\n      m_sluCscale.resize(size);\n      m_sluEtree.resize(size);\n\n      // set empty B and X\n      m_sluB.setStorageType(SLU_DN);\n      m_sluB.setScalarType<Scalar>();\n      m_sluB.Mtype          = SLU_GE;\n      m_sluB.storage.values = 0;\n      m_sluB.nrow           = 0;\n      m_sluB.ncol           = 0;\n      m_sluB.storage.lda    = internal::convert_index<int>(size);\n      m_sluX                = m_sluB;\n      \n      m_extractedDataAreDirty = true;\n    }\n    
\n    void init()\n    {\n      m_info = InvalidInput;\n      m_isInitialized = false;\n      m_sluL.Store = 0;\n      m_sluU.Store = 0;\n    }\n    \n    void extractData() const;\n\n    void clearFactors()\n    {\n      if(m_sluL.Store)\n        Destroy_SuperNode_Matrix(&m_sluL);\n      if(m_sluU.Store)\n        Destroy_CompCol_Matrix(&m_sluU);\n\n      m_sluL.Store = 0;\n      m_sluU.Store = 0;\n\n      memset(&m_sluL,0,sizeof m_sluL);\n      memset(&m_sluU,0,sizeof m_sluU);\n    }\n\n    // cached data to reduce reallocation, etc.\n    mutable LUMatrixType m_l;\n    mutable LUMatrixType m_u;\n    mutable IntColVectorType m_p;\n    mutable IntRowVectorType m_q;\n\n    mutable LUMatrixType m_matrix;  // copy of the factorized matrix\n    mutable SluMatrix m_sluA;\n    mutable SuperMatrix m_sluL, m_sluU;\n    mutable SluMatrix m_sluB, m_sluX;\n    mutable SuperLUStat_t m_sluStat;\n    mutable superlu_options_t m_sluOptions;\n    mutable std::vector<int> m_sluEtree;\n    mutable Matrix<RealScalar,Dynamic,1> m_sluRscale, m_sluCscale;\n    mutable Matrix<RealScalar,Dynamic,1> m_sluFerr, m_sluBerr;\n    mutable char m_sluEqued;\n\n    mutable ComputationInfo m_info;\n    int m_factorizationIsOk;\n    int m_analysisIsOk;\n    mutable bool m_extractedDataAreDirty;\n    \n  private:\n    SuperLUBase(SuperLUBase& ) { }\n};\n\n\n/** \\ingroup SuperLUSupport_Module\n  * \\class SuperLU\n  * \\brief A sparse direct LU factorization and solver based on the SuperLU library\n  *\n  * This class allows to solve for A.X = B sparse linear problems via a direct LU factorization\n  * using the SuperLU library. The sparse matrix A must be squared and invertible. The vectors or matrices\n  * X and B can be either dense or sparse.\n  *\n  * \\tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  *\n  * \\warning This class is only for the 4.x versions of SuperLU. 
The 3.x and 5.x versions are not supported.\n  *\n  * \\implsparsesolverconcept\n  *\n  * \\sa \\ref TutorialSparseSolverConcept, class SparseLU\n  */\ntemplate<typename _MatrixType>\nclass SuperLU : public SuperLUBase<_MatrixType,SuperLU<_MatrixType> >\n{\n  public:\n    typedef SuperLUBase<_MatrixType,SuperLU> Base;\n    typedef _MatrixType MatrixType;\n    typedef typename Base::Scalar Scalar;\n    typedef typename Base::RealScalar RealScalar;\n    typedef typename Base::StorageIndex StorageIndex;\n    typedef typename Base::IntRowVectorType IntRowVectorType;\n    typedef typename Base::IntColVectorType IntColVectorType;   \n    typedef typename Base::PermutationMap PermutationMap;\n    typedef typename Base::LUMatrixType LUMatrixType;\n    typedef TriangularView<LUMatrixType, Lower|UnitDiag>  LMatrixType;\n    typedef TriangularView<LUMatrixType,  Upper>          UMatrixType;\n\n  public:\n    using Base::_solve_impl;\n\n    SuperLU() : Base() { init(); }\n\n    explicit SuperLU(const MatrixType& matrix) : Base()\n    {\n      init();\n      Base::compute(matrix);\n    }\n\n    ~SuperLU()\n    {\n    }\n    \n    /** Performs a symbolic decomposition on the sparcity of \\a matrix.\n      *\n      * This function is particularly useful when solving for several problems having the same structure.\n      * \n      * \\sa factorize()\n      */\n    void analyzePattern(const MatrixType& matrix)\n    {\n      m_info = InvalidInput;\n      m_isInitialized = false;\n      Base::analyzePattern(matrix);\n    }\n    \n    /** Performs a numeric decomposition of \\a matrix\n      *\n      * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed.\n      *\n      * \\sa analyzePattern()\n      */\n    void factorize(const MatrixType& matrix);\n    \n    /** \\internal */\n    template<typename Rhs,typename Dest>\n    void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;\n    \n    inline const 
LMatrixType& matrixL() const\n    {\n      if (m_extractedDataAreDirty) this->extractData();\n      return m_l;\n    }\n\n    inline const UMatrixType& matrixU() const\n    {\n      if (m_extractedDataAreDirty) this->extractData();\n      return m_u;\n    }\n\n    inline const IntColVectorType& permutationP() const\n    {\n      if (m_extractedDataAreDirty) this->extractData();\n      return m_p;\n    }\n\n    inline const IntRowVectorType& permutationQ() const\n    {\n      if (m_extractedDataAreDirty) this->extractData();\n      return m_q;\n    }\n    \n    Scalar determinant() const;\n    \n  protected:\n    \n    using Base::m_matrix;\n    using Base::m_sluOptions;\n    using Base::m_sluA;\n    using Base::m_sluB;\n    using Base::m_sluX;\n    using Base::m_p;\n    using Base::m_q;\n    using Base::m_sluEtree;\n    using Base::m_sluEqued;\n    using Base::m_sluRscale;\n    using Base::m_sluCscale;\n    using Base::m_sluL;\n    using Base::m_sluU;\n    using Base::m_sluStat;\n    using Base::m_sluFerr;\n    using Base::m_sluBerr;\n    using Base::m_l;\n    using Base::m_u;\n    \n    using Base::m_analysisIsOk;\n    using Base::m_factorizationIsOk;\n    using Base::m_extractedDataAreDirty;\n    using Base::m_isInitialized;\n    using Base::m_info;\n    \n    void init()\n    {\n      Base::init();\n      \n      set_default_options(&this->m_sluOptions);\n      m_sluOptions.PrintStat        = NO;\n      m_sluOptions.ConditionNumber  = NO;\n      m_sluOptions.Trans            = NOTRANS;\n      m_sluOptions.ColPerm          = COLAMD;\n    }\n    \n    \n  private:\n    SuperLU(SuperLU& ) { }\n};\n\ntemplate<typename MatrixType>\nvoid SuperLU<MatrixType>::factorize(const MatrixType& a)\n{\n  eigen_assert(m_analysisIsOk && \"You must first call analyzePattern()\");\n  if(!m_analysisIsOk)\n  {\n    m_info = InvalidInput;\n    return;\n  }\n  \n  this->initFactorization(a);\n  \n  m_sluOptions.ColPerm = COLAMD;\n  int info = 0;\n  RealScalar recip_pivot_growth, 
rcond;\n  RealScalar ferr, berr;\n\n  StatInit(&m_sluStat);\n  SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],\n                &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],\n                &m_sluL, &m_sluU,\n                NULL, 0,\n                &m_sluB, &m_sluX,\n                &recip_pivot_growth, &rcond,\n                &ferr, &berr,\n                &m_sluStat, &info, Scalar());\n  StatFree(&m_sluStat);\n\n  m_extractedDataAreDirty = true;\n\n  // FIXME how to better check for errors ???\n  m_info = info == 0 ? Success : NumericalIssue;\n  m_factorizationIsOk = true;\n}\n\ntemplate<typename MatrixType>\ntemplate<typename Rhs,typename Dest>\nvoid SuperLU<MatrixType>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const\n{\n  eigen_assert(m_factorizationIsOk && \"The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()\");\n\n  const Index size = m_matrix.rows();\n  const Index rhsCols = b.cols();\n  eigen_assert(size==b.rows());\n\n  m_sluOptions.Trans = NOTRANS;\n  m_sluOptions.Fact = FACTORED;\n  m_sluOptions.IterRefine = NOREFINE;\n  \n\n  m_sluFerr.resize(rhsCols);\n  m_sluBerr.resize(rhsCols);\n  \n  Ref<const Matrix<typename Rhs::Scalar,Dynamic,Dynamic,ColMajor> > b_ref(b);\n  Ref<const Matrix<typename Dest::Scalar,Dynamic,Dynamic,ColMajor> > x_ref(x);\n  \n  m_sluB = SluMatrix::Map(b_ref.const_cast_derived());\n  m_sluX = SluMatrix::Map(x_ref.const_cast_derived());\n  \n  typename Rhs::PlainObject b_cpy;\n  if(m_sluEqued!='N')\n  {\n    b_cpy = b;\n    m_sluB = SluMatrix::Map(b_cpy.const_cast_derived());  \n  }\n\n  StatInit(&m_sluStat);\n  int info = 0;\n  RealScalar recip_pivot_growth, rcond;\n  SuperLU_gssvx(&m_sluOptions, &m_sluA,\n                m_q.data(), m_p.data(),\n                &m_sluEtree[0], &m_sluEqued,\n                &m_sluRscale[0], &m_sluCscale[0],\n                &m_sluL, &m_sluU,\n                NULL, 0,\n         
       &m_sluB, &m_sluX,\n                &recip_pivot_growth, &rcond,\n                &m_sluFerr[0], &m_sluBerr[0],\n                &m_sluStat, &info, Scalar());\n  StatFree(&m_sluStat);\n  \n  if(x.derived().data() != x_ref.data())\n    x = x_ref;\n  \n  m_info = info==0 ? Success : NumericalIssue;\n}\n\n// the code of this extractData() function has been adapted from the SuperLU's Matlab support code,\n//\n//  Copyright (c) 1994 by Xerox Corporation.  All rights reserved.\n//\n//  THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY\n//  EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.\n//\ntemplate<typename MatrixType, typename Derived>\nvoid SuperLUBase<MatrixType,Derived>::extractData() const\n{\n  eigen_assert(m_factorizationIsOk && \"The decomposition is not in a valid state for extracting factors, you must first call either compute() or analyzePattern()/factorize()\");\n  if (m_extractedDataAreDirty)\n  {\n    int         upper;\n    int         fsupc, istart, nsupr;\n    int         lastl = 0, lastu = 0;\n    SCformat    *Lstore = static_cast<SCformat*>(m_sluL.Store);\n    NCformat    *Ustore = static_cast<NCformat*>(m_sluU.Store);\n    Scalar      *SNptr;\n\n    const Index size = m_matrix.rows();\n    m_l.resize(size,size);\n    m_l.resizeNonZeros(Lstore->nnz);\n    m_u.resize(size,size);\n    m_u.resizeNonZeros(Ustore->nnz);\n\n    int* Lcol = m_l.outerIndexPtr();\n    int* Lrow = m_l.innerIndexPtr();\n    Scalar* Lval = m_l.valuePtr();\n\n    int* Ucol = m_u.outerIndexPtr();\n    int* Urow = m_u.innerIndexPtr();\n    Scalar* Uval = m_u.valuePtr();\n\n    Ucol[0] = 0;\n    Ucol[0] = 0;\n\n    /* for each supernode */\n    for (int k = 0; k <= Lstore->nsuper; ++k)\n    {\n      fsupc   = L_FST_SUPC(k);\n      istart  = L_SUB_START(fsupc);\n      nsupr   = L_SUB_START(fsupc+1) - istart;\n      upper   = 1;\n\n      /* for each column in the supernode */\n      for (int j = fsupc; j < L_FST_SUPC(k+1); ++j)\n      {\n        SNptr = 
&((Scalar*)Lstore->nzval)[L_NZ_START(j)];\n\n        /* Extract U */\n        for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i)\n        {\n          Uval[lastu] = ((Scalar*)Ustore->nzval)[i];\n          /* Matlab doesn't like explicit zero. */\n          if (Uval[lastu] != 0.0)\n            Urow[lastu++] = U_SUB(i);\n        }\n        for (int i = 0; i < upper; ++i)\n        {\n          /* upper triangle in the supernode */\n          Uval[lastu] = SNptr[i];\n          /* Matlab doesn't like explicit zero. */\n          if (Uval[lastu] != 0.0)\n            Urow[lastu++] = L_SUB(istart+i);\n        }\n        Ucol[j+1] = lastu;\n\n        /* Extract L */\n        Lval[lastl] = 1.0; /* unit diagonal */\n        Lrow[lastl++] = L_SUB(istart + upper - 1);\n        for (int i = upper; i < nsupr; ++i)\n        {\n          Lval[lastl] = SNptr[i];\n          /* Matlab doesn't like explicit zero. */\n          if (Lval[lastl] != 0.0)\n            Lrow[lastl++] = L_SUB(istart+i);\n        }\n        Lcol[j+1] = lastl;\n\n        ++upper;\n      } /* for j ... */\n\n    } /* for k ... 
*/\n\n    // squeeze the matrices :\n    m_l.resizeNonZeros(lastl);\n    m_u.resizeNonZeros(lastu);\n\n    m_extractedDataAreDirty = false;\n  }\n}\n\ntemplate<typename MatrixType>\ntypename SuperLU<MatrixType>::Scalar SuperLU<MatrixType>::determinant() const\n{\n  eigen_assert(m_factorizationIsOk && \"The decomposition is not in a valid state for computing the determinant, you must first call either compute() or analyzePattern()/factorize()\");\n  \n  if (m_extractedDataAreDirty)\n    this->extractData();\n\n  Scalar det = Scalar(1);\n  for (int j=0; j<m_u.cols(); ++j)\n  {\n    if (m_u.outerIndexPtr()[j+1]-m_u.outerIndexPtr()[j] > 0)\n    {\n      int lastId = m_u.outerIndexPtr()[j+1]-1;\n      eigen_assert(m_u.innerIndexPtr()[lastId]<=j);\n      if (m_u.innerIndexPtr()[lastId]==j)\n        det *= m_u.valuePtr()[lastId];\n    }\n  }\n  if(PermutationMap(m_p.data(),m_p.size()).determinant()*PermutationMap(m_q.data(),m_q.size()).determinant()<0)\n    det = -det;\n  if(m_sluEqued!='N')\n    return det/m_sluRscale.prod()/m_sluCscale.prod();\n  else\n    return det;\n}\n\n#ifdef EIGEN_PARSED_BY_DOXYGEN\n#define EIGEN_SUPERLU_HAS_ILU\n#endif\n\n#ifdef EIGEN_SUPERLU_HAS_ILU\n\n/** \\ingroup SuperLUSupport_Module\n  * \\class SuperILU\n  * \\brief A sparse direct \\b incomplete LU factorization and solver based on the SuperLU library\n  *\n  * This class allows to solve for an approximate solution of A.X = B sparse linear problems via an incomplete LU factorization\n  * using the SuperLU library. This class is aimed to be used as a preconditioner of the iterative linear solvers.\n  *\n  * \\warning This class is only for the 4.x versions of SuperLU. 
The 3.x and 5.x versions are not supported.\n  *\n  * \\tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  *\n  * \\implsparsesolverconcept\n  *\n  * \\sa \\ref TutorialSparseSolverConcept, class IncompleteLUT, class ConjugateGradient, class BiCGSTAB\n  */\n\ntemplate<typename _MatrixType>\nclass SuperILU : public SuperLUBase<_MatrixType,SuperILU<_MatrixType> >\n{\n  public:\n    typedef SuperLUBase<_MatrixType,SuperILU> Base;\n    typedef _MatrixType MatrixType;\n    typedef typename Base::Scalar Scalar;\n    typedef typename Base::RealScalar RealScalar;\n\n  public:\n    using Base::_solve_impl;\n\n    SuperILU() : Base() { init(); }\n\n    SuperILU(const MatrixType& matrix) : Base()\n    {\n      init();\n      Base::compute(matrix);\n    }\n\n    ~SuperILU()\n    {\n    }\n    \n    /** Performs a symbolic decomposition on the sparcity of \\a matrix.\n      *\n      * This function is particularly useful when solving for several problems having the same structure.\n      * \n      * \\sa factorize()\n      */\n    void analyzePattern(const MatrixType& matrix)\n    {\n      Base::analyzePattern(matrix);\n    }\n    \n    /** Performs a numeric decomposition of \\a matrix\n      *\n      * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed.\n      *\n      * \\sa analyzePattern()\n      */\n    void factorize(const MatrixType& matrix);\n    \n    #ifndef EIGEN_PARSED_BY_DOXYGEN\n    /** \\internal */\n    template<typename Rhs,typename Dest>\n    void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const;\n    #endif // EIGEN_PARSED_BY_DOXYGEN\n    \n  protected:\n    \n    using Base::m_matrix;\n    using Base::m_sluOptions;\n    using Base::m_sluA;\n    using Base::m_sluB;\n    using Base::m_sluX;\n    using Base::m_p;\n    using Base::m_q;\n    using Base::m_sluEtree;\n    using Base::m_sluEqued;\n    using Base::m_sluRscale;\n    using 
Base::m_sluCscale;\n    using Base::m_sluL;\n    using Base::m_sluU;\n    using Base::m_sluStat;\n    using Base::m_sluFerr;\n    using Base::m_sluBerr;\n    using Base::m_l;\n    using Base::m_u;\n    \n    using Base::m_analysisIsOk;\n    using Base::m_factorizationIsOk;\n    using Base::m_extractedDataAreDirty;\n    using Base::m_isInitialized;\n    using Base::m_info;\n\n    void init()\n    {\n      Base::init();\n      \n      ilu_set_default_options(&m_sluOptions);\n      m_sluOptions.PrintStat        = NO;\n      m_sluOptions.ConditionNumber  = NO;\n      m_sluOptions.Trans            = NOTRANS;\n      m_sluOptions.ColPerm          = MMD_AT_PLUS_A;\n      \n      // no attempt to preserve column sum\n      m_sluOptions.ILU_MILU = SILU;\n      // only basic ILU(k) support -- no direct control over memory consumption\n      // better to use ILU_DropRule = DROP_BASIC | DROP_AREA\n      // and set ILU_FillFactor to max memory growth\n      m_sluOptions.ILU_DropRule = DROP_BASIC;\n      m_sluOptions.ILU_DropTol = NumTraits<Scalar>::dummy_precision()*10;\n    }\n    \n  private:\n    SuperILU(SuperILU& ) { }\n};\n\ntemplate<typename MatrixType>\nvoid SuperILU<MatrixType>::factorize(const MatrixType& a)\n{\n  eigen_assert(m_analysisIsOk && \"You must first call analyzePattern()\");\n  if(!m_analysisIsOk)\n  {\n    m_info = InvalidInput;\n    return;\n  }\n  \n  this->initFactorization(a);\n\n  int info = 0;\n  RealScalar recip_pivot_growth, rcond;\n\n  StatInit(&m_sluStat);\n  SuperLU_gsisx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0],\n                &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0],\n                &m_sluL, &m_sluU,\n                NULL, 0,\n                &m_sluB, &m_sluX,\n                &recip_pivot_growth, &rcond,\n                &m_sluStat, &info, Scalar());\n  StatFree(&m_sluStat);\n\n  // FIXME how to better check for errors ???\n  m_info = info == 0 ? 
Success : NumericalIssue;\n  m_factorizationIsOk = true;\n}\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ntemplate<typename MatrixType>\ntemplate<typename Rhs,typename Dest>\nvoid SuperILU<MatrixType>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest>& x) const\n{\n  eigen_assert(m_factorizationIsOk && \"The decomposition is not in a valid state for solving, you must first call either compute() or analyzePattern()/factorize()\");\n\n  const int size = m_matrix.rows();\n  const int rhsCols = b.cols();\n  eigen_assert(size==b.rows());\n\n  m_sluOptions.Trans = NOTRANS;\n  m_sluOptions.Fact = FACTORED;\n  m_sluOptions.IterRefine = NOREFINE;\n\n  m_sluFerr.resize(rhsCols);\n  m_sluBerr.resize(rhsCols);\n  \n  Ref<const Matrix<typename Rhs::Scalar,Dynamic,Dynamic,ColMajor> > b_ref(b);\n  Ref<const Matrix<typename Dest::Scalar,Dynamic,Dynamic,ColMajor> > x_ref(x);\n  \n  m_sluB = SluMatrix::Map(b_ref.const_cast_derived());\n  m_sluX = SluMatrix::Map(x_ref.const_cast_derived());\n\n  typename Rhs::PlainObject b_cpy;\n  if(m_sluEqued!='N')\n  {\n    b_cpy = b;\n    m_sluB = SluMatrix::Map(b_cpy.const_cast_derived());  \n  }\n  \n  int info = 0;\n  RealScalar recip_pivot_growth, rcond;\n\n  StatInit(&m_sluStat);\n  SuperLU_gsisx(&m_sluOptions, &m_sluA,\n                m_q.data(), m_p.data(),\n                &m_sluEtree[0], &m_sluEqued,\n                &m_sluRscale[0], &m_sluCscale[0],\n                &m_sluL, &m_sluU,\n                NULL, 0,\n                &m_sluB, &m_sluX,\n                &recip_pivot_growth, &rcond,\n                &m_sluStat, &info, Scalar());\n  StatFree(&m_sluStat);\n  \n  if(x.derived().data() != x_ref.data())\n    x = x_ref;\n\n  m_info = info==0 ? Success : NumericalIssue;\n}\n#endif\n\n#endif\n\n} // end namespace Eigen\n\n#endif // EIGEN_SUPERLUSUPPORT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/UmfPackSupport/UmfPackSupport.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_UMFPACKSUPPORT_H\n#define EIGEN_UMFPACKSUPPORT_H\n\nnamespace Eigen {\n\n/* TODO extract L, extract U, compute det, etc... */\n\n// generic double/complex<double> wrapper functions:\n\n\ninline void umfpack_defaults(double control[UMFPACK_CONTROL], double)\n{ umfpack_di_defaults(control); }\n\ninline void umfpack_defaults(double control[UMFPACK_CONTROL], std::complex<double>)\n{ umfpack_zi_defaults(control); }\n\ninline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], double)\n{ umfpack_di_report_info(control, info);}\n\ninline void umfpack_report_info(double control[UMFPACK_CONTROL], double info[UMFPACK_INFO], std::complex<double>)\n{ umfpack_zi_report_info(control, info);}\n\ninline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, double)\n{ umfpack_di_report_status(control, status);}\n\ninline void umfpack_report_status(double control[UMFPACK_CONTROL], int status, std::complex<double>)\n{ umfpack_zi_report_status(control, status);}\n\ninline void umfpack_report_control(double control[UMFPACK_CONTROL], double)\n{ umfpack_di_report_control(control);}\n\ninline void umfpack_report_control(double control[UMFPACK_CONTROL], std::complex<double>)\n{ umfpack_zi_report_control(control);}\n\ninline void umfpack_free_numeric(void **Numeric, double)\n{ umfpack_di_free_numeric(Numeric); *Numeric = 0; }\n\ninline void umfpack_free_numeric(void **Numeric, std::complex<double>)\n{ umfpack_zi_free_numeric(Numeric); *Numeric = 0; }\n\ninline void umfpack_free_symbolic(void **Symbolic, double)\n{ 
umfpack_di_free_symbolic(Symbolic); *Symbolic = 0; }\n\ninline void umfpack_free_symbolic(void **Symbolic, std::complex<double>)\n{ umfpack_zi_free_symbolic(Symbolic); *Symbolic = 0; }\n\ninline int umfpack_symbolic(int n_row,int n_col,\n                            const int Ap[], const int Ai[], const double Ax[], void **Symbolic,\n                            const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])\n{\n  return umfpack_di_symbolic(n_row,n_col,Ap,Ai,Ax,Symbolic,Control,Info);\n}\n\ninline int umfpack_symbolic(int n_row,int n_col,\n                            const int Ap[], const int Ai[], const std::complex<double> Ax[], void **Symbolic,\n                            const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO])\n{\n  return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Control,Info);\n}\n\ninline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[],\n                            void *Symbolic, void **Numeric,\n                            const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])\n{\n  return umfpack_di_numeric(Ap,Ai,Ax,Symbolic,Numeric,Control,Info);\n}\n\ninline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex<double> Ax[],\n                            void *Symbolic, void **Numeric,\n                            const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO])\n{\n  return umfpack_zi_numeric(Ap,Ai,&numext::real_ref(Ax[0]),0,Symbolic,Numeric,Control,Info);\n}\n\ninline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[],\n                          double X[], const double B[], void *Numeric,\n                          const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])\n{\n  return umfpack_di_solve(sys,Ap,Ai,Ax,X,B,Numeric,Control,Info);\n}\n\ninline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::complex<double> Ax[],\n                          
std::complex<double> X[], const std::complex<double> B[], void *Numeric,\n                          const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO])\n{\n  return umfpack_zi_solve(sys,Ap,Ai,&numext::real_ref(Ax[0]),0,&numext::real_ref(X[0]),0,&numext::real_ref(B[0]),0,Numeric,Control,Info);\n}\n\ninline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double)\n{\n  return umfpack_di_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);\n}\n\ninline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, std::complex<double>)\n{\n  return umfpack_zi_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric);\n}\n\ninline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui[], double Ux[],\n                               int P[], int Q[], double Dx[], int *do_recip, double Rs[], void *Numeric)\n{\n  return umfpack_di_get_numeric(Lp,Lj,Lx,Up,Ui,Ux,P,Q,Dx,do_recip,Rs,Numeric);\n}\n\ninline int umfpack_get_numeric(int Lp[], int Lj[], std::complex<double> Lx[], int Up[], int Ui[], std::complex<double> Ux[],\n                               int P[], int Q[], std::complex<double> Dx[], int *do_recip, double Rs[], void *Numeric)\n{\n  double& lx0_real = numext::real_ref(Lx[0]);\n  double& ux0_real = numext::real_ref(Ux[0]);\n  double& dx0_real = numext::real_ref(Dx[0]);\n  return umfpack_zi_get_numeric(Lp,Lj,Lx?&lx0_real:0,0,Up,Ui,Ux?&ux0_real:0,0,P,Q,\n                                Dx?&dx0_real:0,0,do_recip,Rs,Numeric);\n}\n\ninline int umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])\n{\n  return umfpack_di_get_determinant(Mx,Ex,NumericHandle,User_Info);\n}\n\ninline int umfpack_get_determinant(std::complex<double> *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO])\n{\n  double& mx_real = numext::real_ref(*Mx);\n  return 
umfpack_zi_get_determinant(&mx_real,0,Ex,NumericHandle,User_Info);\n}\n\n\n/** \\ingroup UmfPackSupport_Module\n  * \\brief A sparse LU factorization and solver based on UmfPack\n  *\n  * This class allows to solve for A.X = B sparse linear problems via a LU factorization\n  * using the UmfPack library. The sparse matrix A must be squared and full rank.\n  * The vectors or matrices X and B can be either dense or sparse.\n  *\n  * \\warning The input matrix A should be in a \\b compressed and \\b column-major form.\n  * Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix.\n  * \\tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<>\n  *\n  * \\implsparsesolverconcept\n  *\n  * \\sa \\ref TutorialSparseSolverConcept, class SparseLU\n  */\ntemplate<typename _MatrixType>\nclass UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >\n{\n  protected:\n    typedef SparseSolverBase<UmfPackLU<_MatrixType> > Base;\n    using Base::m_isInitialized;\n  public:\n    using Base::_solve_impl;\n    typedef _MatrixType MatrixType;\n    typedef typename MatrixType::Scalar Scalar;\n    typedef typename MatrixType::RealScalar RealScalar;\n    typedef typename MatrixType::StorageIndex StorageIndex;\n    typedef Matrix<Scalar,Dynamic,1> Vector;\n    typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;\n    typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;\n    typedef SparseMatrix<Scalar> LUMatrixType;\n    typedef SparseMatrix<Scalar,ColMajor,int> UmfpackMatrixType;\n    typedef Ref<const UmfpackMatrixType, StandardCompressedFormat> UmfpackMatrixRef;\n    enum {\n      ColsAtCompileTime = MatrixType::ColsAtCompileTime,\n      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime\n    };\n\n  public:\n\n    typedef Array<double, UMFPACK_CONTROL, 1> UmfpackControl;\n    typedef Array<double, UMFPACK_INFO, 1> UmfpackInfo;\n\n    UmfPackLU()\n      : 
m_dummy(0,0), mp_matrix(m_dummy)\n    {\n      init();\n    }\n\n    template<typename InputMatrixType>\n    explicit UmfPackLU(const InputMatrixType& matrix)\n      : mp_matrix(matrix)\n    {\n      init();\n      compute(matrix);\n    }\n\n    ~UmfPackLU()\n    {\n      if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());\n      if(m_numeric)  umfpack_free_numeric(&m_numeric,Scalar());\n    }\n\n    inline Index rows() const { return mp_matrix.rows(); }\n    inline Index cols() const { return mp_matrix.cols(); }\n\n    /** \\brief Reports whether previous computation was successful.\n      *\n      * \\returns \\c Success if computation was succesful,\n      *          \\c NumericalIssue if the matrix.appears to be negative.\n      */\n    ComputationInfo info() const\n    {\n      eigen_assert(m_isInitialized && \"Decomposition is not initialized.\");\n      return m_info;\n    }\n\n    inline const LUMatrixType& matrixL() const\n    {\n      if (m_extractedDataAreDirty) extractData();\n      return m_l;\n    }\n\n    inline const LUMatrixType& matrixU() const\n    {\n      if (m_extractedDataAreDirty) extractData();\n      return m_u;\n    }\n\n    inline const IntColVectorType& permutationP() const\n    {\n      if (m_extractedDataAreDirty) extractData();\n      return m_p;\n    }\n\n    inline const IntRowVectorType& permutationQ() const\n    {\n      if (m_extractedDataAreDirty) extractData();\n      return m_q;\n    }\n\n    /** Computes the sparse Cholesky decomposition of \\a matrix\n     *  Note that the matrix should be column-major, and in compressed format for best performance.\n     *  \\sa SparseMatrix::makeCompressed().\n     */\n    template<typename InputMatrixType>\n    void compute(const InputMatrixType& matrix)\n    {\n      if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());\n      if(m_numeric)  umfpack_free_numeric(&m_numeric,Scalar());\n      grab(matrix.derived());\n      analyzePattern_impl();\n      factorize_impl();\n    
}\n\n    /** Performs a symbolic decomposition on the sparcity of \\a matrix.\n      *\n      * This function is particularly useful when solving for several problems having the same structure.\n      *\n      * \\sa factorize(), compute()\n      */\n    template<typename InputMatrixType>\n    void analyzePattern(const InputMatrixType& matrix)\n    {\n      if(m_symbolic) umfpack_free_symbolic(&m_symbolic,Scalar());\n      if(m_numeric)  umfpack_free_numeric(&m_numeric,Scalar());\n\n      grab(matrix.derived());\n\n      analyzePattern_impl();\n    }\n\n    /** Provides the return status code returned by UmfPack during the numeric\n      * factorization.\n      *\n      * \\sa factorize(), compute()\n      */\n    inline int umfpackFactorizeReturncode() const\n    {\n      eigen_assert(m_numeric && \"UmfPackLU: you must first call factorize()\");\n      return m_fact_errorCode;\n    }\n\n    /** Provides access to the control settings array used by UmfPack.\n      *\n      * If this array contains NaN's, the default values are used.\n      *\n      * See UMFPACK documentation for details.\n      */\n    inline const UmfpackControl& umfpackControl() const\n    {\n      return m_control;\n    }\n\n    /** Provides access to the control settings array used by UmfPack.\n      *\n      * If this array contains NaN's, the default values are used.\n      *\n      * See UMFPACK documentation for details.\n      */\n    inline UmfpackControl& umfpackControl()\n    {\n      return m_control;\n    }\n\n    /** Performs a numeric decomposition of \\a matrix\n      *\n      * The given matrix must has the same sparcity than the matrix on which the pattern anylysis has been performed.\n      *\n      * \\sa analyzePattern(), compute()\n      */\n    template<typename InputMatrixType>\n    void factorize(const InputMatrixType& matrix)\n    {\n      eigen_assert(m_analysisIsOk && \"UmfPackLU: you must first call analyzePattern()\");\n      if(m_numeric)\n        
umfpack_free_numeric(&m_numeric,Scalar());\n\n      grab(matrix.derived());\n\n      factorize_impl();\n    }\n\n    /** Prints the current UmfPack control settings.\n      *\n      * \\sa umfpackControl()\n      */\n    void printUmfpackControl()\n    {\n      umfpack_report_control(m_control.data(), Scalar());\n    }\n\n    /** Prints statistics collected by UmfPack.\n      *\n      * \\sa analyzePattern(), compute()\n      */\n    void printUmfpackInfo()\n    {\n      eigen_assert(m_analysisIsOk && \"UmfPackLU: you must first call analyzePattern()\");\n      umfpack_report_info(m_control.data(), m_umfpackInfo.data(), Scalar());\n    }\n\n    /** Prints the status of the previous factorization operation performed by UmfPack (symbolic or numerical factorization).\n      *\n      * \\sa analyzePattern(), compute()\n      */\n    void printUmfpackStatus() {\n      eigen_assert(m_analysisIsOk && \"UmfPackLU: you must first call analyzePattern()\");\n      umfpack_report_status(m_control.data(), m_fact_errorCode, Scalar());\n    }\n\n    /** \\internal */\n    template<typename BDerived,typename XDerived>\n    bool _solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const;\n\n    Scalar determinant() const;\n\n    void extractData() const;\n\n  protected:\n\n    void init()\n    {\n      m_info                  = InvalidInput;\n      m_isInitialized         = false;\n      m_numeric               = 0;\n      m_symbolic              = 0;\n      m_extractedDataAreDirty = true;\n\n      umfpack_defaults(m_control.data(), Scalar());\n    }\n\n    void analyzePattern_impl()\n    {\n      m_fact_errorCode = umfpack_symbolic(internal::convert_index<int>(mp_matrix.rows()),\n                                          internal::convert_index<int>(mp_matrix.cols()),\n                                          mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(),\n                                          &m_symbolic, m_control.data(), 
m_umfpackInfo.data());\n\n      m_isInitialized = true;\n      m_info = m_fact_errorCode ? InvalidInput : Success;\n      m_analysisIsOk = true;\n      m_factorizationIsOk = false;\n      m_extractedDataAreDirty = true;\n    }\n\n    void factorize_impl()\n    {\n\n      m_fact_errorCode = umfpack_numeric(mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(),\n                                         m_symbolic, &m_numeric, m_control.data(), m_umfpackInfo.data());\n\n      m_info = m_fact_errorCode == UMFPACK_OK ? Success : NumericalIssue;\n      m_factorizationIsOk = true;\n      m_extractedDataAreDirty = true;\n    }\n\n    template<typename MatrixDerived>\n    void grab(const EigenBase<MatrixDerived> &A)\n    {\n      mp_matrix.~UmfpackMatrixRef();\n      ::new (&mp_matrix) UmfpackMatrixRef(A.derived());\n    }\n\n    void grab(const UmfpackMatrixRef &A)\n    {\n      if(&(A.derived()) != &mp_matrix)\n      {\n        mp_matrix.~UmfpackMatrixRef();\n        ::new (&mp_matrix) UmfpackMatrixRef(A);\n      }\n    }\n\n    // cached data to reduce reallocation, etc.\n    mutable LUMatrixType m_l;\n    int m_fact_errorCode;\n    UmfpackControl m_control;\n    mutable UmfpackInfo m_umfpackInfo;\n\n    mutable LUMatrixType m_u;\n    mutable IntColVectorType m_p;\n    mutable IntRowVectorType m_q;\n\n    UmfpackMatrixType m_dummy;\n    UmfpackMatrixRef mp_matrix;\n\n    void* m_numeric;\n    void* m_symbolic;\n\n    mutable ComputationInfo m_info;\n    int m_factorizationIsOk;\n    int m_analysisIsOk;\n    mutable bool m_extractedDataAreDirty;\n\n  private:\n    UmfPackLU(const UmfPackLU& ) { }\n};\n\n\ntemplate<typename MatrixType>\nvoid UmfPackLU<MatrixType>::extractData() const\n{\n  if (m_extractedDataAreDirty)\n  {\n    // get size of the data\n    int lnz, unz, rows, cols, nz_udiag;\n    umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar());\n\n    // allocate data\n    m_l.resize(rows,(std::min)(rows,cols));\n    
m_l.resizeNonZeros(lnz);\n\n    m_u.resize((std::min)(rows,cols),cols);\n    m_u.resizeNonZeros(unz);\n\n    m_p.resize(rows);\n    m_q.resize(cols);\n\n    // extract\n    umfpack_get_numeric(m_l.outerIndexPtr(), m_l.innerIndexPtr(), m_l.valuePtr(),\n                        m_u.outerIndexPtr(), m_u.innerIndexPtr(), m_u.valuePtr(),\n                        m_p.data(), m_q.data(), 0, 0, 0, m_numeric);\n\n    m_extractedDataAreDirty = false;\n  }\n}\n\ntemplate<typename MatrixType>\ntypename UmfPackLU<MatrixType>::Scalar UmfPackLU<MatrixType>::determinant() const\n{\n  Scalar det;\n  umfpack_get_determinant(&det, 0, m_numeric, 0);\n  return det;\n}\n\ntemplate<typename MatrixType>\ntemplate<typename BDerived,typename XDerived>\nbool UmfPackLU<MatrixType>::_solve_impl(const MatrixBase<BDerived> &b, MatrixBase<XDerived> &x) const\n{\n  Index rhsCols = b.cols();\n  eigen_assert((BDerived::Flags&RowMajorBit)==0 && \"UmfPackLU backend does not support non col-major rhs yet\");\n  eigen_assert((XDerived::Flags&RowMajorBit)==0 && \"UmfPackLU backend does not support non col-major result yet\");\n  eigen_assert(b.derived().data() != x.derived().data() && \" Umfpack does not support inplace solve\");\n\n  int errorCode;\n  Scalar* x_ptr = 0;\n  Matrix<Scalar,Dynamic,1> x_tmp;\n  if(x.innerStride()!=1)\n  {\n    x_tmp.resize(x.rows());\n    x_ptr = x_tmp.data();\n  }\n  for (int j=0; j<rhsCols; ++j)\n  {\n    if(x.innerStride()==1)\n      x_ptr = &x.col(j).coeffRef(0);\n    errorCode = umfpack_solve(UMFPACK_A,\n        mp_matrix.outerIndexPtr(), mp_matrix.innerIndexPtr(), mp_matrix.valuePtr(),\n        x_ptr, &b.const_cast_derived().col(j).coeffRef(0), m_numeric, m_control.data(), m_umfpackInfo.data());\n    if(x.innerStride()!=1)\n      x.col(j) = x_tmp;\n    if (errorCode!=0)\n      return false;\n  }\n\n  return true;\n}\n\n} // end namespace Eigen\n\n#endif // EIGEN_UMFPACKSUPPORT_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/misc/Image.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_MISC_IMAGE_H\n#define EIGEN_MISC_IMAGE_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/** \\class image_retval_base\n  *\n  */\ntemplate<typename DecompositionType>\nstruct traits<image_retval_base<DecompositionType> >\n{\n  typedef typename DecompositionType::MatrixType MatrixType;\n  typedef Matrix<\n    typename MatrixType::Scalar,\n    MatrixType::RowsAtCompileTime, // the image is a subspace of the destination space, whose\n                                   // dimension is the number of rows of the original matrix\n    Dynamic,                       // we don't know at compile time the dimension of the image (the rank)\n    MatrixType::Options,\n    MatrixType::MaxRowsAtCompileTime, // the image matrix will consist of columns from the original matrix,\n    MatrixType::MaxColsAtCompileTime  // so it has the same number of rows and at most as many columns.\n  > ReturnType;\n};\n\ntemplate<typename _DecompositionType> struct image_retval_base\n : public ReturnByValue<image_retval_base<_DecompositionType> >\n{\n  typedef _DecompositionType DecompositionType;\n  typedef typename DecompositionType::MatrixType MatrixType;\n  typedef ReturnByValue<image_retval_base> Base;\n\n  image_retval_base(const DecompositionType& dec, const MatrixType& originalMatrix)\n    : m_dec(dec), m_rank(dec.rank()),\n      m_cols(m_rank == 0 ? 
1 : m_rank),\n      m_originalMatrix(originalMatrix)\n  {}\n\n  inline Index rows() const { return m_dec.rows(); }\n  inline Index cols() const { return m_cols; }\n  inline Index rank() const { return m_rank; }\n  inline const DecompositionType& dec() const { return m_dec; }\n  inline const MatrixType& originalMatrix() const { return m_originalMatrix; }\n\n  template<typename Dest> inline void evalTo(Dest& dst) const\n  {\n    static_cast<const image_retval<DecompositionType>*>(this)->evalTo(dst);\n  }\n\n  protected:\n    const DecompositionType& m_dec;\n    Index m_rank, m_cols;\n    const MatrixType& m_originalMatrix;\n};\n\n} // end namespace internal\n\n#define EIGEN_MAKE_IMAGE_HELPERS(DecompositionType) \\\n  typedef typename DecompositionType::MatrixType MatrixType; \\\n  typedef typename MatrixType::Scalar Scalar; \\\n  typedef typename MatrixType::RealScalar RealScalar; \\\n  typedef Eigen::internal::image_retval_base<DecompositionType> Base; \\\n  using Base::dec; \\\n  using Base::originalMatrix; \\\n  using Base::rank; \\\n  using Base::rows; \\\n  using Base::cols; \\\n  image_retval(const DecompositionType& dec, const MatrixType& originalMatrix) \\\n    : Base(dec, originalMatrix) {}\n\n} // end namespace Eigen\n\n#endif // EIGEN_MISC_IMAGE_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/misc/Kernel.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_MISC_KERNEL_H\n#define EIGEN_MISC_KERNEL_H\n\nnamespace Eigen { \n\nnamespace internal {\n\n/** \\class kernel_retval_base\n  *\n  */\ntemplate<typename DecompositionType>\nstruct traits<kernel_retval_base<DecompositionType> >\n{\n  typedef typename DecompositionType::MatrixType MatrixType;\n  typedef Matrix<\n    typename MatrixType::Scalar,\n    MatrixType::ColsAtCompileTime, // the number of rows in the \"kernel matrix\"\n                                   // is the number of cols of the original matrix\n                                   // so that the product \"matrix * kernel = zero\" makes sense\n    Dynamic,                       // we don't know at compile-time the dimension of the kernel\n    MatrixType::Options,\n    MatrixType::MaxColsAtCompileTime, // see explanation for 2nd template parameter\n    MatrixType::MaxColsAtCompileTime // the kernel is a subspace of the domain space,\n                                     // whose dimension is the number of columns of the original matrix\n  > ReturnType;\n};\n\ntemplate<typename _DecompositionType> struct kernel_retval_base\n : public ReturnByValue<kernel_retval_base<_DecompositionType> >\n{\n  typedef _DecompositionType DecompositionType;\n  typedef ReturnByValue<kernel_retval_base> Base;\n\n  explicit kernel_retval_base(const DecompositionType& dec)\n    : m_dec(dec),\n      m_rank(dec.rank()),\n      m_cols(m_rank==dec.cols() ? 
1 : dec.cols() - m_rank)\n  {}\n\n  inline Index rows() const { return m_dec.cols(); }\n  inline Index cols() const { return m_cols; }\n  inline Index rank() const { return m_rank; }\n  inline const DecompositionType& dec() const { return m_dec; }\n\n  template<typename Dest> inline void evalTo(Dest& dst) const\n  {\n    static_cast<const kernel_retval<DecompositionType>*>(this)->evalTo(dst);\n  }\n\n  protected:\n    const DecompositionType& m_dec;\n    Index m_rank, m_cols;\n};\n\n} // end namespace internal\n\n#define EIGEN_MAKE_KERNEL_HELPERS(DecompositionType) \\\n  typedef typename DecompositionType::MatrixType MatrixType; \\\n  typedef typename MatrixType::Scalar Scalar; \\\n  typedef typename MatrixType::RealScalar RealScalar; \\\n  typedef Eigen::internal::kernel_retval_base<DecompositionType> Base; \\\n  using Base::dec; \\\n  using Base::rank; \\\n  using Base::rows; \\\n  using Base::cols; \\\n  kernel_retval(const DecompositionType& dec) : Base(dec) {}\n\n} // end namespace Eigen\n\n#endif // EIGEN_MISC_KERNEL_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/misc/RealSvd2x2.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n// Copyright (C) 2013-2016 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_REALSVD2X2_H\n#define EIGEN_REALSVD2X2_H\n\nnamespace Eigen {\n\nnamespace internal {\n\ntemplate<typename MatrixType, typename RealScalar, typename Index>\nvoid real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,\n                         JacobiRotation<RealScalar> *j_left,\n                         JacobiRotation<RealScalar> *j_right)\n{\n  using std::sqrt;\n  using std::abs;\n  Matrix<RealScalar,2,2> m;\n  m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),\n       numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));\n  JacobiRotation<RealScalar> rot1;\n  RealScalar t = m.coeff(0,0) + m.coeff(1,1);\n  RealScalar d = m.coeff(1,0) - m.coeff(0,1);\n\n  if(abs(d) < (std::numeric_limits<RealScalar>::min)())\n  {\n    rot1.s() = RealScalar(0);\n    rot1.c() = RealScalar(1);\n  }\n  else\n  {\n    // If d!=0, then t/d cannot overflow because the magnitude of the\n    // entries forming d are not too small compared to the ones forming t.\n    RealScalar u = t / d;\n    RealScalar tmp = sqrt(RealScalar(1) + numext::abs2(u));\n    rot1.s() = RealScalar(1) / tmp;\n    rot1.c() = u / tmp;\n  }\n  m.applyOnTheLeft(0,1,rot1);\n  j_right->makeJacobi(m,0,1);\n  *j_left = rot1 * j_right->transpose();\n}\n\n} // end namespace internal\n\n} // end namespace Eigen\n\n#endif // EIGEN_REALSVD2X2_H\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/misc/blas.h",
    "content": "#ifndef BLAS_H\n#define BLAS_H\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n#define BLASFUNC(FUNC) FUNC##_\n\n#ifdef __WIN64__\ntypedef long long BLASLONG;\ntypedef unsigned long long BLASULONG;\n#else\ntypedef long BLASLONG;\ntypedef unsigned long BLASULONG;\n#endif\n\nint    BLASFUNC(xerbla)(const char *, int *info, int);\n\nfloat  BLASFUNC(sdot)  (int *, float  *, int *, float  *, int *);\nfloat  BLASFUNC(sdsdot)(int *, float  *,        float  *, int *, float  *, int *);\n\ndouble BLASFUNC(dsdot) (int *, float  *, int *, float  *, int *);\ndouble BLASFUNC(ddot)  (int *, double *, int *, double *, int *);\ndouble BLASFUNC(qdot)  (int *, double *, int *, double *, int *);\n\nint  BLASFUNC(cdotuw)  (int *, float  *, int *, float  *, int *, float*);\nint  BLASFUNC(cdotcw)  (int *, float  *, int *, float  *, int *, float*);\nint  BLASFUNC(zdotuw)  (int *, double  *, int *, double  *, int *, double*);\nint  BLASFUNC(zdotcw)  (int *, double  *, int *, double  *, int *, double*);\n\nint    BLASFUNC(saxpy) (const int *, const float  *, const float  *, const int *, float  *, const int *);\nint    BLASFUNC(daxpy) (const int *, const double *, const double *, const int *, double *, const int *);\nint    BLASFUNC(qaxpy) (const int *, const double *, const double *, const int *, double *, const int *);\nint    BLASFUNC(caxpy) (const int *, const float  *, const float  *, const int *, float  *, const int *);\nint    BLASFUNC(zaxpy) (const int *, const double *, const double *, const int *, double *, const int *);\nint    BLASFUNC(xaxpy) (const int *, const double *, const double *, const int *, double *, const int *);\nint    BLASFUNC(caxpyc)(const int *, const float  *, const float  *, const int *, float  *, const int *);\nint    BLASFUNC(zaxpyc)(const int *, const double *, const double *, const int *, double *, const int *);\nint    BLASFUNC(xaxpyc)(const int *, const double *, const double *, const int *, double *, const int *);\n\nint    
BLASFUNC(scopy) (int *, float  *, int *, float  *, int *);\nint    BLASFUNC(dcopy) (int *, double *, int *, double *, int *);\nint    BLASFUNC(qcopy) (int *, double *, int *, double *, int *);\nint    BLASFUNC(ccopy) (int *, float  *, int *, float  *, int *);\nint    BLASFUNC(zcopy) (int *, double *, int *, double *, int *);\nint    BLASFUNC(xcopy) (int *, double *, int *, double *, int *);\n\nint    BLASFUNC(sswap) (int *, float  *, int *, float  *, int *);\nint    BLASFUNC(dswap) (int *, double *, int *, double *, int *);\nint    BLASFUNC(qswap) (int *, double *, int *, double *, int *);\nint    BLASFUNC(cswap) (int *, float  *, int *, float  *, int *);\nint    BLASFUNC(zswap) (int *, double *, int *, double *, int *);\nint    BLASFUNC(xswap) (int *, double *, int *, double *, int *);\n\nfloat  BLASFUNC(sasum) (int *, float  *, int *);\nfloat  BLASFUNC(scasum)(int *, float  *, int *);\ndouble BLASFUNC(dasum) (int *, double *, int *);\ndouble BLASFUNC(qasum) (int *, double *, int *);\ndouble BLASFUNC(dzasum)(int *, double *, int *);\ndouble BLASFUNC(qxasum)(int *, double *, int *);\n\nint    BLASFUNC(isamax)(int *, float  *, int *);\nint    BLASFUNC(idamax)(int *, double *, int *);\nint    BLASFUNC(iqamax)(int *, double *, int *);\nint    BLASFUNC(icamax)(int *, float  *, int *);\nint    BLASFUNC(izamax)(int *, double *, int *);\nint    BLASFUNC(ixamax)(int *, double *, int *);\n\nint    BLASFUNC(ismax) (int *, float  *, int *);\nint    BLASFUNC(idmax) (int *, double *, int *);\nint    BLASFUNC(iqmax) (int *, double *, int *);\nint    BLASFUNC(icmax) (int *, float  *, int *);\nint    BLASFUNC(izmax) (int *, double *, int *);\nint    BLASFUNC(ixmax) (int *, double *, int *);\n\nint    BLASFUNC(isamin)(int *, float  *, int *);\nint    BLASFUNC(idamin)(int *, double *, int *);\nint    BLASFUNC(iqamin)(int *, double *, int *);\nint    BLASFUNC(icamin)(int *, float  *, int *);\nint    BLASFUNC(izamin)(int *, double *, int *);\nint    BLASFUNC(ixamin)(int *, double *, 
int *);\n\nint    BLASFUNC(ismin)(int *, float  *, int *);\nint    BLASFUNC(idmin)(int *, double *, int *);\nint    BLASFUNC(iqmin)(int *, double *, int *);\nint    BLASFUNC(icmin)(int *, float  *, int *);\nint    BLASFUNC(izmin)(int *, double *, int *);\nint    BLASFUNC(ixmin)(int *, double *, int *);\n\nfloat  BLASFUNC(samax) (int *, float  *, int *);\ndouble BLASFUNC(damax) (int *, double *, int *);\ndouble BLASFUNC(qamax) (int *, double *, int *);\nfloat  BLASFUNC(scamax)(int *, float  *, int *);\ndouble BLASFUNC(dzamax)(int *, double *, int *);\ndouble BLASFUNC(qxamax)(int *, double *, int *);\n\nfloat  BLASFUNC(samin) (int *, float  *, int *);\ndouble BLASFUNC(damin) (int *, double *, int *);\ndouble BLASFUNC(qamin) (int *, double *, int *);\nfloat  BLASFUNC(scamin)(int *, float  *, int *);\ndouble BLASFUNC(dzamin)(int *, double *, int *);\ndouble BLASFUNC(qxamin)(int *, double *, int *);\n\nfloat  BLASFUNC(smax)  (int *, float  *, int *);\ndouble BLASFUNC(dmax)  (int *, double *, int *);\ndouble BLASFUNC(qmax)  (int *, double *, int *);\nfloat  BLASFUNC(scmax) (int *, float  *, int *);\ndouble BLASFUNC(dzmax) (int *, double *, int *);\ndouble BLASFUNC(qxmax) (int *, double *, int *);\n\nfloat  BLASFUNC(smin)  (int *, float  *, int *);\ndouble BLASFUNC(dmin)  (int *, double *, int *);\ndouble BLASFUNC(qmin)  (int *, double *, int *);\nfloat  BLASFUNC(scmin) (int *, float  *, int *);\ndouble BLASFUNC(dzmin) (int *, double *, int *);\ndouble BLASFUNC(qxmin) (int *, double *, int *);\n\nint    BLASFUNC(sscal) (int *,  float  *, float  *, int *);\nint    BLASFUNC(dscal) (int *,  double *, double *, int *);\nint    BLASFUNC(qscal) (int *,  double *, double *, int *);\nint    BLASFUNC(cscal) (int *,  float  *, float  *, int *);\nint    BLASFUNC(zscal) (int *,  double *, double *, int *);\nint    BLASFUNC(xscal) (int *,  double *, double *, int *);\nint    BLASFUNC(csscal)(int *,  float  *, float  *, int *);\nint    BLASFUNC(zdscal)(int *,  double *, double *, int 
*);\nint    BLASFUNC(xqscal)(int *,  double *, double *, int *);\n\nfloat  BLASFUNC(snrm2) (int *, float  *, int *);\nfloat  BLASFUNC(scnrm2)(int *, float  *, int *);\n\ndouble BLASFUNC(dnrm2) (int *, double *, int *);\ndouble BLASFUNC(qnrm2) (int *, double *, int *);\ndouble BLASFUNC(dznrm2)(int *, double *, int *);\ndouble BLASFUNC(qxnrm2)(int *, double *, int *);\n\nint    BLASFUNC(srot)  (int *, float  *, int *, float  *, int *, float  *, float  *);\nint    BLASFUNC(drot)  (int *, double *, int *, double *, int *, double *, double *);\nint    BLASFUNC(qrot)  (int *, double *, int *, double *, int *, double *, double *);\nint    BLASFUNC(csrot) (int *, float  *, int *, float  *, int *, float  *, float  *);\nint    BLASFUNC(zdrot) (int *, double *, int *, double *, int *, double *, double *);\nint    BLASFUNC(xqrot) (int *, double *, int *, double *, int *, double *, double *);\n\nint    BLASFUNC(srotg) (float  *, float  *, float  *, float  *);\nint    BLASFUNC(drotg) (double *, double *, double *, double *);\nint    BLASFUNC(qrotg) (double *, double *, double *, double *);\nint    BLASFUNC(crotg) (float  *, float  *, float  *, float  *);\nint    BLASFUNC(zrotg) (double *, double *, double *, double *);\nint    BLASFUNC(xrotg) (double *, double *, double *, double *);\n\nint    BLASFUNC(srotmg)(float  *, float  *, float  *, float  *, float  *);\nint    BLASFUNC(drotmg)(double *, double *, double *, double *, double *);\n\nint    BLASFUNC(srotm) (int *, float  *, int *, float  *, int *, float  *);\nint    BLASFUNC(drotm) (int *, double *, int *, double *, int *, double *);\nint    BLASFUNC(qrotm) (int *, double *, int *, double *, int *, double *);\n\n/* Level 2 routines */\n\nint BLASFUNC(sger)(int *,    int *, float *,  float *, int *,\n\t\t   float *,  int *, float *,  int *);\nint BLASFUNC(dger)(int *,    int *, double *, double *, int *,\n\t\t   double *, int *, double *, int *);\nint BLASFUNC(qger)(int *,    int *, double *, double *, int *,\n\t\t   double 
*, int *, double *, int *);\nint BLASFUNC(cgeru)(int *,    int *, float *,  float *, int *,\n\t\t    float *,  int *, float *,  int *);\nint BLASFUNC(cgerc)(int *,    int *, float *,  float *, int *,\n\t\t    float *,  int *, float *,  int *);\nint BLASFUNC(zgeru)(int *,    int *, double *, double *, int *,\n\t\t    double *, int *, double *, int *);\nint BLASFUNC(zgerc)(int *,    int *, double *, double *, int *,\n\t\t    double *, int *, double *, int *);\nint BLASFUNC(xgeru)(int *,    int *, double *, double *, int *,\n\t\t    double *, int *, double *, int *);\nint BLASFUNC(xgerc)(int *,    int *, double *, double *, int *,\n\t\t    double *, int *, double *, int *);\n\nint BLASFUNC(sgemv)(const char *, const int *, const int *, const float  *, const float  *, const int *, const float  *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(dgemv)(const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(qgemv)(const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(cgemv)(const char *, const int *, const int *, const float  *, const float  *, const int *, const float  *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(zgemv)(const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(xgemv)(const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\n\nint BLASFUNC(strsv) (const char *, const char *, const char *, const int *, const float  *, const int *, float  *, const int *);\nint BLASFUNC(dtrsv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);\nint 
BLASFUNC(qtrsv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);\nint BLASFUNC(ctrsv) (const char *, const char *, const char *, const int *, const float  *, const int *, float  *, const int *);\nint BLASFUNC(ztrsv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);\nint BLASFUNC(xtrsv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);\n\nint BLASFUNC(stpsv) (char *, char *, char *, int *, float  *, float  *, int *);\nint BLASFUNC(dtpsv) (char *, char *, char *, int *, double *, double *, int *);\nint BLASFUNC(qtpsv) (char *, char *, char *, int *, double *, double *, int *);\nint BLASFUNC(ctpsv) (char *, char *, char *, int *, float  *, float  *, int *);\nint BLASFUNC(ztpsv) (char *, char *, char *, int *, double *, double *, int *);\nint BLASFUNC(xtpsv) (char *, char *, char *, int *, double *, double *, int *);\n\nint BLASFUNC(strmv) (const char *, const char *, const char *, const int *, const float  *, const int *, float  *, const int *);\nint BLASFUNC(dtrmv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);\nint BLASFUNC(qtrmv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);\nint BLASFUNC(ctrmv) (const char *, const char *, const char *, const int *, const float  *, const int *, float  *, const int *);\nint BLASFUNC(ztrmv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);\nint BLASFUNC(xtrmv) (const char *, const char *, const char *, const int *, const double *, const int *, double *, const int *);\n\nint BLASFUNC(stpmv) (char *, char *, char *, int *, float  *, float  *, int *);\nint BLASFUNC(dtpmv) (char *, char *, char *, int *, double *, double *, int *);\nint BLASFUNC(qtpmv) (char *, char *, char *, int 
*, double *, double *, int *);\nint BLASFUNC(ctpmv) (char *, char *, char *, int *, float  *, float  *, int *);\nint BLASFUNC(ztpmv) (char *, char *, char *, int *, double *, double *, int *);\nint BLASFUNC(xtpmv) (char *, char *, char *, int *, double *, double *, int *);\n\nint BLASFUNC(stbmv) (char *, char *, char *, int *, int *, float  *, int *, float  *, int *);\nint BLASFUNC(dtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);\nint BLASFUNC(qtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);\nint BLASFUNC(ctbmv) (char *, char *, char *, int *, int *, float  *, int *, float  *, int *);\nint BLASFUNC(ztbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);\nint BLASFUNC(xtbmv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);\n\nint BLASFUNC(stbsv) (char *, char *, char *, int *, int *, float  *, int *, float  *, int *);\nint BLASFUNC(dtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);\nint BLASFUNC(qtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);\nint BLASFUNC(ctbsv) (char *, char *, char *, int *, int *, float  *, int *, float  *, int *);\nint BLASFUNC(ztbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);\nint BLASFUNC(xtbsv) (char *, char *, char *, int *, int *, double *, int *, double *, int *);\n\nint BLASFUNC(ssymv) (const char *, const int *, const float  *, const float  *, const int *, const float  *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(dsymv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(qsymv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\n\nint BLASFUNC(sspmv) (char *, int *, float  *, float *,\n\t\t     float  *, int *, float 
*, float *, int *);\nint BLASFUNC(dspmv) (char *, int *, double  *, double *,\n\t\t     double  *, int *, double *, double *, int *);\nint BLASFUNC(qspmv) (char *, int *, double  *, double *,\n\t\t     double  *, int *, double *, double *, int *);\n\nint BLASFUNC(ssyr) (const char *, const int *, const float   *, const float  *, const int *, float  *, const int *);\nint BLASFUNC(dsyr) (const char *, const int *, const double  *, const double *, const int *, double *, const int *);\nint BLASFUNC(qsyr) (const char *, const int *, const double  *, const double *, const int *, double *, const int *);\n\nint BLASFUNC(ssyr2) (const char *, const int *, const float   *, const float  *, const int *, const float  *, const int *, float  *, const int *);\nint BLASFUNC(dsyr2) (const char *, const int *, const double  *, const double *, const int *, const double *, const int *, double *, const int *);\nint BLASFUNC(qsyr2) (const char *, const int *, const double  *, const double *, const int *, const double *, const int *, double *, const int *);\nint BLASFUNC(csyr2) (const char *, const int *, const float   *, const float  *, const int *, const float  *, const int *, float  *, const int *);\nint BLASFUNC(zsyr2) (const char *, const int *, const double  *, const double *, const int *, const double *, const int *, double *, const int *);\nint BLASFUNC(xsyr2) (const char *, const int *, const double  *, const double *, const int *, const double *, const int *, double *, const int *);\n\nint BLASFUNC(sspr) (char *, int *, float   *, float  *, int *,\n\t\t    float  *);\nint BLASFUNC(dspr) (char *, int *, double  *, double *, int *,\n\t\t    double *);\nint BLASFUNC(qspr) (char *, int *, double  *, double *, int *,\n\t\t    double *);\n\nint BLASFUNC(sspr2) (char *, int *, float   *,\n\t\t     float  *, int *, float  *, int *, float  *);\nint BLASFUNC(dspr2) (char *, int *, double  *,\n\t\t     double *, int *, double *, int *, double *);\nint BLASFUNC(qspr2) (char *, int *, double 
 *,\n\t\t     double *, int *, double *, int *, double *);\nint BLASFUNC(cspr2) (char *, int *, float   *,\n\t\t     float  *, int *, float  *, int *, float  *);\nint BLASFUNC(zspr2) (char *, int *, double  *,\n\t\t     double *, int *, double *, int *, double *);\nint BLASFUNC(xspr2) (char *, int *, double  *,\n\t\t     double *, int *, double *, int *, double *);\n\nint BLASFUNC(cher) (char *, int *, float   *, float  *, int *,\n\t\t    float  *, int *);\nint BLASFUNC(zher) (char *, int *, double  *, double *, int *,\n\t\t    double *, int *);\nint BLASFUNC(xher) (char *, int *, double  *, double *, int *,\n\t\t    double *, int *);\n\nint BLASFUNC(chpr) (char *, int *, float   *, float  *, int *, float  *);\nint BLASFUNC(zhpr) (char *, int *, double  *, double *, int *, double *);\nint BLASFUNC(xhpr) (char *, int *, double  *, double *, int *, double *);\n\nint BLASFUNC(cher2) (char *, int *, float   *,\n\t\t     float  *, int *, float  *, int *, float  *, int *);\nint BLASFUNC(zher2) (char *, int *, double  *,\n\t\t     double *, int *, double *, int *, double *, int *);\nint BLASFUNC(xher2) (char *, int *, double  *,\n\t\t     double *, int *, double *, int *, double *, int *);\n\nint BLASFUNC(chpr2) (char *, int *, float   *,\n\t\t     float  *, int *, float  *, int *, float  *);\nint BLASFUNC(zhpr2) (char *, int *, double  *,\n\t\t     double *, int *, double *, int *, double *);\nint BLASFUNC(xhpr2) (char *, int *, double  *,\n\t\t     double *, int *, double *, int *, double *);\n\nint BLASFUNC(chemv) (const char *, const int *, const float  *, const float  *, const int *, const float  *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(zhemv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(xhemv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int 
*);\n\nint BLASFUNC(chpmv) (char *, int *, float  *, float *,\n\t\t     float  *, int *, float *, float *, int *);\nint BLASFUNC(zhpmv) (char *, int *, double  *, double *,\n\t\t     double  *, int *, double *, double *, int *);\nint BLASFUNC(xhpmv) (char *, int *, double  *, double *,\n\t\t     double  *, int *, double *, double *, int *);\n\nint BLASFUNC(snorm)(char *, int *, int *, float  *, int *);\nint BLASFUNC(dnorm)(char *, int *, int *, double *, int *);\nint BLASFUNC(cnorm)(char *, int *, int *, float  *, int *);\nint BLASFUNC(znorm)(char *, int *, int *, double *, int *);\n\nint BLASFUNC(sgbmv)(char *, int *, int *, int *, int *, float  *, float  *, int *,\n\t\t    float  *, int *, float  *, float  *, int *);\nint BLASFUNC(dgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,\n\t\t    double *, int *, double *, double *, int *);\nint BLASFUNC(qgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,\n\t\t    double *, int *, double *, double *, int *);\nint BLASFUNC(cgbmv)(char *, int *, int *, int *, int *, float  *, float  *, int *,\n\t\t    float  *, int *, float  *, float  *, int *);\nint BLASFUNC(zgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,\n\t\t    double *, int *, double *, double *, int *);\nint BLASFUNC(xgbmv)(char *, int *, int *, int *, int *, double *, double *, int *,\n\t\t    double *, int *, double *, double *, int *);\n\nint BLASFUNC(ssbmv)(char *, int *, int *, float  *, float  *, int *,\n\t\t    float  *, int *, float  *, float  *, int *);\nint BLASFUNC(dsbmv)(char *, int *, int *, double *, double *, int *,\n\t\t    double *, int *, double *, double *, int *);\nint BLASFUNC(qsbmv)(char *, int *, int *, double *, double *, int *,\n\t\t    double *, int *, double *, double *, int *);\nint BLASFUNC(csbmv)(char *, int *, int *, float  *, float  *, int *,\n\t\t    float  *, int *, float  *, float  *, int *);\nint BLASFUNC(zsbmv)(char *, int *, int *, double *, double *, int *,\n\t\t    
double *, int *, double *, double *, int *);\nint BLASFUNC(xsbmv)(char *, int *, int *, double *, double *, int *,\n\t\t    double *, int *, double *, double *, int *);\n\nint BLASFUNC(chbmv)(char *, int *, int *, float  *, float  *, int *,\n\t\t    float  *, int *, float  *, float  *, int *);\nint BLASFUNC(zhbmv)(char *, int *, int *, double *, double *, int *,\n\t\t    double *, int *, double *, double *, int *);\nint BLASFUNC(xhbmv)(char *, int *, int *, double *, double *, int *,\n\t\t    double *, int *, double *, double *, int *);\n\n/* Level 3 routines */\n\nint BLASFUNC(sgemm)(const char *, const char *, const int *, const int *, const int *, const float  *, const float  *, const int *, const float  *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(dgemm)(const char *, const char *, const int *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(qgemm)(const char *, const char *, const int *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(cgemm)(const char *, const char *, const int *, const int *, const int *, const float  *, const float  *, const int *, const float  *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(zgemm)(const char *, const char *, const int *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(xgemm)(const char *, const char *, const int *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\n\nint BLASFUNC(cgemm3m)(char *, char *, int *, int *, int *, float *,\n\t   float  *, int *, float  *, int *, float  *, float  *, int *);\nint BLASFUNC(zgemm3m)(char *, char *, int *, int *, int *, double *,\n\t   
double *, int *, double *, int *, double *, double *, int *);\nint BLASFUNC(xgemm3m)(char *, char *, int *, int *, int *, double *,\n\t   double *, int *, double *, int *, double *, double *, int *);\n\nint BLASFUNC(sge2mm)(char *, char *, char *, int *, int *,\n\t\t     float *, float  *, int *, float  *, int *,\n\t\t     float *, float  *, int *);\nint BLASFUNC(dge2mm)(char *, char *, char *, int *, int *,\n\t\t     double *, double  *, int *, double  *, int *,\n\t\t     double *, double  *, int *);\nint BLASFUNC(cge2mm)(char *, char *, char *, int *, int *,\n\t\t     float *, float  *, int *, float  *, int *,\n\t\t     float *, float  *, int *);\nint BLASFUNC(zge2mm)(char *, char *, char *, int *, int *,\n\t\t     double *, double  *, int *, double  *, int *,\n\t\t     double *, double  *, int *);\n\nint BLASFUNC(strsm)(const char *, const char *, const char *, const char *, const int *, const int *, const float *,  const float *,  const int *, float *,  const int *);\nint BLASFUNC(dtrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);\nint BLASFUNC(qtrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);\nint BLASFUNC(ctrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const float *,  const float *,  const int *, float *,  const int *);\nint BLASFUNC(ztrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);\nint BLASFUNC(xtrsm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);\n\nint BLASFUNC(strmm)(const char *, const char *, const char *, const char *, const int *, const int *, const float *,  const float *,  const int *, 
float *,  const int *);\nint BLASFUNC(dtrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);\nint BLASFUNC(qtrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);\nint BLASFUNC(ctrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const float *,  const float *,  const int *, float *,  const int *);\nint BLASFUNC(ztrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);\nint BLASFUNC(xtrmm)(const char *, const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, double *, const int *);\n\nint BLASFUNC(ssymm)(const char *, const char *, const int *, const int *, const float  *, const float  *, const int *, const float  *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(dsymm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(qsymm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(csymm)(const char *, const char *, const int *, const int *, const float  *, const float  *, const int *, const float  *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(zsymm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(xsymm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, 
double *, const int *);\n\nint BLASFUNC(csymm3m)(char *, char *, int *, int *, float  *, float  *, int *, float  *, int *, float  *, float  *, int *);\nint BLASFUNC(zsymm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *);\nint BLASFUNC(xsymm3m)(char *, char *, int *, int *, double *, double *, int *, double *, int *, double *, double *, int *);\n\nint BLASFUNC(ssyrk)(const char *, const char *, const int *, const int *, const float  *, const float  *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(dsyrk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(qsyrk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(csyrk)(const char *, const char *, const int *, const int *, const float  *, const float  *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(zsyrk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(xsyrk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *);\n\nint BLASFUNC(ssyr2k)(const char *, const char *, const int *, const int *, const float  *, const float  *, const int *, const float *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(dsyr2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *);\nint BLASFUNC(qsyr2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *);\nint BLASFUNC(csyr2k)(const char *, const char *, const int *, 
const int *, const float  *, const float  *, const int *, const float *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(zsyr2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *);\nint BLASFUNC(xsyr2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *);\n\nint BLASFUNC(chemm)(const char *, const char *, const int *, const int *, const float  *, const float  *, const int *, const float  *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(zhemm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(xhemm)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\n\nint BLASFUNC(chemm3m)(char *, char *, int *, int *, float  *, float  *, int *,\n\t   float  *, int *, float  *, float  *, int *);\nint BLASFUNC(zhemm3m)(char *, char *, int *, int *, double *, double *, int *,\n\t   double *, int *, double *, double *, int *);\nint BLASFUNC(xhemm3m)(char *, char *, int *, int *, double *, double *, int *,\n\t   double *, int *, double *, double *, int *);\n\nint BLASFUNC(cherk)(const char *, const char *, const int *, const int *, const float  *, const float  *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(zherk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(xherk)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, double *, const int *);\n\nint BLASFUNC(cher2k)(const char *, const char 
*, const int *, const int *, const float  *, const float  *, const int *, const float  *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(zher2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(xher2k)(const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(cher2m)(const char *, const char *, const char *, const int *, const int *, const float  *, const float  *, const int *, const float *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(zher2m)(const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *);\nint BLASFUNC(xher2m)(const char *, const char *, const char *, const int *, const int *, const double *, const double *, const int *, const double*, const int *, const double *, double *, const int *);\n\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/misc/lapack.h",
    "content": "#ifndef LAPACK_H\n#define LAPACK_H\n\n#include \"blas.h\"\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\nint BLASFUNC(csymv) (const char *, const int *, const float  *, const float  *, const int *, const float  *, const int *, const float  *, float  *, const int *);\nint BLASFUNC(zsymv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\nint BLASFUNC(xsymv) (const char *, const int *, const double *, const double *, const int *, const double *, const int *, const double *, double *, const int *);\n\n\nint BLASFUNC(cspmv) (char *, int *, float  *, float *,\n         float  *, int *, float *, float *, int *);\nint BLASFUNC(zspmv) (char *, int *, double  *, double *,\n         double  *, int *, double *, double *, int *);\nint BLASFUNC(xspmv) (char *, int *, double  *, double *,\n         double  *, int *, double *, double *, int *);\n\nint BLASFUNC(csyr) (char *, int *, float   *, float  *, int *,\n        float  *, int *);\nint BLASFUNC(zsyr) (char *, int *, double  *, double *, int *,\n        double *, int *);\nint BLASFUNC(xsyr) (char *, int *, double  *, double *, int *,\n        double *, int *);\n\nint BLASFUNC(cspr) (char *, int *, float   *, float  *, int *,\n        float  *);\nint BLASFUNC(zspr) (char *, int *, double  *, double *, int *,\n        double *);\nint BLASFUNC(xspr) (char *, int *, double  *, double *, int *,\n        double *);\n\nint BLASFUNC(sgemt)(char *, int *, int *, float  *, float  *, int *,\n        float  *, int *);\nint BLASFUNC(dgemt)(char *, int *, int *, double *, double *, int *,\n        double *, int *);\nint BLASFUNC(cgemt)(char *, int *, int *, float  *, float  *, int *,\n        float  *, int *);\nint BLASFUNC(zgemt)(char *, int *, int *, double *, double *, int *,\n        double *, int *);\n\nint BLASFUNC(sgema)(char *, char *, int *, int *, float  *,\n        float  *, int *, float *, float  *, int *, float *, int 
*);\nint BLASFUNC(dgema)(char *, char *, int *, int *, double *,\n        double *, int *, double*, double *, int *, double*, int *);\nint BLASFUNC(cgema)(char *, char *, int *, int *, float  *,\n        float  *, int *, float *, float  *, int *, float *, int *);\nint BLASFUNC(zgema)(char *, char *, int *, int *, double *,\n        double *, int *, double*, double *, int *, double*, int *);\n\nint BLASFUNC(sgems)(char *, char *, int *, int *, float  *,\n        float  *, int *, float *, float  *, int *, float *, int *);\nint BLASFUNC(dgems)(char *, char *, int *, int *, double *,\n        double *, int *, double*, double *, int *, double*, int *);\nint BLASFUNC(cgems)(char *, char *, int *, int *, float  *,\n        float  *, int *, float *, float  *, int *, float *, int *);\nint BLASFUNC(zgems)(char *, char *, int *, int *, double *,\n        double *, int *, double*, double *, int *, double*, int *);\n\nint BLASFUNC(sgetf2)(int *, int *, float  *, int *, int *, int *);\nint BLASFUNC(dgetf2)(int *, int *, double *, int *, int *, int *);\nint BLASFUNC(qgetf2)(int *, int *, double *, int *, int *, int *);\nint BLASFUNC(cgetf2)(int *, int *, float  *, int *, int *, int *);\nint BLASFUNC(zgetf2)(int *, int *, double *, int *, int *, int *);\nint BLASFUNC(xgetf2)(int *, int *, double *, int *, int *, int *);\n\nint BLASFUNC(sgetrf)(int *, int *, float  *, int *, int *, int *);\nint BLASFUNC(dgetrf)(int *, int *, double *, int *, int *, int *);\nint BLASFUNC(qgetrf)(int *, int *, double *, int *, int *, int *);\nint BLASFUNC(cgetrf)(int *, int *, float  *, int *, int *, int *);\nint BLASFUNC(zgetrf)(int *, int *, double *, int *, int *, int *);\nint BLASFUNC(xgetrf)(int *, int *, double *, int *, int *, int *);\n\nint BLASFUNC(slaswp)(int *, float  *, int *, int *, int *, int *, int *);\nint BLASFUNC(dlaswp)(int *, double *, int *, int *, int *, int *, int *);\nint BLASFUNC(qlaswp)(int *, double *, int *, int *, int *, int *, int *);\nint BLASFUNC(claswp)(int *, float  
*, int *, int *, int *, int *, int *);\nint BLASFUNC(zlaswp)(int *, double *, int *, int *, int *, int *, int *);\nint BLASFUNC(xlaswp)(int *, double *, int *, int *, int *, int *, int *);\n\nint BLASFUNC(sgetrs)(char *, int *, int *, float  *, int *, int *, float  *, int *, int *);\nint BLASFUNC(dgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);\nint BLASFUNC(qgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);\nint BLASFUNC(cgetrs)(char *, int *, int *, float  *, int *, int *, float  *, int *, int *);\nint BLASFUNC(zgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);\nint BLASFUNC(xgetrs)(char *, int *, int *, double *, int *, int *, double *, int *, int *);\n\nint BLASFUNC(sgesv)(int *, int *, float  *, int *, int *, float *, int *, int *);\nint BLASFUNC(dgesv)(int *, int *, double *, int *, int *, double*, int *, int *);\nint BLASFUNC(qgesv)(int *, int *, double *, int *, int *, double*, int *, int *);\nint BLASFUNC(cgesv)(int *, int *, float  *, int *, int *, float *, int *, int *);\nint BLASFUNC(zgesv)(int *, int *, double *, int *, int *, double*, int *, int *);\nint BLASFUNC(xgesv)(int *, int *, double *, int *, int *, double*, int *, int *);\n\nint BLASFUNC(spotf2)(char *, int *, float  *, int *, int *);\nint BLASFUNC(dpotf2)(char *, int *, double *, int *, int *);\nint BLASFUNC(qpotf2)(char *, int *, double *, int *, int *);\nint BLASFUNC(cpotf2)(char *, int *, float  *, int *, int *);\nint BLASFUNC(zpotf2)(char *, int *, double *, int *, int *);\nint BLASFUNC(xpotf2)(char *, int *, double *, int *, int *);\n\nint BLASFUNC(spotrf)(char *, int *, float  *, int *, int *);\nint BLASFUNC(dpotrf)(char *, int *, double *, int *, int *);\nint BLASFUNC(qpotrf)(char *, int *, double *, int *, int *);\nint BLASFUNC(cpotrf)(char *, int *, float  *, int *, int *);\nint BLASFUNC(zpotrf)(char *, int *, double *, int *, int *);\nint BLASFUNC(xpotrf)(char *, int *, double *, int *, int 
*);\n\nint BLASFUNC(slauu2)(char *, int *, float  *, int *, int *);\nint BLASFUNC(dlauu2)(char *, int *, double *, int *, int *);\nint BLASFUNC(qlauu2)(char *, int *, double *, int *, int *);\nint BLASFUNC(clauu2)(char *, int *, float  *, int *, int *);\nint BLASFUNC(zlauu2)(char *, int *, double *, int *, int *);\nint BLASFUNC(xlauu2)(char *, int *, double *, int *, int *);\n\nint BLASFUNC(slauum)(char *, int *, float  *, int *, int *);\nint BLASFUNC(dlauum)(char *, int *, double *, int *, int *);\nint BLASFUNC(qlauum)(char *, int *, double *, int *, int *);\nint BLASFUNC(clauum)(char *, int *, float  *, int *, int *);\nint BLASFUNC(zlauum)(char *, int *, double *, int *, int *);\nint BLASFUNC(xlauum)(char *, int *, double *, int *, int *);\n\nint BLASFUNC(strti2)(char *, char *, int *, float  *, int *, int *);\nint BLASFUNC(dtrti2)(char *, char *, int *, double *, int *, int *);\nint BLASFUNC(qtrti2)(char *, char *, int *, double *, int *, int *);\nint BLASFUNC(ctrti2)(char *, char *, int *, float  *, int *, int *);\nint BLASFUNC(ztrti2)(char *, char *, int *, double *, int *, int *);\nint BLASFUNC(xtrti2)(char *, char *, int *, double *, int *, int *);\n\nint BLASFUNC(strtri)(char *, char *, int *, float  *, int *, int *);\nint BLASFUNC(dtrtri)(char *, char *, int *, double *, int *, int *);\nint BLASFUNC(qtrtri)(char *, char *, int *, double *, int *, int *);\nint BLASFUNC(ctrtri)(char *, char *, int *, float  *, int *, int *);\nint BLASFUNC(ztrtri)(char *, char *, int *, double *, int *, int *);\nint BLASFUNC(xtrtri)(char *, char *, int *, double *, int *, int *);\n\nint BLASFUNC(spotri)(char *, int *, float  *, int *, int *);\nint BLASFUNC(dpotri)(char *, int *, double *, int *, int *);\nint BLASFUNC(qpotri)(char *, int *, double *, int *, int *);\nint BLASFUNC(cpotri)(char *, int *, float  *, int *, int *);\nint BLASFUNC(zpotri)(char *, int *, double *, int *, int *);\nint BLASFUNC(xpotri)(char *, int *, double *, int *, int *);\n\n#ifdef 
__cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/misc/lapacke.h",
    "content": "/*****************************************************************************\n  Copyright (c) 2010, Intel Corp.\n  All rights reserved.\n\n  Redistribution and use in source and binary forms, with or without\n  modification, are permitted provided that the following conditions are met:\n\n    * Redistributions of source code must retain the above copyright notice,\n      this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above copyright\n      notice, this list of conditions and the following disclaimer in the\n      documentation and/or other materials provided with the distribution.\n    * Neither the name of Intel Corporation nor the names of its contributors\n      may be used to endorse or promote products derived from this software\n      without specific prior written permission.\n\n  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n  ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n  THE POSSIBILITY OF SUCH DAMAGE.\n******************************************************************************\n* Contents: Native C interface to LAPACK\n* Author: Intel Corporation\n* Generated November, 2011\n*****************************************************************************/\n\n#ifndef _MKL_LAPACKE_H_\n\n#ifndef _LAPACKE_H_\n#define _LAPACKE_H_\n\n/*\n*  Turn on HAVE_LAPACK_CONFIG_H to redefine C-LAPACK datatypes\n*/\n#ifdef HAVE_LAPACK_CONFIG_H\n#include \"lapacke_config.h\"\n#endif\n\n#include <stdlib.h>\n\n#ifndef lapack_int\n#define lapack_int     int\n#endif\n\n#ifndef lapack_logical\n#define lapack_logical lapack_int\n#endif\n\n/* Complex types are structures equivalent to the\n* Fortran complex types COMPLEX(4) and COMPLEX(8).\n*\n* One can also redefine the types with his own types\n* for example by including in the code definitions like\n*\n* #define lapack_complex_float std::complex<float>\n* #define lapack_complex_double std::complex<double>\n*\n* or define these types in the command line:\n*\n* -Dlapack_complex_float=\"std::complex<float>\"\n* -Dlapack_complex_double=\"std::complex<double>\"\n*/\n\n#ifndef LAPACK_COMPLEX_CUSTOM\n\n/* Complex type (single precision) */\n#ifndef lapack_complex_float\n#include <complex.h>\n#define lapack_complex_float    float _Complex\n#endif\n\n#ifndef lapack_complex_float_real\n#define lapack_complex_float_real(z)       (creal(z))\n#endif\n\n#ifndef lapack_complex_float_imag\n#define lapack_complex_float_imag(z) 
      (cimag(z))\n#endif\n\nlapack_complex_float lapack_make_complex_float( float re, float im );\n\n/* Complex type (double precision) */\n#ifndef lapack_complex_double\n#include <complex.h>\n#define lapack_complex_double   double _Complex\n#endif\n\n#ifndef lapack_complex_double_real\n#define lapack_complex_double_real(z)      (creal(z))\n#endif\n\n#ifndef lapack_complex_double_imag\n#define lapack_complex_double_imag(z)       (cimag(z))\n#endif\n\nlapack_complex_double lapack_make_complex_double( double re, double im );\n\n#endif\n\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif /* __cplusplus */\n\n#ifndef LAPACKE_malloc\n#define LAPACKE_malloc( size ) malloc( size )\n#endif\n#ifndef LAPACKE_free\n#define LAPACKE_free( p )      free( p )\n#endif\n\n#define LAPACK_C2INT( x ) (lapack_int)(*((float*)&x ))\n#define LAPACK_Z2INT( x ) (lapack_int)(*((double*)&x ))\n\n#define LAPACK_ROW_MAJOR               101\n#define LAPACK_COL_MAJOR               102\n\n#define LAPACK_WORK_MEMORY_ERROR       -1010\n#define LAPACK_TRANSPOSE_MEMORY_ERROR  -1011\n\n/* Callback logical functions of one, two, or three arguments are used\n*  to select eigenvalues to sort to the top left of the Schur form.\n*  The value is selected if function returns TRUE (non-zero). 
*/\n\ntypedef lapack_logical (*LAPACK_S_SELECT2) ( const float*, const float* );\ntypedef lapack_logical (*LAPACK_S_SELECT3)\n    ( const float*, const float*, const float* );\ntypedef lapack_logical (*LAPACK_D_SELECT2) ( const double*, const double* );\ntypedef lapack_logical (*LAPACK_D_SELECT3)\n    ( const double*, const double*, const double* );\n\ntypedef lapack_logical (*LAPACK_C_SELECT1) ( const lapack_complex_float* );\ntypedef lapack_logical (*LAPACK_C_SELECT2)\n    ( const lapack_complex_float*, const lapack_complex_float* );\ntypedef lapack_logical (*LAPACK_Z_SELECT1) ( const lapack_complex_double* );\ntypedef lapack_logical (*LAPACK_Z_SELECT2)\n    ( const lapack_complex_double*, const lapack_complex_double* );\n\n#include \"lapacke_mangling.h\"\n\n#define LAPACK_lsame LAPACK_GLOBAL(lsame,LSAME)\nlapack_logical LAPACK_lsame( char* ca,  char* cb,\n                              lapack_int lca, lapack_int lcb );\n\n/* C-LAPACK function prototypes */\n\nlapack_int LAPACKE_sbdsdc( int matrix_order, char uplo, char compq,\n                           lapack_int n, float* d, float* e, float* u,\n                           lapack_int ldu, float* vt, lapack_int ldvt, float* q,\n                           lapack_int* iq );\nlapack_int LAPACKE_dbdsdc( int matrix_order, char uplo, char compq,\n                           lapack_int n, double* d, double* e, double* u,\n                           lapack_int ldu, double* vt, lapack_int ldvt,\n                           double* q, lapack_int* iq );\n\nlapack_int LAPACKE_sbdsqr( int matrix_order, char uplo, lapack_int n,\n                           lapack_int ncvt, lapack_int nru, lapack_int ncc,\n                           float* d, float* e, float* vt, lapack_int ldvt,\n                           float* u, lapack_int ldu, float* c, lapack_int ldc );\nlapack_int LAPACKE_dbdsqr( int matrix_order, char uplo, lapack_int n,\n                           lapack_int ncvt, lapack_int nru, lapack_int ncc,\n                         
  double* d, double* e, double* vt, lapack_int ldvt,\n                           double* u, lapack_int ldu, double* c,\n                           lapack_int ldc );\nlapack_int LAPACKE_cbdsqr( int matrix_order, char uplo, lapack_int n,\n                           lapack_int ncvt, lapack_int nru, lapack_int ncc,\n                           float* d, float* e, lapack_complex_float* vt,\n                           lapack_int ldvt, lapack_complex_float* u,\n                           lapack_int ldu, lapack_complex_float* c,\n                           lapack_int ldc );\nlapack_int LAPACKE_zbdsqr( int matrix_order, char uplo, lapack_int n,\n                           lapack_int ncvt, lapack_int nru, lapack_int ncc,\n                           double* d, double* e, lapack_complex_double* vt,\n                           lapack_int ldvt, lapack_complex_double* u,\n                           lapack_int ldu, lapack_complex_double* c,\n                           lapack_int ldc );\n\nlapack_int LAPACKE_sdisna( char job, lapack_int m, lapack_int n, const float* d,\n                           float* sep );\nlapack_int LAPACKE_ddisna( char job, lapack_int m, lapack_int n,\n                           const double* d, double* sep );\n\nlapack_int LAPACKE_sgbbrd( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int ncc, lapack_int kl,\n                           lapack_int ku, float* ab, lapack_int ldab, float* d,\n                           float* e, float* q, lapack_int ldq, float* pt,\n                           lapack_int ldpt, float* c, lapack_int ldc );\nlapack_int LAPACKE_dgbbrd( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int ncc, lapack_int kl,\n                           lapack_int ku, double* ab, lapack_int ldab,\n                           double* d, double* e, double* q, lapack_int ldq,\n                           double* pt, lapack_int ldpt, double* c,\n                           
lapack_int ldc );\nlapack_int LAPACKE_cgbbrd( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int ncc, lapack_int kl,\n                           lapack_int ku, lapack_complex_float* ab,\n                           lapack_int ldab, float* d, float* e,\n                           lapack_complex_float* q, lapack_int ldq,\n                           lapack_complex_float* pt, lapack_int ldpt,\n                           lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zgbbrd( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int ncc, lapack_int kl,\n                           lapack_int ku, lapack_complex_double* ab,\n                           lapack_int ldab, double* d, double* e,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_complex_double* pt, lapack_int ldpt,\n                           lapack_complex_double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sgbcon( int matrix_order, char norm, lapack_int n,\n                           lapack_int kl, lapack_int ku, const float* ab,\n                           lapack_int ldab, const lapack_int* ipiv, float anorm,\n                           float* rcond );\nlapack_int LAPACKE_dgbcon( int matrix_order, char norm, lapack_int n,\n                           lapack_int kl, lapack_int ku, const double* ab,\n                           lapack_int ldab, const lapack_int* ipiv,\n                           double anorm, double* rcond );\nlapack_int LAPACKE_cgbcon( int matrix_order, char norm, lapack_int n,\n                           lapack_int kl, lapack_int ku,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_zgbcon( int matrix_order, char norm, lapack_int n,\n                           lapack_int kl, lapack_int ku,\n                           
const lapack_complex_double* ab, lapack_int ldab,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\n\nlapack_int LAPACKE_sgbequ( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, const float* ab,\n                           lapack_int ldab, float* r, float* c, float* rowcnd,\n                           float* colcnd, float* amax );\nlapack_int LAPACKE_dgbequ( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, const double* ab,\n                           lapack_int ldab, double* r, double* c,\n                           double* rowcnd, double* colcnd, double* amax );\nlapack_int LAPACKE_cgbequ( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           float* r, float* c, float* rowcnd, float* colcnd,\n                           float* amax );\nlapack_int LAPACKE_zgbequ( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku,\n                           const lapack_complex_double* ab, lapack_int ldab,\n                           double* r, double* c, double* rowcnd, double* colcnd,\n                           double* amax );\n\nlapack_int LAPACKE_sgbequb( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_int kl, lapack_int ku, const float* ab,\n                            lapack_int ldab, float* r, float* c, float* rowcnd,\n                            float* colcnd, float* amax );\nlapack_int LAPACKE_dgbequb( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_int kl, lapack_int ku, const double* ab,\n                            lapack_int ldab, double* r, double* c,\n                            double* rowcnd, double* colcnd, double* amax 
);\nlapack_int LAPACKE_cgbequb( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_int kl, lapack_int ku,\n                            const lapack_complex_float* ab, lapack_int ldab,\n                            float* r, float* c, float* rowcnd, float* colcnd,\n                            float* amax );\nlapack_int LAPACKE_zgbequb( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_int kl, lapack_int ku,\n                            const lapack_complex_double* ab, lapack_int ldab,\n                            double* r, double* c, double* rowcnd,\n                            double* colcnd, double* amax );\n\nlapack_int LAPACKE_sgbrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const float* ab, lapack_int ldab, const float* afb,\n                           lapack_int ldafb, const lapack_int* ipiv,\n                           const float* b, lapack_int ldb, float* x,\n                           lapack_int ldx, float* ferr, float* berr );\nlapack_int LAPACKE_dgbrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const double* ab, lapack_int ldab, const double* afb,\n                           lapack_int ldafb, const lapack_int* ipiv,\n                           const double* b, lapack_int ldb, double* x,\n                           lapack_int ldx, double* ferr, double* berr );\nlapack_int LAPACKE_cgbrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           const lapack_complex_float* afb, lapack_int ldafb,\n                           const lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n    
                       lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zgbrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const lapack_complex_double* ab, lapack_int ldab,\n                           const lapack_complex_double* afb, lapack_int ldafb,\n                           const lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_sgbrfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, const float* ab, lapack_int ldab,\n                            const float* afb, lapack_int ldafb,\n                            const lapack_int* ipiv, const float* r,\n                            const float* c, const float* b, lapack_int ldb,\n                            float* x, lapack_int ldx, float* rcond, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_dgbrfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, const double* ab, lapack_int ldab,\n                            const double* afb, lapack_int ldafb,\n                            const lapack_int* ipiv, const double* r,\n                            const double* c, const double* b, lapack_int ldb,\n                            double* x, lapack_int ldx, double* rcond,\n                            double* berr, lapack_int n_err_bnds,\n                            
double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\nlapack_int LAPACKE_cgbrfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, const lapack_complex_float* ab,\n                            lapack_int ldab, const lapack_complex_float* afb,\n                            lapack_int ldafb, const lapack_int* ipiv,\n                            const float* r, const float* c,\n                            const lapack_complex_float* b, lapack_int ldb,\n                            lapack_complex_float* x, lapack_int ldx,\n                            float* rcond, float* berr, lapack_int n_err_bnds,\n                            float* err_bnds_norm, float* err_bnds_comp,\n                            lapack_int nparams, float* params );\nlapack_int LAPACKE_zgbrfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, const lapack_complex_double* ab,\n                            lapack_int ldab, const lapack_complex_double* afb,\n                            lapack_int ldafb, const lapack_int* ipiv,\n                            const double* r, const double* c,\n                            const lapack_complex_double* b, lapack_int ldb,\n                            lapack_complex_double* x, lapack_int ldx,\n                            double* rcond, double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\n\nlapack_int LAPACKE_sgbsv( int matrix_order, lapack_int n, lapack_int kl,\n                          lapack_int ku, lapack_int nrhs, float* ab,\n                          lapack_int ldab, lapack_int* ipiv, float* b,\n                          lapack_int ldb );\nlapack_int 
LAPACKE_dgbsv( int matrix_order, lapack_int n, lapack_int kl,\n                          lapack_int ku, lapack_int nrhs, double* ab,\n                          lapack_int ldab, lapack_int* ipiv, double* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_cgbsv( int matrix_order, lapack_int n, lapack_int kl,\n                          lapack_int ku, lapack_int nrhs,\n                          lapack_complex_float* ab, lapack_int ldab,\n                          lapack_int* ipiv, lapack_complex_float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_zgbsv( int matrix_order, lapack_int n, lapack_int kl,\n                          lapack_int ku, lapack_int nrhs,\n                          lapack_complex_double* ab, lapack_int ldab,\n                          lapack_int* ipiv, lapack_complex_double* b,\n                          lapack_int ldb );\n\nlapack_int LAPACKE_sgbsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int kl, lapack_int ku,\n                           lapack_int nrhs, float* ab, lapack_int ldab,\n                           float* afb, lapack_int ldafb, lapack_int* ipiv,\n                           char* equed, float* r, float* c, float* b,\n                           lapack_int ldb, float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr,\n                           float* rpivot );\nlapack_int LAPACKE_dgbsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int kl, lapack_int ku,\n                           lapack_int nrhs, double* ab, lapack_int ldab,\n                           double* afb, lapack_int ldafb, lapack_int* ipiv,\n                           char* equed, double* r, double* c, double* b,\n                           lapack_int ldb, double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr,\n                           double* rpivot 
);\nlapack_int LAPACKE_cgbsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int kl, lapack_int ku,\n                           lapack_int nrhs, lapack_complex_float* ab,\n                           lapack_int ldab, lapack_complex_float* afb,\n                           lapack_int ldafb, lapack_int* ipiv, char* equed,\n                           float* r, float* c, lapack_complex_float* b,\n                           lapack_int ldb, lapack_complex_float* x,\n                           lapack_int ldx, float* rcond, float* ferr,\n                           float* berr, float* rpivot );\nlapack_int LAPACKE_zgbsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int kl, lapack_int ku,\n                           lapack_int nrhs, lapack_complex_double* ab,\n                           lapack_int ldab, lapack_complex_double* afb,\n                           lapack_int ldafb, lapack_int* ipiv, char* equed,\n                           double* r, double* c, lapack_complex_double* b,\n                           lapack_int ldb, lapack_complex_double* x,\n                           lapack_int ldx, double* rcond, double* ferr,\n                           double* berr, double* rpivot );\n\nlapack_int LAPACKE_sgbsvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, float* ab, lapack_int ldab,\n                            float* afb, lapack_int ldafb, lapack_int* ipiv,\n                            char* equed, float* r, float* c, float* b,\n                            lapack_int ldb, float* x, lapack_int ldx,\n                            float* rcond, float* rpvgrw, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int 
LAPACKE_dgbsvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, double* ab, lapack_int ldab,\n                            double* afb, lapack_int ldafb, lapack_int* ipiv,\n                            char* equed, double* r, double* c, double* b,\n                            lapack_int ldb, double* x, lapack_int ldx,\n                            double* rcond, double* rpvgrw, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\nlapack_int LAPACKE_cgbsvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, lapack_complex_float* ab,\n                            lapack_int ldab, lapack_complex_float* afb,\n                            lapack_int ldafb, lapack_int* ipiv, char* equed,\n                            float* r, float* c, lapack_complex_float* b,\n                            lapack_int ldb, lapack_complex_float* x,\n                            lapack_int ldx, float* rcond, float* rpvgrw,\n                            float* berr, lapack_int n_err_bnds,\n                            float* err_bnds_norm, float* err_bnds_comp,\n                            lapack_int nparams, float* params );\nlapack_int LAPACKE_zgbsvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int kl, lapack_int ku,\n                            lapack_int nrhs, lapack_complex_double* ab,\n                            lapack_int ldab, lapack_complex_double* afb,\n                            lapack_int ldafb, lapack_int* ipiv, char* equed,\n                            double* r, double* c, lapack_complex_double* b,\n                            lapack_int ldb, lapack_complex_double* 
x,\n                            lapack_int ldx, double* rcond, double* rpvgrw,\n                            double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\n\nlapack_int LAPACKE_sgbtrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, float* ab,\n                           lapack_int ldab, lapack_int* ipiv );\nlapack_int LAPACKE_dgbtrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, double* ab,\n                           lapack_int ldab, lapack_int* ipiv );\nlapack_int LAPACKE_cgbtrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku,\n                           lapack_complex_float* ab, lapack_int ldab,\n                           lapack_int* ipiv );\nlapack_int LAPACKE_zgbtrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku,\n                           lapack_complex_double* ab, lapack_int ldab,\n                           lapack_int* ipiv );\n\nlapack_int LAPACKE_sgbtrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const float* ab, lapack_int ldab,\n                           const lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int LAPACKE_dgbtrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const double* ab, lapack_int ldab,\n                           const lapack_int* ipiv, double* b, lapack_int ldb );\nlapack_int LAPACKE_cgbtrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const lapack_complex_float* ab, 
lapack_int ldab,\n                           const lapack_int* ipiv, lapack_complex_float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_zgbtrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int kl, lapack_int ku, lapack_int nrhs,\n                           const lapack_complex_double* ab, lapack_int ldab,\n                           const lapack_int* ipiv, lapack_complex_double* b,\n                           lapack_int ldb );\n\nlapack_int LAPACKE_sgebak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const float* scale,\n                           lapack_int m, float* v, lapack_int ldv );\nlapack_int LAPACKE_dgebak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const double* scale,\n                           lapack_int m, double* v, lapack_int ldv );\nlapack_int LAPACKE_cgebak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const float* scale,\n                           lapack_int m, lapack_complex_float* v,\n                           lapack_int ldv );\nlapack_int LAPACKE_zgebak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const double* scale,\n                           lapack_int m, lapack_complex_double* v,\n                           lapack_int ldv );\n\nlapack_int LAPACKE_sgebal( int matrix_order, char job, lapack_int n, float* a,\n                           lapack_int lda, lapack_int* ilo, lapack_int* ihi,\n                           float* scale );\nlapack_int LAPACKE_dgebal( int matrix_order, char job, lapack_int n, double* a,\n                           lapack_int lda, lapack_int* ilo, lapack_int* ihi,\n                           double* scale );\nlapack_int LAPACKE_cgebal( int matrix_order, char job, lapack_int n,\n                
           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* ilo, lapack_int* ihi, float* scale );\nlapack_int LAPACKE_zgebal( int matrix_order, char job, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* ilo, lapack_int* ihi, double* scale );\n\nlapack_int LAPACKE_sgebrd( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, float* d, float* e,\n                           float* tauq, float* taup );\nlapack_int LAPACKE_dgebrd( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* d, double* e,\n                           double* tauq, double* taup );\nlapack_int LAPACKE_cgebrd( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda, float* d,\n                           float* e, lapack_complex_float* tauq,\n                           lapack_complex_float* taup );\nlapack_int LAPACKE_zgebrd( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda, double* d,\n                           double* e, lapack_complex_double* tauq,\n                           lapack_complex_double* taup );\n\nlapack_int LAPACKE_sgecon( int matrix_order, char norm, lapack_int n,\n                           const float* a, lapack_int lda, float anorm,\n                           float* rcond );\nlapack_int LAPACKE_dgecon( int matrix_order, char norm, lapack_int n,\n                           const double* a, lapack_int lda, double anorm,\n                           double* rcond );\nlapack_int LAPACKE_cgecon( int matrix_order, char norm, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           float anorm, float* rcond );\nlapack_int LAPACKE_zgecon( int matrix_order, char norm, lapack_int n,\n                     
      const lapack_complex_double* a, lapack_int lda,\n                           double anorm, double* rcond );\n\nlapack_int LAPACKE_sgeequ( int matrix_order, lapack_int m, lapack_int n,\n                           const float* a, lapack_int lda, float* r, float* c,\n                           float* rowcnd, float* colcnd, float* amax );\nlapack_int LAPACKE_dgeequ( int matrix_order, lapack_int m, lapack_int n,\n                           const double* a, lapack_int lda, double* r,\n                           double* c, double* rowcnd, double* colcnd,\n                           double* amax );\nlapack_int LAPACKE_cgeequ( int matrix_order, lapack_int m, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           float* r, float* c, float* rowcnd, float* colcnd,\n                           float* amax );\nlapack_int LAPACKE_zgeequ( int matrix_order, lapack_int m, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           double* r, double* c, double* rowcnd, double* colcnd,\n                           double* amax );\n\nlapack_int LAPACKE_sgeequb( int matrix_order, lapack_int m, lapack_int n,\n                            const float* a, lapack_int lda, float* r, float* c,\n                            float* rowcnd, float* colcnd, float* amax );\nlapack_int LAPACKE_dgeequb( int matrix_order, lapack_int m, lapack_int n,\n                            const double* a, lapack_int lda, double* r,\n                            double* c, double* rowcnd, double* colcnd,\n                            double* amax );\nlapack_int LAPACKE_cgeequb( int matrix_order, lapack_int m, lapack_int n,\n                            const lapack_complex_float* a, lapack_int lda,\n                            float* r, float* c, float* rowcnd, float* colcnd,\n                            float* amax );\nlapack_int LAPACKE_zgeequb( int matrix_order, lapack_int m, lapack_int n,\n    
                        const lapack_complex_double* a, lapack_int lda,\n                            double* r, double* c, double* rowcnd,\n                            double* colcnd, double* amax );\n\nlapack_int LAPACKE_sgees( int matrix_order, char jobvs, char sort,\n                          LAPACK_S_SELECT2 select, lapack_int n, float* a,\n                          lapack_int lda, lapack_int* sdim, float* wr,\n                          float* wi, float* vs, lapack_int ldvs );\nlapack_int LAPACKE_dgees( int matrix_order, char jobvs, char sort,\n                          LAPACK_D_SELECT2 select, lapack_int n, double* a,\n                          lapack_int lda, lapack_int* sdim, double* wr,\n                          double* wi, double* vs, lapack_int ldvs );\nlapack_int LAPACKE_cgees( int matrix_order, char jobvs, char sort,\n                          LAPACK_C_SELECT1 select, lapack_int n,\n                          lapack_complex_float* a, lapack_int lda,\n                          lapack_int* sdim, lapack_complex_float* w,\n                          lapack_complex_float* vs, lapack_int ldvs );\nlapack_int LAPACKE_zgees( int matrix_order, char jobvs, char sort,\n                          LAPACK_Z_SELECT1 select, lapack_int n,\n                          lapack_complex_double* a, lapack_int lda,\n                          lapack_int* sdim, lapack_complex_double* w,\n                          lapack_complex_double* vs, lapack_int ldvs );\n\nlapack_int LAPACKE_sgeesx( int matrix_order, char jobvs, char sort,\n                           LAPACK_S_SELECT2 select, char sense, lapack_int n,\n                           float* a, lapack_int lda, lapack_int* sdim,\n                           float* wr, float* wi, float* vs, lapack_int ldvs,\n                           float* rconde, float* rcondv );\nlapack_int LAPACKE_dgeesx( int matrix_order, char jobvs, char sort,\n                           LAPACK_D_SELECT2 select, char sense, lapack_int n,\n                          
 double* a, lapack_int lda, lapack_int* sdim,\n                           double* wr, double* wi, double* vs, lapack_int ldvs,\n                           double* rconde, double* rcondv );\nlapack_int LAPACKE_cgeesx( int matrix_order, char jobvs, char sort,\n                           LAPACK_C_SELECT1 select, char sense, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* sdim, lapack_complex_float* w,\n                           lapack_complex_float* vs, lapack_int ldvs,\n                           float* rconde, float* rcondv );\nlapack_int LAPACKE_zgeesx( int matrix_order, char jobvs, char sort,\n                           LAPACK_Z_SELECT1 select, char sense, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* sdim, lapack_complex_double* w,\n                           lapack_complex_double* vs, lapack_int ldvs,\n                           double* rconde, double* rcondv );\n\nlapack_int LAPACKE_sgeev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, float* a, lapack_int lda, float* wr,\n                          float* wi, float* vl, lapack_int ldvl, float* vr,\n                          lapack_int ldvr );\nlapack_int LAPACKE_dgeev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, double* a, lapack_int lda, double* wr,\n                          double* wi, double* vl, lapack_int ldvl, double* vr,\n                          lapack_int ldvr );\nlapack_int LAPACKE_cgeev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, lapack_complex_float* a, lapack_int lda,\n                          lapack_complex_float* w, lapack_complex_float* vl,\n                          lapack_int ldvl, lapack_complex_float* vr,\n                          lapack_int ldvr );\nlapack_int LAPACKE_zgeev( int matrix_order, char jobvl, char jobvr,\n      
                    lapack_int n, lapack_complex_double* a,\n                          lapack_int lda, lapack_complex_double* w,\n                          lapack_complex_double* vl, lapack_int ldvl,\n                          lapack_complex_double* vr, lapack_int ldvr );\n\nlapack_int LAPACKE_sgeevx( int matrix_order, char balanc, char jobvl,\n                           char jobvr, char sense, lapack_int n, float* a,\n                           lapack_int lda, float* wr, float* wi, float* vl,\n                           lapack_int ldvl, float* vr, lapack_int ldvr,\n                           lapack_int* ilo, lapack_int* ihi, float* scale,\n                           float* abnrm, float* rconde, float* rcondv );\nlapack_int LAPACKE_dgeevx( int matrix_order, char balanc, char jobvl,\n                           char jobvr, char sense, lapack_int n, double* a,\n                           lapack_int lda, double* wr, double* wi, double* vl,\n                           lapack_int ldvl, double* vr, lapack_int ldvr,\n                           lapack_int* ilo, lapack_int* ihi, double* scale,\n                           double* abnrm, double* rconde, double* rcondv );\nlapack_int LAPACKE_cgeevx( int matrix_order, char balanc, char jobvl,\n                           char jobvr, char sense, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* w, lapack_complex_float* vl,\n                           lapack_int ldvl, lapack_complex_float* vr,\n                           lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,\n                           float* scale, float* abnrm, float* rconde,\n                           float* rcondv );\nlapack_int LAPACKE_zgeevx( int matrix_order, char balanc, char jobvl,\n                           char jobvr, char sense, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* w, 
lapack_complex_double* vl,\n                           lapack_int ldvl, lapack_complex_double* vr,\n                           lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,\n                           double* scale, double* abnrm, double* rconde,\n                           double* rcondv );\n\nlapack_int LAPACKE_sgehrd( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, float* a, lapack_int lda,\n                           float* tau );\nlapack_int LAPACKE_dgehrd( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, double* a, lapack_int lda,\n                           double* tau );\nlapack_int LAPACKE_cgehrd( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* tau );\nlapack_int LAPACKE_zgehrd( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgejsv( int matrix_order, char joba, char jobu, char jobv,\n                           char jobr, char jobt, char jobp, lapack_int m,\n                           lapack_int n, float* a, lapack_int lda, float* sva,\n                           float* u, lapack_int ldu, float* v, lapack_int ldv,\n                           float* stat, lapack_int* istat );\nlapack_int LAPACKE_dgejsv( int matrix_order, char joba, char jobu, char jobv,\n                           char jobr, char jobt, char jobp, lapack_int m,\n                           lapack_int n, double* a, lapack_int lda, double* sva,\n                           double* u, lapack_int ldu, double* v, lapack_int ldv,\n                           double* stat, lapack_int* istat );\n\nlapack_int LAPACKE_sgelq2( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int 
lda, float* tau );\nlapack_int LAPACKE_dgelq2( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_cgelq2( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zgelq2( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgelqf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, float* tau );\nlapack_int LAPACKE_dgelqf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_cgelqf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zgelqf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgels( int matrix_order, char trans, lapack_int m,\n                          lapack_int n, lapack_int nrhs, float* a,\n                          lapack_int lda, float* b, lapack_int ldb );\nlapack_int LAPACKE_dgels( int matrix_order, char trans, lapack_int m,\n                          lapack_int n, lapack_int nrhs, double* a,\n                          lapack_int lda, double* b, lapack_int ldb );\nlapack_int LAPACKE_cgels( int matrix_order, char trans, lapack_int m,\n                          lapack_int n, lapack_int nrhs,\n                          lapack_complex_float* a, lapack_int lda,\n                          lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zgels( int matrix_order, char 
trans, lapack_int m,\n                          lapack_int n, lapack_int nrhs,\n                          lapack_complex_double* a, lapack_int lda,\n                          lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sgelsd( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, float* a, lapack_int lda, float* b,\n                           lapack_int ldb, float* s, float rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_dgelsd( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, double* a, lapack_int lda,\n                           double* b, lapack_int ldb, double* s, double rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_cgelsd( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, float* s, float rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_zgelsd( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, double* s, double rcond,\n                           lapack_int* rank );\n\nlapack_int LAPACKE_sgelss( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, float* a, lapack_int lda, float* b,\n                           lapack_int ldb, float* s, float rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_dgelss( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, double* a, lapack_int lda,\n                           double* b, lapack_int ldb, double* s, double rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_cgelss( 
int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, float* s, float rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_zgelss( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, double* s, double rcond,\n                           lapack_int* rank );\n\nlapack_int LAPACKE_sgelsy( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, float* a, lapack_int lda, float* b,\n                           lapack_int ldb, lapack_int* jpvt, float rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_dgelsy( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, double* a, lapack_int lda,\n                           double* b, lapack_int ldb, lapack_int* jpvt,\n                           double rcond, lapack_int* rank );\nlapack_int LAPACKE_cgelsy( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, lapack_int* jpvt, float rcond,\n                           lapack_int* rank );\nlapack_int LAPACKE_zgelsy( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nrhs, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, lapack_int* jpvt, double rcond,\n                           lapack_int* rank );\n\nlapack_int LAPACKE_sgeqlf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, 
float* tau );\nlapack_int LAPACKE_dgeqlf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_cgeqlf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zgeqlf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgeqp3( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, lapack_int* jpvt,\n                           float* tau );\nlapack_int LAPACKE_dgeqp3( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, lapack_int* jpvt,\n                           double* tau );\nlapack_int LAPACKE_cgeqp3( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* jpvt, lapack_complex_float* tau );\nlapack_int LAPACKE_zgeqp3( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* jpvt, lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgeqpf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, lapack_int* jpvt,\n                           float* tau );\nlapack_int LAPACKE_dgeqpf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, lapack_int* jpvt,\n                           double* tau );\nlapack_int LAPACKE_cgeqpf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* jpvt, lapack_complex_float* tau );\nlapack_int 
LAPACKE_zgeqpf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* jpvt, lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgeqr2( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, float* tau );\nlapack_int LAPACKE_dgeqr2( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_cgeqr2( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zgeqr2( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgeqrf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, float* tau );\nlapack_int LAPACKE_dgeqrf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_cgeqrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zgeqrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgeqrfp( int matrix_order, lapack_int m, lapack_int n,\n                            float* a, lapack_int lda, float* tau );\nlapack_int LAPACKE_dgeqrfp( int matrix_order, lapack_int m, lapack_int n,\n                            double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_cgeqrfp( int matrix_order, lapack_int m, lapack_int n,\n                            
lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* tau );\nlapack_int LAPACKE_zgeqrfp( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgerfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           const float* af, lapack_int ldaf,\n                           const lapack_int* ipiv, const float* b,\n                           lapack_int ldb, float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_dgerfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           const double* af, lapack_int ldaf,\n                           const lapack_int* ipiv, const double* b,\n                           lapack_int ldb, double* x, lapack_int ldx,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_cgerfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* af,\n                           lapack_int ldaf, const lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zgerfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* af,\n                           lapack_int ldaf, const lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n             
              lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_sgerfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int nrhs, const float* a,\n                            lapack_int lda, const float* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const float* r,\n                            const float* c, const float* b, lapack_int ldb,\n                            float* x, lapack_int ldx, float* rcond, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_dgerfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int nrhs, const double* a,\n                            lapack_int lda, const double* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const double* r,\n                            const double* c, const double* b, lapack_int ldb,\n                            double* x, lapack_int ldx, double* rcond,\n                            double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\nlapack_int LAPACKE_cgerfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_float* a, lapack_int lda,\n                            const lapack_complex_float* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const float* r,\n                            const float* c, const lapack_complex_float* b,\n                            lapack_int ldb, lapack_complex_float* x,\n                            lapack_int ldx, float* rcond, float* berr,\n          
                  lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_zgerfsx( int matrix_order, char trans, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_double* a, lapack_int lda,\n                            const lapack_complex_double* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const double* r,\n                            const double* c, const lapack_complex_double* b,\n                            lapack_int ldb, lapack_complex_double* x,\n                            lapack_int ldx, double* rcond, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\n\nlapack_int LAPACKE_sgerqf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, float* tau );\nlapack_int LAPACKE_dgerqf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_cgerqf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zgerqf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_sgesdd( int matrix_order, char jobz, lapack_int m,\n                           lapack_int n, float* a, lapack_int lda, float* s,\n                           float* u, lapack_int ldu, float* vt,\n                           lapack_int ldvt );\nlapack_int LAPACKE_dgesdd( int matrix_order, char jobz, lapack_int m,\n                     
      lapack_int n, double* a, lapack_int lda, double* s,\n                           double* u, lapack_int ldu, double* vt,\n                           lapack_int ldvt );\nlapack_int LAPACKE_cgesdd( int matrix_order, char jobz, lapack_int m,\n                           lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, float* s, lapack_complex_float* u,\n                           lapack_int ldu, lapack_complex_float* vt,\n                           lapack_int ldvt );\nlapack_int LAPACKE_zgesdd( int matrix_order, char jobz, lapack_int m,\n                           lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, double* s, lapack_complex_double* u,\n                           lapack_int ldu, lapack_complex_double* vt,\n                           lapack_int ldvt );\n\nlapack_int LAPACKE_sgesv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          float* a, lapack_int lda, lapack_int* ipiv, float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_dgesv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          double* a, lapack_int lda, lapack_int* ipiv,\n                          double* b, lapack_int ldb );\nlapack_int LAPACKE_cgesv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          lapack_complex_float* a, lapack_int lda,\n                          lapack_int* ipiv, lapack_complex_float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_zgesv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          lapack_complex_double* a, lapack_int lda,\n                          lapack_int* ipiv, lapack_complex_double* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_dsgesv( int matrix_order, lapack_int n, lapack_int nrhs,\n                           double* a, lapack_int lda, lapack_int* ipiv,\n                           double* b, lapack_int ldb, double* x, lapack_int 
ldx,\n                           lapack_int* iter );\nlapack_int LAPACKE_zcgesv( int matrix_order, lapack_int n, lapack_int nrhs,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* ipiv, lapack_complex_double* b,\n                           lapack_int ldb, lapack_complex_double* x,\n                           lapack_int ldx, lapack_int* iter );\n\nlapack_int LAPACKE_sgesvd( int matrix_order, char jobu, char jobvt,\n                           lapack_int m, lapack_int n, float* a, lapack_int lda,\n                           float* s, float* u, lapack_int ldu, float* vt,\n                           lapack_int ldvt, float* superb );\nlapack_int LAPACKE_dgesvd( int matrix_order, char jobu, char jobvt,\n                           lapack_int m, lapack_int n, double* a,\n                           lapack_int lda, double* s, double* u, lapack_int ldu,\n                           double* vt, lapack_int ldvt, double* superb );\nlapack_int LAPACKE_cgesvd( int matrix_order, char jobu, char jobvt,\n                           lapack_int m, lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, float* s, lapack_complex_float* u,\n                           lapack_int ldu, lapack_complex_float* vt,\n                           lapack_int ldvt, float* superb );\nlapack_int LAPACKE_zgesvd( int matrix_order, char jobu, char jobvt,\n                           lapack_int m, lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, double* s, lapack_complex_double* u,\n                           lapack_int ldu, lapack_complex_double* vt,\n                           lapack_int ldvt, double* superb );\n\nlapack_int LAPACKE_sgesvj( int matrix_order, char joba, char jobu, char jobv,\n                           lapack_int m, lapack_int n, float* a, lapack_int lda,\n                           float* sva, lapack_int mv, float* v, lapack_int ldv,\n                           float* stat 
);\nlapack_int LAPACKE_dgesvj( int matrix_order, char joba, char jobu, char jobv,\n                           lapack_int m, lapack_int n, double* a,\n                           lapack_int lda, double* sva, lapack_int mv,\n                           double* v, lapack_int ldv, double* stat );\n\nlapack_int LAPACKE_sgesvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs, float* a,\n                           lapack_int lda, float* af, lapack_int ldaf,\n                           lapack_int* ipiv, char* equed, float* r, float* c,\n                           float* b, lapack_int ldb, float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr,\n                           float* rpivot );\nlapack_int LAPACKE_dgesvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs, double* a,\n                           lapack_int lda, double* af, lapack_int ldaf,\n                           lapack_int* ipiv, char* equed, double* r, double* c,\n                           double* b, lapack_int ldb, double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr,\n                           double* rpivot );\nlapack_int LAPACKE_cgesvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* af, lapack_int ldaf,\n                           lapack_int* ipiv, char* equed, float* r, float* c,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr,\n                           float* rpivot );\nlapack_int LAPACKE_zgesvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs,\n     
                      lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* af, lapack_int ldaf,\n                           lapack_int* ipiv, char* equed, double* r, double* c,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr,\n                           double* rpivot );\n\nlapack_int LAPACKE_sgesvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int nrhs, float* a,\n                            lapack_int lda, float* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, float* r, float* c,\n                            float* b, lapack_int ldb, float* x, lapack_int ldx,\n                            float* rcond, float* rpvgrw, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_dgesvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int nrhs, double* a,\n                            lapack_int lda, double* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, double* r, double* c,\n                            double* b, lapack_int ldb, double* x,\n                            lapack_int ldx, double* rcond, double* rpvgrw,\n                            double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\nlapack_int LAPACKE_cgesvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_float* a, lapack_int lda,\n                            
lapack_complex_float* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, float* r, float* c,\n                            lapack_complex_float* b, lapack_int ldb,\n                            lapack_complex_float* x, lapack_int ldx,\n                            float* rcond, float* rpvgrw, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_zgesvxx( int matrix_order, char fact, char trans,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, double* r, double* c,\n                            lapack_complex_double* b, lapack_int ldb,\n                            lapack_complex_double* x, lapack_int ldx,\n                            double* rcond, double* rpvgrw, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\n\nlapack_int LAPACKE_sgetf2( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_dgetf2( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_cgetf2( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* ipiv );\nlapack_int LAPACKE_zgetf2( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* ipiv );\n\nlapack_int 
LAPACKE_sgetrf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_dgetrf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_cgetrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* ipiv );\nlapack_int LAPACKE_zgetrf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* ipiv );\n\nlapack_int LAPACKE_sgetri( int matrix_order, lapack_int n, float* a,\n                           lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_dgetri( int matrix_order, lapack_int n, double* a,\n                           lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_cgetri( int matrix_order, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           const lapack_int* ipiv );\nlapack_int LAPACKE_zgetri( int matrix_order, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           const lapack_int* ipiv );\n\nlapack_int LAPACKE_sgetrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           const lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int LAPACKE_dgetrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           const lapack_int* ipiv, double* b, lapack_int ldb );\nlapack_int LAPACKE_cgetrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_int* ipiv,\n   
                        lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zgetrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_int* ipiv,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sggbak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const float* lscale,\n                           const float* rscale, lapack_int m, float* v,\n                           lapack_int ldv );\nlapack_int LAPACKE_dggbak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const double* lscale,\n                           const double* rscale, lapack_int m, double* v,\n                           lapack_int ldv );\nlapack_int LAPACKE_cggbak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const float* lscale,\n                           const float* rscale, lapack_int m,\n                           lapack_complex_float* v, lapack_int ldv );\nlapack_int LAPACKE_zggbak( int matrix_order, char job, char side, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, const double* lscale,\n                           const double* rscale, lapack_int m,\n                           lapack_complex_double* v, lapack_int ldv );\n\nlapack_int LAPACKE_sggbal( int matrix_order, char job, lapack_int n, float* a,\n                           lapack_int lda, float* b, lapack_int ldb,\n                           lapack_int* ilo, lapack_int* ihi, float* lscale,\n                           float* rscale );\nlapack_int LAPACKE_dggbal( int matrix_order, char job, lapack_int n, double* a,\n                           lapack_int lda, double* b, lapack_int ldb,\n                           lapack_int* ilo, 
lapack_int* ihi, double* lscale,\n                           double* rscale );\nlapack_int LAPACKE_cggbal( int matrix_order, char job, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_int* ilo, lapack_int* ihi, float* lscale,\n                           float* rscale );\nlapack_int LAPACKE_zggbal( int matrix_order, char job, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_int* ilo, lapack_int* ihi, double* lscale,\n                           double* rscale );\n\nlapack_int LAPACKE_sgges( int matrix_order, char jobvsl, char jobvsr, char sort,\n                          LAPACK_S_SELECT3 selctg, lapack_int n, float* a,\n                          lapack_int lda, float* b, lapack_int ldb,\n                          lapack_int* sdim, float* alphar, float* alphai,\n                          float* beta, float* vsl, lapack_int ldvsl, float* vsr,\n                          lapack_int ldvsr );\nlapack_int LAPACKE_dgges( int matrix_order, char jobvsl, char jobvsr, char sort,\n                          LAPACK_D_SELECT3 selctg, lapack_int n, double* a,\n                          lapack_int lda, double* b, lapack_int ldb,\n                          lapack_int* sdim, double* alphar, double* alphai,\n                          double* beta, double* vsl, lapack_int ldvsl,\n                          double* vsr, lapack_int ldvsr );\nlapack_int LAPACKE_cgges( int matrix_order, char jobvsl, char jobvsr, char sort,\n                          LAPACK_C_SELECT2 selctg, lapack_int n,\n                          lapack_complex_float* a, lapack_int lda,\n                          lapack_complex_float* b, lapack_int ldb,\n                          lapack_int* sdim, lapack_complex_float* alpha,\n                          
lapack_complex_float* beta, lapack_complex_float* vsl,\n                          lapack_int ldvsl, lapack_complex_float* vsr,\n                          lapack_int ldvsr );\nlapack_int LAPACKE_zgges( int matrix_order, char jobvsl, char jobvsr, char sort,\n                          LAPACK_Z_SELECT2 selctg, lapack_int n,\n                          lapack_complex_double* a, lapack_int lda,\n                          lapack_complex_double* b, lapack_int ldb,\n                          lapack_int* sdim, lapack_complex_double* alpha,\n                          lapack_complex_double* beta,\n                          lapack_complex_double* vsl, lapack_int ldvsl,\n                          lapack_complex_double* vsr, lapack_int ldvsr );\n\nlapack_int LAPACKE_sggesx( int matrix_order, char jobvsl, char jobvsr,\n                           char sort, LAPACK_S_SELECT3 selctg, char sense,\n                           lapack_int n, float* a, lapack_int lda, float* b,\n                           lapack_int ldb, lapack_int* sdim, float* alphar,\n                           float* alphai, float* beta, float* vsl,\n                           lapack_int ldvsl, float* vsr, lapack_int ldvsr,\n                           float* rconde, float* rcondv );\nlapack_int LAPACKE_dggesx( int matrix_order, char jobvsl, char jobvsr,\n                           char sort, LAPACK_D_SELECT3 selctg, char sense,\n                           lapack_int n, double* a, lapack_int lda, double* b,\n                           lapack_int ldb, lapack_int* sdim, double* alphar,\n                           double* alphai, double* beta, double* vsl,\n                           lapack_int ldvsl, double* vsr, lapack_int ldvsr,\n                           double* rconde, double* rcondv );\nlapack_int LAPACKE_cggesx( int matrix_order, char jobvsl, char jobvsr,\n                           char sort, LAPACK_C_SELECT2 selctg, char sense,\n                           lapack_int n, lapack_complex_float* a,\n                    
       lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, lapack_int* sdim,\n                           lapack_complex_float* alpha,\n                           lapack_complex_float* beta,\n                           lapack_complex_float* vsl, lapack_int ldvsl,\n                           lapack_complex_float* vsr, lapack_int ldvsr,\n                           float* rconde, float* rcondv );\nlapack_int LAPACKE_zggesx( int matrix_order, char jobvsl, char jobvsr,\n                           char sort, LAPACK_Z_SELECT2 selctg, char sense,\n                           lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, lapack_int* sdim,\n                           lapack_complex_double* alpha,\n                           lapack_complex_double* beta,\n                           lapack_complex_double* vsl, lapack_int ldvsl,\n                           lapack_complex_double* vsr, lapack_int ldvsr,\n                           double* rconde, double* rcondv );\n\nlapack_int LAPACKE_sggev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, float* a, lapack_int lda, float* b,\n                          lapack_int ldb, float* alphar, float* alphai,\n                          float* beta, float* vl, lapack_int ldvl, float* vr,\n                          lapack_int ldvr );\nlapack_int LAPACKE_dggev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, double* a, lapack_int lda, double* b,\n                          lapack_int ldb, double* alphar, double* alphai,\n                          double* beta, double* vl, lapack_int ldvl, double* vr,\n                          lapack_int ldvr );\nlapack_int LAPACKE_cggev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, lapack_complex_float* a, lapack_int lda,\n                          lapack_complex_float* 
b, lapack_int ldb,\n                          lapack_complex_float* alpha,\n                          lapack_complex_float* beta, lapack_complex_float* vl,\n                          lapack_int ldvl, lapack_complex_float* vr,\n                          lapack_int ldvr );\nlapack_int LAPACKE_zggev( int matrix_order, char jobvl, char jobvr,\n                          lapack_int n, lapack_complex_double* a,\n                          lapack_int lda, lapack_complex_double* b,\n                          lapack_int ldb, lapack_complex_double* alpha,\n                          lapack_complex_double* beta,\n                          lapack_complex_double* vl, lapack_int ldvl,\n                          lapack_complex_double* vr, lapack_int ldvr );\n\nlapack_int LAPACKE_sggevx( int matrix_order, char balanc, char jobvl,\n                           char jobvr, char sense, lapack_int n, float* a,\n                           lapack_int lda, float* b, lapack_int ldb,\n                           float* alphar, float* alphai, float* beta, float* vl,\n                           lapack_int ldvl, float* vr, lapack_int ldvr,\n                           lapack_int* ilo, lapack_int* ihi, float* lscale,\n                           float* rscale, float* abnrm, float* bbnrm,\n                           float* rconde, float* rcondv );\nlapack_int LAPACKE_dggevx( int matrix_order, char balanc, char jobvl,\n                           char jobvr, char sense, lapack_int n, double* a,\n                           lapack_int lda, double* b, lapack_int ldb,\n                           double* alphar, double* alphai, double* beta,\n                           double* vl, lapack_int ldvl, double* vr,\n                           lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,\n                           double* lscale, double* rscale, double* abnrm,\n                           double* bbnrm, double* rconde, double* rcondv );\nlapack_int LAPACKE_cggevx( int matrix_order, char balanc, char jobvl,\n   
                        char jobvr, char sense, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* alpha,\n                           lapack_complex_float* beta, lapack_complex_float* vl,\n                           lapack_int ldvl, lapack_complex_float* vr,\n                           lapack_int ldvr, lapack_int* ilo, lapack_int* ihi,\n                           float* lscale, float* rscale, float* abnrm,\n                           float* bbnrm, float* rconde, float* rcondv );\nlapack_int LAPACKE_zggevx( int matrix_order, char balanc, char jobvl,\n                           char jobvr, char sense, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* alpha,\n                           lapack_complex_double* beta,\n                           lapack_complex_double* vl, lapack_int ldvl,\n                           lapack_complex_double* vr, lapack_int ldvr,\n                           lapack_int* ilo, lapack_int* ihi, double* lscale,\n                           double* rscale, double* abnrm, double* bbnrm,\n                           double* rconde, double* rcondv );\n\nlapack_int LAPACKE_sggglm( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, float* a, lapack_int lda, float* b,\n                           lapack_int ldb, float* d, float* x, float* y );\nlapack_int LAPACKE_dggglm( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, double* a, lapack_int lda, double* b,\n                           lapack_int ldb, double* d, double* x, double* y );\nlapack_int LAPACKE_cggglm( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, lapack_complex_float* a,\n          
                 lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, lapack_complex_float* d,\n                           lapack_complex_float* x, lapack_complex_float* y );\nlapack_int LAPACKE_zggglm( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, lapack_complex_double* d,\n                           lapack_complex_double* x, lapack_complex_double* y );\n\nlapack_int LAPACKE_sgghrd( int matrix_order, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           float* a, lapack_int lda, float* b, lapack_int ldb,\n                           float* q, lapack_int ldq, float* z, lapack_int ldz );\nlapack_int LAPACKE_dgghrd( int matrix_order, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           double* a, lapack_int lda, double* b, lapack_int ldb,\n                           double* q, lapack_int ldq, double* z,\n                           lapack_int ldz );\nlapack_int LAPACKE_cgghrd( int matrix_order, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* q, lapack_int ldq,\n                           lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zgghrd( int matrix_order, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* q, lapack_int ldq,\n      
                     lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sgglse( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int p, float* a, lapack_int lda, float* b,\n                           lapack_int ldb, float* c, float* d, float* x );\nlapack_int LAPACKE_dgglse( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int p, double* a, lapack_int lda, double* b,\n                           lapack_int ldb, double* c, double* d, double* x );\nlapack_int LAPACKE_cgglse( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int p, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, lapack_complex_float* c,\n                           lapack_complex_float* d, lapack_complex_float* x );\nlapack_int LAPACKE_zgglse( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int p, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, lapack_complex_double* c,\n                           lapack_complex_double* d, lapack_complex_double* x );\n\nlapack_int LAPACKE_sggqrf( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, float* a, lapack_int lda, float* taua,\n                           float* b, lapack_int ldb, float* taub );\nlapack_int LAPACKE_dggqrf( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, double* a, lapack_int lda,\n                           double* taua, double* b, lapack_int ldb,\n                           double* taub );\nlapack_int LAPACKE_cggqrf( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* taua,\n                           lapack_complex_float* b, 
lapack_int ldb,\n                           lapack_complex_float* taub );\nlapack_int LAPACKE_zggqrf( int matrix_order, lapack_int n, lapack_int m,\n                           lapack_int p, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* taua,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* taub );\n\nlapack_int LAPACKE_sggrqf( int matrix_order, lapack_int m, lapack_int p,\n                           lapack_int n, float* a, lapack_int lda, float* taua,\n                           float* b, lapack_int ldb, float* taub );\nlapack_int LAPACKE_dggrqf( int matrix_order, lapack_int m, lapack_int p,\n                           lapack_int n, double* a, lapack_int lda,\n                           double* taua, double* b, lapack_int ldb,\n                           double* taub );\nlapack_int LAPACKE_cggrqf( int matrix_order, lapack_int m, lapack_int p,\n                           lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* taua,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* taub );\nlapack_int LAPACKE_zggrqf( int matrix_order, lapack_int m, lapack_int p,\n                           lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* taua,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* taub );\n\nlapack_int LAPACKE_sggsvd( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int n, lapack_int p,\n                           lapack_int* k, lapack_int* l, float* a,\n                           lapack_int lda, float* b, lapack_int ldb,\n                           float* alpha, float* beta, float* u, lapack_int ldu,\n                           float* v, 
lapack_int ldv, float* q, lapack_int ldq,\n                           lapack_int* iwork );\nlapack_int LAPACKE_dggsvd( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int n, lapack_int p,\n                           lapack_int* k, lapack_int* l, double* a,\n                           lapack_int lda, double* b, lapack_int ldb,\n                           double* alpha, double* beta, double* u,\n                           lapack_int ldu, double* v, lapack_int ldv, double* q,\n                           lapack_int ldq, lapack_int* iwork );\nlapack_int LAPACKE_cggsvd( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int n, lapack_int p,\n                           lapack_int* k, lapack_int* l,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb,\n                           float* alpha, float* beta, lapack_complex_float* u,\n                           lapack_int ldu, lapack_complex_float* v,\n                           lapack_int ldv, lapack_complex_float* q,\n                           lapack_int ldq, lapack_int* iwork );\nlapack_int LAPACKE_zggsvd( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int n, lapack_int p,\n                           lapack_int* k, lapack_int* l,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           double* alpha, double* beta,\n                           lapack_complex_double* u, lapack_int ldu,\n                           lapack_complex_double* v, lapack_int ldv,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_int* iwork );\n\nlapack_int LAPACKE_sggsvp( int matrix_order, char jobu, char jobv, char jobq,\n                           
lapack_int m, lapack_int p, lapack_int n, float* a,\n                           lapack_int lda, float* b, lapack_int ldb, float tola,\n                           float tolb, lapack_int* k, lapack_int* l, float* u,\n                           lapack_int ldu, float* v, lapack_int ldv, float* q,\n                           lapack_int ldq );\nlapack_int LAPACKE_dggsvp( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int p, lapack_int n, double* a,\n                           lapack_int lda, double* b, lapack_int ldb,\n                           double tola, double tolb, lapack_int* k,\n                           lapack_int* l, double* u, lapack_int ldu, double* v,\n                           lapack_int ldv, double* q, lapack_int ldq );\nlapack_int LAPACKE_cggsvp( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int p, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb, float tola,\n                           float tolb, lapack_int* k, lapack_int* l,\n                           lapack_complex_float* u, lapack_int ldu,\n                           lapack_complex_float* v, lapack_int ldv,\n                           lapack_complex_float* q, lapack_int ldq );\nlapack_int LAPACKE_zggsvp( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int p, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           double tola, double tolb, lapack_int* k,\n                           lapack_int* l, lapack_complex_double* u,\n                           lapack_int ldu, lapack_complex_double* v,\n                           lapack_int ldv, lapack_complex_double* q,\n                           lapack_int ldq );\n\nlapack_int 
LAPACKE_sgtcon( char norm, lapack_int n, const float* dl,\n                           const float* d, const float* du, const float* du2,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_dgtcon( char norm, lapack_int n, const double* dl,\n                           const double* d, const double* du, const double* du2,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\nlapack_int LAPACKE_cgtcon( char norm, lapack_int n,\n                           const lapack_complex_float* dl,\n                           const lapack_complex_float* d,\n                           const lapack_complex_float* du,\n                           const lapack_complex_float* du2,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_zgtcon( char norm, lapack_int n,\n                           const lapack_complex_double* dl,\n                           const lapack_complex_double* d,\n                           const lapack_complex_double* du,\n                           const lapack_complex_double* du2,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\n\nlapack_int LAPACKE_sgtrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const float* dl, const float* d,\n                           const float* du, const float* dlf, const float* df,\n                           const float* duf, const float* du2,\n                           const lapack_int* ipiv, const float* b,\n                           lapack_int ldb, float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_dgtrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const double* dl, const double* d,\n                           const double* du, const double* dlf,\n                           
const double* df, const double* duf,\n                           const double* du2, const lapack_int* ipiv,\n                           const double* b, lapack_int ldb, double* x,\n                           lapack_int ldx, double* ferr, double* berr );\nlapack_int LAPACKE_cgtrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* dl,\n                           const lapack_complex_float* d,\n                           const lapack_complex_float* du,\n                           const lapack_complex_float* dlf,\n                           const lapack_complex_float* df,\n                           const lapack_complex_float* duf,\n                           const lapack_complex_float* du2,\n                           const lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zgtrfs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* dl,\n                           const lapack_complex_double* d,\n                           const lapack_complex_double* du,\n                           const lapack_complex_double* dlf,\n                           const lapack_complex_double* df,\n                           const lapack_complex_double* duf,\n                           const lapack_complex_double* du2,\n                           const lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_sgtsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          float* dl, float* d, float* du, float* b,\n                          lapack_int ldb );\nlapack_int 
LAPACKE_dgtsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          double* dl, double* d, double* du, double* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_cgtsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          lapack_complex_float* dl, lapack_complex_float* d,\n                          lapack_complex_float* du, lapack_complex_float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_zgtsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          lapack_complex_double* dl, lapack_complex_double* d,\n                          lapack_complex_double* du, lapack_complex_double* b,\n                          lapack_int ldb );\n\nlapack_int LAPACKE_sgtsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs, const float* dl,\n                           const float* d, const float* du, float* dlf,\n                           float* df, float* duf, float* du2, lapack_int* ipiv,\n                           const float* b, lapack_int ldb, float* x,\n                           lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_dgtsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs, const double* dl,\n                           const double* d, const double* du, double* dlf,\n                           double* df, double* duf, double* du2,\n                           lapack_int* ipiv, const double* b, lapack_int ldb,\n                           double* x, lapack_int ldx, double* rcond,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_cgtsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_float* dl,\n                           const lapack_complex_float* d,\n                           
const lapack_complex_float* du,\n                           lapack_complex_float* dlf, lapack_complex_float* df,\n                           lapack_complex_float* duf, lapack_complex_float* du2,\n                           lapack_int* ipiv, const lapack_complex_float* b,\n                           lapack_int ldb, lapack_complex_float* x,\n                           lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zgtsvx( int matrix_order, char fact, char trans,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_double* dl,\n                           const lapack_complex_double* d,\n                           const lapack_complex_double* du,\n                           lapack_complex_double* dlf,\n                           lapack_complex_double* df,\n                           lapack_complex_double* duf,\n                           lapack_complex_double* du2, lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_sgttrf( lapack_int n, float* dl, float* d, float* du,\n                           float* du2, lapack_int* ipiv );\nlapack_int LAPACKE_dgttrf( lapack_int n, double* dl, double* d, double* du,\n                           double* du2, lapack_int* ipiv );\nlapack_int LAPACKE_cgttrf( lapack_int n, lapack_complex_float* dl,\n                           lapack_complex_float* d, lapack_complex_float* du,\n                           lapack_complex_float* du2, lapack_int* ipiv );\nlapack_int LAPACKE_zgttrf( lapack_int n, lapack_complex_double* dl,\n                           lapack_complex_double* d, lapack_complex_double* du,\n                           lapack_complex_double* du2, lapack_int* ipiv );\n\nlapack_int LAPACKE_sgttrs( int matrix_order, char 
trans, lapack_int n,\n                           lapack_int nrhs, const float* dl, const float* d,\n                           const float* du, const float* du2,\n                           const lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int LAPACKE_dgttrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const double* dl, const double* d,\n                           const double* du, const double* du2,\n                           const lapack_int* ipiv, double* b, lapack_int ldb );\nlapack_int LAPACKE_cgttrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* dl,\n                           const lapack_complex_float* d,\n                           const lapack_complex_float* du,\n                           const lapack_complex_float* du2,\n                           const lapack_int* ipiv, lapack_complex_float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_zgttrs( int matrix_order, char trans, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* dl,\n                           const lapack_complex_double* d,\n                           const lapack_complex_double* du,\n                           const lapack_complex_double* du2,\n                           const lapack_int* ipiv, lapack_complex_double* b,\n                           lapack_int ldb );\n\nlapack_int LAPACKE_chbev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int kd, lapack_complex_float* ab,\n                          lapack_int ldab, float* w, lapack_complex_float* z,\n                          lapack_int ldz );\nlapack_int LAPACKE_zhbev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int kd, lapack_complex_double* ab,\n                          lapack_int ldab, double* w, lapack_complex_double* z,\n                          lapack_int ldz 
);\n\nlapack_int LAPACKE_chbevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int kd, lapack_complex_float* ab,\n                           lapack_int ldab, float* w, lapack_complex_float* z,\n                           lapack_int ldz );\nlapack_int LAPACKE_zhbevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int kd, lapack_complex_double* ab,\n                           lapack_int ldab, double* w, lapack_complex_double* z,\n                           lapack_int ldz );\n\nlapack_int LAPACKE_chbevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int kd,\n                           lapack_complex_float* ab, lapack_int ldab,\n                           lapack_complex_float* q, lapack_int ldq, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, lapack_complex_float* z,\n                           lapack_int ldz, lapack_int* ifail );\nlapack_int LAPACKE_zhbevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int kd,\n                           lapack_complex_double* ab, lapack_int ldab,\n                           lapack_complex_double* q, lapack_int ldq, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_chbgst( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb,\n                           lapack_complex_float* ab, lapack_int ldab,\n                           const lapack_complex_float* bb, lapack_int ldbb,\n                           lapack_complex_float* x, lapack_int ldx );\nlapack_int 
LAPACKE_zhbgst( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb,\n                           lapack_complex_double* ab, lapack_int ldab,\n                           const lapack_complex_double* bb, lapack_int ldbb,\n                           lapack_complex_double* x, lapack_int ldx );\n\nlapack_int LAPACKE_chbgv( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int ka, lapack_int kb,\n                          lapack_complex_float* ab, lapack_int ldab,\n                          lapack_complex_float* bb, lapack_int ldbb, float* w,\n                          lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zhbgv( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int ka, lapack_int kb,\n                          lapack_complex_double* ab, lapack_int ldab,\n                          lapack_complex_double* bb, lapack_int ldbb, double* w,\n                          lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_chbgvd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb,\n                           lapack_complex_float* ab, lapack_int ldab,\n                           lapack_complex_float* bb, lapack_int ldbb, float* w,\n                           lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zhbgvd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb,\n                           lapack_complex_double* ab, lapack_int ldab,\n                           lapack_complex_double* bb, lapack_int ldbb,\n                           double* w, lapack_complex_double* z,\n                           lapack_int ldz );\n\nlapack_int LAPACKE_chbgvx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int ka, lapack_int kb,\n             
              lapack_complex_float* ab, lapack_int ldab,\n                           lapack_complex_float* bb, lapack_int ldbb,\n                           lapack_complex_float* q, lapack_int ldq, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, lapack_complex_float* z,\n                           lapack_int ldz, lapack_int* ifail );\nlapack_int LAPACKE_zhbgvx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int ka, lapack_int kb,\n                           lapack_complex_double* ab, lapack_int ldab,\n                           lapack_complex_double* bb, lapack_int ldbb,\n                           lapack_complex_double* q, lapack_int ldq, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_chbtrd( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int kd, lapack_complex_float* ab,\n                           lapack_int ldab, float* d, float* e,\n                           lapack_complex_float* q, lapack_int ldq );\nlapack_int LAPACKE_zhbtrd( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int kd, lapack_complex_double* ab,\n                           lapack_int ldab, double* d, double* e,\n                           lapack_complex_double* q, lapack_int ldq );\n\nlapack_int LAPACKE_checon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_zhecon( int matrix_order, char uplo, lapack_int n,\n                           const 
lapack_complex_double* a, lapack_int lda,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\n\nlapack_int LAPACKE_cheequb( int matrix_order, char uplo, lapack_int n,\n                            const lapack_complex_float* a, lapack_int lda,\n                            float* s, float* scond, float* amax );\nlapack_int LAPACKE_zheequb( int matrix_order, char uplo, lapack_int n,\n                            const lapack_complex_double* a, lapack_int lda,\n                            double* s, double* scond, double* amax );\n\nlapack_int LAPACKE_cheev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_complex_float* a, lapack_int lda, float* w );\nlapack_int LAPACKE_zheev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_complex_double* a, lapack_int lda, double* w );\n\nlapack_int LAPACKE_cheevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda, float* w );\nlapack_int LAPACKE_zheevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           double* w );\n\nlapack_int LAPACKE_cheevr( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, float vl, float vu, lapack_int il,\n                           lapack_int iu, float abstol, lapack_int* m, float* w,\n                           lapack_complex_float* z, lapack_int ldz,\n                           lapack_int* isuppz );\nlapack_int LAPACKE_zheevr( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, double vl, double vu, lapack_int il,\n                           lapack_int iu, double abstol, 
lapack_int* m,\n                           double* w, lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* isuppz );\n\nlapack_int LAPACKE_cheevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, float vl, float vu, lapack_int il,\n                           lapack_int iu, float abstol, lapack_int* m, float* w,\n                           lapack_complex_float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_zheevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, double vl, double vu, lapack_int il,\n                           lapack_int iu, double abstol, lapack_int* m,\n                           double* w, lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_chegst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_zhegst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* b,\n                           lapack_int ldb );\n\nlapack_int LAPACKE_chegv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, lapack_complex_float* a,\n                          lapack_int lda, lapack_complex_float* b,\n                          lapack_int ldb, float* w );\nlapack_int LAPACKE_zhegv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, lapack_complex_double* a,\n                          
lapack_int lda, lapack_complex_double* b,\n                          lapack_int ldb, double* w );\n\nlapack_int LAPACKE_chegvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, float* w );\nlapack_int LAPACKE_zhegvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, double* w );\n\nlapack_int LAPACKE_chegvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, lapack_complex_float* z,\n                           lapack_int ldz, lapack_int* ifail );\nlapack_int LAPACKE_zhegvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_cherfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* af,\n                           lapack_int ldaf, const 
lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zherfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* af,\n                           lapack_int ldaf, const lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_cherfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_float* a, lapack_int lda,\n                            const lapack_complex_float* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const float* s,\n                            const lapack_complex_float* b, lapack_int ldb,\n                            lapack_complex_float* x, lapack_int ldx,\n                            float* rcond, float* berr, lapack_int n_err_bnds,\n                            float* err_bnds_norm, float* err_bnds_comp,\n                            lapack_int nparams, float* params );\nlapack_int LAPACKE_zherfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_double* a, lapack_int lda,\n                            const lapack_complex_double* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const double* s,\n                            const lapack_complex_double* b, lapack_int ldb,\n                            lapack_complex_double* x, lapack_int ldx,\n                            double* rcond, double* berr, 
lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\n\nlapack_int LAPACKE_chesv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_float* a,\n                          lapack_int lda, lapack_int* ipiv,\n                          lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zhesv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_double* a,\n                          lapack_int lda, lapack_int* ipiv,\n                          lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_chesvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* af,\n                           lapack_int ldaf, lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_zhesvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* af,\n                           lapack_int ldaf, lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_chesvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* af, lapack_int 
ldaf,\n                            lapack_int* ipiv, char* equed, float* s,\n                            lapack_complex_float* b, lapack_int ldb,\n                            lapack_complex_float* x, lapack_int ldx,\n                            float* rcond, float* rpvgrw, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_zhesvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, double* s,\n                            lapack_complex_double* b, lapack_int ldb,\n                            lapack_complex_double* x, lapack_int ldx,\n                            double* rcond, double* rpvgrw, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\n\nlapack_int LAPACKE_chetrd( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda, float* d,\n                           float* e, lapack_complex_float* tau );\nlapack_int LAPACKE_zhetrd( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda, double* d,\n                           double* e, lapack_complex_double* tau );\n\nlapack_int LAPACKE_chetrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* ipiv );\nlapack_int LAPACKE_zhetrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int 
lda,\n                           lapack_int* ipiv );\n\nlapack_int LAPACKE_chetri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           const lapack_int* ipiv );\nlapack_int LAPACKE_zhetri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           const lapack_int* ipiv );\n\nlapack_int LAPACKE_chetrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_int* ipiv,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zhetrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_int* ipiv,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_chfrk( int matrix_order, char transr, char uplo, char trans,\n                          lapack_int n, lapack_int k, float alpha,\n                          const lapack_complex_float* a, lapack_int lda,\n                          float beta, lapack_complex_float* c );\nlapack_int LAPACKE_zhfrk( int matrix_order, char transr, char uplo, char trans,\n                          lapack_int n, lapack_int k, double alpha,\n                          const lapack_complex_double* a, lapack_int lda,\n                          double beta, lapack_complex_double* c );\n\nlapack_int LAPACKE_shgeqz( int matrix_order, char job, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           float* h, lapack_int ldh, float* t, lapack_int ldt,\n                           float* alphar, float* alphai, float* beta, float* q,\n                           lapack_int ldq, float* z, lapack_int ldz 
);\nlapack_int LAPACKE_dhgeqz( int matrix_order, char job, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           double* h, lapack_int ldh, double* t, lapack_int ldt,\n                           double* alphar, double* alphai, double* beta,\n                           double* q, lapack_int ldq, double* z,\n                           lapack_int ldz );\nlapack_int LAPACKE_chgeqz( int matrix_order, char job, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           lapack_complex_float* h, lapack_int ldh,\n                           lapack_complex_float* t, lapack_int ldt,\n                           lapack_complex_float* alpha,\n                           lapack_complex_float* beta, lapack_complex_float* q,\n                           lapack_int ldq, lapack_complex_float* z,\n                           lapack_int ldz );\nlapack_int LAPACKE_zhgeqz( int matrix_order, char job, char compq, char compz,\n                           lapack_int n, lapack_int ilo, lapack_int ihi,\n                           lapack_complex_double* h, lapack_int ldh,\n                           lapack_complex_double* t, lapack_int ldt,\n                           lapack_complex_double* alpha,\n                           lapack_complex_double* beta,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_chpcon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* ap,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_zhpcon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* ap,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\n\nlapack_int 
LAPACKE_chpev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_complex_float* ap, float* w,\n                          lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zhpev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_complex_double* ap, double* w,\n                          lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_chpevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_complex_float* ap, float* w,\n                           lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zhpevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_complex_double* ap, double* w,\n                           lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_chpevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_complex_float* ap, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, lapack_complex_float* z,\n                           lapack_int ldz, lapack_int* ifail );\nlapack_int LAPACKE_zhpevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_complex_double* ap, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_chpgst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, lapack_complex_float* ap,\n                           const lapack_complex_float* bp );\nlapack_int LAPACKE_zhpgst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, 
lapack_complex_double* ap,\n                           const lapack_complex_double* bp );\n\nlapack_int LAPACKE_chpgv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, lapack_complex_float* ap,\n                          lapack_complex_float* bp, float* w,\n                          lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zhpgv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, lapack_complex_double* ap,\n                          lapack_complex_double* bp, double* w,\n                          lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_chpgvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, lapack_complex_float* ap,\n                           lapack_complex_float* bp, float* w,\n                           lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zhpgvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, lapack_complex_double* ap,\n                           lapack_complex_double* bp, double* w,\n                           lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_chpgvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n,\n                           lapack_complex_float* ap, lapack_complex_float* bp,\n                           float vl, float vu, lapack_int il, lapack_int iu,\n                           float abstol, lapack_int* m, float* w,\n                           lapack_complex_float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_zhpgvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n,\n                           lapack_complex_double* ap, lapack_complex_double* bp,\n                           double vl, 
double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_chprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           const lapack_complex_float* afp,\n                           const lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zhprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* ap,\n                           const lapack_complex_double* afp,\n                           const lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_chpsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_float* ap,\n                          lapack_int* ipiv, lapack_complex_float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_zhpsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_double* ap,\n                          lapack_int* ipiv, lapack_complex_double* b,\n                          lapack_int ldb );\n\nlapack_int LAPACKE_chpsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           lapack_complex_float* afp, lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                 
          lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_zhpsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* ap,\n                           lapack_complex_double* afp, lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_chptrd( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* ap, float* d, float* e,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zhptrd( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* ap, double* d, double* e,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_chptrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* ap, lapack_int* ipiv );\nlapack_int LAPACKE_zhptrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* ap, lapack_int* ipiv );\n\nlapack_int LAPACKE_chptri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* ap, const lapack_int* ipiv );\nlapack_int LAPACKE_zhptri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* ap, const lapack_int* ipiv );\n\nlapack_int LAPACKE_chptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           const lapack_int* ipiv, lapack_complex_float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_zhptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* 
ap,\n                           const lapack_int* ipiv, lapack_complex_double* b,\n                           lapack_int ldb );\n\nlapack_int LAPACKE_shsein( int matrix_order, char job, char eigsrc, char initv,\n                           lapack_logical* select, lapack_int n, const float* h,\n                           lapack_int ldh, float* wr, const float* wi,\n                           float* vl, lapack_int ldvl, float* vr,\n                           lapack_int ldvr, lapack_int mm, lapack_int* m,\n                           lapack_int* ifaill, lapack_int* ifailr );\nlapack_int LAPACKE_dhsein( int matrix_order, char job, char eigsrc, char initv,\n                           lapack_logical* select, lapack_int n,\n                           const double* h, lapack_int ldh, double* wr,\n                           const double* wi, double* vl, lapack_int ldvl,\n                           double* vr, lapack_int ldvr, lapack_int mm,\n                           lapack_int* m, lapack_int* ifaill,\n                           lapack_int* ifailr );\nlapack_int LAPACKE_chsein( int matrix_order, char job, char eigsrc, char initv,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_float* h, lapack_int ldh,\n                           lapack_complex_float* w, lapack_complex_float* vl,\n                           lapack_int ldvl, lapack_complex_float* vr,\n                           lapack_int ldvr, lapack_int mm, lapack_int* m,\n                           lapack_int* ifaill, lapack_int* ifailr );\nlapack_int LAPACKE_zhsein( int matrix_order, char job, char eigsrc, char initv,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_double* h, lapack_int ldh,\n                           lapack_complex_double* w, lapack_complex_double* vl,\n                           lapack_int ldvl, lapack_complex_double* vr,\n                           
lapack_int ldvr, lapack_int mm, lapack_int* m,\n                           lapack_int* ifaill, lapack_int* ifailr );\n\nlapack_int LAPACKE_shseqr( int matrix_order, char job, char compz, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, float* h,\n                           lapack_int ldh, float* wr, float* wi, float* z,\n                           lapack_int ldz );\nlapack_int LAPACKE_dhseqr( int matrix_order, char job, char compz, lapack_int n,\n                           lapack_int ilo, lapack_int ihi, double* h,\n                           lapack_int ldh, double* wr, double* wi, double* z,\n                           lapack_int ldz );\nlapack_int LAPACKE_chseqr( int matrix_order, char job, char compz, lapack_int n,\n                           lapack_int ilo, lapack_int ihi,\n                           lapack_complex_float* h, lapack_int ldh,\n                           lapack_complex_float* w, lapack_complex_float* z,\n                           lapack_int ldz );\nlapack_int LAPACKE_zhseqr( int matrix_order, char job, char compz, lapack_int n,\n                           lapack_int ilo, lapack_int ihi,\n                           lapack_complex_double* h, lapack_int ldh,\n                           lapack_complex_double* w, lapack_complex_double* z,\n                           lapack_int ldz );\n\nlapack_int LAPACKE_clacgv( lapack_int n, lapack_complex_float* x,\n                           lapack_int incx );\nlapack_int LAPACKE_zlacgv( lapack_int n, lapack_complex_double* x,\n                           lapack_int incx );\n\nlapack_int LAPACKE_slacpy( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, const float* a, lapack_int lda, float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_dlacpy( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, const double* a, lapack_int lda, double* b,\n                           lapack_int ldb );\nlapack_int 
LAPACKE_clacpy( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, const lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_zlacpy( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, const lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb );\n\nlapack_int LAPACKE_zlag2c( int matrix_order, lapack_int m, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_float* sa, lapack_int ldsa );\n\nlapack_int LAPACKE_slag2d( int matrix_order, lapack_int m, lapack_int n,\n                           const float* sa, lapack_int ldsa, double* a,\n                           lapack_int lda );\n\nlapack_int LAPACKE_dlag2s( int matrix_order, lapack_int m, lapack_int n,\n                           const double* a, lapack_int lda, float* sa,\n                           lapack_int ldsa );\n\nlapack_int LAPACKE_clag2z( int matrix_order, lapack_int m, lapack_int n,\n                           const lapack_complex_float* sa, lapack_int ldsa,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_slagge( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, const float* d,\n                           float* a, lapack_int lda, lapack_int* iseed );\nlapack_int LAPACKE_dlagge( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, const double* d,\n                           double* a, lapack_int lda, lapack_int* iseed );\nlapack_int LAPACKE_clagge( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, const float* d,\n                           lapack_complex_float* a, 
lapack_int lda,\n                           lapack_int* iseed );\nlapack_int LAPACKE_zlagge( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int kl, lapack_int ku, const double* d,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* iseed );\n\nfloat LAPACKE_slamch( char cmach );\ndouble LAPACKE_dlamch( char cmach );\n\nfloat LAPACKE_slange( int matrix_order, char norm, lapack_int m,\n                           lapack_int n, const float* a, lapack_int lda );\ndouble LAPACKE_dlange( int matrix_order, char norm, lapack_int m,\n                           lapack_int n, const double* a, lapack_int lda );\nfloat LAPACKE_clange( int matrix_order, char norm, lapack_int m,\n                           lapack_int n, const lapack_complex_float* a,\n                           lapack_int lda );\ndouble LAPACKE_zlange( int matrix_order, char norm, lapack_int m,\n                           lapack_int n, const lapack_complex_double* a,\n                           lapack_int lda );\n\nfloat LAPACKE_clanhe( int matrix_order, char norm, char uplo, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda );\ndouble LAPACKE_zlanhe( int matrix_order, char norm, char uplo, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda );\n\nfloat LAPACKE_slansy( int matrix_order, char norm, char uplo, lapack_int n,\n                           const float* a, lapack_int lda );\ndouble LAPACKE_dlansy( int matrix_order, char norm, char uplo, lapack_int n,\n                           const double* a, lapack_int lda );\nfloat LAPACKE_clansy( int matrix_order, char norm, char uplo, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda );\ndouble LAPACKE_zlansy( int matrix_order, char norm, char uplo, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda );\n\nfloat LAPACKE_slantr( 
int matrix_order, char norm, char uplo, char diag,\n                           lapack_int m, lapack_int n, const float* a,\n                           lapack_int lda );\ndouble LAPACKE_dlantr( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int m, lapack_int n, const double* a,\n                           lapack_int lda );\nfloat LAPACKE_clantr( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int m, lapack_int n, const lapack_complex_float* a,\n                           lapack_int lda );\ndouble LAPACKE_zlantr( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int m, lapack_int n, const lapack_complex_double* a,\n                           lapack_int lda );\n\n\nlapack_int LAPACKE_slarfb( int matrix_order, char side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, const float* v, lapack_int ldv,\n                           const float* t, lapack_int ldt, float* c,\n                           lapack_int ldc );\nlapack_int LAPACKE_dlarfb( int matrix_order, char side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, const double* v, lapack_int ldv,\n                           const double* t, lapack_int ldt, double* c,\n                           lapack_int ldc );\nlapack_int LAPACKE_clarfb( int matrix_order, char side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, const lapack_complex_float* v,\n                           lapack_int ldv, const lapack_complex_float* t,\n                           lapack_int ldt, lapack_complex_float* c,\n                           lapack_int ldc );\nlapack_int LAPACKE_zlarfb( int matrix_order, char side, char trans, char direct,\n                           char 
storev, lapack_int m, lapack_int n,\n                           lapack_int k, const lapack_complex_double* v,\n                           lapack_int ldv, const lapack_complex_double* t,\n                           lapack_int ldt, lapack_complex_double* c,\n                           lapack_int ldc );\n\nlapack_int LAPACKE_slarfg( lapack_int n, float* alpha, float* x,\n                           lapack_int incx, float* tau );\nlapack_int LAPACKE_dlarfg( lapack_int n, double* alpha, double* x,\n                           lapack_int incx, double* tau );\nlapack_int LAPACKE_clarfg( lapack_int n, lapack_complex_float* alpha,\n                           lapack_complex_float* x, lapack_int incx,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_zlarfg( lapack_int n, lapack_complex_double* alpha,\n                           lapack_complex_double* x, lapack_int incx,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_slarft( int matrix_order, char direct, char storev,\n                           lapack_int n, lapack_int k, const float* v,\n                           lapack_int ldv, const float* tau, float* t,\n                           lapack_int ldt );\nlapack_int LAPACKE_dlarft( int matrix_order, char direct, char storev,\n                           lapack_int n, lapack_int k, const double* v,\n                           lapack_int ldv, const double* tau, double* t,\n                           lapack_int ldt );\nlapack_int LAPACKE_clarft( int matrix_order, char direct, char storev,\n                           lapack_int n, lapack_int k,\n                           const lapack_complex_float* v, lapack_int ldv,\n                           const lapack_complex_float* tau,\n                           lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_zlarft( int matrix_order, char direct, char storev,\n                           lapack_int n, lapack_int k,\n                           const 
lapack_complex_double* v, lapack_int ldv,\n                           const lapack_complex_double* tau,\n                           lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_slarfx( int matrix_order, char side, lapack_int m,\n                           lapack_int n, const float* v, float tau, float* c,\n                           lapack_int ldc, float* work );\nlapack_int LAPACKE_dlarfx( int matrix_order, char side, lapack_int m,\n                           lapack_int n, const double* v, double tau, double* c,\n                           lapack_int ldc, double* work );\nlapack_int LAPACKE_clarfx( int matrix_order, char side, lapack_int m,\n                           lapack_int n, const lapack_complex_float* v,\n                           lapack_complex_float tau, lapack_complex_float* c,\n                           lapack_int ldc, lapack_complex_float* work );\nlapack_int LAPACKE_zlarfx( int matrix_order, char side, lapack_int m,\n                           lapack_int n, const lapack_complex_double* v,\n                           lapack_complex_double tau, lapack_complex_double* c,\n                           lapack_int ldc, lapack_complex_double* work );\n\nlapack_int LAPACKE_slarnv( lapack_int idist, lapack_int* iseed, lapack_int n,\n                           float* x );\nlapack_int LAPACKE_dlarnv( lapack_int idist, lapack_int* iseed, lapack_int n,\n                           double* x );\nlapack_int LAPACKE_clarnv( lapack_int idist, lapack_int* iseed, lapack_int n,\n                           lapack_complex_float* x );\nlapack_int LAPACKE_zlarnv( lapack_int idist, lapack_int* iseed, lapack_int n,\n                           lapack_complex_double* x );\n\nlapack_int LAPACKE_slaset( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, float alpha, float beta, float* a,\n                           lapack_int lda );\nlapack_int LAPACKE_dlaset( int matrix_order, char uplo, lapack_int m,\n                        
   lapack_int n, double alpha, double beta, double* a,\n                           lapack_int lda );\nlapack_int LAPACKE_claset( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, lapack_complex_float alpha,\n                           lapack_complex_float beta, lapack_complex_float* a,\n                           lapack_int lda );\nlapack_int LAPACKE_zlaset( int matrix_order, char uplo, lapack_int m,\n                           lapack_int n, lapack_complex_double alpha,\n                           lapack_complex_double beta, lapack_complex_double* a,\n                           lapack_int lda );\n\nlapack_int LAPACKE_slasrt( char id, lapack_int n, float* d );\nlapack_int LAPACKE_dlasrt( char id, lapack_int n, double* d );\n\nlapack_int LAPACKE_slaswp( int matrix_order, lapack_int n, float* a,\n                           lapack_int lda, lapack_int k1, lapack_int k2,\n                           const lapack_int* ipiv, lapack_int incx );\nlapack_int LAPACKE_dlaswp( int matrix_order, lapack_int n, double* a,\n                           lapack_int lda, lapack_int k1, lapack_int k2,\n                           const lapack_int* ipiv, lapack_int incx );\nlapack_int LAPACKE_claswp( int matrix_order, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int k1, lapack_int k2, const lapack_int* ipiv,\n                           lapack_int incx );\nlapack_int LAPACKE_zlaswp( int matrix_order, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int k1, lapack_int k2, const lapack_int* ipiv,\n                           lapack_int incx );\n\nlapack_int LAPACKE_slatms( int matrix_order, lapack_int m, lapack_int n,\n                           char dist, lapack_int* iseed, char sym, float* d,\n                           lapack_int mode, float cond, float dmax,\n                           lapack_int kl, lapack_int ku, 
char pack, float* a,\n                           lapack_int lda );\nlapack_int LAPACKE_dlatms( int matrix_order, lapack_int m, lapack_int n,\n                           char dist, lapack_int* iseed, char sym, double* d,\n                           lapack_int mode, double cond, double dmax,\n                           lapack_int kl, lapack_int ku, char pack, double* a,\n                           lapack_int lda );\nlapack_int LAPACKE_clatms( int matrix_order, lapack_int m, lapack_int n,\n                           char dist, lapack_int* iseed, char sym, float* d,\n                           lapack_int mode, float cond, float dmax,\n                           lapack_int kl, lapack_int ku, char pack,\n                           lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zlatms( int matrix_order, lapack_int m, lapack_int n,\n                           char dist, lapack_int* iseed, char sym, double* d,\n                           lapack_int mode, double cond, double dmax,\n                           lapack_int kl, lapack_int ku, char pack,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_slauum( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda );\nlapack_int LAPACKE_dlauum( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda );\nlapack_int LAPACKE_clauum( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zlauum( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_sopgtr( int matrix_order, char uplo, lapack_int n,\n                           const float* ap, const float* tau, float* q,\n                           lapack_int ldq );\nlapack_int LAPACKE_dopgtr( int matrix_order, char uplo, lapack_int n,\n                           const double* ap, const 
double* tau, double* q,\n                           lapack_int ldq );\n\nlapack_int LAPACKE_sopmtr( int matrix_order, char side, char uplo, char trans,\n                           lapack_int m, lapack_int n, const float* ap,\n                           const float* tau, float* c, lapack_int ldc );\nlapack_int LAPACKE_dopmtr( int matrix_order, char side, char uplo, char trans,\n                           lapack_int m, lapack_int n, const double* ap,\n                           const double* tau, double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sorgbr( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int k, float* a, lapack_int lda,\n                           const float* tau );\nlapack_int LAPACKE_dorgbr( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int k, double* a,\n                           lapack_int lda, const double* tau );\n\nlapack_int LAPACKE_sorghr( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, float* a, lapack_int lda,\n                           const float* tau );\nlapack_int LAPACKE_dorghr( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, double* a, lapack_int lda,\n                           const double* tau );\n\nlapack_int LAPACKE_sorglq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, float* a, lapack_int lda,\n                           const float* tau );\nlapack_int LAPACKE_dorglq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, double* a, lapack_int lda,\n                           const double* tau );\n\nlapack_int LAPACKE_sorgql( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, float* a, lapack_int lda,\n                           const float* tau );\nlapack_int LAPACKE_dorgql( int matrix_order, lapack_int m, lapack_int n,\n                      
     lapack_int k, double* a, lapack_int lda,\n                           const double* tau );\n\nlapack_int LAPACKE_sorgqr( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, float* a, lapack_int lda,\n                           const float* tau );\nlapack_int LAPACKE_dorgqr( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, double* a, lapack_int lda,\n                           const double* tau );\n\nlapack_int LAPACKE_sorgrq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, float* a, lapack_int lda,\n                           const float* tau );\nlapack_int LAPACKE_dorgrq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, double* a, lapack_int lda,\n                           const double* tau );\n\nlapack_int LAPACKE_sorgtr( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda, const float* tau );\nlapack_int LAPACKE_dorgtr( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda, const double* tau );\n\nlapack_int LAPACKE_sormbr( int matrix_order, char vect, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const float* a, lapack_int lda, const float* tau,\n                           float* c, lapack_int ldc );\nlapack_int LAPACKE_dormbr( int matrix_order, char vect, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const double* a, lapack_int lda, const double* tau,\n                           double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sormhr( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, const float* a, lapack_int lda,\n                           const float* tau, 
float* c, lapack_int ldc );\nlapack_int LAPACKE_dormhr( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, const double* a, lapack_int lda,\n                           const double* tau, double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sormlq( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const float* a, lapack_int lda, const float* tau,\n                           float* c, lapack_int ldc );\nlapack_int LAPACKE_dormlq( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const double* a, lapack_int lda, const double* tau,\n                           double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sormql( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const float* a, lapack_int lda, const float* tau,\n                           float* c, lapack_int ldc );\nlapack_int LAPACKE_dormql( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const double* a, lapack_int lda, const double* tau,\n                           double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sormqr( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const float* a, lapack_int lda, const float* tau,\n                           float* c, lapack_int ldc );\nlapack_int LAPACKE_dormqr( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const double* a, lapack_int lda, const double* tau,\n                           double* c, lapack_int ldc );\n\nlapack_int 
LAPACKE_sormrq( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const float* a, lapack_int lda, const float* tau,\n                           float* c, lapack_int ldc );\nlapack_int LAPACKE_dormrq( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const double* a, lapack_int lda, const double* tau,\n                           double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sormrz( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           lapack_int l, const float* a, lapack_int lda,\n                           const float* tau, float* c, lapack_int ldc );\nlapack_int LAPACKE_dormrz( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           lapack_int l, const double* a, lapack_int lda,\n                           const double* tau, double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sormtr( int matrix_order, char side, char uplo, char trans,\n                           lapack_int m, lapack_int n, const float* a,\n                           lapack_int lda, const float* tau, float* c,\n                           lapack_int ldc );\nlapack_int LAPACKE_dormtr( int matrix_order, char side, char uplo, char trans,\n                           lapack_int m, lapack_int n, const double* a,\n                           lapack_int lda, const double* tau, double* c,\n                           lapack_int ldc );\n\nlapack_int LAPACKE_spbcon( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const float* ab, lapack_int ldab,\n                           float anorm, float* rcond );\nlapack_int LAPACKE_dpbcon( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const double* ab, 
lapack_int ldab,\n                           double anorm, double* rcond );\nlapack_int LAPACKE_cpbcon( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const lapack_complex_float* ab,\n                           lapack_int ldab, float anorm, float* rcond );\nlapack_int LAPACKE_zpbcon( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const lapack_complex_double* ab,\n                           lapack_int ldab, double anorm, double* rcond );\n\nlapack_int LAPACKE_spbequ( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const float* ab, lapack_int ldab,\n                           float* s, float* scond, float* amax );\nlapack_int LAPACKE_dpbequ( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const double* ab, lapack_int ldab,\n                           double* s, double* scond, double* amax );\nlapack_int LAPACKE_cpbequ( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const lapack_complex_float* ab,\n                           lapack_int ldab, float* s, float* scond,\n                           float* amax );\nlapack_int LAPACKE_zpbequ( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, const lapack_complex_double* ab,\n                           lapack_int ldab, double* s, double* scond,\n                           double* amax );\n\nlapack_int LAPACKE_spbrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs, const float* ab,\n                           lapack_int ldab, const float* afb, lapack_int ldafb,\n                           const float* b, lapack_int ldb, float* x,\n                           lapack_int ldx, float* ferr, float* berr );\nlapack_int LAPACKE_dpbrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs, const double* ab,\n  
                         lapack_int ldab, const double* afb, lapack_int ldafb,\n                           const double* b, lapack_int ldb, double* x,\n                           lapack_int ldx, double* ferr, double* berr );\nlapack_int LAPACKE_cpbrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           const lapack_complex_float* afb, lapack_int ldafb,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zpbrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_double* ab, lapack_int ldab,\n                           const lapack_complex_double* afb, lapack_int ldafb,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_spbstf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kb, float* bb, lapack_int ldbb );\nlapack_int LAPACKE_dpbstf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kb, double* bb, lapack_int ldbb );\nlapack_int LAPACKE_cpbstf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kb, lapack_complex_float* bb,\n                           lapack_int ldbb );\nlapack_int LAPACKE_zpbstf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kb, lapack_complex_double* bb,\n                           lapack_int ldbb );\n\nlapack_int LAPACKE_spbsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int kd, lapack_int 
nrhs, float* ab,\n                          lapack_int ldab, float* b, lapack_int ldb );\nlapack_int LAPACKE_dpbsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int kd, lapack_int nrhs, double* ab,\n                          lapack_int ldab, double* b, lapack_int ldb );\nlapack_int LAPACKE_cpbsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int kd, lapack_int nrhs,\n                          lapack_complex_float* ab, lapack_int ldab,\n                          lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpbsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int kd, lapack_int nrhs,\n                          lapack_complex_double* ab, lapack_int ldab,\n                          lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_spbsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs, float* ab,\n                           lapack_int ldab, float* afb, lapack_int ldafb,\n                           char* equed, float* s, float* b, lapack_int ldb,\n                           float* x, lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_dpbsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs, double* ab,\n                           lapack_int ldab, double* afb, lapack_int ldafb,\n                           char* equed, double* s, double* b, lapack_int ldb,\n                           double* x, lapack_int ldx, double* rcond,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_cpbsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs,\n                           lapack_complex_float* ab, lapack_int ldab,\n                           lapack_complex_float* afb, lapack_int ldafb,\n  
                         char* equed, float* s, lapack_complex_float* b,\n                           lapack_int ldb, lapack_complex_float* x,\n                           lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zpbsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs,\n                           lapack_complex_double* ab, lapack_int ldab,\n                           lapack_complex_double* afb, lapack_int ldafb,\n                           char* equed, double* s, lapack_complex_double* b,\n                           lapack_int ldb, lapack_complex_double* x,\n                           lapack_int ldx, double* rcond, double* ferr,\n                           double* berr );\n\nlapack_int LAPACKE_spbtrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, float* ab, lapack_int ldab );\nlapack_int LAPACKE_dpbtrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, double* ab, lapack_int ldab );\nlapack_int LAPACKE_cpbtrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_complex_float* ab,\n                           lapack_int ldab );\nlapack_int LAPACKE_zpbtrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_complex_double* ab,\n                           lapack_int ldab );\n\nlapack_int LAPACKE_spbtrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs, const float* ab,\n                           lapack_int ldab, float* b, lapack_int ldb );\nlapack_int LAPACKE_dpbtrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs, const double* ab,\n                           lapack_int ldab, double* b, lapack_int ldb );\nlapack_int LAPACKE_cpbtrs( int matrix_order, char uplo, lapack_int n,\n       
                    lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpbtrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_double* ab, lapack_int ldab,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_spftrf( int matrix_order, char transr, char uplo,\n                           lapack_int n, float* a );\nlapack_int LAPACKE_dpftrf( int matrix_order, char transr, char uplo,\n                           lapack_int n, double* a );\nlapack_int LAPACKE_cpftrf( int matrix_order, char transr, char uplo,\n                           lapack_int n, lapack_complex_float* a );\nlapack_int LAPACKE_zpftrf( int matrix_order, char transr, char uplo,\n                           lapack_int n, lapack_complex_double* a );\n\nlapack_int LAPACKE_spftri( int matrix_order, char transr, char uplo,\n                           lapack_int n, float* a );\nlapack_int LAPACKE_dpftri( int matrix_order, char transr, char uplo,\n                           lapack_int n, double* a );\nlapack_int LAPACKE_cpftri( int matrix_order, char transr, char uplo,\n                           lapack_int n, lapack_complex_float* a );\nlapack_int LAPACKE_zpftri( int matrix_order, char transr, char uplo,\n                           lapack_int n, lapack_complex_double* a );\n\nlapack_int LAPACKE_spftrs( int matrix_order, char transr, char uplo,\n                           lapack_int n, lapack_int nrhs, const float* a,\n                           float* b, lapack_int ldb );\nlapack_int LAPACKE_dpftrs( int matrix_order, char transr, char uplo,\n                           lapack_int n, lapack_int nrhs, const double* a,\n                           double* b, lapack_int ldb );\nlapack_int LAPACKE_cpftrs( int matrix_order, char 
transr, char uplo,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_float* a,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpftrs( int matrix_order, char transr, char uplo,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_double* a,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_spocon( int matrix_order, char uplo, lapack_int n,\n                           const float* a, lapack_int lda, float anorm,\n                           float* rcond );\nlapack_int LAPACKE_dpocon( int matrix_order, char uplo, lapack_int n,\n                           const double* a, lapack_int lda, double anorm,\n                           double* rcond );\nlapack_int LAPACKE_cpocon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           float anorm, float* rcond );\nlapack_int LAPACKE_zpocon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           double anorm, double* rcond );\n\nlapack_int LAPACKE_spoequ( int matrix_order, lapack_int n, const float* a,\n                           lapack_int lda, float* s, float* scond,\n                           float* amax );\nlapack_int LAPACKE_dpoequ( int matrix_order, lapack_int n, const double* a,\n                           lapack_int lda, double* s, double* scond,\n                           double* amax );\nlapack_int LAPACKE_cpoequ( int matrix_order, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           float* s, float* scond, float* amax );\nlapack_int LAPACKE_zpoequ( int matrix_order, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                      
     double* s, double* scond, double* amax );\n\nlapack_int LAPACKE_spoequb( int matrix_order, lapack_int n, const float* a,\n                            lapack_int lda, float* s, float* scond,\n                            float* amax );\nlapack_int LAPACKE_dpoequb( int matrix_order, lapack_int n, const double* a,\n                            lapack_int lda, double* s, double* scond,\n                            double* amax );\nlapack_int LAPACKE_cpoequb( int matrix_order, lapack_int n,\n                            const lapack_complex_float* a, lapack_int lda,\n                            float* s, float* scond, float* amax );\nlapack_int LAPACKE_zpoequb( int matrix_order, lapack_int n,\n                            const lapack_complex_double* a, lapack_int lda,\n                            double* s, double* scond, double* amax );\n\nlapack_int LAPACKE_sporfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           const float* af, lapack_int ldaf, const float* b,\n                           lapack_int ldb, float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_dporfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           const double* af, lapack_int ldaf, const double* b,\n                           lapack_int ldb, double* x, lapack_int ldx,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_cporfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* af,\n                           lapack_int ldaf, const lapack_complex_float* b,\n                           lapack_int ldb, lapack_complex_float* x,\n                           lapack_int ldx, float* ferr, float* berr 
);\nlapack_int LAPACKE_zporfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* af,\n                           lapack_int ldaf, const lapack_complex_double* b,\n                           lapack_int ldb, lapack_complex_double* x,\n                           lapack_int ldx, double* ferr, double* berr );\n\nlapack_int LAPACKE_sporfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs, const float* a,\n                            lapack_int lda, const float* af, lapack_int ldaf,\n                            const float* s, const float* b, lapack_int ldb,\n                            float* x, lapack_int ldx, float* rcond, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_dporfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs, const double* a,\n                            lapack_int lda, const double* af, lapack_int ldaf,\n                            const double* s, const double* b, lapack_int ldb,\n                            double* x, lapack_int ldx, double* rcond,\n                            double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\nlapack_int LAPACKE_cporfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_float* a, lapack_int lda,\n                            const lapack_complex_float* af, lapack_int ldaf,\n                            const float* s, const lapack_complex_float* b,\n                            
lapack_int ldb, lapack_complex_float* x,\n                            lapack_int ldx, float* rcond, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_zporfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_double* a, lapack_int lda,\n                            const lapack_complex_double* af, lapack_int ldaf,\n                            const double* s, const lapack_complex_double* b,\n                            lapack_int ldb, lapack_complex_double* x,\n                            lapack_int ldx, double* rcond, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\n\nlapack_int LAPACKE_sposv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, float* a, lapack_int lda, float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_dposv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, double* a, lapack_int lda, double* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_cposv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_float* a,\n                          lapack_int lda, lapack_complex_float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_zposv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_double* a,\n                          lapack_int lda, lapack_complex_double* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_dsposv( int matrix_order, char uplo, lapack_int n,\n                     
      lapack_int nrhs, double* a, lapack_int lda,\n                           double* b, lapack_int ldb, double* x, lapack_int ldx,\n                           lapack_int* iter );\nlapack_int LAPACKE_zcposv( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, lapack_complex_double* x,\n                           lapack_int ldx, lapack_int* iter );\n\nlapack_int LAPACKE_sposvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, float* a, lapack_int lda, float* af,\n                           lapack_int ldaf, char* equed, float* s, float* b,\n                           lapack_int ldb, float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_dposvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, double* a, lapack_int lda,\n                           double* af, lapack_int ldaf, char* equed, double* s,\n                           double* b, lapack_int ldb, double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\nlapack_int LAPACKE_cposvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* af,\n                           lapack_int ldaf, char* equed, float* s,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_zposvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, lapack_complex_double* a,\n                           lapack_int lda, 
lapack_complex_double* af,\n                           lapack_int ldaf, char* equed, double* s,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_sposvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs, float* a,\n                            lapack_int lda, float* af, lapack_int ldaf,\n                            char* equed, float* s, float* b, lapack_int ldb,\n                            float* x, lapack_int ldx, float* rcond,\n                            float* rpvgrw, float* berr, lapack_int n_err_bnds,\n                            float* err_bnds_norm, float* err_bnds_comp,\n                            lapack_int nparams, float* params );\nlapack_int LAPACKE_dposvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs, double* a,\n                            lapack_int lda, double* af, lapack_int ldaf,\n                            char* equed, double* s, double* b, lapack_int ldb,\n                            double* x, lapack_int ldx, double* rcond,\n                            double* rpvgrw, double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\nlapack_int LAPACKE_cposvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* af, lapack_int ldaf,\n                            char* equed, float* s, lapack_complex_float* b,\n                            lapack_int ldb, lapack_complex_float* x,\n                            lapack_int ldx, float* rcond, float* rpvgrw,\n                            
float* berr, lapack_int n_err_bnds,\n                            float* err_bnds_norm, float* err_bnds_comp,\n                            lapack_int nparams, float* params );\nlapack_int LAPACKE_zposvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* af, lapack_int ldaf,\n                            char* equed, double* s, lapack_complex_double* b,\n                            lapack_int ldb, lapack_complex_double* x,\n                            lapack_int ldx, double* rcond, double* rpvgrw,\n                            double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\n\nlapack_int LAPACKE_spotrf( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda );\nlapack_int LAPACKE_dpotrf( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda );\nlapack_int LAPACKE_cpotrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zpotrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_spotri( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda );\nlapack_int LAPACKE_dpotri( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda );\nlapack_int LAPACKE_cpotri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zpotri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_spotrs( int 
matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           float* b, lapack_int ldb );\nlapack_int LAPACKE_dpotrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           double* b, lapack_int ldb );\nlapack_int LAPACKE_cpotrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_zpotrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb );\n\nlapack_int LAPACKE_sppcon( int matrix_order, char uplo, lapack_int n,\n                           const float* ap, float anorm, float* rcond );\nlapack_int LAPACKE_dppcon( int matrix_order, char uplo, lapack_int n,\n                           const double* ap, double anorm, double* rcond );\nlapack_int LAPACKE_cppcon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* ap, float anorm,\n                           float* rcond );\nlapack_int LAPACKE_zppcon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* ap, double anorm,\n                           double* rcond );\n\nlapack_int LAPACKE_sppequ( int matrix_order, char uplo, lapack_int n,\n                           const float* ap, float* s, float* scond,\n                           float* amax );\nlapack_int LAPACKE_dppequ( int matrix_order, char uplo, lapack_int n,\n                           const double* ap, double* s, double* scond,\n                           double* amax );\nlapack_int LAPACKE_cppequ( int matrix_order, 
char uplo, lapack_int n,\n                           const lapack_complex_float* ap, float* s,\n                           float* scond, float* amax );\nlapack_int LAPACKE_zppequ( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* ap, double* s,\n                           double* scond, double* amax );\n\nlapack_int LAPACKE_spprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* ap, const float* afp,\n                           const float* b, lapack_int ldb, float* x,\n                           lapack_int ldx, float* ferr, float* berr );\nlapack_int LAPACKE_dpprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* ap, const double* afp,\n                           const double* b, lapack_int ldb, double* x,\n                           lapack_int ldx, double* ferr, double* berr );\nlapack_int LAPACKE_cpprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           const lapack_complex_float* afp,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zpprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* ap,\n                           const lapack_complex_double* afp,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_sppsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, float* ap, float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_dppsv( int matrix_order, 
char uplo, lapack_int n,\n                          lapack_int nrhs, double* ap, double* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_cppsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_float* ap,\n                          lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zppsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_double* ap,\n                          lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sppsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, float* ap, float* afp, char* equed,\n                           float* s, float* b, lapack_int ldb, float* x,\n                           lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_dppsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, double* ap, double* afp,\n                           char* equed, double* s, double* b, lapack_int ldb,\n                           double* x, lapack_int ldx, double* rcond,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_cppsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, lapack_complex_float* ap,\n                           lapack_complex_float* afp, char* equed, float* s,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_zppsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, lapack_complex_double* ap,\n                           lapack_complex_double* afp, char* equed, double* s,\n                           lapack_complex_double* b, 
lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_spptrf( int matrix_order, char uplo, lapack_int n,\n                           float* ap );\nlapack_int LAPACKE_dpptrf( int matrix_order, char uplo, lapack_int n,\n                           double* ap );\nlapack_int LAPACKE_cpptrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* ap );\nlapack_int LAPACKE_zpptrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* ap );\n\nlapack_int LAPACKE_spptri( int matrix_order, char uplo, lapack_int n,\n                           float* ap );\nlapack_int LAPACKE_dpptri( int matrix_order, char uplo, lapack_int n,\n                           double* ap );\nlapack_int LAPACKE_cpptri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* ap );\nlapack_int LAPACKE_zpptri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* ap );\n\nlapack_int LAPACKE_spptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* ap, float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_dpptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* ap, double* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_cpptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* ap,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_spstrf( int matrix_order, char 
uplo, lapack_int n, float* a,\n                           lapack_int lda, lapack_int* piv, lapack_int* rank,\n                           float tol );\nlapack_int LAPACKE_dpstrf( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda, lapack_int* piv, lapack_int* rank,\n                           double tol );\nlapack_int LAPACKE_cpstrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* piv, lapack_int* rank, float tol );\nlapack_int LAPACKE_zpstrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* piv, lapack_int* rank, double tol );\n\nlapack_int LAPACKE_sptcon( lapack_int n, const float* d, const float* e,\n                           float anorm, float* rcond );\nlapack_int LAPACKE_dptcon( lapack_int n, const double* d, const double* e,\n                           double anorm, double* rcond );\nlapack_int LAPACKE_cptcon( lapack_int n, const float* d,\n                           const lapack_complex_float* e, float anorm,\n                           float* rcond );\nlapack_int LAPACKE_zptcon( lapack_int n, const double* d,\n                           const lapack_complex_double* e, double anorm,\n                           double* rcond );\n\nlapack_int LAPACKE_spteqr( int matrix_order, char compz, lapack_int n, float* d,\n                           float* e, float* z, lapack_int ldz );\nlapack_int LAPACKE_dpteqr( int matrix_order, char compz, lapack_int n,\n                           double* d, double* e, double* z, lapack_int ldz );\nlapack_int LAPACKE_cpteqr( int matrix_order, char compz, lapack_int n, float* d,\n                           float* e, lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zpteqr( int matrix_order, char compz, lapack_int n,\n                           double* d, double* e, 
lapack_complex_double* z,\n                           lapack_int ldz );\n\nlapack_int LAPACKE_sptrfs( int matrix_order, lapack_int n, lapack_int nrhs,\n                           const float* d, const float* e, const float* df,\n                           const float* ef, const float* b, lapack_int ldb,\n                           float* x, lapack_int ldx, float* ferr, float* berr );\nlapack_int LAPACKE_dptrfs( int matrix_order, lapack_int n, lapack_int nrhs,\n                           const double* d, const double* e, const double* df,\n                           const double* ef, const double* b, lapack_int ldb,\n                           double* x, lapack_int ldx, double* ferr,\n                           double* berr );\nlapack_int LAPACKE_cptrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* d,\n                           const lapack_complex_float* e, const float* df,\n                           const lapack_complex_float* ef,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zptrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* d,\n                           const lapack_complex_double* e, const double* df,\n                           const lapack_complex_double* ef,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_sptsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          float* d, float* e, float* b, lapack_int ldb );\nlapack_int LAPACKE_dptsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          double* d, double* e, double* b, lapack_int ldb );\nlapack_int 
LAPACKE_cptsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          float* d, lapack_complex_float* e,\n                          lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zptsv( int matrix_order, lapack_int n, lapack_int nrhs,\n                          double* d, lapack_complex_double* e,\n                          lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sptsvx( int matrix_order, char fact, lapack_int n,\n                           lapack_int nrhs, const float* d, const float* e,\n                           float* df, float* ef, const float* b, lapack_int ldb,\n                           float* x, lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_dptsvx( int matrix_order, char fact, lapack_int n,\n                           lapack_int nrhs, const double* d, const double* e,\n                           double* df, double* ef, const double* b,\n                           lapack_int ldb, double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\nlapack_int LAPACKE_cptsvx( int matrix_order, char fact, lapack_int n,\n                           lapack_int nrhs, const float* d,\n                           const lapack_complex_float* e, float* df,\n                           lapack_complex_float* ef,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_zptsvx( int matrix_order, char fact, lapack_int n,\n                           lapack_int nrhs, const double* d,\n                           const lapack_complex_double* e, double* df,\n                           lapack_complex_double* ef,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int 
ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_spttrf( lapack_int n, float* d, float* e );\nlapack_int LAPACKE_dpttrf( lapack_int n, double* d, double* e );\nlapack_int LAPACKE_cpttrf( lapack_int n, float* d, lapack_complex_float* e );\nlapack_int LAPACKE_zpttrf( lapack_int n, double* d, lapack_complex_double* e );\n\nlapack_int LAPACKE_spttrs( int matrix_order, lapack_int n, lapack_int nrhs,\n                           const float* d, const float* e, float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_dpttrs( int matrix_order, lapack_int n, lapack_int nrhs,\n                           const double* d, const double* e, double* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_cpttrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* d,\n                           const lapack_complex_float* e,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpttrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* d,\n                           const lapack_complex_double* e,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_ssbev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int kd, float* ab, lapack_int ldab, float* w,\n                          float* z, lapack_int ldz );\nlapack_int LAPACKE_dsbev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int kd, double* ab, lapack_int ldab, double* w,\n                          double* z, lapack_int ldz );\n\nlapack_int LAPACKE_ssbevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int kd, float* ab, lapack_int ldab, float* w,\n                           float* z, lapack_int ldz );\nlapack_int LAPACKE_dsbevd( int matrix_order, char 
jobz, char uplo, lapack_int n,\n                           lapack_int kd, double* ab, lapack_int ldab,\n                           double* w, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_ssbevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int kd, float* ab,\n                           lapack_int ldab, float* q, lapack_int ldq, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_dsbevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int kd, double* ab,\n                           lapack_int ldab, double* q, lapack_int ldq,\n                           double vl, double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* ifail );\n\nlapack_int LAPACKE_ssbgst( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb, float* ab,\n                           lapack_int ldab, const float* bb, lapack_int ldbb,\n                           float* x, lapack_int ldx );\nlapack_int LAPACKE_dsbgst( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb, double* ab,\n                           lapack_int ldab, const double* bb, lapack_int ldbb,\n                           double* x, lapack_int ldx );\n\nlapack_int LAPACKE_ssbgv( int matrix_order, char jobz, char uplo, lapack_int n,\n                          lapack_int ka, lapack_int kb, float* ab,\n                          lapack_int ldab, float* bb, lapack_int ldbb, float* w,\n                          float* z, lapack_int ldz );\nlapack_int LAPACKE_dsbgv( int matrix_order, char jobz, char uplo, 
lapack_int n,\n                          lapack_int ka, lapack_int kb, double* ab,\n                          lapack_int ldab, double* bb, lapack_int ldbb,\n                          double* w, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_ssbgvd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb, float* ab,\n                           lapack_int ldab, float* bb, lapack_int ldbb,\n                           float* w, float* z, lapack_int ldz );\nlapack_int LAPACKE_dsbgvd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           lapack_int ka, lapack_int kb, double* ab,\n                           lapack_int ldab, double* bb, lapack_int ldbb,\n                           double* w, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_ssbgvx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int ka, lapack_int kb,\n                           float* ab, lapack_int ldab, float* bb,\n                           lapack_int ldbb, float* q, lapack_int ldq, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_dsbgvx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, lapack_int ka, lapack_int kb,\n                           double* ab, lapack_int ldab, double* bb,\n                           lapack_int ldbb, double* q, lapack_int ldq,\n                           double vl, double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* ifail );\n\nlapack_int LAPACKE_ssbtrd( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int kd, float* ab, lapack_int ldab, 
float* d,\n                           float* e, float* q, lapack_int ldq );\nlapack_int LAPACKE_dsbtrd( int matrix_order, char vect, char uplo, lapack_int n,\n                           lapack_int kd, double* ab, lapack_int ldab,\n                           double* d, double* e, double* q, lapack_int ldq );\n\nlapack_int LAPACKE_ssfrk( int matrix_order, char transr, char uplo, char trans,\n                          lapack_int n, lapack_int k, float alpha,\n                          const float* a, lapack_int lda, float beta,\n                          float* c );\nlapack_int LAPACKE_dsfrk( int matrix_order, char transr, char uplo, char trans,\n                          lapack_int n, lapack_int k, double alpha,\n                          const double* a, lapack_int lda, double beta,\n                          double* c );\n\nlapack_int LAPACKE_sspcon( int matrix_order, char uplo, lapack_int n,\n                           const float* ap, const lapack_int* ipiv, float anorm,\n                           float* rcond );\nlapack_int LAPACKE_dspcon( int matrix_order, char uplo, lapack_int n,\n                           const double* ap, const lapack_int* ipiv,\n                           double anorm, double* rcond );\nlapack_int LAPACKE_cspcon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* ap,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_zspcon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* ap,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\n\nlapack_int LAPACKE_sspev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          float* ap, float* w, float* z, lapack_int ldz );\nlapack_int LAPACKE_dspev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          double* ap, double* w, double* z, lapack_int 
ldz );\n\nlapack_int LAPACKE_sspevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           float* ap, float* w, float* z, lapack_int ldz );\nlapack_int LAPACKE_dspevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           double* ap, double* w, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sspevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, float* ap, float vl, float vu,\n                           lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_dspevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, double* ap, double vl, double vu,\n                           lapack_int il, lapack_int iu, double abstol,\n                           lapack_int* m, double* w, double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_sspgst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, float* ap, const float* bp );\nlapack_int LAPACKE_dspgst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, double* ap, const double* bp );\n\nlapack_int LAPACKE_sspgv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, float* ap, float* bp,\n                          float* w, float* z, lapack_int ldz );\nlapack_int LAPACKE_dspgv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, double* ap, double* bp,\n                          double* w, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sspgvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, float* ap, float* bp,\n                           float* w, float* z, lapack_int ldz 
);\nlapack_int LAPACKE_dspgvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, double* ap, double* bp,\n                           double* w, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sspgvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n, float* ap,\n                           float* bp, float vl, float vu, lapack_int il,\n                           lapack_int iu, float abstol, lapack_int* m, float* w,\n                           float* z, lapack_int ldz, lapack_int* ifail );\nlapack_int LAPACKE_dspgvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n, double* ap,\n                           double* bp, double vl, double vu, lapack_int il,\n                           lapack_int iu, double abstol, lapack_int* m,\n                           double* w, double* z, lapack_int ldz,\n                           lapack_int* ifail );\n\nlapack_int LAPACKE_ssprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* ap, const float* afp,\n                           const lapack_int* ipiv, const float* b,\n                           lapack_int ldb, float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_dsprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* ap, const double* afp,\n                           const lapack_int* ipiv, const double* b,\n                           lapack_int ldb, double* x, lapack_int ldx,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_csprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           const lapack_complex_float* afp,\n                           const lapack_int* ipiv,\n                
           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zsprfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* ap,\n                           const lapack_complex_double* afp,\n                           const lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_sspsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, float* ap, lapack_int* ipiv,\n                          float* b, lapack_int ldb );\nlapack_int LAPACKE_dspsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, double* ap, lapack_int* ipiv,\n                          double* b, lapack_int ldb );\nlapack_int LAPACKE_cspsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_float* ap,\n                          lapack_int* ipiv, lapack_complex_float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_zspsv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_double* ap,\n                          lapack_int* ipiv, lapack_complex_double* b,\n                          lapack_int ldb );\n\nlapack_int LAPACKE_sspsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* ap, float* afp,\n                           lapack_int* ipiv, const float* b, lapack_int ldb,\n                           float* x, lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_dspsvx( int matrix_order, char fact, char uplo, 
lapack_int n,\n                           lapack_int nrhs, const double* ap, double* afp,\n                           lapack_int* ipiv, const double* b, lapack_int ldb,\n                           double* x, lapack_int ldx, double* rcond,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_cspsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           lapack_complex_float* afp, lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_zspsvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* ap,\n                           lapack_complex_double* afp, lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_ssptrd( int matrix_order, char uplo, lapack_int n, float* ap,\n                           float* d, float* e, float* tau );\nlapack_int LAPACKE_dsptrd( int matrix_order, char uplo, lapack_int n,\n                           double* ap, double* d, double* e, double* tau );\n\nlapack_int LAPACKE_ssptrf( int matrix_order, char uplo, lapack_int n, float* ap,\n                           lapack_int* ipiv );\nlapack_int LAPACKE_dsptrf( int matrix_order, char uplo, lapack_int n,\n                           double* ap, lapack_int* ipiv );\nlapack_int LAPACKE_csptrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* ap, lapack_int* ipiv );\nlapack_int LAPACKE_zsptrf( int matrix_order, char uplo, lapack_int n,\n                           
lapack_complex_double* ap, lapack_int* ipiv );\n\nlapack_int LAPACKE_ssptri( int matrix_order, char uplo, lapack_int n, float* ap,\n                           const lapack_int* ipiv );\nlapack_int LAPACKE_dsptri( int matrix_order, char uplo, lapack_int n,\n                           double* ap, const lapack_int* ipiv );\nlapack_int LAPACKE_csptri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* ap, const lapack_int* ipiv );\nlapack_int LAPACKE_zsptri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* ap, const lapack_int* ipiv );\n\nlapack_int LAPACKE_ssptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* ap,\n                           const lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int LAPACKE_dsptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* ap,\n                           const lapack_int* ipiv, double* b, lapack_int ldb );\nlapack_int LAPACKE_csptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* ap,\n                           const lapack_int* ipiv, lapack_complex_float* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_zsptrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* ap,\n                           const lapack_int* ipiv, lapack_complex_double* b,\n                           lapack_int ldb );\n\nlapack_int LAPACKE_sstebz( char range, char order, lapack_int n, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           const float* d, const float* e, lapack_int* m,\n                           lapack_int* nsplit, float* w, lapack_int* iblock,\n                           lapack_int* isplit );\nlapack_int LAPACKE_dstebz( char 
range, char order, lapack_int n, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, const double* d, const double* e,\n                           lapack_int* m, lapack_int* nsplit, double* w,\n                           lapack_int* iblock, lapack_int* isplit );\n\nlapack_int LAPACKE_sstedc( int matrix_order, char compz, lapack_int n, float* d,\n                           float* e, float* z, lapack_int ldz );\nlapack_int LAPACKE_dstedc( int matrix_order, char compz, lapack_int n,\n                           double* d, double* e, double* z, lapack_int ldz );\nlapack_int LAPACKE_cstedc( int matrix_order, char compz, lapack_int n, float* d,\n                           float* e, lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zstedc( int matrix_order, char compz, lapack_int n,\n                           double* d, double* e, lapack_complex_double* z,\n                           lapack_int ldz );\n\nlapack_int LAPACKE_sstegr( int matrix_order, char jobz, char range,\n                           lapack_int n, float* d, float* e, float vl, float vu,\n                           lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* isuppz );\nlapack_int LAPACKE_dstegr( int matrix_order, char jobz, char range,\n                           lapack_int n, double* d, double* e, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* isuppz );\nlapack_int LAPACKE_cstegr( int matrix_order, char jobz, char range,\n                           lapack_int n, float* d, float* e, float vl, float vu,\n                           lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, 
lapack_complex_float* z,\n                           lapack_int ldz, lapack_int* isuppz );\nlapack_int LAPACKE_zstegr( int matrix_order, char jobz, char range,\n                           lapack_int n, double* d, double* e, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* isuppz );\n\nlapack_int LAPACKE_sstein( int matrix_order, lapack_int n, const float* d,\n                           const float* e, lapack_int m, const float* w,\n                           const lapack_int* iblock, const lapack_int* isplit,\n                           float* z, lapack_int ldz, lapack_int* ifailv );\nlapack_int LAPACKE_dstein( int matrix_order, lapack_int n, const double* d,\n                           const double* e, lapack_int m, const double* w,\n                           const lapack_int* iblock, const lapack_int* isplit,\n                           double* z, lapack_int ldz, lapack_int* ifailv );\nlapack_int LAPACKE_cstein( int matrix_order, lapack_int n, const float* d,\n                           const float* e, lapack_int m, const float* w,\n                           const lapack_int* iblock, const lapack_int* isplit,\n                           lapack_complex_float* z, lapack_int ldz,\n                           lapack_int* ifailv );\nlapack_int LAPACKE_zstein( int matrix_order, lapack_int n, const double* d,\n                           const double* e, lapack_int m, const double* w,\n                           const lapack_int* iblock, const lapack_int* isplit,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int* ifailv );\n\nlapack_int LAPACKE_sstemr( int matrix_order, char jobz, char range,\n                           lapack_int n, float* d, float* e, float vl, float vu,\n                           
lapack_int il, lapack_int iu, lapack_int* m,\n                           float* w, float* z, lapack_int ldz, lapack_int nzc,\n                           lapack_int* isuppz, lapack_logical* tryrac );\nlapack_int LAPACKE_dstemr( int matrix_order, char jobz, char range,\n                           lapack_int n, double* d, double* e, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           lapack_int* m, double* w, double* z, lapack_int ldz,\n                           lapack_int nzc, lapack_int* isuppz,\n                           lapack_logical* tryrac );\nlapack_int LAPACKE_cstemr( int matrix_order, char jobz, char range,\n                           lapack_int n, float* d, float* e, float vl, float vu,\n                           lapack_int il, lapack_int iu, lapack_int* m,\n                           float* w, lapack_complex_float* z, lapack_int ldz,\n                           lapack_int nzc, lapack_int* isuppz,\n                           lapack_logical* tryrac );\nlapack_int LAPACKE_zstemr( int matrix_order, char jobz, char range,\n                           lapack_int n, double* d, double* e, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           lapack_int* m, double* w, lapack_complex_double* z,\n                           lapack_int ldz, lapack_int nzc, lapack_int* isuppz,\n                           lapack_logical* tryrac );\n\nlapack_int LAPACKE_ssteqr( int matrix_order, char compz, lapack_int n, float* d,\n                           float* e, float* z, lapack_int ldz );\nlapack_int LAPACKE_dsteqr( int matrix_order, char compz, lapack_int n,\n                           double* d, double* e, double* z, lapack_int ldz );\nlapack_int LAPACKE_csteqr( int matrix_order, char compz, lapack_int n, float* d,\n                           float* e, lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zsteqr( int matrix_order, char compz, lapack_int n,\n    
                       double* d, double* e, lapack_complex_double* z,\n                           lapack_int ldz );\n\nlapack_int LAPACKE_ssterf( lapack_int n, float* d, float* e );\nlapack_int LAPACKE_dsterf( lapack_int n, double* d, double* e );\n\nlapack_int LAPACKE_sstev( int matrix_order, char jobz, lapack_int n, float* d,\n                          float* e, float* z, lapack_int ldz );\nlapack_int LAPACKE_dstev( int matrix_order, char jobz, lapack_int n, double* d,\n                          double* e, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sstevd( int matrix_order, char jobz, lapack_int n, float* d,\n                           float* e, float* z, lapack_int ldz );\nlapack_int LAPACKE_dstevd( int matrix_order, char jobz, lapack_int n, double* d,\n                           double* e, double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sstevr( int matrix_order, char jobz, char range,\n                           lapack_int n, float* d, float* e, float vl, float vu,\n                           lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* isuppz );\nlapack_int LAPACKE_dstevr( int matrix_order, char jobz, char range,\n                           lapack_int n, double* d, double* e, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* isuppz );\n\nlapack_int LAPACKE_sstevx( int matrix_order, char jobz, char range,\n                           lapack_int n, float* d, float* e, float vl, float vu,\n                           lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_dstevx( int matrix_order, char jobz, char range,\n                     
      lapack_int n, double* d, double* e, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* ifail );\n\nlapack_int LAPACKE_ssycon( int matrix_order, char uplo, lapack_int n,\n                           const float* a, lapack_int lda,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_dsycon( int matrix_order, char uplo, lapack_int n,\n                           const double* a, lapack_int lda,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\nlapack_int LAPACKE_csycon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_int* ipiv, float anorm, float* rcond );\nlapack_int LAPACKE_zsycon( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_int* ipiv, double anorm,\n                           double* rcond );\n\nlapack_int LAPACKE_ssyequb( int matrix_order, char uplo, lapack_int n,\n                            const float* a, lapack_int lda, float* s,\n                            float* scond, float* amax );\nlapack_int LAPACKE_dsyequb( int matrix_order, char uplo, lapack_int n,\n                            const double* a, lapack_int lda, double* s,\n                            double* scond, double* amax );\nlapack_int LAPACKE_csyequb( int matrix_order, char uplo, lapack_int n,\n                            const lapack_complex_float* a, lapack_int lda,\n                            float* s, float* scond, float* amax );\nlapack_int LAPACKE_zsyequb( int matrix_order, char uplo, lapack_int n,\n                            const lapack_complex_double* a, lapack_int lda,\n                 
           double* s, double* scond, double* amax );\n\nlapack_int LAPACKE_ssyev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          float* a, lapack_int lda, float* w );\nlapack_int LAPACKE_dsyev( int matrix_order, char jobz, char uplo, lapack_int n,\n                          double* a, lapack_int lda, double* w );\n\nlapack_int LAPACKE_ssyevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           float* a, lapack_int lda, float* w );\nlapack_int LAPACKE_dsyevd( int matrix_order, char jobz, char uplo, lapack_int n,\n                           double* a, lapack_int lda, double* w );\n\nlapack_int LAPACKE_ssyevr( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, float* a, lapack_int lda, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* isuppz );\nlapack_int LAPACKE_dsyevr( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, double* a, lapack_int lda, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* isuppz );\n\nlapack_int LAPACKE_ssyevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, float* a, lapack_int lda, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_dsyevx( int matrix_order, char jobz, char range, char uplo,\n                           lapack_int n, double* a, lapack_int lda, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n     
                      double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* ifail );\n\nlapack_int LAPACKE_ssygst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, float* a, lapack_int lda,\n                           const float* b, lapack_int ldb );\nlapack_int LAPACKE_dsygst( int matrix_order, lapack_int itype, char uplo,\n                           lapack_int n, double* a, lapack_int lda,\n                           const double* b, lapack_int ldb );\n\nlapack_int LAPACKE_ssygv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, float* a, lapack_int lda,\n                          float* b, lapack_int ldb, float* w );\nlapack_int LAPACKE_dsygv( int matrix_order, lapack_int itype, char jobz,\n                          char uplo, lapack_int n, double* a, lapack_int lda,\n                          double* b, lapack_int ldb, double* w );\n\nlapack_int LAPACKE_ssygvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, float* a, lapack_int lda,\n                           float* b, lapack_int ldb, float* w );\nlapack_int LAPACKE_dsygvd( int matrix_order, lapack_int itype, char jobz,\n                           char uplo, lapack_int n, double* a, lapack_int lda,\n                           double* b, lapack_int ldb, double* w );\n\nlapack_int LAPACKE_ssygvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char uplo, lapack_int n, float* a,\n                           lapack_int lda, float* b, lapack_int ldb, float vl,\n                           float vu, lapack_int il, lapack_int iu, float abstol,\n                           lapack_int* m, float* w, float* z, lapack_int ldz,\n                           lapack_int* ifail );\nlapack_int LAPACKE_dsygvx( int matrix_order, lapack_int itype, char jobz,\n                           char range, char 
uplo, lapack_int n, double* a,\n                           lapack_int lda, double* b, lapack_int ldb, double vl,\n                           double vu, lapack_int il, lapack_int iu,\n                           double abstol, lapack_int* m, double* w, double* z,\n                           lapack_int ldz, lapack_int* ifail );\n\nlapack_int LAPACKE_ssyrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           const float* af, lapack_int ldaf,\n                           const lapack_int* ipiv, const float* b,\n                           lapack_int ldb, float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_dsyrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           const double* af, lapack_int ldaf,\n                           const lapack_int* ipiv, const double* b,\n                           lapack_int ldb, double* x, lapack_int ldx,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_csyrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* af,\n                           lapack_int ldaf, const lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_zsyrfs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* af,\n                           lapack_int ldaf, const lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int 
ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_ssyrfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs, const float* a,\n                            lapack_int lda, const float* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const float* s,\n                            const float* b, lapack_int ldb, float* x,\n                            lapack_int ldx, float* rcond, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_dsyrfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs, const double* a,\n                            lapack_int lda, const double* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const double* s,\n                            const double* b, lapack_int ldb, double* x,\n                            lapack_int ldx, double* rcond, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\nlapack_int LAPACKE_csyrfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_float* a, lapack_int lda,\n                            const lapack_complex_float* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const float* s,\n                            const lapack_complex_float* b, lapack_int ldb,\n                            lapack_complex_float* x, lapack_int ldx,\n                            float* rcond, float* berr, lapack_int n_err_bnds,\n                    
        float* err_bnds_norm, float* err_bnds_comp,\n                            lapack_int nparams, float* params );\nlapack_int LAPACKE_zsyrfsx( int matrix_order, char uplo, char equed,\n                            lapack_int n, lapack_int nrhs,\n                            const lapack_complex_double* a, lapack_int lda,\n                            const lapack_complex_double* af, lapack_int ldaf,\n                            const lapack_int* ipiv, const double* s,\n                            const lapack_complex_double* b, lapack_int ldb,\n                            lapack_complex_double* x, lapack_int ldx,\n                            double* rcond, double* berr, lapack_int n_err_bnds,\n                            double* err_bnds_norm, double* err_bnds_comp,\n                            lapack_int nparams, double* params );\n\nlapack_int LAPACKE_ssysv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, float* a, lapack_int lda,\n                          lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int LAPACKE_dsysv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, double* a, lapack_int lda,\n                          lapack_int* ipiv, double* b, lapack_int ldb );\nlapack_int LAPACKE_csysv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_float* a,\n                          lapack_int lda, lapack_int* ipiv,\n                          lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zsysv( int matrix_order, char uplo, lapack_int n,\n                          lapack_int nrhs, lapack_complex_double* a,\n                          lapack_int lda, lapack_int* ipiv,\n                          lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_ssysvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                     
      float* af, lapack_int ldaf, lapack_int* ipiv,\n                           const float* b, lapack_int ldb, float* x,\n                           lapack_int ldx, float* rcond, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_dsysvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           double* af, lapack_int ldaf, lapack_int* ipiv,\n                           const double* b, lapack_int ldb, double* x,\n                           lapack_int ldx, double* rcond, double* ferr,\n                           double* berr );\nlapack_int LAPACKE_csysvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* af,\n                           lapack_int ldaf, lapack_int* ipiv,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* x, lapack_int ldx,\n                           float* rcond, float* ferr, float* berr );\nlapack_int LAPACKE_zsysvx( int matrix_order, char fact, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* af,\n                           lapack_int ldaf, lapack_int* ipiv,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* x, lapack_int ldx,\n                           double* rcond, double* ferr, double* berr );\n\nlapack_int LAPACKE_ssysvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs, float* a,\n                            lapack_int lda, float* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, float* s, float* b,\n                            
lapack_int ldb, float* x, lapack_int ldx,\n                            float* rcond, float* rpvgrw, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_dsysvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs, double* a,\n                            lapack_int lda, double* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, double* s, double* b,\n                            lapack_int ldb, double* x, lapack_int ldx,\n                            double* rcond, double* rpvgrw, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\nlapack_int LAPACKE_csysvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* af, lapack_int ldaf,\n                            lapack_int* ipiv, char* equed, float* s,\n                            lapack_complex_float* b, lapack_int ldb,\n                            lapack_complex_float* x, lapack_int ldx,\n                            float* rcond, float* rpvgrw, float* berr,\n                            lapack_int n_err_bnds, float* err_bnds_norm,\n                            float* err_bnds_comp, lapack_int nparams,\n                            float* params );\nlapack_int LAPACKE_zsysvxx( int matrix_order, char fact, char uplo,\n                            lapack_int n, lapack_int nrhs,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* af, lapack_int ldaf,\n                            lapack_int* 
ipiv, char* equed, double* s,\n                            lapack_complex_double* b, lapack_int ldb,\n                            lapack_complex_double* x, lapack_int ldx,\n                            double* rcond, double* rpvgrw, double* berr,\n                            lapack_int n_err_bnds, double* err_bnds_norm,\n                            double* err_bnds_comp, lapack_int nparams,\n                            double* params );\n\nlapack_int LAPACKE_ssytrd( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda, float* d, float* e, float* tau );\nlapack_int LAPACKE_dsytrd( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda, double* d, double* e, double* tau );\n\nlapack_int LAPACKE_ssytrf( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_dsytrf( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_csytrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_int* ipiv );\nlapack_int LAPACKE_zsytrf( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_int* ipiv );\n\nlapack_int LAPACKE_ssytri( int matrix_order, char uplo, lapack_int n, float* a,\n                           lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_dsytri( int matrix_order, char uplo, lapack_int n, double* a,\n                           lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_csytri( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           const lapack_int* ipiv );\nlapack_int LAPACKE_zsytri( int matrix_order, char uplo, 
lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           const lapack_int* ipiv );\n\nlapack_int LAPACKE_ssytrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const float* a, lapack_int lda,\n                           const lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int LAPACKE_dsytrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const double* a, lapack_int lda,\n                           const lapack_int* ipiv, double* b, lapack_int ldb );\nlapack_int LAPACKE_csytrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_int* ipiv,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zsytrs( int matrix_order, char uplo, lapack_int n,\n                           lapack_int nrhs, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_int* ipiv,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_stbcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, lapack_int kd, const float* ab,\n                           lapack_int ldab, float* rcond );\nlapack_int LAPACKE_dtbcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, lapack_int kd, const double* ab,\n                           lapack_int ldab, double* rcond );\nlapack_int LAPACKE_ctbcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, lapack_int kd,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           float* rcond );\nlapack_int LAPACKE_ztbcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, lapack_int kd,\n  
                         const lapack_complex_double* ab, lapack_int ldab,\n                           double* rcond );\n\nlapack_int LAPACKE_stbrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const float* ab, lapack_int ldab, const float* b,\n                           lapack_int ldb, const float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_dtbrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const double* ab, lapack_int ldab, const double* b,\n                           lapack_int ldb, const double* x, lapack_int ldx,\n                           double* ferr, double* berr );\nlapack_int LAPACKE_ctbrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           const lapack_complex_float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_ztbrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_double* ab, lapack_int ldab,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           const lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_stbtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const float* ab, lapack_int ldab, float* b,\n                           
lapack_int ldb );\nlapack_int LAPACKE_dtbtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const double* ab, lapack_int ldab, double* b,\n                           lapack_int ldb );\nlapack_int LAPACKE_ctbtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_float* ab, lapack_int ldab,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztbtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int kd, lapack_int nrhs,\n                           const lapack_complex_double* ab, lapack_int ldab,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_stfsm( int matrix_order, char transr, char side, char uplo,\n                          char trans, char diag, lapack_int m, lapack_int n,\n                          float alpha, const float* a, float* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_dtfsm( int matrix_order, char transr, char side, char uplo,\n                          char trans, char diag, lapack_int m, lapack_int n,\n                          double alpha, const double* a, double* b,\n                          lapack_int ldb );\nlapack_int LAPACKE_ctfsm( int matrix_order, char transr, char side, char uplo,\n                          char trans, char diag, lapack_int m, lapack_int n,\n                          lapack_complex_float alpha,\n                          const lapack_complex_float* a,\n                          lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztfsm( int matrix_order, char transr, char side, char uplo,\n                          char trans, char diag, lapack_int m, lapack_int n,\n                          lapack_complex_double alpha,\n   
                       const lapack_complex_double* a,\n                          lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_stftri( int matrix_order, char transr, char uplo, char diag,\n                           lapack_int n, float* a );\nlapack_int LAPACKE_dtftri( int matrix_order, char transr, char uplo, char diag,\n                           lapack_int n, double* a );\nlapack_int LAPACKE_ctftri( int matrix_order, char transr, char uplo, char diag,\n                           lapack_int n, lapack_complex_float* a );\nlapack_int LAPACKE_ztftri( int matrix_order, char transr, char uplo, char diag,\n                           lapack_int n, lapack_complex_double* a );\n\nlapack_int LAPACKE_stfttp( int matrix_order, char transr, char uplo,\n                           lapack_int n, const float* arf, float* ap );\nlapack_int LAPACKE_dtfttp( int matrix_order, char transr, char uplo,\n                           lapack_int n, const double* arf, double* ap );\nlapack_int LAPACKE_ctfttp( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_float* arf,\n                           lapack_complex_float* ap );\nlapack_int LAPACKE_ztfttp( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_double* arf,\n                           lapack_complex_double* ap );\n\nlapack_int LAPACKE_stfttr( int matrix_order, char transr, char uplo,\n                           lapack_int n, const float* arf, float* a,\n                           lapack_int lda );\nlapack_int LAPACKE_dtfttr( int matrix_order, char transr, char uplo,\n                           lapack_int n, const double* arf, double* a,\n                           lapack_int lda );\nlapack_int LAPACKE_ctfttr( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_float* arf,\n                           lapack_complex_float* a, lapack_int lda );\nlapack_int 
LAPACKE_ztfttr( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_double* arf,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_stgevc( int matrix_order, char side, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const float* s, lapack_int lds, const float* p,\n                           lapack_int ldp, float* vl, lapack_int ldvl,\n                           float* vr, lapack_int ldvr, lapack_int mm,\n                           lapack_int* m );\nlapack_int LAPACKE_dtgevc( int matrix_order, char side, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const double* s, lapack_int lds, const double* p,\n                           lapack_int ldp, double* vl, lapack_int ldvl,\n                           double* vr, lapack_int ldvr, lapack_int mm,\n                           lapack_int* m );\nlapack_int LAPACKE_ctgevc( int matrix_order, char side, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_float* s, lapack_int lds,\n                           const lapack_complex_float* p, lapack_int ldp,\n                           lapack_complex_float* vl, lapack_int ldvl,\n                           lapack_complex_float* vr, lapack_int ldvr,\n                           lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_ztgevc( int matrix_order, char side, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_double* s, lapack_int lds,\n                           const lapack_complex_double* p, lapack_int ldp,\n                           lapack_complex_double* vl, lapack_int ldvl,\n                           lapack_complex_double* vr, lapack_int ldvr,\n                           lapack_int mm, 
lapack_int* m );\n\nlapack_int LAPACKE_stgexc( int matrix_order, lapack_logical wantq,\n                           lapack_logical wantz, lapack_int n, float* a,\n                           lapack_int lda, float* b, lapack_int ldb, float* q,\n                           lapack_int ldq, float* z, lapack_int ldz,\n                           lapack_int* ifst, lapack_int* ilst );\nlapack_int LAPACKE_dtgexc( int matrix_order, lapack_logical wantq,\n                           lapack_logical wantz, lapack_int n, double* a,\n                           lapack_int lda, double* b, lapack_int ldb, double* q,\n                           lapack_int ldq, double* z, lapack_int ldz,\n                           lapack_int* ifst, lapack_int* ilst );\nlapack_int LAPACKE_ctgexc( int matrix_order, lapack_logical wantq,\n                           lapack_logical wantz, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* q, lapack_int ldq,\n                           lapack_complex_float* z, lapack_int ldz,\n                           lapack_int ifst, lapack_int ilst );\nlapack_int LAPACKE_ztgexc( int matrix_order, lapack_logical wantq,\n                           lapack_logical wantz, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_complex_double* z, lapack_int ldz,\n                           lapack_int ifst, lapack_int ilst );\n\nlapack_int LAPACKE_stgsen( int matrix_order, lapack_int ijob,\n                           lapack_logical wantq, lapack_logical wantz,\n                           const lapack_logical* select, lapack_int n, float* a,\n                           lapack_int lda, float* b, lapack_int ldb,\n                       
    float* alphar, float* alphai, float* beta, float* q,\n                           lapack_int ldq, float* z, lapack_int ldz,\n                           lapack_int* m, float* pl, float* pr, float* dif );\nlapack_int LAPACKE_dtgsen( int matrix_order, lapack_int ijob,\n                           lapack_logical wantq, lapack_logical wantz,\n                           const lapack_logical* select, lapack_int n,\n                           double* a, lapack_int lda, double* b, lapack_int ldb,\n                           double* alphar, double* alphai, double* beta,\n                           double* q, lapack_int ldq, double* z, lapack_int ldz,\n                           lapack_int* m, double* pl, double* pr, double* dif );\nlapack_int LAPACKE_ctgsen( int matrix_order, lapack_int ijob,\n                           lapack_logical wantq, lapack_logical wantz,\n                           const lapack_logical* select, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* alpha,\n                           lapack_complex_float* beta, lapack_complex_float* q,\n                           lapack_int ldq, lapack_complex_float* z,\n                           lapack_int ldz, lapack_int* m, float* pl, float* pr,\n                           float* dif );\nlapack_int LAPACKE_ztgsen( int matrix_order, lapack_int ijob,\n                           lapack_logical wantq, lapack_logical wantz,\n                           const lapack_logical* select, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* alpha,\n                           lapack_complex_double* beta,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_complex_double* z, 
lapack_int ldz,\n                           lapack_int* m, double* pl, double* pr, double* dif );\n\nlapack_int LAPACKE_stgsja( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int p, lapack_int n,\n                           lapack_int k, lapack_int l, float* a, lapack_int lda,\n                           float* b, lapack_int ldb, float tola, float tolb,\n                           float* alpha, float* beta, float* u, lapack_int ldu,\n                           float* v, lapack_int ldv, float* q, lapack_int ldq,\n                           lapack_int* ncycle );\nlapack_int LAPACKE_dtgsja( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int p, lapack_int n,\n                           lapack_int k, lapack_int l, double* a,\n                           lapack_int lda, double* b, lapack_int ldb,\n                           double tola, double tolb, double* alpha,\n                           double* beta, double* u, lapack_int ldu, double* v,\n                           lapack_int ldv, double* q, lapack_int ldq,\n                           lapack_int* ncycle );\nlapack_int LAPACKE_ctgsja( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int p, lapack_int n,\n                           lapack_int k, lapack_int l, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* b,\n                           lapack_int ldb, float tola, float tolb, float* alpha,\n                           float* beta, lapack_complex_float* u, lapack_int ldu,\n                           lapack_complex_float* v, lapack_int ldv,\n                           lapack_complex_float* q, lapack_int ldq,\n                           lapack_int* ncycle );\nlapack_int LAPACKE_ztgsja( int matrix_order, char jobu, char jobv, char jobq,\n                           lapack_int m, lapack_int p, lapack_int n,\n                      
     lapack_int k, lapack_int l, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* b,\n                           lapack_int ldb, double tola, double tolb,\n                           double* alpha, double* beta,\n                           lapack_complex_double* u, lapack_int ldu,\n                           lapack_complex_double* v, lapack_int ldv,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_int* ncycle );\n\nlapack_int LAPACKE_stgsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const float* a, lapack_int lda, const float* b,\n                           lapack_int ldb, const float* vl, lapack_int ldvl,\n                           const float* vr, lapack_int ldvr, float* s,\n                           float* dif, lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_dtgsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const double* a, lapack_int lda, const double* b,\n                           lapack_int ldb, const double* vl, lapack_int ldvl,\n                           const double* vr, lapack_int ldvr, double* s,\n                           double* dif, lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_ctgsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           const lapack_complex_float* vl, lapack_int ldvl,\n                           const lapack_complex_float* vr, lapack_int ldvr,\n                           float* s, float* dif, lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_ztgsna( int matrix_order, char job, char howmny,\n 
                          const lapack_logical* select, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           const lapack_complex_double* vl, lapack_int ldvl,\n                           const lapack_complex_double* vr, lapack_int ldvr,\n                           double* s, double* dif, lapack_int mm,\n                           lapack_int* m );\n\nlapack_int LAPACKE_stgsyl( int matrix_order, char trans, lapack_int ijob,\n                           lapack_int m, lapack_int n, const float* a,\n                           lapack_int lda, const float* b, lapack_int ldb,\n                           float* c, lapack_int ldc, const float* d,\n                           lapack_int ldd, const float* e, lapack_int lde,\n                           float* f, lapack_int ldf, float* scale, float* dif );\nlapack_int LAPACKE_dtgsyl( int matrix_order, char trans, lapack_int ijob,\n                           lapack_int m, lapack_int n, const double* a,\n                           lapack_int lda, const double* b, lapack_int ldb,\n                           double* c, lapack_int ldc, const double* d,\n                           lapack_int ldd, const double* e, lapack_int lde,\n                           double* f, lapack_int ldf, double* scale,\n                           double* dif );\nlapack_int LAPACKE_ctgsyl( int matrix_order, char trans, lapack_int ijob,\n                           lapack_int m, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* c, lapack_int ldc,\n                           const lapack_complex_float* d, lapack_int ldd,\n                           const lapack_complex_float* e, lapack_int lde,\n                           lapack_complex_float* f, lapack_int 
ldf,\n                           float* scale, float* dif );\nlapack_int LAPACKE_ztgsyl( int matrix_order, char trans, lapack_int ijob,\n                           lapack_int m, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* c, lapack_int ldc,\n                           const lapack_complex_double* d, lapack_int ldd,\n                           const lapack_complex_double* e, lapack_int lde,\n                           lapack_complex_double* f, lapack_int ldf,\n                           double* scale, double* dif );\n\nlapack_int LAPACKE_stpcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const float* ap, float* rcond );\nlapack_int LAPACKE_dtpcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const double* ap, double* rcond );\nlapack_int LAPACKE_ctpcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const lapack_complex_float* ap,\n                           float* rcond );\nlapack_int LAPACKE_ztpcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const lapack_complex_double* ap,\n                           double* rcond );\n\nlapack_int LAPACKE_stprfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const float* ap,\n                           const float* b, lapack_int ldb, const float* x,\n                           lapack_int ldx, float* ferr, float* berr );\nlapack_int LAPACKE_dtprfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const double* ap,\n                           const double* b, lapack_int ldb, const double* x,\n                           lapack_int ldx, double* 
ferr, double* berr );\nlapack_int LAPACKE_ctprfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_float* ap,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           const lapack_complex_float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_ztprfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_double* ap,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           const lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_stptri( int matrix_order, char uplo, char diag, lapack_int n,\n                           float* ap );\nlapack_int LAPACKE_dtptri( int matrix_order, char uplo, char diag, lapack_int n,\n                           double* ap );\nlapack_int LAPACKE_ctptri( int matrix_order, char uplo, char diag, lapack_int n,\n                           lapack_complex_float* ap );\nlapack_int LAPACKE_ztptri( int matrix_order, char uplo, char diag, lapack_int n,\n                           lapack_complex_double* ap );\n\nlapack_int LAPACKE_stptrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const float* ap,\n                           float* b, lapack_int ldb );\nlapack_int LAPACKE_dtptrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const double* ap,\n                           double* b, lapack_int ldb );\nlapack_int LAPACKE_ctptrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_float* ap,\n         
                  lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztptrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_double* ap,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_stpttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const float* ap, float* arf );\nlapack_int LAPACKE_dtpttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const double* ap, double* arf );\nlapack_int LAPACKE_ctpttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_float* ap,\n                           lapack_complex_float* arf );\nlapack_int LAPACKE_ztpttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_double* ap,\n                           lapack_complex_double* arf );\n\nlapack_int LAPACKE_stpttr( int matrix_order, char uplo, lapack_int n,\n                           const float* ap, float* a, lapack_int lda );\nlapack_int LAPACKE_dtpttr( int matrix_order, char uplo, lapack_int n,\n                           const double* ap, double* a, lapack_int lda );\nlapack_int LAPACKE_ctpttr( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* ap,\n                           lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_ztpttr( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* ap,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_strcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const float* a, lapack_int lda,\n                           float* rcond );\nlapack_int LAPACKE_dtrcon( int matrix_order, char norm, char uplo, char 
diag,\n                           lapack_int n, const double* a, lapack_int lda,\n                           double* rcond );\nlapack_int LAPACKE_ctrcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const lapack_complex_float* a,\n                           lapack_int lda, float* rcond );\nlapack_int LAPACKE_ztrcon( int matrix_order, char norm, char uplo, char diag,\n                           lapack_int n, const lapack_complex_double* a,\n                           lapack_int lda, double* rcond );\n\nlapack_int LAPACKE_strevc( int matrix_order, char side, char howmny,\n                           lapack_logical* select, lapack_int n, const float* t,\n                           lapack_int ldt, float* vl, lapack_int ldvl,\n                           float* vr, lapack_int ldvr, lapack_int mm,\n                           lapack_int* m );\nlapack_int LAPACKE_dtrevc( int matrix_order, char side, char howmny,\n                           lapack_logical* select, lapack_int n,\n                           const double* t, lapack_int ldt, double* vl,\n                           lapack_int ldvl, double* vr, lapack_int ldvr,\n                           lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_ctrevc( int matrix_order, char side, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           lapack_complex_float* t, lapack_int ldt,\n                           lapack_complex_float* vl, lapack_int ldvl,\n                           lapack_complex_float* vr, lapack_int ldvr,\n                           lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_ztrevc( int matrix_order, char side, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           lapack_complex_double* t, lapack_int ldt,\n                           lapack_complex_double* vl, lapack_int ldvl,\n                           lapack_complex_double* vr, lapack_int 
ldvr,\n                           lapack_int mm, lapack_int* m );\n\nlapack_int LAPACKE_strexc( int matrix_order, char compq, lapack_int n, float* t,\n                           lapack_int ldt, float* q, lapack_int ldq,\n                           lapack_int* ifst, lapack_int* ilst );\nlapack_int LAPACKE_dtrexc( int matrix_order, char compq, lapack_int n,\n                           double* t, lapack_int ldt, double* q, lapack_int ldq,\n                           lapack_int* ifst, lapack_int* ilst );\nlapack_int LAPACKE_ctrexc( int matrix_order, char compq, lapack_int n,\n                           lapack_complex_float* t, lapack_int ldt,\n                           lapack_complex_float* q, lapack_int ldq,\n                           lapack_int ifst, lapack_int ilst );\nlapack_int LAPACKE_ztrexc( int matrix_order, char compq, lapack_int n,\n                           lapack_complex_double* t, lapack_int ldt,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_int ifst, lapack_int ilst );\n\nlapack_int LAPACKE_strrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const float* a,\n                           lapack_int lda, const float* b, lapack_int ldb,\n                           const float* x, lapack_int ldx, float* ferr,\n                           float* berr );\nlapack_int LAPACKE_dtrrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const double* a,\n                           lapack_int lda, const double* b, lapack_int ldb,\n                           const double* x, lapack_int ldx, double* ferr,\n                           double* berr );\nlapack_int LAPACKE_ctrrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_float* a, lapack_int lda,\n                           
const lapack_complex_float* b, lapack_int ldb,\n                           const lapack_complex_float* x, lapack_int ldx,\n                           float* ferr, float* berr );\nlapack_int LAPACKE_ztrrfs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           const lapack_complex_double* x, lapack_int ldx,\n                           double* ferr, double* berr );\n\nlapack_int LAPACKE_strsen( int matrix_order, char job, char compq,\n                           const lapack_logical* select, lapack_int n, float* t,\n                           lapack_int ldt, float* q, lapack_int ldq, float* wr,\n                           float* wi, lapack_int* m, float* s, float* sep );\nlapack_int LAPACKE_dtrsen( int matrix_order, char job, char compq,\n                           const lapack_logical* select, lapack_int n,\n                           double* t, lapack_int ldt, double* q, lapack_int ldq,\n                           double* wr, double* wi, lapack_int* m, double* s,\n                           double* sep );\nlapack_int LAPACKE_ctrsen( int matrix_order, char job, char compq,\n                           const lapack_logical* select, lapack_int n,\n                           lapack_complex_float* t, lapack_int ldt,\n                           lapack_complex_float* q, lapack_int ldq,\n                           lapack_complex_float* w, lapack_int* m, float* s,\n                           float* sep );\nlapack_int LAPACKE_ztrsen( int matrix_order, char job, char compq,\n                           const lapack_logical* select, lapack_int n,\n                           lapack_complex_double* t, lapack_int ldt,\n                           lapack_complex_double* q, lapack_int ldq,\n                           lapack_complex_double* w, 
lapack_int* m, double* s,\n                           double* sep );\n\nlapack_int LAPACKE_strsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const float* t, lapack_int ldt, const float* vl,\n                           lapack_int ldvl, const float* vr, lapack_int ldvr,\n                           float* s, float* sep, lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_dtrsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const double* t, lapack_int ldt, const double* vl,\n                           lapack_int ldvl, const double* vr, lapack_int ldvr,\n                           double* s, double* sep, lapack_int mm,\n                           lapack_int* m );\nlapack_int LAPACKE_ctrsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_float* t, lapack_int ldt,\n                           const lapack_complex_float* vl, lapack_int ldvl,\n                           const lapack_complex_float* vr, lapack_int ldvr,\n                           float* s, float* sep, lapack_int mm, lapack_int* m );\nlapack_int LAPACKE_ztrsna( int matrix_order, char job, char howmny,\n                           const lapack_logical* select, lapack_int n,\n                           const lapack_complex_double* t, lapack_int ldt,\n                           const lapack_complex_double* vl, lapack_int ldvl,\n                           const lapack_complex_double* vr, lapack_int ldvr,\n                           double* s, double* sep, lapack_int mm,\n                           lapack_int* m );\n\nlapack_int LAPACKE_strsyl( int matrix_order, char trana, char tranb,\n                           lapack_int isgn, lapack_int m, lapack_int n,\n                           const float* a, lapack_int 
lda, const float* b,\n                           lapack_int ldb, float* c, lapack_int ldc,\n                           float* scale );\nlapack_int LAPACKE_dtrsyl( int matrix_order, char trana, char tranb,\n                           lapack_int isgn, lapack_int m, lapack_int n,\n                           const double* a, lapack_int lda, const double* b,\n                           lapack_int ldb, double* c, lapack_int ldc,\n                           double* scale );\nlapack_int LAPACKE_ctrsyl( int matrix_order, char trana, char tranb,\n                           lapack_int isgn, lapack_int m, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* b, lapack_int ldb,\n                           lapack_complex_float* c, lapack_int ldc,\n                           float* scale );\nlapack_int LAPACKE_ztrsyl( int matrix_order, char trana, char tranb,\n                           lapack_int isgn, lapack_int m, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* c, lapack_int ldc,\n                           double* scale );\n\nlapack_int LAPACKE_strtri( int matrix_order, char uplo, char diag, lapack_int n,\n                           float* a, lapack_int lda );\nlapack_int LAPACKE_dtrtri( int matrix_order, char uplo, char diag, lapack_int n,\n                           double* a, lapack_int lda );\nlapack_int LAPACKE_ctrtri( int matrix_order, char uplo, char diag, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_ztrtri( int matrix_order, char uplo, char diag, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_strtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, 
lapack_int nrhs, const float* a,\n                           lapack_int lda, float* b, lapack_int ldb );\nlapack_int LAPACKE_dtrtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs, const double* a,\n                           lapack_int lda, double* b, lapack_int ldb );\nlapack_int LAPACKE_ctrtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztrtrs( int matrix_order, char uplo, char trans, char diag,\n                           lapack_int n, lapack_int nrhs,\n                           const lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_strttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const float* a, lapack_int lda,\n                           float* arf );\nlapack_int LAPACKE_dtrttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const double* a, lapack_int lda,\n                           double* arf );\nlapack_int LAPACKE_ctrttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* arf );\nlapack_int LAPACKE_ztrttf( int matrix_order, char transr, char uplo,\n                           lapack_int n, const lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* arf );\n\nlapack_int LAPACKE_strttp( int matrix_order, char uplo, lapack_int n,\n                           const float* a, lapack_int lda, float* ap );\nlapack_int LAPACKE_dtrttp( int matrix_order, char uplo, lapack_int n,\n                           const double* a, lapack_int lda, 
double* ap );\nlapack_int LAPACKE_ctrttp( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* ap );\nlapack_int LAPACKE_ztrttp( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* ap );\n\nlapack_int LAPACKE_stzrzf( int matrix_order, lapack_int m, lapack_int n,\n                           float* a, lapack_int lda, float* tau );\nlapack_int LAPACKE_dtzrzf( int matrix_order, lapack_int m, lapack_int n,\n                           double* a, lapack_int lda, double* tau );\nlapack_int LAPACKE_ctzrzf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* tau );\nlapack_int LAPACKE_ztzrzf( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* tau );\n\nlapack_int LAPACKE_cungbr( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int k, lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau );\nlapack_int LAPACKE_zungbr( int matrix_order, char vect, lapack_int m,\n                           lapack_int n, lapack_int k, lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* tau );\n\nlapack_int LAPACKE_cunghr( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau );\nlapack_int LAPACKE_zunghr( int matrix_order, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, lapack_complex_double* a,\n                           lapack_int lda, const 
lapack_complex_double* tau );\n\nlapack_int LAPACKE_cunglq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau );\nlapack_int LAPACKE_zunglq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* tau );\n\nlapack_int LAPACKE_cungql( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau );\nlapack_int LAPACKE_zungql( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* tau );\n\nlapack_int LAPACKE_cungqr( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau );\nlapack_int LAPACKE_zungqr( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* tau );\n\nlapack_int LAPACKE_cungrq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau );\nlapack_int LAPACKE_zungrq( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* tau );\n\nlapack_int LAPACKE_cungtr( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_float* a, lapack_int lda,\n                           const 
lapack_complex_float* tau );\nlapack_int LAPACKE_zungtr( int matrix_order, char uplo, lapack_int n,\n                           lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* tau );\n\nlapack_int LAPACKE_cunmbr( int matrix_order, char vect, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* tau,\n                           lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zunmbr( int matrix_order, char vect, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* tau,\n                           lapack_complex_double* c, lapack_int ldc );\n\nlapack_int LAPACKE_cunmhr( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau,\n                           lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zunmhr( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int ilo,\n                           lapack_int ihi, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* tau,\n                           lapack_complex_double* c, lapack_int ldc );\n\nlapack_int LAPACKE_cunmlq( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* tau,\n                           
lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zunmlq( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* tau,\n                           lapack_complex_double* c, lapack_int ldc );\n\nlapack_int LAPACKE_cunmql( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* tau,\n                           lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zunmql( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* tau,\n                           lapack_complex_double* c, lapack_int ldc );\n\nlapack_int LAPACKE_cunmqr( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* tau,\n                           lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zunmqr( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* tau,\n                           lapack_complex_double* c, lapack_int ldc );\n\nlapack_int LAPACKE_cunmrq( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_float* a, lapack_int 
lda,\n                           const lapack_complex_float* tau,\n                           lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zunmrq( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* tau,\n                           lapack_complex_double* c, lapack_int ldc );\n\nlapack_int LAPACKE_cunmrz( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           lapack_int l, const lapack_complex_float* a,\n                           lapack_int lda, const lapack_complex_float* tau,\n                           lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zunmrz( int matrix_order, char side, char trans,\n                           lapack_int m, lapack_int n, lapack_int k,\n                           lapack_int l, const lapack_complex_double* a,\n                           lapack_int lda, const lapack_complex_double* tau,\n                           lapack_complex_double* c, lapack_int ldc );\n\nlapack_int LAPACKE_cunmtr( int matrix_order, char side, char uplo, char trans,\n                           lapack_int m, lapack_int n,\n                           const lapack_complex_float* a, lapack_int lda,\n                           const lapack_complex_float* tau,\n                           lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zunmtr( int matrix_order, char side, char uplo, char trans,\n                           lapack_int m, lapack_int n,\n                           const lapack_complex_double* a, lapack_int lda,\n                           const lapack_complex_double* tau,\n                           lapack_complex_double* c, lapack_int ldc );\n\nlapack_int LAPACKE_cupgtr( int matrix_order, char uplo, lapack_int n,\n                        
   const lapack_complex_float* ap,\n                           const lapack_complex_float* tau,\n                           lapack_complex_float* q, lapack_int ldq );\nlapack_int LAPACKE_zupgtr( int matrix_order, char uplo, lapack_int n,\n                           const lapack_complex_double* ap,\n                           const lapack_complex_double* tau,\n                           lapack_complex_double* q, lapack_int ldq );\n\nlapack_int LAPACKE_cupmtr( int matrix_order, char side, char uplo, char trans,\n                           lapack_int m, lapack_int n,\n                           const lapack_complex_float* ap,\n                           const lapack_complex_float* tau,\n                           lapack_complex_float* c, lapack_int ldc );\nlapack_int LAPACKE_zupmtr( int matrix_order, char side, char uplo, char trans,\n                           lapack_int m, lapack_int n,\n                           const lapack_complex_double* ap,\n                           const lapack_complex_double* tau,\n                           lapack_complex_double* c, lapack_int ldc );\n\nlapack_int LAPACKE_sbdsdc_work( int matrix_order, char uplo, char compq,\n                                lapack_int n, float* d, float* e, float* u,\n                                lapack_int ldu, float* vt, lapack_int ldvt,\n                                float* q, lapack_int* iq, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dbdsdc_work( int matrix_order, char uplo, char compq,\n                                lapack_int n, double* d, double* e, double* u,\n                                lapack_int ldu, double* vt, lapack_int ldvt,\n                                double* q, lapack_int* iq, double* work,\n                                lapack_int* iwork );\n\nlapack_int LAPACKE_sbdsqr_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int ncvt, lapack_int nru, lapack_int ncc,\n                             
   float* d, float* e, float* vt, lapack_int ldvt,\n                                float* u, lapack_int ldu, float* c,\n                                lapack_int ldc, float* work );\nlapack_int LAPACKE_dbdsqr_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int ncvt, lapack_int nru, lapack_int ncc,\n                                double* d, double* e, double* vt,\n                                lapack_int ldvt, double* u, lapack_int ldu,\n                                double* c, lapack_int ldc, double* work );\nlapack_int LAPACKE_cbdsqr_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int ncvt, lapack_int nru, lapack_int ncc,\n                                float* d, float* e, lapack_complex_float* vt,\n                                lapack_int ldvt, lapack_complex_float* u,\n                                lapack_int ldu, lapack_complex_float* c,\n                                lapack_int ldc, float* work );\nlapack_int LAPACKE_zbdsqr_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int ncvt, lapack_int nru, lapack_int ncc,\n                                double* d, double* e, lapack_complex_double* vt,\n                                lapack_int ldvt, lapack_complex_double* u,\n                                lapack_int ldu, lapack_complex_double* c,\n                                lapack_int ldc, double* work );\n\nlapack_int LAPACKE_sdisna_work( char job, lapack_int m, lapack_int n,\n                                const float* d, float* sep );\nlapack_int LAPACKE_ddisna_work( char job, lapack_int m, lapack_int n,\n                                const double* d, double* sep );\n\nlapack_int LAPACKE_sgbbrd_work( int matrix_order, char vect, lapack_int m,\n                                lapack_int n, lapack_int ncc, lapack_int kl,\n                                lapack_int ku, float* ab, lapack_int ldab,\n                           
     float* d, float* e, float* q, lapack_int ldq,\n                                float* pt, lapack_int ldpt, float* c,\n                                lapack_int ldc, float* work );\nlapack_int LAPACKE_dgbbrd_work( int matrix_order, char vect, lapack_int m,\n                                lapack_int n, lapack_int ncc, lapack_int kl,\n                                lapack_int ku, double* ab, lapack_int ldab,\n                                double* d, double* e, double* q, lapack_int ldq,\n                                double* pt, lapack_int ldpt, double* c,\n                                lapack_int ldc, double* work );\nlapack_int LAPACKE_cgbbrd_work( int matrix_order, char vect, lapack_int m,\n                                lapack_int n, lapack_int ncc, lapack_int kl,\n                                lapack_int ku, lapack_complex_float* ab,\n                                lapack_int ldab, float* d, float* e,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* pt, lapack_int ldpt,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zgbbrd_work( int matrix_order, char vect, lapack_int m,\n                                lapack_int n, lapack_int ncc, lapack_int kl,\n                                lapack_int ku, lapack_complex_double* ab,\n                                lapack_int ldab, double* d, double* e,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_complex_double* pt, lapack_int ldpt,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sgbcon_work( int matrix_order, char norm, lapack_int n,\n                                lapack_int kl, lapack_int ku, const float* 
ab,\n                                lapack_int ldab, const lapack_int* ipiv,\n                                float anorm, float* rcond, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgbcon_work( int matrix_order, char norm, lapack_int n,\n                                lapack_int kl, lapack_int ku, const double* ab,\n                                lapack_int ldab, const lapack_int* ipiv,\n                                double anorm, double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_cgbcon_work( int matrix_order, char norm, lapack_int n,\n                                lapack_int kl, lapack_int ku,\n                                const lapack_complex_float* ab, lapack_int ldab,\n                                const lapack_int* ipiv, float anorm,\n                                float* rcond, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_zgbcon_work( int matrix_order, char norm, lapack_int n,\n                                lapack_int kl, lapack_int ku,\n                                const lapack_complex_double* ab,\n                                lapack_int ldab, const lapack_int* ipiv,\n                                double anorm, double* rcond,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sgbequ_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku, const float* ab,\n                                lapack_int ldab, float* r, float* c,\n                                float* rowcnd, float* colcnd, float* amax );\nlapack_int LAPACKE_dgbequ_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku, const double* ab,\n                                lapack_int ldab, double* r, double* c,\n                                double* rowcnd, double* 
colcnd, double* amax );\nlapack_int LAPACKE_cgbequ_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku,\n                                const lapack_complex_float* ab, lapack_int ldab,\n                                float* r, float* c, float* rowcnd,\n                                float* colcnd, float* amax );\nlapack_int LAPACKE_zgbequ_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku,\n                                const lapack_complex_double* ab,\n                                lapack_int ldab, double* r, double* c,\n                                double* rowcnd, double* colcnd, double* amax );\n\nlapack_int LAPACKE_sgbequb_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_int kl, lapack_int ku, const float* ab,\n                                 lapack_int ldab, float* r, float* c,\n                                 float* rowcnd, float* colcnd, float* amax );\nlapack_int LAPACKE_dgbequb_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_int kl, lapack_int ku, const double* ab,\n                                 lapack_int ldab, double* r, double* c,\n                                 double* rowcnd, double* colcnd, double* amax );\nlapack_int LAPACKE_cgbequb_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_int kl, lapack_int ku,\n                                 const lapack_complex_float* ab,\n                                 lapack_int ldab, float* r, float* c,\n                                 float* rowcnd, float* colcnd, float* amax );\nlapack_int LAPACKE_zgbequb_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_int kl, lapack_int ku,\n                                 const lapack_complex_double* ab,\n                                 lapack_int ldab, double* 
r, double* c,\n                                 double* rowcnd, double* colcnd, double* amax );\n\nlapack_int LAPACKE_sgbrfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int kl, lapack_int ku, lapack_int nrhs,\n                                const float* ab, lapack_int ldab,\n                                const float* afb, lapack_int ldafb,\n                                const lapack_int* ipiv, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* ferr, float* berr, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgbrfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int kl, lapack_int ku, lapack_int nrhs,\n                                const double* ab, lapack_int ldab,\n                                const double* afb, lapack_int ldafb,\n                                const lapack_int* ipiv, const double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* ferr, double* berr, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_cgbrfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int kl, lapack_int ku, lapack_int nrhs,\n                                const lapack_complex_float* ab, lapack_int ldab,\n                                const lapack_complex_float* afb,\n                                lapack_int ldafb, const lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zgbrfs_work( int matrix_order, char trans, lapack_int n,\n             
                   lapack_int kl, lapack_int ku, lapack_int nrhs,\n                                const lapack_complex_double* ab,\n                                lapack_int ldab,\n                                const lapack_complex_double* afb,\n                                lapack_int ldafb, const lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sgbrfsx_work( int matrix_order, char trans, char equed,\n                                 lapack_int n, lapack_int kl, lapack_int ku,\n                                 lapack_int nrhs, const float* ab,\n                                 lapack_int ldab, const float* afb,\n                                 lapack_int ldafb, const lapack_int* ipiv,\n                                 const float* r, const float* c, const float* b,\n                                 lapack_int ldb, float* x, lapack_int ldx,\n                                 float* rcond, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, float* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_dgbrfsx_work( int matrix_order, char trans, char equed,\n                                 lapack_int n, lapack_int kl, lapack_int ku,\n                                 lapack_int nrhs, const double* ab,\n                                 lapack_int ldab, const double* afb,\n                                 lapack_int ldafb, const lapack_int* ipiv,\n                                 const double* r, const double* c,\n                                 const double* b, lapack_int ldb, double* x,\n    
                             lapack_int ldx, double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, double* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_cgbrfsx_work( int matrix_order, char trans, char equed,\n                                 lapack_int n, lapack_int kl, lapack_int ku,\n                                 lapack_int nrhs,\n                                 const lapack_complex_float* ab,\n                                 lapack_int ldab,\n                                 const lapack_complex_float* afb,\n                                 lapack_int ldafb, const lapack_int* ipiv,\n                                 const float* r, const float* c,\n                                 const lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* x, lapack_int ldx,\n                                 float* rcond, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, lapack_complex_float* work,\n                                 float* rwork );\nlapack_int LAPACKE_zgbrfsx_work( int matrix_order, char trans, char equed,\n                                 lapack_int n, lapack_int kl, lapack_int ku,\n                                 lapack_int nrhs,\n                                 const lapack_complex_double* ab,\n                                 lapack_int ldab,\n                                 const lapack_complex_double* afb,\n                                 lapack_int ldafb, const lapack_int* ipiv,\n                                 const double* r, const double* c,\n                                 const lapack_complex_double* b, lapack_int ldb,\n  
                               lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int LAPACKE_sgbsv_work( int matrix_order, lapack_int n, lapack_int kl,\n                               lapack_int ku, lapack_int nrhs, float* ab,\n                               lapack_int ldab, lapack_int* ipiv, float* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_dgbsv_work( int matrix_order, lapack_int n, lapack_int kl,\n                               lapack_int ku, lapack_int nrhs, double* ab,\n                               lapack_int ldab, lapack_int* ipiv, double* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_cgbsv_work( int matrix_order, lapack_int n, lapack_int kl,\n                               lapack_int ku, lapack_int nrhs,\n                               lapack_complex_float* ab, lapack_int ldab,\n                               lapack_int* ipiv, lapack_complex_float* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_zgbsv_work( int matrix_order, lapack_int n, lapack_int kl,\n                               lapack_int ku, lapack_int nrhs,\n                               lapack_complex_double* ab, lapack_int ldab,\n                               lapack_int* ipiv, lapack_complex_double* b,\n                               lapack_int ldb );\n\nlapack_int LAPACKE_sgbsvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int kl, lapack_int ku,\n                                lapack_int nrhs, float* ab, lapack_int ldab,\n                                float* afb, lapack_int ldafb, lapack_int* ipiv,\n          
                      char* equed, float* r, float* c, float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dgbsvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int kl, lapack_int ku,\n                                lapack_int nrhs, double* ab, lapack_int ldab,\n                                double* afb, lapack_int ldafb, lapack_int* ipiv,\n                                char* equed, double* r, double* c, double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cgbsvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int kl, lapack_int ku,\n                                lapack_int nrhs, lapack_complex_float* ab,\n                                lapack_int ldab, lapack_complex_float* afb,\n                                lapack_int ldafb, lapack_int* ipiv, char* equed,\n                                float* r, float* c, lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_zgbsvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int kl, lapack_int ku,\n                                lapack_int nrhs, lapack_complex_double* ab,\n                                lapack_int ldab, lapack_complex_double* afb,\n                                lapack_int ldafb, lapack_int* 
ipiv, char* equed,\n                                double* r, double* c, lapack_complex_double* b,\n                                lapack_int ldb, lapack_complex_double* x,\n                                lapack_int ldx, double* rcond, double* ferr,\n                                double* berr, lapack_complex_double* work,\n                                double* rwork );\n\nlapack_int LAPACKE_sgbsvxx_work( int matrix_order, char fact, char trans,\n                                 lapack_int n, lapack_int kl, lapack_int ku,\n                                 lapack_int nrhs, float* ab, lapack_int ldab,\n                                 float* afb, lapack_int ldafb, lapack_int* ipiv,\n                                 char* equed, float* r, float* c, float* b,\n                                 lapack_int ldb, float* x, lapack_int ldx,\n                                 float* rcond, float* rpvgrw, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, float* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_dgbsvxx_work( int matrix_order, char fact, char trans,\n                                 lapack_int n, lapack_int kl, lapack_int ku,\n                                 lapack_int nrhs, double* ab, lapack_int ldab,\n                                 double* afb, lapack_int ldafb,\n                                 lapack_int* ipiv, char* equed, double* r,\n                                 double* c, double* b, lapack_int ldb,\n                                 double* x, lapack_int ldx, double* rcond,\n                                 double* rpvgrw, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, double* 
work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_cgbsvxx_work( int matrix_order, char fact, char trans,\n                                 lapack_int n, lapack_int kl, lapack_int ku,\n                                 lapack_int nrhs, lapack_complex_float* ab,\n                                 lapack_int ldab, lapack_complex_float* afb,\n                                 lapack_int ldafb, lapack_int* ipiv,\n                                 char* equed, float* r, float* c,\n                                 lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* x, lapack_int ldx,\n                                 float* rcond, float* rpvgrw, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, lapack_complex_float* work,\n                                 float* rwork );\nlapack_int LAPACKE_zgbsvxx_work( int matrix_order, char fact, char trans,\n                                 lapack_int n, lapack_int kl, lapack_int ku,\n                                 lapack_int nrhs, lapack_complex_double* ab,\n                                 lapack_int ldab, lapack_complex_double* afb,\n                                 lapack_int ldafb, lapack_int* ipiv,\n                                 char* equed, double* r, double* c,\n                                 lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* rpvgrw, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int 
LAPACKE_sgbtrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku, float* ab,\n                                lapack_int ldab, lapack_int* ipiv );\nlapack_int LAPACKE_dgbtrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku, double* ab,\n                                lapack_int ldab, lapack_int* ipiv );\nlapack_int LAPACKE_cgbtrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku,\n                                lapack_complex_float* ab, lapack_int ldab,\n                                lapack_int* ipiv );\nlapack_int LAPACKE_zgbtrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku,\n                                lapack_complex_double* ab, lapack_int ldab,\n                                lapack_int* ipiv );\n\nlapack_int LAPACKE_sgbtrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int kl, lapack_int ku, lapack_int nrhs,\n                                const float* ab, lapack_int ldab,\n                                const lapack_int* ipiv, float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_dgbtrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int kl, lapack_int ku, lapack_int nrhs,\n                                const double* ab, lapack_int ldab,\n                                const lapack_int* ipiv, double* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_cgbtrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int kl, lapack_int ku, lapack_int nrhs,\n                                const lapack_complex_float* ab, lapack_int ldab,\n                                const lapack_int* ipiv, lapack_complex_float* b,\n            
                    lapack_int ldb );\nlapack_int LAPACKE_zgbtrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int kl, lapack_int ku, lapack_int nrhs,\n                                const lapack_complex_double* ab,\n                                lapack_int ldab, const lapack_int* ipiv,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sgebak_work( int matrix_order, char job, char side,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                const float* scale, lapack_int m, float* v,\n                                lapack_int ldv );\nlapack_int LAPACKE_dgebak_work( int matrix_order, char job, char side,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                const double* scale, lapack_int m, double* v,\n                                lapack_int ldv );\nlapack_int LAPACKE_cgebak_work( int matrix_order, char job, char side,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                const float* scale, lapack_int m,\n                                lapack_complex_float* v, lapack_int ldv );\nlapack_int LAPACKE_zgebak_work( int matrix_order, char job, char side,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                const double* scale, lapack_int m,\n                                lapack_complex_double* v, lapack_int ldv );\n\nlapack_int LAPACKE_sgebal_work( int matrix_order, char job, lapack_int n,\n                                float* a, lapack_int lda, lapack_int* ilo,\n                                lapack_int* ihi, float* scale );\nlapack_int LAPACKE_dgebal_work( int matrix_order, char job, lapack_int n,\n                                double* a, lapack_int lda, lapack_int* ilo,\n                                lapack_int* ihi, double* scale 
);\nlapack_int LAPACKE_cgebal_work( int matrix_order, char job, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* ilo, lapack_int* ihi,\n                                float* scale );\nlapack_int LAPACKE_zgebal_work( int matrix_order, char job, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* ilo, lapack_int* ihi,\n                                double* scale );\n\nlapack_int LAPACKE_sgebrd_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* d, float* e,\n                                float* tauq, float* taup, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dgebrd_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* d, double* e,\n                                double* tauq, double* taup, double* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_cgebrd_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                float* d, float* e, lapack_complex_float* tauq,\n                                lapack_complex_float* taup,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgebrd_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                double* d, double* e,\n                                lapack_complex_double* tauq,\n                                lapack_complex_double* taup,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgecon_work( int matrix_order, char norm, lapack_int n,\n                     
           const float* a, lapack_int lda, float anorm,\n                                float* rcond, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dgecon_work( int matrix_order, char norm, lapack_int n,\n                                const double* a, lapack_int lda, double anorm,\n                                double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_cgecon_work( int matrix_order, char norm, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                float anorm, float* rcond,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zgecon_work( int matrix_order, char norm, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                double anorm, double* rcond,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sgeequ_work( int matrix_order, lapack_int m, lapack_int n,\n                                const float* a, lapack_int lda, float* r,\n                                float* c, float* rowcnd, float* colcnd,\n                                float* amax );\nlapack_int LAPACKE_dgeequ_work( int matrix_order, lapack_int m, lapack_int n,\n                                const double* a, lapack_int lda, double* r,\n                                double* c, double* rowcnd, double* colcnd,\n                                double* amax );\nlapack_int LAPACKE_cgeequ_work( int matrix_order, lapack_int m, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                float* r, float* c, float* rowcnd,\n                                float* colcnd, float* amax );\nlapack_int LAPACKE_zgeequ_work( int matrix_order, lapack_int m, lapack_int n,\n                                const lapack_complex_double* a, 
lapack_int lda,\n                                double* r, double* c, double* rowcnd,\n                                double* colcnd, double* amax );\n\nlapack_int LAPACKE_sgeequb_work( int matrix_order, lapack_int m, lapack_int n,\n                                 const float* a, lapack_int lda, float* r,\n                                 float* c, float* rowcnd, float* colcnd,\n                                 float* amax );\nlapack_int LAPACKE_dgeequb_work( int matrix_order, lapack_int m, lapack_int n,\n                                 const double* a, lapack_int lda, double* r,\n                                 double* c, double* rowcnd, double* colcnd,\n                                 double* amax );\nlapack_int LAPACKE_cgeequb_work( int matrix_order, lapack_int m, lapack_int n,\n                                 const lapack_complex_float* a, lapack_int lda,\n                                 float* r, float* c, float* rowcnd,\n                                 float* colcnd, float* amax );\nlapack_int LAPACKE_zgeequb_work( int matrix_order, lapack_int m, lapack_int n,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 double* r, double* c, double* rowcnd,\n                                 double* colcnd, double* amax );\n\nlapack_int LAPACKE_sgees_work( int matrix_order, char jobvs, char sort,\n                               LAPACK_S_SELECT2 select, lapack_int n, float* a,\n                               lapack_int lda, lapack_int* sdim, float* wr,\n                               float* wi, float* vs, lapack_int ldvs,\n                               float* work, lapack_int lwork,\n                               lapack_logical* bwork );\nlapack_int LAPACKE_dgees_work( int matrix_order, char jobvs, char sort,\n                               LAPACK_D_SELECT2 select, lapack_int n, double* a,\n                               lapack_int lda, lapack_int* sdim, double* wr,\n                              
 double* wi, double* vs, lapack_int ldvs,\n                               double* work, lapack_int lwork,\n                               lapack_logical* bwork );\nlapack_int LAPACKE_cgees_work( int matrix_order, char jobvs, char sort,\n                               LAPACK_C_SELECT1 select, lapack_int n,\n                               lapack_complex_float* a, lapack_int lda,\n                               lapack_int* sdim, lapack_complex_float* w,\n                               lapack_complex_float* vs, lapack_int ldvs,\n                               lapack_complex_float* work, lapack_int lwork,\n                               float* rwork, lapack_logical* bwork );\nlapack_int LAPACKE_zgees_work( int matrix_order, char jobvs, char sort,\n                               LAPACK_Z_SELECT1 select, lapack_int n,\n                               lapack_complex_double* a, lapack_int lda,\n                               lapack_int* sdim, lapack_complex_double* w,\n                               lapack_complex_double* vs, lapack_int ldvs,\n                               lapack_complex_double* work, lapack_int lwork,\n                               double* rwork, lapack_logical* bwork );\n\nlapack_int LAPACKE_sgeesx_work( int matrix_order, char jobvs, char sort,\n                                LAPACK_S_SELECT2 select, char sense,\n                                lapack_int n, float* a, lapack_int lda,\n                                lapack_int* sdim, float* wr, float* wi,\n                                float* vs, lapack_int ldvs, float* rconde,\n                                float* rcondv, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork,\n                                lapack_logical* bwork );\nlapack_int LAPACKE_dgeesx_work( int matrix_order, char jobvs, char sort,\n                                LAPACK_D_SELECT2 select, char sense,\n                                lapack_int n, double* a, lapack_int lda,\n     
                           lapack_int* sdim, double* wr, double* wi,\n                                double* vs, lapack_int ldvs, double* rconde,\n                                double* rcondv, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork,\n                                lapack_logical* bwork );\nlapack_int LAPACKE_cgeesx_work( int matrix_order, char jobvs, char sort,\n                                LAPACK_C_SELECT1 select, char sense,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, lapack_int* sdim,\n                                lapack_complex_float* w,\n                                lapack_complex_float* vs, lapack_int ldvs,\n                                float* rconde, float* rcondv,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_logical* bwork );\nlapack_int LAPACKE_zgeesx_work( int matrix_order, char jobvs, char sort,\n                                LAPACK_Z_SELECT1 select, char sense,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, lapack_int* sdim,\n                                lapack_complex_double* w,\n                                lapack_complex_double* vs, lapack_int ldvs,\n                                double* rconde, double* rcondv,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_logical* bwork );\n\nlapack_int LAPACKE_sgeev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, float* a, lapack_int lda,\n                               float* wr, float* wi, float* vl, lapack_int ldvl,\n                               float* vr, lapack_int ldvr, float* work,\n                               lapack_int lwork );\nlapack_int 
LAPACKE_dgeev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, double* a, lapack_int lda,\n                               double* wr, double* wi, double* vl,\n                               lapack_int ldvl, double* vr, lapack_int ldvr,\n                               double* work, lapack_int lwork );\nlapack_int LAPACKE_cgeev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, lapack_complex_float* a,\n                               lapack_int lda, lapack_complex_float* w,\n                               lapack_complex_float* vl, lapack_int ldvl,\n                               lapack_complex_float* vr, lapack_int ldvr,\n                               lapack_complex_float* work, lapack_int lwork,\n                               float* rwork );\nlapack_int LAPACKE_zgeev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, lapack_complex_double* a,\n                               lapack_int lda, lapack_complex_double* w,\n                               lapack_complex_double* vl, lapack_int ldvl,\n                               lapack_complex_double* vr, lapack_int ldvr,\n                               lapack_complex_double* work, lapack_int lwork,\n                               double* rwork );\n\nlapack_int LAPACKE_sgeevx_work( int matrix_order, char balanc, char jobvl,\n                                char jobvr, char sense, lapack_int n, float* a,\n                                lapack_int lda, float* wr, float* wi, float* vl,\n                                lapack_int ldvl, float* vr, lapack_int ldvr,\n                                lapack_int* ilo, lapack_int* ihi, float* scale,\n                                float* abnrm, float* rconde, float* rcondv,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgeevx_work( int matrix_order, char balanc, 
char jobvl,\n                                char jobvr, char sense, lapack_int n, double* a,\n                                lapack_int lda, double* wr, double* wi,\n                                double* vl, lapack_int ldvl, double* vr,\n                                lapack_int ldvr, lapack_int* ilo,\n                                lapack_int* ihi, double* scale, double* abnrm,\n                                double* rconde, double* rcondv, double* work,\n                                lapack_int lwork, lapack_int* iwork );\nlapack_int LAPACKE_cgeevx_work( int matrix_order, char balanc, char jobvl,\n                                char jobvr, char sense, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* w,\n                                lapack_complex_float* vl, lapack_int ldvl,\n                                lapack_complex_float* vr, lapack_int ldvr,\n                                lapack_int* ilo, lapack_int* ihi, float* scale,\n                                float* abnrm, float* rconde, float* rcondv,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork );\nlapack_int LAPACKE_zgeevx_work( int matrix_order, char balanc, char jobvl,\n                                char jobvr, char sense, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* w,\n                                lapack_complex_double* vl, lapack_int ldvl,\n                                lapack_complex_double* vr, lapack_int ldvr,\n                                lapack_int* ilo, lapack_int* ihi, double* scale,\n                                double* abnrm, double* rconde, double* rcondv,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork );\n\nlapack_int 
LAPACKE_sgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, float* a, lapack_int lda,\n                                float* tau, float* work, lapack_int lwork );\nlapack_int LAPACKE_dgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, double* a, lapack_int lda,\n                                double* tau, double* work, lapack_int lwork );\nlapack_int LAPACKE_cgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgehrd_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgejsv_work( int matrix_order, char joba, char jobu,\n                                char jobv, char jobr, char jobt, char jobp,\n                                lapack_int m, lapack_int n, float* a,\n                                lapack_int lda, float* sva, float* u,\n                                lapack_int ldu, float* v, lapack_int ldv,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgejsv_work( int matrix_order, char joba, char jobu,\n                                char jobv, char jobr, char jobt, char jobp,\n                                lapack_int m, lapack_int n, double* a,\n                                lapack_int lda, double* sva, double* u,\n                                lapack_int ldu, double* v, lapack_int ldv,\n                                double* work, 
lapack_int lwork,\n                                lapack_int* iwork );\n\nlapack_int LAPACKE_sgelq2_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n                                float* work );\nlapack_int LAPACKE_dgelq2_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* tau,\n                                double* work );\nlapack_int LAPACKE_cgelq2_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zgelq2_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_sgelqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dgelqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* tau,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_cgelqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgelqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                         
       lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgels_work( int matrix_order, char trans, lapack_int m,\n                               lapack_int n, lapack_int nrhs, float* a,\n                               lapack_int lda, float* b, lapack_int ldb,\n                               float* work, lapack_int lwork );\nlapack_int LAPACKE_dgels_work( int matrix_order, char trans, lapack_int m,\n                               lapack_int n, lapack_int nrhs, double* a,\n                               lapack_int lda, double* b, lapack_int ldb,\n                               double* work, lapack_int lwork );\nlapack_int LAPACKE_cgels_work( int matrix_order, char trans, lapack_int m,\n                               lapack_int n, lapack_int nrhs,\n                               lapack_complex_float* a, lapack_int lda,\n                               lapack_complex_float* b, lapack_int ldb,\n                               lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgels_work( int matrix_order, char trans, lapack_int m,\n                               lapack_int n, lapack_int nrhs,\n                               lapack_complex_double* a, lapack_int lda,\n                               lapack_complex_double* b, lapack_int ldb,\n                               lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgelsd_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, float* s, float rcond,\n                                lapack_int* rank, float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgelsd_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, double* s,\n                           
     double rcond, lapack_int* rank, double* work,\n                                lapack_int lwork, lapack_int* iwork );\nlapack_int LAPACKE_cgelsd_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb, float* s, float rcond,\n                                lapack_int* rank, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_zgelsd_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, double* s, double rcond,\n                                lapack_int* rank, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int* iwork );\n\nlapack_int LAPACKE_sgelss_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, float* s, float rcond,\n                                lapack_int* rank, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dgelss_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, double* s,\n                                double rcond, lapack_int* rank, double* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_cgelss_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, lapack_complex_float* a,\n     
                           lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb, float* s, float rcond,\n                                lapack_int* rank, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork );\nlapack_int LAPACKE_zgelss_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, double* s, double rcond,\n                                lapack_int* rank, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork );\n\nlapack_int LAPACKE_sgelsy_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, lapack_int* jpvt,\n                                float rcond, lapack_int* rank, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dgelsy_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, lapack_int* jpvt,\n                                double rcond, lapack_int* rank, double* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_cgelsy_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nrhs, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb, lapack_int* jpvt, float rcond,\n                                lapack_int* rank, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork );\nlapack_int LAPACKE_zgelsy_work( int matrix_order, lapack_int m, lapack_int 
n,\n                                lapack_int nrhs, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, lapack_int* jpvt, double rcond,\n                                lapack_int* rank, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork );\n\nlapack_int LAPACKE_sgeqlf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dgeqlf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* tau,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_cgeqlf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgeqlf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgeqp3_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, lapack_int* jpvt,\n                                float* tau, float* work, lapack_int lwork );\nlapack_int LAPACKE_dgeqp3_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, lapack_int* jpvt,\n                                double* tau, double* work, lapack_int lwork );\nlapack_int LAPACKE_cgeqp3_work( int matrix_order, lapack_int m, lapack_int n,\n                                
lapack_complex_float* a, lapack_int lda,\n                                lapack_int* jpvt, lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork );\nlapack_int LAPACKE_zgeqp3_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* jpvt, lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork );\n\nlapack_int LAPACKE_sgeqpf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, lapack_int* jpvt,\n                                float* tau, float* work );\nlapack_int LAPACKE_dgeqpf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, lapack_int* jpvt,\n                                double* tau, double* work );\nlapack_int LAPACKE_cgeqpf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* jpvt, lapack_complex_float* tau,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zgeqpf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* jpvt, lapack_complex_double* tau,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sgeqr2_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n                                float* work );\nlapack_int LAPACKE_dgeqr2_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, 
lapack_int lda, double* tau,\n                                double* work );\nlapack_int LAPACKE_cgeqr2_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zgeqr2_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_sgeqrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dgeqrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* tau,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_cgeqrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgeqrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,\n                                 float* a, lapack_int lda, float* tau,\n                                 float* work, lapack_int lwork );\nlapack_int LAPACKE_dgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,\n                                 
double* a, lapack_int lda, double* tau,\n                                 double* work, lapack_int lwork );\nlapack_int LAPACKE_cgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* tau,\n                                 lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgeqrfp_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* tau,\n                                 lapack_complex_double* work,\n                                 lapack_int lwork );\n\nlapack_int LAPACKE_sgerfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const float* a, lapack_int lda,\n                                const float* af, lapack_int ldaf,\n                                const lapack_int* ipiv, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* ferr, float* berr, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgerfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const double* a,\n                                lapack_int lda, const double* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cgerfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* af,\n          
                      lapack_int ldaf, const lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zgerfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_complex_double* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sgerfsx_work( int matrix_order, char trans, char equed,\n                                 lapack_int n, lapack_int nrhs, const float* a,\n                                 lapack_int lda, const float* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const float* r, const float* c, const float* b,\n                                 lapack_int ldb, float* x, lapack_int ldx,\n                                 float* rcond, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, float* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_dgerfsx_work( int matrix_order, char trans, char equed,\n                                 lapack_int n, lapack_int nrhs, const double* a,\n                                 lapack_int lda, const double* af,\n                 
                lapack_int ldaf, const lapack_int* ipiv,\n                                 const double* r, const double* c,\n                                 const double* b, lapack_int ldb, double* x,\n                                 lapack_int ldx, double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, double* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_cgerfsx_work( int matrix_order, char trans, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_float* a, lapack_int lda,\n                                 const lapack_complex_float* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const float* r, const float* c,\n                                 const lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* x, lapack_int ldx,\n                                 float* rcond, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, lapack_complex_float* work,\n                                 float* rwork );\nlapack_int LAPACKE_zgerfsx_work( int matrix_order, char trans, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 const lapack_complex_double* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const double* r, const double* c,\n                                 const lapack_complex_double* b, lapack_int 
ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int LAPACKE_sgerqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dgerqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* tau,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_cgerqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgerqf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgesdd_work( int matrix_order, char jobz, lapack_int m,\n                                lapack_int n, float* a, lapack_int lda,\n                                float* s, float* u, lapack_int ldu, float* vt,\n                                lapack_int ldvt, float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgesdd_work( int matrix_order, char jobz, lapack_int m,\n                                lapack_int n, double* a, lapack_int lda,\n                        
        double* s, double* u, lapack_int ldu,\n                                double* vt, lapack_int ldvt, double* work,\n                                lapack_int lwork, lapack_int* iwork );\nlapack_int LAPACKE_cgesdd_work( int matrix_order, char jobz, lapack_int m,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, float* s,\n                                lapack_complex_float* u, lapack_int ldu,\n                                lapack_complex_float* vt, lapack_int ldvt,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int* iwork );\nlapack_int LAPACKE_zgesdd_work( int matrix_order, char jobz, lapack_int m,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, double* s,\n                                lapack_complex_double* u, lapack_int ldu,\n                                lapack_complex_double* vt, lapack_int ldvt,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int* iwork );\n\nlapack_int LAPACKE_sgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               float* a, lapack_int lda, lapack_int* ipiv,\n                               float* b, lapack_int ldb );\nlapack_int LAPACKE_dgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               double* a, lapack_int lda, lapack_int* ipiv,\n                               double* b, lapack_int ldb );\nlapack_int LAPACKE_cgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               lapack_complex_float* a, lapack_int lda,\n                               lapack_int* ipiv, lapack_complex_float* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_zgesv_work( int matrix_order, lapack_int n, lapack_int 
nrhs,\n                               lapack_complex_double* a, lapack_int lda,\n                               lapack_int* ipiv, lapack_complex_double* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_dsgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                                double* a, lapack_int lda, lapack_int* ipiv,\n                                double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* work, float* swork,\n                                lapack_int* iter );\nlapack_int LAPACKE_zcgesv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* ipiv, lapack_complex_double* b,\n                                lapack_int ldb, lapack_complex_double* x,\n                                lapack_int ldx, lapack_complex_double* work,\n                                lapack_complex_float* swork, double* rwork,\n                                lapack_int* iter );\n\nlapack_int LAPACKE_sgesvd_work( int matrix_order, char jobu, char jobvt,\n                                lapack_int m, lapack_int n, float* a,\n                                lapack_int lda, float* s, float* u,\n                                lapack_int ldu, float* vt, lapack_int ldvt,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dgesvd_work( int matrix_order, char jobu, char jobvt,\n                                lapack_int m, lapack_int n, double* a,\n                                lapack_int lda, double* s, double* u,\n                                lapack_int ldu, double* vt, lapack_int ldvt,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_cgesvd_work( int matrix_order, char jobu, char jobvt,\n                                lapack_int m, lapack_int n,\n                                lapack_complex_float* a, 
lapack_int lda,\n                                float* s, lapack_complex_float* u,\n                                lapack_int ldu, lapack_complex_float* vt,\n                                lapack_int ldvt, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork );\nlapack_int LAPACKE_zgesvd_work( int matrix_order, char jobu, char jobvt,\n                                lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                double* s, lapack_complex_double* u,\n                                lapack_int ldu, lapack_complex_double* vt,\n                                lapack_int ldvt, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork );\n\nlapack_int LAPACKE_sgesvj_work( int matrix_order, char joba, char jobu,\n                                char jobv, lapack_int m, lapack_int n, float* a,\n                                lapack_int lda, float* sva, lapack_int mv,\n                                float* v, lapack_int ldv, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dgesvj_work( int matrix_order, char joba, char jobu,\n                                char jobv, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* sva,\n                                lapack_int mv, double* v, lapack_int ldv,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgesvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs, float* a,\n                                lapack_int lda, float* af, lapack_int ldaf,\n                                lapack_int* ipiv, char* equed, float* r,\n                                float* c, float* b, lapack_int ldb, float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                  
              float* berr, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dgesvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs, double* a,\n                                lapack_int lda, double* af, lapack_int ldaf,\n                                lapack_int* ipiv, char* equed, double* r,\n                                double* c, double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* rcond, double* ferr,\n                                double* berr, double* work, lapack_int* iwork );\nlapack_int LAPACKE_cgesvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* af, lapack_int ldaf,\n                                lapack_int* ipiv, char* equed, float* r,\n                                float* c, lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_zgesvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* af, lapack_int ldaf,\n                                lapack_int* ipiv, char* equed, double* r,\n                                double* c, lapack_complex_double* b,\n                                lapack_int ldb, lapack_complex_double* x,\n                                lapack_int ldx, double* rcond, double* ferr,\n                                double* berr, lapack_complex_double* work,\n                           
     double* rwork );\n\nlapack_int LAPACKE_sgesvxx_work( int matrix_order, char fact, char trans,\n                                 lapack_int n, lapack_int nrhs, float* a,\n                                 lapack_int lda, float* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, float* r,\n                                 float* c, float* b, lapack_int ldb, float* x,\n                                 lapack_int ldx, float* rcond, float* rpvgrw,\n                                 float* berr, lapack_int n_err_bnds,\n                                 float* err_bnds_norm, float* err_bnds_comp,\n                                 lapack_int nparams, float* params, float* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_dgesvxx_work( int matrix_order, char fact, char trans,\n                                 lapack_int n, lapack_int nrhs, double* a,\n                                 lapack_int lda, double* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, double* r,\n                                 double* c, double* b, lapack_int ldb,\n                                 double* x, lapack_int ldx, double* rcond,\n                                 double* rpvgrw, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, double* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_cgesvxx_work( int matrix_order, char fact, char trans,\n                                 lapack_int n, lapack_int nrhs,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, float* r,\n                                 float* c, lapack_complex_float* 
b,\n                                 lapack_int ldb, lapack_complex_float* x,\n                                 lapack_int ldx, float* rcond, float* rpvgrw,\n                                 float* berr, lapack_int n_err_bnds,\n                                 float* err_bnds_norm, float* err_bnds_comp,\n                                 lapack_int nparams, float* params,\n                                 lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zgesvxx_work( int matrix_order, char fact, char trans,\n                                 lapack_int n, lapack_int nrhs,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, double* r,\n                                 double* c, lapack_complex_double* b,\n                                 lapack_int ldb, lapack_complex_double* x,\n                                 lapack_int ldx, double* rcond, double* rpvgrw,\n                                 double* berr, lapack_int n_err_bnds,\n                                 double* err_bnds_norm, double* err_bnds_comp,\n                                 lapack_int nparams, double* params,\n                                 lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sgetf2_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_dgetf2_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_cgetf2_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* ipiv );\nlapack_int LAPACKE_zgetf2_work( int matrix_order, lapack_int m, lapack_int n,\n                                
lapack_complex_double* a, lapack_int lda,\n                                lapack_int* ipiv );\n\nlapack_int LAPACKE_sgetrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_dgetrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, lapack_int* ipiv );\nlapack_int LAPACKE_cgetrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* ipiv );\nlapack_int LAPACKE_zgetrf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* ipiv );\n\nlapack_int LAPACKE_sgetri_work( int matrix_order, lapack_int n, float* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dgetri_work( int matrix_order, lapack_int n, double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_cgetri_work( int matrix_order, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                const lapack_int* ipiv,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgetri_work( int matrix_order, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgetrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const float* a, lapack_int lda,\n                       
         const lapack_int* ipiv, float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_dgetrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                double* b, lapack_int ldb );\nlapack_int LAPACKE_cgetrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zgetrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sggbak_work( int matrix_order, char job, char side,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                const float* lscale, const float* rscale,\n                                lapack_int m, float* v, lapack_int ldv );\nlapack_int LAPACKE_dggbak_work( int matrix_order, char job, char side,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                const double* lscale, const double* rscale,\n                                lapack_int m, double* v, lapack_int ldv );\nlapack_int LAPACKE_cggbak_work( int matrix_order, char job, char side,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                const float* lscale, const float* rscale,\n                                lapack_int m, lapack_complex_float* v,\n                                lapack_int ldv );\nlapack_int LAPACKE_zggbak_work( int matrix_order, 
char job, char side,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                const double* lscale, const double* rscale,\n                                lapack_int m, lapack_complex_double* v,\n                                lapack_int ldv );\n\nlapack_int LAPACKE_sggbal_work( int matrix_order, char job, lapack_int n,\n                                float* a, lapack_int lda, float* b,\n                                lapack_int ldb, lapack_int* ilo,\n                                lapack_int* ihi, float* lscale, float* rscale,\n                                float* work );\nlapack_int LAPACKE_dggbal_work( int matrix_order, char job, lapack_int n,\n                                double* a, lapack_int lda, double* b,\n                                lapack_int ldb, lapack_int* ilo,\n                                lapack_int* ihi, double* lscale, double* rscale,\n                                double* work );\nlapack_int LAPACKE_cggbal_work( int matrix_order, char job, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                lapack_int* ilo, lapack_int* ihi, float* lscale,\n                                float* rscale, float* work );\nlapack_int LAPACKE_zggbal_work( int matrix_order, char job, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_int* ilo, lapack_int* ihi,\n                                double* lscale, double* rscale, double* work );\n\nlapack_int LAPACKE_sgges_work( int matrix_order, char jobvsl, char jobvsr,\n                               char sort, LAPACK_S_SELECT3 selctg, lapack_int n,\n                               float* a, lapack_int lda, float* b,\n                               lapack_int 
ldb, lapack_int* sdim, float* alphar,\n                               float* alphai, float* beta, float* vsl,\n                               lapack_int ldvsl, float* vsr, lapack_int ldvsr,\n                               float* work, lapack_int lwork,\n                               lapack_logical* bwork );\nlapack_int LAPACKE_dgges_work( int matrix_order, char jobvsl, char jobvsr,\n                               char sort, LAPACK_D_SELECT3 selctg, lapack_int n,\n                               double* a, lapack_int lda, double* b,\n                               lapack_int ldb, lapack_int* sdim, double* alphar,\n                               double* alphai, double* beta, double* vsl,\n                               lapack_int ldvsl, double* vsr, lapack_int ldvsr,\n                               double* work, lapack_int lwork,\n                               lapack_logical* bwork );\nlapack_int LAPACKE_cgges_work( int matrix_order, char jobvsl, char jobvsr,\n                               char sort, LAPACK_C_SELECT2 selctg, lapack_int n,\n                               lapack_complex_float* a, lapack_int lda,\n                               lapack_complex_float* b, lapack_int ldb,\n                               lapack_int* sdim, lapack_complex_float* alpha,\n                               lapack_complex_float* beta,\n                               lapack_complex_float* vsl, lapack_int ldvsl,\n                               lapack_complex_float* vsr, lapack_int ldvsr,\n                               lapack_complex_float* work, lapack_int lwork,\n                               float* rwork, lapack_logical* bwork );\nlapack_int LAPACKE_zgges_work( int matrix_order, char jobvsl, char jobvsr,\n                               char sort, LAPACK_Z_SELECT2 selctg, lapack_int n,\n                               lapack_complex_double* a, lapack_int lda,\n                               lapack_complex_double* b, lapack_int ldb,\n                               lapack_int* sdim, 
lapack_complex_double* alpha,\n                               lapack_complex_double* beta,\n                               lapack_complex_double* vsl, lapack_int ldvsl,\n                               lapack_complex_double* vsr, lapack_int ldvsr,\n                               lapack_complex_double* work, lapack_int lwork,\n                               double* rwork, lapack_logical* bwork );\n\nlapack_int LAPACKE_sggesx_work( int matrix_order, char jobvsl, char jobvsr,\n                                char sort, LAPACK_S_SELECT3 selctg, char sense,\n                                lapack_int n, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, lapack_int* sdim,\n                                float* alphar, float* alphai, float* beta,\n                                float* vsl, lapack_int ldvsl, float* vsr,\n                                lapack_int ldvsr, float* rconde, float* rcondv,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork,\n                                lapack_logical* bwork );\nlapack_int LAPACKE_dggesx_work( int matrix_order, char jobvsl, char jobvsr,\n                                char sort, LAPACK_D_SELECT3 selctg, char sense,\n                                lapack_int n, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, lapack_int* sdim,\n                                double* alphar, double* alphai, double* beta,\n                                double* vsl, lapack_int ldvsl, double* vsr,\n                                lapack_int ldvsr, double* rconde,\n                                double* rcondv, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork,\n                                lapack_logical* bwork );\nlapack_int LAPACKE_cggesx_work( int matrix_order, char jobvsl, char jobvsr,\n                                char sort, 
LAPACK_C_SELECT2 selctg, char sense,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb, lapack_int* sdim,\n                                lapack_complex_float* alpha,\n                                lapack_complex_float* beta,\n                                lapack_complex_float* vsl, lapack_int ldvsl,\n                                lapack_complex_float* vsr, lapack_int ldvsr,\n                                float* rconde, float* rcondv,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int* iwork,\n                                lapack_int liwork, lapack_logical* bwork );\nlapack_int LAPACKE_zggesx_work( int matrix_order, char jobvsl, char jobvsr,\n                                char sort, LAPACK_Z_SELECT2 selctg, char sense,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, lapack_int* sdim,\n                                lapack_complex_double* alpha,\n                                lapack_complex_double* beta,\n                                lapack_complex_double* vsl, lapack_int ldvsl,\n                                lapack_complex_double* vsr, lapack_int ldvsr,\n                                double* rconde, double* rcondv,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int* iwork,\n                                lapack_int liwork, lapack_logical* bwork );\n\nlapack_int LAPACKE_sggev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, float* a, lapack_int lda, float* b,\n                               lapack_int ldb, float* alphar, float* alphai,\n    
                           float* beta, float* vl, lapack_int ldvl,\n                               float* vr, lapack_int ldvr, float* work,\n                               lapack_int lwork );\nlapack_int LAPACKE_dggev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, double* a, lapack_int lda,\n                               double* b, lapack_int ldb, double* alphar,\n                               double* alphai, double* beta, double* vl,\n                               lapack_int ldvl, double* vr, lapack_int ldvr,\n                               double* work, lapack_int lwork );\nlapack_int LAPACKE_cggev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, lapack_complex_float* a,\n                               lapack_int lda, lapack_complex_float* b,\n                               lapack_int ldb, lapack_complex_float* alpha,\n                               lapack_complex_float* beta,\n                               lapack_complex_float* vl, lapack_int ldvl,\n                               lapack_complex_float* vr, lapack_int ldvr,\n                               lapack_complex_float* work, lapack_int lwork,\n                               float* rwork );\nlapack_int LAPACKE_zggev_work( int matrix_order, char jobvl, char jobvr,\n                               lapack_int n, lapack_complex_double* a,\n                               lapack_int lda, lapack_complex_double* b,\n                               lapack_int ldb, lapack_complex_double* alpha,\n                               lapack_complex_double* beta,\n                               lapack_complex_double* vl, lapack_int ldvl,\n                               lapack_complex_double* vr, lapack_int ldvr,\n                               lapack_complex_double* work, lapack_int lwork,\n                               double* rwork );\n\nlapack_int LAPACKE_sggevx_work( int matrix_order, char balanc, char jobvl,\n                  
              char jobvr, char sense, lapack_int n, float* a,\n                                lapack_int lda, float* b, lapack_int ldb,\n                                float* alphar, float* alphai, float* beta,\n                                float* vl, lapack_int ldvl, float* vr,\n                                lapack_int ldvr, lapack_int* ilo,\n                                lapack_int* ihi, float* lscale, float* rscale,\n                                float* abnrm, float* bbnrm, float* rconde,\n                                float* rcondv, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_logical* bwork );\nlapack_int LAPACKE_dggevx_work( int matrix_order, char balanc, char jobvl,\n                                char jobvr, char sense, lapack_int n, double* a,\n                                lapack_int lda, double* b, lapack_int ldb,\n                                double* alphar, double* alphai, double* beta,\n                                double* vl, lapack_int ldvl, double* vr,\n                                lapack_int ldvr, lapack_int* ilo,\n                                lapack_int* ihi, double* lscale, double* rscale,\n                                double* abnrm, double* bbnrm, double* rconde,\n                                double* rcondv, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_logical* bwork );\nlapack_int LAPACKE_cggevx_work( int matrix_order, char balanc, char jobvl,\n                                char jobvr, char sense, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* alpha,\n                                lapack_complex_float* beta,\n                                lapack_complex_float* vl, lapack_int ldvl,\n                                lapack_complex_float* vr, 
lapack_int ldvr,\n                                lapack_int* ilo, lapack_int* ihi, float* lscale,\n                                float* rscale, float* abnrm, float* bbnrm,\n                                float* rconde, float* rcondv,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int* iwork,\n                                lapack_logical* bwork );\nlapack_int LAPACKE_zggevx_work( int matrix_order, char balanc, char jobvl,\n                                char jobvr, char sense, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* alpha,\n                                lapack_complex_double* beta,\n                                lapack_complex_double* vl, lapack_int ldvl,\n                                lapack_complex_double* vr, lapack_int ldvr,\n                                lapack_int* ilo, lapack_int* ihi,\n                                double* lscale, double* rscale, double* abnrm,\n                                double* bbnrm, double* rconde, double* rcondv,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int* iwork,\n                                lapack_logical* bwork );\n\nlapack_int LAPACKE_sggglm_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, float* d, float* x,\n                                float* y, float* work, lapack_int lwork );\nlapack_int LAPACKE_dggglm_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, double* d, double* x,\n      
                          double* y, double* work, lapack_int lwork );\nlapack_int LAPACKE_cggglm_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* d,\n                                lapack_complex_float* x,\n                                lapack_complex_float* y,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zggglm_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, lapack_complex_double* d,\n                                lapack_complex_double* x,\n                                lapack_complex_double* y,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sgghrd_work( int matrix_order, char compq, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                float* a, lapack_int lda, float* b,\n                                lapack_int ldb, float* q, lapack_int ldq,\n                                float* z, lapack_int ldz );\nlapack_int LAPACKE_dgghrd_work( int matrix_order, char compq, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                double* a, lapack_int lda, double* b,\n                                lapack_int ldb, double* q, lapack_int ldq,\n                                double* z, lapack_int ldz );\nlapack_int LAPACKE_cgghrd_work( int matrix_order, char compq, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                
lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* z, lapack_int ldz );\nlapack_int LAPACKE_zgghrd_work( int matrix_order, char compq, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_complex_double* z, lapack_int ldz );\n\nlapack_int LAPACKE_sgglse_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int p, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, float* c, float* d,\n                                float* x, float* work, lapack_int lwork );\nlapack_int LAPACKE_dgglse_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int p, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, double* c, double* d,\n                                double* x, double* work, lapack_int lwork );\nlapack_int LAPACKE_cgglse_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int p, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* c,\n                                lapack_complex_float* d,\n                                lapack_complex_float* x,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zgglse_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int p, lapack_complex_double* a,\n        
                        lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, lapack_complex_double* c,\n                                lapack_complex_double* d,\n                                lapack_complex_double* x,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sggqrf_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, float* a, lapack_int lda,\n                                float* taua, float* b, lapack_int ldb,\n                                float* taub, float* work, lapack_int lwork );\nlapack_int LAPACKE_dggqrf_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, double* a, lapack_int lda,\n                                double* taua, double* b, lapack_int ldb,\n                                double* taub, double* work, lapack_int lwork );\nlapack_int LAPACKE_cggqrf_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* taua,\n                                lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* taub,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zggqrf_work( int matrix_order, lapack_int n, lapack_int m,\n                                lapack_int p, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* taua,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* taub,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sggrqf_work( int matrix_order, lapack_int m, lapack_int p,\n                                lapack_int n, float* a, lapack_int 
lda,\n                                float* taua, float* b, lapack_int ldb,\n                                float* taub, float* work, lapack_int lwork );\nlapack_int LAPACKE_dggrqf_work( int matrix_order, lapack_int m, lapack_int p,\n                                lapack_int n, double* a, lapack_int lda,\n                                double* taua, double* b, lapack_int ldb,\n                                double* taub, double* work, lapack_int lwork );\nlapack_int LAPACKE_cggrqf_work( int matrix_order, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* taua,\n                                lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* taub,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zggrqf_work( int matrix_order, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* taua,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* taub,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sggsvd_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int n,\n                                lapack_int p, lapack_int* k, lapack_int* l,\n                                float* a, lapack_int lda, float* b,\n                                lapack_int ldb, float* alpha, float* beta,\n                                float* u, lapack_int ldu, float* v,\n                                lapack_int ldv, float* q, lapack_int ldq,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dggsvd_work( int matrix_order, char jobu, 
char jobv,\n                                char jobq, lapack_int m, lapack_int n,\n                                lapack_int p, lapack_int* k, lapack_int* l,\n                                double* a, lapack_int lda, double* b,\n                                lapack_int ldb, double* alpha, double* beta,\n                                double* u, lapack_int ldu, double* v,\n                                lapack_int ldv, double* q, lapack_int ldq,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cggsvd_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int n,\n                                lapack_int p, lapack_int* k, lapack_int* l,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                float* alpha, float* beta,\n                                lapack_complex_float* u, lapack_int ldu,\n                                lapack_complex_float* v, lapack_int ldv,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* work, float* rwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_zggsvd_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int n,\n                                lapack_int p, lapack_int* k, lapack_int* l,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                double* alpha, double* beta,\n                                lapack_complex_double* u, lapack_int ldu,\n                                lapack_complex_double* v, lapack_int ldv,\n                                lapack_complex_double* q, lapack_int ldq,\n                                
lapack_complex_double* work, double* rwork,\n                                lapack_int* iwork );\n\nlapack_int LAPACKE_sggsvp_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, float tola,\n                                float tolb, lapack_int* k, lapack_int* l,\n                                float* u, lapack_int ldu, float* v,\n                                lapack_int ldv, float* q, lapack_int ldq,\n                                lapack_int* iwork, float* tau, float* work );\nlapack_int LAPACKE_dggsvp_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, double tola,\n                                double tolb, lapack_int* k, lapack_int* l,\n                                double* u, lapack_int ldu, double* v,\n                                lapack_int ldv, double* q, lapack_int ldq,\n                                lapack_int* iwork, double* tau, double* work );\nlapack_int LAPACKE_cggsvp_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb, float tola, float tolb,\n                                lapack_int* k, lapack_int* l,\n                                lapack_complex_float* u, lapack_int ldu,\n                                lapack_complex_float* v, lapack_int ldv,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_int* iwork, float* rwork,\n 
                               lapack_complex_float* tau,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zggsvp_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, double tola, double tolb,\n                                lapack_int* k, lapack_int* l,\n                                lapack_complex_double* u, lapack_int ldu,\n                                lapack_complex_double* v, lapack_int ldv,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_int* iwork, double* rwork,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_sgtcon_work( char norm, lapack_int n, const float* dl,\n                                const float* d, const float* du,\n                                const float* du2, const lapack_int* ipiv,\n                                float anorm, float* rcond, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgtcon_work( char norm, lapack_int n, const double* dl,\n                                const double* d, const double* du,\n                                const double* du2, const lapack_int* ipiv,\n                                double anorm, double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_cgtcon_work( char norm, lapack_int n,\n                                const lapack_complex_float* dl,\n                                const lapack_complex_float* d,\n                                const lapack_complex_float* du,\n                                const lapack_complex_float* du2,\n                 
               const lapack_int* ipiv, float anorm,\n                                float* rcond, lapack_complex_float* work );\nlapack_int LAPACKE_zgtcon_work( char norm, lapack_int n,\n                                const lapack_complex_double* dl,\n                                const lapack_complex_double* d,\n                                const lapack_complex_double* du,\n                                const lapack_complex_double* du2,\n                                const lapack_int* ipiv, double anorm,\n                                double* rcond, lapack_complex_double* work );\n\nlapack_int LAPACKE_sgtrfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const float* dl,\n                                const float* d, const float* du,\n                                const float* dlf, const float* df,\n                                const float* duf, const float* du2,\n                                const lapack_int* ipiv, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* ferr, float* berr, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dgtrfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const double* dl,\n                                const double* d, const double* du,\n                                const double* dlf, const double* df,\n                                const double* duf, const double* du2,\n                                const lapack_int* ipiv, const double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* ferr, double* berr, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_cgtrfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const 
lapack_complex_float* dl,\n                                const lapack_complex_float* d,\n                                const lapack_complex_float* du,\n                                const lapack_complex_float* dlf,\n                                const lapack_complex_float* df,\n                                const lapack_complex_float* duf,\n                                const lapack_complex_float* du2,\n                                const lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zgtrfs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs,\n                                const lapack_complex_double* dl,\n                                const lapack_complex_double* d,\n                                const lapack_complex_double* du,\n                                const lapack_complex_double* dlf,\n                                const lapack_complex_double* df,\n                                const lapack_complex_double* duf,\n                                const lapack_complex_double* du2,\n                                const lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               float* dl, float* d, float* du, float* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_dgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n  
                             double* dl, double* d, double* du, double* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_cgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               lapack_complex_float* dl,\n                               lapack_complex_float* d,\n                               lapack_complex_float* du,\n                               lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zgtsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               lapack_complex_double* dl,\n                               lapack_complex_double* d,\n                               lapack_complex_double* du,\n                               lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sgtsvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs, const float* dl,\n                                const float* d, const float* du, float* dlf,\n                                float* df, float* duf, float* du2,\n                                lapack_int* ipiv, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dgtsvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs, const double* dl,\n                                const double* d, const double* du, double* dlf,\n                                double* df, double* duf, double* du2,\n                                lapack_int* ipiv, const double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int 
LAPACKE_cgtsvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* dl,\n                                const lapack_complex_float* d,\n                                const lapack_complex_float* du,\n                                lapack_complex_float* dlf,\n                                lapack_complex_float* df,\n                                lapack_complex_float* duf,\n                                lapack_complex_float* du2, lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zgtsvx_work( int matrix_order, char fact, char trans,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* dl,\n                                const lapack_complex_double* d,\n                                const lapack_complex_double* du,\n                                lapack_complex_double* dlf,\n                                lapack_complex_double* df,\n                                lapack_complex_double* duf,\n                                lapack_complex_double* du2, lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sgttrf_work( lapack_int n, float* dl, float* d, float* du,\n                                float* du2, lapack_int* ipiv );\nlapack_int LAPACKE_dgttrf_work( lapack_int n, double* dl, 
double* d, double* du,\n                                double* du2, lapack_int* ipiv );\nlapack_int LAPACKE_cgttrf_work( lapack_int n, lapack_complex_float* dl,\n                                lapack_complex_float* d,\n                                lapack_complex_float* du,\n                                lapack_complex_float* du2, lapack_int* ipiv );\nlapack_int LAPACKE_zgttrf_work( lapack_int n, lapack_complex_double* dl,\n                                lapack_complex_double* d,\n                                lapack_complex_double* du,\n                                lapack_complex_double* du2, lapack_int* ipiv );\n\nlapack_int LAPACKE_sgttrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const float* dl,\n                                const float* d, const float* du,\n                                const float* du2, const lapack_int* ipiv,\n                                float* b, lapack_int ldb );\nlapack_int LAPACKE_dgttrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const double* dl,\n                                const double* d, const double* du,\n                                const double* du2, const lapack_int* ipiv,\n                                double* b, lapack_int ldb );\nlapack_int LAPACKE_cgttrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* dl,\n                                const lapack_complex_float* d,\n                                const lapack_complex_float* du,\n                                const lapack_complex_float* du2,\n                                const lapack_int* ipiv, lapack_complex_float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_zgttrs_work( int matrix_order, char trans, lapack_int n,\n                                lapack_int nrhs,\n                                const 
lapack_complex_double* dl,\n                                const lapack_complex_double* d,\n                                const lapack_complex_double* du,\n                                const lapack_complex_double* du2,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_chbev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_int kd,\n                               lapack_complex_float* ab, lapack_int ldab,\n                               float* w, lapack_complex_float* z,\n                               lapack_int ldz, lapack_complex_float* work,\n                               float* rwork );\nlapack_int LAPACKE_zhbev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_int kd,\n                               lapack_complex_double* ab, lapack_int ldab,\n                               double* w, lapack_complex_double* z,\n                               lapack_int ldz, lapack_complex_double* work,\n                               double* rwork );\n\nlapack_int LAPACKE_chbevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int kd,\n                                lapack_complex_float* ab, lapack_int ldab,\n                                float* w, lapack_complex_float* z,\n                                lapack_int ldz, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_zhbevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int kd,\n                                lapack_complex_double* ab, lapack_int ldab,\n                                double* w, lapack_complex_double* z,\n     
                           lapack_int ldz, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_chbevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int kd,\n                                lapack_complex_float* ab, lapack_int ldab,\n                                lapack_complex_float* q, lapack_int ldq,\n                                float vl, float vu, lapack_int il,\n                                lapack_int iu, float abstol, lapack_int* m,\n                                float* w, lapack_complex_float* z,\n                                lapack_int ldz, lapack_complex_float* work,\n                                float* rwork, lapack_int* iwork,\n                                lapack_int* ifail );\nlapack_int LAPACKE_zhbevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int kd,\n                                lapack_complex_double* ab, lapack_int ldab,\n                                lapack_complex_double* q, lapack_int ldq,\n                                double vl, double vu, lapack_int il,\n                                lapack_int iu, double abstol, lapack_int* m,\n                                double* w, lapack_complex_double* z,\n                                lapack_int ldz, lapack_complex_double* work,\n                                double* rwork, lapack_int* iwork,\n                                lapack_int* ifail );\n\nlapack_int LAPACKE_chbgst_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                lapack_complex_float* ab, lapack_int ldab,\n                                const lapack_complex_float* bb, lapack_int ldbb,\n 
                               lapack_complex_float* x, lapack_int ldx,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zhbgst_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                lapack_complex_double* ab, lapack_int ldab,\n                                const lapack_complex_double* bb,\n                                lapack_int ldbb, lapack_complex_double* x,\n                                lapack_int ldx, lapack_complex_double* work,\n                                double* rwork );\n\nlapack_int LAPACKE_chbgv_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_int ka, lapack_int kb,\n                               lapack_complex_float* ab, lapack_int ldab,\n                               lapack_complex_float* bb, lapack_int ldbb,\n                               float* w, lapack_complex_float* z,\n                               lapack_int ldz, lapack_complex_float* work,\n                               float* rwork );\nlapack_int LAPACKE_zhbgv_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_int ka, lapack_int kb,\n                               lapack_complex_double* ab, lapack_int ldab,\n                               lapack_complex_double* bb, lapack_int ldbb,\n                               double* w, lapack_complex_double* z,\n                               lapack_int ldz, lapack_complex_double* work,\n                               double* rwork );\n\nlapack_int LAPACKE_chbgvd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                lapack_complex_float* ab, lapack_int ldab,\n                                lapack_complex_float* bb, lapack_int ldbb,\n                                float* w, 
lapack_complex_float* z,\n                                lapack_int ldz, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_zhbgvd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                lapack_complex_double* ab, lapack_int ldab,\n                                lapack_complex_double* bb, lapack_int ldbb,\n                                double* w, lapack_complex_double* z,\n                                lapack_int ldz, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_chbgvx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int ka,\n                                lapack_int kb, lapack_complex_float* ab,\n                                lapack_int ldab, lapack_complex_float* bb,\n                                lapack_int ldbb, lapack_complex_float* q,\n                                lapack_int ldq, float vl, float vu,\n                                lapack_int il, lapack_int iu, float abstol,\n                                lapack_int* m, float* w,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_complex_float* work, float* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\nlapack_int LAPACKE_zhbgvx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int ka,\n                                lapack_int kb, lapack_complex_double* ab,\n                                
lapack_int ldab, lapack_complex_double* bb,\n                                lapack_int ldbb, lapack_complex_double* q,\n                                lapack_int ldq, double vl, double vu,\n                                lapack_int il, lapack_int iu, double abstol,\n                                lapack_int* m, double* w,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_complex_double* work, double* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_chbtrd_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int kd,\n                                lapack_complex_float* ab, lapack_int ldab,\n                                float* d, float* e, lapack_complex_float* q,\n                                lapack_int ldq, lapack_complex_float* work );\nlapack_int LAPACKE_zhbtrd_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int kd,\n                                lapack_complex_double* ab, lapack_int ldab,\n                                double* d, double* e, lapack_complex_double* q,\n                                lapack_int ldq, lapack_complex_double* work );\n\nlapack_int LAPACKE_checon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_int* ipiv, float anorm,\n                                float* rcond, lapack_complex_float* work );\nlapack_int LAPACKE_zhecon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_int* ipiv, double anorm,\n                                double* rcond, lapack_complex_double* work );\n\nlapack_int LAPACKE_cheequb_work( int matrix_order, char uplo, lapack_int n,\n            
                     const lapack_complex_float* a, lapack_int lda,\n                                 float* s, float* scond, float* amax,\n                                 lapack_complex_float* work );\nlapack_int LAPACKE_zheequb_work( int matrix_order, char uplo, lapack_int n,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 double* s, double* scond, double* amax,\n                                 lapack_complex_double* work );\n\nlapack_int LAPACKE_cheev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_complex_float* a,\n                               lapack_int lda, float* w,\n                               lapack_complex_float* work, lapack_int lwork,\n                               float* rwork );\nlapack_int LAPACKE_zheev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_complex_double* a,\n                               lapack_int lda, double* w,\n                               lapack_complex_double* work, lapack_int lwork,\n                               double* rwork );\n\nlapack_int LAPACKE_cheevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, float* w,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int lrwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_zheevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, double* w,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int lrwork,\n                                lapack_int* iwork, 
lapack_int liwork );\n\nlapack_int LAPACKE_cheevr_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                float vl, float vu, lapack_int il,\n                                lapack_int iu, float abstol, lapack_int* m,\n                                float* w, lapack_complex_float* z,\n                                lapack_int ldz, lapack_int* isuppz,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int lrwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_zheevr_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                double vl, double vu, lapack_int il,\n                                lapack_int iu, double abstol, lapack_int* m,\n                                double* w, lapack_complex_double* z,\n                                lapack_int ldz, lapack_int* isuppz,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int lrwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_cheevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                float vl, float vu, lapack_int il,\n                                lapack_int iu, float abstol, lapack_int* m,\n                                float* w, lapack_complex_float* z,\n                                lapack_int ldz, lapack_complex_float* work,\n                                lapack_int lwork, float* 
rwork,\n                                lapack_int* iwork, lapack_int* ifail );\nlapack_int LAPACKE_zheevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                double vl, double vu, lapack_int il,\n                                lapack_int iu, double abstol, lapack_int* m,\n                                double* w, lapack_complex_double* z,\n                                lapack_int ldz, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_chegst_work( int matrix_order, lapack_int itype, char uplo,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_zhegst_work( int matrix_order, lapack_int itype, char uplo,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda, const lapack_complex_double* b,\n                                lapack_int ldb );\n\nlapack_int LAPACKE_chegv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n, lapack_complex_float* a,\n                               lapack_int lda, lapack_complex_float* b,\n                               lapack_int ldb, float* w,\n                               lapack_complex_float* work, lapack_int lwork,\n                               float* rwork );\nlapack_int LAPACKE_zhegv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n,\n                               lapack_complex_double* a, lapack_int lda,\n                               lapack_complex_double* b, lapack_int ldb,\n   
                            double* w, lapack_complex_double* work,\n                               lapack_int lwork, double* rwork );\n\nlapack_int LAPACKE_chegvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                float* w, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_zhegvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                double* w, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_chegvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                float vl, float vu, lapack_int il,\n                                lapack_int iu, float abstol, lapack_int* m,\n                                float* w, lapack_complex_float* z,\n                                lapack_int ldz, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\nlapack_int LAPACKE_zhegvx_work( 
int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                double vl, double vu, lapack_int il,\n                                lapack_int iu, double abstol, lapack_int* m,\n                                double* w, lapack_complex_double* z,\n                                lapack_int ldz, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_cherfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zherfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_complex_double* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_cherfsx_work( int matrix_order, char uplo, char equed,\n      
                           lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_float* a, lapack_int lda,\n                                 const lapack_complex_float* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const float* s, const lapack_complex_float* b,\n                                 lapack_int ldb, lapack_complex_float* x,\n                                 lapack_int ldx, float* rcond, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, lapack_complex_float* work,\n                                 float* rwork );\nlapack_int LAPACKE_zherfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 const lapack_complex_double* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const double* s,\n                                 const lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int LAPACKE_chesv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_float* a,\n                               lapack_int lda, lapack_int* ipiv,\n                               lapack_complex_float* b, 
lapack_int ldb,\n                               lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zhesv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_double* a,\n                               lapack_int lda, lapack_int* ipiv,\n                               lapack_complex_double* b, lapack_int ldb,\n                               lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_chesvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* af, lapack_int ldaf,\n                                lapack_int* ipiv, const lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork );\nlapack_int LAPACKE_zhesvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* af, lapack_int ldaf,\n                                lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork );\n\nlapack_int LAPACKE_chesvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs,\n               
                  lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, float* s,\n                                 lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* x, lapack_int ldx,\n                                 float* rcond, float* rpvgrw, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, lapack_complex_float* work,\n                                 float* rwork );\nlapack_int LAPACKE_zhesvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, double* s,\n                                 lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* rpvgrw, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int LAPACKE_chetrd_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                float* d, float* e, lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zhetrd_work( int matrix_order, char uplo, lapack_int n,\n    
                            lapack_complex_double* a, lapack_int lda,\n                                double* d, double* e,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_chetrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* ipiv, lapack_complex_float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_zhetrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* ipiv, lapack_complex_double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_chetri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                const lapack_int* ipiv,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zhetri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_chetrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zhetrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                lapack_complex_double* b, lapack_int ldb 
);\n\nlapack_int LAPACKE_chfrk_work( int matrix_order, char transr, char uplo,\n                               char trans, lapack_int n, lapack_int k,\n                               float alpha, const lapack_complex_float* a,\n                               lapack_int lda, float beta,\n                               lapack_complex_float* c );\nlapack_int LAPACKE_zhfrk_work( int matrix_order, char transr, char uplo,\n                               char trans, lapack_int n, lapack_int k,\n                               double alpha, const lapack_complex_double* a,\n                               lapack_int lda, double beta,\n                               lapack_complex_double* c );\n\nlapack_int LAPACKE_shgeqz_work( int matrix_order, char job, char compq,\n                                char compz, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, float* h, lapack_int ldh,\n                                float* t, lapack_int ldt, float* alphar,\n                                float* alphai, float* beta, float* q,\n                                lapack_int ldq, float* z, lapack_int ldz,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dhgeqz_work( int matrix_order, char job, char compq,\n                                char compz, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, double* h, lapack_int ldh,\n                                double* t, lapack_int ldt, double* alphar,\n                                double* alphai, double* beta, double* q,\n                                lapack_int ldq, double* z, lapack_int ldz,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_chgeqz_work( int matrix_order, char job, char compq,\n                                char compz, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, lapack_complex_float* h,\n                                lapack_int ldh, 
lapack_complex_float* t,\n                                lapack_int ldt, lapack_complex_float* alpha,\n                                lapack_complex_float* beta,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork );\nlapack_int LAPACKE_zhgeqz_work( int matrix_order, char job, char compq,\n                                char compz, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, lapack_complex_double* h,\n                                lapack_int ldh, lapack_complex_double* t,\n                                lapack_int ldt, lapack_complex_double* alpha,\n                                lapack_complex_double* beta,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork );\n\nlapack_int LAPACKE_chpcon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* ap,\n                                const lapack_int* ipiv, float anorm,\n                                float* rcond, lapack_complex_float* work );\nlapack_int LAPACKE_zhpcon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* ap,\n                                const lapack_int* ipiv, double anorm,\n                                double* rcond, lapack_complex_double* work );\n\nlapack_int LAPACKE_chpev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_complex_float* ap, float* w,\n                               lapack_complex_float* z, lapack_int ldz,\n                             
  lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zhpev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_complex_double* ap,\n                               double* w, lapack_complex_double* z,\n                               lapack_int ldz, lapack_complex_double* work,\n                               double* rwork );\n\nlapack_int LAPACKE_chpevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_complex_float* ap,\n                                float* w, lapack_complex_float* z,\n                                lapack_int ldz, lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_zhpevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_complex_double* ap,\n                                double* w, lapack_complex_double* z,\n                                lapack_int ldz, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_chpevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n,\n                                lapack_complex_float* ap, float vl, float vu,\n                                lapack_int il, lapack_int iu, float abstol,\n                                lapack_int* m, float* w,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_complex_float* work, float* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\nlapack_int LAPACKE_zhpevx_work( int matrix_order, char jobz, char 
range,\n                                char uplo, lapack_int n,\n                                lapack_complex_double* ap, double vl, double vu,\n                                lapack_int il, lapack_int iu, double abstol,\n                                lapack_int* m, double* w,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_complex_double* work, double* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_chpgst_work( int matrix_order, lapack_int itype, char uplo,\n                                lapack_int n, lapack_complex_float* ap,\n                                const lapack_complex_float* bp );\nlapack_int LAPACKE_zhpgst_work( int matrix_order, lapack_int itype, char uplo,\n                                lapack_int n, lapack_complex_double* ap,\n                                const lapack_complex_double* bp );\n\nlapack_int LAPACKE_chpgv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n,\n                               lapack_complex_float* ap,\n                               lapack_complex_float* bp, float* w,\n                               lapack_complex_float* z, lapack_int ldz,\n                               lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zhpgv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n,\n                               lapack_complex_double* ap,\n                               lapack_complex_double* bp, double* w,\n                               lapack_complex_double* z, lapack_int ldz,\n                               lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_chpgvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n,\n                                lapack_complex_float* ap,\n                 
               lapack_complex_float* bp, float* w,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int lrwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_zhpgvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n,\n                                lapack_complex_double* ap,\n                                lapack_complex_double* bp, double* w,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int lrwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_chpgvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n,\n                                lapack_complex_float* ap,\n                                lapack_complex_float* bp, float vl, float vu,\n                                lapack_int il, lapack_int iu, float abstol,\n                                lapack_int* m, float* w,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_complex_float* work, float* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\nlapack_int LAPACKE_zhpgvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n,\n                                lapack_complex_double* ap,\n                                lapack_complex_double* bp, double vl, double vu,\n                                lapack_int il, lapack_int iu, double abstol,\n                                lapack_int* m, double* w,\n                
                lapack_complex_double* z, lapack_int ldz,\n                                lapack_complex_double* work, double* rwork,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_chprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* ap,\n                                const lapack_complex_float* afp,\n                                const lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zhprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                const lapack_complex_double* afp,\n                                const lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_chpsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_float* ap,\n                               lapack_int* ipiv, lapack_complex_float* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_zhpsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_double* ap,\n                               lapack_int* ipiv, lapack_complex_double* b,\n                               lapack_int ldb );\n\nlapack_int LAPACKE_chpsvx_work( int matrix_order, 
char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* ap,\n                                lapack_complex_float* afp, lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zhpsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                lapack_complex_double* afp, lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_chptrd_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* ap, float* d, float* e,\n                                lapack_complex_float* tau );\nlapack_int LAPACKE_zhptrd_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* ap, double* d, double* e,\n                                lapack_complex_double* tau );\n\nlapack_int LAPACKE_chptrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* ap, lapack_int* ipiv );\nlapack_int LAPACKE_zhptrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* ap, lapack_int* ipiv );\n\nlapack_int LAPACKE_chptri_work( int matrix_order, char uplo, lapack_int n,\n                                
lapack_complex_float* ap,\n                                const lapack_int* ipiv,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zhptri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* ap,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_chptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* ap,\n                                const lapack_int* ipiv, lapack_complex_float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_zhptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_shsein_work( int matrix_order, char job, char eigsrc,\n                                char initv, lapack_logical* select,\n                                lapack_int n, const float* h, lapack_int ldh,\n                                float* wr, const float* wi, float* vl,\n                                lapack_int ldvl, float* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m, float* work,\n                                lapack_int* ifaill, lapack_int* ifailr );\nlapack_int LAPACKE_dhsein_work( int matrix_order, char job, char eigsrc,\n                                char initv, lapack_logical* select,\n                                lapack_int n, const double* h, lapack_int ldh,\n                                double* wr, const double* wi, double* vl,\n                                lapack_int ldvl, double* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m, double* 
work,\n                                lapack_int* ifaill, lapack_int* ifailr );\nlapack_int LAPACKE_chsein_work( int matrix_order, char job, char eigsrc,\n                                char initv, const lapack_logical* select,\n                                lapack_int n, const lapack_complex_float* h,\n                                lapack_int ldh, lapack_complex_float* w,\n                                lapack_complex_float* vl, lapack_int ldvl,\n                                lapack_complex_float* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_float* work, float* rwork,\n                                lapack_int* ifaill, lapack_int* ifailr );\nlapack_int LAPACKE_zhsein_work( int matrix_order, char job, char eigsrc,\n                                char initv, const lapack_logical* select,\n                                lapack_int n, const lapack_complex_double* h,\n                                lapack_int ldh, lapack_complex_double* w,\n                                lapack_complex_double* vl, lapack_int ldvl,\n                                lapack_complex_double* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_double* work, double* rwork,\n                                lapack_int* ifaill, lapack_int* ifailr );\n\nlapack_int LAPACKE_shseqr_work( int matrix_order, char job, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                float* h, lapack_int ldh, float* wr, float* wi,\n                                float* z, lapack_int ldz, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dhseqr_work( int matrix_order, char job, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                double* h, lapack_int ldh, double* 
wr,\n                                double* wi, double* z, lapack_int ldz,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_chseqr_work( int matrix_order, char job, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                lapack_complex_float* h, lapack_int ldh,\n                                lapack_complex_float* w,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zhseqr_work( int matrix_order, char job, char compz,\n                                lapack_int n, lapack_int ilo, lapack_int ihi,\n                                lapack_complex_double* h, lapack_int ldh,\n                                lapack_complex_double* w,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_clacgv_work( lapack_int n, lapack_complex_float* x,\n                                lapack_int incx );\nlapack_int LAPACKE_zlacgv_work( lapack_int n, lapack_complex_double* x,\n                                lapack_int incx );\n\nlapack_int LAPACKE_slacpy_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, const float* a, lapack_int lda,\n                                float* b, lapack_int ldb );\nlapack_int LAPACKE_dlacpy_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, const double* a, lapack_int lda,\n                                double* b, lapack_int ldb );\nlapack_int LAPACKE_clacpy_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, const lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb 
);\nlapack_int LAPACKE_zlacpy_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, const lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb );\n\nlapack_int LAPACKE_zlag2c_work( int matrix_order, lapack_int m, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_float* sa, lapack_int ldsa );\n\nlapack_int LAPACKE_slag2d_work( int matrix_order, lapack_int m, lapack_int n,\n                                const float* sa, lapack_int ldsa, double* a,\n                                lapack_int lda );\n\nlapack_int LAPACKE_dlag2s_work( int matrix_order, lapack_int m, lapack_int n,\n                                const double* a, lapack_int lda, float* sa,\n                                lapack_int ldsa );\n\nlapack_int LAPACKE_clag2z_work( int matrix_order, lapack_int m, lapack_int n,\n                                const lapack_complex_float* sa, lapack_int ldsa,\n                                lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_slagge_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku, const float* d,\n                                float* a, lapack_int lda, lapack_int* iseed,\n                                float* work );\nlapack_int LAPACKE_dlagge_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku, const double* d,\n                                double* a, lapack_int lda, lapack_int* iseed,\n                                double* work );\nlapack_int LAPACKE_clagge_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku, const float* d,\n                                lapack_complex_float* a, lapack_int lda,\n          
                      lapack_int* iseed, lapack_complex_float* work );\nlapack_int LAPACKE_zlagge_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int kl, lapack_int ku, const double* d,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* iseed,\n                                lapack_complex_double* work );\n                                \nlapack_int LAPACKE_claghe_work( int matrix_order, lapack_int n, lapack_int k,\n                                const float* d, lapack_complex_float* a,\n                                lapack_int lda, lapack_int* iseed,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zlaghe_work( int matrix_order, lapack_int n, lapack_int k,\n                                const double* d, lapack_complex_double* a,\n                                lapack_int lda, lapack_int* iseed,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_slagsy_work( int matrix_order, lapack_int n, lapack_int k,\n                                const float* d, float* a, lapack_int lda,\n                                lapack_int* iseed, float* work );\nlapack_int LAPACKE_dlagsy_work( int matrix_order, lapack_int n, lapack_int k,\n                                const double* d, double* a, lapack_int lda,\n                                lapack_int* iseed, double* work );\nlapack_int LAPACKE_clagsy_work( int matrix_order, lapack_int n, lapack_int k,\n                                const float* d, lapack_complex_float* a,\n                                lapack_int lda, lapack_int* iseed,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zlagsy_work( int matrix_order, lapack_int n, lapack_int k,\n                                const double* d, lapack_complex_double* a,\n                                lapack_int lda, lapack_int* iseed,\n                   
             lapack_complex_double* work );\n\nlapack_int LAPACKE_slapmr_work( int matrix_order, lapack_logical forwrd,\n                                lapack_int m, lapack_int n, float* x,\n                                lapack_int ldx, lapack_int* k );\nlapack_int LAPACKE_dlapmr_work( int matrix_order, lapack_logical forwrd,\n                                lapack_int m, lapack_int n, double* x,\n                                lapack_int ldx, lapack_int* k );\nlapack_int LAPACKE_clapmr_work( int matrix_order, lapack_logical forwrd,\n                                lapack_int m, lapack_int n,\n                                lapack_complex_float* x, lapack_int ldx,\n                                lapack_int* k );\nlapack_int LAPACKE_zlapmr_work( int matrix_order, lapack_logical forwrd,\n                                lapack_int m, lapack_int n,\n                                lapack_complex_double* x, lapack_int ldx,\n                                lapack_int* k );\n\nlapack_int LAPACKE_slartgp_work( float f, float g, float* cs, float* sn,\n                                 float* r );\nlapack_int LAPACKE_dlartgp_work( double f, double g, double* cs, double* sn,\n                                 double* r );\n\nlapack_int LAPACKE_slartgs_work( float x, float y, float sigma, float* cs,\n                                 float* sn );\nlapack_int LAPACKE_dlartgs_work( double x, double y, double sigma, double* cs,\n                                 double* sn );\n                                \nfloat LAPACKE_slapy2_work( float x, float y );\ndouble LAPACKE_dlapy2_work( double x, double y );\n\nfloat LAPACKE_slapy3_work( float x, float y, float z );\ndouble LAPACKE_dlapy3_work( double x, double y, double z );\n\nfloat LAPACKE_slamch_work( char cmach );\ndouble LAPACKE_dlamch_work( char cmach );\n\nfloat LAPACKE_slange_work( int matrix_order, char norm, lapack_int m,\n                                lapack_int n, const float* a, lapack_int lda,\n                   
             float* work );\ndouble LAPACKE_dlange_work( int matrix_order, char norm, lapack_int m,\n                                lapack_int n, const double* a, lapack_int lda,\n                                double* work );\nfloat LAPACKE_clange_work( int matrix_order, char norm, lapack_int m,\n                                lapack_int n, const lapack_complex_float* a,\n                                lapack_int lda, float* work );\ndouble LAPACKE_zlange_work( int matrix_order, char norm, lapack_int m,\n                                lapack_int n, const lapack_complex_double* a,\n                                lapack_int lda, double* work );\n\nfloat LAPACKE_clanhe_work( int matrix_order, char norm, char uplo,\n                                lapack_int n, const lapack_complex_float* a,\n                                lapack_int lda, float* work );\ndouble LAPACKE_zlanhe_work( int matrix_order, char norm, char uplo,\n                                lapack_int n, const lapack_complex_double* a,\n                                lapack_int lda, double* work );\n\nfloat LAPACKE_slansy_work( int matrix_order, char norm, char uplo,\n                                lapack_int n, const float* a, lapack_int lda,\n                                float* work );\ndouble LAPACKE_dlansy_work( int matrix_order, char norm, char uplo,\n                                lapack_int n, const double* a, lapack_int lda,\n                                double* work );\nfloat LAPACKE_clansy_work( int matrix_order, char norm, char uplo,\n                                lapack_int n, const lapack_complex_float* a,\n                                lapack_int lda, float* work );\ndouble LAPACKE_zlansy_work( int matrix_order, char norm, char uplo,\n                                lapack_int n, const lapack_complex_double* a,\n                                lapack_int lda, double* work );\n\nfloat LAPACKE_slantr_work( int matrix_order, char norm, char uplo,\n                            
    char diag, lapack_int m, lapack_int n, const float* a,\n                                lapack_int lda, float* work );\ndouble LAPACKE_dlantr_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int m, lapack_int n,\n                                const double* a, lapack_int lda, double* work );\nfloat LAPACKE_clantr_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int m, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                float* work );\ndouble LAPACKE_zlantr_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int m, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                double* work );\n\nlapack_int LAPACKE_slarfb_work( int matrix_order, char side, char trans,\n                                char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k, const float* v,\n                                lapack_int ldv, const float* t, lapack_int ldt,\n                                float* c, lapack_int ldc, float* work,\n                                lapack_int ldwork );\nlapack_int LAPACKE_dlarfb_work( int matrix_order, char side, char trans,\n                                char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k, const double* v,\n                                lapack_int ldv, const double* t, lapack_int ldt,\n                                double* c, lapack_int ldc, double* work,\n                                lapack_int ldwork );\nlapack_int LAPACKE_clarfb_work( int matrix_order, char side, char trans,\n                                char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k,\n                                const 
lapack_complex_float* v, lapack_int ldv,\n                                const lapack_complex_float* t, lapack_int ldt,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int ldwork );\nlapack_int LAPACKE_zlarfb_work( int matrix_order, char side, char trans,\n                                char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k,\n                                const lapack_complex_double* v, lapack_int ldv,\n                                const lapack_complex_double* t, lapack_int ldt,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work,\n                                lapack_int ldwork );\n\nlapack_int LAPACKE_slarfg_work( lapack_int n, float* alpha, float* x,\n                                lapack_int incx, float* tau );\nlapack_int LAPACKE_dlarfg_work( lapack_int n, double* alpha, double* x,\n                                lapack_int incx, double* tau );\nlapack_int LAPACKE_clarfg_work( lapack_int n, lapack_complex_float* alpha,\n                                lapack_complex_float* x, lapack_int incx,\n                                lapack_complex_float* tau );\nlapack_int LAPACKE_zlarfg_work( lapack_int n, lapack_complex_double* alpha,\n                                lapack_complex_double* x, lapack_int incx,\n                                lapack_complex_double* tau );\n\nlapack_int LAPACKE_slarft_work( int matrix_order, char direct, char storev,\n                                lapack_int n, lapack_int k, const float* v,\n                                lapack_int ldv, const float* tau, float* t,\n                                lapack_int ldt );\nlapack_int LAPACKE_dlarft_work( int matrix_order, char direct, char storev,\n                                lapack_int n, lapack_int k, const double* v,\n                  
              lapack_int ldv, const double* tau, double* t,\n                                lapack_int ldt );\nlapack_int LAPACKE_clarft_work( int matrix_order, char direct, char storev,\n                                lapack_int n, lapack_int k,\n                                const lapack_complex_float* v, lapack_int ldv,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_zlarft_work( int matrix_order, char direct, char storev,\n                                lapack_int n, lapack_int k,\n                                const lapack_complex_double* v, lapack_int ldv,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_slarfx_work( int matrix_order, char side, lapack_int m,\n                                lapack_int n, const float* v, float tau,\n                                float* c, lapack_int ldc, float* work );\nlapack_int LAPACKE_dlarfx_work( int matrix_order, char side, lapack_int m,\n                                lapack_int n, const double* v, double tau,\n                                double* c, lapack_int ldc, double* work );\nlapack_int LAPACKE_clarfx_work( int matrix_order, char side, lapack_int m,\n                                lapack_int n, const lapack_complex_float* v,\n                                lapack_complex_float tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zlarfx_work( int matrix_order, char side, lapack_int m,\n                                lapack_int n, const lapack_complex_double* v,\n                                lapack_complex_double tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work 
);\n\nlapack_int LAPACKE_slarnv_work( lapack_int idist, lapack_int* iseed,\n                                lapack_int n, float* x );\nlapack_int LAPACKE_dlarnv_work( lapack_int idist, lapack_int* iseed,\n                                lapack_int n, double* x );\nlapack_int LAPACKE_clarnv_work( lapack_int idist, lapack_int* iseed,\n                                lapack_int n, lapack_complex_float* x );\nlapack_int LAPACKE_zlarnv_work( lapack_int idist, lapack_int* iseed,\n                                lapack_int n, lapack_complex_double* x );\n\nlapack_int LAPACKE_slaset_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, float alpha, float beta, float* a,\n                                lapack_int lda );\nlapack_int LAPACKE_dlaset_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, double alpha, double beta,\n                                double* a, lapack_int lda );\nlapack_int LAPACKE_claset_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, lapack_complex_float alpha,\n                                lapack_complex_float beta,\n                                lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zlaset_work( int matrix_order, char uplo, lapack_int m,\n                                lapack_int n, lapack_complex_double alpha,\n                                lapack_complex_double beta,\n                                lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_slasrt_work( char id, lapack_int n, float* d );\nlapack_int LAPACKE_dlasrt_work( char id, lapack_int n, double* d );\n\nlapack_int LAPACKE_slaswp_work( int matrix_order, lapack_int n, float* a,\n                                lapack_int lda, lapack_int k1, lapack_int k2,\n                                const lapack_int* ipiv, lapack_int incx );\nlapack_int LAPACKE_dlaswp_work( int matrix_order, lapack_int n, double* 
a,\n                                lapack_int lda, lapack_int k1, lapack_int k2,\n                                const lapack_int* ipiv, lapack_int incx );\nlapack_int LAPACKE_claswp_work( int matrix_order, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int k1, lapack_int k2,\n                                const lapack_int* ipiv, lapack_int incx );\nlapack_int LAPACKE_zlaswp_work( int matrix_order, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int k1, lapack_int k2,\n                                const lapack_int* ipiv, lapack_int incx );\n\nlapack_int LAPACKE_slatms_work( int matrix_order, lapack_int m, lapack_int n,\n                                char dist, lapack_int* iseed, char sym,\n                                float* d, lapack_int mode, float cond,\n                                float dmax, lapack_int kl, lapack_int ku,\n                                char pack, float* a, lapack_int lda,\n                                float* work );\nlapack_int LAPACKE_dlatms_work( int matrix_order, lapack_int m, lapack_int n,\n                                char dist, lapack_int* iseed, char sym,\n                                double* d, lapack_int mode, double cond,\n                                double dmax, lapack_int kl, lapack_int ku,\n                                char pack, double* a, lapack_int lda,\n                                double* work );\nlapack_int LAPACKE_clatms_work( int matrix_order, lapack_int m, lapack_int n,\n                                char dist, lapack_int* iseed, char sym,\n                                float* d, lapack_int mode, float cond,\n                                float dmax, lapack_int kl, lapack_int ku,\n                                char pack, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* work 
);\nlapack_int LAPACKE_zlatms_work( int matrix_order, lapack_int m, lapack_int n,\n                                char dist, lapack_int* iseed, char sym,\n                                double* d, lapack_int mode, double cond,\n                                double dmax, lapack_int kl, lapack_int ku,\n                                char pack, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* work );\n\nlapack_int LAPACKE_slauum_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda );\nlapack_int LAPACKE_dlauum_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda );\nlapack_int LAPACKE_clauum_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zlauum_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_sopgtr_work( int matrix_order, char uplo, lapack_int n,\n                                const float* ap, const float* tau, float* q,\n                                lapack_int ldq, float* work );\nlapack_int LAPACKE_dopgtr_work( int matrix_order, char uplo, lapack_int n,\n                                const double* ap, const double* tau, double* q,\n                                lapack_int ldq, double* work );\n\nlapack_int LAPACKE_sopmtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const float* ap, const float* tau, float* c,\n                                lapack_int ldc, float* work );\nlapack_int LAPACKE_dopmtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const double* ap, const double* tau, double* c,\n   
                             lapack_int ldc, double* work );\n\nlapack_int LAPACKE_sorgbr_work( int matrix_order, char vect, lapack_int m,\n                                lapack_int n, lapack_int k, float* a,\n                                lapack_int lda, const float* tau, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dorgbr_work( int matrix_order, char vect, lapack_int m,\n                                lapack_int n, lapack_int k, double* a,\n                                lapack_int lda, const double* tau, double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_sorghr_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, float* a, lapack_int lda,\n                                const float* tau, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dorghr_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, double* a, lapack_int lda,\n                                const double* tau, double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_sorglq_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, float* a, lapack_int lda,\n                                const float* tau, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dorglq_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, double* a, lapack_int lda,\n                                const double* tau, double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_sorgql_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, float* a, lapack_int lda,\n                                const float* tau, float* work,\n                                lapack_int lwork 
);\nlapack_int LAPACKE_dorgql_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, double* a, lapack_int lda,\n                                const double* tau, double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_sorgqr_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, float* a, lapack_int lda,\n                                const float* tau, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dorgqr_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, double* a, lapack_int lda,\n                                const double* tau, double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_sorgrq_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, float* a, lapack_int lda,\n                                const float* tau, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dorgrq_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, double* a, lapack_int lda,\n                                const double* tau, double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_sorgtr_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda, const float* tau,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dorgtr_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda, const double* tau,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormbr_work( int matrix_order, char vect, char side,\n                                char trans, lapack_int m, lapack_int n,\n                                lapack_int k, 
const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormbr_work( int matrix_order, char vect, char side,\n                                char trans, lapack_int m, lapack_int n,\n                                lapack_int k, const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormhr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormhr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormlq_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormlq_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* 
work, lapack_int lwork );\n\nlapack_int LAPACKE_sormql_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormql_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormqr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormqr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormrq_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormrq_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                         
       const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormrz_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                lapack_int l, const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormrz_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                lapack_int l, const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_sormtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const float* a, lapack_int lda,\n                                const float* tau, float* c, lapack_int ldc,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dormtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const double* a, lapack_int lda,\n                                const double* tau, double* c, lapack_int ldc,\n                                double* work, lapack_int lwork );\n\nlapack_int LAPACKE_spbcon_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, const float* ab, lapack_int ldab,\n                                float anorm, float* rcond, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dpbcon_work( int matrix_order, char uplo, 
lapack_int n,\n                                lapack_int kd, const double* ab,\n                                lapack_int ldab, double anorm, double* rcond,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cpbcon_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, const lapack_complex_float* ab,\n                                lapack_int ldab, float anorm, float* rcond,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zpbcon_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, const lapack_complex_double* ab,\n                                lapack_int ldab, double anorm, double* rcond,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_spbequ_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, const float* ab, lapack_int ldab,\n                                float* s, float* scond, float* amax );\nlapack_int LAPACKE_dpbequ_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, const double* ab,\n                                lapack_int ldab, double* s, double* scond,\n                                double* amax );\nlapack_int LAPACKE_cpbequ_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, const lapack_complex_float* ab,\n                                lapack_int ldab, float* s, float* scond,\n                                float* amax );\nlapack_int LAPACKE_zpbequ_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, const lapack_complex_double* ab,\n                                lapack_int ldab, double* s, double* scond,\n                                double* amax );\n\nlapack_int LAPACKE_spbrfs_work( int matrix_order, char uplo, lapack_int n,\n          
                      lapack_int kd, lapack_int nrhs, const float* ab,\n                                lapack_int ldab, const float* afb,\n                                lapack_int ldafb, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* ferr, float* berr, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dpbrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs,\n                                const double* ab, lapack_int ldab,\n                                const double* afb, lapack_int ldafb,\n                                const double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cpbrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs,\n                                const lapack_complex_float* ab, lapack_int ldab,\n                                const lapack_complex_float* afb,\n                                lapack_int ldafb, const lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zpbrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs,\n                                const lapack_complex_double* ab,\n                                lapack_int ldab,\n                                const lapack_complex_double* afb,\n                                lapack_int ldafb,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                
lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_spbstf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kb, float* bb, lapack_int ldbb );\nlapack_int LAPACKE_dpbstf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kb, double* bb, lapack_int ldbb );\nlapack_int LAPACKE_cpbstf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kb, lapack_complex_float* bb,\n                                lapack_int ldbb );\nlapack_int LAPACKE_zpbstf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kb, lapack_complex_double* bb,\n                                lapack_int ldbb );\n\nlapack_int LAPACKE_spbsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int kd, lapack_int nrhs, float* ab,\n                               lapack_int ldab, float* b, lapack_int ldb );\nlapack_int LAPACKE_dpbsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int kd, lapack_int nrhs, double* ab,\n                               lapack_int ldab, double* b, lapack_int ldb );\nlapack_int LAPACKE_cpbsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int kd, lapack_int nrhs,\n                               lapack_complex_float* ab, lapack_int ldab,\n                               lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpbsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int kd, lapack_int nrhs,\n                               lapack_complex_double* ab, lapack_int ldab,\n                               lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_spbsvx_work( int matrix_order, char fact, 
char uplo,\n                                lapack_int n, lapack_int kd, lapack_int nrhs,\n                                float* ab, lapack_int ldab, float* afb,\n                                lapack_int ldafb, char* equed, float* s,\n                                float* b, lapack_int ldb, float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dpbsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int kd, lapack_int nrhs,\n                                double* ab, lapack_int ldab, double* afb,\n                                lapack_int ldafb, char* equed, double* s,\n                                double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* rcond, double* ferr,\n                                double* berr, double* work, lapack_int* iwork );\nlapack_int LAPACKE_cpbsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int kd, lapack_int nrhs,\n                                lapack_complex_float* ab, lapack_int ldab,\n                                lapack_complex_float* afb, lapack_int ldafb,\n                                char* equed, float* s, lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_zpbsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int kd, lapack_int nrhs,\n                                lapack_complex_double* ab, lapack_int ldab,\n                                lapack_complex_double* afb, lapack_int ldafb,\n                                
char* equed, double* s,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_spbtrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, float* ab, lapack_int ldab );\nlapack_int LAPACKE_dpbtrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, double* ab, lapack_int ldab );\nlapack_int LAPACKE_cpbtrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_complex_float* ab,\n                                lapack_int ldab );\nlapack_int LAPACKE_zpbtrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_complex_double* ab,\n                                lapack_int ldab );\n\nlapack_int LAPACKE_spbtrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs, const float* ab,\n                                lapack_int ldab, float* b, lapack_int ldb );\nlapack_int LAPACKE_dpbtrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs,\n                                const double* ab, lapack_int ldab, double* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_cpbtrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs,\n                                const lapack_complex_float* ab, lapack_int ldab,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpbtrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int kd, lapack_int nrhs,\n     
                           const lapack_complex_double* ab,\n                                lapack_int ldab, lapack_complex_double* b,\n                                lapack_int ldb );\n\nlapack_int LAPACKE_spftrf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, float* a );\nlapack_int LAPACKE_dpftrf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, double* a );\nlapack_int LAPACKE_cpftrf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_complex_float* a );\nlapack_int LAPACKE_zpftrf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_complex_double* a );\n\nlapack_int LAPACKE_spftri_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, float* a );\nlapack_int LAPACKE_dpftri_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, double* a );\nlapack_int LAPACKE_cpftri_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_complex_float* a );\nlapack_int LAPACKE_zpftri_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_complex_double* a );\n\nlapack_int LAPACKE_spftrs_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_int nrhs, const float* a,\n                                float* b, lapack_int ldb );\nlapack_int LAPACKE_dpftrs_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_int nrhs, const double* a,\n                                double* b, lapack_int ldb );\nlapack_int LAPACKE_cpftrs_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* a,\n                                
lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpftrs_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* a,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_spocon_work( int matrix_order, char uplo, lapack_int n,\n                                const float* a, lapack_int lda, float anorm,\n                                float* rcond, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dpocon_work( int matrix_order, char uplo, lapack_int n,\n                                const double* a, lapack_int lda, double anorm,\n                                double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_cpocon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                float anorm, float* rcond,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zpocon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                double anorm, double* rcond,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_spoequ_work( int matrix_order, lapack_int n, const float* a,\n                                lapack_int lda, float* s, float* scond,\n                                float* amax );\nlapack_int LAPACKE_dpoequ_work( int matrix_order, lapack_int n, const double* a,\n                                lapack_int lda, double* s, double* scond,\n                                double* amax );\nlapack_int LAPACKE_cpoequ_work( int matrix_order, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                    
            float* s, float* scond, float* amax );\nlapack_int LAPACKE_zpoequ_work( int matrix_order, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                double* s, double* scond, double* amax );\n\nlapack_int LAPACKE_spoequb_work( int matrix_order, lapack_int n, const float* a,\n                                 lapack_int lda, float* s, float* scond,\n                                 float* amax );\nlapack_int LAPACKE_dpoequb_work( int matrix_order, lapack_int n,\n                                 const double* a, lapack_int lda, double* s,\n                                 double* scond, double* amax );\nlapack_int LAPACKE_cpoequb_work( int matrix_order, lapack_int n,\n                                 const lapack_complex_float* a, lapack_int lda,\n                                 float* s, float* scond, float* amax );\nlapack_int LAPACKE_zpoequb_work( int matrix_order, lapack_int n,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 double* s, double* scond, double* amax );\n\nlapack_int LAPACKE_sporfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* a, lapack_int lda,\n                                const float* af, lapack_int ldaf,\n                                const float* b, lapack_int ldb, float* x,\n                                lapack_int ldx, float* ferr, float* berr,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dporfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* a,\n                                lapack_int lda, const double* af,\n                                lapack_int ldaf, const double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* ferr, double* berr, double* 
work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_cporfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* af,\n                                lapack_int ldaf, const lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zporfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_complex_double* af,\n                                lapack_int ldaf, const lapack_complex_double* b,\n                                lapack_int ldb, lapack_complex_double* x,\n                                lapack_int ldx, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sporfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs, const float* a,\n                                 lapack_int lda, const float* af,\n                                 lapack_int ldaf, const float* s,\n                                 const float* b, lapack_int ldb, float* x,\n                                 lapack_int ldx, float* rcond, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, float* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_dporfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, 
lapack_int nrhs, const double* a,\n                                 lapack_int lda, const double* af,\n                                 lapack_int ldaf, const double* s,\n                                 const double* b, lapack_int ldb, double* x,\n                                 lapack_int ldx, double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, double* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_cporfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_float* a, lapack_int lda,\n                                 const lapack_complex_float* af,\n                                 lapack_int ldaf, const float* s,\n                                 const lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* x, lapack_int ldx,\n                                 float* rcond, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, lapack_complex_float* work,\n                                 float* rwork );\nlapack_int LAPACKE_zporfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 const lapack_complex_double* af,\n                                 lapack_int ldaf, const double* s,\n                                 const lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                            
     double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int LAPACKE_sposv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, float* a, lapack_int lda,\n                               float* b, lapack_int ldb );\nlapack_int LAPACKE_dposv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, double* a, lapack_int lda,\n                               double* b, lapack_int ldb );\nlapack_int LAPACKE_cposv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_float* a,\n                               lapack_int lda, lapack_complex_float* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_zposv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_double* a,\n                               lapack_int lda, lapack_complex_double* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_dsposv_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, double* a, lapack_int lda,\n                                double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* work, float* swork,\n                                lapack_int* iter );\nlapack_int LAPACKE_zcposv_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb, lapack_complex_double* x,\n                                lapack_int ldx, 
lapack_complex_double* work,\n                                lapack_complex_float* swork, double* rwork,\n                                lapack_int* iter );\n\nlapack_int LAPACKE_sposvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, float* a,\n                                lapack_int lda, float* af, lapack_int ldaf,\n                                char* equed, float* s, float* b, lapack_int ldb,\n                                float* x, lapack_int ldx, float* rcond,\n                                float* ferr, float* berr, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dposvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, double* a,\n                                lapack_int lda, double* af, lapack_int ldaf,\n                                char* equed, double* s, double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cposvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* af, lapack_int ldaf,\n                                char* equed, float* s, lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_zposvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                
lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* af, lapack_int ldaf,\n                                char* equed, double* s,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sposvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs, float* a,\n                                 lapack_int lda, float* af, lapack_int ldaf,\n                                 char* equed, float* s, float* b,\n                                 lapack_int ldb, float* x, lapack_int ldx,\n                                 float* rcond, float* rpvgrw, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, float* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_dposvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs, double* a,\n                                 lapack_int lda, double* af, lapack_int ldaf,\n                                 char* equed, double* s, double* b,\n                                 lapack_int ldb, double* x, lapack_int ldx,\n                                 double* rcond, double* rpvgrw, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, double* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_cposvxx_work( int matrix_order, char fact, char 
uplo,\n                                 lapack_int n, lapack_int nrhs,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* af, lapack_int ldaf,\n                                 char* equed, float* s, lapack_complex_float* b,\n                                 lapack_int ldb, lapack_complex_float* x,\n                                 lapack_int ldx, float* rcond, float* rpvgrw,\n                                 float* berr, lapack_int n_err_bnds,\n                                 float* err_bnds_norm, float* err_bnds_comp,\n                                 lapack_int nparams, float* params,\n                                 lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zposvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* af, lapack_int ldaf,\n                                 char* equed, double* s,\n                                 lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* rpvgrw, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int LAPACKE_spotrf_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda );\nlapack_int LAPACKE_dpotrf_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda );\nlapack_int LAPACKE_cpotrf_work( int matrix_order, char uplo, lapack_int n,\n                       
         lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zpotrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_spotri_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda );\nlapack_int LAPACKE_dpotri_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda );\nlapack_int LAPACKE_cpotri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zpotri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_spotrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* a, lapack_int lda,\n                                float* b, lapack_int ldb );\nlapack_int LAPACKE_dpotrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* a,\n                                lapack_int lda, double* b, lapack_int ldb );\nlapack_int LAPACKE_cpotrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_zpotrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* b,\n                                lapack_int ldb );\n\nlapack_int LAPACKE_sppcon_work( int matrix_order, char uplo, lapack_int n,\n                                const float* ap, float anorm, float* rcond,\n                                float* work, 
lapack_int* iwork );\nlapack_int LAPACKE_dppcon_work( int matrix_order, char uplo, lapack_int n,\n                                const double* ap, double anorm, double* rcond,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cppcon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* ap, float anorm,\n                                float* rcond, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_zppcon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* ap, double anorm,\n                                double* rcond, lapack_complex_double* work,\n                                double* rwork );\n\nlapack_int LAPACKE_sppequ_work( int matrix_order, char uplo, lapack_int n,\n                                const float* ap, float* s, float* scond,\n                                float* amax );\nlapack_int LAPACKE_dppequ_work( int matrix_order, char uplo, lapack_int n,\n                                const double* ap, double* s, double* scond,\n                                double* amax );\nlapack_int LAPACKE_cppequ_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* ap, float* s,\n                                float* scond, float* amax );\nlapack_int LAPACKE_zppequ_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* ap, double* s,\n                                double* scond, double* amax );\n\nlapack_int LAPACKE_spprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* ap,\n                                const float* afp, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* ferr, float* berr, float* work,\n  
                              lapack_int* iwork );\nlapack_int LAPACKE_dpprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* ap,\n                                const double* afp, const double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* ferr, double* berr, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_cpprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* ap,\n                                const lapack_complex_float* afp,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zpprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                const lapack_complex_double* afp,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sppsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, float* ap, float* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_dppsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, double* ap, double* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_cppsv_work( int matrix_order, char uplo, 
lapack_int n,\n                               lapack_int nrhs, lapack_complex_float* ap,\n                               lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zppsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_double* ap,\n                               lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sppsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, float* ap,\n                                float* afp, char* equed, float* s, float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dppsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, double* ap,\n                                double* afp, char* equed, double* s, double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cppsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                lapack_complex_float* ap,\n                                lapack_complex_float* afp, char* equed,\n                                float* s, lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_zppsvx_work( int matrix_order, char fact, char uplo,\n                                
lapack_int n, lapack_int nrhs,\n                                lapack_complex_double* ap,\n                                lapack_complex_double* afp, char* equed,\n                                double* s, lapack_complex_double* b,\n                                lapack_int ldb, lapack_complex_double* x,\n                                lapack_int ldx, double* rcond, double* ferr,\n                                double* berr, lapack_complex_double* work,\n                                double* rwork );\n\nlapack_int LAPACKE_spptrf_work( int matrix_order, char uplo, lapack_int n,\n                                float* ap );\nlapack_int LAPACKE_dpptrf_work( int matrix_order, char uplo, lapack_int n,\n                                double* ap );\nlapack_int LAPACKE_cpptrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* ap );\nlapack_int LAPACKE_zpptrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* ap );\n\nlapack_int LAPACKE_spptri_work( int matrix_order, char uplo, lapack_int n,\n                                float* ap );\nlapack_int LAPACKE_dpptri_work( int matrix_order, char uplo, lapack_int n,\n                                double* ap );\nlapack_int LAPACKE_cpptri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* ap );\nlapack_int LAPACKE_zpptri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* ap );\n\nlapack_int LAPACKE_spptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* ap, float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_dpptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* ap, double* b,\n                                lapack_int ldb );\nlapack_int 
LAPACKE_cpptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* ap,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zpptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_spstrf_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda, lapack_int* piv,\n                                lapack_int* rank, float tol, float* work );\nlapack_int LAPACKE_dpstrf_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda, lapack_int* piv,\n                                lapack_int* rank, double tol, double* work );\nlapack_int LAPACKE_cpstrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* piv, lapack_int* rank, float tol,\n                                float* work );\nlapack_int LAPACKE_zpstrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* piv, lapack_int* rank, double tol,\n                                double* work );\n\nlapack_int LAPACKE_sptcon_work( lapack_int n, const float* d, const float* e,\n                                float anorm, float* rcond, float* work );\nlapack_int LAPACKE_dptcon_work( lapack_int n, const double* d, const double* e,\n                                double anorm, double* rcond, double* work );\nlapack_int LAPACKE_cptcon_work( lapack_int n, const float* d,\n                                const lapack_complex_float* e, float anorm,\n                                
float* rcond, float* work );\nlapack_int LAPACKE_zptcon_work( lapack_int n, const double* d,\n                                const lapack_complex_double* e, double anorm,\n                                double* rcond, double* work );\n\nlapack_int LAPACKE_spteqr_work( int matrix_order, char compz, lapack_int n,\n                                float* d, float* e, float* z, lapack_int ldz,\n                                float* work );\nlapack_int LAPACKE_dpteqr_work( int matrix_order, char compz, lapack_int n,\n                                double* d, double* e, double* z, lapack_int ldz,\n                                double* work );\nlapack_int LAPACKE_cpteqr_work( int matrix_order, char compz, lapack_int n,\n                                float* d, float* e, lapack_complex_float* z,\n                                lapack_int ldz, float* work );\nlapack_int LAPACKE_zpteqr_work( int matrix_order, char compz, lapack_int n,\n                                double* d, double* e, lapack_complex_double* z,\n                                lapack_int ldz, double* work );\n\nlapack_int LAPACKE_sptrfs_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                                const float* d, const float* e, const float* df,\n                                const float* ef, const float* b, lapack_int ldb,\n                                float* x, lapack_int ldx, float* ferr,\n                                float* berr, float* work );\nlapack_int LAPACKE_dptrfs_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                                const double* d, const double* e,\n                                const double* df, const double* ef,\n                                const double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* ferr, double* berr,\n                                double* work );\nlapack_int LAPACKE_cptrfs_work( int matrix_order, char uplo, lapack_int n,\n                             
   lapack_int nrhs, const float* d,\n                                const lapack_complex_float* e, const float* df,\n                                const lapack_complex_float* ef,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zptrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* d,\n                                const lapack_complex_double* e,\n                                const double* df,\n                                const lapack_complex_double* ef,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sptsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               float* d, float* e, float* b, lapack_int ldb );\nlapack_int LAPACKE_dptsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               double* d, double* e, double* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_cptsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               float* d, lapack_complex_float* e,\n                               lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zptsv_work( int matrix_order, lapack_int n, lapack_int nrhs,\n                               double* d, lapack_complex_double* e,\n                               lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sptsvx_work( int matrix_order, char fact, lapack_int n,\n                    
            lapack_int nrhs, const float* d, const float* e,\n                                float* df, float* ef, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                float* work );\nlapack_int LAPACKE_dptsvx_work( int matrix_order, char fact, lapack_int n,\n                                lapack_int nrhs, const double* d,\n                                const double* e, double* df, double* ef,\n                                const double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* rcond, double* ferr,\n                                double* berr, double* work );\nlapack_int LAPACKE_cptsvx_work( int matrix_order, char fact, lapack_int n,\n                                lapack_int nrhs, const float* d,\n                                const lapack_complex_float* e, float* df,\n                                lapack_complex_float* ef,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zptsvx_work( int matrix_order, char fact, lapack_int n,\n                                lapack_int nrhs, const double* d,\n                                const lapack_complex_double* e, double* df,\n                                lapack_complex_double* ef,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_spttrf_work( lapack_int n, float* d, float* 
e );
lapack_int LAPACKE_dpttrf_work( lapack_int n, double* d, double* e );
lapack_int LAPACKE_cpttrf_work( lapack_int n, float* d,
                                lapack_complex_float* e );
lapack_int LAPACKE_zpttrf_work( lapack_int n, double* d,
                                lapack_complex_double* e );

/* ?pttrs_work: s/d/c/z precision variants sharing one signature shape.
 * Note the complex variants (c/z) take an extra uplo argument that the
 * real (s/d) ones do not.
 * NOTE(review): per LAPACK naming these appear to solve with a
 * factorization produced by the matching ?pttrf routine above --
 * confirm against the netlib xPTTRS documentation. */
lapack_int LAPACKE_spttrs_work( int matrix_order, lapack_int n, lapack_int nrhs,
                                const float* d, const float* e, float* b,
                                lapack_int ldb );
lapack_int LAPACKE_dpttrs_work( int matrix_order, lapack_int n, lapack_int nrhs,
                                const double* d, const double* e, double* b,
                                lapack_int ldb );
lapack_int LAPACKE_cpttrs_work( int matrix_order, char uplo, lapack_int n,
                                lapack_int nrhs, const float* d,
                                const lapack_complex_float* e,
                                lapack_complex_float* b, lapack_int ldb );
lapack_int LAPACKE_zpttrs_work( int matrix_order, char uplo, lapack_int n,
                                lapack_int nrhs, const double* d,
                                const lapack_complex_double* e,
                                lapack_complex_double* b, lapack_int ldb );

/* ?sbev_work: real-only (s/d) pair; the caller supplies the work array
 * directly -- this pair has no lwork size parameter. */
lapack_int LAPACKE_ssbev_work( int matrix_order, char jobz, char uplo,
                               lapack_int n, lapack_int kd, float* ab,
                               lapack_int ldab, float* w, float* z,
                               lapack_int ldz, float* work );
lapack_int LAPACKE_dsbev_work( int matrix_order, char jobz, char uplo,
                               lapack_int n, lapack_int kd, double* ab,
                               lapack_int ldab, double* w, double* z,
                               lapack_int ldz, double* work );

/* ?sbevd_work: same s/d pairing but with explicit lwork/liwork
 * workspace-size arguments (declaration continues below). */
lapack_int LAPACKE_ssbevd_work( int matrix_order, char jobz, char uplo,
                                lapack_int n, lapack_int kd, 
float* ab,\n                                lapack_int ldab, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dsbevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int kd, double* ab,\n                                lapack_int ldab, double* w, double* z,\n                                lapack_int ldz, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_ssbevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int kd,\n                                float* ab, lapack_int ldab, float* q,\n                                lapack_int ldq, float vl, float vu,\n                                lapack_int il, lapack_int iu, float abstol,\n                                lapack_int* m, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int* iwork,\n                                lapack_int* ifail );\nlapack_int LAPACKE_dsbevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int kd,\n                                double* ab, lapack_int ldab, double* q,\n                                lapack_int ldq, double vl, double vu,\n                                lapack_int il, lapack_int iu, double abstol,\n                                lapack_int* m, double* w, double* z,\n                                lapack_int ldz, double* work, lapack_int* iwork,\n                                lapack_int* ifail );\n\nlapack_int LAPACKE_ssbgst_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                float* ab, lapack_int ldab, const float* bb,\n                  
              lapack_int ldbb, float* x, lapack_int ldx,\n                                float* work );\nlapack_int LAPACKE_dsbgst_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                double* ab, lapack_int ldab, const double* bb,\n                                lapack_int ldbb, double* x, lapack_int ldx,\n                                double* work );\n\nlapack_int LAPACKE_ssbgv_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_int ka, lapack_int kb,\n                               float* ab, lapack_int ldab, float* bb,\n                               lapack_int ldbb, float* w, float* z,\n                               lapack_int ldz, float* work );\nlapack_int LAPACKE_dsbgv_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, lapack_int ka, lapack_int kb,\n                               double* ab, lapack_int ldab, double* bb,\n                               lapack_int ldbb, double* w, double* z,\n                               lapack_int ldz, double* work );\n\nlapack_int LAPACKE_ssbgvd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                float* ab, lapack_int ldab, float* bb,\n                                lapack_int ldbb, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dsbgvd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, lapack_int ka, lapack_int kb,\n                                double* ab, lapack_int ldab, double* bb,\n                                lapack_int ldbb, double* w, double* z,\n                                lapack_int ldz, double* work, lapack_int lwork,\n     
                           lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_ssbgvx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int ka,\n                                lapack_int kb, float* ab, lapack_int ldab,\n                                float* bb, lapack_int ldbb, float* q,\n                                lapack_int ldq, float vl, float vu,\n                                lapack_int il, lapack_int iu, float abstol,\n                                lapack_int* m, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int* iwork,\n                                lapack_int* ifail );\nlapack_int LAPACKE_dsbgvx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, lapack_int ka,\n                                lapack_int kb, double* ab, lapack_int ldab,\n                                double* bb, lapack_int ldbb, double* q,\n                                lapack_int ldq, double vl, double vu,\n                                lapack_int il, lapack_int iu, double abstol,\n                                lapack_int* m, double* w, double* z,\n                                lapack_int ldz, double* work, lapack_int* iwork,\n                                lapack_int* ifail );\n\nlapack_int LAPACKE_ssbtrd_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int kd, float* ab,\n                                lapack_int ldab, float* d, float* e, float* q,\n                                lapack_int ldq, float* work );\nlapack_int LAPACKE_dsbtrd_work( int matrix_order, char vect, char uplo,\n                                lapack_int n, lapack_int kd, double* ab,\n                                lapack_int ldab, double* d, double* e,\n                                double* q, lapack_int ldq, double* work );\n\nlapack_int LAPACKE_ssfrk_work( int 
matrix_order, char transr, char uplo,
                               char trans, lapack_int n, lapack_int k,
                               float alpha, const float* a, lapack_int lda,
                               float beta, float* c );
lapack_int LAPACKE_dsfrk_work( int matrix_order, char transr, char uplo,
                               char trans, lapack_int n, lapack_int k,
                               double alpha, const double* a, lapack_int lda,
                               double beta, double* c );

/* ?spcon_work: s/d/c/z variants taking anorm in and writing rcond out.
 * Note the real variants (s/d) take an iwork array that the complex
 * variants (c/z) do not.
 * NOTE(review): naming suggests a reciprocal-condition-number estimate
 * for a packed (ap) matrix factored via ipiv -- confirm against the
 * netlib xSPCON documentation. */
lapack_int LAPACKE_sspcon_work( int matrix_order, char uplo, lapack_int n,
                                const float* ap, const lapack_int* ipiv,
                                float anorm, float* rcond, float* work,
                                lapack_int* iwork );
lapack_int LAPACKE_dspcon_work( int matrix_order, char uplo, lapack_int n,
                                const double* ap, const lapack_int* ipiv,
                                double anorm, double* rcond, double* work,
                                lapack_int* iwork );
lapack_int LAPACKE_cspcon_work( int matrix_order, char uplo, lapack_int n,
                                const lapack_complex_float* ap,
                                const lapack_int* ipiv, float anorm,
                                float* rcond, lapack_complex_float* work );
lapack_int LAPACKE_zspcon_work( int matrix_order, char uplo, lapack_int n,
                                const lapack_complex_double* ap,
                                const lapack_int* ipiv, double anorm,
                                double* rcond, lapack_complex_double* work );

/* ?spev_work: real-only (s/d) pair on packed storage (ap); the caller
 * supplies the work array directly -- no lwork size parameter here. */
lapack_int LAPACKE_sspev_work( int matrix_order, char jobz, char uplo,
                               lapack_int n, float* ap, float* w, float* z,
                               lapack_int ldz, float* work );
lapack_int LAPACKE_dspev_work( int matrix_order, char jobz, char uplo,
                               lapack_int n, 
double* ap, double* w, double* z,\n                               lapack_int ldz, double* work );\n\nlapack_int LAPACKE_sspevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, float* ap, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dspevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, double* ap, double* w, double* z,\n                                lapack_int ldz, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_sspevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, float* ap, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                float abstol, lapack_int* m, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int* iwork,\n                                lapack_int* ifail );\nlapack_int LAPACKE_dspevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, double* ap, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n                                double abstol, lapack_int* m, double* w,\n                                double* z, lapack_int ldz, double* work,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_sspgst_work( int matrix_order, lapack_int itype, char uplo,\n                                lapack_int n, float* ap, const float* bp );\nlapack_int LAPACKE_dspgst_work( int matrix_order, lapack_int itype, char uplo,\n                                lapack_int n, double* ap, const double* bp );\n\nlapack_int LAPACKE_sspgv_work( int matrix_order, lapack_int itype, char 
jobz,\n                               char uplo, lapack_int n, float* ap, float* bp,\n                               float* w, float* z, lapack_int ldz,\n                               float* work );\nlapack_int LAPACKE_dspgv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n, double* ap, double* bp,\n                               double* w, double* z, lapack_int ldz,\n                               double* work );\n\nlapack_int LAPACKE_sspgvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n, float* ap, float* bp,\n                                float* w, float* z, lapack_int ldz, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_dspgvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n, double* ap, double* bp,\n                                double* w, double* z, lapack_int ldz,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_sspgvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n, float* ap,\n                                float* bp, float vl, float vu, lapack_int il,\n                                lapack_int iu, float abstol, lapack_int* m,\n                                float* w, float* z, lapack_int ldz, float* work,\n                                lapack_int* iwork, lapack_int* ifail );\nlapack_int LAPACKE_dspgvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n, double* ap,\n                                double* bp, double vl, double vu, lapack_int il,\n                                lapack_int iu, double abstol, 
lapack_int* m,\n                                double* w, double* z, lapack_int ldz,\n                                double* work, lapack_int* iwork,\n                                lapack_int* ifail );\n\nlapack_int LAPACKE_ssprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* ap,\n                                const float* afp, const lapack_int* ipiv,\n                                const float* b, lapack_int ldb, float* x,\n                                lapack_int ldx, float* ferr, float* berr,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dsprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* ap,\n                                const double* afp, const lapack_int* ipiv,\n                                const double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_csprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* ap,\n                                const lapack_complex_float* afp,\n                                const lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zsprfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                const lapack_complex_double* afp,\n                                const lapack_int* ipiv,\n                           
     const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_sspsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, float* ap, lapack_int* ipiv,\n                               float* b, lapack_int ldb );\nlapack_int LAPACKE_dspsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, double* ap, lapack_int* ipiv,\n                               double* b, lapack_int ldb );\nlapack_int LAPACKE_cspsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_float* ap,\n                               lapack_int* ipiv, lapack_complex_float* b,\n                               lapack_int ldb );\nlapack_int LAPACKE_zspsv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_double* ap,\n                               lapack_int* ipiv, lapack_complex_double* b,\n                               lapack_int ldb );\n\nlapack_int LAPACKE_sspsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, const float* ap,\n                                float* afp, lapack_int* ipiv, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dspsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, const double* ap,\n                                double* afp, lapack_int* ipiv, const double* b,\n                                lapack_int ldb, double* x, 
lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_cspsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* ap,\n                                lapack_complex_float* afp, lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zspsvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                lapack_complex_double* afp, lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_ssptrd_work( int matrix_order, char uplo, lapack_int n,\n                                float* ap, float* d, float* e, float* tau );\nlapack_int LAPACKE_dsptrd_work( int matrix_order, char uplo, lapack_int n,\n                                double* ap, double* d, double* e, double* tau );\n\nlapack_int LAPACKE_ssptrf_work( int matrix_order, char uplo, lapack_int n,\n                                float* ap, lapack_int* ipiv );\nlapack_int LAPACKE_dsptrf_work( int matrix_order, char uplo, lapack_int n,\n                                double* ap, lapack_int* ipiv );\nlapack_int LAPACKE_csptrf_work( int matrix_order, char uplo, 
lapack_int n,\n                                lapack_complex_float* ap, lapack_int* ipiv );\nlapack_int LAPACKE_zsptrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* ap, lapack_int* ipiv );\n\nlapack_int LAPACKE_ssptri_work( int matrix_order, char uplo, lapack_int n,\n                                float* ap, const lapack_int* ipiv,\n                                float* work );\nlapack_int LAPACKE_dsptri_work( int matrix_order, char uplo, lapack_int n,\n                                double* ap, const lapack_int* ipiv,\n                                double* work );\nlapack_int LAPACKE_csptri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* ap,\n                                const lapack_int* ipiv,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zsptri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* ap,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_ssptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* ap,\n                                const lapack_int* ipiv, float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_dsptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* ap,\n                                const lapack_int* ipiv, double* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_csptrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* ap,\n                                const lapack_int* ipiv, lapack_complex_float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_zsptrs_work( 
int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_sstebz_work( char range, char order, lapack_int n, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                float abstol, const float* d, const float* e,\n                                lapack_int* m, lapack_int* nsplit, float* w,\n                                lapack_int* iblock, lapack_int* isplit,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dstebz_work( char range, char order, lapack_int n, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n                                double abstol, const double* d, const double* e,\n                                lapack_int* m, lapack_int* nsplit, double* w,\n                                lapack_int* iblock, lapack_int* isplit,\n                                double* work, lapack_int* iwork );\n\nlapack_int LAPACKE_sstedc_work( int matrix_order, char compz, lapack_int n,\n                                float* d, float* e, float* z, lapack_int ldz,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dstedc_work( int matrix_order, char compz, lapack_int n,\n                                double* d, double* e, double* z, lapack_int ldz,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_cstedc_work( int matrix_order, char compz, lapack_int n,\n                                float* d, float* e, lapack_complex_float* z,\n                                lapack_int ldz, 
lapack_complex_float* work,\n                                lapack_int lwork, float* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_zstedc_work( int matrix_order, char compz, lapack_int n,\n                                double* d, double* e, lapack_complex_double* z,\n                                lapack_int ldz, lapack_complex_double* work,\n                                lapack_int lwork, double* rwork,\n                                lapack_int lrwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_sstegr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, float* d, float* e, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                float abstol, lapack_int* m, float* w, float* z,\n                                lapack_int ldz, lapack_int* isuppz, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_dstegr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, double* d, double* e, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n                                double abstol, lapack_int* m, double* w,\n                                double* z, lapack_int ldz, lapack_int* isuppz,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_cstegr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, float* d, float* e, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                float abstol, lapack_int* m, float* w,\n                        
        lapack_complex_float* z, lapack_int ldz,\n                                lapack_int* isuppz, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_zstegr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, double* d, double* e, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n                                double abstol, lapack_int* m, double* w,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_int* isuppz, double* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_sstein_work( int matrix_order, lapack_int n, const float* d,\n                                const float* e, lapack_int m, const float* w,\n                                const lapack_int* iblock,\n                                const lapack_int* isplit, float* z,\n                                lapack_int ldz, float* work, lapack_int* iwork,\n                                lapack_int* ifailv );\nlapack_int LAPACKE_dstein_work( int matrix_order, lapack_int n, const double* d,\n                                const double* e, lapack_int m, const double* w,\n                                const lapack_int* iblock,\n                                const lapack_int* isplit, double* z,\n                                lapack_int ldz, double* work, lapack_int* iwork,\n                                lapack_int* ifailv );\nlapack_int LAPACKE_cstein_work( int matrix_order, lapack_int n, const float* d,\n                                const float* e, lapack_int m, const float* w,\n                                const lapack_int* iblock,\n                                const lapack_int* isplit,\n                                lapack_complex_float* z, 
lapack_int ldz,\n                                float* work, lapack_int* iwork,\n                                lapack_int* ifailv );\nlapack_int LAPACKE_zstein_work( int matrix_order, lapack_int n, const double* d,\n                                const double* e, lapack_int m, const double* w,\n                                const lapack_int* iblock,\n                                const lapack_int* isplit,\n                                lapack_complex_double* z, lapack_int ldz,\n                                double* work, lapack_int* iwork,\n                                lapack_int* ifailv );\n\nlapack_int LAPACKE_sstemr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, float* d, float* e, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                lapack_int* m, float* w, float* z,\n                                lapack_int ldz, lapack_int nzc,\n                                lapack_int* isuppz, lapack_logical* tryrac,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dstemr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, double* d, double* e, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n                                lapack_int* m, double* w, double* z,\n                                lapack_int ldz, lapack_int nzc,\n                                lapack_int* isuppz, lapack_logical* tryrac,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_cstemr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, float* d, float* e, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n           
                     lapack_int* m, float* w,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_int nzc, lapack_int* isuppz,\n                                lapack_logical* tryrac, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_zstemr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, double* d, double* e, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n                                lapack_int* m, double* w,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_int nzc, lapack_int* isuppz,\n                                lapack_logical* tryrac, double* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_ssteqr_work( int matrix_order, char compz, lapack_int n,\n                                float* d, float* e, float* z, lapack_int ldz,\n                                float* work );\nlapack_int LAPACKE_dsteqr_work( int matrix_order, char compz, lapack_int n,\n                                double* d, double* e, double* z, lapack_int ldz,\n                                double* work );\nlapack_int LAPACKE_csteqr_work( int matrix_order, char compz, lapack_int n,\n                                float* d, float* e, lapack_complex_float* z,\n                                lapack_int ldz, float* work );\nlapack_int LAPACKE_zsteqr_work( int matrix_order, char compz, lapack_int n,\n                                double* d, double* e, lapack_complex_double* z,\n                                lapack_int ldz, double* work );\n\nlapack_int LAPACKE_ssterf_work( lapack_int n, float* d, float* e );\nlapack_int LAPACKE_dsterf_work( lapack_int n, double* d, 
double* e );\n\nlapack_int LAPACKE_sstev_work( int matrix_order, char jobz, lapack_int n,\n                               float* d, float* e, float* z, lapack_int ldz,\n                               float* work );\nlapack_int LAPACKE_dstev_work( int matrix_order, char jobz, lapack_int n,\n                               double* d, double* e, double* z, lapack_int ldz,\n                               double* work );\n\nlapack_int LAPACKE_sstevd_work( int matrix_order, char jobz, lapack_int n,\n                                float* d, float* e, float* z, lapack_int ldz,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dstevd_work( int matrix_order, char jobz, lapack_int n,\n                                double* d, double* e, double* z, lapack_int ldz,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_sstevr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, float* d, float* e, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                float abstol, lapack_int* m, float* w, float* z,\n                                lapack_int ldz, lapack_int* isuppz, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_dstevr_work( int matrix_order, char jobz, char range,\n                                lapack_int n, double* d, double* e, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n                                double abstol, lapack_int* m, double* w,\n                                double* z, lapack_int ldz, lapack_int* isuppz,\n                                double* work, lapack_int lwork,\n                             
   lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_sstevx_work( int matrix_order, char jobz, char range,\n                                lapack_int n, float* d, float* e, float vl,\n                                float vu, lapack_int il, lapack_int iu,\n                                float abstol, lapack_int* m, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int* iwork,\n                                lapack_int* ifail );\nlapack_int LAPACKE_dstevx_work( int matrix_order, char jobz, char range,\n                                lapack_int n, double* d, double* e, double vl,\n                                double vu, lapack_int il, lapack_int iu,\n                                double abstol, lapack_int* m, double* w,\n                                double* z, lapack_int ldz, double* work,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_ssycon_work( int matrix_order, char uplo, lapack_int n,\n                                const float* a, lapack_int lda,\n                                const lapack_int* ipiv, float anorm,\n                                float* rcond, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dsycon_work( int matrix_order, char uplo, lapack_int n,\n                                const double* a, lapack_int lda,\n                                const lapack_int* ipiv, double anorm,\n                                double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_csycon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_int* ipiv, float anorm,\n                                float* rcond, lapack_complex_float* work );\nlapack_int LAPACKE_zsycon_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* 
a, lapack_int lda,\n                                const lapack_int* ipiv, double anorm,\n                                double* rcond, lapack_complex_double* work );\n\nlapack_int LAPACKE_ssyequb_work( int matrix_order, char uplo, lapack_int n,\n                                 const float* a, lapack_int lda, float* s,\n                                 float* scond, float* amax, float* work );\nlapack_int LAPACKE_dsyequb_work( int matrix_order, char uplo, lapack_int n,\n                                 const double* a, lapack_int lda, double* s,\n                                 double* scond, double* amax, double* work );\nlapack_int LAPACKE_csyequb_work( int matrix_order, char uplo, lapack_int n,\n                                 const lapack_complex_float* a, lapack_int lda,\n                                 float* s, float* scond, float* amax,\n                                 lapack_complex_float* work );\nlapack_int LAPACKE_zsyequb_work( int matrix_order, char uplo, lapack_int n,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 double* s, double* scond, double* amax,\n                                 lapack_complex_double* work );\n\nlapack_int LAPACKE_ssyev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, float* a, lapack_int lda, float* w,\n                               float* work, lapack_int lwork );\nlapack_int LAPACKE_dsyev_work( int matrix_order, char jobz, char uplo,\n                               lapack_int n, double* a, lapack_int lda,\n                               double* w, double* work, lapack_int lwork );\n\nlapack_int LAPACKE_ssyevd_work( int matrix_order, char jobz, char uplo,\n                                lapack_int n, float* a, lapack_int lda,\n                                float* w, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dsyevd_work( int 
matrix_order, char jobz, char uplo,\n                                lapack_int n, double* a, lapack_int lda,\n                                double* w, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_ssyevr_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, float* a,\n                                lapack_int lda, float vl, float vu,\n                                lapack_int il, lapack_int iu, float abstol,\n                                lapack_int* m, float* w, float* z,\n                                lapack_int ldz, lapack_int* isuppz, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_dsyevr_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, double* a,\n                                lapack_int lda, double vl, double vu,\n                                lapack_int il, lapack_int iu, double abstol,\n                                lapack_int* m, double* w, double* z,\n                                lapack_int ldz, lapack_int* isuppz,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_ssyevx_work( int matrix_order, char jobz, char range,\n                                char uplo, lapack_int n, float* a,\n                                lapack_int lda, float vl, float vu,\n                                lapack_int il, lapack_int iu, float abstol,\n                                lapack_int* m, float* w, float* z,\n                                lapack_int ldz, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int* ifail );\nlapack_int LAPACKE_dsyevx_work( int matrix_order, char jobz, char range,\n                      
          char uplo, lapack_int n, double* a,\n                                lapack_int lda, double vl, double vu,\n                                lapack_int il, lapack_int iu, double abstol,\n                                lapack_int* m, double* w, double* z,\n                                lapack_int ldz, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_ssygst_work( int matrix_order, lapack_int itype, char uplo,\n                                lapack_int n, float* a, lapack_int lda,\n                                const float* b, lapack_int ldb );\nlapack_int LAPACKE_dsygst_work( int matrix_order, lapack_int itype, char uplo,\n                                lapack_int n, double* a, lapack_int lda,\n                                const double* b, lapack_int ldb );\n\nlapack_int LAPACKE_ssygv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n, float* a,\n                               lapack_int lda, float* b, lapack_int ldb,\n                               float* w, float* work, lapack_int lwork );\nlapack_int LAPACKE_dsygv_work( int matrix_order, lapack_int itype, char jobz,\n                               char uplo, lapack_int n, double* a,\n                               lapack_int lda, double* b, lapack_int ldb,\n                               double* w, double* work, lapack_int lwork );\n\nlapack_int LAPACKE_ssygvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n, float* a,\n                                lapack_int lda, float* b, lapack_int ldb,\n                                float* w, float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dsygvd_work( int matrix_order, lapack_int itype, char jobz,\n                                char uplo, lapack_int n, double* a,\n                   
             lapack_int lda, double* b, lapack_int ldb,\n                                double* w, double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\n\nlapack_int LAPACKE_ssygvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n, float* a,\n                                lapack_int lda, float* b, lapack_int ldb,\n                                float vl, float vu, lapack_int il,\n                                lapack_int iu, float abstol, lapack_int* m,\n                                float* w, float* z, lapack_int ldz, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int* ifail );\nlapack_int LAPACKE_dsygvx_work( int matrix_order, lapack_int itype, char jobz,\n                                char range, char uplo, lapack_int n, double* a,\n                                lapack_int lda, double* b, lapack_int ldb,\n                                double vl, double vu, lapack_int il,\n                                lapack_int iu, double abstol, lapack_int* m,\n                                double* w, double* z, lapack_int ldz,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int* ifail );\n\nlapack_int LAPACKE_ssyrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* a, lapack_int lda,\n                                const float* af, lapack_int ldaf,\n                                const lapack_int* ipiv, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* ferr, float* berr, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dsyrfs_work( int matrix_order, char uplo, lapack_int n,\n                                
lapack_int nrhs, const double* a,\n                                lapack_int lda, const double* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const double* b, lapack_int ldb, double* x,\n                                lapack_int ldx, double* ferr, double* berr,\n                                double* work, lapack_int* iwork );\nlapack_int LAPACKE_csyrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_zsyrfs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_complex_double* af,\n                                lapack_int ldaf, const lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_ssyrfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs, const float* a,\n                                 lapack_int lda, const float* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const float* s, const float* b, lapack_int ldb,\n        
                         float* x, lapack_int ldx, float* rcond,\n                                 float* berr, lapack_int n_err_bnds,\n                                 float* err_bnds_norm, float* err_bnds_comp,\n                                 lapack_int nparams, float* params, float* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_dsyrfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs, const double* a,\n                                 lapack_int lda, const double* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const double* s, const double* b,\n                                 lapack_int ldb, double* x, lapack_int ldx,\n                                 double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, double* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_csyrfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_float* a, lapack_int lda,\n                                 const lapack_complex_float* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const float* s, const lapack_complex_float* b,\n                                 lapack_int ldb, lapack_complex_float* x,\n                                 lapack_int ldx, float* rcond, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, lapack_complex_float* work,\n                                 
float* rwork );\nlapack_int LAPACKE_zsyrfsx_work( int matrix_order, char uplo, char equed,\n                                 lapack_int n, lapack_int nrhs,\n                                 const lapack_complex_double* a, lapack_int lda,\n                                 const lapack_complex_double* af,\n                                 lapack_int ldaf, const lapack_int* ipiv,\n                                 const double* s,\n                                 const lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                                 double* rwork );\n\nlapack_int LAPACKE_ssysv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, float* a, lapack_int lda,\n                               lapack_int* ipiv, float* b, lapack_int ldb,\n                               float* work, lapack_int lwork );\nlapack_int LAPACKE_dsysv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, double* a, lapack_int lda,\n                               lapack_int* ipiv, double* b, lapack_int ldb,\n                               double* work, lapack_int lwork );\nlapack_int LAPACKE_csysv_work( int matrix_order, char uplo, lapack_int n,\n                               lapack_int nrhs, lapack_complex_float* a,\n                               lapack_int lda, lapack_int* ipiv,\n                               lapack_complex_float* b, lapack_int ldb,\n                               lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zsysv_work( int matrix_order, char uplo, lapack_int n,\n                              
 lapack_int nrhs, lapack_complex_double* a,\n                               lapack_int lda, lapack_int* ipiv,\n                               lapack_complex_double* b, lapack_int ldb,\n                               lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_ssysvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, const float* a,\n                                lapack_int lda, float* af, lapack_int ldaf,\n                                lapack_int* ipiv, const float* b,\n                                lapack_int ldb, float* x, lapack_int ldx,\n                                float* rcond, float* ferr, float* berr,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dsysvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs, const double* a,\n                                lapack_int lda, double* af, lapack_int ldaf,\n                                lapack_int* ipiv, const double* b,\n                                lapack_int ldb, double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_csysvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* af, lapack_int ldaf,\n                                lapack_int* ipiv, const lapack_complex_float* b,\n                                lapack_int ldb, lapack_complex_float* x,\n                                lapack_int ldx, float* rcond, float* ferr,\n                                float* berr, lapack_complex_float* 
work,\n                                lapack_int lwork, float* rwork );\nlapack_int LAPACKE_zsysvx_work( int matrix_order, char fact, char uplo,\n                                lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* af, lapack_int ldaf,\n                                lapack_int* ipiv,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* x, lapack_int ldx,\n                                double* rcond, double* ferr, double* berr,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork );\n\nlapack_int LAPACKE_ssysvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs, float* a,\n                                 lapack_int lda, float* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, float* s,\n                                 float* b, lapack_int ldb, float* x,\n                                 lapack_int ldx, float* rcond, float* rpvgrw,\n                                 float* berr, lapack_int n_err_bnds,\n                                 float* err_bnds_norm, float* err_bnds_comp,\n                                 lapack_int nparams, float* params, float* work,\n                                 lapack_int* iwork );\nlapack_int LAPACKE_dsysvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs, double* a,\n                                 lapack_int lda, double* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, double* s,\n                                 double* b, lapack_int ldb, double* x,\n                                 lapack_int ldx, double* rcond, double* rpvgrw,\n                      
           double* berr, lapack_int n_err_bnds,\n                                 double* err_bnds_norm, double* err_bnds_comp,\n                                 lapack_int nparams, double* params,\n                                 double* work, lapack_int* iwork );\nlapack_int LAPACKE_csysvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, float* s,\n                                 lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* x, lapack_int ldx,\n                                 float* rcond, float* rpvgrw, float* berr,\n                                 lapack_int n_err_bnds, float* err_bnds_norm,\n                                 float* err_bnds_comp, lapack_int nparams,\n                                 float* params, lapack_complex_float* work,\n                                 float* rwork );\nlapack_int LAPACKE_zsysvxx_work( int matrix_order, char fact, char uplo,\n                                 lapack_int n, lapack_int nrhs,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* af, lapack_int ldaf,\n                                 lapack_int* ipiv, char* equed, double* s,\n                                 lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* x, lapack_int ldx,\n                                 double* rcond, double* rpvgrw, double* berr,\n                                 lapack_int n_err_bnds, double* err_bnds_norm,\n                                 double* err_bnds_comp, lapack_int nparams,\n                                 double* params, lapack_complex_double* work,\n                      
           double* rwork );\n\nlapack_int LAPACKE_ssytrd_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda, float* d, float* e,\n                                float* tau, float* work, lapack_int lwork );\nlapack_int LAPACKE_dsytrd_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda, double* d, double* e,\n                                double* tau, double* work, lapack_int lwork );\n\nlapack_int LAPACKE_ssytrf_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda, lapack_int* ipiv,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dsytrf_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda, lapack_int* ipiv,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_csytrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_int* ipiv, lapack_complex_float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_zsytrf_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_int* ipiv, lapack_complex_double* work,\n                                lapack_int lwork );\n\nlapack_int LAPACKE_ssytri_work( int matrix_order, char uplo, lapack_int n,\n                                float* a, lapack_int lda,\n                                const lapack_int* ipiv, float* work );\nlapack_int LAPACKE_dsytri_work( int matrix_order, char uplo, lapack_int n,\n                                double* a, lapack_int lda,\n                                const lapack_int* ipiv, double* work );\nlapack_int LAPACKE_csytri_work( int matrix_order, char uplo, lapack_int 
n,\n                                lapack_complex_float* a, lapack_int lda,\n                                const lapack_int* ipiv,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zsytri_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                const lapack_int* ipiv,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_ssytrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const float* a, lapack_int lda,\n                                const lapack_int* ipiv, float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_dsytrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                double* b, lapack_int ldb );\nlapack_int LAPACKE_csytrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_zsytrs_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_int nrhs, const lapack_complex_double* a,\n                                lapack_int lda, const lapack_int* ipiv,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_stbcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n, lapack_int kd,\n                                const float* ab, lapack_int ldab, float* rcond,\n                                float* work, lapack_int* iwork );\nlapack_int LAPACKE_dtbcon_work( int matrix_order, char norm, 
char uplo,\n                                char diag, lapack_int n, lapack_int kd,\n                                const double* ab, lapack_int ldab,\n                                double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctbcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n, lapack_int kd,\n                                const lapack_complex_float* ab, lapack_int ldab,\n                                float* rcond, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_ztbcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n, lapack_int kd,\n                                const lapack_complex_double* ab,\n                                lapack_int ldab, double* rcond,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_stbrfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs, const float* ab,\n                                lapack_int ldab, const float* b, lapack_int ldb,\n                                const float* x, lapack_int ldx, float* ferr,\n                                float* berr, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dtbrfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs, const double* ab,\n                                lapack_int ldab, const double* b,\n                                lapack_int ldb, const double* x, lapack_int ldx,\n                                double* ferr, double* berr, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctbrfs_work( int matrix_order, char uplo, char 
trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs, const lapack_complex_float* ab,\n                                lapack_int ldab, const lapack_complex_float* b,\n                                lapack_int ldb, const lapack_complex_float* x,\n                                lapack_int ldx, float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_ztbrfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ab,\n                                lapack_int ldab, const lapack_complex_double* b,\n                                lapack_int ldb, const lapack_complex_double* x,\n                                lapack_int ldx, double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_stbtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs, const float* ab,\n                                lapack_int ldab, float* b, lapack_int ldb );\nlapack_int LAPACKE_dtbtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs, const double* ab,\n                                lapack_int ldab, double* b, lapack_int ldb );\nlapack_int LAPACKE_ctbtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs, const lapack_complex_float* ab,\n                                lapack_int ldab, lapack_complex_float* b,\n                                lapack_int ldb 
);\nlapack_int LAPACKE_ztbtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int kd,\n                                lapack_int nrhs,\n                                const lapack_complex_double* ab,\n                                lapack_int ldab, lapack_complex_double* b,\n                                lapack_int ldb );\n\nlapack_int LAPACKE_stfsm_work( int matrix_order, char transr, char side,\n                               char uplo, char trans, char diag, lapack_int m,\n                               lapack_int n, float alpha, const float* a,\n                               float* b, lapack_int ldb );\nlapack_int LAPACKE_dtfsm_work( int matrix_order, char transr, char side,\n                               char uplo, char trans, char diag, lapack_int m,\n                               lapack_int n, double alpha, const double* a,\n                               double* b, lapack_int ldb );\nlapack_int LAPACKE_ctfsm_work( int matrix_order, char transr, char side,\n                               char uplo, char trans, char diag, lapack_int m,\n                               lapack_int n, lapack_complex_float alpha,\n                               const lapack_complex_float* a,\n                               lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztfsm_work( int matrix_order, char transr, char side,\n                               char uplo, char trans, char diag, lapack_int m,\n                               lapack_int n, lapack_complex_double alpha,\n                               const lapack_complex_double* a,\n                               lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_stftri_work( int matrix_order, char transr, char uplo,\n                                char diag, lapack_int n, float* a );\nlapack_int LAPACKE_dtftri_work( int matrix_order, char transr, char uplo,\n                                char diag, lapack_int n, double* 
a );\nlapack_int LAPACKE_ctftri_work( int matrix_order, char transr, char uplo,\n                                char diag, lapack_int n,\n                                lapack_complex_float* a );\nlapack_int LAPACKE_ztftri_work( int matrix_order, char transr, char uplo,\n                                char diag, lapack_int n,\n                                lapack_complex_double* a );\n\nlapack_int LAPACKE_stfttp_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const float* arf, float* ap );\nlapack_int LAPACKE_dtfttp_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const double* arf, double* ap );\nlapack_int LAPACKE_ctfttp_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_float* arf,\n                                lapack_complex_float* ap );\nlapack_int LAPACKE_ztfttp_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_double* arf,\n                                lapack_complex_double* ap );\n\nlapack_int LAPACKE_stfttr_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const float* arf, float* a,\n                                lapack_int lda );\nlapack_int LAPACKE_dtfttr_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const double* arf, double* a,\n                                lapack_int lda );\nlapack_int LAPACKE_ctfttr_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_float* arf,\n                                lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_ztfttr_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_double* arf,\n                                lapack_complex_double* a, lapack_int 
lda );\n\nlapack_int LAPACKE_stgevc_work( int matrix_order, char side, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const float* s, lapack_int lds, const float* p,\n                                lapack_int ldp, float* vl, lapack_int ldvl,\n                                float* vr, lapack_int ldvr, lapack_int mm,\n                                lapack_int* m, float* work );\nlapack_int LAPACKE_dtgevc_work( int matrix_order, char side, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const double* s, lapack_int lds,\n                                const double* p, lapack_int ldp, double* vl,\n                                lapack_int ldvl, double* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m, double* work );\nlapack_int LAPACKE_ctgevc_work( int matrix_order, char side, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const lapack_complex_float* s, lapack_int lds,\n                                const lapack_complex_float* p, lapack_int ldp,\n                                lapack_complex_float* vl, lapack_int ldvl,\n                                lapack_complex_float* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_ztgevc_work( int matrix_order, char side, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const lapack_complex_double* s, lapack_int lds,\n                                const lapack_complex_double* p, lapack_int ldp,\n                                lapack_complex_double* vl, lapack_int ldvl,\n                                lapack_complex_double* vr, lapack_int ldvr,\n               
                 lapack_int mm, lapack_int* m,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_stgexc_work( int matrix_order, lapack_logical wantq,\n                                lapack_logical wantz, lapack_int n, float* a,\n                                lapack_int lda, float* b, lapack_int ldb,\n                                float* q, lapack_int ldq, float* z,\n                                lapack_int ldz, lapack_int* ifst,\n                                lapack_int* ilst, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dtgexc_work( int matrix_order, lapack_logical wantq,\n                                lapack_logical wantz, lapack_int n, double* a,\n                                lapack_int lda, double* b, lapack_int ldb,\n                                double* q, lapack_int ldq, double* z,\n                                lapack_int ldz, lapack_int* ifst,\n                                lapack_int* ilst, double* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_ctgexc_work( int matrix_order, lapack_logical wantq,\n                                lapack_logical wantz, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_int ifst, lapack_int ilst );\nlapack_int LAPACKE_ztgexc_work( int matrix_order, lapack_logical wantq,\n                                lapack_logical wantz, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* q, lapack_int ldq,\n                                
lapack_complex_double* z, lapack_int ldz,\n                                lapack_int ifst, lapack_int ilst );\n\nlapack_int LAPACKE_stgsen_work( int matrix_order, lapack_int ijob,\n                                lapack_logical wantq, lapack_logical wantz,\n                                const lapack_logical* select, lapack_int n,\n                                float* a, lapack_int lda, float* b,\n                                lapack_int ldb, float* alphar, float* alphai,\n                                float* beta, float* q, lapack_int ldq, float* z,\n                                lapack_int ldz, lapack_int* m, float* pl,\n                                float* pr, float* dif, float* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\nlapack_int LAPACKE_dtgsen_work( int matrix_order, lapack_int ijob,\n                                lapack_logical wantq, lapack_logical wantz,\n                                const lapack_logical* select, lapack_int n,\n                                double* a, lapack_int lda, double* b,\n                                lapack_int ldb, double* alphar, double* alphai,\n                                double* beta, double* q, lapack_int ldq,\n                                double* z, lapack_int ldz, lapack_int* m,\n                                double* pl, double* pr, double* dif,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_ctgsen_work( int matrix_order, lapack_int ijob,\n                                lapack_logical wantq, lapack_logical wantz,\n                                const lapack_logical* select, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* alpha,\n   
                             lapack_complex_float* beta,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* z, lapack_int ldz,\n                                lapack_int* m, float* pl, float* pr, float* dif,\n                                lapack_complex_float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_ztgsen_work( int matrix_order, lapack_int ijob,\n                                lapack_logical wantq, lapack_logical wantz,\n                                const lapack_logical* select, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* alpha,\n                                lapack_complex_double* beta,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_complex_double* z, lapack_int ldz,\n                                lapack_int* m, double* pl, double* pr,\n                                double* dif, lapack_complex_double* work,\n                                lapack_int lwork, lapack_int* iwork,\n                                lapack_int liwork );\n\nlapack_int LAPACKE_stgsja_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                float* a, lapack_int lda, float* b,\n                                lapack_int ldb, float tola, float tolb,\n                                float* alpha, float* beta, float* u,\n                                lapack_int ldu, float* v, lapack_int ldv,\n                                float* q, lapack_int ldq, float* work,\n                                lapack_int* ncycle 
);\nlapack_int LAPACKE_dtgsja_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                double* a, lapack_int lda, double* b,\n                                lapack_int ldb, double tola, double tolb,\n                                double* alpha, double* beta, double* u,\n                                lapack_int ldu, double* v, lapack_int ldv,\n                                double* q, lapack_int ldq, double* work,\n                                lapack_int* ncycle );\nlapack_int LAPACKE_ctgsja_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                float tola, float tolb, float* alpha,\n                                float* beta, lapack_complex_float* u,\n                                lapack_int ldu, lapack_complex_float* v,\n                                lapack_int ldv, lapack_complex_float* q,\n                                lapack_int ldq, lapack_complex_float* work,\n                                lapack_int* ncycle );\nlapack_int LAPACKE_ztgsja_work( int matrix_order, char jobu, char jobv,\n                                char jobq, lapack_int m, lapack_int p,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                double tola, double tolb, double* alpha,\n                                double* beta, lapack_complex_double* u,\n                                lapack_int 
ldu, lapack_complex_double* v,\n                                lapack_int ldv, lapack_complex_double* q,\n                                lapack_int ldq, lapack_complex_double* work,\n                                lapack_int* ncycle );\n\nlapack_int LAPACKE_stgsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const float* a, lapack_int lda, const float* b,\n                                lapack_int ldb, const float* vl,\n                                lapack_int ldvl, const float* vr,\n                                lapack_int ldvr, float* s, float* dif,\n                                lapack_int mm, lapack_int* m, float* work,\n                                lapack_int lwork, lapack_int* iwork );\nlapack_int LAPACKE_dtgsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const double* a, lapack_int lda,\n                                const double* b, lapack_int ldb,\n                                const double* vl, lapack_int ldvl,\n                                const double* vr, lapack_int ldvr, double* s,\n                                double* dif, lapack_int mm, lapack_int* m,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctgsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                const lapack_complex_float* vl, lapack_int ldvl,\n                                const lapack_complex_float* vr, lapack_int ldvr,\n                                float* s, float* dif, lapack_int mm,\n   
                             lapack_int* m, lapack_complex_float* work,\n                                lapack_int lwork, lapack_int* iwork );\nlapack_int LAPACKE_ztgsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                const lapack_complex_double* vl,\n                                lapack_int ldvl,\n                                const lapack_complex_double* vr,\n                                lapack_int ldvr, double* s, double* dif,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_double* work, lapack_int lwork,\n                                lapack_int* iwork );\n\nlapack_int LAPACKE_stgsyl_work( int matrix_order, char trans, lapack_int ijob,\n                                lapack_int m, lapack_int n, const float* a,\n                                lapack_int lda, const float* b, lapack_int ldb,\n                                float* c, lapack_int ldc, const float* d,\n                                lapack_int ldd, const float* e, lapack_int lde,\n                                float* f, lapack_int ldf, float* scale,\n                                float* dif, float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dtgsyl_work( int matrix_order, char trans, lapack_int ijob,\n                                lapack_int m, lapack_int n, const double* a,\n                                lapack_int lda, const double* b, lapack_int ldb,\n                                double* c, lapack_int ldc, const double* d,\n                                lapack_int ldd, const double* e, lapack_int lde,\n                                double* f, lapack_int ldf, double* scale,\n                
                double* dif, double* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctgsyl_work( int matrix_order, char trans, lapack_int ijob,\n                                lapack_int m, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* c, lapack_int ldc,\n                                const lapack_complex_float* d, lapack_int ldd,\n                                const lapack_complex_float* e, lapack_int lde,\n                                lapack_complex_float* f, lapack_int ldf,\n                                float* scale, float* dif,\n                                lapack_complex_float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ztgsyl_work( int matrix_order, char trans, lapack_int ijob,\n                                lapack_int m, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* c, lapack_int ldc,\n                                const lapack_complex_double* d, lapack_int ldd,\n                                const lapack_complex_double* e, lapack_int lde,\n                                lapack_complex_double* f, lapack_int ldf,\n                                double* scale, double* dif,\n                                lapack_complex_double* work, lapack_int lwork,\n                                lapack_int* iwork );\n\nlapack_int LAPACKE_stpcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n, const float* ap,\n                                float* rcond, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dtpcon_work( int 
matrix_order, char norm, char uplo,\n                                char diag, lapack_int n, const double* ap,\n                                double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctpcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n,\n                                const lapack_complex_float* ap, float* rcond,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_ztpcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n,\n                                const lapack_complex_double* ap, double* rcond,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_stprfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const float* ap, const float* b, lapack_int ldb,\n                                const float* x, lapack_int ldx, float* ferr,\n                                float* berr, float* work, lapack_int* iwork );\nlapack_int LAPACKE_dtprfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const double* ap, const double* b,\n                                lapack_int ldb, const double* x, lapack_int ldx,\n                                double* ferr, double* berr, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctprfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* ap,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                const lapack_complex_float* x, lapack_int 
ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_ztprfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                const lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_stptri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, float* ap );\nlapack_int LAPACKE_dtptri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, double* ap );\nlapack_int LAPACKE_ctptri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, lapack_complex_float* ap );\nlapack_int LAPACKE_ztptri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, lapack_complex_double* ap );\n\nlapack_int LAPACKE_stptrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const float* ap, float* b, lapack_int ldb );\nlapack_int LAPACKE_dtptrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const double* ap, double* b, lapack_int ldb );\nlapack_int LAPACKE_ctptrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* ap,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int 
LAPACKE_ztptrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* ap,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_stpttf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const float* ap, float* arf );\nlapack_int LAPACKE_dtpttf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const double* ap, double* arf );\nlapack_int LAPACKE_ctpttf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_float* ap,\n                                lapack_complex_float* arf );\nlapack_int LAPACKE_ztpttf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_double* ap,\n                                lapack_complex_double* arf );\n\nlapack_int LAPACKE_stpttr_work( int matrix_order, char uplo, lapack_int n,\n                                const float* ap, float* a, lapack_int lda );\nlapack_int LAPACKE_dtpttr_work( int matrix_order, char uplo, lapack_int n,\n                                const double* ap, double* a, lapack_int lda );\nlapack_int LAPACKE_ctpttr_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* ap,\n                                lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_ztpttr_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* ap,\n                                lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_strcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n, const float* a,\n                                lapack_int lda, float* rcond, float* work,\n  
                              lapack_int* iwork );\nlapack_int LAPACKE_dtrcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n, const double* a,\n                                lapack_int lda, double* rcond, double* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctrcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                float* rcond, lapack_complex_float* work,\n                                float* rwork );\nlapack_int LAPACKE_ztrcon_work( int matrix_order, char norm, char uplo,\n                                char diag, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                double* rcond, lapack_complex_double* work,\n                                double* rwork );\n\nlapack_int LAPACKE_strevc_work( int matrix_order, char side, char howmny,\n                                lapack_logical* select, lapack_int n,\n                                const float* t, lapack_int ldt, float* vl,\n                                lapack_int ldvl, float* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m, float* work );\nlapack_int LAPACKE_dtrevc_work( int matrix_order, char side, char howmny,\n                                lapack_logical* select, lapack_int n,\n                                const double* t, lapack_int ldt, double* vl,\n                                lapack_int ldvl, double* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m, double* work );\nlapack_int LAPACKE_ctrevc_work( int matrix_order, char side, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                lapack_complex_float* t, lapack_int ldt,\n      
                          lapack_complex_float* vl, lapack_int ldvl,\n                                lapack_complex_float* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_ztrevc_work( int matrix_order, char side, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                lapack_complex_double* t, lapack_int ldt,\n                                lapack_complex_double* vl, lapack_int ldvl,\n                                lapack_complex_double* vr, lapack_int ldvr,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_strexc_work( int matrix_order, char compq, lapack_int n,\n                                float* t, lapack_int ldt, float* q,\n                                lapack_int ldq, lapack_int* ifst,\n                                lapack_int* ilst, float* work );\nlapack_int LAPACKE_dtrexc_work( int matrix_order, char compq, lapack_int n,\n                                double* t, lapack_int ldt, double* q,\n                                lapack_int ldq, lapack_int* ifst,\n                                lapack_int* ilst, double* work );\nlapack_int LAPACKE_ctrexc_work( int matrix_order, char compq, lapack_int n,\n                                lapack_complex_float* t, lapack_int ldt,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_int ifst, lapack_int ilst );\nlapack_int LAPACKE_ztrexc_work( int matrix_order, char compq, lapack_int n,\n                                lapack_complex_double* t, lapack_int ldt,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_int ifst, lapack_int ilst );\n\nlapack_int LAPACKE_strrfs_work( int 
matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const float* a, lapack_int lda, const float* b,\n                                lapack_int ldb, const float* x, lapack_int ldx,\n                                float* ferr, float* berr, float* work,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dtrrfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const double* a, lapack_int lda,\n                                const double* b, lapack_int ldb,\n                                const double* x, lapack_int ldx, double* ferr,\n                                double* berr, double* work, lapack_int* iwork );\nlapack_int LAPACKE_ctrrfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                const lapack_complex_float* x, lapack_int ldx,\n                                float* ferr, float* berr,\n                                lapack_complex_float* work, float* rwork );\nlapack_int LAPACKE_ztrrfs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                const lapack_complex_double* x, lapack_int ldx,\n                                double* ferr, double* berr,\n                                lapack_complex_double* work, double* rwork );\n\nlapack_int LAPACKE_strsen_work( int matrix_order, char job, char compq,\n                        
        const lapack_logical* select, lapack_int n,\n                                float* t, lapack_int ldt, float* q,\n                                lapack_int ldq, float* wr, float* wi,\n                                lapack_int* m, float* s, float* sep,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_dtrsen_work( int matrix_order, char job, char compq,\n                                const lapack_logical* select, lapack_int n,\n                                double* t, lapack_int ldt, double* q,\n                                lapack_int ldq, double* wr, double* wi,\n                                lapack_int* m, double* s, double* sep,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork, lapack_int liwork );\nlapack_int LAPACKE_ctrsen_work( int matrix_order, char job, char compq,\n                                const lapack_logical* select, lapack_int n,\n                                lapack_complex_float* t, lapack_int ldt,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* w, lapack_int* m,\n                                float* s, float* sep,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_ztrsen_work( int matrix_order, char job, char compq,\n                                const lapack_logical* select, lapack_int n,\n                                lapack_complex_double* t, lapack_int ldt,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_complex_double* w, lapack_int* m,\n                                double* s, double* sep,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_strsna_work( int matrix_order, char job, char 
howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const float* t, lapack_int ldt, const float* vl,\n                                lapack_int ldvl, const float* vr,\n                                lapack_int ldvr, float* s, float* sep,\n                                lapack_int mm, lapack_int* m, float* work,\n                                lapack_int ldwork, lapack_int* iwork );\nlapack_int LAPACKE_dtrsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const double* t, lapack_int ldt,\n                                const double* vl, lapack_int ldvl,\n                                const double* vr, lapack_int ldvr, double* s,\n                                double* sep, lapack_int mm, lapack_int* m,\n                                double* work, lapack_int ldwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ctrsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const lapack_complex_float* t, lapack_int ldt,\n                                const lapack_complex_float* vl, lapack_int ldvl,\n                                const lapack_complex_float* vr, lapack_int ldvr,\n                                float* s, float* sep, lapack_int mm,\n                                lapack_int* m, lapack_complex_float* work,\n                                lapack_int ldwork, float* rwork );\nlapack_int LAPACKE_ztrsna_work( int matrix_order, char job, char howmny,\n                                const lapack_logical* select, lapack_int n,\n                                const lapack_complex_double* t, lapack_int ldt,\n                                const lapack_complex_double* vl,\n                                lapack_int ldvl,\n                           
     const lapack_complex_double* vr,\n                                lapack_int ldvr, double* s, double* sep,\n                                lapack_int mm, lapack_int* m,\n                                lapack_complex_double* work, lapack_int ldwork,\n                                double* rwork );\n\nlapack_int LAPACKE_strsyl_work( int matrix_order, char trana, char tranb,\n                                lapack_int isgn, lapack_int m, lapack_int n,\n                                const float* a, lapack_int lda, const float* b,\n                                lapack_int ldb, float* c, lapack_int ldc,\n                                float* scale );\nlapack_int LAPACKE_dtrsyl_work( int matrix_order, char trana, char tranb,\n                                lapack_int isgn, lapack_int m, lapack_int n,\n                                const double* a, lapack_int lda,\n                                const double* b, lapack_int ldb, double* c,\n                                lapack_int ldc, double* scale );\nlapack_int LAPACKE_ctrsyl_work( int matrix_order, char trana, char tranb,\n                                lapack_int isgn, lapack_int m, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* b, lapack_int ldb,\n                                lapack_complex_float* c, lapack_int ldc,\n                                float* scale );\nlapack_int LAPACKE_ztrsyl_work( int matrix_order, char trana, char tranb,\n                                lapack_int isgn, lapack_int m, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* c, lapack_int ldc,\n                                double* scale );\n\nlapack_int LAPACKE_strtri_work( int matrix_order, char uplo, char diag,\n                    
            lapack_int n, float* a, lapack_int lda );\nlapack_int LAPACKE_dtrtri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, double* a, lapack_int lda );\nlapack_int LAPACKE_ctrtri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, lapack_complex_float* a,\n                                lapack_int lda );\nlapack_int LAPACKE_ztrtri_work( int matrix_order, char uplo, char diag,\n                                lapack_int n, lapack_complex_double* a,\n                                lapack_int lda );\n\nlapack_int LAPACKE_strtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const float* a, lapack_int lda, float* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_dtrtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const double* a, lapack_int lda, double* b,\n                                lapack_int ldb );\nlapack_int LAPACKE_ctrtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztrtrs_work( int matrix_order, char uplo, char trans,\n                                char diag, lapack_int n, lapack_int nrhs,\n                                const lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_strttf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const float* a, lapack_int lda,\n                                float* arf );\nlapack_int LAPACKE_dtrttf_work( 
int matrix_order, char transr, char uplo,\n                                lapack_int n, const double* a, lapack_int lda,\n                                double* arf );\nlapack_int LAPACKE_ctrttf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* arf );\nlapack_int LAPACKE_ztrttf_work( int matrix_order, char transr, char uplo,\n                                lapack_int n, const lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* arf );\n\nlapack_int LAPACKE_strttp_work( int matrix_order, char uplo, lapack_int n,\n                                const float* a, lapack_int lda, float* ap );\nlapack_int LAPACKE_dtrttp_work( int matrix_order, char uplo, lapack_int n,\n                                const double* a, lapack_int lda, double* ap );\nlapack_int LAPACKE_ctrttp_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* ap );\nlapack_int LAPACKE_ztrttp_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* ap );\n\nlapack_int LAPACKE_stzrzf_work( int matrix_order, lapack_int m, lapack_int n,\n                                float* a, lapack_int lda, float* tau,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_dtzrzf_work( int matrix_order, lapack_int m, lapack_int n,\n                                double* a, lapack_int lda, double* tau,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_ctzrzf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                           
     lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_ztzrzf_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cungbr_work( int matrix_order, char vect, lapack_int m,\n                                lapack_int n, lapack_int k,\n                                lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zungbr_work( int matrix_order, char vect, lapack_int m,\n                                lapack_int n, lapack_int k,\n                                lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunghr_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunghr_work( int matrix_order, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunglq_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_float* a,\n                                
lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunglq_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cungql_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zungql_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cungqr_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zungqr_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cungrq_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_float* a,\n                                
lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zungrq_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int k, lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cungtr_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zungtr_work( int matrix_order, char uplo, lapack_int n,\n                                lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmbr_work( int matrix_order, char vect, char side,\n                                char trans, lapack_int m, lapack_int n,\n                                lapack_int k, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmbr_work( int matrix_order, char vect, char side,\n                                char trans, lapack_int m, lapack_int n,\n                                lapack_int k, const lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                
lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmhr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmhr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int ilo,\n                                lapack_int ihi, const lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmlq_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmlq_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmql_work( int matrix_order, char side, char 
trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmql_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmqr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmqr_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmrq_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_float* a, lapack_int lda,\n                         
       const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmrq_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmrz_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                lapack_int l, const lapack_complex_float* a,\n                                lapack_int lda, const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmrz_work( int matrix_order, char side, char trans,\n                                lapack_int m, lapack_int n, lapack_int k,\n                                lapack_int l, const lapack_complex_double* a,\n                                lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cunmtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const lapack_complex_float* a, lapack_int lda,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n               
                 lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_zunmtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const lapack_complex_double* a, lapack_int lda,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                                lapack_complex_double* work, lapack_int lwork );\n\nlapack_int LAPACKE_cupgtr_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_float* ap,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* q, lapack_int ldq,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zupgtr_work( int matrix_order, char uplo, lapack_int n,\n                                const lapack_complex_double* ap,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* q, lapack_int ldq,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_cupmtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const lapack_complex_float* ap,\n                                const lapack_complex_float* tau,\n                                lapack_complex_float* c, lapack_int ldc,\n                                lapack_complex_float* work );\nlapack_int LAPACKE_zupmtr_work( int matrix_order, char side, char uplo,\n                                char trans, lapack_int m, lapack_int n,\n                                const lapack_complex_double* ap,\n                                const lapack_complex_double* tau,\n                                lapack_complex_double* c, lapack_int ldc,\n                   
             lapack_complex_double* work );\n\nlapack_int LAPACKE_claghe( int matrix_order, lapack_int n, lapack_int k,\n                           const float* d, lapack_complex_float* a,\n                           lapack_int lda, lapack_int* iseed );\nlapack_int LAPACKE_zlaghe( int matrix_order, lapack_int n, lapack_int k,\n                           const double* d, lapack_complex_double* a,\n                           lapack_int lda, lapack_int* iseed );\n\nlapack_int LAPACKE_slagsy( int matrix_order, lapack_int n, lapack_int k,\n                           const float* d, float* a, lapack_int lda,\n                           lapack_int* iseed );\nlapack_int LAPACKE_dlagsy( int matrix_order, lapack_int n, lapack_int k,\n                           const double* d, double* a, lapack_int lda,\n                           lapack_int* iseed );\nlapack_int LAPACKE_clagsy( int matrix_order, lapack_int n, lapack_int k,\n                           const float* d, lapack_complex_float* a,\n                           lapack_int lda, lapack_int* iseed );\nlapack_int LAPACKE_zlagsy( int matrix_order, lapack_int n, lapack_int k,\n                           const double* d, lapack_complex_double* a,\n                           lapack_int lda, lapack_int* iseed );\n\nlapack_int LAPACKE_slapmr( int matrix_order, lapack_logical forwrd,\n                           lapack_int m, lapack_int n, float* x, lapack_int ldx,\n                           lapack_int* k );\nlapack_int LAPACKE_dlapmr( int matrix_order, lapack_logical forwrd,\n                           lapack_int m, lapack_int n, double* x,\n                           lapack_int ldx, lapack_int* k );\nlapack_int LAPACKE_clapmr( int matrix_order, lapack_logical forwrd,\n                           lapack_int m, lapack_int n, lapack_complex_float* x,\n                           lapack_int ldx, lapack_int* k );\nlapack_int LAPACKE_zlapmr( int matrix_order, lapack_logical forwrd,\n                           lapack_int m, lapack_int 
n, lapack_complex_double* x,\n                           lapack_int ldx, lapack_int* k );\n\n\nfloat LAPACKE_slapy2( float x, float y );\ndouble LAPACKE_dlapy2( double x, double y );\n\nfloat LAPACKE_slapy3( float x, float y, float z );\ndouble LAPACKE_dlapy3( double x, double y, double z );\n\nlapack_int LAPACKE_slartgp( float f, float g, float* cs, float* sn, float* r );\nlapack_int LAPACKE_dlartgp( double f, double g, double* cs, double* sn,\n                            double* r );\n\nlapack_int LAPACKE_slartgs( float x, float y, float sigma, float* cs,\n                            float* sn );\nlapack_int LAPACKE_dlartgs( double x, double y, double sigma, double* cs,\n                            double* sn );\n\n\n//LAPACK 3.3.0\nlapack_int LAPACKE_cbbcsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, lapack_int m,\n                           lapack_int p, lapack_int q, float* theta, float* phi,\n                           lapack_complex_float* u1, lapack_int ldu1,\n                           lapack_complex_float* u2, lapack_int ldu2,\n                           lapack_complex_float* v1t, lapack_int ldv1t,\n                           lapack_complex_float* v2t, lapack_int ldv2t,\n                           float* b11d, float* b11e, float* b12d, float* b12e,\n                           float* b21d, float* b21e, float* b22d, float* b22e );\nlapack_int LAPACKE_cbbcsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                float* theta, float* phi,\n                                lapack_complex_float* u1, lapack_int ldu1,\n                                lapack_complex_float* u2, lapack_int ldu2,\n                                lapack_complex_float* v1t, lapack_int ldv1t,\n                                lapack_complex_float* v2t, 
lapack_int ldv2t,\n                                float* b11d, float* b11e, float* b12d,\n                                float* b12e, float* b21d, float* b21e,\n                                float* b22d, float* b22e, float* rwork,\n                                lapack_int lrwork );\nlapack_int LAPACKE_cheswapr( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_float* a, lapack_int i1,\n                             lapack_int i2 );\nlapack_int LAPACKE_cheswapr_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_float* a, lapack_int i1,\n                                  lapack_int i2 );\nlapack_int LAPACKE_chetri2( int matrix_order, char uplo, lapack_int n,\n                            lapack_complex_float* a, lapack_int lda,\n                            const lapack_int* ipiv );\nlapack_int LAPACKE_chetri2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 const lapack_int* ipiv,\n                                 lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_chetri2x( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_float* a, lapack_int lda,\n                             const lapack_int* ipiv, lapack_int nb );\nlapack_int LAPACKE_chetri2x_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_float* a, lapack_int lda,\n                                  const lapack_int* ipiv,\n                                  lapack_complex_float* work, lapack_int nb );\nlapack_int LAPACKE_chetrs2( int matrix_order, char uplo, lapack_int n,\n                            lapack_int nrhs, const lapack_complex_float* a,\n                            lapack_int lda, const lapack_int* ipiv,\n                            lapack_complex_float* b, lapack_int ldb );\nlapack_int 
LAPACKE_chetrs2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_int nrhs, const lapack_complex_float* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* work );\nlapack_int LAPACKE_csyconv( int matrix_order, char uplo, char way, lapack_int n,\n                            lapack_complex_float* a, lapack_int lda,\n                            const lapack_int* ipiv );\nlapack_int LAPACKE_csyconv_work( int matrix_order, char uplo, char way,\n                                 lapack_int n, lapack_complex_float* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 lapack_complex_float* work );\nlapack_int LAPACKE_csyswapr( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_float* a, lapack_int i1,\n                             lapack_int i2 );\nlapack_int LAPACKE_csyswapr_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_float* a, lapack_int i1,\n                                  lapack_int i2 );\nlapack_int LAPACKE_csytri2( int matrix_order, char uplo, lapack_int n,\n                            lapack_complex_float* a, lapack_int lda,\n                            const lapack_int* ipiv );\nlapack_int LAPACKE_csytri2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 const lapack_int* ipiv,\n                                 lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_csytri2x( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_float* a, lapack_int lda,\n                             const lapack_int* ipiv, lapack_int nb );\nlapack_int LAPACKE_csytri2x_work( int 
matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_float* a, lapack_int lda,\n                                  const lapack_int* ipiv,\n                                  lapack_complex_float* work, lapack_int nb );\nlapack_int LAPACKE_csytrs2( int matrix_order, char uplo, lapack_int n,\n                            lapack_int nrhs, const lapack_complex_float* a,\n                            lapack_int lda, const lapack_int* ipiv,\n                            lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_csytrs2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_int nrhs, const lapack_complex_float* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* work );\nlapack_int LAPACKE_cunbdb( int matrix_order, char trans, char signs,\n                           lapack_int m, lapack_int p, lapack_int q,\n                           lapack_complex_float* x11, lapack_int ldx11,\n                           lapack_complex_float* x12, lapack_int ldx12,\n                           lapack_complex_float* x21, lapack_int ldx21,\n                           lapack_complex_float* x22, lapack_int ldx22,\n                           float* theta, float* phi,\n                           lapack_complex_float* taup1,\n                           lapack_complex_float* taup2,\n                           lapack_complex_float* tauq1,\n                           lapack_complex_float* tauq2 );\nlapack_int LAPACKE_cunbdb_work( int matrix_order, char trans, char signs,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                lapack_complex_float* x11, lapack_int ldx11,\n                                lapack_complex_float* x12, lapack_int ldx12,\n                                lapack_complex_float* x21, 
lapack_int ldx21,\n                                lapack_complex_float* x22, lapack_int ldx22,\n                                float* theta, float* phi,\n                                lapack_complex_float* taup1,\n                                lapack_complex_float* taup2,\n                                lapack_complex_float* tauq1,\n                                lapack_complex_float* tauq2,\n                                lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_cuncsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, char signs,\n                           lapack_int m, lapack_int p, lapack_int q,\n                           lapack_complex_float* x11, lapack_int ldx11,\n                           lapack_complex_float* x12, lapack_int ldx12,\n                           lapack_complex_float* x21, lapack_int ldx21,\n                           lapack_complex_float* x22, lapack_int ldx22,\n                           float* theta, lapack_complex_float* u1,\n                           lapack_int ldu1, lapack_complex_float* u2,\n                           lapack_int ldu2, lapack_complex_float* v1t,\n                           lapack_int ldv1t, lapack_complex_float* v2t,\n                           lapack_int ldv2t );\nlapack_int LAPACKE_cuncsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                char signs, lapack_int m, lapack_int p,\n                                lapack_int q, lapack_complex_float* x11,\n                                lapack_int ldx11, lapack_complex_float* x12,\n                                lapack_int ldx12, lapack_complex_float* x21,\n                                lapack_int ldx21, lapack_complex_float* x22,\n                                lapack_int ldx22, float* theta,\n                                lapack_complex_float* u1, lapack_int ldu1,\n     
                           lapack_complex_float* u2, lapack_int ldu2,\n                                lapack_complex_float* v1t, lapack_int ldv1t,\n                                lapack_complex_float* v2t, lapack_int ldv2t,\n                                lapack_complex_float* work, lapack_int lwork,\n                                float* rwork, lapack_int lrwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dbbcsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, lapack_int m,\n                           lapack_int p, lapack_int q, double* theta,\n                           double* phi, double* u1, lapack_int ldu1, double* u2,\n                           lapack_int ldu2, double* v1t, lapack_int ldv1t,\n                           double* v2t, lapack_int ldv2t, double* b11d,\n                           double* b11e, double* b12d, double* b12e,\n                           double* b21d, double* b21e, double* b22d,\n                           double* b22e );\nlapack_int LAPACKE_dbbcsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                double* theta, double* phi, double* u1,\n                                lapack_int ldu1, double* u2, lapack_int ldu2,\n                                double* v1t, lapack_int ldv1t, double* v2t,\n                                lapack_int ldv2t, double* b11d, double* b11e,\n                                double* b12d, double* b12e, double* b21d,\n                                double* b21e, double* b22d, double* b22e,\n                                double* work, lapack_int lwork );\nlapack_int LAPACKE_dorbdb( int matrix_order, char trans, char signs,\n                           lapack_int m, lapack_int p, lapack_int q,\n                           double* x11, lapack_int 
ldx11, double* x12,\n                           lapack_int ldx12, double* x21, lapack_int ldx21,\n                           double* x22, lapack_int ldx22, double* theta,\n                           double* phi, double* taup1, double* taup2,\n                           double* tauq1, double* tauq2 );\nlapack_int LAPACKE_dorbdb_work( int matrix_order, char trans, char signs,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                double* x11, lapack_int ldx11, double* x12,\n                                lapack_int ldx12, double* x21, lapack_int ldx21,\n                                double* x22, lapack_int ldx22, double* theta,\n                                double* phi, double* taup1, double* taup2,\n                                double* tauq1, double* tauq2, double* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_dorcsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, char signs,\n                           lapack_int m, lapack_int p, lapack_int q,\n                           double* x11, lapack_int ldx11, double* x12,\n                           lapack_int ldx12, double* x21, lapack_int ldx21,\n                           double* x22, lapack_int ldx22, double* theta,\n                           double* u1, lapack_int ldu1, double* u2,\n                           lapack_int ldu2, double* v1t, lapack_int ldv1t,\n                           double* v2t, lapack_int ldv2t );\nlapack_int LAPACKE_dorcsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                char signs, lapack_int m, lapack_int p,\n                                lapack_int q, double* x11, lapack_int ldx11,\n                                double* x12, lapack_int ldx12, double* x21,\n                                lapack_int ldx21, double* x22, lapack_int 
ldx22,\n                                double* theta, double* u1, lapack_int ldu1,\n                                double* u2, lapack_int ldu2, double* v1t,\n                                lapack_int ldv1t, double* v2t, lapack_int ldv2t,\n                                double* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_dsyconv( int matrix_order, char uplo, char way, lapack_int n,\n                            double* a, lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_dsyconv_work( int matrix_order, char uplo, char way,\n                                 lapack_int n, double* a, lapack_int lda,\n                                 const lapack_int* ipiv, double* work );\nlapack_int LAPACKE_dsyswapr( int matrix_order, char uplo, lapack_int n,\n                             double* a, lapack_int i1, lapack_int i2 );\nlapack_int LAPACKE_dsyswapr_work( int matrix_order, char uplo, lapack_int n,\n                                  double* a, lapack_int i1, lapack_int i2 );\nlapack_int LAPACKE_dsytri2( int matrix_order, char uplo, lapack_int n,\n                            double* a, lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_dsytri2_work( int matrix_order, char uplo, lapack_int n,\n                                 double* a, lapack_int lda,\n                                 const lapack_int* ipiv,\n                                 lapack_complex_double* work, lapack_int lwork );\nlapack_int LAPACKE_dsytri2x( int matrix_order, char uplo, lapack_int n,\n                             double* a, lapack_int lda, const lapack_int* ipiv,\n                             lapack_int nb );\nlapack_int LAPACKE_dsytri2x_work( int matrix_order, char uplo, lapack_int n,\n                                  double* a, lapack_int lda,\n                                  const lapack_int* ipiv, double* work,\n                                  lapack_int nb );\nlapack_int LAPACKE_dsytrs2( int matrix_order, char 
uplo, lapack_int n,\n                            lapack_int nrhs, const double* a, lapack_int lda,\n                            const lapack_int* ipiv, double* b, lapack_int ldb );\nlapack_int LAPACKE_dsytrs2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_int nrhs, const double* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 double* b, lapack_int ldb, double* work );\nlapack_int LAPACKE_sbbcsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, lapack_int m,\n                           lapack_int p, lapack_int q, float* theta, float* phi,\n                           float* u1, lapack_int ldu1, float* u2,\n                           lapack_int ldu2, float* v1t, lapack_int ldv1t,\n                           float* v2t, lapack_int ldv2t, float* b11d,\n                           float* b11e, float* b12d, float* b12e, float* b21d,\n                           float* b21e, float* b22d, float* b22e );\nlapack_int LAPACKE_sbbcsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                float* theta, float* phi, float* u1,\n                                lapack_int ldu1, float* u2, lapack_int ldu2,\n                                float* v1t, lapack_int ldv1t, float* v2t,\n                                lapack_int ldv2t, float* b11d, float* b11e,\n                                float* b12d, float* b12e, float* b21d,\n                                float* b21e, float* b22d, float* b22e,\n                                float* work, lapack_int lwork );\nlapack_int LAPACKE_sorbdb( int matrix_order, char trans, char signs,\n                           lapack_int m, lapack_int p, lapack_int q, float* x11,\n                           lapack_int ldx11, 
float* x12, lapack_int ldx12,\n                           float* x21, lapack_int ldx21, float* x22,\n                           lapack_int ldx22, float* theta, float* phi,\n                           float* taup1, float* taup2, float* tauq1,\n                           float* tauq2 );\nlapack_int LAPACKE_sorbdb_work( int matrix_order, char trans, char signs,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                float* x11, lapack_int ldx11, float* x12,\n                                lapack_int ldx12, float* x21, lapack_int ldx21,\n                                float* x22, lapack_int ldx22, float* theta,\n                                float* phi, float* taup1, float* taup2,\n                                float* tauq1, float* tauq2, float* work,\n                                lapack_int lwork );\nlapack_int LAPACKE_sorcsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, char signs,\n                           lapack_int m, lapack_int p, lapack_int q, float* x11,\n                           lapack_int ldx11, float* x12, lapack_int ldx12,\n                           float* x21, lapack_int ldx21, float* x22,\n                           lapack_int ldx22, float* theta, float* u1,\n                           lapack_int ldu1, float* u2, lapack_int ldu2,\n                           float* v1t, lapack_int ldv1t, float* v2t,\n                           lapack_int ldv2t );\nlapack_int LAPACKE_sorcsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                char signs, lapack_int m, lapack_int p,\n                                lapack_int q, float* x11, lapack_int ldx11,\n                                float* x12, lapack_int ldx12, float* x21,\n                                lapack_int ldx21, float* x22, lapack_int ldx22,\n                                float* 
theta, float* u1, lapack_int ldu1,\n                                float* u2, lapack_int ldu2, float* v1t,\n                                lapack_int ldv1t, float* v2t, lapack_int ldv2t,\n                                float* work, lapack_int lwork,\n                                lapack_int* iwork );\nlapack_int LAPACKE_ssyconv( int matrix_order, char uplo, char way, lapack_int n,\n                            float* a, lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_ssyconv_work( int matrix_order, char uplo, char way,\n                                 lapack_int n, float* a, lapack_int lda,\n                                 const lapack_int* ipiv, float* work );\nlapack_int LAPACKE_ssyswapr( int matrix_order, char uplo, lapack_int n,\n                             float* a, lapack_int i1, lapack_int i2 );\nlapack_int LAPACKE_ssyswapr_work( int matrix_order, char uplo, lapack_int n,\n                                  float* a, lapack_int i1, lapack_int i2 );\nlapack_int LAPACKE_ssytri2( int matrix_order, char uplo, lapack_int n, float* a,\n                            lapack_int lda, const lapack_int* ipiv );\nlapack_int LAPACKE_ssytri2_work( int matrix_order, char uplo, lapack_int n,\n                                 float* a, lapack_int lda,\n                                 const lapack_int* ipiv,\n                                 lapack_complex_float* work, lapack_int lwork );\nlapack_int LAPACKE_ssytri2x( int matrix_order, char uplo, lapack_int n,\n                             float* a, lapack_int lda, const lapack_int* ipiv,\n                             lapack_int nb );\nlapack_int LAPACKE_ssytri2x_work( int matrix_order, char uplo, lapack_int n,\n                                  float* a, lapack_int lda,\n                                  const lapack_int* ipiv, float* work,\n                                  lapack_int nb );\nlapack_int LAPACKE_ssytrs2( int matrix_order, char uplo, lapack_int n,\n                            lapack_int 
nrhs, const float* a, lapack_int lda,\n                            const lapack_int* ipiv, float* b, lapack_int ldb );\nlapack_int LAPACKE_ssytrs2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_int nrhs, const float* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 float* b, lapack_int ldb, float* work );\nlapack_int LAPACKE_zbbcsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, lapack_int m,\n                           lapack_int p, lapack_int q, double* theta,\n                           double* phi, lapack_complex_double* u1,\n                           lapack_int ldu1, lapack_complex_double* u2,\n                           lapack_int ldu2, lapack_complex_double* v1t,\n                           lapack_int ldv1t, lapack_complex_double* v2t,\n                           lapack_int ldv2t, double* b11d, double* b11e,\n                           double* b12d, double* b12e, double* b21d,\n                           double* b21e, double* b22d, double* b22e );\nlapack_int LAPACKE_zbbcsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                double* theta, double* phi,\n                                lapack_complex_double* u1, lapack_int ldu1,\n                                lapack_complex_double* u2, lapack_int ldu2,\n                                lapack_complex_double* v1t, lapack_int ldv1t,\n                                lapack_complex_double* v2t, lapack_int ldv2t,\n                                double* b11d, double* b11e, double* b12d,\n                                double* b12e, double* b21d, double* b21e,\n                                double* b22d, double* b22e, double* rwork,\n                                lapack_int 
lrwork );\nlapack_int LAPACKE_zheswapr( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_double* a, lapack_int i1,\n                             lapack_int i2 );\nlapack_int LAPACKE_zheswapr_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_double* a, lapack_int i1,\n                                  lapack_int i2 );\nlapack_int LAPACKE_zhetri2( int matrix_order, char uplo, lapack_int n,\n                            lapack_complex_double* a, lapack_int lda,\n                            const lapack_int* ipiv );\nlapack_int LAPACKE_zhetri2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 const lapack_int* ipiv,\n                                 lapack_complex_double* work, lapack_int lwork );\nlapack_int LAPACKE_zhetri2x( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_double* a, lapack_int lda,\n                             const lapack_int* ipiv, lapack_int nb );\nlapack_int LAPACKE_zhetri2x_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_double* a, lapack_int lda,\n                                  const lapack_int* ipiv,\n                                  lapack_complex_double* work, lapack_int nb );\nlapack_int LAPACKE_zhetrs2( int matrix_order, char uplo, lapack_int n,\n                            lapack_int nrhs, const lapack_complex_double* a,\n                            lapack_int lda, const lapack_int* ipiv,\n                            lapack_complex_double* b, lapack_int ldb );\nlapack_int LAPACKE_zhetrs2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_int nrhs, const lapack_complex_double* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 lapack_complex_double* b, 
lapack_int ldb,\n                                 lapack_complex_double* work );\nlapack_int LAPACKE_zsyconv( int matrix_order, char uplo, char way, lapack_int n,\n                            lapack_complex_double* a, lapack_int lda,\n                            const lapack_int* ipiv );\nlapack_int LAPACKE_zsyconv_work( int matrix_order, char uplo, char way,\n                                 lapack_int n, lapack_complex_double* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 lapack_complex_double* work );\nlapack_int LAPACKE_zsyswapr( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_double* a, lapack_int i1,\n                             lapack_int i2 );\nlapack_int LAPACKE_zsyswapr_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_double* a, lapack_int i1,\n                                  lapack_int i2 );\nlapack_int LAPACKE_zsytri2( int matrix_order, char uplo, lapack_int n,\n                            lapack_complex_double* a, lapack_int lda,\n                            const lapack_int* ipiv );\nlapack_int LAPACKE_zsytri2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 const lapack_int* ipiv,\n                                 lapack_complex_double* work, lapack_int lwork );\nlapack_int LAPACKE_zsytri2x( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_double* a, lapack_int lda,\n                             const lapack_int* ipiv, lapack_int nb );\nlapack_int LAPACKE_zsytri2x_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_double* a, lapack_int lda,\n                                  const lapack_int* ipiv,\n                                  lapack_complex_double* work, lapack_int nb );\nlapack_int 
LAPACKE_zsytrs2( int matrix_order, char uplo, lapack_int n,\n                            lapack_int nrhs, const lapack_complex_double* a,\n                            lapack_int lda, const lapack_int* ipiv,\n                            lapack_complex_double* b, lapack_int ldb );\nlapack_int LAPACKE_zsytrs2_work( int matrix_order, char uplo, lapack_int n,\n                                 lapack_int nrhs, const lapack_complex_double* a,\n                                 lapack_int lda, const lapack_int* ipiv,\n                                 lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* work );\nlapack_int LAPACKE_zunbdb( int matrix_order, char trans, char signs,\n                           lapack_int m, lapack_int p, lapack_int q,\n                           lapack_complex_double* x11, lapack_int ldx11,\n                           lapack_complex_double* x12, lapack_int ldx12,\n                           lapack_complex_double* x21, lapack_int ldx21,\n                           lapack_complex_double* x22, lapack_int ldx22,\n                           double* theta, double* phi,\n                           lapack_complex_double* taup1,\n                           lapack_complex_double* taup2,\n                           lapack_complex_double* tauq1,\n                           lapack_complex_double* tauq2 );\nlapack_int LAPACKE_zunbdb_work( int matrix_order, char trans, char signs,\n                                lapack_int m, lapack_int p, lapack_int q,\n                                lapack_complex_double* x11, lapack_int ldx11,\n                                lapack_complex_double* x12, lapack_int ldx12,\n                                lapack_complex_double* x21, lapack_int ldx21,\n                                lapack_complex_double* x22, lapack_int ldx22,\n                                double* theta, double* phi,\n                                lapack_complex_double* taup1,\n                            
    lapack_complex_double* taup2,\n                                lapack_complex_double* tauq1,\n                                lapack_complex_double* tauq2,\n                                lapack_complex_double* work, lapack_int lwork );\nlapack_int LAPACKE_zuncsd( int matrix_order, char jobu1, char jobu2,\n                           char jobv1t, char jobv2t, char trans, char signs,\n                           lapack_int m, lapack_int p, lapack_int q,\n                           lapack_complex_double* x11, lapack_int ldx11,\n                           lapack_complex_double* x12, lapack_int ldx12,\n                           lapack_complex_double* x21, lapack_int ldx21,\n                           lapack_complex_double* x22, lapack_int ldx22,\n                           double* theta, lapack_complex_double* u1,\n                           lapack_int ldu1, lapack_complex_double* u2,\n                           lapack_int ldu2, lapack_complex_double* v1t,\n                           lapack_int ldv1t, lapack_complex_double* v2t,\n                           lapack_int ldv2t );\nlapack_int LAPACKE_zuncsd_work( int matrix_order, char jobu1, char jobu2,\n                                char jobv1t, char jobv2t, char trans,\n                                char signs, lapack_int m, lapack_int p,\n                                lapack_int q, lapack_complex_double* x11,\n                                lapack_int ldx11, lapack_complex_double* x12,\n                                lapack_int ldx12, lapack_complex_double* x21,\n                                lapack_int ldx21, lapack_complex_double* x22,\n                                lapack_int ldx22, double* theta,\n                                lapack_complex_double* u1, lapack_int ldu1,\n                                lapack_complex_double* u2, lapack_int ldu2,\n                                lapack_complex_double* v1t, lapack_int ldv1t,\n                                lapack_complex_double* v2t, lapack_int 
ldv2t,\n                                lapack_complex_double* work, lapack_int lwork,\n                                double* rwork, lapack_int lrwork,\n                                lapack_int* iwork );\n//LAPACK 3.4.0\nlapack_int LAPACKE_sgemqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int nb, const float* v, lapack_int ldv,\n                            const float* t, lapack_int ldt, float* c,\n                            lapack_int ldc );\nlapack_int LAPACKE_dgemqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int nb, const double* v, lapack_int ldv,\n                            const double* t, lapack_int ldt, double* c,\n                            lapack_int ldc );\nlapack_int LAPACKE_cgemqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int nb, const lapack_complex_float* v,\n                            lapack_int ldv, const lapack_complex_float* t,\n                            lapack_int ldt, lapack_complex_float* c,\n                            lapack_int ldc );\nlapack_int LAPACKE_zgemqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int nb, const lapack_complex_double* v,\n                            lapack_int ldv, const lapack_complex_double* t,\n                            lapack_int ldt, lapack_complex_double* c,\n                            lapack_int ldc );\n\nlapack_int LAPACKE_sgeqrt( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nb, float* a, lapack_int lda, float* t,\n                           lapack_int ldt );\nlapack_int LAPACKE_dgeqrt( int matrix_order, lapack_int m, lapack_int n,\n      
                     lapack_int nb, double* a, lapack_int lda, double* t,\n                           lapack_int ldt );\nlapack_int LAPACKE_cgeqrt( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nb, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* t,\n                           lapack_int ldt );\nlapack_int LAPACKE_zgeqrt( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int nb, lapack_complex_double* a,\n                           lapack_int lda, lapack_complex_double* t,\n                           lapack_int ldt );\n\nlapack_int LAPACKE_sgeqrt2( int matrix_order, lapack_int m, lapack_int n,\n                            float* a, lapack_int lda, float* t,\n                            lapack_int ldt );\nlapack_int LAPACKE_dgeqrt2( int matrix_order, lapack_int m, lapack_int n,\n                            double* a, lapack_int lda, double* t,\n                            lapack_int ldt );\nlapack_int LAPACKE_cgeqrt2( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_zgeqrt2( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_sgeqrt3( int matrix_order, lapack_int m, lapack_int n,\n                            float* a, lapack_int lda, float* t,\n                            lapack_int ldt );\nlapack_int LAPACKE_dgeqrt3( int matrix_order, lapack_int m, lapack_int n,\n                            double* a, lapack_int lda, double* t,\n                            lapack_int ldt );\nlapack_int LAPACKE_cgeqrt3( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_complex_float* a, lapack_int lda,\n                          
  lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_zgeqrt3( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_stpmqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int l, lapack_int nb, const float* v,\n                            lapack_int ldv, const float* t, lapack_int ldt,\n                            float* a, lapack_int lda, float* b,\n                            lapack_int ldb );\nlapack_int LAPACKE_dtpmqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int l, lapack_int nb, const double* v,\n                            lapack_int ldv, const double* t, lapack_int ldt,\n                            double* a, lapack_int lda, double* b,\n                            lapack_int ldb );\nlapack_int LAPACKE_ctpmqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int l, lapack_int nb,\n                            const lapack_complex_float* v, lapack_int ldv,\n                            const lapack_complex_float* t, lapack_int ldt,\n                            lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* b, lapack_int ldb );\nlapack_int LAPACKE_ztpmqrt( int matrix_order, char side, char trans,\n                            lapack_int m, lapack_int n, lapack_int k,\n                            lapack_int l, lapack_int nb,\n                            const lapack_complex_double* v, lapack_int ldv,\n                            const lapack_complex_double* t, lapack_int ldt,\n                            lapack_complex_double* a, lapack_int lda,\n    
                        lapack_complex_double* b, lapack_int ldb );\n\nlapack_int LAPACKE_dtpqrt( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int l, lapack_int nb, double* a,\n                           lapack_int lda, double* b, lapack_int ldb, double* t,\n                           lapack_int ldt );\nlapack_int LAPACKE_ctpqrt( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int l, lapack_int nb, lapack_complex_float* a,\n                           lapack_int lda, lapack_complex_float* t,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_int ldt );\nlapack_int LAPACKE_ztpqrt( int matrix_order, lapack_int m, lapack_int n,\n                           lapack_int l, lapack_int nb,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_stpqrt2( int matrix_order, lapack_int m, lapack_int n,\n                            float* a, lapack_int lda, float* b, lapack_int ldb,\n                            float* t, lapack_int ldt );\nlapack_int LAPACKE_dtpqrt2( int matrix_order, lapack_int m, lapack_int n,\n                            double* a, lapack_int lda, double* b,\n                            lapack_int ldb, double* t, lapack_int ldt );\nlapack_int LAPACKE_ctpqrt2( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_complex_float* a, lapack_int lda,\n                            lapack_complex_float* b, lapack_int ldb,\n                            lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_ztpqrt2( int matrix_order, lapack_int m, lapack_int n,\n                            lapack_complex_double* a, lapack_int lda,\n                            lapack_complex_double* b, lapack_int ldb,\n                            
lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_stprfb( int matrix_order, char side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_int l, const float* v,\n                           lapack_int ldv, const float* t, lapack_int ldt,\n                           float* a, lapack_int lda, float* b, lapack_int ldb,\n                           lapack_int myldwork );\nlapack_int LAPACKE_dtprfb( int matrix_order, char side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_int l, const double* v,\n                           lapack_int ldv, const double* t, lapack_int ldt,\n                           double* a, lapack_int lda, double* b, lapack_int ldb,\n                           lapack_int myldwork );\nlapack_int LAPACKE_ctprfb( int matrix_order, char side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_int l,\n                           const lapack_complex_float* v, lapack_int ldv,\n                           const lapack_complex_float* t, lapack_int ldt,\n                           lapack_complex_float* a, lapack_int lda,\n                           lapack_complex_float* b, lapack_int ldb,\n                           lapack_int myldwork );\nlapack_int LAPACKE_ztprfb( int matrix_order, char side, char trans, char direct,\n                           char storev, lapack_int m, lapack_int n,\n                           lapack_int k, lapack_int l,\n                           const lapack_complex_double* v, lapack_int ldv,\n                           const lapack_complex_double* t, lapack_int ldt,\n                           lapack_complex_double* a, lapack_int lda,\n                           lapack_complex_double* b, lapack_int ldb,\n                           lapack_int 
myldwork );\n\nlapack_int LAPACKE_sgemqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int nb, const float* v, lapack_int ldv,\n                                 const float* t, lapack_int ldt, float* c,\n                                 lapack_int ldc, float* work );\nlapack_int LAPACKE_dgemqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int nb, const double* v, lapack_int ldv,\n                                 const double* t, lapack_int ldt, double* c,\n                                 lapack_int ldc, double* work );\nlapack_int LAPACKE_cgemqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int nb, const lapack_complex_float* v,\n                                 lapack_int ldv, const lapack_complex_float* t,\n                                 lapack_int ldt, lapack_complex_float* c,\n                                 lapack_int ldc, lapack_complex_float* work );\nlapack_int LAPACKE_zgemqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int nb, const lapack_complex_double* v,\n                                 lapack_int ldv, const lapack_complex_double* t,\n                                 lapack_int ldt, lapack_complex_double* c,\n                                 lapack_int ldc, lapack_complex_double* work );\n\nlapack_int LAPACKE_sgeqrt_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nb, float* a, lapack_int lda,\n                                float* t, lapack_int ldt, float* work );\nlapack_int LAPACKE_dgeqrt_work( int matrix_order, lapack_int m, 
lapack_int n,\n                                lapack_int nb, double* a, lapack_int lda,\n                                double* t, lapack_int ldt, double* work );\nlapack_int LAPACKE_cgeqrt_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nb, lapack_complex_float* a,\n                                lapack_int lda, lapack_complex_float* t,\n                                lapack_int ldt, lapack_complex_float* work );\nlapack_int LAPACKE_zgeqrt_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int nb, lapack_complex_double* a,\n                                lapack_int lda, lapack_complex_double* t,\n                                lapack_int ldt, lapack_complex_double* work );\n\nlapack_int LAPACKE_sgeqrt2_work( int matrix_order, lapack_int m, lapack_int n,\n                                 float* a, lapack_int lda, float* t,\n                                 lapack_int ldt );\nlapack_int LAPACKE_dgeqrt2_work( int matrix_order, lapack_int m, lapack_int n,\n                                 double* a, lapack_int lda, double* t,\n                                 lapack_int ldt );\nlapack_int LAPACKE_cgeqrt2_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_zgeqrt2_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_sgeqrt3_work( int matrix_order, lapack_int m, lapack_int n,\n                                 float* a, lapack_int lda, float* t,\n                                 lapack_int ldt );\nlapack_int LAPACKE_dgeqrt3_work( int matrix_order, lapack_int m, lapack_int n,\n                                 double* a, lapack_int lda, 
double* t,\n                                 lapack_int ldt );\nlapack_int LAPACKE_cgeqrt3_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_zgeqrt3_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_stpmqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int l, lapack_int nb, const float* v,\n                                 lapack_int ldv, const float* t, lapack_int ldt,\n                                 float* a, lapack_int lda, float* b,\n                                 lapack_int ldb, float* work );\nlapack_int LAPACKE_dtpmqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int l, lapack_int nb, const double* v,\n                                 lapack_int ldv, const double* t,\n                                 lapack_int ldt, double* a, lapack_int lda,\n                                 double* b, lapack_int ldb, double* work );\nlapack_int LAPACKE_ctpmqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int l, lapack_int nb,\n                                 const lapack_complex_float* v, lapack_int ldv,\n                                 const lapack_complex_float* t, lapack_int ldt,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* b, lapack_int ldb,\n                                 
lapack_complex_float* work );\nlapack_int LAPACKE_ztpmqrt_work( int matrix_order, char side, char trans,\n                                 lapack_int m, lapack_int n, lapack_int k,\n                                 lapack_int l, lapack_int nb,\n                                 const lapack_complex_double* v, lapack_int ldv,\n                                 const lapack_complex_double* t, lapack_int ldt,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* work );\n\nlapack_int LAPACKE_dtpqrt_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int l, lapack_int nb, double* a,\n                                lapack_int lda, double* b, lapack_int ldb,\n                                double* t, lapack_int ldt, double* work );\nlapack_int LAPACKE_ctpqrt_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int l, lapack_int nb,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* t,\n                                lapack_complex_float* b, lapack_int ldb,\n                                lapack_int ldt, lapack_complex_float* work );\nlapack_int LAPACKE_ztpqrt_work( int matrix_order, lapack_int m, lapack_int n,\n                                lapack_int l, lapack_int nb,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                lapack_complex_double* t, lapack_int ldt,\n                                lapack_complex_double* work );\n\nlapack_int LAPACKE_stpqrt2_work( int matrix_order, lapack_int m, lapack_int n,\n                                 float* a, lapack_int lda, float* b,\n                                 lapack_int ldb, float* t, 
lapack_int ldt );\nlapack_int LAPACKE_dtpqrt2_work( int matrix_order, lapack_int m, lapack_int n,\n                                 double* a, lapack_int lda, double* b,\n                                 lapack_int ldb, double* t, lapack_int ldt );\nlapack_int LAPACKE_ctpqrt2_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_complex_float* a, lapack_int lda,\n                                 lapack_complex_float* b, lapack_int ldb,\n                                 lapack_complex_float* t, lapack_int ldt );\nlapack_int LAPACKE_ztpqrt2_work( int matrix_order, lapack_int m, lapack_int n,\n                                 lapack_complex_double* a, lapack_int lda,\n                                 lapack_complex_double* b, lapack_int ldb,\n                                 lapack_complex_double* t, lapack_int ldt );\n\nlapack_int LAPACKE_stprfb_work( int matrix_order, char side, char trans,\n                                char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                const float* v, lapack_int ldv, const float* t,\n                                lapack_int ldt, float* a, lapack_int lda,\n                                float* b, lapack_int ldb, const float* mywork,\n                                lapack_int myldwork );\nlapack_int LAPACKE_dtprfb_work( int matrix_order, char side, char trans,\n                                char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                const double* v, lapack_int ldv,\n                                const double* t, lapack_int ldt, double* a,\n                                lapack_int lda, double* b, lapack_int ldb,\n                                const double* mywork, lapack_int myldwork );\nlapack_int LAPACKE_ctprfb_work( int matrix_order, char side, char trans,\n                        
        char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                const lapack_complex_float* v, lapack_int ldv,\n                                const lapack_complex_float* t, lapack_int ldt,\n                                lapack_complex_float* a, lapack_int lda,\n                                lapack_complex_float* b, lapack_int ldb,\n                                const float* mywork, lapack_int myldwork );\nlapack_int LAPACKE_ztprfb_work( int matrix_order, char side, char trans,\n                                char direct, char storev, lapack_int m,\n                                lapack_int n, lapack_int k, lapack_int l,\n                                const lapack_complex_double* v, lapack_int ldv,\n                                const lapack_complex_double* t, lapack_int ldt,\n                                lapack_complex_double* a, lapack_int lda,\n                                lapack_complex_double* b, lapack_int ldb,\n                                const double* mywork, lapack_int myldwork );\n//LAPACK 3.X.X\nlapack_int LAPACKE_csyr( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_float alpha,\n                             const lapack_complex_float* x, lapack_int incx,\n                             lapack_complex_float* a, lapack_int lda );\nlapack_int LAPACKE_zsyr( int matrix_order, char uplo, lapack_int n,\n                             lapack_complex_double alpha,\n                             const lapack_complex_double* x, lapack_int incx,\n                             lapack_complex_double* a, lapack_int lda );\n\nlapack_int LAPACKE_csyr_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_float alpha,\n                                  const lapack_complex_float* x,\n                                  lapack_int incx, lapack_complex_float* a,\n                      
            lapack_int lda );\nlapack_int LAPACKE_zsyr_work( int matrix_order, char uplo, lapack_int n,\n                                  lapack_complex_double alpha,\n                                  const lapack_complex_double* x,\n                                  lapack_int incx, lapack_complex_double* a,\n                                  lapack_int lda );\n\n\n\n#define LAPACK_sgetrf LAPACK_GLOBAL(sgetrf,SGETRF)\n#define LAPACK_dgetrf LAPACK_GLOBAL(dgetrf,DGETRF)\n#define LAPACK_cgetrf LAPACK_GLOBAL(cgetrf,CGETRF)\n#define LAPACK_zgetrf LAPACK_GLOBAL(zgetrf,ZGETRF)\n#define LAPACK_sgbtrf LAPACK_GLOBAL(sgbtrf,SGBTRF)\n#define LAPACK_dgbtrf LAPACK_GLOBAL(dgbtrf,DGBTRF)\n#define LAPACK_cgbtrf LAPACK_GLOBAL(cgbtrf,CGBTRF)\n#define LAPACK_zgbtrf LAPACK_GLOBAL(zgbtrf,ZGBTRF)\n#define LAPACK_sgttrf LAPACK_GLOBAL(sgttrf,SGTTRF)\n#define LAPACK_dgttrf LAPACK_GLOBAL(dgttrf,DGTTRF)\n#define LAPACK_cgttrf LAPACK_GLOBAL(cgttrf,CGTTRF)\n#define LAPACK_zgttrf LAPACK_GLOBAL(zgttrf,ZGTTRF)\n#define LAPACK_spotrf LAPACK_GLOBAL(spotrf,SPOTRF)\n#define LAPACK_dpotrf LAPACK_GLOBAL(dpotrf,DPOTRF)\n#define LAPACK_cpotrf LAPACK_GLOBAL(cpotrf,CPOTRF)\n#define LAPACK_zpotrf LAPACK_GLOBAL(zpotrf,ZPOTRF)\n#define LAPACK_dpstrf LAPACK_GLOBAL(dpstrf,DPSTRF)\n#define LAPACK_spstrf LAPACK_GLOBAL(spstrf,SPSTRF)\n#define LAPACK_zpstrf LAPACK_GLOBAL(zpstrf,ZPSTRF)\n#define LAPACK_cpstrf LAPACK_GLOBAL(cpstrf,CPSTRF)\n#define LAPACK_dpftrf LAPACK_GLOBAL(dpftrf,DPFTRF)\n#define LAPACK_spftrf LAPACK_GLOBAL(spftrf,SPFTRF)\n#define LAPACK_zpftrf LAPACK_GLOBAL(zpftrf,ZPFTRF)\n#define LAPACK_cpftrf LAPACK_GLOBAL(cpftrf,CPFTRF)\n#define LAPACK_spptrf LAPACK_GLOBAL(spptrf,SPPTRF)\n#define LAPACK_dpptrf LAPACK_GLOBAL(dpptrf,DPPTRF)\n#define LAPACK_cpptrf LAPACK_GLOBAL(cpptrf,CPPTRF)\n#define LAPACK_zpptrf LAPACK_GLOBAL(zpptrf,ZPPTRF)\n#define LAPACK_spbtrf LAPACK_GLOBAL(spbtrf,SPBTRF)\n#define LAPACK_dpbtrf LAPACK_GLOBAL(dpbtrf,DPBTRF)\n#define LAPACK_cpbtrf LAPACK_GLOBAL(cpbtrf,CPBTRF)\n#define 
LAPACK_zpbtrf LAPACK_GLOBAL(zpbtrf,ZPBTRF)\n#define LAPACK_spttrf LAPACK_GLOBAL(spttrf,SPTTRF)\n#define LAPACK_dpttrf LAPACK_GLOBAL(dpttrf,DPTTRF)\n#define LAPACK_cpttrf LAPACK_GLOBAL(cpttrf,CPTTRF)\n#define LAPACK_zpttrf LAPACK_GLOBAL(zpttrf,ZPTTRF)\n#define LAPACK_ssytrf LAPACK_GLOBAL(ssytrf,SSYTRF)\n#define LAPACK_dsytrf LAPACK_GLOBAL(dsytrf,DSYTRF)\n#define LAPACK_csytrf LAPACK_GLOBAL(csytrf,CSYTRF)\n#define LAPACK_zsytrf LAPACK_GLOBAL(zsytrf,ZSYTRF)\n#define LAPACK_chetrf LAPACK_GLOBAL(chetrf,CHETRF)\n#define LAPACK_zhetrf LAPACK_GLOBAL(zhetrf,ZHETRF)\n#define LAPACK_ssptrf LAPACK_GLOBAL(ssptrf,SSPTRF)\n#define LAPACK_dsptrf LAPACK_GLOBAL(dsptrf,DSPTRF)\n#define LAPACK_csptrf LAPACK_GLOBAL(csptrf,CSPTRF)\n#define LAPACK_zsptrf LAPACK_GLOBAL(zsptrf,ZSPTRF)\n#define LAPACK_chptrf LAPACK_GLOBAL(chptrf,CHPTRF)\n#define LAPACK_zhptrf LAPACK_GLOBAL(zhptrf,ZHPTRF)\n#define LAPACK_sgetrs LAPACK_GLOBAL(sgetrs,SGETRS)\n#define LAPACK_dgetrs LAPACK_GLOBAL(dgetrs,DGETRS)\n#define LAPACK_cgetrs LAPACK_GLOBAL(cgetrs,CGETRS)\n#define LAPACK_zgetrs LAPACK_GLOBAL(zgetrs,ZGETRS)\n#define LAPACK_sgbtrs LAPACK_GLOBAL(sgbtrs,SGBTRS)\n#define LAPACK_dgbtrs LAPACK_GLOBAL(dgbtrs,DGBTRS)\n#define LAPACK_cgbtrs LAPACK_GLOBAL(cgbtrs,CGBTRS)\n#define LAPACK_zgbtrs LAPACK_GLOBAL(zgbtrs,ZGBTRS)\n#define LAPACK_sgttrs LAPACK_GLOBAL(sgttrs,SGTTRS)\n#define LAPACK_dgttrs LAPACK_GLOBAL(dgttrs,DGTTRS)\n#define LAPACK_cgttrs LAPACK_GLOBAL(cgttrs,CGTTRS)\n#define LAPACK_zgttrs LAPACK_GLOBAL(zgttrs,ZGTTRS)\n#define LAPACK_spotrs LAPACK_GLOBAL(spotrs,SPOTRS)\n#define LAPACK_dpotrs LAPACK_GLOBAL(dpotrs,DPOTRS)\n#define LAPACK_cpotrs LAPACK_GLOBAL(cpotrs,CPOTRS)\n#define LAPACK_zpotrs LAPACK_GLOBAL(zpotrs,ZPOTRS)\n#define LAPACK_dpftrs LAPACK_GLOBAL(dpftrs,DPFTRS)\n#define LAPACK_spftrs LAPACK_GLOBAL(spftrs,SPFTRS)\n#define LAPACK_zpftrs LAPACK_GLOBAL(zpftrs,ZPFTRS)\n#define LAPACK_cpftrs LAPACK_GLOBAL(cpftrs,CPFTRS)\n#define LAPACK_spptrs LAPACK_GLOBAL(spptrs,SPPTRS)\n#define LAPACK_dpptrs 
LAPACK_GLOBAL(dpptrs,DPPTRS)\n#define LAPACK_cpptrs LAPACK_GLOBAL(cpptrs,CPPTRS)\n#define LAPACK_zpptrs LAPACK_GLOBAL(zpptrs,ZPPTRS)\n#define LAPACK_spbtrs LAPACK_GLOBAL(spbtrs,SPBTRS)\n#define LAPACK_dpbtrs LAPACK_GLOBAL(dpbtrs,DPBTRS)\n#define LAPACK_cpbtrs LAPACK_GLOBAL(cpbtrs,CPBTRS)\n#define LAPACK_zpbtrs LAPACK_GLOBAL(zpbtrs,ZPBTRS)\n#define LAPACK_spttrs LAPACK_GLOBAL(spttrs,SPTTRS)\n#define LAPACK_dpttrs LAPACK_GLOBAL(dpttrs,DPTTRS)\n#define LAPACK_cpttrs LAPACK_GLOBAL(cpttrs,CPTTRS)\n#define LAPACK_zpttrs LAPACK_GLOBAL(zpttrs,ZPTTRS)\n#define LAPACK_ssytrs LAPACK_GLOBAL(ssytrs,SSYTRS)\n#define LAPACK_dsytrs LAPACK_GLOBAL(dsytrs,DSYTRS)\n#define LAPACK_csytrs LAPACK_GLOBAL(csytrs,CSYTRS)\n#define LAPACK_zsytrs LAPACK_GLOBAL(zsytrs,ZSYTRS)\n#define LAPACK_chetrs LAPACK_GLOBAL(chetrs,CHETRS)\n#define LAPACK_zhetrs LAPACK_GLOBAL(zhetrs,ZHETRS)\n#define LAPACK_ssptrs LAPACK_GLOBAL(ssptrs,SSPTRS)\n#define LAPACK_dsptrs LAPACK_GLOBAL(dsptrs,DSPTRS)\n#define LAPACK_csptrs LAPACK_GLOBAL(csptrs,CSPTRS)\n#define LAPACK_zsptrs LAPACK_GLOBAL(zsptrs,ZSPTRS)\n#define LAPACK_chptrs LAPACK_GLOBAL(chptrs,CHPTRS)\n#define LAPACK_zhptrs LAPACK_GLOBAL(zhptrs,ZHPTRS)\n#define LAPACK_strtrs LAPACK_GLOBAL(strtrs,STRTRS)\n#define LAPACK_dtrtrs LAPACK_GLOBAL(dtrtrs,DTRTRS)\n#define LAPACK_ctrtrs LAPACK_GLOBAL(ctrtrs,CTRTRS)\n#define LAPACK_ztrtrs LAPACK_GLOBAL(ztrtrs,ZTRTRS)\n#define LAPACK_stptrs LAPACK_GLOBAL(stptrs,STPTRS)\n#define LAPACK_dtptrs LAPACK_GLOBAL(dtptrs,DTPTRS)\n#define LAPACK_ctptrs LAPACK_GLOBAL(ctptrs,CTPTRS)\n#define LAPACK_ztptrs LAPACK_GLOBAL(ztptrs,ZTPTRS)\n#define LAPACK_stbtrs LAPACK_GLOBAL(stbtrs,STBTRS)\n#define LAPACK_dtbtrs LAPACK_GLOBAL(dtbtrs,DTBTRS)\n#define LAPACK_ctbtrs LAPACK_GLOBAL(ctbtrs,CTBTRS)\n#define LAPACK_ztbtrs LAPACK_GLOBAL(ztbtrs,ZTBTRS)\n#define LAPACK_sgecon LAPACK_GLOBAL(sgecon,SGECON)\n#define LAPACK_dgecon LAPACK_GLOBAL(dgecon,DGECON)\n#define LAPACK_cgecon LAPACK_GLOBAL(cgecon,CGECON)\n#define LAPACK_zgecon 
LAPACK_GLOBAL(zgecon,ZGECON)\n#define LAPACK_sgbcon LAPACK_GLOBAL(sgbcon,SGBCON)\n#define LAPACK_dgbcon LAPACK_GLOBAL(dgbcon,DGBCON)\n#define LAPACK_cgbcon LAPACK_GLOBAL(cgbcon,CGBCON)\n#define LAPACK_zgbcon LAPACK_GLOBAL(zgbcon,ZGBCON)\n#define LAPACK_sgtcon LAPACK_GLOBAL(sgtcon,SGTCON)\n#define LAPACK_dgtcon LAPACK_GLOBAL(dgtcon,DGTCON)\n#define LAPACK_cgtcon LAPACK_GLOBAL(cgtcon,CGTCON)\n#define LAPACK_zgtcon LAPACK_GLOBAL(zgtcon,ZGTCON)\n#define LAPACK_spocon LAPACK_GLOBAL(spocon,SPOCON)\n#define LAPACK_dpocon LAPACK_GLOBAL(dpocon,DPOCON)\n#define LAPACK_cpocon LAPACK_GLOBAL(cpocon,CPOCON)\n#define LAPACK_zpocon LAPACK_GLOBAL(zpocon,ZPOCON)\n#define LAPACK_sppcon LAPACK_GLOBAL(sppcon,SPPCON)\n#define LAPACK_dppcon LAPACK_GLOBAL(dppcon,DPPCON)\n#define LAPACK_cppcon LAPACK_GLOBAL(cppcon,CPPCON)\n#define LAPACK_zppcon LAPACK_GLOBAL(zppcon,ZPPCON)\n#define LAPACK_spbcon LAPACK_GLOBAL(spbcon,SPBCON)\n#define LAPACK_dpbcon LAPACK_GLOBAL(dpbcon,DPBCON)\n#define LAPACK_cpbcon LAPACK_GLOBAL(cpbcon,CPBCON)\n#define LAPACK_zpbcon LAPACK_GLOBAL(zpbcon,ZPBCON)\n#define LAPACK_sptcon LAPACK_GLOBAL(sptcon,SPTCON)\n#define LAPACK_dptcon LAPACK_GLOBAL(dptcon,DPTCON)\n#define LAPACK_cptcon LAPACK_GLOBAL(cptcon,CPTCON)\n#define LAPACK_zptcon LAPACK_GLOBAL(zptcon,ZPTCON)\n#define LAPACK_ssycon LAPACK_GLOBAL(ssycon,SSYCON)\n#define LAPACK_dsycon LAPACK_GLOBAL(dsycon,DSYCON)\n#define LAPACK_csycon LAPACK_GLOBAL(csycon,CSYCON)\n#define LAPACK_zsycon LAPACK_GLOBAL(zsycon,ZSYCON)\n#define LAPACK_checon LAPACK_GLOBAL(checon,CHECON)\n#define LAPACK_zhecon LAPACK_GLOBAL(zhecon,ZHECON)\n#define LAPACK_sspcon LAPACK_GLOBAL(sspcon,SSPCON)\n#define LAPACK_dspcon LAPACK_GLOBAL(dspcon,DSPCON)\n#define LAPACK_cspcon LAPACK_GLOBAL(cspcon,CSPCON)\n#define LAPACK_zspcon LAPACK_GLOBAL(zspcon,ZSPCON)\n#define LAPACK_chpcon LAPACK_GLOBAL(chpcon,CHPCON)\n#define LAPACK_zhpcon LAPACK_GLOBAL(zhpcon,ZHPCON)\n#define LAPACK_strcon LAPACK_GLOBAL(strcon,STRCON)\n#define LAPACK_dtrcon 
LAPACK_GLOBAL(dtrcon,DTRCON)\n#define LAPACK_ctrcon LAPACK_GLOBAL(ctrcon,CTRCON)\n#define LAPACK_ztrcon LAPACK_GLOBAL(ztrcon,ZTRCON)\n#define LAPACK_stpcon LAPACK_GLOBAL(stpcon,STPCON)\n#define LAPACK_dtpcon LAPACK_GLOBAL(dtpcon,DTPCON)\n#define LAPACK_ctpcon LAPACK_GLOBAL(ctpcon,CTPCON)\n#define LAPACK_ztpcon LAPACK_GLOBAL(ztpcon,ZTPCON)\n#define LAPACK_stbcon LAPACK_GLOBAL(stbcon,STBCON)\n#define LAPACK_dtbcon LAPACK_GLOBAL(dtbcon,DTBCON)\n#define LAPACK_ctbcon LAPACK_GLOBAL(ctbcon,CTBCON)\n#define LAPACK_ztbcon LAPACK_GLOBAL(ztbcon,ZTBCON)\n#define LAPACK_sgerfs LAPACK_GLOBAL(sgerfs,SGERFS)\n#define LAPACK_dgerfs LAPACK_GLOBAL(dgerfs,DGERFS)\n#define LAPACK_cgerfs LAPACK_GLOBAL(cgerfs,CGERFS)\n#define LAPACK_zgerfs LAPACK_GLOBAL(zgerfs,ZGERFS)\n#define LAPACK_dgerfsx LAPACK_GLOBAL(dgerfsx,DGERFSX)\n#define LAPACK_sgerfsx LAPACK_GLOBAL(sgerfsx,SGERFSX)\n#define LAPACK_zgerfsx LAPACK_GLOBAL(zgerfsx,ZGERFSX)\n#define LAPACK_cgerfsx LAPACK_GLOBAL(cgerfsx,CGERFSX)\n#define LAPACK_sgbrfs LAPACK_GLOBAL(sgbrfs,SGBRFS)\n#define LAPACK_dgbrfs LAPACK_GLOBAL(dgbrfs,DGBRFS)\n#define LAPACK_cgbrfs LAPACK_GLOBAL(cgbrfs,CGBRFS)\n#define LAPACK_zgbrfs LAPACK_GLOBAL(zgbrfs,ZGBRFS)\n#define LAPACK_dgbrfsx LAPACK_GLOBAL(dgbrfsx,DGBRFSX)\n#define LAPACK_sgbrfsx LAPACK_GLOBAL(sgbrfsx,SGBRFSX)\n#define LAPACK_zgbrfsx LAPACK_GLOBAL(zgbrfsx,ZGBRFSX)\n#define LAPACK_cgbrfsx LAPACK_GLOBAL(cgbrfsx,CGBRFSX)\n#define LAPACK_sgtrfs LAPACK_GLOBAL(sgtrfs,SGTRFS)\n#define LAPACK_dgtrfs LAPACK_GLOBAL(dgtrfs,DGTRFS)\n#define LAPACK_cgtrfs LAPACK_GLOBAL(cgtrfs,CGTRFS)\n#define LAPACK_zgtrfs LAPACK_GLOBAL(zgtrfs,ZGTRFS)\n#define LAPACK_sporfs LAPACK_GLOBAL(sporfs,SPORFS)\n#define LAPACK_dporfs LAPACK_GLOBAL(dporfs,DPORFS)\n#define LAPACK_cporfs LAPACK_GLOBAL(cporfs,CPORFS)\n#define LAPACK_zporfs LAPACK_GLOBAL(zporfs,ZPORFS)\n#define LAPACK_dporfsx LAPACK_GLOBAL(dporfsx,DPORFSX)\n#define LAPACK_sporfsx LAPACK_GLOBAL(sporfsx,SPORFSX)\n#define LAPACK_zporfsx LAPACK_GLOBAL(zporfsx,ZPORFSX)\n#define 
LAPACK_cporfsx LAPACK_GLOBAL(cporfsx,CPORFSX)\n#define LAPACK_spprfs LAPACK_GLOBAL(spprfs,SPPRFS)\n#define LAPACK_dpprfs LAPACK_GLOBAL(dpprfs,DPPRFS)\n#define LAPACK_cpprfs LAPACK_GLOBAL(cpprfs,CPPRFS)\n#define LAPACK_zpprfs LAPACK_GLOBAL(zpprfs,ZPPRFS)\n#define LAPACK_spbrfs LAPACK_GLOBAL(spbrfs,SPBRFS)\n#define LAPACK_dpbrfs LAPACK_GLOBAL(dpbrfs,DPBRFS)\n#define LAPACK_cpbrfs LAPACK_GLOBAL(cpbrfs,CPBRFS)\n#define LAPACK_zpbrfs LAPACK_GLOBAL(zpbrfs,ZPBRFS)\n#define LAPACK_sptrfs LAPACK_GLOBAL(sptrfs,SPTRFS)\n#define LAPACK_dptrfs LAPACK_GLOBAL(dptrfs,DPTRFS)\n#define LAPACK_cptrfs LAPACK_GLOBAL(cptrfs,CPTRFS)\n#define LAPACK_zptrfs LAPACK_GLOBAL(zptrfs,ZPTRFS)\n#define LAPACK_ssyrfs LAPACK_GLOBAL(ssyrfs,SSYRFS)\n#define LAPACK_dsyrfs LAPACK_GLOBAL(dsyrfs,DSYRFS)\n#define LAPACK_csyrfs LAPACK_GLOBAL(csyrfs,CSYRFS)\n#define LAPACK_zsyrfs LAPACK_GLOBAL(zsyrfs,ZSYRFS)\n#define LAPACK_dsyrfsx LAPACK_GLOBAL(dsyrfsx,DSYRFSX)\n#define LAPACK_ssyrfsx LAPACK_GLOBAL(ssyrfsx,SSYRFSX)\n#define LAPACK_zsyrfsx LAPACK_GLOBAL(zsyrfsx,ZSYRFSX)\n#define LAPACK_csyrfsx LAPACK_GLOBAL(csyrfsx,CSYRFSX)\n#define LAPACK_cherfs LAPACK_GLOBAL(cherfs,CHERFS)\n#define LAPACK_zherfs LAPACK_GLOBAL(zherfs,ZHERFS)\n#define LAPACK_zherfsx LAPACK_GLOBAL(zherfsx,ZHERFSX)\n#define LAPACK_cherfsx LAPACK_GLOBAL(cherfsx,CHERFSX)\n#define LAPACK_ssprfs LAPACK_GLOBAL(ssprfs,SSPRFS)\n#define LAPACK_dsprfs LAPACK_GLOBAL(dsprfs,DSPRFS)\n#define LAPACK_csprfs LAPACK_GLOBAL(csprfs,CSPRFS)\n#define LAPACK_zsprfs LAPACK_GLOBAL(zsprfs,ZSPRFS)\n#define LAPACK_chprfs LAPACK_GLOBAL(chprfs,CHPRFS)\n#define LAPACK_zhprfs LAPACK_GLOBAL(zhprfs,ZHPRFS)\n#define LAPACK_strrfs LAPACK_GLOBAL(strrfs,STRRFS)\n#define LAPACK_dtrrfs LAPACK_GLOBAL(dtrrfs,DTRRFS)\n#define LAPACK_ctrrfs LAPACK_GLOBAL(ctrrfs,CTRRFS)\n#define LAPACK_ztrrfs LAPACK_GLOBAL(ztrrfs,ZTRRFS)\n#define LAPACK_stprfs LAPACK_GLOBAL(stprfs,STPRFS)\n#define LAPACK_dtprfs LAPACK_GLOBAL(dtprfs,DTPRFS)\n#define LAPACK_ctprfs LAPACK_GLOBAL(ctprfs,CTPRFS)\n#define 
LAPACK_ztprfs LAPACK_GLOBAL(ztprfs,ZTPRFS)\n#define LAPACK_stbrfs LAPACK_GLOBAL(stbrfs,STBRFS)\n#define LAPACK_dtbrfs LAPACK_GLOBAL(dtbrfs,DTBRFS)\n#define LAPACK_ctbrfs LAPACK_GLOBAL(ctbrfs,CTBRFS)\n#define LAPACK_ztbrfs LAPACK_GLOBAL(ztbrfs,ZTBRFS)\n#define LAPACK_sgetri LAPACK_GLOBAL(sgetri,SGETRI)\n#define LAPACK_dgetri LAPACK_GLOBAL(dgetri,DGETRI)\n#define LAPACK_cgetri LAPACK_GLOBAL(cgetri,CGETRI)\n#define LAPACK_zgetri LAPACK_GLOBAL(zgetri,ZGETRI)\n#define LAPACK_spotri LAPACK_GLOBAL(spotri,SPOTRI)\n#define LAPACK_dpotri LAPACK_GLOBAL(dpotri,DPOTRI)\n#define LAPACK_cpotri LAPACK_GLOBAL(cpotri,CPOTRI)\n#define LAPACK_zpotri LAPACK_GLOBAL(zpotri,ZPOTRI)\n#define LAPACK_dpftri LAPACK_GLOBAL(dpftri,DPFTRI)\n#define LAPACK_spftri LAPACK_GLOBAL(spftri,SPFTRI)\n#define LAPACK_zpftri LAPACK_GLOBAL(zpftri,ZPFTRI)\n#define LAPACK_cpftri LAPACK_GLOBAL(cpftri,CPFTRI)\n#define LAPACK_spptri LAPACK_GLOBAL(spptri,SPPTRI)\n#define LAPACK_dpptri LAPACK_GLOBAL(dpptri,DPPTRI)\n#define LAPACK_cpptri LAPACK_GLOBAL(cpptri,CPPTRI)\n#define LAPACK_zpptri LAPACK_GLOBAL(zpptri,ZPPTRI)\n#define LAPACK_ssytri LAPACK_GLOBAL(ssytri,SSYTRI)\n#define LAPACK_dsytri LAPACK_GLOBAL(dsytri,DSYTRI)\n#define LAPACK_csytri LAPACK_GLOBAL(csytri,CSYTRI)\n#define LAPACK_zsytri LAPACK_GLOBAL(zsytri,ZSYTRI)\n#define LAPACK_chetri LAPACK_GLOBAL(chetri,CHETRI)\n#define LAPACK_zhetri LAPACK_GLOBAL(zhetri,ZHETRI)\n#define LAPACK_ssptri LAPACK_GLOBAL(ssptri,SSPTRI)\n#define LAPACK_dsptri LAPACK_GLOBAL(dsptri,DSPTRI)\n#define LAPACK_csptri LAPACK_GLOBAL(csptri,CSPTRI)\n#define LAPACK_zsptri LAPACK_GLOBAL(zsptri,ZSPTRI)\n#define LAPACK_chptri LAPACK_GLOBAL(chptri,CHPTRI)\n#define LAPACK_zhptri LAPACK_GLOBAL(zhptri,ZHPTRI)\n#define LAPACK_strtri LAPACK_GLOBAL(strtri,STRTRI)\n#define LAPACK_dtrtri LAPACK_GLOBAL(dtrtri,DTRTRI)\n#define LAPACK_ctrtri LAPACK_GLOBAL(ctrtri,CTRTRI)\n#define LAPACK_ztrtri LAPACK_GLOBAL(ztrtri,ZTRTRI)\n#define LAPACK_dtftri LAPACK_GLOBAL(dtftri,DTFTRI)\n#define LAPACK_stftri 
LAPACK_GLOBAL(stftri,STFTRI)\n#define LAPACK_ztftri LAPACK_GLOBAL(ztftri,ZTFTRI)\n#define LAPACK_ctftri LAPACK_GLOBAL(ctftri,CTFTRI)\n#define LAPACK_stptri LAPACK_GLOBAL(stptri,STPTRI)\n#define LAPACK_dtptri LAPACK_GLOBAL(dtptri,DTPTRI)\n#define LAPACK_ctptri LAPACK_GLOBAL(ctptri,CTPTRI)\n#define LAPACK_ztptri LAPACK_GLOBAL(ztptri,ZTPTRI)\n#define LAPACK_sgeequ LAPACK_GLOBAL(sgeequ,SGEEQU)\n#define LAPACK_dgeequ LAPACK_GLOBAL(dgeequ,DGEEQU)\n#define LAPACK_cgeequ LAPACK_GLOBAL(cgeequ,CGEEQU)\n#define LAPACK_zgeequ LAPACK_GLOBAL(zgeequ,ZGEEQU)\n#define LAPACK_dgeequb LAPACK_GLOBAL(dgeequb,DGEEQUB)\n#define LAPACK_sgeequb LAPACK_GLOBAL(sgeequb,SGEEQUB)\n#define LAPACK_zgeequb LAPACK_GLOBAL(zgeequb,ZGEEQUB)\n#define LAPACK_cgeequb LAPACK_GLOBAL(cgeequb,CGEEQUB)\n#define LAPACK_sgbequ LAPACK_GLOBAL(sgbequ,SGBEQU)\n#define LAPACK_dgbequ LAPACK_GLOBAL(dgbequ,DGBEQU)\n#define LAPACK_cgbequ LAPACK_GLOBAL(cgbequ,CGBEQU)\n#define LAPACK_zgbequ LAPACK_GLOBAL(zgbequ,ZGBEQU)\n#define LAPACK_dgbequb LAPACK_GLOBAL(dgbequb,DGBEQUB)\n#define LAPACK_sgbequb LAPACK_GLOBAL(sgbequb,SGBEQUB)\n#define LAPACK_zgbequb LAPACK_GLOBAL(zgbequb,ZGBEQUB)\n#define LAPACK_cgbequb LAPACK_GLOBAL(cgbequb,CGBEQUB)\n#define LAPACK_spoequ LAPACK_GLOBAL(spoequ,SPOEQU)\n#define LAPACK_dpoequ LAPACK_GLOBAL(dpoequ,DPOEQU)\n#define LAPACK_cpoequ LAPACK_GLOBAL(cpoequ,CPOEQU)\n#define LAPACK_zpoequ LAPACK_GLOBAL(zpoequ,ZPOEQU)\n#define LAPACK_dpoequb LAPACK_GLOBAL(dpoequb,DPOEQUB)\n#define LAPACK_spoequb LAPACK_GLOBAL(spoequb,SPOEQUB)\n#define LAPACK_zpoequb LAPACK_GLOBAL(zpoequb,ZPOEQUB)\n#define LAPACK_cpoequb LAPACK_GLOBAL(cpoequb,CPOEQUB)\n#define LAPACK_sppequ LAPACK_GLOBAL(sppequ,SPPEQU)\n#define LAPACK_dppequ LAPACK_GLOBAL(dppequ,DPPEQU)\n#define LAPACK_cppequ LAPACK_GLOBAL(cppequ,CPPEQU)\n#define LAPACK_zppequ LAPACK_GLOBAL(zppequ,ZPPEQU)\n#define LAPACK_spbequ LAPACK_GLOBAL(spbequ,SPBEQU)\n#define LAPACK_dpbequ LAPACK_GLOBAL(dpbequ,DPBEQU)\n#define LAPACK_cpbequ LAPACK_GLOBAL(cpbequ,CPBEQU)\n#define 
LAPACK_zpbequ LAPACK_GLOBAL(zpbequ,ZPBEQU)\n#define LAPACK_dsyequb LAPACK_GLOBAL(dsyequb,DSYEQUB)\n#define LAPACK_ssyequb LAPACK_GLOBAL(ssyequb,SSYEQUB)\n#define LAPACK_zsyequb LAPACK_GLOBAL(zsyequb,ZSYEQUB)\n#define LAPACK_csyequb LAPACK_GLOBAL(csyequb,CSYEQUB)\n#define LAPACK_zheequb LAPACK_GLOBAL(zheequb,ZHEEQUB)\n#define LAPACK_cheequb LAPACK_GLOBAL(cheequb,CHEEQUB)\n#define LAPACK_sgesv LAPACK_GLOBAL(sgesv,SGESV)\n#define LAPACK_dgesv LAPACK_GLOBAL(dgesv,DGESV)\n#define LAPACK_cgesv LAPACK_GLOBAL(cgesv,CGESV)\n#define LAPACK_zgesv LAPACK_GLOBAL(zgesv,ZGESV)\n#define LAPACK_dsgesv LAPACK_GLOBAL(dsgesv,DSGESV)\n#define LAPACK_zcgesv LAPACK_GLOBAL(zcgesv,ZCGESV)\n#define LAPACK_sgesvx LAPACK_GLOBAL(sgesvx,SGESVX)\n#define LAPACK_dgesvx LAPACK_GLOBAL(dgesvx,DGESVX)\n#define LAPACK_cgesvx LAPACK_GLOBAL(cgesvx,CGESVX)\n#define LAPACK_zgesvx LAPACK_GLOBAL(zgesvx,ZGESVX)\n#define LAPACK_dgesvxx LAPACK_GLOBAL(dgesvxx,DGESVXX)\n#define LAPACK_sgesvxx LAPACK_GLOBAL(sgesvxx,SGESVXX)\n#define LAPACK_zgesvxx LAPACK_GLOBAL(zgesvxx,ZGESVXX)\n#define LAPACK_cgesvxx LAPACK_GLOBAL(cgesvxx,CGESVXX)\n#define LAPACK_sgbsv LAPACK_GLOBAL(sgbsv,SGBSV)\n#define LAPACK_dgbsv LAPACK_GLOBAL(dgbsv,DGBSV)\n#define LAPACK_cgbsv LAPACK_GLOBAL(cgbsv,CGBSV)\n#define LAPACK_zgbsv LAPACK_GLOBAL(zgbsv,ZGBSV)\n#define LAPACK_sgbsvx LAPACK_GLOBAL(sgbsvx,SGBSVX)\n#define LAPACK_dgbsvx LAPACK_GLOBAL(dgbsvx,DGBSVX)\n#define LAPACK_cgbsvx LAPACK_GLOBAL(cgbsvx,CGBSVX)\n#define LAPACK_zgbsvx LAPACK_GLOBAL(zgbsvx,ZGBSVX)\n#define LAPACK_dgbsvxx LAPACK_GLOBAL(dgbsvxx,DGBSVXX)\n#define LAPACK_sgbsvxx LAPACK_GLOBAL(sgbsvxx,SGBSVXX)\n#define LAPACK_zgbsvxx LAPACK_GLOBAL(zgbsvxx,ZGBSVXX)\n#define LAPACK_cgbsvxx LAPACK_GLOBAL(cgbsvxx,CGBSVXX)\n#define LAPACK_sgtsv LAPACK_GLOBAL(sgtsv,SGTSV)\n#define LAPACK_dgtsv LAPACK_GLOBAL(dgtsv,DGTSV)\n#define LAPACK_cgtsv LAPACK_GLOBAL(cgtsv,CGTSV)\n#define LAPACK_zgtsv LAPACK_GLOBAL(zgtsv,ZGTSV)\n#define LAPACK_sgtsvx LAPACK_GLOBAL(sgtsvx,SGTSVX)\n#define LAPACK_dgtsvx 
LAPACK_GLOBAL(dgtsvx,DGTSVX)\n#define LAPACK_cgtsvx LAPACK_GLOBAL(cgtsvx,CGTSVX)\n#define LAPACK_zgtsvx LAPACK_GLOBAL(zgtsvx,ZGTSVX)\n#define LAPACK_sposv LAPACK_GLOBAL(sposv,SPOSV)\n#define LAPACK_dposv LAPACK_GLOBAL(dposv,DPOSV)\n#define LAPACK_cposv LAPACK_GLOBAL(cposv,CPOSV)\n#define LAPACK_zposv LAPACK_GLOBAL(zposv,ZPOSV)\n#define LAPACK_dsposv LAPACK_GLOBAL(dsposv,DSPOSV)\n#define LAPACK_zcposv LAPACK_GLOBAL(zcposv,ZCPOSV)\n#define LAPACK_sposvx LAPACK_GLOBAL(sposvx,SPOSVX)\n#define LAPACK_dposvx LAPACK_GLOBAL(dposvx,DPOSVX)\n#define LAPACK_cposvx LAPACK_GLOBAL(cposvx,CPOSVX)\n#define LAPACK_zposvx LAPACK_GLOBAL(zposvx,ZPOSVX)\n#define LAPACK_dposvxx LAPACK_GLOBAL(dposvxx,DPOSVXX)\n#define LAPACK_sposvxx LAPACK_GLOBAL(sposvxx,SPOSVXX)\n#define LAPACK_zposvxx LAPACK_GLOBAL(zposvxx,ZPOSVXX)\n#define LAPACK_cposvxx LAPACK_GLOBAL(cposvxx,CPOSVXX)\n#define LAPACK_sppsv LAPACK_GLOBAL(sppsv,SPPSV)\n#define LAPACK_dppsv LAPACK_GLOBAL(dppsv,DPPSV)\n#define LAPACK_cppsv LAPACK_GLOBAL(cppsv,CPPSV)\n#define LAPACK_zppsv LAPACK_GLOBAL(zppsv,ZPPSV)\n#define LAPACK_sppsvx LAPACK_GLOBAL(sppsvx,SPPSVX)\n#define LAPACK_dppsvx LAPACK_GLOBAL(dppsvx,DPPSVX)\n#define LAPACK_cppsvx LAPACK_GLOBAL(cppsvx,CPPSVX)\n#define LAPACK_zppsvx LAPACK_GLOBAL(zppsvx,ZPPSVX)\n#define LAPACK_spbsv LAPACK_GLOBAL(spbsv,SPBSV)\n#define LAPACK_dpbsv LAPACK_GLOBAL(dpbsv,DPBSV)\n#define LAPACK_cpbsv LAPACK_GLOBAL(cpbsv,CPBSV)\n#define LAPACK_zpbsv LAPACK_GLOBAL(zpbsv,ZPBSV)\n#define LAPACK_spbsvx LAPACK_GLOBAL(spbsvx,SPBSVX)\n#define LAPACK_dpbsvx LAPACK_GLOBAL(dpbsvx,DPBSVX)\n#define LAPACK_cpbsvx LAPACK_GLOBAL(cpbsvx,CPBSVX)\n#define LAPACK_zpbsvx LAPACK_GLOBAL(zpbsvx,ZPBSVX)\n#define LAPACK_sptsv LAPACK_GLOBAL(sptsv,SPTSV)\n#define LAPACK_dptsv LAPACK_GLOBAL(dptsv,DPTSV)\n#define LAPACK_cptsv LAPACK_GLOBAL(cptsv,CPTSV)\n#define LAPACK_zptsv LAPACK_GLOBAL(zptsv,ZPTSV)\n#define LAPACK_sptsvx LAPACK_GLOBAL(sptsvx,SPTSVX)\n#define LAPACK_dptsvx LAPACK_GLOBAL(dptsvx,DPTSVX)\n#define LAPACK_cptsvx 
LAPACK_GLOBAL(cptsvx,CPTSVX)\n#define LAPACK_zptsvx LAPACK_GLOBAL(zptsvx,ZPTSVX)\n#define LAPACK_ssysv LAPACK_GLOBAL(ssysv,SSYSV)\n#define LAPACK_dsysv LAPACK_GLOBAL(dsysv,DSYSV)\n#define LAPACK_csysv LAPACK_GLOBAL(csysv,CSYSV)\n#define LAPACK_zsysv LAPACK_GLOBAL(zsysv,ZSYSV)\n#define LAPACK_ssysvx LAPACK_GLOBAL(ssysvx,SSYSVX)\n#define LAPACK_dsysvx LAPACK_GLOBAL(dsysvx,DSYSVX)\n#define LAPACK_csysvx LAPACK_GLOBAL(csysvx,CSYSVX)\n#define LAPACK_zsysvx LAPACK_GLOBAL(zsysvx,ZSYSVX)\n#define LAPACK_dsysvxx LAPACK_GLOBAL(dsysvxx,DSYSVXX)\n#define LAPACK_ssysvxx LAPACK_GLOBAL(ssysvxx,SSYSVXX)\n#define LAPACK_zsysvxx LAPACK_GLOBAL(zsysvxx,ZSYSVXX)\n#define LAPACK_csysvxx LAPACK_GLOBAL(csysvxx,CSYSVXX)\n#define LAPACK_chesv LAPACK_GLOBAL(chesv,CHESV)\n#define LAPACK_zhesv LAPACK_GLOBAL(zhesv,ZHESV)\n#define LAPACK_chesvx LAPACK_GLOBAL(chesvx,CHESVX)\n#define LAPACK_zhesvx LAPACK_GLOBAL(zhesvx,ZHESVX)\n#define LAPACK_zhesvxx LAPACK_GLOBAL(zhesvxx,ZHESVXX)\n#define LAPACK_chesvxx LAPACK_GLOBAL(chesvxx,CHESVXX)\n#define LAPACK_sspsv LAPACK_GLOBAL(sspsv,SSPSV)\n#define LAPACK_dspsv LAPACK_GLOBAL(dspsv,DSPSV)\n#define LAPACK_cspsv LAPACK_GLOBAL(cspsv,CSPSV)\n#define LAPACK_zspsv LAPACK_GLOBAL(zspsv,ZSPSV)\n#define LAPACK_sspsvx LAPACK_GLOBAL(sspsvx,SSPSVX)\n#define LAPACK_dspsvx LAPACK_GLOBAL(dspsvx,DSPSVX)\n#define LAPACK_cspsvx LAPACK_GLOBAL(cspsvx,CSPSVX)\n#define LAPACK_zspsvx LAPACK_GLOBAL(zspsvx,ZSPSVX)\n#define LAPACK_chpsv LAPACK_GLOBAL(chpsv,CHPSV)\n#define LAPACK_zhpsv LAPACK_GLOBAL(zhpsv,ZHPSV)\n#define LAPACK_chpsvx LAPACK_GLOBAL(chpsvx,CHPSVX)\n#define LAPACK_zhpsvx LAPACK_GLOBAL(zhpsvx,ZHPSVX)\n#define LAPACK_sgeqrf LAPACK_GLOBAL(sgeqrf,SGEQRF)\n#define LAPACK_dgeqrf LAPACK_GLOBAL(dgeqrf,DGEQRF)\n#define LAPACK_cgeqrf LAPACK_GLOBAL(cgeqrf,CGEQRF)\n#define LAPACK_zgeqrf LAPACK_GLOBAL(zgeqrf,ZGEQRF)\n#define LAPACK_sgeqpf LAPACK_GLOBAL(sgeqpf,SGEQPF)\n#define LAPACK_dgeqpf LAPACK_GLOBAL(dgeqpf,DGEQPF)\n#define LAPACK_cgeqpf LAPACK_GLOBAL(cgeqpf,CGEQPF)\n#define 
LAPACK_zgeqpf LAPACK_GLOBAL(zgeqpf,ZGEQPF)\n#define LAPACK_sgeqp3 LAPACK_GLOBAL(sgeqp3,SGEQP3)\n#define LAPACK_dgeqp3 LAPACK_GLOBAL(dgeqp3,DGEQP3)\n#define LAPACK_cgeqp3 LAPACK_GLOBAL(cgeqp3,CGEQP3)\n#define LAPACK_zgeqp3 LAPACK_GLOBAL(zgeqp3,ZGEQP3)\n#define LAPACK_sorgqr LAPACK_GLOBAL(sorgqr,SORGQR)\n#define LAPACK_dorgqr LAPACK_GLOBAL(dorgqr,DORGQR)\n#define LAPACK_sormqr LAPACK_GLOBAL(sormqr,SORMQR)\n#define LAPACK_dormqr LAPACK_GLOBAL(dormqr,DORMQR)\n#define LAPACK_cungqr LAPACK_GLOBAL(cungqr,CUNGQR)\n#define LAPACK_zungqr LAPACK_GLOBAL(zungqr,ZUNGQR)\n#define LAPACK_cunmqr LAPACK_GLOBAL(cunmqr,CUNMQR)\n#define LAPACK_zunmqr LAPACK_GLOBAL(zunmqr,ZUNMQR)\n#define LAPACK_sgelqf LAPACK_GLOBAL(sgelqf,SGELQF)\n#define LAPACK_dgelqf LAPACK_GLOBAL(dgelqf,DGELQF)\n#define LAPACK_cgelqf LAPACK_GLOBAL(cgelqf,CGELQF)\n#define LAPACK_zgelqf LAPACK_GLOBAL(zgelqf,ZGELQF)\n#define LAPACK_sorglq LAPACK_GLOBAL(sorglq,SORGLQ)\n#define LAPACK_dorglq LAPACK_GLOBAL(dorglq,DORGLQ)\n#define LAPACK_sormlq LAPACK_GLOBAL(sormlq,SORMLQ)\n#define LAPACK_dormlq LAPACK_GLOBAL(dormlq,DORMLQ)\n#define LAPACK_cunglq LAPACK_GLOBAL(cunglq,CUNGLQ)\n#define LAPACK_zunglq LAPACK_GLOBAL(zunglq,ZUNGLQ)\n#define LAPACK_cunmlq LAPACK_GLOBAL(cunmlq,CUNMLQ)\n#define LAPACK_zunmlq LAPACK_GLOBAL(zunmlq,ZUNMLQ)\n#define LAPACK_sgeqlf LAPACK_GLOBAL(sgeqlf,SGEQLF)\n#define LAPACK_dgeqlf LAPACK_GLOBAL(dgeqlf,DGEQLF)\n#define LAPACK_cgeqlf LAPACK_GLOBAL(cgeqlf,CGEQLF)\n#define LAPACK_zgeqlf LAPACK_GLOBAL(zgeqlf,ZGEQLF)\n#define LAPACK_sorgql LAPACK_GLOBAL(sorgql,SORGQL)\n#define LAPACK_dorgql LAPACK_GLOBAL(dorgql,DORGQL)\n#define LAPACK_cungql LAPACK_GLOBAL(cungql,CUNGQL)\n#define LAPACK_zungql LAPACK_GLOBAL(zungql,ZUNGQL)\n#define LAPACK_sormql LAPACK_GLOBAL(sormql,SORMQL)\n#define LAPACK_dormql LAPACK_GLOBAL(dormql,DORMQL)\n#define LAPACK_cunmql LAPACK_GLOBAL(cunmql,CUNMQL)\n#define LAPACK_zunmql LAPACK_GLOBAL(zunmql,ZUNMQL)\n#define LAPACK_sgerqf LAPACK_GLOBAL(sgerqf,SGERQF)\n#define LAPACK_dgerqf 
LAPACK_GLOBAL(dgerqf,DGERQF)\n#define LAPACK_cgerqf LAPACK_GLOBAL(cgerqf,CGERQF)\n#define LAPACK_zgerqf LAPACK_GLOBAL(zgerqf,ZGERQF)\n#define LAPACK_sorgrq LAPACK_GLOBAL(sorgrq,SORGRQ)\n#define LAPACK_dorgrq LAPACK_GLOBAL(dorgrq,DORGRQ)\n#define LAPACK_cungrq LAPACK_GLOBAL(cungrq,CUNGRQ)\n#define LAPACK_zungrq LAPACK_GLOBAL(zungrq,ZUNGRQ)\n#define LAPACK_sormrq LAPACK_GLOBAL(sormrq,SORMRQ)\n#define LAPACK_dormrq LAPACK_GLOBAL(dormrq,DORMRQ)\n#define LAPACK_cunmrq LAPACK_GLOBAL(cunmrq,CUNMRQ)\n#define LAPACK_zunmrq LAPACK_GLOBAL(zunmrq,ZUNMRQ)\n#define LAPACK_stzrzf LAPACK_GLOBAL(stzrzf,STZRZF)\n#define LAPACK_dtzrzf LAPACK_GLOBAL(dtzrzf,DTZRZF)\n#define LAPACK_ctzrzf LAPACK_GLOBAL(ctzrzf,CTZRZF)\n#define LAPACK_ztzrzf LAPACK_GLOBAL(ztzrzf,ZTZRZF)\n#define LAPACK_sormrz LAPACK_GLOBAL(sormrz,SORMRZ)\n#define LAPACK_dormrz LAPACK_GLOBAL(dormrz,DORMRZ)\n#define LAPACK_cunmrz LAPACK_GLOBAL(cunmrz,CUNMRZ)\n#define LAPACK_zunmrz LAPACK_GLOBAL(zunmrz,ZUNMRZ)\n#define LAPACK_sggqrf LAPACK_GLOBAL(sggqrf,SGGQRF)\n#define LAPACK_dggqrf LAPACK_GLOBAL(dggqrf,DGGQRF)\n#define LAPACK_cggqrf LAPACK_GLOBAL(cggqrf,CGGQRF)\n#define LAPACK_zggqrf LAPACK_GLOBAL(zggqrf,ZGGQRF)\n#define LAPACK_sggrqf LAPACK_GLOBAL(sggrqf,SGGRQF)\n#define LAPACK_dggrqf LAPACK_GLOBAL(dggrqf,DGGRQF)\n#define LAPACK_cggrqf LAPACK_GLOBAL(cggrqf,CGGRQF)\n#define LAPACK_zggrqf LAPACK_GLOBAL(zggrqf,ZGGRQF)\n#define LAPACK_sgebrd LAPACK_GLOBAL(sgebrd,SGEBRD)\n#define LAPACK_dgebrd LAPACK_GLOBAL(dgebrd,DGEBRD)\n#define LAPACK_cgebrd LAPACK_GLOBAL(cgebrd,CGEBRD)\n#define LAPACK_zgebrd LAPACK_GLOBAL(zgebrd,ZGEBRD)\n#define LAPACK_sgbbrd LAPACK_GLOBAL(sgbbrd,SGBBRD)\n#define LAPACK_dgbbrd LAPACK_GLOBAL(dgbbrd,DGBBRD)\n#define LAPACK_cgbbrd LAPACK_GLOBAL(cgbbrd,CGBBRD)\n#define LAPACK_zgbbrd LAPACK_GLOBAL(zgbbrd,ZGBBRD)\n#define LAPACK_sorgbr LAPACK_GLOBAL(sorgbr,SORGBR)\n#define LAPACK_dorgbr LAPACK_GLOBAL(dorgbr,DORGBR)\n#define LAPACK_sormbr LAPACK_GLOBAL(sormbr,SORMBR)\n#define LAPACK_dormbr 
LAPACK_GLOBAL(dormbr,DORMBR)\n#define LAPACK_cungbr LAPACK_GLOBAL(cungbr,CUNGBR)\n#define LAPACK_zungbr LAPACK_GLOBAL(zungbr,ZUNGBR)\n#define LAPACK_cunmbr LAPACK_GLOBAL(cunmbr,CUNMBR)\n#define LAPACK_zunmbr LAPACK_GLOBAL(zunmbr,ZUNMBR)\n#define LAPACK_sbdsqr LAPACK_GLOBAL(sbdsqr,SBDSQR)\n#define LAPACK_dbdsqr LAPACK_GLOBAL(dbdsqr,DBDSQR)\n#define LAPACK_cbdsqr LAPACK_GLOBAL(cbdsqr,CBDSQR)\n#define LAPACK_zbdsqr LAPACK_GLOBAL(zbdsqr,ZBDSQR)\n#define LAPACK_sbdsdc LAPACK_GLOBAL(sbdsdc,SBDSDC)\n#define LAPACK_dbdsdc LAPACK_GLOBAL(dbdsdc,DBDSDC)\n#define LAPACK_ssytrd LAPACK_GLOBAL(ssytrd,SSYTRD)\n#define LAPACK_dsytrd LAPACK_GLOBAL(dsytrd,DSYTRD)\n#define LAPACK_sorgtr LAPACK_GLOBAL(sorgtr,SORGTR)\n#define LAPACK_dorgtr LAPACK_GLOBAL(dorgtr,DORGTR)\n#define LAPACK_sormtr LAPACK_GLOBAL(sormtr,SORMTR)\n#define LAPACK_dormtr LAPACK_GLOBAL(dormtr,DORMTR)\n#define LAPACK_chetrd LAPACK_GLOBAL(chetrd,CHETRD)\n#define LAPACK_zhetrd LAPACK_GLOBAL(zhetrd,ZHETRD)\n#define LAPACK_cungtr LAPACK_GLOBAL(cungtr,CUNGTR)\n#define LAPACK_zungtr LAPACK_GLOBAL(zungtr,ZUNGTR)\n#define LAPACK_cunmtr LAPACK_GLOBAL(cunmtr,CUNMTR)\n#define LAPACK_zunmtr LAPACK_GLOBAL(zunmtr,ZUNMTR)\n#define LAPACK_ssptrd LAPACK_GLOBAL(ssptrd,SSPTRD)\n#define LAPACK_dsptrd LAPACK_GLOBAL(dsptrd,DSPTRD)\n#define LAPACK_sopgtr LAPACK_GLOBAL(sopgtr,SOPGTR)\n#define LAPACK_dopgtr LAPACK_GLOBAL(dopgtr,DOPGTR)\n#define LAPACK_sopmtr LAPACK_GLOBAL(sopmtr,SOPMTR)\n#define LAPACK_dopmtr LAPACK_GLOBAL(dopmtr,DOPMTR)\n#define LAPACK_chptrd LAPACK_GLOBAL(chptrd,CHPTRD)\n#define LAPACK_zhptrd LAPACK_GLOBAL(zhptrd,ZHPTRD)\n#define LAPACK_cupgtr LAPACK_GLOBAL(cupgtr,CUPGTR)\n#define LAPACK_zupgtr LAPACK_GLOBAL(zupgtr,ZUPGTR)\n#define LAPACK_cupmtr LAPACK_GLOBAL(cupmtr,CUPMTR)\n#define LAPACK_zupmtr LAPACK_GLOBAL(zupmtr,ZUPMTR)\n#define LAPACK_ssbtrd LAPACK_GLOBAL(ssbtrd,SSBTRD)\n#define LAPACK_dsbtrd LAPACK_GLOBAL(dsbtrd,DSBTRD)\n#define LAPACK_chbtrd LAPACK_GLOBAL(chbtrd,CHBTRD)\n#define LAPACK_zhbtrd 
LAPACK_GLOBAL(zhbtrd,ZHBTRD)\n#define LAPACK_ssterf LAPACK_GLOBAL(ssterf,SSTERF)\n#define LAPACK_dsterf LAPACK_GLOBAL(dsterf,DSTERF)\n#define LAPACK_ssteqr LAPACK_GLOBAL(ssteqr,SSTEQR)\n#define LAPACK_dsteqr LAPACK_GLOBAL(dsteqr,DSTEQR)\n#define LAPACK_csteqr LAPACK_GLOBAL(csteqr,CSTEQR)\n#define LAPACK_zsteqr LAPACK_GLOBAL(zsteqr,ZSTEQR)\n#define LAPACK_sstemr LAPACK_GLOBAL(sstemr,SSTEMR)\n#define LAPACK_dstemr LAPACK_GLOBAL(dstemr,DSTEMR)\n#define LAPACK_cstemr LAPACK_GLOBAL(cstemr,CSTEMR)\n#define LAPACK_zstemr LAPACK_GLOBAL(zstemr,ZSTEMR)\n#define LAPACK_sstedc LAPACK_GLOBAL(sstedc,SSTEDC)\n#define LAPACK_dstedc LAPACK_GLOBAL(dstedc,DSTEDC)\n#define LAPACK_cstedc LAPACK_GLOBAL(cstedc,CSTEDC)\n#define LAPACK_zstedc LAPACK_GLOBAL(zstedc,ZSTEDC)\n#define LAPACK_sstegr LAPACK_GLOBAL(sstegr,SSTEGR)\n#define LAPACK_dstegr LAPACK_GLOBAL(dstegr,DSTEGR)\n#define LAPACK_cstegr LAPACK_GLOBAL(cstegr,CSTEGR)\n#define LAPACK_zstegr LAPACK_GLOBAL(zstegr,ZSTEGR)\n#define LAPACK_spteqr LAPACK_GLOBAL(spteqr,SPTEQR)\n#define LAPACK_dpteqr LAPACK_GLOBAL(dpteqr,DPTEQR)\n#define LAPACK_cpteqr LAPACK_GLOBAL(cpteqr,CPTEQR)\n#define LAPACK_zpteqr LAPACK_GLOBAL(zpteqr,ZPTEQR)\n#define LAPACK_sstebz LAPACK_GLOBAL(sstebz,SSTEBZ)\n#define LAPACK_dstebz LAPACK_GLOBAL(dstebz,DSTEBZ)\n#define LAPACK_sstein LAPACK_GLOBAL(sstein,SSTEIN)\n#define LAPACK_dstein LAPACK_GLOBAL(dstein,DSTEIN)\n#define LAPACK_cstein LAPACK_GLOBAL(cstein,CSTEIN)\n#define LAPACK_zstein LAPACK_GLOBAL(zstein,ZSTEIN)\n#define LAPACK_sdisna LAPACK_GLOBAL(sdisna,SDISNA)\n#define LAPACK_ddisna LAPACK_GLOBAL(ddisna,DDISNA)\n#define LAPACK_ssygst LAPACK_GLOBAL(ssygst,SSYGST)\n#define LAPACK_dsygst LAPACK_GLOBAL(dsygst,DSYGST)\n#define LAPACK_chegst LAPACK_GLOBAL(chegst,CHEGST)\n#define LAPACK_zhegst LAPACK_GLOBAL(zhegst,ZHEGST)\n#define LAPACK_sspgst LAPACK_GLOBAL(sspgst,SSPGST)\n#define LAPACK_dspgst LAPACK_GLOBAL(dspgst,DSPGST)\n#define LAPACK_chpgst LAPACK_GLOBAL(chpgst,CHPGST)\n#define LAPACK_zhpgst 
LAPACK_GLOBAL(zhpgst,ZHPGST)\n#define LAPACK_ssbgst LAPACK_GLOBAL(ssbgst,SSBGST)\n#define LAPACK_dsbgst LAPACK_GLOBAL(dsbgst,DSBGST)\n#define LAPACK_chbgst LAPACK_GLOBAL(chbgst,CHBGST)\n#define LAPACK_zhbgst LAPACK_GLOBAL(zhbgst,ZHBGST)\n#define LAPACK_spbstf LAPACK_GLOBAL(spbstf,SPBSTF)\n#define LAPACK_dpbstf LAPACK_GLOBAL(dpbstf,DPBSTF)\n#define LAPACK_cpbstf LAPACK_GLOBAL(cpbstf,CPBSTF)\n#define LAPACK_zpbstf LAPACK_GLOBAL(zpbstf,ZPBSTF)\n#define LAPACK_sgehrd LAPACK_GLOBAL(sgehrd,SGEHRD)\n#define LAPACK_dgehrd LAPACK_GLOBAL(dgehrd,DGEHRD)\n#define LAPACK_cgehrd LAPACK_GLOBAL(cgehrd,CGEHRD)\n#define LAPACK_zgehrd LAPACK_GLOBAL(zgehrd,ZGEHRD)\n#define LAPACK_sorghr LAPACK_GLOBAL(sorghr,SORGHR)\n#define LAPACK_dorghr LAPACK_GLOBAL(dorghr,DORGHR)\n#define LAPACK_sormhr LAPACK_GLOBAL(sormhr,SORMHR)\n#define LAPACK_dormhr LAPACK_GLOBAL(dormhr,DORMHR)\n#define LAPACK_cunghr LAPACK_GLOBAL(cunghr,CUNGHR)\n#define LAPACK_zunghr LAPACK_GLOBAL(zunghr,ZUNGHR)\n#define LAPACK_cunmhr LAPACK_GLOBAL(cunmhr,CUNMHR)\n#define LAPACK_zunmhr LAPACK_GLOBAL(zunmhr,ZUNMHR)\n#define LAPACK_sgebal LAPACK_GLOBAL(sgebal,SGEBAL)\n#define LAPACK_dgebal LAPACK_GLOBAL(dgebal,DGEBAL)\n#define LAPACK_cgebal LAPACK_GLOBAL(cgebal,CGEBAL)\n#define LAPACK_zgebal LAPACK_GLOBAL(zgebal,ZGEBAL)\n#define LAPACK_sgebak LAPACK_GLOBAL(sgebak,SGEBAK)\n#define LAPACK_dgebak LAPACK_GLOBAL(dgebak,DGEBAK)\n#define LAPACK_cgebak LAPACK_GLOBAL(cgebak,CGEBAK)\n#define LAPACK_zgebak LAPACK_GLOBAL(zgebak,ZGEBAK)\n#define LAPACK_shseqr LAPACK_GLOBAL(shseqr,SHSEQR)\n#define LAPACK_dhseqr LAPACK_GLOBAL(dhseqr,DHSEQR)\n#define LAPACK_chseqr LAPACK_GLOBAL(chseqr,CHSEQR)\n#define LAPACK_zhseqr LAPACK_GLOBAL(zhseqr,ZHSEQR)\n#define LAPACK_shsein LAPACK_GLOBAL(shsein,SHSEIN)\n#define LAPACK_dhsein LAPACK_GLOBAL(dhsein,DHSEIN)\n#define LAPACK_chsein LAPACK_GLOBAL(chsein,CHSEIN)\n#define LAPACK_zhsein LAPACK_GLOBAL(zhsein,ZHSEIN)\n#define LAPACK_strevc LAPACK_GLOBAL(strevc,STREVC)\n#define LAPACK_dtrevc 
LAPACK_GLOBAL(dtrevc,DTREVC)\n#define LAPACK_ctrevc LAPACK_GLOBAL(ctrevc,CTREVC)\n#define LAPACK_ztrevc LAPACK_GLOBAL(ztrevc,ZTREVC)\n#define LAPACK_strsna LAPACK_GLOBAL(strsna,STRSNA)\n#define LAPACK_dtrsna LAPACK_GLOBAL(dtrsna,DTRSNA)\n#define LAPACK_ctrsna LAPACK_GLOBAL(ctrsna,CTRSNA)\n#define LAPACK_ztrsna LAPACK_GLOBAL(ztrsna,ZTRSNA)\n#define LAPACK_strexc LAPACK_GLOBAL(strexc,STREXC)\n#define LAPACK_dtrexc LAPACK_GLOBAL(dtrexc,DTREXC)\n#define LAPACK_ctrexc LAPACK_GLOBAL(ctrexc,CTREXC)\n#define LAPACK_ztrexc LAPACK_GLOBAL(ztrexc,ZTREXC)\n#define LAPACK_strsen LAPACK_GLOBAL(strsen,STRSEN)\n#define LAPACK_dtrsen LAPACK_GLOBAL(dtrsen,DTRSEN)\n#define LAPACK_ctrsen LAPACK_GLOBAL(ctrsen,CTRSEN)\n#define LAPACK_ztrsen LAPACK_GLOBAL(ztrsen,ZTRSEN)\n#define LAPACK_strsyl LAPACK_GLOBAL(strsyl,STRSYL)\n#define LAPACK_dtrsyl LAPACK_GLOBAL(dtrsyl,DTRSYL)\n#define LAPACK_ctrsyl LAPACK_GLOBAL(ctrsyl,CTRSYL)\n#define LAPACK_ztrsyl LAPACK_GLOBAL(ztrsyl,ZTRSYL)\n#define LAPACK_sgghrd LAPACK_GLOBAL(sgghrd,SGGHRD)\n#define LAPACK_dgghrd LAPACK_GLOBAL(dgghrd,DGGHRD)\n#define LAPACK_cgghrd LAPACK_GLOBAL(cgghrd,CGGHRD)\n#define LAPACK_zgghrd LAPACK_GLOBAL(zgghrd,ZGGHRD)\n#define LAPACK_sggbal LAPACK_GLOBAL(sggbal,SGGBAL)\n#define LAPACK_dggbal LAPACK_GLOBAL(dggbal,DGGBAL)\n#define LAPACK_cggbal LAPACK_GLOBAL(cggbal,CGGBAL)\n#define LAPACK_zggbal LAPACK_GLOBAL(zggbal,ZGGBAL)\n#define LAPACK_sggbak LAPACK_GLOBAL(sggbak,SGGBAK)\n#define LAPACK_dggbak LAPACK_GLOBAL(dggbak,DGGBAK)\n#define LAPACK_cggbak LAPACK_GLOBAL(cggbak,CGGBAK)\n#define LAPACK_zggbak LAPACK_GLOBAL(zggbak,ZGGBAK)\n#define LAPACK_shgeqz LAPACK_GLOBAL(shgeqz,SHGEQZ)\n#define LAPACK_dhgeqz LAPACK_GLOBAL(dhgeqz,DHGEQZ)\n#define LAPACK_chgeqz LAPACK_GLOBAL(chgeqz,CHGEQZ)\n#define LAPACK_zhgeqz LAPACK_GLOBAL(zhgeqz,ZHGEQZ)\n#define LAPACK_stgevc LAPACK_GLOBAL(stgevc,STGEVC)\n#define LAPACK_dtgevc LAPACK_GLOBAL(dtgevc,DTGEVC)\n#define LAPACK_ctgevc LAPACK_GLOBAL(ctgevc,CTGEVC)\n#define LAPACK_ztgevc 
LAPACK_GLOBAL(ztgevc,ZTGEVC)\n#define LAPACK_stgexc LAPACK_GLOBAL(stgexc,STGEXC)\n#define LAPACK_dtgexc LAPACK_GLOBAL(dtgexc,DTGEXC)\n#define LAPACK_ctgexc LAPACK_GLOBAL(ctgexc,CTGEXC)\n#define LAPACK_ztgexc LAPACK_GLOBAL(ztgexc,ZTGEXC)\n#define LAPACK_stgsen LAPACK_GLOBAL(stgsen,STGSEN)\n#define LAPACK_dtgsen LAPACK_GLOBAL(dtgsen,DTGSEN)\n#define LAPACK_ctgsen LAPACK_GLOBAL(ctgsen,CTGSEN)\n#define LAPACK_ztgsen LAPACK_GLOBAL(ztgsen,ZTGSEN)\n#define LAPACK_stgsyl LAPACK_GLOBAL(stgsyl,STGSYL)\n#define LAPACK_dtgsyl LAPACK_GLOBAL(dtgsyl,DTGSYL)\n#define LAPACK_ctgsyl LAPACK_GLOBAL(ctgsyl,CTGSYL)\n#define LAPACK_ztgsyl LAPACK_GLOBAL(ztgsyl,ZTGSYL)\n#define LAPACK_stgsna LAPACK_GLOBAL(stgsna,STGSNA)\n#define LAPACK_dtgsna LAPACK_GLOBAL(dtgsna,DTGSNA)\n#define LAPACK_ctgsna LAPACK_GLOBAL(ctgsna,CTGSNA)\n#define LAPACK_ztgsna LAPACK_GLOBAL(ztgsna,ZTGSNA)\n#define LAPACK_sggsvp LAPACK_GLOBAL(sggsvp,SGGSVP)\n#define LAPACK_dggsvp LAPACK_GLOBAL(dggsvp,DGGSVP)\n#define LAPACK_cggsvp LAPACK_GLOBAL(cggsvp,CGGSVP)\n#define LAPACK_zggsvp LAPACK_GLOBAL(zggsvp,ZGGSVP)\n#define LAPACK_stgsja LAPACK_GLOBAL(stgsja,STGSJA)\n#define LAPACK_dtgsja LAPACK_GLOBAL(dtgsja,DTGSJA)\n#define LAPACK_ctgsja LAPACK_GLOBAL(ctgsja,CTGSJA)\n#define LAPACK_ztgsja LAPACK_GLOBAL(ztgsja,ZTGSJA)\n#define LAPACK_sgels LAPACK_GLOBAL(sgels,SGELS)\n#define LAPACK_dgels LAPACK_GLOBAL(dgels,DGELS)\n#define LAPACK_cgels LAPACK_GLOBAL(cgels,CGELS)\n#define LAPACK_zgels LAPACK_GLOBAL(zgels,ZGELS)\n#define LAPACK_sgelsy LAPACK_GLOBAL(sgelsy,SGELSY)\n#define LAPACK_dgelsy LAPACK_GLOBAL(dgelsy,DGELSY)\n#define LAPACK_cgelsy LAPACK_GLOBAL(cgelsy,CGELSY)\n#define LAPACK_zgelsy LAPACK_GLOBAL(zgelsy,ZGELSY)\n#define LAPACK_sgelss LAPACK_GLOBAL(sgelss,SGELSS)\n#define LAPACK_dgelss LAPACK_GLOBAL(dgelss,DGELSS)\n#define LAPACK_cgelss LAPACK_GLOBAL(cgelss,CGELSS)\n#define LAPACK_zgelss LAPACK_GLOBAL(zgelss,ZGELSS)\n#define LAPACK_sgelsd LAPACK_GLOBAL(sgelsd,SGELSD)\n#define LAPACK_dgelsd 
LAPACK_GLOBAL(dgelsd,DGELSD)\n#define LAPACK_cgelsd LAPACK_GLOBAL(cgelsd,CGELSD)\n#define LAPACK_zgelsd LAPACK_GLOBAL(zgelsd,ZGELSD)\n#define LAPACK_sgglse LAPACK_GLOBAL(sgglse,SGGLSE)\n#define LAPACK_dgglse LAPACK_GLOBAL(dgglse,DGGLSE)\n#define LAPACK_cgglse LAPACK_GLOBAL(cgglse,CGGLSE)\n#define LAPACK_zgglse LAPACK_GLOBAL(zgglse,ZGGLSE)\n#define LAPACK_sggglm LAPACK_GLOBAL(sggglm,SGGGLM)\n#define LAPACK_dggglm LAPACK_GLOBAL(dggglm,DGGGLM)\n#define LAPACK_cggglm LAPACK_GLOBAL(cggglm,CGGGLM)\n#define LAPACK_zggglm LAPACK_GLOBAL(zggglm,ZGGGLM)\n#define LAPACK_ssyev LAPACK_GLOBAL(ssyev,SSYEV)\n#define LAPACK_dsyev LAPACK_GLOBAL(dsyev,DSYEV)\n#define LAPACK_cheev LAPACK_GLOBAL(cheev,CHEEV)\n#define LAPACK_zheev LAPACK_GLOBAL(zheev,ZHEEV)\n#define LAPACK_ssyevd LAPACK_GLOBAL(ssyevd,SSYEVD)\n#define LAPACK_dsyevd LAPACK_GLOBAL(dsyevd,DSYEVD)\n#define LAPACK_cheevd LAPACK_GLOBAL(cheevd,CHEEVD)\n#define LAPACK_zheevd LAPACK_GLOBAL(zheevd,ZHEEVD)\n#define LAPACK_ssyevx LAPACK_GLOBAL(ssyevx,SSYEVX)\n#define LAPACK_dsyevx LAPACK_GLOBAL(dsyevx,DSYEVX)\n#define LAPACK_cheevx LAPACK_GLOBAL(cheevx,CHEEVX)\n#define LAPACK_zheevx LAPACK_GLOBAL(zheevx,ZHEEVX)\n#define LAPACK_ssyevr LAPACK_GLOBAL(ssyevr,SSYEVR)\n#define LAPACK_dsyevr LAPACK_GLOBAL(dsyevr,DSYEVR)\n#define LAPACK_cheevr LAPACK_GLOBAL(cheevr,CHEEVR)\n#define LAPACK_zheevr LAPACK_GLOBAL(zheevr,ZHEEVR)\n#define LAPACK_sspev LAPACK_GLOBAL(sspev,SSPEV)\n#define LAPACK_dspev LAPACK_GLOBAL(dspev,DSPEV)\n#define LAPACK_chpev LAPACK_GLOBAL(chpev,CHPEV)\n#define LAPACK_zhpev LAPACK_GLOBAL(zhpev,ZHPEV)\n#define LAPACK_sspevd LAPACK_GLOBAL(sspevd,SSPEVD)\n#define LAPACK_dspevd LAPACK_GLOBAL(dspevd,DSPEVD)\n#define LAPACK_chpevd LAPACK_GLOBAL(chpevd,CHPEVD)\n#define LAPACK_zhpevd LAPACK_GLOBAL(zhpevd,ZHPEVD)\n#define LAPACK_sspevx LAPACK_GLOBAL(sspevx,SSPEVX)\n#define LAPACK_dspevx LAPACK_GLOBAL(dspevx,DSPEVX)\n#define LAPACK_chpevx LAPACK_GLOBAL(chpevx,CHPEVX)\n#define LAPACK_zhpevx LAPACK_GLOBAL(zhpevx,ZHPEVX)\n#define 
LAPACK_ssbev LAPACK_GLOBAL(ssbev,SSBEV)\n#define LAPACK_dsbev LAPACK_GLOBAL(dsbev,DSBEV)\n#define LAPACK_chbev LAPACK_GLOBAL(chbev,CHBEV)\n#define LAPACK_zhbev LAPACK_GLOBAL(zhbev,ZHBEV)\n#define LAPACK_ssbevd LAPACK_GLOBAL(ssbevd,SSBEVD)\n#define LAPACK_dsbevd LAPACK_GLOBAL(dsbevd,DSBEVD)\n#define LAPACK_chbevd LAPACK_GLOBAL(chbevd,CHBEVD)\n#define LAPACK_zhbevd LAPACK_GLOBAL(zhbevd,ZHBEVD)\n#define LAPACK_ssbevx LAPACK_GLOBAL(ssbevx,SSBEVX)\n#define LAPACK_dsbevx LAPACK_GLOBAL(dsbevx,DSBEVX)\n#define LAPACK_chbevx LAPACK_GLOBAL(chbevx,CHBEVX)\n#define LAPACK_zhbevx LAPACK_GLOBAL(zhbevx,ZHBEVX)\n#define LAPACK_sstev LAPACK_GLOBAL(sstev,SSTEV)\n#define LAPACK_dstev LAPACK_GLOBAL(dstev,DSTEV)\n#define LAPACK_sstevd LAPACK_GLOBAL(sstevd,SSTEVD)\n#define LAPACK_dstevd LAPACK_GLOBAL(dstevd,DSTEVD)\n#define LAPACK_sstevx LAPACK_GLOBAL(sstevx,SSTEVX)\n#define LAPACK_dstevx LAPACK_GLOBAL(dstevx,DSTEVX)\n#define LAPACK_sstevr LAPACK_GLOBAL(sstevr,SSTEVR)\n#define LAPACK_dstevr LAPACK_GLOBAL(dstevr,DSTEVR)\n#define LAPACK_sgees LAPACK_GLOBAL(sgees,SGEES)\n#define LAPACK_dgees LAPACK_GLOBAL(dgees,DGEES)\n#define LAPACK_cgees LAPACK_GLOBAL(cgees,CGEES)\n#define LAPACK_zgees LAPACK_GLOBAL(zgees,ZGEES)\n#define LAPACK_sgeesx LAPACK_GLOBAL(sgeesx,SGEESX)\n#define LAPACK_dgeesx LAPACK_GLOBAL(dgeesx,DGEESX)\n#define LAPACK_cgeesx LAPACK_GLOBAL(cgeesx,CGEESX)\n#define LAPACK_zgeesx LAPACK_GLOBAL(zgeesx,ZGEESX)\n#define LAPACK_sgeev LAPACK_GLOBAL(sgeev,SGEEV)\n#define LAPACK_dgeev LAPACK_GLOBAL(dgeev,DGEEV)\n#define LAPACK_cgeev LAPACK_GLOBAL(cgeev,CGEEV)\n#define LAPACK_zgeev LAPACK_GLOBAL(zgeev,ZGEEV)\n#define LAPACK_sgeevx LAPACK_GLOBAL(sgeevx,SGEEVX)\n#define LAPACK_dgeevx LAPACK_GLOBAL(dgeevx,DGEEVX)\n#define LAPACK_cgeevx LAPACK_GLOBAL(cgeevx,CGEEVX)\n#define LAPACK_zgeevx LAPACK_GLOBAL(zgeevx,ZGEEVX)\n#define LAPACK_sgesvd LAPACK_GLOBAL(sgesvd,SGESVD)\n#define LAPACK_dgesvd LAPACK_GLOBAL(dgesvd,DGESVD)\n#define LAPACK_cgesvd LAPACK_GLOBAL(cgesvd,CGESVD)\n#define LAPACK_zgesvd 
LAPACK_GLOBAL(zgesvd,ZGESVD)\n#define LAPACK_sgesdd LAPACK_GLOBAL(sgesdd,SGESDD)\n#define LAPACK_dgesdd LAPACK_GLOBAL(dgesdd,DGESDD)\n#define LAPACK_cgesdd LAPACK_GLOBAL(cgesdd,CGESDD)\n#define LAPACK_zgesdd LAPACK_GLOBAL(zgesdd,ZGESDD)\n#define LAPACK_dgejsv LAPACK_GLOBAL(dgejsv,DGEJSV)\n#define LAPACK_sgejsv LAPACK_GLOBAL(sgejsv,SGEJSV)\n#define LAPACK_dgesvj LAPACK_GLOBAL(dgesvj,DGESVJ)\n#define LAPACK_sgesvj LAPACK_GLOBAL(sgesvj,SGESVJ)\n#define LAPACK_sggsvd LAPACK_GLOBAL(sggsvd,SGGSVD)\n#define LAPACK_dggsvd LAPACK_GLOBAL(dggsvd,DGGSVD)\n#define LAPACK_cggsvd LAPACK_GLOBAL(cggsvd,CGGSVD)\n#define LAPACK_zggsvd LAPACK_GLOBAL(zggsvd,ZGGSVD)\n#define LAPACK_ssygv LAPACK_GLOBAL(ssygv,SSYGV)\n#define LAPACK_dsygv LAPACK_GLOBAL(dsygv,DSYGV)\n#define LAPACK_chegv LAPACK_GLOBAL(chegv,CHEGV)\n#define LAPACK_zhegv LAPACK_GLOBAL(zhegv,ZHEGV)\n#define LAPACK_ssygvd LAPACK_GLOBAL(ssygvd,SSYGVD)\n#define LAPACK_dsygvd LAPACK_GLOBAL(dsygvd,DSYGVD)\n#define LAPACK_chegvd LAPACK_GLOBAL(chegvd,CHEGVD)\n#define LAPACK_zhegvd LAPACK_GLOBAL(zhegvd,ZHEGVD)\n#define LAPACK_ssygvx LAPACK_GLOBAL(ssygvx,SSYGVX)\n#define LAPACK_dsygvx LAPACK_GLOBAL(dsygvx,DSYGVX)\n#define LAPACK_chegvx LAPACK_GLOBAL(chegvx,CHEGVX)\n#define LAPACK_zhegvx LAPACK_GLOBAL(zhegvx,ZHEGVX)\n#define LAPACK_sspgv LAPACK_GLOBAL(sspgv,SSPGV)\n#define LAPACK_dspgv LAPACK_GLOBAL(dspgv,DSPGV)\n#define LAPACK_chpgv LAPACK_GLOBAL(chpgv,CHPGV)\n#define LAPACK_zhpgv LAPACK_GLOBAL(zhpgv,ZHPGV)\n#define LAPACK_sspgvd LAPACK_GLOBAL(sspgvd,SSPGVD)\n#define LAPACK_dspgvd LAPACK_GLOBAL(dspgvd,DSPGVD)\n#define LAPACK_chpgvd LAPACK_GLOBAL(chpgvd,CHPGVD)\n#define LAPACK_zhpgvd LAPACK_GLOBAL(zhpgvd,ZHPGVD)\n#define LAPACK_sspgvx LAPACK_GLOBAL(sspgvx,SSPGVX)\n#define LAPACK_dspgvx LAPACK_GLOBAL(dspgvx,DSPGVX)\n#define LAPACK_chpgvx LAPACK_GLOBAL(chpgvx,CHPGVX)\n#define LAPACK_zhpgvx LAPACK_GLOBAL(zhpgvx,ZHPGVX)\n#define LAPACK_ssbgv LAPACK_GLOBAL(ssbgv,SSBGV)\n#define LAPACK_dsbgv LAPACK_GLOBAL(dsbgv,DSBGV)\n#define LAPACK_chbgv 
LAPACK_GLOBAL(chbgv,CHBGV)\n#define LAPACK_zhbgv LAPACK_GLOBAL(zhbgv,ZHBGV)\n#define LAPACK_ssbgvd LAPACK_GLOBAL(ssbgvd,SSBGVD)\n#define LAPACK_dsbgvd LAPACK_GLOBAL(dsbgvd,DSBGVD)\n#define LAPACK_chbgvd LAPACK_GLOBAL(chbgvd,CHBGVD)\n#define LAPACK_zhbgvd LAPACK_GLOBAL(zhbgvd,ZHBGVD)\n#define LAPACK_ssbgvx LAPACK_GLOBAL(ssbgvx,SSBGVX)\n#define LAPACK_dsbgvx LAPACK_GLOBAL(dsbgvx,DSBGVX)\n#define LAPACK_chbgvx LAPACK_GLOBAL(chbgvx,CHBGVX)\n#define LAPACK_zhbgvx LAPACK_GLOBAL(zhbgvx,ZHBGVX)\n#define LAPACK_sgges LAPACK_GLOBAL(sgges,SGGES)\n#define LAPACK_dgges LAPACK_GLOBAL(dgges,DGGES)\n#define LAPACK_cgges LAPACK_GLOBAL(cgges,CGGES)\n#define LAPACK_zgges LAPACK_GLOBAL(zgges,ZGGES)\n#define LAPACK_sggesx LAPACK_GLOBAL(sggesx,SGGESX)\n#define LAPACK_dggesx LAPACK_GLOBAL(dggesx,DGGESX)\n#define LAPACK_cggesx LAPACK_GLOBAL(cggesx,CGGESX)\n#define LAPACK_zggesx LAPACK_GLOBAL(zggesx,ZGGESX)\n#define LAPACK_sggev LAPACK_GLOBAL(sggev,SGGEV)\n#define LAPACK_dggev LAPACK_GLOBAL(dggev,DGGEV)\n#define LAPACK_cggev LAPACK_GLOBAL(cggev,CGGEV)\n#define LAPACK_zggev LAPACK_GLOBAL(zggev,ZGGEV)\n#define LAPACK_sggevx LAPACK_GLOBAL(sggevx,SGGEVX)\n#define LAPACK_dggevx LAPACK_GLOBAL(dggevx,DGGEVX)\n#define LAPACK_cggevx LAPACK_GLOBAL(cggevx,CGGEVX)\n#define LAPACK_zggevx LAPACK_GLOBAL(zggevx,ZGGEVX)\n#define LAPACK_dsfrk LAPACK_GLOBAL(dsfrk,DSFRK)\n#define LAPACK_ssfrk LAPACK_GLOBAL(ssfrk,SSFRK)\n#define LAPACK_zhfrk LAPACK_GLOBAL(zhfrk,ZHFRK)\n#define LAPACK_chfrk LAPACK_GLOBAL(chfrk,CHFRK)\n#define LAPACK_dtfsm LAPACK_GLOBAL(dtfsm,DTFSM)\n#define LAPACK_stfsm LAPACK_GLOBAL(stfsm,STFSM)\n#define LAPACK_ztfsm LAPACK_GLOBAL(ztfsm,ZTFSM)\n#define LAPACK_ctfsm LAPACK_GLOBAL(ctfsm,CTFSM)\n#define LAPACK_dtfttp LAPACK_GLOBAL(dtfttp,DTFTTP)\n#define LAPACK_stfttp LAPACK_GLOBAL(stfttp,STFTTP)\n#define LAPACK_ztfttp LAPACK_GLOBAL(ztfttp,ZTFTTP)\n#define LAPACK_ctfttp LAPACK_GLOBAL(ctfttp,CTFTTP)\n#define LAPACK_dtfttr LAPACK_GLOBAL(dtfttr,DTFTTR)\n#define LAPACK_stfttr 
LAPACK_GLOBAL(stfttr,STFTTR)\n#define LAPACK_ztfttr LAPACK_GLOBAL(ztfttr,ZTFTTR)\n#define LAPACK_ctfttr LAPACK_GLOBAL(ctfttr,CTFTTR)\n#define LAPACK_dtpttf LAPACK_GLOBAL(dtpttf,DTPTTF)\n#define LAPACK_stpttf LAPACK_GLOBAL(stpttf,STPTTF)\n#define LAPACK_ztpttf LAPACK_GLOBAL(ztpttf,ZTPTTF)\n#define LAPACK_ctpttf LAPACK_GLOBAL(ctpttf,CTPTTF)\n#define LAPACK_dtpttr LAPACK_GLOBAL(dtpttr,DTPTTR)\n#define LAPACK_stpttr LAPACK_GLOBAL(stpttr,STPTTR)\n#define LAPACK_ztpttr LAPACK_GLOBAL(ztpttr,ZTPTTR)\n#define LAPACK_ctpttr LAPACK_GLOBAL(ctpttr,CTPTTR)\n#define LAPACK_dtrttf LAPACK_GLOBAL(dtrttf,DTRTTF)\n#define LAPACK_strttf LAPACK_GLOBAL(strttf,STRTTF)\n#define LAPACK_ztrttf LAPACK_GLOBAL(ztrttf,ZTRTTF)\n#define LAPACK_ctrttf LAPACK_GLOBAL(ctrttf,CTRTTF)\n#define LAPACK_dtrttp LAPACK_GLOBAL(dtrttp,DTRTTP)\n#define LAPACK_strttp LAPACK_GLOBAL(strttp,STRTTP)\n#define LAPACK_ztrttp LAPACK_GLOBAL(ztrttp,ZTRTTP)\n#define LAPACK_ctrttp LAPACK_GLOBAL(ctrttp,CTRTTP)\n#define LAPACK_sgeqrfp LAPACK_GLOBAL(sgeqrfp,SGEQRFP)\n#define LAPACK_dgeqrfp LAPACK_GLOBAL(dgeqrfp,DGEQRFP)\n#define LAPACK_cgeqrfp LAPACK_GLOBAL(cgeqrfp,CGEQRFP)\n#define LAPACK_zgeqrfp LAPACK_GLOBAL(zgeqrfp,ZGEQRFP)\n#define LAPACK_clacgv LAPACK_GLOBAL(clacgv,CLACGV)\n#define LAPACK_zlacgv LAPACK_GLOBAL(zlacgv,ZLACGV)\n#define LAPACK_slarnv LAPACK_GLOBAL(slarnv,SLARNV)\n#define LAPACK_dlarnv LAPACK_GLOBAL(dlarnv,DLARNV)\n#define LAPACK_clarnv LAPACK_GLOBAL(clarnv,CLARNV)\n#define LAPACK_zlarnv LAPACK_GLOBAL(zlarnv,ZLARNV)\n#define LAPACK_sgeqr2 LAPACK_GLOBAL(sgeqr2,SGEQR2)\n#define LAPACK_dgeqr2 LAPACK_GLOBAL(dgeqr2,DGEQR2)\n#define LAPACK_cgeqr2 LAPACK_GLOBAL(cgeqr2,CGEQR2)\n#define LAPACK_zgeqr2 LAPACK_GLOBAL(zgeqr2,ZGEQR2)\n#define LAPACK_slacpy LAPACK_GLOBAL(slacpy,SLACPY)\n#define LAPACK_dlacpy LAPACK_GLOBAL(dlacpy,DLACPY)\n#define LAPACK_clacpy LAPACK_GLOBAL(clacpy,CLACPY)\n#define LAPACK_zlacpy LAPACK_GLOBAL(zlacpy,ZLACPY)\n#define LAPACK_sgetf2 LAPACK_GLOBAL(sgetf2,SGETF2)\n#define LAPACK_dgetf2 
LAPACK_GLOBAL(dgetf2,DGETF2)\n#define LAPACK_cgetf2 LAPACK_GLOBAL(cgetf2,CGETF2)\n#define LAPACK_zgetf2 LAPACK_GLOBAL(zgetf2,ZGETF2)\n#define LAPACK_slaswp LAPACK_GLOBAL(slaswp,SLASWP)\n#define LAPACK_dlaswp LAPACK_GLOBAL(dlaswp,DLASWP)\n#define LAPACK_claswp LAPACK_GLOBAL(claswp,CLASWP)\n#define LAPACK_zlaswp LAPACK_GLOBAL(zlaswp,ZLASWP)\n#define LAPACK_slange LAPACK_GLOBAL(slange,SLANGE)\n#define LAPACK_dlange LAPACK_GLOBAL(dlange,DLANGE)\n#define LAPACK_clange LAPACK_GLOBAL(clange,CLANGE)\n#define LAPACK_zlange LAPACK_GLOBAL(zlange,ZLANGE)\n#define LAPACK_clanhe LAPACK_GLOBAL(clanhe,CLANHE)\n#define LAPACK_zlanhe LAPACK_GLOBAL(zlanhe,ZLANHE)\n#define LAPACK_slansy LAPACK_GLOBAL(slansy,SLANSY)\n#define LAPACK_dlansy LAPACK_GLOBAL(dlansy,DLANSY)\n#define LAPACK_clansy LAPACK_GLOBAL(clansy,CLANSY)\n#define LAPACK_zlansy LAPACK_GLOBAL(zlansy,ZLANSY)\n#define LAPACK_slantr LAPACK_GLOBAL(slantr,SLANTR)\n#define LAPACK_dlantr LAPACK_GLOBAL(dlantr,DLANTR)\n#define LAPACK_clantr LAPACK_GLOBAL(clantr,CLANTR)\n#define LAPACK_zlantr LAPACK_GLOBAL(zlantr,ZLANTR)\n#define LAPACK_slamch LAPACK_GLOBAL(slamch,SLAMCH)\n#define LAPACK_dlamch LAPACK_GLOBAL(dlamch,DLAMCH)\n#define LAPACK_sgelq2 LAPACK_GLOBAL(sgelq2,SGELQ2)\n#define LAPACK_dgelq2 LAPACK_GLOBAL(dgelq2,DGELQ2)\n#define LAPACK_cgelq2 LAPACK_GLOBAL(cgelq2,CGELQ2)\n#define LAPACK_zgelq2 LAPACK_GLOBAL(zgelq2,ZGELQ2)\n#define LAPACK_slarfb LAPACK_GLOBAL(slarfb,SLARFB)\n#define LAPACK_dlarfb LAPACK_GLOBAL(dlarfb,DLARFB)\n#define LAPACK_clarfb LAPACK_GLOBAL(clarfb,CLARFB)\n#define LAPACK_zlarfb LAPACK_GLOBAL(zlarfb,ZLARFB)\n#define LAPACK_slarfg LAPACK_GLOBAL(slarfg,SLARFG)\n#define LAPACK_dlarfg LAPACK_GLOBAL(dlarfg,DLARFG)\n#define LAPACK_clarfg LAPACK_GLOBAL(clarfg,CLARFG)\n#define LAPACK_zlarfg LAPACK_GLOBAL(zlarfg,ZLARFG)\n#define LAPACK_slarft LAPACK_GLOBAL(slarft,SLARFT)\n#define LAPACK_dlarft LAPACK_GLOBAL(dlarft,DLARFT)\n#define LAPACK_clarft LAPACK_GLOBAL(clarft,CLARFT)\n#define LAPACK_zlarft 
LAPACK_GLOBAL(zlarft,ZLARFT)\n#define LAPACK_slarfx LAPACK_GLOBAL(slarfx,SLARFX)\n#define LAPACK_dlarfx LAPACK_GLOBAL(dlarfx,DLARFX)\n#define LAPACK_clarfx LAPACK_GLOBAL(clarfx,CLARFX)\n#define LAPACK_zlarfx LAPACK_GLOBAL(zlarfx,ZLARFX)\n#define LAPACK_slatms LAPACK_GLOBAL(slatms,SLATMS)\n#define LAPACK_dlatms LAPACK_GLOBAL(dlatms,DLATMS)\n#define LAPACK_clatms LAPACK_GLOBAL(clatms,CLATMS)\n#define LAPACK_zlatms LAPACK_GLOBAL(zlatms,ZLATMS)\n#define LAPACK_slag2d LAPACK_GLOBAL(slag2d,SLAG2D)\n#define LAPACK_dlag2s LAPACK_GLOBAL(dlag2s,DLAG2S)\n#define LAPACK_clag2z LAPACK_GLOBAL(clag2z,CLAG2Z)\n#define LAPACK_zlag2c LAPACK_GLOBAL(zlag2c,ZLAG2C)\n#define LAPACK_slauum LAPACK_GLOBAL(slauum,SLAUUM)\n#define LAPACK_dlauum LAPACK_GLOBAL(dlauum,DLAUUM)\n#define LAPACK_clauum LAPACK_GLOBAL(clauum,CLAUUM)\n#define LAPACK_zlauum LAPACK_GLOBAL(zlauum,ZLAUUM)\n#define LAPACK_slagge LAPACK_GLOBAL(slagge,SLAGGE)\n#define LAPACK_dlagge LAPACK_GLOBAL(dlagge,DLAGGE)\n#define LAPACK_clagge LAPACK_GLOBAL(clagge,CLAGGE)\n#define LAPACK_zlagge LAPACK_GLOBAL(zlagge,ZLAGGE)\n#define LAPACK_slaset LAPACK_GLOBAL(slaset,SLASET)\n#define LAPACK_dlaset LAPACK_GLOBAL(dlaset,DLASET)\n#define LAPACK_claset LAPACK_GLOBAL(claset,CLASET)\n#define LAPACK_zlaset LAPACK_GLOBAL(zlaset,ZLASET)\n#define LAPACK_slasrt LAPACK_GLOBAL(slasrt,SLASRT)\n#define LAPACK_dlasrt LAPACK_GLOBAL(dlasrt,DLASRT)\n#define LAPACK_slagsy LAPACK_GLOBAL(slagsy,SLAGSY)\n#define LAPACK_dlagsy LAPACK_GLOBAL(dlagsy,DLAGSY)\n#define LAPACK_clagsy LAPACK_GLOBAL(clagsy,CLAGSY)\n#define LAPACK_zlagsy LAPACK_GLOBAL(zlagsy,ZLAGSY)\n#define LAPACK_claghe LAPACK_GLOBAL(claghe,CLAGHE)\n#define LAPACK_zlaghe LAPACK_GLOBAL(zlaghe,ZLAGHE)\n#define LAPACK_slapmr LAPACK_GLOBAL(slapmr,SLAPMR)\n#define LAPACK_dlapmr LAPACK_GLOBAL(dlapmr,DLAPMR)\n#define LAPACK_clapmr LAPACK_GLOBAL(clapmr,CLAPMR)\n#define LAPACK_zlapmr LAPACK_GLOBAL(zlapmr,ZLAPMR)\n#define LAPACK_slapy2 LAPACK_GLOBAL(slapy2,SLAPY2)\n#define LAPACK_dlapy2 
LAPACK_GLOBAL(dlapy2,DLAPY2)\n#define LAPACK_slapy3 LAPACK_GLOBAL(slapy3,SLAPY3)\n#define LAPACK_dlapy3 LAPACK_GLOBAL(dlapy3,DLAPY3)\n#define LAPACK_slartgp LAPACK_GLOBAL(slartgp,SLARTGP)\n#define LAPACK_dlartgp LAPACK_GLOBAL(dlartgp,DLARTGP)\n#define LAPACK_slartgs LAPACK_GLOBAL(slartgs,SLARTGS)\n#define LAPACK_dlartgs LAPACK_GLOBAL(dlartgs,DLARTGS)\n// LAPACK 3.3.0\n#define LAPACK_cbbcsd LAPACK_GLOBAL(cbbcsd,CBBCSD)\n#define LAPACK_cheswapr LAPACK_GLOBAL(cheswapr,CHESWAPR)\n#define LAPACK_chetri2 LAPACK_GLOBAL(chetri2,CHETRI2)\n#define LAPACK_chetri2x LAPACK_GLOBAL(chetri2x,CHETRI2X)\n#define LAPACK_chetrs2 LAPACK_GLOBAL(chetrs2,CHETRS2)\n#define LAPACK_csyconv LAPACK_GLOBAL(csyconv,CSYCONV)\n#define LAPACK_csyswapr LAPACK_GLOBAL(csyswapr,CSYSWAPR)\n#define LAPACK_csytri2 LAPACK_GLOBAL(csytri2,CSYTRI2)\n#define LAPACK_csytri2x LAPACK_GLOBAL(csytri2x,CSYTRI2X)\n#define LAPACK_csytrs2 LAPACK_GLOBAL(csytrs2,CSYTRS2)\n#define LAPACK_cunbdb LAPACK_GLOBAL(cunbdb,CUNBDB)\n#define LAPACK_cuncsd LAPACK_GLOBAL(cuncsd,CUNCSD)\n#define LAPACK_dbbcsd LAPACK_GLOBAL(dbbcsd,DBBCSD)\n#define LAPACK_dorbdb LAPACK_GLOBAL(dorbdb,DORBDB)\n#define LAPACK_dorcsd LAPACK_GLOBAL(dorcsd,DORCSD)\n#define LAPACK_dsyconv LAPACK_GLOBAL(dsyconv,DSYCONV)\n#define LAPACK_dsyswapr LAPACK_GLOBAL(dsyswapr,DSYSWAPR)\n#define LAPACK_dsytri2 LAPACK_GLOBAL(dsytri2,DSYTRI2)\n#define LAPACK_dsytri2x LAPACK_GLOBAL(dsytri2x,DSYTRI2X)\n#define LAPACK_dsytrs2 LAPACK_GLOBAL(dsytrs2,DSYTRS2)\n#define LAPACK_sbbcsd LAPACK_GLOBAL(sbbcsd,SBBCSD)\n#define LAPACK_sorbdb LAPACK_GLOBAL(sorbdb,SORBDB)\n#define LAPACK_sorcsd LAPACK_GLOBAL(sorcsd,SORCSD)\n#define LAPACK_ssyconv LAPACK_GLOBAL(ssyconv,SSYCONV)\n#define LAPACK_ssyswapr LAPACK_GLOBAL(ssyswapr,SSYSWAPR)\n#define LAPACK_ssytri2 LAPACK_GLOBAL(ssytri2,SSYTRI2)\n#define LAPACK_ssytri2x LAPACK_GLOBAL(ssytri2x,SSYTRI2X)\n#define LAPACK_ssytrs2 LAPACK_GLOBAL(ssytrs2,SSYTRS2)\n#define LAPACK_zbbcsd LAPACK_GLOBAL(zbbcsd,ZBBCSD)\n#define LAPACK_zheswapr 
LAPACK_GLOBAL(zheswapr,ZHESWAPR)\n#define LAPACK_zhetri2 LAPACK_GLOBAL(zhetri2,ZHETRI2)\n#define LAPACK_zhetri2x LAPACK_GLOBAL(zhetri2x,ZHETRI2X)\n#define LAPACK_zhetrs2 LAPACK_GLOBAL(zhetrs2,ZHETRS2)\n#define LAPACK_zsyconv LAPACK_GLOBAL(zsyconv,ZSYCONV)\n#define LAPACK_zsyswapr LAPACK_GLOBAL(zsyswapr,ZSYSWAPR)\n#define LAPACK_zsytri2 LAPACK_GLOBAL(zsytri2,ZSYTRI2)\n#define LAPACK_zsytri2x LAPACK_GLOBAL(zsytri2x,ZSYTRI2X)\n#define LAPACK_zsytrs2 LAPACK_GLOBAL(zsytrs2,ZSYTRS2)\n#define LAPACK_zunbdb LAPACK_GLOBAL(zunbdb,ZUNBDB)\n#define LAPACK_zuncsd LAPACK_GLOBAL(zuncsd,ZUNCSD)\n// LAPACK 3.4.0\n#define LAPACK_sgemqrt LAPACK_GLOBAL(sgemqrt,SGEMQRT)\n#define LAPACK_dgemqrt LAPACK_GLOBAL(dgemqrt,DGEMQRT)\n#define LAPACK_cgemqrt LAPACK_GLOBAL(cgemqrt,CGEMQRT)\n#define LAPACK_zgemqrt LAPACK_GLOBAL(zgemqrt,ZGEMQRT)\n#define LAPACK_sgeqrt LAPACK_GLOBAL(sgeqrt,SGEQRT)\n#define LAPACK_dgeqrt LAPACK_GLOBAL(dgeqrt,DGEQRT)\n#define LAPACK_cgeqrt LAPACK_GLOBAL(cgeqrt,CGEQRT)\n#define LAPACK_zgeqrt LAPACK_GLOBAL(zgeqrt,ZGEQRT)\n#define LAPACK_sgeqrt2 LAPACK_GLOBAL(sgeqrt2,SGEQRT2)\n#define LAPACK_dgeqrt2 LAPACK_GLOBAL(dgeqrt2,DGEQRT2)\n#define LAPACK_cgeqrt2 LAPACK_GLOBAL(cgeqrt2,CGEQRT2)\n#define LAPACK_zgeqrt2 LAPACK_GLOBAL(zgeqrt2,ZGEQRT2)\n#define LAPACK_sgeqrt3 LAPACK_GLOBAL(sgeqrt3,SGEQRT3)\n#define LAPACK_dgeqrt3 LAPACK_GLOBAL(dgeqrt3,DGEQRT3)\n#define LAPACK_cgeqrt3 LAPACK_GLOBAL(cgeqrt3,CGEQRT3)\n#define LAPACK_zgeqrt3 LAPACK_GLOBAL(zgeqrt3,ZGEQRT3)\n#define LAPACK_stpmqrt LAPACK_GLOBAL(stpmqrt,STPMQRT)\n#define LAPACK_dtpmqrt LAPACK_GLOBAL(dtpmqrt,DTPMQRT)\n#define LAPACK_ctpmqrt LAPACK_GLOBAL(ctpmqrt,CTPMQRT)\n#define LAPACK_ztpmqrt LAPACK_GLOBAL(ztpmqrt,ZTPMQRT)\n#define LAPACK_dtpqrt LAPACK_GLOBAL(dtpqrt,DTPQRT)\n#define LAPACK_ctpqrt LAPACK_GLOBAL(ctpqrt,CTPQRT)\n#define LAPACK_ztpqrt LAPACK_GLOBAL(ztpqrt,ZTPQRT)\n#define LAPACK_stpqrt2 LAPACK_GLOBAL(stpqrt2,STPQRT2)\n#define LAPACK_dtpqrt2 LAPACK_GLOBAL(dtpqrt2,DTPQRT2)\n#define LAPACK_ctpqrt2 
LAPACK_GLOBAL(ctpqrt2,CTPQRT2)\n#define LAPACK_ztpqrt2 LAPACK_GLOBAL(ztpqrt2,ZTPQRT2)\n#define LAPACK_stprfb LAPACK_GLOBAL(stprfb,STPRFB)\n#define LAPACK_dtprfb LAPACK_GLOBAL(dtprfb,DTPRFB)\n#define LAPACK_ctprfb LAPACK_GLOBAL(ctprfb,CTPRFB)\n#define LAPACK_ztprfb LAPACK_GLOBAL(ztprfb,ZTPRFB)\n// LAPACK 3.X.X\n#define LAPACK_csyr LAPACK_GLOBAL(csyr,CSYR)\n#define LAPACK_zsyr LAPACK_GLOBAL(zsyr,ZSYR)\n\n\nvoid LAPACK_sgetrf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_dgetrf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_cgetrf( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_zgetrf( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_sgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, float* ab, lapack_int* ldab,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_dgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, double* ab, lapack_int* ldab,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_cgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, lapack_complex_float* ab, lapack_int* ldab,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_zgbtrf( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, lapack_complex_double* ab, lapack_int* ldab,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_sgttrf( lapack_int* n, float* dl, float* d, float* du, float* du2,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_dgttrf( lapack_int* n, double* dl, double* d, double* du,\n  
                  double* du2, lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_cgttrf( lapack_int* n, lapack_complex_float* dl,\n                    lapack_complex_float* d, lapack_complex_float* du,\n                    lapack_complex_float* du2, lapack_int* ipiv,\n                    lapack_int *info );\nvoid LAPACK_zgttrf( lapack_int* n, lapack_complex_double* dl,\n                    lapack_complex_double* d, lapack_complex_double* du,\n                    lapack_complex_double* du2, lapack_int* ipiv,\n                    lapack_int *info );\nvoid LAPACK_spotrf( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_dpotrf( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_cpotrf( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_zpotrf( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_dpstrf( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* piv, lapack_int* rank, double* tol,\n                    double* work, lapack_int *info );\nvoid LAPACK_spstrf( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* piv, lapack_int* rank, float* tol, float* work,\n                    lapack_int *info );\nvoid LAPACK_zpstrf( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* piv, lapack_int* rank,\n                    double* tol, double* work, lapack_int *info );\nvoid LAPACK_cpstrf( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* piv, lapack_int* rank,\n                    float* tol, float* work, lapack_int *info );\nvoid LAPACK_dpftrf( char* transr, char* uplo, lapack_int* n, double* a,\n                    lapack_int *info );\nvoid 
LAPACK_spftrf( char* transr, char* uplo, lapack_int* n, float* a,\n                    lapack_int *info );\nvoid LAPACK_zpftrf( char* transr, char* uplo, lapack_int* n,\n                    lapack_complex_double* a, lapack_int *info );\nvoid LAPACK_cpftrf( char* transr, char* uplo, lapack_int* n,\n                    lapack_complex_float* a, lapack_int *info );\nvoid LAPACK_spptrf( char* uplo, lapack_int* n, float* ap, lapack_int *info );\nvoid LAPACK_dpptrf( char* uplo, lapack_int* n, double* ap, lapack_int *info );\nvoid LAPACK_cpptrf( char* uplo, lapack_int* n, lapack_complex_float* ap,\n                    lapack_int *info );\nvoid LAPACK_zpptrf( char* uplo, lapack_int* n, lapack_complex_double* ap,\n                    lapack_int *info );\nvoid LAPACK_spbtrf( char* uplo, lapack_int* n, lapack_int* kd, float* ab,\n                    lapack_int* ldab, lapack_int *info );\nvoid LAPACK_dpbtrf( char* uplo, lapack_int* n, lapack_int* kd, double* ab,\n                    lapack_int* ldab, lapack_int *info );\nvoid LAPACK_cpbtrf( char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_complex_float* ab, lapack_int* ldab,\n                    lapack_int *info );\nvoid LAPACK_zpbtrf( char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_complex_double* ab, lapack_int* ldab,\n                    lapack_int *info );\nvoid LAPACK_spttrf( lapack_int* n, float* d, float* e, lapack_int *info );\nvoid LAPACK_dpttrf( lapack_int* n, double* d, double* e, lapack_int *info );\nvoid LAPACK_cpttrf( lapack_int* n, float* d, lapack_complex_float* e,\n                    lapack_int *info );\nvoid LAPACK_zpttrf( lapack_int* n, double* d, lapack_complex_double* e,\n                    lapack_int *info );\nvoid LAPACK_ssytrf( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* ipiv, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dsytrf( char* uplo, lapack_int* n, double* a, 
lapack_int* lda,\n                    lapack_int* ipiv, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_csytrf( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* ipiv,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zsytrf( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* ipiv,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_chetrf( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* ipiv,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zhetrf( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* ipiv,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_ssptrf( char* uplo, lapack_int* n, float* ap, lapack_int* ipiv,\n                    lapack_int *info );\nvoid LAPACK_dsptrf( char* uplo, lapack_int* n, double* ap, lapack_int* ipiv,\n                    lapack_int *info );\nvoid LAPACK_csptrf( char* uplo, lapack_int* n, lapack_complex_float* ap,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_zsptrf( char* uplo, lapack_int* n, lapack_complex_double* ap,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_chptrf( char* uplo, lapack_int* n, lapack_complex_float* ap,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_zhptrf( char* uplo, lapack_int* n, lapack_complex_double* ap,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_sgetrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const float* a, lapack_int* lda, const lapack_int* 
ipiv,\n                    float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dgetrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const double* a, lapack_int* lda, const lapack_int* ipiv,\n                    double* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cgetrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zgetrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_sgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const float* ab, lapack_int* ldab,\n                    const lapack_int* ipiv, float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_dgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const double* ab, lapack_int* ldab,\n                    const lapack_int* ipiv, double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_cgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const lapack_complex_float* ab,\n                    lapack_int* ldab, const lapack_int* ipiv,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zgbtrs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const lapack_complex_double* ab,\n                    lapack_int* ldab, const lapack_int* ipiv,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info 
);\nvoid LAPACK_sgttrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const float* dl, const float* d, const float* du,\n                    const float* du2, const lapack_int* ipiv, float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dgttrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const double* dl, const double* d, const double* du,\n                    const double* du2, const lapack_int* ipiv, double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cgttrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* dl,\n                    const lapack_complex_float* d,\n                    const lapack_complex_float* du,\n                    const lapack_complex_float* du2, const lapack_int* ipiv,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zgttrs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* dl,\n                    const lapack_complex_double* d,\n                    const lapack_complex_double* du,\n                    const lapack_complex_double* du2, const lapack_int* ipiv,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_spotrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a,\n                    lapack_int* lda, float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_dpotrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* a, lapack_int* lda, double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cpotrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid 
LAPACK_zpotrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_dpftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* a, double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_spftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const float* a, float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zpftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cpftrs( char* transr, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_spptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const float* ap, float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_dpptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* ap, double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_cpptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zpptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* ap, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_spbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const float* ab, lapack_int* ldab, float* b,\n                    lapack_int* ldb, lapack_int *info 
);\nvoid LAPACK_dpbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const double* ab, lapack_int* ldab, double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cpbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_float* ab, lapack_int* ldab,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zpbtrs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_double* ab, lapack_int* ldab,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_spttrs( lapack_int* n, lapack_int* nrhs, const float* d,\n                    const float* e, float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_dpttrs( lapack_int* n, lapack_int* nrhs, const double* d,\n                    const double* e, double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_cpttrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* d,\n                    const lapack_complex_float* e, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zpttrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* d, const lapack_complex_double* e,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_ssytrs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a,\n                    lapack_int* lda, const lapack_int* ipiv, float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dsytrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* a, lapack_int* lda, const lapack_int* ipiv,\n                    double* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_csytrs( char* uplo, 
lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zsytrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_chetrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zhetrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_ssptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const float* ap, const lapack_int* ipiv, float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dsptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* ap, const lapack_int* ipiv, double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_csptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap, const lapack_int* ipiv,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zsptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* ap, const lapack_int* ipiv,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_chptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    
const lapack_complex_float* ap, const lapack_int* ipiv,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zhptrs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* ap, const lapack_int* ipiv,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_strtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const float* a, lapack_int* lda, float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dtrtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const double* a, lapack_int* lda,\n                    double* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_ctrtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_ztrtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_stptrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const float* ap, float* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dtptrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const double* ap, double* b,\n                    lapack_int* ldb, lapack_int *info );\nvoid LAPACK_ctptrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_float* ap,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    
lapack_int *info );\nvoid LAPACK_ztptrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_double* ap,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_stbtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs, const float* ab,\n                    lapack_int* ldab, float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_dtbtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs, const double* ab,\n                    lapack_int* ldab, double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_ctbtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_float* ab, lapack_int* ldab,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_ztbtrs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_double* ab, lapack_int* ldab,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_sgecon( char* norm, lapack_int* n, const float* a, lapack_int* lda,\n                    float* anorm, float* rcond, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgecon( char* norm, lapack_int* n, const double* a, lapack_int* lda,\n                    double* anorm, double* rcond, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgecon( char* norm, lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, float* anorm, float* rcond,\n                    lapack_complex_float* work, float* 
rwork,\n                    lapack_int *info );\nvoid LAPACK_zgecon( char* norm, lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, double* anorm, double* rcond,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    const float* ab, lapack_int* ldab, const lapack_int* ipiv,\n                    float* anorm, float* rcond, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    const double* ab, lapack_int* ldab, const lapack_int* ipiv,\n                    double* anorm, double* rcond, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    const lapack_complex_float* ab, lapack_int* ldab,\n                    const lapack_int* ipiv, float* anorm, float* rcond,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zgbcon( char* norm, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    const lapack_complex_double* ab, lapack_int* ldab,\n                    const lapack_int* ipiv, double* anorm, double* rcond,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sgtcon( char* norm, lapack_int* n, const float* dl, const float* d,\n                    const float* du, const float* du2, const lapack_int* ipiv,\n                    float* anorm, float* rcond, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgtcon( char* norm, lapack_int* n, const double* dl,\n                    const double* d, const double* du, const double* du2,\n                    const lapack_int* ipiv, 
double* anorm, double* rcond,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgtcon( char* norm, lapack_int* n, const lapack_complex_float* dl,\n                    const lapack_complex_float* d,\n                    const lapack_complex_float* du,\n                    const lapack_complex_float* du2, const lapack_int* ipiv,\n                    float* anorm, float* rcond, lapack_complex_float* work,\n                    lapack_int *info );\nvoid LAPACK_zgtcon( char* norm, lapack_int* n, const lapack_complex_double* dl,\n                    const lapack_complex_double* d,\n                    const lapack_complex_double* du,\n                    const lapack_complex_double* du2, const lapack_int* ipiv,\n                    double* anorm, double* rcond, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_spocon( char* uplo, lapack_int* n, const float* a, lapack_int* lda,\n                    float* anorm, float* rcond, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dpocon( char* uplo, lapack_int* n, const double* a, lapack_int* lda,\n                    double* anorm, double* rcond, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cpocon( char* uplo, lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, float* anorm, float* rcond,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zpocon( char* uplo, lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, double* anorm, double* rcond,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sppcon( char* uplo, lapack_int* n, const float* ap, float* anorm,\n                    float* rcond, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dppcon( char* 
uplo, lapack_int* n, const double* ap, double* anorm,\n                    double* rcond, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cppcon( char* uplo, lapack_int* n, const lapack_complex_float* ap,\n                    float* anorm, float* rcond, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zppcon( char* uplo, lapack_int* n, const lapack_complex_double* ap,\n                    double* anorm, double* rcond, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_spbcon( char* uplo, lapack_int* n, lapack_int* kd, const float* ab,\n                    lapack_int* ldab, float* anorm, float* rcond, float* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dpbcon( char* uplo, lapack_int* n, lapack_int* kd, const double* ab,\n                    lapack_int* ldab, double* anorm, double* rcond,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cpbcon( char* uplo, lapack_int* n, lapack_int* kd,\n                    const lapack_complex_float* ab, lapack_int* ldab,\n                    float* anorm, float* rcond, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zpbcon( char* uplo, lapack_int* n, lapack_int* kd,\n                    const lapack_complex_double* ab, lapack_int* ldab,\n                    double* anorm, double* rcond, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sptcon( lapack_int* n, const float* d, const float* e, float* anorm,\n                    float* rcond, float* work, lapack_int *info );\nvoid LAPACK_dptcon( lapack_int* n, const double* d, const double* e,\n                    double* anorm, double* rcond, double* work,\n                    lapack_int *info );\nvoid LAPACK_cptcon( lapack_int* n, const float* d,\n                    const 
lapack_complex_float* e, float* anorm, float* rcond,\n                    float* work, lapack_int *info );\nvoid LAPACK_zptcon( lapack_int* n, const double* d,\n                    const lapack_complex_double* e, double* anorm,\n                    double* rcond, double* work, lapack_int *info );\nvoid LAPACK_ssycon( char* uplo, lapack_int* n, const float* a, lapack_int* lda,\n                    const lapack_int* ipiv, float* anorm, float* rcond,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dsycon( char* uplo, lapack_int* n, const double* a, lapack_int* lda,\n                    const lapack_int* ipiv, double* anorm, double* rcond,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_csycon( char* uplo, lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_int* ipiv, float* anorm,\n                    float* rcond, lapack_complex_float* work,\n                    lapack_int *info );\nvoid LAPACK_zsycon( char* uplo, lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_int* ipiv, double* anorm,\n                    double* rcond, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_checon( char* uplo, lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_int* ipiv, float* anorm,\n                    float* rcond, lapack_complex_float* work,\n                    lapack_int *info );\nvoid LAPACK_zhecon( char* uplo, lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_int* ipiv, double* anorm,\n                    double* rcond, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_sspcon( char* uplo, lapack_int* n, const float* ap,\n                    const lapack_int* ipiv, float* anorm, float* rcond,\n                    float* work, lapack_int* iwork, 
lapack_int *info );\nvoid LAPACK_dspcon( char* uplo, lapack_int* n, const double* ap,\n                    const lapack_int* ipiv, double* anorm, double* rcond,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cspcon( char* uplo, lapack_int* n, const lapack_complex_float* ap,\n                    const lapack_int* ipiv, float* anorm, float* rcond,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zspcon( char* uplo, lapack_int* n, const lapack_complex_double* ap,\n                    const lapack_int* ipiv, double* anorm, double* rcond,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_chpcon( char* uplo, lapack_int* n, const lapack_complex_float* ap,\n                    const lapack_int* ipiv, float* anorm, float* rcond,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zhpcon( char* uplo, lapack_int* n, const lapack_complex_double* ap,\n                    const lapack_int* ipiv, double* anorm, double* rcond,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_strcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const float* a, lapack_int* lda, float* rcond, float* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dtrcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const double* a, lapack_int* lda, double* rcond,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ctrcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    float* rcond, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_ztrcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    double* rcond, 
lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_stpcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const float* ap, float* rcond, float* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dtpcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const double* ap, double* rcond, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ctpcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const lapack_complex_float* ap, float* rcond,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_ztpcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    const lapack_complex_double* ap, double* rcond,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_stbcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    lapack_int* kd, const float* ab, lapack_int* ldab,\n                    float* rcond, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dtbcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    lapack_int* kd, const double* ab, lapack_int* ldab,\n                    double* rcond, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_ctbcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    lapack_int* kd, const lapack_complex_float* ab,\n                    lapack_int* ldab, float* rcond, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_ztbcon( char* norm, char* uplo, char* diag, lapack_int* n,\n                    lapack_int* kd, const lapack_complex_double* ab,\n                    lapack_int* ldab, double* rcond,\n                    lapack_complex_double* 
work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sgerfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const float* a, lapack_int* lda, const float* af,\n                    lapack_int* ldaf, const lapack_int* ipiv, const float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* ferr,\n                    float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgerfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const double* a, lapack_int* lda, const double* af,\n                    lapack_int* ldaf, const lapack_int* ipiv, const double* b,\n                    lapack_int* ldb, double* x, lapack_int* ldx, double* ferr,\n                    double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cgerfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* af, lapack_int* ldaf,\n                    const lapack_int* ipiv, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zgerfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* af, lapack_int* ldaf,\n                    const lapack_int* ipiv, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_dgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const double* a, lapack_int* 
lda, const double* af,\n                     lapack_int* ldaf, const lapack_int* ipiv, const double* r,\n                     const double* c, const double* b, lapack_int* ldb,\n                     double* x, lapack_int* ldx, double* rcond, double* berr,\n                     lapack_int* n_err_bnds, double* err_bnds_norm,\n                     double* err_bnds_comp, lapack_int* nparams, double* params,\n                     double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const float* a, lapack_int* lda, const float* af,\n                     lapack_int* ldaf, const lapack_int* ipiv, const float* r,\n                     const float* c, const float* b, lapack_int* ldb, float* x,\n                     lapack_int* ldx, float* rcond, float* berr,\n                     lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_double* a, lapack_int* lda,\n                     const lapack_complex_double* af, lapack_int* ldaf,\n                     const lapack_int* ipiv, const double* r, const double* c,\n                     const lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_cgerfsx( char* trans, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_float* a, lapack_int* lda,\n             
        const lapack_complex_float* af, lapack_int* ldaf,\n                     const lapack_int* ipiv, const float* r, const float* c,\n                     const lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_sgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const float* ab, lapack_int* ldab,\n                    const float* afb, lapack_int* ldafb, const lapack_int* ipiv,\n                    const float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                    float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const double* ab, lapack_int* ldab,\n                    const double* afb, lapack_int* ldafb,\n                    const lapack_int* ipiv, const double* b, lapack_int* ldb,\n                    double* x, lapack_int* ldx, double* ferr, double* berr,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const lapack_complex_float* ab,\n                    lapack_int* ldab, const lapack_complex_float* afb,\n                    lapack_int* ldafb, const lapack_int* ipiv,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid 
LAPACK_zgbrfs( char* trans, lapack_int* n, lapack_int* kl, lapack_int* ku,\n                    lapack_int* nrhs, const lapack_complex_double* ab,\n                    lapack_int* ldab, const lapack_complex_double* afb,\n                    lapack_int* ldafb, const lapack_int* ipiv,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_dgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs, const double* ab,\n                     lapack_int* ldab, const double* afb, lapack_int* ldafb,\n                     const lapack_int* ipiv, const double* r, const double* c,\n                     const double* b, lapack_int* ldb, double* x,\n                     lapack_int* ldx, double* rcond, double* berr,\n                     lapack_int* n_err_bnds, double* err_bnds_norm,\n                     double* err_bnds_comp, lapack_int* nparams, double* params,\n                     double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs, const float* ab,\n                     lapack_int* ldab, const float* afb, lapack_int* ldafb,\n                     const lapack_int* ipiv, const float* r, const float* c,\n                     const float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                     float* rcond, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params, float* work,\n                     lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,\n                     lapack_int* 
ku, lapack_int* nrhs,\n                     const lapack_complex_double* ab, lapack_int* ldab,\n                     const lapack_complex_double* afb, lapack_int* ldafb,\n                     const lapack_int* ipiv, const double* r, const double* c,\n                     const lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_cgbrfsx( char* trans, char* equed, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs,\n                     const lapack_complex_float* ab, lapack_int* ldab,\n                     const lapack_complex_float* afb, lapack_int* ldafb,\n                     const lapack_int* ipiv, const float* r, const float* c,\n                     const lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_sgtrfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const float* dl, const float* d, const float* du,\n                    const float* dlf, const float* df, const float* duf,\n                    const float* du2, const lapack_int* ipiv, const float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* ferr,\n                    float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgtrfs( char* trans, lapack_int* n, 
lapack_int* nrhs,\n                    const double* dl, const double* d, const double* du,\n                    const double* dlf, const double* df, const double* duf,\n                    const double* du2, const lapack_int* ipiv, const double* b,\n                    lapack_int* ldb, double* x, lapack_int* ldx, double* ferr,\n                    double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cgtrfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* dl,\n                    const lapack_complex_float* d,\n                    const lapack_complex_float* du,\n                    const lapack_complex_float* dlf,\n                    const lapack_complex_float* df,\n                    const lapack_complex_float* duf,\n                    const lapack_complex_float* du2, const lapack_int* ipiv,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zgtrfs( char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* dl,\n                    const lapack_complex_double* d,\n                    const lapack_complex_double* du,\n                    const lapack_complex_double* dlf,\n                    const lapack_complex_double* df,\n                    const lapack_complex_double* duf,\n                    const lapack_complex_double* du2, const lapack_int* ipiv,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sporfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a,\n             
       lapack_int* lda, const float* af, lapack_int* ldaf,\n                    const float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                    float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dporfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* a, lapack_int* lda, const double* af,\n                    lapack_int* ldaf, const double* b, lapack_int* ldb,\n                    double* x, lapack_int* ldx, double* ferr, double* berr,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cporfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* af, lapack_int* ldaf,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zporfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* af, lapack_int* ldaf,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_dporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const double* a, lapack_int* lda, const double* af,\n                     lapack_int* ldaf, const double* s, const double* b,\n                     lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,\n                     double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* 
err_bnds_comp,\n                     lapack_int* nparams, double* params, double* work,\n                     lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const float* a, lapack_int* lda, const float* af,\n                     lapack_int* ldaf, const float* s, const float* b,\n                     lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                     float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_double* a, lapack_int* lda,\n                     const lapack_complex_double* af, lapack_int* ldaf,\n                     const double* s, const lapack_complex_double* b,\n                     lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                     double* rcond, double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_cporfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_float* a, lapack_int* lda,\n                     const lapack_complex_float* af, lapack_int* ldaf,\n                     const float* s, const lapack_complex_float* b,\n                     lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                     float* rcond, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params,\n                     lapack_complex_float* work, 
float* rwork,\n                     lapack_int *info );\nvoid LAPACK_spprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const float* ap, const float* afp, const float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* ferr,\n                    float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dpprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* ap, const double* afp, const double* b,\n                    lapack_int* ldb, double* x, lapack_int* ldx, double* ferr,\n                    double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cpprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap,\n                    const lapack_complex_float* afp,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zpprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* ap,\n                    const lapack_complex_double* afp,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_spbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const float* ab, lapack_int* ldab, const float* afb,\n                    lapack_int* ldafb, const float* b, lapack_int* ldb,\n                    float* x, lapack_int* ldx, float* ferr, float* berr,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dpbrfs( char* uplo, 
lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const double* ab, lapack_int* ldab, const double* afb,\n                    lapack_int* ldafb, const double* b, lapack_int* ldb,\n                    double* x, lapack_int* ldx, double* ferr, double* berr,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cpbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_float* ab, lapack_int* ldab,\n                    const lapack_complex_float* afb, lapack_int* ldafb,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zpbrfs( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_double* ab, lapack_int* ldab,\n                    const lapack_complex_double* afb, lapack_int* ldafb,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sptrfs( lapack_int* n, lapack_int* nrhs, const float* d,\n                    const float* e, const float* df, const float* ef,\n                    const float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                    float* ferr, float* berr, float* work, lapack_int *info );\nvoid LAPACK_dptrfs( lapack_int* n, lapack_int* nrhs, const double* d,\n                    const double* e, const double* df, const double* ef,\n                    const double* b, lapack_int* ldb, double* x,\n                    lapack_int* ldx, double* ferr, double* berr, double* work,\n                    lapack_int *info );\nvoid LAPACK_cptrfs( char* 
uplo, lapack_int* n, lapack_int* nrhs, const float* d,\n                    const lapack_complex_float* e, const float* df,\n                    const lapack_complex_float* ef,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zptrfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* d, const lapack_complex_double* e,\n                    const double* df, const lapack_complex_double* ef,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_ssyrfs( char* uplo, lapack_int* n, lapack_int* nrhs, const float* a,\n                    lapack_int* lda, const float* af, lapack_int* ldaf,\n                    const lapack_int* ipiv, const float* b, lapack_int* ldb,\n                    float* x, lapack_int* ldx, float* ferr, float* berr,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dsyrfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* a, lapack_int* lda, const double* af,\n                    lapack_int* ldaf, const lapack_int* ipiv, const double* b,\n                    lapack_int* ldb, double* x, lapack_int* ldx, double* ferr,\n                    double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_csyrfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* af, lapack_int* ldaf,\n                    const lapack_int* ipiv, const lapack_complex_float* b,\n              
      lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zsyrfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* af, lapack_int* ldaf,\n                    const lapack_int* ipiv, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_dsyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const double* a, lapack_int* lda, const double* af,\n                     lapack_int* ldaf, const lapack_int* ipiv, const double* s,\n                     const double* b, lapack_int* ldb, double* x,\n                     lapack_int* ldx, double* rcond, double* berr,\n                     lapack_int* n_err_bnds, double* err_bnds_norm,\n                     double* err_bnds_comp, lapack_int* nparams, double* params,\n                     double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ssyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const float* a, lapack_int* lda, const float* af,\n                     lapack_int* ldaf, const lapack_int* ipiv, const float* s,\n                     const float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                     float* rcond, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params, float* work,\n                     lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zsyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const 
lapack_complex_double* a, lapack_int* lda,\n                     const lapack_complex_double* af, lapack_int* ldaf,\n                     const lapack_int* ipiv, const double* s,\n                     const lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_csyrfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_float* a, lapack_int* lda,\n                     const lapack_complex_float* af, lapack_int* ldaf,\n                     const lapack_int* ipiv, const float* s,\n                     const lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_cherfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* af, lapack_int* ldaf,\n                    const lapack_int* ipiv, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zherfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const 
lapack_complex_double* af, lapack_int* ldaf,\n                    const lapack_int* ipiv, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_zherfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_double* a, lapack_int* lda,\n                     const lapack_complex_double* af, lapack_int* ldaf,\n                     const lapack_int* ipiv, const double* s,\n                     const lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_cherfsx( char* uplo, char* equed, lapack_int* n, lapack_int* nrhs,\n                     const lapack_complex_float* a, lapack_int* lda,\n                     const lapack_complex_float* af, lapack_int* ldaf,\n                     const lapack_int* ipiv, const float* s,\n                     const lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* berr, lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_ssprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const float* ap, const float* afp, const lapack_int* ipiv,\n                    const float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n   
                 float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dsprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* ap, const double* afp, const lapack_int* ipiv,\n                    const double* b, lapack_int* ldb, double* x,\n                    lapack_int* ldx, double* ferr, double* berr, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_csprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap,\n                    const lapack_complex_float* afp, const lapack_int* ipiv,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zsprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* ap,\n                    const lapack_complex_double* afp, const lapack_int* ipiv,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_chprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap,\n                    const lapack_complex_float* afp, const lapack_int* ipiv,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zhprfs( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* ap,\n                    
const lapack_complex_double* afp, const lapack_int* ipiv,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* ferr,\n                    double* berr, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_strrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const float* a, lapack_int* lda,\n                    const float* b, lapack_int* ldb, const float* x,\n                    lapack_int* ldx, float* ferr, float* berr, float* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dtrrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const double* a, lapack_int* lda,\n                    const double* b, lapack_int* ldb, const double* x,\n                    lapack_int* ldx, double* ferr, double* berr, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ctrrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* b,\n                    lapack_int* ldb, const lapack_complex_float* x,\n                    lapack_int* ldx, float* ferr, float* berr,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_ztrrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* b,\n                    lapack_int* ldb, const lapack_complex_double* x,\n                    lapack_int* ldx, double* ferr, double* berr,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_stprfs( char* uplo, char* trans, char* diag, 
lapack_int* n,\n                    lapack_int* nrhs, const float* ap, const float* b,\n                    lapack_int* ldb, const float* x, lapack_int* ldx,\n                    float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dtprfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const double* ap, const double* b,\n                    lapack_int* ldb, const double* x, lapack_int* ldx,\n                    double* ferr, double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_ctprfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_float* ap,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    const lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_ztprfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* nrhs, const lapack_complex_double* ap,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    const lapack_complex_double* x, lapack_int* ldx,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_stbrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs, const float* ab,\n                    lapack_int* ldab, const float* b, lapack_int* ldb,\n                    const float* x, lapack_int* ldx, float* ferr, float* berr,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dtbrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs, const double* ab,\n                    lapack_int* 
ldab, const double* b, lapack_int* ldb,\n                    const double* x, lapack_int* ldx, double* ferr,\n                    double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_ctbrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_float* ab, lapack_int* ldab,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    const lapack_complex_float* x, lapack_int* ldx, float* ferr,\n                    float* berr, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_ztbrfs( char* uplo, char* trans, char* diag, lapack_int* n,\n                    lapack_int* kd, lapack_int* nrhs,\n                    const lapack_complex_double* ab, lapack_int* ldab,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    const lapack_complex_double* x, lapack_int* ldx,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sgetri( lapack_int* n, float* a, lapack_int* lda,\n                    const lapack_int* ipiv, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dgetri( lapack_int* n, double* a, lapack_int* lda,\n                    const lapack_int* ipiv, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cgetri( lapack_int* n, lapack_complex_float* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zgetri( lapack_int* n, lapack_complex_double* a, lapack_int* lda,\n                    const lapack_int* ipiv, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_spotri( char* uplo, lapack_int* n, 
float* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_dpotri( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_cpotri( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_zpotri( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_dpftri( char* transr, char* uplo, lapack_int* n, double* a,\n                    lapack_int *info );\nvoid LAPACK_spftri( char* transr, char* uplo, lapack_int* n, float* a,\n                    lapack_int *info );\nvoid LAPACK_zpftri( char* transr, char* uplo, lapack_int* n,\n                    lapack_complex_double* a, lapack_int *info );\nvoid LAPACK_cpftri( char* transr, char* uplo, lapack_int* n,\n                    lapack_complex_float* a, lapack_int *info );\nvoid LAPACK_spptri( char* uplo, lapack_int* n, float* ap, lapack_int *info );\nvoid LAPACK_dpptri( char* uplo, lapack_int* n, double* ap, lapack_int *info );\nvoid LAPACK_cpptri( char* uplo, lapack_int* n, lapack_complex_float* ap,\n                    lapack_int *info );\nvoid LAPACK_zpptri( char* uplo, lapack_int* n, lapack_complex_double* ap,\n                    lapack_int *info );\nvoid LAPACK_ssytri( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    const lapack_int* ipiv, float* work, lapack_int *info );\nvoid LAPACK_dsytri( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    const lapack_int* ipiv, double* work, lapack_int *info );\nvoid LAPACK_csytri( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, const lapack_int* ipiv,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zsytri( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, const lapack_int* ipiv,\n                    
lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_chetri( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, const lapack_int* ipiv,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zhetri( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, const lapack_int* ipiv,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_ssptri( char* uplo, lapack_int* n, float* ap,\n                    const lapack_int* ipiv, float* work, lapack_int *info );\nvoid LAPACK_dsptri( char* uplo, lapack_int* n, double* ap,\n                    const lapack_int* ipiv, double* work, lapack_int *info );\nvoid LAPACK_csptri( char* uplo, lapack_int* n, lapack_complex_float* ap,\n                    const lapack_int* ipiv, lapack_complex_float* work,\n                    lapack_int *info );\nvoid LAPACK_zsptri( char* uplo, lapack_int* n, lapack_complex_double* ap,\n                    const lapack_int* ipiv, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_chptri( char* uplo, lapack_int* n, lapack_complex_float* ap,\n                    const lapack_int* ipiv, lapack_complex_float* work,\n                    lapack_int *info );\nvoid LAPACK_zhptri( char* uplo, lapack_int* n, lapack_complex_double* ap,\n                    const lapack_int* ipiv, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_strtri( char* uplo, char* diag, lapack_int* n, float* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_dtrtri( char* uplo, char* diag, lapack_int* n, double* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_ctrtri( char* uplo, char* diag, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_ztrtri( char* uplo, char* diag, lapack_int* n,\n                    
lapack_complex_double* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_dtftri( char* transr, char* uplo, char* diag, lapack_int* n,\n                    double* a, lapack_int *info );\nvoid LAPACK_stftri( char* transr, char* uplo, char* diag, lapack_int* n,\n                    float* a, lapack_int *info );\nvoid LAPACK_ztftri( char* transr, char* uplo, char* diag, lapack_int* n,\n                    lapack_complex_double* a, lapack_int *info );\nvoid LAPACK_ctftri( char* transr, char* uplo, char* diag, lapack_int* n,\n                    lapack_complex_float* a, lapack_int *info );\nvoid LAPACK_stptri( char* uplo, char* diag, lapack_int* n, float* ap,\n                    lapack_int *info );\nvoid LAPACK_dtptri( char* uplo, char* diag, lapack_int* n, double* ap,\n                    lapack_int *info );\nvoid LAPACK_ctptri( char* uplo, char* diag, lapack_int* n,\n                    lapack_complex_float* ap, lapack_int *info );\nvoid LAPACK_ztptri( char* uplo, char* diag, lapack_int* n,\n                    lapack_complex_double* ap, lapack_int *info );\nvoid LAPACK_sgeequ( lapack_int* m, lapack_int* n, const float* a,\n                    lapack_int* lda, float* r, float* c, float* rowcnd,\n                    float* colcnd, float* amax, lapack_int *info );\nvoid LAPACK_dgeequ( lapack_int* m, lapack_int* n, const double* a,\n                    lapack_int* lda, double* r, double* c, double* rowcnd,\n                    double* colcnd, double* amax, lapack_int *info );\nvoid LAPACK_cgeequ( lapack_int* m, lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, float* r, float* c, float* rowcnd,\n                    float* colcnd, float* amax, lapack_int *info );\nvoid LAPACK_zgeequ( lapack_int* m, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda, double* r,\n                    double* c, double* rowcnd, double* colcnd, double* amax,\n                    lapack_int *info 
);\nvoid LAPACK_dgeequb( lapack_int* m, lapack_int* n, const double* a,\n                     lapack_int* lda, double* r, double* c, double* rowcnd,\n                     double* colcnd, double* amax, lapack_int *info );\nvoid LAPACK_sgeequb( lapack_int* m, lapack_int* n, const float* a,\n                     lapack_int* lda, float* r, float* c, float* rowcnd,\n                     float* colcnd, float* amax, lapack_int *info );\nvoid LAPACK_zgeequb( lapack_int* m, lapack_int* n,\n                     const lapack_complex_double* a, lapack_int* lda, double* r,\n                     double* c, double* rowcnd, double* colcnd, double* amax,\n                     lapack_int *info );\nvoid LAPACK_cgeequb( lapack_int* m, lapack_int* n,\n                     const lapack_complex_float* a, lapack_int* lda, float* r,\n                     float* c, float* rowcnd, float* colcnd, float* amax,\n                     lapack_int *info );\nvoid LAPACK_sgbequ( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const float* ab, lapack_int* ldab, float* r,\n                    float* c, float* rowcnd, float* colcnd, float* amax,\n                    lapack_int *info );\nvoid LAPACK_dgbequ( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const double* ab, lapack_int* ldab,\n                    double* r, double* c, double* rowcnd, double* colcnd,\n                    double* amax, lapack_int *info );\nvoid LAPACK_cgbequ( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const lapack_complex_float* ab,\n                    lapack_int* ldab, float* r, float* c, float* rowcnd,\n                    float* colcnd, float* amax, lapack_int *info );\nvoid LAPACK_zgbequ( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const lapack_complex_double* ab,\n                    lapack_int* ldab, double* r, double* c, double* rowcnd,\n                    double* 
colcnd, double* amax, lapack_int *info );\nvoid LAPACK_dgbequb( lapack_int* m, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, const double* ab, lapack_int* ldab,\n                     double* r, double* c, double* rowcnd, double* colcnd,\n                     double* amax, lapack_int *info );\nvoid LAPACK_sgbequb( lapack_int* m, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, const float* ab, lapack_int* ldab,\n                     float* r, float* c, float* rowcnd, float* colcnd,\n                     float* amax, lapack_int *info );\nvoid LAPACK_zgbequb( lapack_int* m, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, const lapack_complex_double* ab,\n                     lapack_int* ldab, double* r, double* c, double* rowcnd,\n                     double* colcnd, double* amax, lapack_int *info );\nvoid LAPACK_cgbequb( lapack_int* m, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, const lapack_complex_float* ab,\n                     lapack_int* ldab, float* r, float* c, float* rowcnd,\n                     float* colcnd, float* amax, lapack_int *info );\nvoid LAPACK_spoequ( lapack_int* n, const float* a, lapack_int* lda, float* s,\n                    float* scond, float* amax, lapack_int *info );\nvoid LAPACK_dpoequ( lapack_int* n, const double* a, lapack_int* lda, double* s,\n                    double* scond, double* amax, lapack_int *info );\nvoid LAPACK_cpoequ( lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, float* s, float* scond, float* amax,\n                    lapack_int *info );\nvoid LAPACK_zpoequ( lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, double* s, double* scond, double* amax,\n                    lapack_int *info );\nvoid LAPACK_dpoequb( lapack_int* n, const double* a, lapack_int* lda, double* s,\n                     double* scond, double* amax, lapack_int *info );\nvoid LAPACK_spoequb( 
lapack_int* n, const float* a, lapack_int* lda, float* s,\n                     float* scond, float* amax, lapack_int *info );\nvoid LAPACK_zpoequb( lapack_int* n, const lapack_complex_double* a,\n                     lapack_int* lda, double* s, double* scond, double* amax,\n                     lapack_int *info );\nvoid LAPACK_cpoequb( lapack_int* n, const lapack_complex_float* a,\n                     lapack_int* lda, float* s, float* scond, float* amax,\n                     lapack_int *info );\nvoid LAPACK_sppequ( char* uplo, lapack_int* n, const float* ap, float* s,\n                    float* scond, float* amax, lapack_int *info );\nvoid LAPACK_dppequ( char* uplo, lapack_int* n, const double* ap, double* s,\n                    double* scond, double* amax, lapack_int *info );\nvoid LAPACK_cppequ( char* uplo, lapack_int* n, const lapack_complex_float* ap,\n                    float* s, float* scond, float* amax, lapack_int *info );\nvoid LAPACK_zppequ( char* uplo, lapack_int* n, const lapack_complex_double* ap,\n                    double* s, double* scond, double* amax, lapack_int *info );\nvoid LAPACK_spbequ( char* uplo, lapack_int* n, lapack_int* kd, const float* ab,\n                    lapack_int* ldab, float* s, float* scond, float* amax,\n                    lapack_int *info );\nvoid LAPACK_dpbequ( char* uplo, lapack_int* n, lapack_int* kd, const double* ab,\n                    lapack_int* ldab, double* s, double* scond, double* amax,\n                    lapack_int *info );\nvoid LAPACK_cpbequ( char* uplo, lapack_int* n, lapack_int* kd,\n                    const lapack_complex_float* ab, lapack_int* ldab, float* s,\n                    float* scond, float* amax, lapack_int *info );\nvoid LAPACK_zpbequ( char* uplo, lapack_int* n, lapack_int* kd,\n                    const lapack_complex_double* ab, lapack_int* ldab,\n                    double* s, double* scond, double* amax, lapack_int *info );\nvoid LAPACK_dsyequb( char* uplo, lapack_int* n, const 
double* a,\n                     lapack_int* lda, double* s, double* scond, double* amax,\n                     double* work, lapack_int *info );\nvoid LAPACK_ssyequb( char* uplo, lapack_int* n, const float* a, lapack_int* lda,\n                     float* s, float* scond, float* amax, float* work,\n                     lapack_int *info );\nvoid LAPACK_zsyequb( char* uplo, lapack_int* n, const lapack_complex_double* a,\n                     lapack_int* lda, double* s, double* scond, double* amax,\n                     lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_csyequb( char* uplo, lapack_int* n, const lapack_complex_float* a,\n                     lapack_int* lda, float* s, float* scond, float* amax,\n                     lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zheequb( char* uplo, lapack_int* n, const lapack_complex_double* a,\n                     lapack_int* lda, double* s, double* scond, double* amax,\n                     lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_cheequb( char* uplo, lapack_int* n, const lapack_complex_float* a,\n                     lapack_int* lda, float* s, float* scond, float* amax,\n                     lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_sgesv( lapack_int* n, lapack_int* nrhs, float* a, lapack_int* lda,\n                   lapack_int* ipiv, float* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_dgesv( lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda,\n                   lapack_int* ipiv, double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_cgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_float* a,\n                   lapack_int* lda, lapack_int* ipiv, lapack_complex_float* b,\n                   lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* a,\n                   lapack_int* lda, lapack_int* ipiv, lapack_complex_double* 
b,\n                   lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dsgesv( lapack_int* n, lapack_int* nrhs, double* a, lapack_int* lda,\n                    lapack_int* ipiv, double* b, lapack_int* ldb, double* x,\n                    lapack_int* ldx, double* work, float* swork,\n                    lapack_int* iter, lapack_int *info );\nvoid LAPACK_zcgesv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* ipiv, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    lapack_complex_double* work, lapack_complex_float* swork,\n                    double* rwork, lapack_int* iter, lapack_int *info );\nvoid LAPACK_sgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    float* a, lapack_int* lda, float* af, lapack_int* ldaf,\n                    lapack_int* ipiv, char* equed, float* r, float* c, float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    double* a, lapack_int* lda, double* af, lapack_int* ldaf,\n                    lapack_int* ipiv, char* equed, double* r, double* c,\n                    double* b, lapack_int* ldb, double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* af, lapack_int* ldaf,\n                    lapack_int* ipiv, char* equed, float* r, float* c,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    
lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zgesvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* af, lapack_int* ldaf,\n                    lapack_int* ipiv, char* equed, double* r, double* c,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_dgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                     double* a, lapack_int* lda, double* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, double* r, double* c,\n                     double* b, lapack_int* ldb, double* x, lapack_int* ldx,\n                     double* rcond, double* rpvgrw, double* berr,\n                     lapack_int* n_err_bnds, double* err_bnds_norm,\n                     double* err_bnds_comp, lapack_int* nparams, double* params,\n                     double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                     float* a, lapack_int* lda, float* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, float* r, float* c,\n                     float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                     float* rcond, float* rpvgrw, float* berr,\n                     lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zgesvxx( char* fact, char* trans, 
lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_double* a, lapack_int* lda,\n                     lapack_complex_double* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, double* r, double* c,\n                     lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* rpvgrw, double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_cgesvxx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_float* a, lapack_int* lda,\n                     lapack_complex_float* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, float* r, float* c,\n                     lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* rpvgrw, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_sgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku,\n                   lapack_int* nrhs, float* ab, lapack_int* ldab,\n                   lapack_int* ipiv, float* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_dgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku,\n                   lapack_int* nrhs, double* ab, lapack_int* ldab,\n                   lapack_int* ipiv, double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_cgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku,\n                   
lapack_int* nrhs, lapack_complex_float* ab, lapack_int* ldab,\n                   lapack_int* ipiv, lapack_complex_float* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_zgbsv( lapack_int* n, lapack_int* kl, lapack_int* ku,\n                   lapack_int* nrhs, lapack_complex_double* ab,\n                   lapack_int* ldab, lapack_int* ipiv, lapack_complex_double* b,\n                   lapack_int* ldb, lapack_int *info );\nvoid LAPACK_sgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, lapack_int* nrhs, float* ab,\n                    lapack_int* ldab, float* afb, lapack_int* ldafb,\n                    lapack_int* ipiv, char* equed, float* r, float* c, float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, lapack_int* nrhs, double* ab,\n                    lapack_int* ldab, double* afb, lapack_int* ldafb,\n                    lapack_int* ipiv, char* equed, double* r, double* c,\n                    double* b, lapack_int* ldb, double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, lapack_int* nrhs, lapack_complex_float* ab,\n                    lapack_int* ldab, lapack_complex_float* afb,\n                    lapack_int* ldafb, lapack_int* ipiv, char* equed, float* r,\n                    float* c, lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                  
  float* rwork, lapack_int *info );\nvoid LAPACK_zgbsvx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, lapack_int* nrhs, lapack_complex_double* ab,\n                    lapack_int* ldab, lapack_complex_double* afb,\n                    lapack_int* ldafb, lapack_int* ipiv, char* equed, double* r,\n                    double* c, lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_dgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs, double* ab,\n                     lapack_int* ldab, double* afb, lapack_int* ldafb,\n                     lapack_int* ipiv, char* equed, double* r, double* c,\n                     double* b, lapack_int* ldb, double* x, lapack_int* ldx,\n                     double* rcond, double* rpvgrw, double* berr,\n                     lapack_int* n_err_bnds, double* err_bnds_norm,\n                     double* err_bnds_comp, lapack_int* nparams, double* params,\n                     double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs, float* ab,\n                     lapack_int* ldab, float* afb, lapack_int* ldafb,\n                     lapack_int* ipiv, char* equed, float* r, float* c,\n                     float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                     float* rcond, float* rpvgrw, float* berr,\n                     lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zgbsvxx( char* fact, char* trans, 
lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs,\n                     lapack_complex_double* ab, lapack_int* ldab,\n                     lapack_complex_double* afb, lapack_int* ldafb,\n                     lapack_int* ipiv, char* equed, double* r, double* c,\n                     lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* rpvgrw, double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_cgbsvxx( char* fact, char* trans, lapack_int* n, lapack_int* kl,\n                     lapack_int* ku, lapack_int* nrhs, lapack_complex_float* ab,\n                     lapack_int* ldab, lapack_complex_float* afb,\n                     lapack_int* ldafb, lapack_int* ipiv, char* equed, float* r,\n                     float* c, lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* rpvgrw, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_sgtsv( lapack_int* n, lapack_int* nrhs, float* dl, float* d,\n                   float* du, float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dgtsv( lapack_int* n, lapack_int* nrhs, double* dl, double* d,\n                   double* du, double* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cgtsv( lapack_int* n, lapack_int* nrhs, lapack_complex_float* dl,\n                   lapack_complex_float* d, lapack_complex_float* du,\n                   
lapack_complex_float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zgtsv( lapack_int* n, lapack_int* nrhs, lapack_complex_double* dl,\n                   lapack_complex_double* d, lapack_complex_double* du,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_sgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    const float* dl, const float* d, const float* du,\n                    float* dlf, float* df, float* duf, float* du2,\n                    lapack_int* ipiv, const float* b, lapack_int* ldb, float* x,\n                    lapack_int* ldx, float* rcond, float* ferr, float* berr,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    const double* dl, const double* d, const double* du,\n                    double* dlf, double* df, double* duf, double* du2,\n                    lapack_int* ipiv, const double* b, lapack_int* ldb,\n                    double* x, lapack_int* ldx, double* rcond, double* ferr,\n                    double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cgtsvx( char* fact, char* trans, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* dl,\n                    const lapack_complex_float* d,\n                    const lapack_complex_float* du, lapack_complex_float* dlf,\n                    lapack_complex_float* df, lapack_complex_float* duf,\n                    lapack_complex_float* du2, lapack_int* ipiv,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zgtsvx( char* fact, char* trans, lapack_int* n, 
lapack_int* nrhs,\n                    const lapack_complex_double* dl,\n                    const lapack_complex_double* d,\n                    const lapack_complex_double* du, lapack_complex_double* dlf,\n                    lapack_complex_double* df, lapack_complex_double* duf,\n                    lapack_complex_double* du2, lapack_int* ipiv,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sposv( char* uplo, lapack_int* n, lapack_int* nrhs, float* a,\n                   lapack_int* lda, float* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_dposv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a,\n                   lapack_int* lda, double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_cposv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_float* a, lapack_int* lda,\n                   lapack_complex_float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zposv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_double* a, lapack_int* lda,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_dsposv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a,\n                    lapack_int* lda, double* b, lapack_int* ldb, double* x,\n                    lapack_int* ldx, double* work, float* swork,\n                    lapack_int* iter, lapack_int *info );\nvoid LAPACK_zcposv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx,\n                    
lapack_complex_double* work, lapack_complex_float* swork,\n                    double* rwork, lapack_int* iter, lapack_int *info );\nvoid LAPACK_sposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    float* a, lapack_int* lda, float* af, lapack_int* ldaf,\n                    char* equed, float* s, float* b, lapack_int* ldb, float* x,\n                    lapack_int* ldx, float* rcond, float* ferr, float* berr,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    double* a, lapack_int* lda, double* af, lapack_int* ldaf,\n                    char* equed, double* s, double* b, lapack_int* ldb,\n                    double* x, lapack_int* ldx, double* rcond, double* ferr,\n                    double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* af, lapack_int* ldaf, char* equed,\n                    float* s, lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zposvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* af, lapack_int* ldaf, char* equed,\n                    double* s, lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_dposvxx( char* fact, char* uplo, 
lapack_int* n, lapack_int* nrhs,\n                     double* a, lapack_int* lda, double* af, lapack_int* ldaf,\n                     char* equed, double* s, double* b, lapack_int* ldb,\n                     double* x, lapack_int* ldx, double* rcond, double* rpvgrw,\n                     double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params, double* work,\n                     lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     float* a, lapack_int* lda, float* af, lapack_int* ldaf,\n                     char* equed, float* s, float* b, lapack_int* ldb, float* x,\n                     lapack_int* ldx, float* rcond, float* rpvgrw, float* berr,\n                     lapack_int* n_err_bnds, float* err_bnds_norm,\n                     float* err_bnds_comp, lapack_int* nparams, float* params,\n                     float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_double* a, lapack_int* lda,\n                     lapack_complex_double* af, lapack_int* ldaf, char* equed,\n                     double* s, lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* rpvgrw, double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_cposvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_float* a, lapack_int* lda,\n                     lapack_complex_float* af, lapack_int* ldaf, char* equed,\n              
       float* s, lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* rpvgrw, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_sppsv( char* uplo, lapack_int* n, lapack_int* nrhs, float* ap,\n                   float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dppsv( char* uplo, lapack_int* n, lapack_int* nrhs, double* ap,\n                   double* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cppsv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_float* ap, lapack_complex_float* b,\n                   lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zppsv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_double* ap, lapack_complex_double* b,\n                   lapack_int* ldb, lapack_int *info );\nvoid LAPACK_sppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    float* ap, float* afp, char* equed, float* s, float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    double* ap, double* afp, char* equed, double* s, double* b,\n                    lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_float* ap, lapack_complex_float* afp,\n             
       char* equed, float* s, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* rcond, float* ferr, float* berr,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zppsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_double* ap, lapack_complex_double* afp,\n                    char* equed, double* s, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_spbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                   float* ab, lapack_int* ldab, float* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_dpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                   double* ab, lapack_int* ldab, double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_cpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                   lapack_complex_float* ab, lapack_int* ldab,\n                   lapack_complex_float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zpbsv( char* uplo, lapack_int* n, lapack_int* kd, lapack_int* nrhs,\n                   lapack_complex_double* ab, lapack_int* ldab,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_spbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_int* nrhs, float* ab, lapack_int* ldab, float* afb,\n                    lapack_int* ldafb, char* equed, float* s, float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* 
berr, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_int* nrhs, double* ab, lapack_int* ldab, double* afb,\n                    lapack_int* ldafb, char* equed, double* s, double* b,\n                    lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_cpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_int* nrhs, lapack_complex_float* ab,\n                    lapack_int* ldab, lapack_complex_float* afb,\n                    lapack_int* ldafb, char* equed, float* s,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zpbsvx( char* fact, char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_int* nrhs, lapack_complex_double* ab,\n                    lapack_int* ldab, lapack_complex_double* afb,\n                    lapack_int* ldafb, char* equed, double* s,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sptsv( lapack_int* n, lapack_int* nrhs, float* d, float* e,\n                   float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_dptsv( lapack_int* n, lapack_int* nrhs, double* d, double* e,\n                   double* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_cptsv( lapack_int* n, lapack_int* nrhs, float* d,\n                   lapack_complex_float* e, 
lapack_complex_float* b,\n                   lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zptsv( lapack_int* n, lapack_int* nrhs, double* d,\n                   lapack_complex_double* e, lapack_complex_double* b,\n                   lapack_int* ldb, lapack_int *info );\nvoid LAPACK_sptsvx( char* fact, lapack_int* n, lapack_int* nrhs, const float* d,\n                    const float* e, float* df, float* ef, const float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, float* work, lapack_int *info );\nvoid LAPACK_dptsvx( char* fact, lapack_int* n, lapack_int* nrhs,\n                    const double* d, const double* e, double* df, double* ef,\n                    const double* b, lapack_int* ldb, double* x,\n                    lapack_int* ldx, double* rcond, double* ferr, double* berr,\n                    double* work, lapack_int *info );\nvoid LAPACK_cptsvx( char* fact, lapack_int* n, lapack_int* nrhs, const float* d,\n                    const lapack_complex_float* e, float* df,\n                    lapack_complex_float* ef, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* rcond, float* ferr, float* berr,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zptsvx( char* fact, lapack_int* n, lapack_int* nrhs,\n                    const double* d, const lapack_complex_double* e, double* df,\n                    lapack_complex_double* ef, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_ssysv( char* uplo, lapack_int* n, lapack_int* nrhs, float* a,\n                   lapack_int* lda, 
lapack_int* ipiv, float* b, lapack_int* ldb,\n                   float* work, lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dsysv( char* uplo, lapack_int* n, lapack_int* nrhs, double* a,\n                   lapack_int* lda, lapack_int* ipiv, double* b,\n                   lapack_int* ldb, double* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_csysv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv,\n                   lapack_complex_float* b, lapack_int* ldb,\n                   lapack_complex_float* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_zsysv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_ssysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const float* a, lapack_int* lda, float* af,\n                    lapack_int* ldaf, lapack_int* ipiv, const float* b,\n                    lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                    float* ferr, float* berr, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dsysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* a, lapack_int* lda, double* af,\n                    lapack_int* ldaf, lapack_int* ipiv, const double* b,\n                    lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,\n                    double* ferr, double* berr, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_csysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const 
lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* af, lapack_int* ldaf,\n                    lapack_int* ipiv, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* rcond, float* ferr, float* berr,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zsysvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* af, lapack_int* ldaf,\n                    lapack_int* ipiv, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_dsysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     double* a, lapack_int* lda, double* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, double* s, double* b,\n                     lapack_int* ldb, double* x, lapack_int* ldx, double* rcond,\n                     double* rpvgrw, double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params, double* work,\n                     lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ssysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     float* a, lapack_int* lda, float* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, float* s, float* b,\n                     lapack_int* ldb, float* x, lapack_int* ldx, float* rcond,\n                     float* rpvgrw, float* berr, lapack_int* n_err_bnds,\n                     
float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params, float* work,\n                     lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zsysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_double* a, lapack_int* lda,\n                     lapack_complex_double* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, double* s,\n                     lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* rpvgrw, double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                     lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_csysvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_float* a, lapack_int* lda,\n                     lapack_complex_float* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, float* s,\n                     lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* rpvgrw, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_chesv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_float* a, lapack_int* lda, lapack_int* ipiv,\n                   lapack_complex_float* b, lapack_int* ldb,\n                   lapack_complex_float* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_zhesv( char* uplo, lapack_int* n, lapack_int* 
nrhs,\n                   lapack_complex_double* a, lapack_int* lda, lapack_int* ipiv,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_chesvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* af, lapack_int* ldaf,\n                    lapack_int* ipiv, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* rcond, float* ferr, float* berr,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zhesvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* af, lapack_int* ldaf,\n                    lapack_int* ipiv, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_zhesvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_double* a, lapack_int* lda,\n                     lapack_complex_double* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, double* s,\n                     lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* x, lapack_int* ldx, double* rcond,\n                     double* rpvgrw, double* berr, lapack_int* n_err_bnds,\n                     double* err_bnds_norm, double* err_bnds_comp,\n                     lapack_int* nparams, double* params,\n                 
    lapack_complex_double* work, double* rwork,\n                     lapack_int *info );\nvoid LAPACK_chesvxx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                     lapack_complex_float* a, lapack_int* lda,\n                     lapack_complex_float* af, lapack_int* ldaf,\n                     lapack_int* ipiv, char* equed, float* s,\n                     lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* x, lapack_int* ldx, float* rcond,\n                     float* rpvgrw, float* berr, lapack_int* n_err_bnds,\n                     float* err_bnds_norm, float* err_bnds_comp,\n                     lapack_int* nparams, float* params,\n                     lapack_complex_float* work, float* rwork,\n                     lapack_int *info );\nvoid LAPACK_sspsv( char* uplo, lapack_int* n, lapack_int* nrhs, float* ap,\n                   lapack_int* ipiv, float* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_dspsv( char* uplo, lapack_int* n, lapack_int* nrhs, double* ap,\n                   lapack_int* ipiv, double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_cspsv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_float* ap, lapack_int* ipiv,\n                   lapack_complex_float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zspsv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_double* ap, lapack_int* ipiv,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_sspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const float* ap, float* afp, lapack_int* ipiv,\n                    const float* b, lapack_int* ldb, float* x, lapack_int* ldx,\n                    float* rcond, float* ferr, float* berr, float* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dspsvx( 
char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const double* ap, double* afp, lapack_int* ipiv,\n                    const double* b, lapack_int* ldb, double* x,\n                    lapack_int* ldx, double* rcond, double* ferr, double* berr,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap, lapack_complex_float* afp,\n                    lapack_int* ipiv, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* rcond, float* ferr, float* berr,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zspsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* ap, lapack_complex_double* afp,\n                    lapack_int* ipiv, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_chpsv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_float* ap, lapack_int* ipiv,\n                   lapack_complex_float* b, lapack_int* ldb, lapack_int *info );\nvoid LAPACK_zhpsv( char* uplo, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_double* ap, lapack_int* ipiv,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_int *info );\nvoid LAPACK_chpsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_float* ap, lapack_complex_float* afp,\n                    lapack_int* ipiv, const lapack_complex_float* b,\n                    
lapack_int* ldb, lapack_complex_float* x, lapack_int* ldx,\n                    float* rcond, float* ferr, float* berr,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zhpsvx( char* fact, char* uplo, lapack_int* n, lapack_int* nrhs,\n                    const lapack_complex_double* ap, lapack_complex_double* afp,\n                    lapack_int* ipiv, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* x, lapack_int* ldx,\n                    double* rcond, double* ferr, double* berr,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sgeqrf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* tau, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dgeqrf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* tau, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cgeqrf( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zgeqrf( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sgeqpf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* jpvt, float* tau, float* work,\n                    lapack_int *info );\nvoid LAPACK_dgeqpf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* jpvt, double* tau, double* work,\n                    lapack_int *info );\nvoid LAPACK_cgeqpf( lapack_int* m, lapack_int* n, 
lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* jpvt,\n                    lapack_complex_float* tau, lapack_complex_float* work,\n                    float* rwork, lapack_int *info );\nvoid LAPACK_zgeqpf( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* jpvt,\n                    lapack_complex_double* tau, lapack_complex_double* work,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sgeqp3( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* jpvt, float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dgeqp3( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* jpvt, double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgeqp3( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* jpvt,\n                    lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int *info );\nvoid LAPACK_zgeqp3( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* jpvt,\n                    lapack_complex_double* tau, lapack_complex_double* work,\n                    lapack_int* lwork, double* rwork, lapack_int *info );\nvoid LAPACK_sorgqr( lapack_int* m, lapack_int* n, lapack_int* k, float* a,\n                    lapack_int* lda, const float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dorgqr( lapack_int* m, lapack_int* n, lapack_int* k, double* a,\n                    lapack_int* lda, const double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sormqr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const float* a, 
lapack_int* lda,\n                    const float* tau, float* c, lapack_int* ldc, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dormqr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const double* a, lapack_int* lda,\n                    const double* tau, double* c, lapack_int* ldc, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cungqr( lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zungqr( lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cunmqr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zunmqr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* tau,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sgelqf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* tau, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dgelqf( 
lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* tau, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cgelqf( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zgelqf( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sorglq( lapack_int* m, lapack_int* n, lapack_int* k, float* a,\n                    lapack_int* lda, const float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dorglq( lapack_int* m, lapack_int* n, lapack_int* k, double* a,\n                    lapack_int* lda, const double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sormlq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const float* a, lapack_int* lda,\n                    const float* tau, float* c, lapack_int* ldc, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dormlq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const double* a, lapack_int* lda,\n                    const double* tau, double* c, lapack_int* ldc, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cunglq( lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zunglq( lapack_int* m, lapack_int* n, lapack_int* k,\n        
            lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cunmlq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zunmlq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* tau,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sgeqlf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* tau, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dgeqlf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* tau, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cgeqlf( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zgeqlf( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sorgql( lapack_int* m, lapack_int* n, lapack_int* k, float* a,\n                    lapack_int* lda, const float* 
tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dorgql( lapack_int* m, lapack_int* n, lapack_int* k, double* a,\n                    lapack_int* lda, const double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cungql( lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zungql( lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sormql( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const float* a, lapack_int* lda,\n                    const float* tau, float* c, lapack_int* ldc, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dormql( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const double* a, lapack_int* lda,\n                    const double* tau, double* c, lapack_int* ldc, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cunmql( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zunmql( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_double* a,\n                    lapack_int* lda, const 
lapack_complex_double* tau,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sgerqf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* tau, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dgerqf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* tau, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cgerqf( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zgerqf( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sorgrq( lapack_int* m, lapack_int* n, lapack_int* k, float* a,\n                    lapack_int* lda, const float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dorgrq( lapack_int* m, lapack_int* n, lapack_int* k, double* a,\n                    lapack_int* lda, const double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cungrq( lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zungrq( lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau,\n                    lapack_complex_double* work, 
lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sormrq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const float* a, lapack_int* lda,\n                    const float* tau, float* c, lapack_int* ldc, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dormrq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const double* a, lapack_int* lda,\n                    const double* tau, double* c, lapack_int* ldc, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cunmrq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zunmrq( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* tau,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_stzrzf( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* tau, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dtzrzf( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* tau, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_ctzrzf( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int* lwork,\n           
         lapack_int *info );\nvoid LAPACK_ztzrzf( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sormrz( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, lapack_int* l, const float* a,\n                    lapack_int* lda, const float* tau, float* c,\n                    lapack_int* ldc, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dormrz( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, lapack_int* l, const double* a,\n                    lapack_int* lda, const double* tau, double* c,\n                    lapack_int* ldc, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cunmrz( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, lapack_int* l, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zunmrz( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* k, lapack_int* l,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau, lapack_complex_double* c,\n                    lapack_int* ldc, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sggqrf( lapack_int* n, lapack_int* m, lapack_int* p, float* a,\n                    lapack_int* lda, float* taua, float* b, lapack_int* ldb,\n                    float* taub, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid 
LAPACK_dggqrf( lapack_int* n, lapack_int* m, lapack_int* p, double* a,\n                    lapack_int* lda, double* taua, double* b, lapack_int* ldb,\n                    double* taub, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cggqrf( lapack_int* n, lapack_int* m, lapack_int* p,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* taua, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* taub,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zggqrf( lapack_int* n, lapack_int* m, lapack_int* p,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* taua, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* taub,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sggrqf( lapack_int* m, lapack_int* p, lapack_int* n, float* a,\n                    lapack_int* lda, float* taua, float* b, lapack_int* ldb,\n                    float* taub, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dggrqf( lapack_int* m, lapack_int* p, lapack_int* n, double* a,\n                    lapack_int* lda, double* taua, double* b, lapack_int* ldb,\n                    double* taub, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cggrqf( lapack_int* m, lapack_int* p, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* taua, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* taub,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zggrqf( lapack_int* m, lapack_int* p, lapack_int* n,\n       
             lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* taua, lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* taub,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sgebrd( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* d, float* e, float* tauq, float* taup, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dgebrd( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* d, double* e, double* tauq, double* taup,\n                    double* work, lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgebrd( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, float* d, float* e,\n                    lapack_complex_float* tauq, lapack_complex_float* taup,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zgebrd( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, double* d, double* e,\n                    lapack_complex_double* tauq, lapack_complex_double* taup,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc,\n                    lapack_int* kl, lapack_int* ku, float* ab, lapack_int* ldab,\n                    float* d, float* e, float* q, lapack_int* ldq, float* pt,\n                    lapack_int* ldpt, float* c, lapack_int* ldc, float* work,\n                    lapack_int *info );\nvoid LAPACK_dgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc,\n                    lapack_int* kl, lapack_int* ku, double* ab,\n                    lapack_int* ldab, double* d, double* e, double* q,\n                    
lapack_int* ldq, double* pt, lapack_int* ldpt, double* c,\n                    lapack_int* ldc, double* work, lapack_int *info );\nvoid LAPACK_cgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc,\n                    lapack_int* kl, lapack_int* ku, lapack_complex_float* ab,\n                    lapack_int* ldab, float* d, float* e,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* pt, lapack_int* ldpt,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zgbbrd( char* vect, lapack_int* m, lapack_int* n, lapack_int* ncc,\n                    lapack_int* kl, lapack_int* ku, lapack_complex_double* ab,\n                    lapack_int* ldab, double* d, double* e,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* pt, lapack_int* ldpt,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_sorgbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k,\n                    float* a, lapack_int* lda, const float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dorgbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k,\n                    double* a, lapack_int* lda, const double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sormbr( char* vect, char* side, char* trans, lapack_int* m,\n                    lapack_int* n, lapack_int* k, const float* a,\n                    lapack_int* lda, const float* tau, float* c,\n                    lapack_int* ldc, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dormbr( char* vect, char* side, char* trans, lapack_int* m,\n        
            lapack_int* n, lapack_int* k, const double* a,\n                    lapack_int* lda, const double* tau, double* c,\n                    lapack_int* ldc, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cungbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zungbr( char* vect, lapack_int* m, lapack_int* n, lapack_int* k,\n                    lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cunmbr( char* vect, char* side, char* trans, lapack_int* m,\n                    lapack_int* n, lapack_int* k, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zunmbr( char* vect, char* side, char* trans, lapack_int* m,\n                    lapack_int* n, lapack_int* k,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau, lapack_complex_double* c,\n                    lapack_int* ldc, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt,\n                    lapack_int* nru, lapack_int* ncc, float* d, float* e,\n                    float* vt, lapack_int* ldvt, float* u, lapack_int* ldu,\n                    float* c, lapack_int* ldc, float* work, lapack_int *info );\nvoid LAPACK_dbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt,\n     
               lapack_int* nru, lapack_int* ncc, double* d, double* e,\n                    double* vt, lapack_int* ldvt, double* u, lapack_int* ldu,\n                    double* c, lapack_int* ldc, double* work,\n                    lapack_int *info );\nvoid LAPACK_cbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt,\n                    lapack_int* nru, lapack_int* ncc, float* d, float* e,\n                    lapack_complex_float* vt, lapack_int* ldvt,\n                    lapack_complex_float* u, lapack_int* ldu,\n                    lapack_complex_float* c, lapack_int* ldc, float* work,\n                    lapack_int *info );\nvoid LAPACK_zbdsqr( char* uplo, lapack_int* n, lapack_int* ncvt,\n                    lapack_int* nru, lapack_int* ncc, double* d, double* e,\n                    lapack_complex_double* vt, lapack_int* ldvt,\n                    lapack_complex_double* u, lapack_int* ldu,\n                    lapack_complex_double* c, lapack_int* ldc, double* work,\n                    lapack_int *info );\nvoid LAPACK_sbdsdc( char* uplo, char* compq, lapack_int* n, float* d, float* e,\n                    float* u, lapack_int* ldu, float* vt, lapack_int* ldvt,\n                    float* q, lapack_int* iq, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dbdsdc( char* uplo, char* compq, lapack_int* n, double* d,\n                    double* e, double* u, lapack_int* ldu, double* vt,\n                    lapack_int* ldvt, double* q, lapack_int* iq, double* work,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ssytrd( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    float* d, float* e, float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dsytrd( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    double* d, double* e, double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info 
);\nvoid LAPACK_sorgtr( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    const float* tau, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dorgtr( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    const double* tau, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_sormtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const float* a, lapack_int* lda,\n                    const float* tau, float* c, lapack_int* ldc, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dormtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const double* a, lapack_int* lda,\n                    const double* tau, double* c, lapack_int* ldc, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_chetrd( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, float* d, float* e,\n                    lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zhetrd( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, double* d, double* e,\n                    lapack_complex_double* tau, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cungtr( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zungtr( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid 
LAPACK_cunmtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_zunmtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* tau,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_ssptrd( char* uplo, lapack_int* n, float* ap, float* d, float* e,\n                    float* tau, lapack_int *info );\nvoid LAPACK_dsptrd( char* uplo, lapack_int* n, double* ap, double* d, double* e,\n                    double* tau, lapack_int *info );\nvoid LAPACK_sopgtr( char* uplo, lapack_int* n, const float* ap,\n                    const float* tau, float* q, lapack_int* ldq, float* work,\n                    lapack_int *info );\nvoid LAPACK_dopgtr( char* uplo, lapack_int* n, const double* ap,\n                    const double* tau, double* q, lapack_int* ldq, double* work,\n                    lapack_int *info );\nvoid LAPACK_sopmtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const float* ap, const float* tau, float* c,\n                    lapack_int* ldc, float* work, lapack_int *info );\nvoid LAPACK_dopmtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const double* ap, const double* tau,\n                    double* c, lapack_int* ldc, double* work,\n                    lapack_int *info );\nvoid LAPACK_chptrd( char* uplo, lapack_int* n, lapack_complex_float* ap,\n                    float* d, float* e, 
lapack_complex_float* tau,\n                    lapack_int *info );\nvoid LAPACK_zhptrd( char* uplo, lapack_int* n, lapack_complex_double* ap,\n                    double* d, double* e, lapack_complex_double* tau,\n                    lapack_int *info );\nvoid LAPACK_cupgtr( char* uplo, lapack_int* n, const lapack_complex_float* ap,\n                    const lapack_complex_float* tau, lapack_complex_float* q,\n                    lapack_int* ldq, lapack_complex_float* work,\n                    lapack_int *info );\nvoid LAPACK_zupgtr( char* uplo, lapack_int* n, const lapack_complex_double* ap,\n                    const lapack_complex_double* tau, lapack_complex_double* q,\n                    lapack_int* ldq, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_cupmtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const lapack_complex_float* ap,\n                    const lapack_complex_float* tau, lapack_complex_float* c,\n                    lapack_int* ldc, lapack_complex_float* work,\n                    lapack_int *info );\nvoid LAPACK_zupmtr( char* side, char* uplo, char* trans, lapack_int* m,\n                    lapack_int* n, const lapack_complex_double* ap,\n                    const lapack_complex_double* tau, lapack_complex_double* c,\n                    lapack_int* ldc, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_ssbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd,\n                    float* ab, lapack_int* ldab, float* d, float* e, float* q,\n                    lapack_int* ldq, float* work, lapack_int *info );\nvoid LAPACK_dsbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd,\n                    double* ab, lapack_int* ldab, double* d, double* e,\n                    double* q, lapack_int* ldq, double* work,\n                    lapack_int *info );\nvoid LAPACK_chbtrd( char* vect, char* uplo, lapack_int* n, 
lapack_int* kd,\n                    lapack_complex_float* ab, lapack_int* ldab, float* d,\n                    float* e, lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zhbtrd( char* vect, char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_complex_double* ab, lapack_int* ldab, double* d,\n                    double* e, lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_ssterf( lapack_int* n, float* d, float* e, lapack_int *info );\nvoid LAPACK_dsterf( lapack_int* n, double* d, double* e, lapack_int *info );\nvoid LAPACK_ssteqr( char* compz, lapack_int* n, float* d, float* e, float* z,\n                    lapack_int* ldz, float* work, lapack_int *info );\nvoid LAPACK_dsteqr( char* compz, lapack_int* n, double* d, double* e, double* z,\n                    lapack_int* ldz, double* work, lapack_int *info );\nvoid LAPACK_csteqr( char* compz, lapack_int* n, float* d, float* e,\n                    lapack_complex_float* z, lapack_int* ldz, float* work,\n                    lapack_int *info );\nvoid LAPACK_zsteqr( char* compz, lapack_int* n, double* d, double* e,\n                    lapack_complex_double* z, lapack_int* ldz, double* work,\n                    lapack_int *info );\nvoid LAPACK_sstemr( char* jobz, char* range, lapack_int* n, float* d, float* e,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    lapack_int* m, float* w, float* z, lapack_int* ldz,\n                    lapack_int* nzc, lapack_int* isuppz, lapack_logical* tryrac,\n                    float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dstemr( char* jobz, char* range, lapack_int* n, double* d,\n                    double* e, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, 
lapack_int* m, double* w, double* z,\n                    lapack_int* ldz, lapack_int* nzc, lapack_int* isuppz,\n                    lapack_logical* tryrac, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_cstemr( char* jobz, char* range, lapack_int* n, float* d, float* e,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    lapack_int* m, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_int* nzc, lapack_int* isuppz,\n                    lapack_logical* tryrac, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_zstemr( char* jobz, char* range, lapack_int* n, double* d,\n                    double* e, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, lapack_int* m, double* w,\n                    lapack_complex_double* z, lapack_int* ldz, lapack_int* nzc,\n                    lapack_int* isuppz, lapack_logical* tryrac, double* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_sstedc( char* compz, lapack_int* n, float* d, float* e, float* z,\n                    lapack_int* ldz, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dstedc( char* compz, lapack_int* n, double* d, double* e, double* z,\n                    lapack_int* ldz, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_cstedc( char* compz, lapack_int* n, float* d, float* e,\n                    lapack_complex_float* z, lapack_int* ldz,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,\n                    
lapack_int *info );\nvoid LAPACK_zstedc( char* compz, lapack_int* n, double* d, double* e,\n                    lapack_complex_double* z, lapack_int* ldz,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_sstegr( char* jobz, char* range, lapack_int* n, float* d, float* e,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    float* abstol, lapack_int* m, float* w, float* z,\n                    lapack_int* ldz, lapack_int* isuppz, float* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_dstegr( char* jobz, char* range, lapack_int* n, double* d,\n                    double* e, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    double* z, lapack_int* ldz, lapack_int* isuppz,\n                    double* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_cstegr( char* jobz, char* range, lapack_int* n, float* d, float* e,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    float* abstol, lapack_int* m, float* w,\n                    lapack_complex_float* z, lapack_int* ldz,\n                    lapack_int* isuppz, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_zstegr( char* jobz, char* range, lapack_int* n, double* d,\n                    double* e, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    lapack_complex_double* z, lapack_int* ldz,\n                    lapack_int* isuppz, double* work, lapack_int* lwork,\n 
                   lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_spteqr( char* compz, lapack_int* n, float* d, float* e, float* z,\n                    lapack_int* ldz, float* work, lapack_int *info );\nvoid LAPACK_dpteqr( char* compz, lapack_int* n, double* d, double* e, double* z,\n                    lapack_int* ldz, double* work, lapack_int *info );\nvoid LAPACK_cpteqr( char* compz, lapack_int* n, float* d, float* e,\n                    lapack_complex_float* z, lapack_int* ldz, float* work,\n                    lapack_int *info );\nvoid LAPACK_zpteqr( char* compz, lapack_int* n, double* d, double* e,\n                    lapack_complex_double* z, lapack_int* ldz, double* work,\n                    lapack_int *info );\nvoid LAPACK_sstebz( char* range, char* order, lapack_int* n, float* vl,\n                    float* vu, lapack_int* il, lapack_int* iu, float* abstol,\n                    const float* d, const float* e, lapack_int* m,\n                    lapack_int* nsplit, float* w, lapack_int* iblock,\n                    lapack_int* isplit, float* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dstebz( char* range, char* order, lapack_int* n, double* vl,\n                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,\n                    const double* d, const double* e, lapack_int* m,\n                    lapack_int* nsplit, double* w, lapack_int* iblock,\n                    lapack_int* isplit, double* work, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_sstein( lapack_int* n, const float* d, const float* e,\n                    lapack_int* m, const float* w, const lapack_int* iblock,\n                    const lapack_int* isplit, float* z, lapack_int* ldz,\n                    float* work, lapack_int* iwork, lapack_int* ifailv,\n                    lapack_int *info );\nvoid LAPACK_dstein( lapack_int* n, const double* d, const double* e,\n                    
lapack_int* m, const double* w, const lapack_int* iblock,\n                    const lapack_int* isplit, double* z, lapack_int* ldz,\n                    double* work, lapack_int* iwork, lapack_int* ifailv,\n                    lapack_int *info );\nvoid LAPACK_cstein( lapack_int* n, const float* d, const float* e,\n                    lapack_int* m, const float* w, const lapack_int* iblock,\n                    const lapack_int* isplit, lapack_complex_float* z,\n                    lapack_int* ldz, float* work, lapack_int* iwork,\n                    lapack_int* ifailv, lapack_int *info );\nvoid LAPACK_zstein( lapack_int* n, const double* d, const double* e,\n                    lapack_int* m, const double* w, const lapack_int* iblock,\n                    const lapack_int* isplit, lapack_complex_double* z,\n                    lapack_int* ldz, double* work, lapack_int* iwork,\n                    lapack_int* ifailv, lapack_int *info );\nvoid LAPACK_sdisna( char* job, lapack_int* m, lapack_int* n, const float* d,\n                    float* sep, lapack_int *info );\nvoid LAPACK_ddisna( char* job, lapack_int* m, lapack_int* n, const double* d,\n                    double* sep, lapack_int *info );\nvoid LAPACK_ssygst( lapack_int* itype, char* uplo, lapack_int* n, float* a,\n                    lapack_int* lda, const float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_dsygst( lapack_int* itype, char* uplo, lapack_int* n, double* a,\n                    lapack_int* lda, const double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_chegst( lapack_int* itype, char* uplo, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_zhegst( lapack_int* itype, char* uplo, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda,\n                    const 
lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int *info );\nvoid LAPACK_sspgst( lapack_int* itype, char* uplo, lapack_int* n, float* ap,\n                    const float* bp, lapack_int *info );\nvoid LAPACK_dspgst( lapack_int* itype, char* uplo, lapack_int* n, double* ap,\n                    const double* bp, lapack_int *info );\nvoid LAPACK_chpgst( lapack_int* itype, char* uplo, lapack_int* n,\n                    lapack_complex_float* ap, const lapack_complex_float* bp,\n                    lapack_int *info );\nvoid LAPACK_zhpgst( lapack_int* itype, char* uplo, lapack_int* n,\n                    lapack_complex_double* ap, const lapack_complex_double* bp,\n                    lapack_int *info );\nvoid LAPACK_ssbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, float* ab, lapack_int* ldab,\n                    const float* bb, lapack_int* ldbb, float* x,\n                    lapack_int* ldx, float* work, lapack_int *info );\nvoid LAPACK_dsbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, double* ab, lapack_int* ldab,\n                    const double* bb, lapack_int* ldbb, double* x,\n                    lapack_int* ldx, double* work, lapack_int *info );\nvoid LAPACK_chbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab,\n                    const lapack_complex_float* bb, lapack_int* ldbb,\n                    lapack_complex_float* x, lapack_int* ldx,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zhbgst( char* vect, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab,\n                    const lapack_complex_double* bb, lapack_int* ldbb,\n                    lapack_complex_double* x, lapack_int* ldx,\n              
      lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_spbstf( char* uplo, lapack_int* n, lapack_int* kb, float* bb,\n                    lapack_int* ldbb, lapack_int *info );\nvoid LAPACK_dpbstf( char* uplo, lapack_int* n, lapack_int* kb, double* bb,\n                    lapack_int* ldbb, lapack_int *info );\nvoid LAPACK_cpbstf( char* uplo, lapack_int* n, lapack_int* kb,\n                    lapack_complex_float* bb, lapack_int* ldbb,\n                    lapack_int *info );\nvoid LAPACK_zpbstf( char* uplo, lapack_int* n, lapack_int* kb,\n                    lapack_complex_double* bb, lapack_int* ldbb,\n                    lapack_int *info );\nvoid LAPACK_sgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi, float* a,\n                    lapack_int* lda, float* tau, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi, double* a,\n                    lapack_int* lda, double* tau, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zgehrd( lapack_int* n, lapack_int* ilo, lapack_int* ihi,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* tau, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sorghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi, float* a,\n                    lapack_int* lda, const float* tau, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dorghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi, double* a,\n                    lapack_int* lda, const double* tau, 
double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sormhr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi, const float* a,\n                    lapack_int* lda, const float* tau, float* c,\n                    lapack_int* ldc, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dormhr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi, const double* a,\n                    lapack_int* lda, const double* tau, double* c,\n                    lapack_int* ldc, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cunghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi,\n                    lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* tau, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zunghr( lapack_int* n, lapack_int* ilo, lapack_int* ihi,\n                    lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cunmhr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* tau, lapack_complex_float* c,\n                    lapack_int* ldc, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zunmhr( char* side, char* trans, lapack_int* m, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* tau, lapack_complex_double* c,\n            
        lapack_int* ldc, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sgebal( char* job, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* ilo, lapack_int* ihi, float* scale,\n                    lapack_int *info );\nvoid LAPACK_dgebal( char* job, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* ilo, lapack_int* ihi, double* scale,\n                    lapack_int *info );\nvoid LAPACK_cgebal( char* job, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* ilo, lapack_int* ihi,\n                    float* scale, lapack_int *info );\nvoid LAPACK_zgebal( char* job, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* ilo, lapack_int* ihi,\n                    double* scale, lapack_int *info );\nvoid LAPACK_sgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const float* scale, lapack_int* m,\n                    float* v, lapack_int* ldv, lapack_int *info );\nvoid LAPACK_dgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const double* scale, lapack_int* m,\n                    double* v, lapack_int* ldv, lapack_int *info );\nvoid LAPACK_cgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const float* scale, lapack_int* m,\n                    lapack_complex_float* v, lapack_int* ldv,\n                    lapack_int *info );\nvoid LAPACK_zgebak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const double* scale, lapack_int* m,\n                    lapack_complex_double* v, lapack_int* ldv,\n                    lapack_int *info );\nvoid LAPACK_shseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, float* h, lapack_int* ldh, float* wr,\n                  
  float* wi, float* z, lapack_int* ldz, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dhseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, double* h, lapack_int* ldh, double* wr,\n                    double* wi, double* z, lapack_int* ldz, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_chseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, lapack_complex_float* h, lapack_int* ldh,\n                    lapack_complex_float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zhseqr( char* job, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, lapack_complex_double* h, lapack_int* ldh,\n                    lapack_complex_double* w, lapack_complex_double* z,\n                    lapack_int* ldz, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_shsein( char* job, char* eigsrc, char* initv,\n                    lapack_logical* select, lapack_int* n, const float* h,\n                    lapack_int* ldh, float* wr, const float* wi, float* vl,\n                    lapack_int* ldvl, float* vr, lapack_int* ldvr,\n                    lapack_int* mm, lapack_int* m, float* work,\n                    lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );\nvoid LAPACK_dhsein( char* job, char* eigsrc, char* initv,\n                    lapack_logical* select, lapack_int* n, const double* h,\n                    lapack_int* ldh, double* wr, const double* wi, double* vl,\n                    lapack_int* ldvl, double* vr, lapack_int* ldvr,\n                    lapack_int* mm, lapack_int* m, double* work,\n                    lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );\nvoid LAPACK_chsein( char* 
job, char* eigsrc, char* initv,\n                    const lapack_logical* select, lapack_int* n,\n                    const lapack_complex_float* h, lapack_int* ldh,\n                    lapack_complex_float* w, lapack_complex_float* vl,\n                    lapack_int* ldvl, lapack_complex_float* vr,\n                    lapack_int* ldvr, lapack_int* mm, lapack_int* m,\n                    lapack_complex_float* work, float* rwork,\n                    lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );\nvoid LAPACK_zhsein( char* job, char* eigsrc, char* initv,\n                    const lapack_logical* select, lapack_int* n,\n                    const lapack_complex_double* h, lapack_int* ldh,\n                    lapack_complex_double* w, lapack_complex_double* vl,\n                    lapack_int* ldvl, lapack_complex_double* vr,\n                    lapack_int* ldvr, lapack_int* mm, lapack_int* m,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int* ifaill, lapack_int* ifailr, lapack_int *info );\nvoid LAPACK_strevc( char* side, char* howmny, lapack_logical* select,\n                    lapack_int* n, const float* t, lapack_int* ldt, float* vl,\n                    lapack_int* ldvl, float* vr, lapack_int* ldvr,\n                    lapack_int* mm, lapack_int* m, float* work,\n                    lapack_int *info );\nvoid LAPACK_dtrevc( char* side, char* howmny, lapack_logical* select,\n                    lapack_int* n, const double* t, lapack_int* ldt, double* vl,\n                    lapack_int* ldvl, double* vr, lapack_int* ldvr,\n                    lapack_int* mm, lapack_int* m, double* work,\n                    lapack_int *info );\nvoid LAPACK_ctrevc( char* side, char* howmny, const lapack_logical* select,\n                    lapack_int* n, lapack_complex_float* t, lapack_int* ldt,\n                    lapack_complex_float* vl, lapack_int* ldvl,\n                    lapack_complex_float* vr, 
lapack_int* ldvr, lapack_int* mm,\n                    lapack_int* m, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_ztrevc( char* side, char* howmny, const lapack_logical* select,\n                    lapack_int* n, lapack_complex_double* t, lapack_int* ldt,\n                    lapack_complex_double* vl, lapack_int* ldvl,\n                    lapack_complex_double* vr, lapack_int* ldvr, lapack_int* mm,\n                    lapack_int* m, lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_strsna( char* job, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const float* t, lapack_int* ldt,\n                    const float* vl, lapack_int* ldvl, const float* vr,\n                    lapack_int* ldvr, float* s, float* sep, lapack_int* mm,\n                    lapack_int* m, float* work, lapack_int* ldwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dtrsna( char* job, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const double* t, lapack_int* ldt,\n                    const double* vl, lapack_int* ldvl, const double* vr,\n                    lapack_int* ldvr, double* s, double* sep, lapack_int* mm,\n                    lapack_int* m, double* work, lapack_int* ldwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ctrsna( char* job, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const lapack_complex_float* t,\n                    lapack_int* ldt, const lapack_complex_float* vl,\n                    lapack_int* ldvl, const lapack_complex_float* vr,\n                    lapack_int* ldvr, float* s, float* sep, lapack_int* mm,\n                    lapack_int* m, lapack_complex_float* work,\n                    lapack_int* ldwork, float* rwork, lapack_int *info );\nvoid LAPACK_ztrsna( char* job, char* howmny, const lapack_logical* 
select,\n                    lapack_int* n, const lapack_complex_double* t,\n                    lapack_int* ldt, const lapack_complex_double* vl,\n                    lapack_int* ldvl, const lapack_complex_double* vr,\n                    lapack_int* ldvr, double* s, double* sep, lapack_int* mm,\n                    lapack_int* m, lapack_complex_double* work,\n                    lapack_int* ldwork, double* rwork, lapack_int *info );\nvoid LAPACK_strexc( char* compq, lapack_int* n, float* t, lapack_int* ldt,\n                    float* q, lapack_int* ldq, lapack_int* ifst,\n                    lapack_int* ilst, float* work, lapack_int *info );\nvoid LAPACK_dtrexc( char* compq, lapack_int* n, double* t, lapack_int* ldt,\n                    double* q, lapack_int* ldq, lapack_int* ifst,\n                    lapack_int* ilst, double* work, lapack_int *info );\nvoid LAPACK_ctrexc( char* compq, lapack_int* n, lapack_complex_float* t,\n                    lapack_int* ldt, lapack_complex_float* q, lapack_int* ldq,\n                    lapack_int* ifst, lapack_int* ilst, lapack_int *info );\nvoid LAPACK_ztrexc( char* compq, lapack_int* n, lapack_complex_double* t,\n                    lapack_int* ldt, lapack_complex_double* q, lapack_int* ldq,\n                    lapack_int* ifst, lapack_int* ilst, lapack_int *info );\nvoid LAPACK_strsen( char* job, char* compq, const lapack_logical* select,\n                    lapack_int* n, float* t, lapack_int* ldt, float* q,\n                    lapack_int* ldq, float* wr, float* wi, lapack_int* m,\n                    float* s, float* sep, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dtrsen( char* job, char* compq, const lapack_logical* select,\n                    lapack_int* n, double* t, lapack_int* ldt, double* q,\n                    lapack_int* ldq, double* wr, double* wi, lapack_int* m,\n                    double* s, double* sep, double* work, 
lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_ctrsen( char* job, char* compq, const lapack_logical* select,\n                    lapack_int* n, lapack_complex_float* t, lapack_int* ldt,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* w, lapack_int* m, float* s,\n                    float* sep, lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_ztrsen( char* job, char* compq, const lapack_logical* select,\n                    lapack_int* n, lapack_complex_double* t, lapack_int* ldt,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* w, lapack_int* m, double* s,\n                    double* sep, lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_strsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,\n                    lapack_int* n, const float* a, lapack_int* lda,\n                    const float* b, lapack_int* ldb, float* c, lapack_int* ldc,\n                    float* scale, lapack_int *info );\nvoid LAPACK_dtrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,\n                    lapack_int* n, const double* a, lapack_int* lda,\n                    const double* b, lapack_int* ldb, double* c,\n                    lapack_int* ldc, double* scale, lapack_int *info );\nvoid LAPACK_ctrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,\n                    lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* b,\n                    lapack_int* ldb, lapack_complex_float* c, lapack_int* ldc,\n                    float* scale, lapack_int *info );\nvoid LAPACK_ztrsyl( char* trana, char* tranb, lapack_int* isgn, lapack_int* m,\n                    lapack_int* n, const lapack_complex_double* a,\n     
               lapack_int* lda, const lapack_complex_double* b,\n                    lapack_int* ldb, lapack_complex_double* c, lapack_int* ldc,\n                    double* scale, lapack_int *info );\nvoid LAPACK_sgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, float* a, lapack_int* lda, float* b,\n                    lapack_int* ldb, float* q, lapack_int* ldq, float* z,\n                    lapack_int* ldz, lapack_int *info );\nvoid LAPACK_dgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, double* a, lapack_int* lda, double* b,\n                    lapack_int* ldb, double* q, lapack_int* ldq, double* z,\n                    lapack_int* ldz, lapack_int *info );\nvoid LAPACK_cgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* z, lapack_int* ldz,\n                    lapack_int *info );\nvoid LAPACK_zgghrd( char* compq, char* compz, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* z, lapack_int* ldz,\n                    lapack_int *info );\nvoid LAPACK_sggbal( char* job, lapack_int* n, float* a, lapack_int* lda,\n                    float* b, lapack_int* ldb, lapack_int* ilo, lapack_int* ihi,\n                    float* lscale, float* rscale, float* work,\n                    lapack_int *info );\nvoid LAPACK_dggbal( char* job, lapack_int* n, double* a, lapack_int* lda,\n                    double* b, lapack_int* ldb, lapack_int* ilo,\n                    lapack_int* 
ihi, double* lscale, double* rscale,\n                    double* work, lapack_int *info );\nvoid LAPACK_cggbal( char* job, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,\n                    lapack_int* ilo, lapack_int* ihi, float* lscale,\n                    float* rscale, float* work, lapack_int *info );\nvoid LAPACK_zggbal( char* job, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,\n                    lapack_int* ilo, lapack_int* ihi, double* lscale,\n                    double* rscale, double* work, lapack_int *info );\nvoid LAPACK_sggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const float* lscale, const float* rscale,\n                    lapack_int* m, float* v, lapack_int* ldv,\n                    lapack_int *info );\nvoid LAPACK_dggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const double* lscale, const double* rscale,\n                    lapack_int* m, double* v, lapack_int* ldv,\n                    lapack_int *info );\nvoid LAPACK_cggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const float* lscale, const float* rscale,\n                    lapack_int* m, lapack_complex_float* v, lapack_int* ldv,\n                    lapack_int *info );\nvoid LAPACK_zggbak( char* job, char* side, lapack_int* n, lapack_int* ilo,\n                    lapack_int* ihi, const double* lscale, const double* rscale,\n                    lapack_int* m, lapack_complex_double* v, lapack_int* ldv,\n                    lapack_int *info );\nvoid LAPACK_shgeqz( char* job, char* compq, char* compz, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi, float* h, lapack_int* ldh,\n                    float* t, lapack_int* ldt, float* alphar, float* alphai,\n           
         float* beta, float* q, lapack_int* ldq, float* z,\n                    lapack_int* ldz, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dhgeqz( char* job, char* compq, char* compz, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi, double* h,\n                    lapack_int* ldh, double* t, lapack_int* ldt, double* alphar,\n                    double* alphai, double* beta, double* q, lapack_int* ldq,\n                    double* z, lapack_int* ldz, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_chgeqz( char* job, char* compq, char* compz, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi, lapack_complex_float* h,\n                    lapack_int* ldh, lapack_complex_float* t, lapack_int* ldt,\n                    lapack_complex_float* alpha, lapack_complex_float* beta,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* z, lapack_int* ldz,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zhgeqz( char* job, char* compq, char* compz, lapack_int* n,\n                    lapack_int* ilo, lapack_int* ihi, lapack_complex_double* h,\n                    lapack_int* ldh, lapack_complex_double* t, lapack_int* ldt,\n                    lapack_complex_double* alpha, lapack_complex_double* beta,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* z, lapack_int* ldz,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_stgevc( char* side, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const float* s, lapack_int* lds,\n                    const float* p, lapack_int* ldp, float* vl,\n                    lapack_int* ldvl, float* vr, lapack_int* 
ldvr,\n                    lapack_int* mm, lapack_int* m, float* work,\n                    lapack_int *info );\nvoid LAPACK_dtgevc( char* side, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const double* s, lapack_int* lds,\n                    const double* p, lapack_int* ldp, double* vl,\n                    lapack_int* ldvl, double* vr, lapack_int* ldvr,\n                    lapack_int* mm, lapack_int* m, double* work,\n                    lapack_int *info );\nvoid LAPACK_ctgevc( char* side, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const lapack_complex_float* s,\n                    lapack_int* lds, const lapack_complex_float* p,\n                    lapack_int* ldp, lapack_complex_float* vl, lapack_int* ldvl,\n                    lapack_complex_float* vr, lapack_int* ldvr, lapack_int* mm,\n                    lapack_int* m, lapack_complex_float* work, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_ztgevc( char* side, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const lapack_complex_double* s,\n                    lapack_int* lds, const lapack_complex_double* p,\n                    lapack_int* ldp, lapack_complex_double* vl,\n                    lapack_int* ldvl, lapack_complex_double* vr,\n                    lapack_int* ldvr, lapack_int* mm, lapack_int* m,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int *info );\nvoid LAPACK_stgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,\n                    float* a, lapack_int* lda, float* b, lapack_int* ldb,\n                    float* q, lapack_int* ldq, float* z, lapack_int* ldz,\n                    lapack_int* ifst, lapack_int* ilst, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dtgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,\n                    double* a, 
lapack_int* lda, double* b, lapack_int* ldb,\n                    double* q, lapack_int* ldq, double* z, lapack_int* ldz,\n                    lapack_int* ifst, lapack_int* ilst, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_ctgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* z, lapack_int* ldz, lapack_int* ifst,\n                    lapack_int* ilst, lapack_int *info );\nvoid LAPACK_ztgexc( lapack_logical* wantq, lapack_logical* wantz, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* z, lapack_int* ldz, lapack_int* ifst,\n                    lapack_int* ilst, lapack_int *info );\nvoid LAPACK_stgsen( lapack_int* ijob, lapack_logical* wantq,\n                    lapack_logical* wantz, const lapack_logical* select,\n                    lapack_int* n, float* a, lapack_int* lda, float* b,\n                    lapack_int* ldb, float* alphar, float* alphai, float* beta,\n                    float* q, lapack_int* ldq, float* z, lapack_int* ldz,\n                    lapack_int* m, float* pl, float* pr, float* dif,\n                    float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dtgsen( lapack_int* ijob, lapack_logical* wantq,\n                    lapack_logical* wantz, const lapack_logical* select,\n                    lapack_int* n, double* a, lapack_int* lda, double* b,\n                    lapack_int* ldb, double* alphar, double* alphai,\n                    double* beta, double* q, lapack_int* ldq, double* 
z,\n                    lapack_int* ldz, lapack_int* m, double* pl, double* pr,\n                    double* dif, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_ctgsen( lapack_int* ijob, lapack_logical* wantq,\n                    lapack_logical* wantz, const lapack_logical* select,\n                    lapack_int* n, lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* alpha, lapack_complex_float* beta,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* z, lapack_int* ldz, lapack_int* m,\n                    float* pl, float* pr, float* dif,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_ztgsen( lapack_int* ijob, lapack_logical* wantq,\n                    lapack_logical* wantz, const lapack_logical* select,\n                    lapack_int* n, lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* alpha, lapack_complex_double* beta,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* z, lapack_int* ldz, lapack_int* m,\n                    double* pl, double* pr, double* dif,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_stgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,\n                    const float* a, lapack_int* lda, const float* b,\n                    lapack_int* ldb, float* c, lapack_int* ldc, const float* d,\n                    lapack_int* ldd, const float* e, lapack_int* lde, float* f,\n                    lapack_int* ldf, float* 
scale, float* dif, float* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dtgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,\n                    const double* a, lapack_int* lda, const double* b,\n                    lapack_int* ldb, double* c, lapack_int* ldc,\n                    const double* d, lapack_int* ldd, const double* e,\n                    lapack_int* lde, double* f, lapack_int* ldf, double* scale,\n                    double* dif, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ctgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    const lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    const lapack_complex_float* d, lapack_int* ldd,\n                    const lapack_complex_float* e, lapack_int* lde,\n                    lapack_complex_float* f, lapack_int* ldf, float* scale,\n                    float* dif, lapack_complex_float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ztgsyl( char* trans, lapack_int* ijob, lapack_int* m, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    const lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    const lapack_complex_double* d, lapack_int* ldd,\n                    const lapack_complex_double* e, lapack_int* lde,\n                    lapack_complex_double* f, lapack_int* ldf, double* scale,\n                    double* dif, lapack_complex_double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_stgsna( char* job, char* howmny, const lapack_logical* select,\n                    
lapack_int* n, const float* a, lapack_int* lda,\n                    const float* b, lapack_int* ldb, const float* vl,\n                    lapack_int* ldvl, const float* vr, lapack_int* ldvr,\n                    float* s, float* dif, lapack_int* mm, lapack_int* m,\n                    float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dtgsna( char* job, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const double* a, lapack_int* lda,\n                    const double* b, lapack_int* ldb, const double* vl,\n                    lapack_int* ldvl, const double* vr, lapack_int* ldvr,\n                    double* s, double* dif, lapack_int* mm, lapack_int* m,\n                    double* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_ctgsna( char* job, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, const lapack_complex_float* b,\n                    lapack_int* ldb, const lapack_complex_float* vl,\n                    lapack_int* ldvl, const lapack_complex_float* vr,\n                    lapack_int* ldvr, float* s, float* dif, lapack_int* mm,\n                    lapack_int* m, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ztgsna( char* job, char* howmny, const lapack_logical* select,\n                    lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, const lapack_complex_double* b,\n                    lapack_int* ldb, const lapack_complex_double* vl,\n                    lapack_int* ldvl, const lapack_complex_double* vr,\n                    lapack_int* ldvr, double* s, double* dif, lapack_int* mm,\n                    lapack_int* m, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int* iwork, 
lapack_int *info );\nvoid LAPACK_sggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, float* a, lapack_int* lda,\n                    float* b, lapack_int* ldb, float* tola, float* tolb,\n                    lapack_int* k, lapack_int* l, float* u, lapack_int* ldu,\n                    float* v, lapack_int* ldv, float* q, lapack_int* ldq,\n                    lapack_int* iwork, float* tau, float* work,\n                    lapack_int *info );\nvoid LAPACK_dggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, double* a, lapack_int* lda,\n                    double* b, lapack_int* ldb, double* tola, double* tolb,\n                    lapack_int* k, lapack_int* l, double* u, lapack_int* ldu,\n                    double* v, lapack_int* ldv, double* q, lapack_int* ldq,\n                    lapack_int* iwork, double* tau, double* work,\n                    lapack_int *info );\nvoid LAPACK_cggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,\n                    float* tola, float* tolb, lapack_int* k, lapack_int* l,\n                    lapack_complex_float* u, lapack_int* ldu,\n                    lapack_complex_float* v, lapack_int* ldv,\n                    lapack_complex_float* q, lapack_int* ldq, lapack_int* iwork,\n                    float* rwork, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zggsvp( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* b, lapack_int* ldb,\n                    double* tola, double* tolb, lapack_int* k, lapack_int* l,\n                    lapack_complex_double* u, 
lapack_int* ldu,\n                    lapack_complex_double* v, lapack_int* ldv,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_int* iwork, double* rwork,\n                    lapack_complex_double* tau, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_stgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,\n                    float* a, lapack_int* lda, float* b, lapack_int* ldb,\n                    float* tola, float* tolb, float* alpha, float* beta,\n                    float* u, lapack_int* ldu, float* v, lapack_int* ldv,\n                    float* q, lapack_int* ldq, float* work, lapack_int* ncycle,\n                    lapack_int *info );\nvoid LAPACK_dtgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,\n                    double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                    double* tola, double* tolb, double* alpha, double* beta,\n                    double* u, lapack_int* ldu, double* v, lapack_int* ldv,\n                    double* q, lapack_int* ldq, double* work,\n                    lapack_int* ncycle, lapack_int *info );\nvoid LAPACK_ctgsja( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, float* tola,\n                    float* tolb, float* alpha, float* beta,\n                    lapack_complex_float* u, lapack_int* ldu,\n                    lapack_complex_float* v, lapack_int* ldv,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* work, lapack_int* ncycle,\n                    lapack_int *info );\nvoid LAPACK_ztgsja( 
char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* p, lapack_int* n, lapack_int* k, lapack_int* l,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, double* tola,\n                    double* tolb, double* alpha, double* beta,\n                    lapack_complex_double* u, lapack_int* ldu,\n                    lapack_complex_double* v, lapack_int* ldv,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* work, lapack_int* ncycle,\n                    lapack_int *info );\nvoid LAPACK_sgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                   float* a, lapack_int* lda, float* b, lapack_int* ldb,\n                   float* work, lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                   double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                   double* work, lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_float* a, lapack_int* lda,\n                   lapack_complex_float* b, lapack_int* ldb,\n                   lapack_complex_float* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_zgels( char* trans, lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                   lapack_complex_double* a, lapack_int* lda,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_sgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,\n                    lapack_int* lda, float* b, lapack_int* ldb,\n                    lapack_int* jpvt, float* rcond, lapack_int* rank,\n                    float* work, lapack_int* lwork, 
lapack_int *info );\nvoid LAPACK_dgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,\n                    lapack_int* lda, double* b, lapack_int* ldb,\n                    lapack_int* jpvt, double* rcond, lapack_int* rank,\n                    double* work, lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, lapack_int* jpvt,\n                    float* rcond, lapack_int* rank, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int *info );\nvoid LAPACK_zgelsy( lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, lapack_int* jpvt,\n                    double* rcond, lapack_int* rank,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,\n                    lapack_int* lda, float* b, lapack_int* ldb, float* s,\n                    float* rcond, lapack_int* rank, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,\n                    lapack_int* lda, double* b, lapack_int* ldb, double* s,\n                    double* rcond, lapack_int* rank, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, float* s,\n                    float* rcond, lapack_int* rank, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int *info 
);\nvoid LAPACK_zgelss( lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, double* s,\n                    double* rcond, lapack_int* rank,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs, float* a,\n                    lapack_int* lda, float* b, lapack_int* ldb, float* s,\n                    float* rcond, lapack_int* rank, float* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs, double* a,\n                    lapack_int* lda, double* b, lapack_int* ldb, double* s,\n                    double* rcond, lapack_int* rank, double* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, float* s,\n                    float* rcond, lapack_int* rank, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_zgelsd( lapack_int* m, lapack_int* n, lapack_int* nrhs,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, double* s,\n                    double* rcond, lapack_int* rank,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sgglse( lapack_int* m, lapack_int* n, lapack_int* p, float* a,\n                    lapack_int* lda, float* b, lapack_int* ldb, float* c,\n                    float* d, float* x, float* 
work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dgglse( lapack_int* m, lapack_int* n, lapack_int* p, double* a,\n                    lapack_int* lda, double* b, lapack_int* ldb, double* c,\n                    double* d, double* x, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cgglse( lapack_int* m, lapack_int* n, lapack_int* p,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* c, lapack_complex_float* d,\n                    lapack_complex_float* x, lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zgglse( lapack_int* m, lapack_int* n, lapack_int* p,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* c, lapack_complex_double* d,\n                    lapack_complex_double* x, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sggglm( lapack_int* n, lapack_int* m, lapack_int* p, float* a,\n                    lapack_int* lda, float* b, lapack_int* ldb, float* d,\n                    float* x, float* y, float* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_dggglm( lapack_int* n, lapack_int* m, lapack_int* p, double* a,\n                    lapack_int* lda, double* b, lapack_int* ldb, double* d,\n                    double* x, double* y, double* work, lapack_int* lwork,\n                    lapack_int *info );\nvoid LAPACK_cggglm( lapack_int* n, lapack_int* m, lapack_int* p,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* d, lapack_complex_float* x,\n                    lapack_complex_float* y, 
lapack_complex_float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_zggglm( lapack_int* n, lapack_int* m, lapack_int* p,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* d, lapack_complex_double* x,\n                    lapack_complex_double* y, lapack_complex_double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_ssyev( char* jobz, char* uplo, lapack_int* n, float* a,\n                   lapack_int* lda, float* w, float* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_dsyev( char* jobz, char* uplo, lapack_int* n, double* a,\n                   lapack_int* lda, double* w, double* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_cheev( char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_float* a, lapack_int* lda, float* w,\n                   lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                   lapack_int *info );\nvoid LAPACK_zheev( char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_double* a, lapack_int* lda, double* w,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   double* rwork, lapack_int *info );\nvoid LAPACK_ssyevd( char* jobz, char* uplo, lapack_int* n, float* a,\n                    lapack_int* lda, float* w, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dsyevd( char* jobz, char* uplo, lapack_int* n, double* a,\n                    lapack_int* lda, double* w, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_cheevd( char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda, float* w,\n                    
lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_zheevd( char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda, double* w,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_ssyevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    float* a, lapack_int* lda, float* vl, float* vu,\n                    lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, float* z, lapack_int* ldz,\n                    float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_dsyevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    double* a, lapack_int* lda, double* vl, double* vu,\n                    lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, double* z, lapack_int* ldz,\n                    double* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_cheevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda, float* vl,\n                    float* vu, lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_zheevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda, 
double* vl,\n                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, lapack_complex_double* z,\n                    lapack_int* ldz, lapack_complex_double* work,\n                    lapack_int* lwork, double* rwork, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_ssyevr( char* jobz, char* range, char* uplo, lapack_int* n,\n                    float* a, lapack_int* lda, float* vl, float* vu,\n                    lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, float* z, lapack_int* ldz,\n                    lapack_int* isuppz, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dsyevr( char* jobz, char* range, char* uplo, lapack_int* n,\n                    double* a, lapack_int* lda, double* vl, double* vu,\n                    lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, double* z, lapack_int* ldz,\n                    lapack_int* isuppz, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_cheevr( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda, float* vl,\n                    float* vu, lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_int* isuppz,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_zheevr( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda, double* vl,\n                    double* vu, lapack_int* 
il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, lapack_complex_double* z,\n                    lapack_int* ldz, lapack_int* isuppz,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_sspev( char* jobz, char* uplo, lapack_int* n, float* ap, float* w,\n                   float* z, lapack_int* ldz, float* work, lapack_int *info );\nvoid LAPACK_dspev( char* jobz, char* uplo, lapack_int* n, double* ap, double* w,\n                   double* z, lapack_int* ldz, double* work, lapack_int *info );\nvoid LAPACK_chpev( char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_float* ap, float* w, lapack_complex_float* z,\n                   lapack_int* ldz, lapack_complex_float* work, float* rwork,\n                   lapack_int *info );\nvoid LAPACK_zhpev( char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_double* ap, double* w,\n                   lapack_complex_double* z, lapack_int* ldz,\n                   lapack_complex_double* work, double* rwork,\n                   lapack_int *info );\nvoid LAPACK_sspevd( char* jobz, char* uplo, lapack_int* n, float* ap, float* w,\n                    float* z, lapack_int* ldz, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dspevd( char* jobz, char* uplo, lapack_int* n, double* ap,\n                    double* w, double* z, lapack_int* ldz, double* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_chpevd( char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_float* ap, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_complex_float* work,\n                    lapack_int* lwork, 
float* rwork, lapack_int* lrwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_zhpevd( char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_double* ap, double* w,\n                    lapack_complex_double* z, lapack_int* ldz,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_sspevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    float* ap, float* vl, float* vu, lapack_int* il,\n                    lapack_int* iu, float* abstol, lapack_int* m, float* w,\n                    float* z, lapack_int* ldz, float* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_dspevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    double* ap, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    double* z, lapack_int* ldz, double* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_chpevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_complex_float* ap, float* vl, float* vu,\n                    lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_complex_float* work, float* rwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_zhpevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_complex_double* ap, double* vl, double* vu,\n                    lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, lapack_complex_double* z,\n                    lapack_int* ldz, 
lapack_complex_double* work, double* rwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_ssbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                   float* ab, lapack_int* ldab, float* w, float* z,\n                   lapack_int* ldz, float* work, lapack_int *info );\nvoid LAPACK_dsbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                   double* ab, lapack_int* ldab, double* w, double* z,\n                   lapack_int* ldz, double* work, lapack_int *info );\nvoid LAPACK_chbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                   lapack_complex_float* ab, lapack_int* ldab, float* w,\n                   lapack_complex_float* z, lapack_int* ldz,\n                   lapack_complex_float* work, float* rwork, lapack_int *info );\nvoid LAPACK_zhbev( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                   lapack_complex_double* ab, lapack_int* ldab, double* w,\n                   lapack_complex_double* z, lapack_int* ldz,\n                   lapack_complex_double* work, double* rwork,\n                   lapack_int *info );\nvoid LAPACK_ssbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                    float* ab, lapack_int* ldab, float* w, float* z,\n                    lapack_int* ldz, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dsbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                    double* ab, lapack_int* ldab, double* w, double* z,\n                    lapack_int* ldz, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_chbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_complex_float* ab, lapack_int* ldab, float* w,\n                    lapack_complex_float* z, lapack_int* ldz,\n                    
lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_zhbevd( char* jobz, char* uplo, lapack_int* n, lapack_int* kd,\n                    lapack_complex_double* ab, lapack_int* ldab, double* w,\n                    lapack_complex_double* z, lapack_int* ldz,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_ssbevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* kd, float* ab, lapack_int* ldab, float* q,\n                    lapack_int* ldq, float* vl, float* vu, lapack_int* il,\n                    lapack_int* iu, float* abstol, lapack_int* m, float* w,\n                    float* z, lapack_int* ldz, float* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_dsbevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* kd, double* ab, lapack_int* ldab, double* q,\n                    lapack_int* ldq, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    double* z, lapack_int* ldz, double* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_chbevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* kd, lapack_complex_float* ab, lapack_int* ldab,\n                    lapack_complex_float* q, lapack_int* ldq, float* vl,\n                    float* vu, lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_complex_float* work, float* rwork,\n                    lapack_int* iwork, 
lapack_int* ifail, lapack_int *info );\nvoid LAPACK_zhbevx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* kd, lapack_complex_double* ab, lapack_int* ldab,\n                    lapack_complex_double* q, lapack_int* ldq, double* vl,\n                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, lapack_complex_double* z,\n                    lapack_int* ldz, lapack_complex_double* work, double* rwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_sstev( char* jobz, lapack_int* n, float* d, float* e, float* z,\n                   lapack_int* ldz, float* work, lapack_int *info );\nvoid LAPACK_dstev( char* jobz, lapack_int* n, double* d, double* e, double* z,\n                   lapack_int* ldz, double* work, lapack_int *info );\nvoid LAPACK_sstevd( char* jobz, lapack_int* n, float* d, float* e, float* z,\n                    lapack_int* ldz, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dstevd( char* jobz, lapack_int* n, double* d, double* e, double* z,\n                    lapack_int* ldz, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_sstevx( char* jobz, char* range, lapack_int* n, float* d, float* e,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    float* abstol, lapack_int* m, float* w, float* z,\n                    lapack_int* ldz, float* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_dstevx( char* jobz, char* range, lapack_int* n, double* d,\n                    double* e, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    double* z, lapack_int* ldz, double* work, lapack_int* 
iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_sstevr( char* jobz, char* range, lapack_int* n, float* d, float* e,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    float* abstol, lapack_int* m, float* w, float* z,\n                    lapack_int* ldz, lapack_int* isuppz, float* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_dstevr( char* jobz, char* range, lapack_int* n, double* d,\n                    double* e, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    double* z, lapack_int* ldz, lapack_int* isuppz,\n                    double* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_sgees( char* jobvs, char* sort, LAPACK_S_SELECT2 select,\n                   lapack_int* n, float* a, lapack_int* lda, lapack_int* sdim,\n                   float* wr, float* wi, float* vs, lapack_int* ldvs,\n                   float* work, lapack_int* lwork, lapack_logical* bwork,\n                   lapack_int *info );\nvoid LAPACK_dgees( char* jobvs, char* sort, LAPACK_D_SELECT2 select,\n                   lapack_int* n, double* a, lapack_int* lda, lapack_int* sdim,\n                   double* wr, double* wi, double* vs, lapack_int* ldvs,\n                   double* work, lapack_int* lwork, lapack_logical* bwork,\n                   lapack_int *info );\nvoid LAPACK_cgees( char* jobvs, char* sort, LAPACK_C_SELECT1 select,\n                   lapack_int* n, lapack_complex_float* a, lapack_int* lda,\n                   lapack_int* sdim, lapack_complex_float* w,\n                   lapack_complex_float* vs, lapack_int* ldvs,\n                   lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                   lapack_logical* bwork, lapack_int 
*info );\nvoid LAPACK_zgees( char* jobvs, char* sort, LAPACK_Z_SELECT1 select,\n                   lapack_int* n, lapack_complex_double* a, lapack_int* lda,\n                   lapack_int* sdim, lapack_complex_double* w,\n                   lapack_complex_double* vs, lapack_int* ldvs,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   double* rwork, lapack_logical* bwork, lapack_int *info );\nvoid LAPACK_sgeesx( char* jobvs, char* sort, LAPACK_S_SELECT2 select,\n                    char* sense, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* sdim, float* wr, float* wi, float* vs,\n                    lapack_int* ldvs, float* rconde, float* rcondv, float* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_logical* bwork, lapack_int *info );\nvoid LAPACK_dgeesx( char* jobvs, char* sort, LAPACK_D_SELECT2 select,\n                    char* sense, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* sdim, double* wr, double* wi, double* vs,\n                    lapack_int* ldvs, double* rconde, double* rcondv,\n                    double* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_cgeesx( char* jobvs, char* sort, LAPACK_C_SELECT1 select,\n                    char* sense, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* sdim, lapack_complex_float* w,\n                    lapack_complex_float* vs, lapack_int* ldvs, float* rconde,\n                    float* rcondv, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_zgeesx( char* jobvs, char* sort, LAPACK_Z_SELECT1 select,\n                    char* sense, lapack_int* n, lapack_complex_double* a,\n                 
   lapack_int* lda, lapack_int* sdim, lapack_complex_double* w,\n                    lapack_complex_double* vs, lapack_int* ldvs, double* rconde,\n                    double* rcondv, lapack_complex_double* work,\n                    lapack_int* lwork, double* rwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_sgeev( char* jobvl, char* jobvr, lapack_int* n, float* a,\n                   lapack_int* lda, float* wr, float* wi, float* vl,\n                   lapack_int* ldvl, float* vr, lapack_int* ldvr, float* work,\n                   lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dgeev( char* jobvl, char* jobvr, lapack_int* n, double* a,\n                   lapack_int* lda, double* wr, double* wi, double* vl,\n                   lapack_int* ldvl, double* vr, lapack_int* ldvr, double* work,\n                   lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgeev( char* jobvl, char* jobvr, lapack_int* n,\n                   lapack_complex_float* a, lapack_int* lda,\n                   lapack_complex_float* w, lapack_complex_float* vl,\n                   lapack_int* ldvl, lapack_complex_float* vr, lapack_int* ldvr,\n                   lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                   lapack_int *info );\nvoid LAPACK_zgeev( char* jobvl, char* jobvr, lapack_int* n,\n                   lapack_complex_double* a, lapack_int* lda,\n                   lapack_complex_double* w, lapack_complex_double* vl,\n                   lapack_int* ldvl, lapack_complex_double* vr,\n                   lapack_int* ldvr, lapack_complex_double* work,\n                   lapack_int* lwork, double* rwork, lapack_int *info );\nvoid LAPACK_sgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, float* a, lapack_int* lda, float* wr,\n                    float* wi, float* vl, lapack_int* ldvl, float* vr,\n                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,\n         
           float* scale, float* abnrm, float* rconde, float* rcondv,\n                    float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_dgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, double* a, lapack_int* lda, double* wr,\n                    double* wi, double* vl, lapack_int* ldvl, double* vr,\n                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,\n                    double* scale, double* abnrm, double* rconde,\n                    double* rcondv, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* w, lapack_complex_float* vl,\n                    lapack_int* ldvl, lapack_complex_float* vr,\n                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,\n                    float* scale, float* abnrm, float* rconde, float* rcondv,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zgeevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* w, lapack_complex_double* vl,\n                    lapack_int* ldvl, lapack_complex_double* vr,\n                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,\n                    double* scale, double* abnrm, double* rconde,\n                    double* rcondv, lapack_complex_double* work,\n                    lapack_int* lwork, double* rwork, lapack_int *info );\nvoid LAPACK_sgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,\n                    float* a, lapack_int* lda, float* s, float* u,\n                    lapack_int* ldu, 
float* vt, lapack_int* ldvt, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,\n                    double* a, lapack_int* lda, double* s, double* u,\n                    lapack_int* ldu, double* vt, lapack_int* ldvt, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda, float* s,\n                    lapack_complex_float* u, lapack_int* ldu,\n                    lapack_complex_float* vt, lapack_int* ldvt,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int *info );\nvoid LAPACK_zgesvd( char* jobu, char* jobvt, lapack_int* m, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda, double* s,\n                    lapack_complex_double* u, lapack_int* ldu,\n                    lapack_complex_double* vt, lapack_int* ldvt,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int *info );\nvoid LAPACK_sgesdd( char* jobz, lapack_int* m, lapack_int* n, float* a,\n                    lapack_int* lda, float* s, float* u, lapack_int* ldu,\n                    float* vt, lapack_int* ldvt, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dgesdd( char* jobz, lapack_int* m, lapack_int* n, double* a,\n                    lapack_int* lda, double* s, double* u, lapack_int* ldu,\n                    double* vt, lapack_int* ldvt, double* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cgesdd( char* jobz, lapack_int* m, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda, float* s,\n                    lapack_complex_float* u, lapack_int* ldu,\n                  
  lapack_complex_float* vt, lapack_int* ldvt,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_zgesdd( char* jobz, lapack_int* m, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda, double* s,\n                    lapack_complex_double* u, lapack_int* ldu,\n                    lapack_complex_double* vt, lapack_int* ldvt,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dgejsv( char* joba, char* jobu, char* jobv, char* jobr, char* jobt,\n                    char* jobp, lapack_int* m, lapack_int* n, double* a,\n                    lapack_int* lda, double* sva, double* u, lapack_int* ldu,\n                    double* v, lapack_int* ldv, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_sgejsv( char* joba, char* jobu, char* jobv, char* jobr, char* jobt,\n                    char* jobp, lapack_int* m, lapack_int* n, float* a,\n                    lapack_int* lda, float* sva, float* u, lapack_int* ldu,\n                    float* v, lapack_int* ldv, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dgesvj( char* joba, char* jobu, char* jobv, lapack_int* m,\n                    lapack_int* n, double* a, lapack_int* lda, double* sva,\n                    lapack_int* mv, double* v, lapack_int* ldv, double* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sgesvj( char* joba, char* jobu, char* jobv, lapack_int* m,\n                    lapack_int* n, float* a, lapack_int* lda, float* sva,\n                    lapack_int* mv, float* v, lapack_int* ldv, float* work,\n                    lapack_int* lwork, lapack_int *info );\nvoid LAPACK_sggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,\n        
            lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,\n                    float* a, lapack_int* lda, float* b, lapack_int* ldb,\n                    float* alpha, float* beta, float* u, lapack_int* ldu,\n                    float* v, lapack_int* ldv, float* q, lapack_int* ldq,\n                    float* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_dggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,\n                    double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                    double* alpha, double* beta, double* u, lapack_int* ldu,\n                    double* v, lapack_int* ldv, double* q, lapack_int* ldq,\n                    double* work, lapack_int* iwork, lapack_int *info );\nvoid LAPACK_cggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, float* alpha,\n                    float* beta, lapack_complex_float* u, lapack_int* ldu,\n                    lapack_complex_float* v, lapack_int* ldv,\n                    lapack_complex_float* q, lapack_int* ldq,\n                    lapack_complex_float* work, float* rwork, lapack_int* iwork,\n                    lapack_int *info );\nvoid LAPACK_zggsvd( char* jobu, char* jobv, char* jobq, lapack_int* m,\n                    lapack_int* n, lapack_int* p, lapack_int* k, lapack_int* l,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, double* alpha,\n                    double* beta, lapack_complex_double* u, lapack_int* ldu,\n                    lapack_complex_double* v, lapack_int* ldv,\n                    lapack_complex_double* q, lapack_int* ldq,\n                    lapack_complex_double* work, 
double* rwork,\n                    lapack_int* iwork, lapack_int *info );\nvoid LAPACK_ssygv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   float* a, lapack_int* lda, float* b, lapack_int* ldb,\n                   float* w, float* work, lapack_int* lwork, lapack_int *info );\nvoid LAPACK_dsygv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                   double* w, double* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_chegv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_float* a, lapack_int* lda,\n                   lapack_complex_float* b, lapack_int* ldb, float* w,\n                   lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                   lapack_int *info );\nvoid LAPACK_zhegv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_double* a, lapack_int* lda,\n                   lapack_complex_double* b, lapack_int* ldb, double* w,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   double* rwork, lapack_int *info );\nvoid LAPACK_ssygvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    float* a, lapack_int* lda, float* b, lapack_int* ldb,\n                    float* w, float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dsygvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                    double* w, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_chegvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda,\n                    
lapack_complex_float* b, lapack_int* ldb, float* w,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_zhegvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, double* w,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_ssygvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, float* a, lapack_int* lda, float* b,\n                    lapack_int* ldb, float* vl, float* vu, lapack_int* il,\n                    lapack_int* iu, float* abstol, lapack_int* m, float* w,\n                    float* z, lapack_int* ldz, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_dsygvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, double* a, lapack_int* lda, double* b,\n                    lapack_int* ldb, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    double* z, lapack_int* ldz, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_chegvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, float* vl,\n                    float* vu, lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, lapack_complex_float* z,\n          
          lapack_int* ldz, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_zhegvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, double* vl,\n                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, lapack_complex_double* z,\n                    lapack_int* ldz, lapack_complex_double* work,\n                    lapack_int* lwork, double* rwork, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_sspgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   float* ap, float* bp, float* w, float* z, lapack_int* ldz,\n                   float* work, lapack_int *info );\nvoid LAPACK_dspgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   double* ap, double* bp, double* w, double* z,\n                   lapack_int* ldz, double* work, lapack_int *info );\nvoid LAPACK_chpgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_float* ap, lapack_complex_float* bp, float* w,\n                   lapack_complex_float* z, lapack_int* ldz,\n                   lapack_complex_float* work, float* rwork, lapack_int *info );\nvoid LAPACK_zhpgv( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                   lapack_complex_double* ap, lapack_complex_double* bp,\n                   double* w, lapack_complex_double* z, lapack_int* ldz,\n                   lapack_complex_double* work, double* rwork,\n                   lapack_int *info );\nvoid LAPACK_sspgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    float* ap, float* bp, float* w, float* z, lapack_int* ldz,\n      
              float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dspgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    double* ap, double* bp, double* w, double* z,\n                    lapack_int* ldz, double* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_int* liwork, lapack_int *info );\nvoid LAPACK_chpgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_float* ap, lapack_complex_float* bp,\n                    float* w, lapack_complex_float* z, lapack_int* ldz,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_zhpgvd( lapack_int* itype, char* jobz, char* uplo, lapack_int* n,\n                    lapack_complex_double* ap, lapack_complex_double* bp,\n                    double* w, lapack_complex_double* z, lapack_int* ldz,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_sspgvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, float* ap, float* bp, float* vl, float* vu,\n                    lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, float* z, lapack_int* ldz,\n                    float* work, lapack_int* iwork, lapack_int* ifail,\n                    lapack_int *info );\nvoid LAPACK_dspgvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, double* ap, double* bp, double* vl,\n                    double* vu, lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, double* z, lapack_int* 
ldz,\n                    double* work, lapack_int* iwork, lapack_int* ifail,\n                    lapack_int *info );\nvoid LAPACK_chpgvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, lapack_complex_float* ap,\n                    lapack_complex_float* bp, float* vl, float* vu,\n                    lapack_int* il, lapack_int* iu, float* abstol,\n                    lapack_int* m, float* w, lapack_complex_float* z,\n                    lapack_int* ldz, lapack_complex_float* work, float* rwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_zhpgvx( lapack_int* itype, char* jobz, char* range, char* uplo,\n                    lapack_int* n, lapack_complex_double* ap,\n                    lapack_complex_double* bp, double* vl, double* vu,\n                    lapack_int* il, lapack_int* iu, double* abstol,\n                    lapack_int* m, double* w, lapack_complex_double* z,\n                    lapack_int* ldz, lapack_complex_double* work, double* rwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_ssbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                   lapack_int* kb, float* ab, lapack_int* ldab, float* bb,\n                   lapack_int* ldbb, float* w, float* z, lapack_int* ldz,\n                   float* work, lapack_int *info );\nvoid LAPACK_dsbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                   lapack_int* kb, double* ab, lapack_int* ldab, double* bb,\n                   lapack_int* ldbb, double* w, double* z, lapack_int* ldz,\n                   double* work, lapack_int *info );\nvoid LAPACK_chbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                   lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab,\n                   lapack_complex_float* bb, lapack_int* ldbb, float* w,\n                   lapack_complex_float* z, lapack_int* ldz,\n     
              lapack_complex_float* work, float* rwork, lapack_int *info );\nvoid LAPACK_zhbgv( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                   lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab,\n                   lapack_complex_double* bb, lapack_int* ldbb, double* w,\n                   lapack_complex_double* z, lapack_int* ldz,\n                   lapack_complex_double* work, double* rwork,\n                   lapack_int *info );\nvoid LAPACK_ssbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, float* ab, lapack_int* ldab, float* bb,\n                    lapack_int* ldbb, float* w, float* z, lapack_int* ldz,\n                    float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_dsbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, double* ab, lapack_int* ldab, double* bb,\n                    lapack_int* ldbb, double* w, double* z, lapack_int* ldz,\n                    double* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_chbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, lapack_complex_float* ab, lapack_int* ldab,\n                    lapack_complex_float* bb, lapack_int* ldbb, float* w,\n                    lapack_complex_float* z, lapack_int* ldz,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* lrwork, lapack_int* iwork, lapack_int* liwork,\n                    lapack_int *info );\nvoid LAPACK_zhbgvd( char* jobz, char* uplo, lapack_int* n, lapack_int* ka,\n                    lapack_int* kb, lapack_complex_double* ab, lapack_int* ldab,\n                    lapack_complex_double* bb, lapack_int* ldbb, double* w,\n                    lapack_complex_double* z, lapack_int* 
ldz,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_int *info );\nvoid LAPACK_ssbgvx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* ka, lapack_int* kb, float* ab, lapack_int* ldab,\n                    float* bb, lapack_int* ldbb, float* q, lapack_int* ldq,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    float* abstol, lapack_int* m, float* w, float* z,\n                    lapack_int* ldz, float* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_dsbgvx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* ka, lapack_int* kb, double* ab,\n                    lapack_int* ldab, double* bb, lapack_int* ldbb, double* q,\n                    lapack_int* ldq, double* vl, double* vu, lapack_int* il,\n                    lapack_int* iu, double* abstol, lapack_int* m, double* w,\n                    double* z, lapack_int* ldz, double* work, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_chbgvx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* ka, lapack_int* kb, lapack_complex_float* ab,\n                    lapack_int* ldab, lapack_complex_float* bb,\n                    lapack_int* ldbb, lapack_complex_float* q, lapack_int* ldq,\n                    float* vl, float* vu, lapack_int* il, lapack_int* iu,\n                    float* abstol, lapack_int* m, float* w,\n                    lapack_complex_float* z, lapack_int* ldz,\n                    lapack_complex_float* work, float* rwork, lapack_int* iwork,\n                    lapack_int* ifail, lapack_int *info );\nvoid LAPACK_zhbgvx( char* jobz, char* range, char* uplo, lapack_int* n,\n                    lapack_int* ka, lapack_int* kb, 
lapack_complex_double* ab,\n                    lapack_int* ldab, lapack_complex_double* bb,\n                    lapack_int* ldbb, lapack_complex_double* q, lapack_int* ldq,\n                    double* vl, double* vu, lapack_int* il, lapack_int* iu,\n                    double* abstol, lapack_int* m, double* w,\n                    lapack_complex_double* z, lapack_int* ldz,\n                    lapack_complex_double* work, double* rwork,\n                    lapack_int* iwork, lapack_int* ifail, lapack_int *info );\nvoid LAPACK_sgges( char* jobvsl, char* jobvsr, char* sort,\n                   LAPACK_S_SELECT3 selctg, lapack_int* n, float* a,\n                   lapack_int* lda, float* b, lapack_int* ldb, lapack_int* sdim,\n                   float* alphar, float* alphai, float* beta, float* vsl,\n                   lapack_int* ldvsl, float* vsr, lapack_int* ldvsr,\n                   float* work, lapack_int* lwork, lapack_logical* bwork,\n                   lapack_int *info );\nvoid LAPACK_dgges( char* jobvsl, char* jobvsr, char* sort,\n                   LAPACK_D_SELECT3 selctg, lapack_int* n, double* a,\n                   lapack_int* lda, double* b, lapack_int* ldb,\n                   lapack_int* sdim, double* alphar, double* alphai,\n                   double* beta, double* vsl, lapack_int* ldvsl, double* vsr,\n                   lapack_int* ldvsr, double* work, lapack_int* lwork,\n                   lapack_logical* bwork, lapack_int *info );\nvoid LAPACK_cgges( char* jobvsl, char* jobvsr, char* sort,\n                   LAPACK_C_SELECT2 selctg, lapack_int* n,\n                   lapack_complex_float* a, lapack_int* lda,\n                   lapack_complex_float* b, lapack_int* ldb, lapack_int* sdim,\n                   lapack_complex_float* alpha, lapack_complex_float* beta,\n                   lapack_complex_float* vsl, lapack_int* ldvsl,\n                   lapack_complex_float* vsr, lapack_int* ldvsr,\n                   lapack_complex_float* work, 
lapack_int* lwork, float* rwork,\n                   lapack_logical* bwork, lapack_int *info );\nvoid LAPACK_zgges( char* jobvsl, char* jobvsr, char* sort,\n                   LAPACK_Z_SELECT2 selctg, lapack_int* n,\n                   lapack_complex_double* a, lapack_int* lda,\n                   lapack_complex_double* b, lapack_int* ldb, lapack_int* sdim,\n                   lapack_complex_double* alpha, lapack_complex_double* beta,\n                   lapack_complex_double* vsl, lapack_int* ldvsl,\n                   lapack_complex_double* vsr, lapack_int* ldvsr,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   double* rwork, lapack_logical* bwork, lapack_int *info );\nvoid LAPACK_sggesx( char* jobvsl, char* jobvsr, char* sort,\n                    LAPACK_S_SELECT3 selctg, char* sense, lapack_int* n,\n                    float* a, lapack_int* lda, float* b, lapack_int* ldb,\n                    lapack_int* sdim, float* alphar, float* alphai, float* beta,\n                    float* vsl, lapack_int* ldvsl, float* vsr,\n                    lapack_int* ldvsr, float* rconde, float* rcondv,\n                    float* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_dggesx( char* jobvsl, char* jobvsr, char* sort,\n                    LAPACK_D_SELECT3 selctg, char* sense, lapack_int* n,\n                    double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                    lapack_int* sdim, double* alphar, double* alphai,\n                    double* beta, double* vsl, lapack_int* ldvsl, double* vsr,\n                    lapack_int* ldvsr, double* rconde, double* rcondv,\n                    double* work, lapack_int* lwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_cggesx( char* jobvsl, char* jobvsr, char* sort,\n   
                 LAPACK_C_SELECT2 selctg, char* sense, lapack_int* n,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb, lapack_int* sdim,\n                    lapack_complex_float* alpha, lapack_complex_float* beta,\n                    lapack_complex_float* vsl, lapack_int* ldvsl,\n                    lapack_complex_float* vsr, lapack_int* ldvsr, float* rconde,\n                    float* rcondv, lapack_complex_float* work,\n                    lapack_int* lwork, float* rwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_zggesx( char* jobvsl, char* jobvsr, char* sort,\n                    LAPACK_Z_SELECT2 selctg, char* sense, lapack_int* n,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb, lapack_int* sdim,\n                    lapack_complex_double* alpha, lapack_complex_double* beta,\n                    lapack_complex_double* vsl, lapack_int* ldvsl,\n                    lapack_complex_double* vsr, lapack_int* ldvsr,\n                    double* rconde, double* rcondv, lapack_complex_double* work,\n                    lapack_int* lwork, double* rwork, lapack_int* iwork,\n                    lapack_int* liwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_sggev( char* jobvl, char* jobvr, lapack_int* n, float* a,\n                   lapack_int* lda, float* b, lapack_int* ldb, float* alphar,\n                   float* alphai, float* beta, float* vl, lapack_int* ldvl,\n                   float* vr, lapack_int* ldvr, float* work, lapack_int* lwork,\n                   lapack_int *info );\nvoid LAPACK_dggev( char* jobvl, char* jobvr, lapack_int* n, double* a,\n                   lapack_int* lda, double* b, lapack_int* ldb, double* alphar,\n                   double* alphai, double* beta, double* 
vl, lapack_int* ldvl,\n                   double* vr, lapack_int* ldvr, double* work,\n                   lapack_int* lwork, lapack_int *info );\nvoid LAPACK_cggev( char* jobvl, char* jobvr, lapack_int* n,\n                   lapack_complex_float* a, lapack_int* lda,\n                   lapack_complex_float* b, lapack_int* ldb,\n                   lapack_complex_float* alpha, lapack_complex_float* beta,\n                   lapack_complex_float* vl, lapack_int* ldvl,\n                   lapack_complex_float* vr, lapack_int* ldvr,\n                   lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                   lapack_int *info );\nvoid LAPACK_zggev( char* jobvl, char* jobvr, lapack_int* n,\n                   lapack_complex_double* a, lapack_int* lda,\n                   lapack_complex_double* b, lapack_int* ldb,\n                   lapack_complex_double* alpha, lapack_complex_double* beta,\n                   lapack_complex_double* vl, lapack_int* ldvl,\n                   lapack_complex_double* vr, lapack_int* ldvr,\n                   lapack_complex_double* work, lapack_int* lwork,\n                   double* rwork, lapack_int *info );\nvoid LAPACK_sggevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, float* a, lapack_int* lda, float* b,\n                    lapack_int* ldb, float* alphar, float* alphai, float* beta,\n                    float* vl, lapack_int* ldvl, float* vr, lapack_int* ldvr,\n                    lapack_int* ilo, lapack_int* ihi, float* lscale,\n                    float* rscale, float* abnrm, float* bbnrm, float* rconde,\n                    float* rcondv, float* work, lapack_int* lwork,\n                    lapack_int* iwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_dggevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, double* a, lapack_int* lda, double* b,\n                    lapack_int* ldb, double* 
alphar, double* alphai,\n                    double* beta, double* vl, lapack_int* ldvl, double* vr,\n                    lapack_int* ldvr, lapack_int* ilo, lapack_int* ihi,\n                    double* lscale, double* rscale, double* abnrm,\n                    double* bbnrm, double* rconde, double* rcondv, double* work,\n                    lapack_int* lwork, lapack_int* iwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_cggevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    lapack_complex_float* alpha, lapack_complex_float* beta,\n                    lapack_complex_float* vl, lapack_int* ldvl,\n                    lapack_complex_float* vr, lapack_int* ldvr, lapack_int* ilo,\n                    lapack_int* ihi, float* lscale, float* rscale, float* abnrm,\n                    float* bbnrm, float* rconde, float* rcondv,\n                    lapack_complex_float* work, lapack_int* lwork, float* rwork,\n                    lapack_int* iwork, lapack_logical* bwork,\n                    lapack_int *info );\nvoid LAPACK_zggevx( char* balanc, char* jobvl, char* jobvr, char* sense,\n                    lapack_int* n, lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* alpha, lapack_complex_double* beta,\n                    lapack_complex_double* vl, lapack_int* ldvl,\n                    lapack_complex_double* vr, lapack_int* ldvr,\n                    lapack_int* ilo, lapack_int* ihi, double* lscale,\n                    double* rscale, double* abnrm, double* bbnrm,\n                    double* rconde, double* rcondv, lapack_complex_double* work,\n                    lapack_int* lwork, double* rwork, lapack_int* iwork,\n                    lapack_logical* bwork, lapack_int 
*info );\nvoid LAPACK_dsfrk( char* transr, char* uplo, char* trans, lapack_int* n,\n                   lapack_int* k, double* alpha, const double* a,\n                   lapack_int* lda, double* beta, double* c );\nvoid LAPACK_ssfrk( char* transr, char* uplo, char* trans, lapack_int* n,\n                   lapack_int* k, float* alpha, const float* a, lapack_int* lda,\n                   float* beta, float* c );\nvoid LAPACK_zhfrk( char* transr, char* uplo, char* trans, lapack_int* n,\n                   lapack_int* k, double* alpha, const lapack_complex_double* a,\n                   lapack_int* lda, double* beta, lapack_complex_double* c );\nvoid LAPACK_chfrk( char* transr, char* uplo, char* trans, lapack_int* n,\n                   lapack_int* k, float* alpha, const lapack_complex_float* a,\n                   lapack_int* lda, float* beta, lapack_complex_float* c );\nvoid LAPACK_dtfsm( char* transr, char* side, char* uplo, char* trans,\n                   char* diag, lapack_int* m, lapack_int* n, double* alpha,\n                   const double* a, double* b, lapack_int* ldb );\nvoid LAPACK_stfsm( char* transr, char* side, char* uplo, char* trans,\n                   char* diag, lapack_int* m, lapack_int* n, float* alpha,\n                   const float* a, float* b, lapack_int* ldb );\nvoid LAPACK_ztfsm( char* transr, char* side, char* uplo, char* trans,\n                   char* diag, lapack_int* m, lapack_int* n,\n                   lapack_complex_double* alpha, const lapack_complex_double* a,\n                   lapack_complex_double* b, lapack_int* ldb );\nvoid LAPACK_ctfsm( char* transr, char* side, char* uplo, char* trans,\n                   char* diag, lapack_int* m, lapack_int* n,\n                   lapack_complex_float* alpha, const lapack_complex_float* a,\n                   lapack_complex_float* b, lapack_int* ldb );\nvoid LAPACK_dtfttp( char* transr, char* uplo, lapack_int* n, const double* arf,\n                    double* ap, lapack_int *info 
);\nvoid LAPACK_stfttp( char* transr, char* uplo, lapack_int* n, const float* arf,\n                    float* ap, lapack_int *info );\nvoid LAPACK_ztfttp( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_double* arf, lapack_complex_double* ap,\n                    lapack_int *info );\nvoid LAPACK_ctfttp( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_float* arf, lapack_complex_float* ap,\n                    lapack_int *info );\nvoid LAPACK_dtfttr( char* transr, char* uplo, lapack_int* n, const double* arf,\n                    double* a, lapack_int* lda, lapack_int *info );\nvoid LAPACK_stfttr( char* transr, char* uplo, lapack_int* n, const float* arf,\n                    float* a, lapack_int* lda, lapack_int *info );\nvoid LAPACK_ztfttr( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_double* arf, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_ctfttr( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_float* arf, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_dtpttf( char* transr, char* uplo, lapack_int* n, const double* ap,\n                    double* arf, lapack_int *info );\nvoid LAPACK_stpttf( char* transr, char* uplo, lapack_int* n, const float* ap,\n                    float* arf, lapack_int *info );\nvoid LAPACK_ztpttf( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_double* ap, lapack_complex_double* arf,\n                    lapack_int *info );\nvoid LAPACK_ctpttf( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_float* ap, lapack_complex_float* arf,\n                    lapack_int *info );\nvoid LAPACK_dtpttr( char* uplo, lapack_int* n, const double* ap, double* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_stpttr( char* 
uplo, lapack_int* n, const float* ap, float* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_ztpttr( char* uplo, lapack_int* n, const lapack_complex_double* ap,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_ctpttr( char* uplo, lapack_int* n, const lapack_complex_float* ap,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_dtrttf( char* transr, char* uplo, lapack_int* n, const double* a,\n                    lapack_int* lda, double* arf, lapack_int *info );\nvoid LAPACK_strttf( char* transr, char* uplo, lapack_int* n, const float* a,\n                    lapack_int* lda, float* arf, lapack_int *info );\nvoid LAPACK_ztrttf( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* arf, lapack_int *info );\nvoid LAPACK_ctrttf( char* transr, char* uplo, lapack_int* n,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* arf, lapack_int *info );\nvoid LAPACK_dtrttp( char* uplo, lapack_int* n, const double* a, lapack_int* lda,\n                    double* ap, lapack_int *info );\nvoid LAPACK_strttp( char* uplo, lapack_int* n, const float* a, lapack_int* lda,\n                    float* ap, lapack_int *info );\nvoid LAPACK_ztrttp( char* uplo, lapack_int* n, const lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* ap,\n                    lapack_int *info );\nvoid LAPACK_ctrttp( char* uplo, lapack_int* n, const lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* ap,\n                    lapack_int *info );\nvoid LAPACK_sgeqrfp( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                     float* tau, float* work, lapack_int* lwork,\n                     lapack_int *info 
);\nvoid LAPACK_dgeqrfp( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                     double* tau, double* work, lapack_int* lwork,\n                     lapack_int *info );\nvoid LAPACK_cgeqrfp( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                     lapack_int* lda, lapack_complex_float* tau,\n                     lapack_complex_float* work, lapack_int* lwork,\n                     lapack_int *info );\nvoid LAPACK_zgeqrfp( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                     lapack_int* lda, lapack_complex_double* tau,\n                     lapack_complex_double* work, lapack_int* lwork,\n                     lapack_int *info );\nvoid LAPACK_clacgv( lapack_int* n, lapack_complex_float* x, lapack_int* incx );\nvoid LAPACK_zlacgv( lapack_int* n, lapack_complex_double* x, lapack_int* incx );\nvoid LAPACK_slarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,\n                    float* x );\nvoid LAPACK_dlarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,\n                    double* x );\nvoid LAPACK_clarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,\n                    lapack_complex_float* x );\nvoid LAPACK_zlarnv( lapack_int* idist, lapack_int* iseed, lapack_int* n,\n                    lapack_complex_double* x );\nvoid LAPACK_sgeqr2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* tau, float* work, lapack_int *info );\nvoid LAPACK_dgeqr2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* tau, double* work, lapack_int *info );\nvoid LAPACK_cgeqr2( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zgeqr2( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* tau,\n                    
lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_slacpy( char* uplo, lapack_int* m, lapack_int* n, const float* a,\n                    lapack_int* lda, float* b, lapack_int* ldb );\nvoid LAPACK_dlacpy( char* uplo, lapack_int* m, lapack_int* n, const double* a,\n                    lapack_int* lda, double* b, lapack_int* ldb );\nvoid LAPACK_clacpy( char* uplo, lapack_int* m, lapack_int* n,\n                    const lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb );\nvoid LAPACK_zlacpy( char* uplo, lapack_int* m, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb );\nvoid LAPACK_sgetf2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_dgetf2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_cgetf2( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_zgetf2( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* ipiv, lapack_int *info );\nvoid LAPACK_slaswp( lapack_int* n, float* a, lapack_int* lda, lapack_int* k1,\n                    lapack_int* k2, const lapack_int* ipiv, lapack_int* incx );\nvoid LAPACK_dlaswp( lapack_int* n, double* a, lapack_int* lda, lapack_int* k1,\n                    lapack_int* k2, const lapack_int* ipiv, lapack_int* incx );\nvoid LAPACK_claswp( lapack_int* n, lapack_complex_float* a, lapack_int* lda,\n                    lapack_int* k1, lapack_int* k2, const lapack_int* ipiv,\n                    lapack_int* incx );\nvoid LAPACK_zlaswp( lapack_int* n, lapack_complex_double* a, lapack_int* lda,\n                    lapack_int* k1, lapack_int* k2, const lapack_int* ipiv,\n       
             lapack_int* incx );\nfloat LAPACK_slange( char* norm, lapack_int* m, lapack_int* n, const float* a,\n                    lapack_int* lda, float* work );\ndouble LAPACK_dlange( char* norm, lapack_int* m, lapack_int* n, const double* a,\n                    lapack_int* lda, double* work );\nfloat LAPACK_clange( char* norm, lapack_int* m, lapack_int* n,\n                    const lapack_complex_float* a, lapack_int* lda, float* work );\ndouble LAPACK_zlange( char* norm, lapack_int* m, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda, double* work );\nfloat LAPACK_clanhe( char* norm, char* uplo, lapack_int* n,\n                    const lapack_complex_float* a, lapack_int* lda, float* work );\ndouble LAPACK_zlanhe( char* norm, char* uplo, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda, double* work );\nfloat LAPACK_slansy( char* norm, char* uplo, lapack_int* n, const float* a,\n                    lapack_int* lda, float* work );\ndouble LAPACK_dlansy( char* norm, char* uplo, lapack_int* n, const double* a,\n                    lapack_int* lda, double* work );\nfloat LAPACK_clansy( char* norm, char* uplo, lapack_int* n,\n                    const lapack_complex_float* a, lapack_int* lda, float* work );\ndouble LAPACK_zlansy( char* norm, char* uplo, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda, double* work );\nfloat LAPACK_slantr( char* norm, char* uplo, char* diag, lapack_int* m,\n                    lapack_int* n, const float* a, lapack_int* lda, float* work );\ndouble LAPACK_dlantr( char* norm, char* uplo, char* diag, lapack_int* m,\n                    lapack_int* n, const double* a, lapack_int* lda, double* work );\nfloat LAPACK_clantr( char* norm, char* uplo, char* diag, lapack_int* m,\n                    lapack_int* n, const lapack_complex_float* a, lapack_int* lda,\n                    float* work );\ndouble LAPACK_zlantr( char* norm, 
char* uplo, char* diag, lapack_int* m,\n                    lapack_int* n, const lapack_complex_double* a, lapack_int* lda,\n                    double* work );\nfloat LAPACK_slamch( char* cmach );\ndouble LAPACK_dlamch( char* cmach );\nvoid LAPACK_sgelq2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                    float* tau, float* work, lapack_int *info );\nvoid LAPACK_dgelq2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                    double* tau, double* work, lapack_int *info );\nvoid LAPACK_cgelq2( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_complex_float* tau,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zgelq2( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_complex_double* tau,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_slarfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k, const float* v,\n                    lapack_int* ldv, const float* t, lapack_int* ldt, float* c,\n                    lapack_int* ldc, float* work, lapack_int* ldwork );\nvoid LAPACK_dlarfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k,\n                    const double* v, lapack_int* ldv, const double* t,\n                    lapack_int* ldt, double* c, lapack_int* ldc, double* work,\n                    lapack_int* ldwork );\nvoid LAPACK_clarfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k,\n                    const lapack_complex_float* v, lapack_int* ldv,\n                    const lapack_complex_float* t, lapack_int* ldt,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work, lapack_int* 
ldwork );\nvoid LAPACK_zlarfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k,\n                    const lapack_complex_double* v, lapack_int* ldv,\n                    const lapack_complex_double* t, lapack_int* ldt,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work, lapack_int* ldwork );\nvoid LAPACK_slarfg( lapack_int* n, float* alpha, float* x, lapack_int* incx,\n                    float* tau );\nvoid LAPACK_dlarfg( lapack_int* n, double* alpha, double* x, lapack_int* incx,\n                    double* tau );\nvoid LAPACK_clarfg( lapack_int* n, lapack_complex_float* alpha,\n                    lapack_complex_float* x, lapack_int* incx,\n                    lapack_complex_float* tau );\nvoid LAPACK_zlarfg( lapack_int* n, lapack_complex_double* alpha,\n                    lapack_complex_double* x, lapack_int* incx,\n                    lapack_complex_double* tau );\nvoid LAPACK_slarft( char* direct, char* storev, lapack_int* n, lapack_int* k,\n                    const float* v, lapack_int* ldv, const float* tau, float* t,\n                    lapack_int* ldt );\nvoid LAPACK_dlarft( char* direct, char* storev, lapack_int* n, lapack_int* k,\n                    const double* v, lapack_int* ldv, const double* tau,\n                    double* t, lapack_int* ldt );\nvoid LAPACK_clarft( char* direct, char* storev, lapack_int* n, lapack_int* k,\n                    const lapack_complex_float* v, lapack_int* ldv,\n                    const lapack_complex_float* tau, lapack_complex_float* t,\n                    lapack_int* ldt );\nvoid LAPACK_zlarft( char* direct, char* storev, lapack_int* n, lapack_int* k,\n                    const lapack_complex_double* v, lapack_int* ldv,\n                    const lapack_complex_double* tau, lapack_complex_double* t,\n                    lapack_int* ldt );\nvoid LAPACK_slarfx( char* side, 
lapack_int* m, lapack_int* n, const float* v,\n                    float* tau, float* c, lapack_int* ldc, float* work );\nvoid LAPACK_dlarfx( char* side, lapack_int* m, lapack_int* n, const double* v,\n                    double* tau, double* c, lapack_int* ldc, double* work );\nvoid LAPACK_clarfx( char* side, lapack_int* m, lapack_int* n,\n                    const lapack_complex_float* v, lapack_complex_float* tau,\n                    lapack_complex_float* c, lapack_int* ldc,\n                    lapack_complex_float* work );\nvoid LAPACK_zlarfx( char* side, lapack_int* m, lapack_int* n,\n                    const lapack_complex_double* v, lapack_complex_double* tau,\n                    lapack_complex_double* c, lapack_int* ldc,\n                    lapack_complex_double* work );\nvoid LAPACK_slatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed,\n                    char* sym, float* d, lapack_int* mode, float* cond,\n                    float* dmax, lapack_int* kl, lapack_int* ku, char* pack,\n                    float* a, lapack_int* lda, float* work, lapack_int *info );\nvoid LAPACK_dlatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed,\n                    char* sym, double* d, lapack_int* mode, double* cond,\n                    double* dmax, lapack_int* kl, lapack_int* ku, char* pack,\n                    double* a, lapack_int* lda, double* work,\n                    lapack_int *info );\nvoid LAPACK_clatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed,\n                    char* sym, float* d, lapack_int* mode, float* cond,\n                    float* dmax, lapack_int* kl, lapack_int* ku, char* pack,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zlatms( lapack_int* m, lapack_int* n, char* dist, lapack_int* iseed,\n                    char* sym, double* d, lapack_int* mode, double* cond,\n                    double* 
dmax, lapack_int* kl, lapack_int* ku, char* pack,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_slag2d( lapack_int* m, lapack_int* n, const float* sa,\n                    lapack_int* ldsa, double* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_dlag2s( lapack_int* m, lapack_int* n, const double* a,\n                    lapack_int* lda, float* sa, lapack_int* ldsa,\n                    lapack_int *info );\nvoid LAPACK_clag2z( lapack_int* m, lapack_int* n,\n                    const lapack_complex_float* sa, lapack_int* ldsa,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_zlag2c( lapack_int* m, lapack_int* n,\n                    const lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_float* sa, lapack_int* ldsa,\n                    lapack_int *info );\nvoid LAPACK_slauum( char* uplo, lapack_int* n, float* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_dlauum( char* uplo, lapack_int* n, double* a, lapack_int* lda,\n                    lapack_int *info );\nvoid LAPACK_clauum( char* uplo, lapack_int* n, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_zlauum( char* uplo, lapack_int* n, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int *info );\nvoid LAPACK_slagge( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const float* d, float* a, lapack_int* lda,\n                    lapack_int* iseed, float* work, lapack_int *info );\nvoid LAPACK_dlagge( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const double* d, double* a, lapack_int* lda,\n                    lapack_int* iseed, double* work, lapack_int *info );\nvoid LAPACK_clagge( lapack_int* m, lapack_int* n, lapack_int* kl,\n   
                 lapack_int* ku, const float* d, lapack_complex_float* a,\n                    lapack_int* lda, lapack_int* iseed,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zlagge( lapack_int* m, lapack_int* n, lapack_int* kl,\n                    lapack_int* ku, const double* d, lapack_complex_double* a,\n                    lapack_int* lda, lapack_int* iseed,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_slaset( char* uplo, lapack_int* m, lapack_int* n, float* alpha,\n                    float* beta, float* a, lapack_int* lda );\nvoid LAPACK_dlaset( char* uplo, lapack_int* m, lapack_int* n, double* alpha,\n                    double* beta, double* a, lapack_int* lda );\nvoid LAPACK_claset( char* uplo, lapack_int* m, lapack_int* n,\n                    lapack_complex_float* alpha, lapack_complex_float* beta,\n                    lapack_complex_float* a, lapack_int* lda );\nvoid LAPACK_zlaset( char* uplo, lapack_int* m, lapack_int* n,\n                    lapack_complex_double* alpha, lapack_complex_double* beta,\n                    lapack_complex_double* a, lapack_int* lda );\nvoid LAPACK_slasrt( char* id, lapack_int* n, float* d, lapack_int *info );\nvoid LAPACK_dlasrt( char* id, lapack_int* n, double* d, lapack_int *info );\nvoid LAPACK_claghe( lapack_int* n, lapack_int* k, const float* d,\n                    lapack_complex_float* a, lapack_int* lda, lapack_int* iseed,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zlaghe( lapack_int* n, lapack_int* k, const double* d,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_int* iseed, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_slagsy( lapack_int* n, lapack_int* k, const float* d, float* a,\n                    lapack_int* lda, lapack_int* iseed, float* work,\n                    lapack_int *info );\nvoid LAPACK_dlagsy( 
lapack_int* n, lapack_int* k, const double* d, double* a,\n                    lapack_int* lda, lapack_int* iseed, double* work,\n                    lapack_int *info );\nvoid LAPACK_clagsy( lapack_int* n, lapack_int* k, const float* d,\n                    lapack_complex_float* a, lapack_int* lda, lapack_int* iseed,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zlagsy( lapack_int* n, lapack_int* k, const double* d,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_int* iseed, lapack_complex_double* work,\n                    lapack_int *info );\nvoid LAPACK_slapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n,\n                    float* x, lapack_int* ldx, lapack_int* k );\nvoid LAPACK_dlapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n,\n                    double* x, lapack_int* ldx, lapack_int* k );\nvoid LAPACK_clapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n,\n                    lapack_complex_float* x, lapack_int* ldx, lapack_int* k );\nvoid LAPACK_zlapmr( lapack_logical* forwrd, lapack_int* m, lapack_int* n,\n                    lapack_complex_double* x, lapack_int* ldx, lapack_int* k );\nfloat LAPACK_slapy2( float* x, float* y );\ndouble LAPACK_dlapy2( double* x, double* y );\nfloat LAPACK_slapy3( float* x, float* y, float* z );\ndouble LAPACK_dlapy3( double* x, double* y, double* z );\nvoid LAPACK_slartgp( float* f, float* g, float* cs, float* sn, float* r );\nvoid LAPACK_dlartgp( double* f, double* g, double* cs, double* sn, double* r );\nvoid LAPACK_slartgs( float* x, float* y, float* sigma, float* cs, float* sn );\nvoid LAPACK_dlartgs( double* x, double* y, double* sigma, double* cs,\n                     double* sn );\n// LAPACK 3.3.0\nvoid LAPACK_cbbcsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    float* theta, 
float* phi,\n                    lapack_complex_float* u1, lapack_int* ldu1,\n                    lapack_complex_float* u2, lapack_int* ldu2,\n                    lapack_complex_float* v1t, lapack_int* ldv1t,\n                    lapack_complex_float* v2t, lapack_int* ldv2t,\n                    float* b11d, float* b11e, float* b12d,\n                    float* b12e, float* b21d, float* b21e,\n                    float* b22d, float* b22e, float* rwork,\n                    lapack_int* lrwork , lapack_int *info );\nvoid LAPACK_cheswapr( char* uplo, lapack_int* n,\n                      lapack_complex_float* a, lapack_int* i1,\n                      lapack_int* i2 );\nvoid LAPACK_chetri2( char* uplo, lapack_int* n,\n                     lapack_complex_float* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_float* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_chetri2x( char* uplo, lapack_int* n,\n                      lapack_complex_float* a, lapack_int* lda,\n                      const lapack_int* ipiv,\n                      lapack_complex_float* work, lapack_int* nb , lapack_int *info );\nvoid LAPACK_chetrs2( char* uplo, lapack_int* n,\n                     lapack_int* nrhs, const lapack_complex_float* a,\n                     lapack_int* lda, const lapack_int* ipiv,\n                     lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* work , lapack_int *info );\nvoid LAPACK_csyconv( char* uplo, char* way,\n                     lapack_int* n, lapack_complex_float* a,\n                     lapack_int* lda, const lapack_int* ipiv,\n                     lapack_complex_float* work , lapack_int *info );\nvoid LAPACK_csyswapr( char* uplo, lapack_int* n,\n                      lapack_complex_float* a, lapack_int* i1,\n                      lapack_int* i2 );\nvoid LAPACK_csytri2( char* uplo, lapack_int* n,\n                     lapack_complex_float* a, lapack_int* 
lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_float* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_csytri2x( char* uplo, lapack_int* n,\n                      lapack_complex_float* a, lapack_int* lda,\n                      const lapack_int* ipiv,\n                      lapack_complex_float* work, lapack_int* nb , lapack_int *info );\nvoid LAPACK_csytrs2( char* uplo, lapack_int* n,\n                     lapack_int* nrhs, const lapack_complex_float* a,\n                     lapack_int* lda, const lapack_int* ipiv,\n                     lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* work , lapack_int *info );\nvoid LAPACK_cunbdb( char* trans, char* signs,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    lapack_complex_float* x11, lapack_int* ldx11,\n                    lapack_complex_float* x12, lapack_int* ldx12,\n                    lapack_complex_float* x21, lapack_int* ldx21,\n                    lapack_complex_float* x22, lapack_int* ldx22,\n                    float* theta, float* phi,\n                    lapack_complex_float* taup1,\n                    lapack_complex_float* taup2,\n                    lapack_complex_float* tauq1,\n                    lapack_complex_float* tauq2,\n                    lapack_complex_float* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_cuncsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    char* signs, lapack_int* m, lapack_int* p,\n                    lapack_int* q, lapack_complex_float* x11,\n                    lapack_int* ldx11, lapack_complex_float* x12,\n                    lapack_int* ldx12, lapack_complex_float* x21,\n                    lapack_int* ldx21, lapack_complex_float* x22,\n                    lapack_int* ldx22, float* theta,\n                    lapack_complex_float* u1, lapack_int* ldu1,\n                 
   lapack_complex_float* u2, lapack_int* ldu2,\n                    lapack_complex_float* v1t, lapack_int* ldv1t,\n                    lapack_complex_float* v2t, lapack_int* ldv2t,\n                    lapack_complex_float* work, lapack_int* lwork,\n                    float* rwork, lapack_int* lrwork,\n                    lapack_int* iwork , lapack_int *info );\nvoid LAPACK_dbbcsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    double* theta, double* phi, double* u1,\n                    lapack_int* ldu1, double* u2, lapack_int* ldu2,\n                    double* v1t, lapack_int* ldv1t, double* v2t,\n                    lapack_int* ldv2t, double* b11d, double* b11e,\n                    double* b12d, double* b12e, double* b21d,\n                    double* b21e, double* b22d, double* b22e,\n                    double* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_dorbdb( char* trans, char* signs,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    double* x11, lapack_int* ldx11, double* x12,\n                    lapack_int* ldx12, double* x21, lapack_int* ldx21,\n                    double* x22, lapack_int* ldx22, double* theta,\n                    double* phi, double* taup1, double* taup2,\n                    double* tauq1, double* tauq2, double* work,\n                    lapack_int* lwork , lapack_int *info );\nvoid LAPACK_dorcsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    char* signs, lapack_int* m, lapack_int* p,\n                    lapack_int* q, double* x11, lapack_int* ldx11,\n                    double* x12, lapack_int* ldx12, double* x21,\n                    lapack_int* ldx21, double* x22, lapack_int* ldx22,\n                    double* theta, double* u1, lapack_int* ldu1,\n                    double* u2, 
lapack_int* ldu2, double* v1t,\n                    lapack_int* ldv1t, double* v2t, lapack_int* ldv2t,\n                    double* work, lapack_int* lwork,\n                    lapack_int* iwork , lapack_int *info );\nvoid LAPACK_dsyconv( char* uplo, char* way,\n                     lapack_int* n, double* a, lapack_int* lda,\n                     const lapack_int* ipiv, double* work , lapack_int *info );\nvoid LAPACK_dsyswapr( char* uplo, lapack_int* n,\n                      double* a, lapack_int* i1, lapack_int* i2 );\nvoid LAPACK_dsytri2( char* uplo, lapack_int* n,\n                     double* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_double* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_dsytri2x( char* uplo, lapack_int* n,\n                      double* a, lapack_int* lda,\n                      const lapack_int* ipiv, double* work,\n                      lapack_int* nb , lapack_int *info );\nvoid LAPACK_dsytrs2( char* uplo, lapack_int* n,\n                     lapack_int* nrhs, const double* a,\n                     lapack_int* lda, const lapack_int* ipiv,\n                     double* b, lapack_int* ldb, double* work , lapack_int *info );\nvoid LAPACK_sbbcsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    float* theta, float* phi, float* u1,\n                    lapack_int* ldu1, float* u2, lapack_int* ldu2,\n                    float* v1t, lapack_int* ldv1t, float* v2t,\n                    lapack_int* ldv2t, float* b11d, float* b11e,\n                    float* b12d, float* b12e, float* b21d,\n                    float* b21e, float* b22d, float* b22e,\n                    float* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_sorbdb( char* trans, char* signs,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    
float* x11, lapack_int* ldx11, float* x12,\n                    lapack_int* ldx12, float* x21, lapack_int* ldx21,\n                    float* x22, lapack_int* ldx22, float* theta,\n                    float* phi, float* taup1, float* taup2,\n                    float* tauq1, float* tauq2, float* work,\n                    lapack_int* lwork , lapack_int *info );\nvoid LAPACK_sorcsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    char* signs, lapack_int* m, lapack_int* p,\n                    lapack_int* q, float* x11, lapack_int* ldx11,\n                    float* x12, lapack_int* ldx12, float* x21,\n                    lapack_int* ldx21, float* x22, lapack_int* ldx22,\n                    float* theta, float* u1, lapack_int* ldu1,\n                    float* u2, lapack_int* ldu2, float* v1t,\n                    lapack_int* ldv1t, float* v2t, lapack_int* ldv2t,\n                    float* work, lapack_int* lwork,\n                    lapack_int* iwork , lapack_int *info );\nvoid LAPACK_ssyconv( char* uplo, char* way,\n                     lapack_int* n, float* a, lapack_int* lda,\n                     const lapack_int* ipiv, float* work , lapack_int *info );\nvoid LAPACK_ssyswapr( char* uplo, lapack_int* n,\n                      float* a, lapack_int* i1, lapack_int* i2 );\nvoid LAPACK_ssytri2( char* uplo, lapack_int* n,\n                     float* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_float* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_ssytri2x( char* uplo, lapack_int* n,\n                      float* a, lapack_int* lda,\n                      const lapack_int* ipiv, float* work,\n                      lapack_int* nb , lapack_int *info );\nvoid LAPACK_ssytrs2( char* uplo, lapack_int* n,\n                     lapack_int* nrhs, const float* a,\n                     lapack_int* lda, const lapack_int* ipiv,\n                     
float* b, lapack_int* ldb, float* work , lapack_int *info );\nvoid LAPACK_zbbcsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    double* theta, double* phi,\n                    lapack_complex_double* u1, lapack_int* ldu1,\n                    lapack_complex_double* u2, lapack_int* ldu2,\n                    lapack_complex_double* v1t, lapack_int* ldv1t,\n                    lapack_complex_double* v2t, lapack_int* ldv2t,\n                    double* b11d, double* b11e, double* b12d,\n                    double* b12e, double* b21d, double* b21e,\n                    double* b22d, double* b22e, double* rwork,\n                    lapack_int* lrwork , lapack_int *info );\nvoid LAPACK_zheswapr( char* uplo, lapack_int* n,\n                      lapack_complex_double* a, lapack_int* i1,\n                      lapack_int* i2 );\nvoid LAPACK_zhetri2( char* uplo, lapack_int* n,\n                     lapack_complex_double* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_double* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_zhetri2x( char* uplo, lapack_int* n,\n                      lapack_complex_double* a, lapack_int* lda,\n                      const lapack_int* ipiv,\n                      lapack_complex_double* work, lapack_int* nb , lapack_int *info );\nvoid LAPACK_zhetrs2( char* uplo, lapack_int* n,\n                     lapack_int* nrhs,\n                     const lapack_complex_double* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* work , lapack_int *info );\nvoid LAPACK_zsyconv( char* uplo, char* way,\n                     lapack_int* n, lapack_complex_double* a,\n                     lapack_int* lda, const lapack_int* ipiv,\n                     
lapack_complex_double* work , lapack_int *info );\nvoid LAPACK_zsyswapr( char* uplo, lapack_int* n,\n                      lapack_complex_double* a, lapack_int* i1,\n                      lapack_int* i2 );\nvoid LAPACK_zsytri2( char* uplo, lapack_int* n,\n                     lapack_complex_double* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_double* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_zsytri2x( char* uplo, lapack_int* n,\n                      lapack_complex_double* a, lapack_int* lda,\n                      const lapack_int* ipiv,\n                      lapack_complex_double* work, lapack_int* nb , lapack_int *info );\nvoid LAPACK_zsytrs2( char* uplo, lapack_int* n,\n                     lapack_int* nrhs,\n                     const lapack_complex_double* a, lapack_int* lda,\n                     const lapack_int* ipiv,\n                     lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* work , lapack_int *info );\nvoid LAPACK_zunbdb( char* trans, char* signs,\n                    lapack_int* m, lapack_int* p, lapack_int* q,\n                    lapack_complex_double* x11, lapack_int* ldx11,\n                    lapack_complex_double* x12, lapack_int* ldx12,\n                    lapack_complex_double* x21, lapack_int* ldx21,\n                    lapack_complex_double* x22, lapack_int* ldx22,\n                    double* theta, double* phi,\n                    lapack_complex_double* taup1,\n                    lapack_complex_double* taup2,\n                    lapack_complex_double* tauq1,\n                    lapack_complex_double* tauq2,\n                    lapack_complex_double* work, lapack_int* lwork , lapack_int *info );\nvoid LAPACK_zuncsd( char* jobu1, char* jobu2,\n                    char* jobv1t, char* jobv2t, char* trans,\n                    char* signs, lapack_int* m, lapack_int* p,\n                    lapack_int* q, 
lapack_complex_double* x11,\n                    lapack_int* ldx11, lapack_complex_double* x12,\n                    lapack_int* ldx12, lapack_complex_double* x21,\n                    lapack_int* ldx21, lapack_complex_double* x22,\n                    lapack_int* ldx22, double* theta,\n                    lapack_complex_double* u1, lapack_int* ldu1,\n                    lapack_complex_double* u2, lapack_int* ldu2,\n                    lapack_complex_double* v1t, lapack_int* ldv1t,\n                    lapack_complex_double* v2t, lapack_int* ldv2t,\n                    lapack_complex_double* work, lapack_int* lwork,\n                    double* rwork, lapack_int* lrwork,\n                    lapack_int* iwork , lapack_int *info );\n// LAPACK 3.4.0\nvoid LAPACK_sgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* nb, const float* v,\n                     lapack_int* ldv, const float* t, lapack_int* ldt, float* c,\n                     lapack_int* ldc, float* work, lapack_int *info );\nvoid LAPACK_dgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* nb, const double* v,\n                     lapack_int* ldv, const double* t, lapack_int* ldt,\n                     double* c, lapack_int* ldc, double* work,\n                     lapack_int *info );\nvoid LAPACK_cgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* nb,\n                     const lapack_complex_float* v, lapack_int* ldv,\n                     const lapack_complex_float* t, lapack_int* ldt,\n                     lapack_complex_float* c, lapack_int* ldc,\n                     lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zgemqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* nb,\n                     const lapack_complex_double* v, lapack_int* ldv,\n          
           const lapack_complex_double* t, lapack_int* ldt,\n                     lapack_complex_double* c, lapack_int* ldc,\n                     lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_sgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb, float* a,\n                    lapack_int* lda, float* t, lapack_int* ldt, float* work,\n                    lapack_int *info );\nvoid LAPACK_dgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb, double* a,\n                    lapack_int* lda, double* t, lapack_int* ldt, double* work,\n                    lapack_int *info );\nvoid LAPACK_cgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* t, lapack_int* ldt,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_zgeqrt( lapack_int* m, lapack_int* n, lapack_int* nb,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* t, lapack_int* ldt,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_sgeqrt2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                     float* t, lapack_int* ldt, lapack_int *info );\nvoid LAPACK_dgeqrt2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                     double* t, lapack_int* ldt, lapack_int *info );\nvoid LAPACK_cgeqrt2( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                     lapack_int* lda, lapack_complex_float* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_zgeqrt2( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                     lapack_int* lda, lapack_complex_double* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_sgeqrt3( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                     float* t, lapack_int* ldt, lapack_int *info );\nvoid LAPACK_dgeqrt3( lapack_int* 
m, lapack_int* n, double* a, lapack_int* lda,\n                     double* t, lapack_int* ldt, lapack_int *info );\nvoid LAPACK_cgeqrt3( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                     lapack_int* lda, lapack_complex_float* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_zgeqrt3( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                     lapack_int* lda, lapack_complex_double* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_stpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* l, lapack_int* nb,\n                     const float* v, lapack_int* ldv, const float* t,\n                     lapack_int* ldt, float* a, lapack_int* lda, float* b,\n                     lapack_int* ldb, float* work, lapack_int *info );\nvoid LAPACK_dtpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* l, lapack_int* nb,\n                     const double* v, lapack_int* ldv, const double* t,\n                     lapack_int* ldt, double* a, lapack_int* lda, double* b,\n                     lapack_int* ldb, double* work, lapack_int *info );\nvoid LAPACK_ctpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* l, lapack_int* nb,\n                     const lapack_complex_float* v, lapack_int* ldv,\n                     const lapack_complex_float* t, lapack_int* ldt,\n                     lapack_complex_float* a, lapack_int* lda,\n                     lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_ztpmqrt( char* side, char* trans, lapack_int* m, lapack_int* n,\n                     lapack_int* k, lapack_int* l, lapack_int* nb,\n                     const lapack_complex_double* v, lapack_int* ldv,\n                     const 
lapack_complex_double* t, lapack_int* ldt,\n                     lapack_complex_double* a, lapack_int* lda,\n                     lapack_complex_double* b, lapack_int* ldb,\n                     lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_dtpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb,\n                    double* a, lapack_int* lda, double* b, lapack_int* ldb,\n                    double* t, lapack_int* ldt, double* work,\n                    lapack_int *info );\nvoid LAPACK_ctpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* t, lapack_complex_float* b,\n                    lapack_int* ldb, lapack_int* ldt,\n                    lapack_complex_float* work, lapack_int *info );\nvoid LAPACK_ztpqrt( lapack_int* m, lapack_int* n, lapack_int* l, lapack_int* nb,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    lapack_complex_double* t, lapack_int* ldt,\n                    lapack_complex_double* work, lapack_int *info );\nvoid LAPACK_stpqrt2( lapack_int* m, lapack_int* n, float* a, lapack_int* lda,\n                     float* b, lapack_int* ldb, float* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_dtpqrt2( lapack_int* m, lapack_int* n, double* a, lapack_int* lda,\n                     double* b, lapack_int* ldb, double* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_ctpqrt2( lapack_int* m, lapack_int* n, lapack_complex_float* a,\n                     lapack_int* lda, lapack_complex_float* b, lapack_int* ldb,\n                     lapack_complex_float* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_ztpqrt2( lapack_int* m, lapack_int* n, lapack_complex_double* a,\n                     lapack_int* lda, lapack_complex_double* b, 
lapack_int* ldb,\n                     lapack_complex_double* t, lapack_int* ldt,\n                     lapack_int *info );\nvoid LAPACK_stprfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l,\n                    const float* v, lapack_int* ldv, const float* t,\n                    lapack_int* ldt, float* a, lapack_int* lda, float* b,\n                    lapack_int* ldb, const float* mywork,\n                    lapack_int* myldwork );\nvoid LAPACK_dtprfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l,\n                    const double* v, lapack_int* ldv, const double* t,\n                    lapack_int* ldt, double* a, lapack_int* lda, double* b,\n                    lapack_int* ldb, const double* mywork,\n                    lapack_int* myldwork );\nvoid LAPACK_ctprfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l,\n                    const lapack_complex_float* v, lapack_int* ldv,\n                    const lapack_complex_float* t, lapack_int* ldt,\n                    lapack_complex_float* a, lapack_int* lda,\n                    lapack_complex_float* b, lapack_int* ldb,\n                    const float* mywork, lapack_int* myldwork );\nvoid LAPACK_ztprfb( char* side, char* trans, char* direct, char* storev,\n                    lapack_int* m, lapack_int* n, lapack_int* k, lapack_int* l,\n                    const lapack_complex_double* v, lapack_int* ldv,\n                    const lapack_complex_double* t, lapack_int* ldt,\n                    lapack_complex_double* a, lapack_int* lda,\n                    lapack_complex_double* b, lapack_int* ldb,\n                    const double* mywork, lapack_int* myldwork );\n// LAPACK 3.X.X\nvoid LAPACK_csyr( char* uplo, lapack_int* n, lapack_complex_float* 
alpha,\n                      const lapack_complex_float* x, lapack_int* incx,\n                      lapack_complex_float* a, lapack_int* lda );\nvoid LAPACK_zsyr( char* uplo, lapack_int* n, lapack_complex_double* alpha,\n                      const lapack_complex_double* x, lapack_int* incx,\n                      lapack_complex_double* a, lapack_int* lda );\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n\n#endif /* _LAPACKE_H_ */\n\n#endif /* _MKL_LAPACKE_H_ */\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/misc/lapacke_mangling.h",
    "content": "#ifndef LAPACK_HEADER_INCLUDED\n#define LAPACK_HEADER_INCLUDED\n\n#ifndef LAPACK_GLOBAL\n#if defined(LAPACK_GLOBAL_PATTERN_LC) || defined(ADD_)\n#define LAPACK_GLOBAL(lcname,UCNAME)  lcname##_\n#elif defined(LAPACK_GLOBAL_PATTERN_UC) || defined(UPPER)\n#define LAPACK_GLOBAL(lcname,UCNAME)  UCNAME\n#elif defined(LAPACK_GLOBAL_PATTERN_MC) || defined(NOCHANGE)\n#define LAPACK_GLOBAL(lcname,UCNAME)  lcname\n#else\n#define LAPACK_GLOBAL(lcname,UCNAME)  lcname##_\n#endif\n#endif\n\n#endif\n\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/plugins/ArrayCwiseBinaryOps.h",
    "content": "\n/** \\returns an expression of the coefficient wise product of \\c *this and \\a other\n  *\n  * \\sa MatrixBase::cwiseProduct\n  */\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)\noperator*(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const\n{\n  return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)(derived(), other.derived());\n}\n\n/** \\returns an expression of the coefficient wise quotient of \\c *this and \\a other\n  *\n  * \\sa MatrixBase::cwiseQuotient\n  */\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_quotient_op<Scalar,typename OtherDerived::Scalar>, const Derived, const OtherDerived>\noperator/(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const\n{\n  return CwiseBinaryOp<internal::scalar_quotient_op<Scalar,typename OtherDerived::Scalar>, const Derived, const OtherDerived>(derived(), other.derived());\n}\n\n/** \\returns an expression of the coefficient-wise min of \\c *this and \\a other\n  *\n  * Example: \\include Cwise_min.cpp\n  * Output: \\verbinclude Cwise_min.out\n  *\n  * \\sa max()\n  */\nEIGEN_MAKE_CWISE_BINARY_OP(min,min)\n\n/** \\returns an expression of the coefficient-wise min of \\c *this and scalar \\a other\n  *\n  * \\sa max()\n  */\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived,\n                                        const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >\n#ifdef EIGEN_PARSED_BY_DOXYGEN\nmin\n#else\n(min)\n#endif\n(const Scalar &other) const\n{\n  return (min)(Derived::PlainObject::Constant(rows(), cols(), other));\n}\n\n/** \\returns an expression of the coefficient-wise max of \\c *this and \\a other\n  *\n  * Example: \\include Cwise_max.cpp\n  * Output: \\verbinclude Cwise_max.out\n  *\n  * \\sa min()\n  
*/\nEIGEN_MAKE_CWISE_BINARY_OP(max,max)\n\n/** \\returns an expression of the coefficient-wise max of \\c *this and scalar \\a other\n  *\n  * \\sa min()\n  */\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived,\n                                        const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> >\n#ifdef EIGEN_PARSED_BY_DOXYGEN\nmax\n#else\n(max)\n#endif\n(const Scalar &other) const\n{\n  return (max)(Derived::PlainObject::Constant(rows(), cols(), other));\n}\n\n/** \\returns an expression of the coefficient-wise power of \\c *this to the given array of \\a exponents.\n  *\n  * This function computes the coefficient-wise power.\n  *\n  * Example: \\include Cwise_array_power_array.cpp\n  * Output: \\verbinclude Cwise_array_power_array.out\n  */\nEIGEN_MAKE_CWISE_BINARY_OP(pow,pow)\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\nEIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(pow,pow)\n#else\n/** \\returns an expression of the coefficients of \\c *this rasied to the constant power \\a exponent\n  *\n  * \\tparam T is the scalar type of \\a exponent. It must be compatible with the scalar type of the given expression.\n  *\n  * This function computes the coefficient-wise power. 
The function MatrixBase::pow() in the\n  * unsupported module MatrixFunctions computes the matrix power.\n  *\n  * Example: \\include Cwise_pow.cpp\n  * Output: \\verbinclude Cwise_pow.out\n  *\n  * \\sa ArrayBase::pow(ArrayBase), square(), cube(), exp(), log()\n  */\ntemplate<typename T>\nconst CwiseBinaryOp<internal::scalar_pow_op<Scalar,T>,Derived,Constant<T> > pow(const T& exponent) const;\n#endif\n\n\n// TODO code generating macros could be moved to Macros.h and could include generation of documentation\n#define EIGEN_MAKE_CWISE_COMP_OP(OP, COMPARATOR) \\\ntemplate<typename OtherDerived> \\\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<Scalar, typename OtherDerived::Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived> \\\nOP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \\\n{ \\\n  return CwiseBinaryOp<internal::scalar_cmp_op<Scalar, typename OtherDerived::Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived>(derived(), other.derived()); \\\n}\\\ntypedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> > Cmp ## COMPARATOR ## ReturnType; \\\ntypedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,Scalar, internal::cmp_ ## COMPARATOR>, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject>, const Derived > RCmp ## COMPARATOR ## ReturnType; \\\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Cmp ## COMPARATOR ## ReturnType \\\nOP(const Scalar& s) const { \\\n  return this->OP(Derived::PlainObject::Constant(rows(), cols(), s)); \\\n} \\\nEIGEN_DEVICE_FUNC friend EIGEN_STRONG_INLINE const RCmp ## COMPARATOR ## ReturnType \\\nOP(const Scalar& s, const Derived& d) { \\\n  return Derived::PlainObject::Constant(d.rows(), d.cols(), s).OP(d); \\\n}\n\n#define EIGEN_MAKE_CWISE_COMP_R_OP(OP, R_OP, RCOMPARATOR) \\\ntemplate<typename OtherDerived> 
\\\nEIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<typename OtherDerived::Scalar, Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived> \\\nOP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \\\n{ \\\n  return CwiseBinaryOp<internal::scalar_cmp_op<typename OtherDerived::Scalar, Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived>(other.derived(), derived()); \\\n} \\\nEIGEN_DEVICE_FUNC \\\ninline const RCmp ## RCOMPARATOR ## ReturnType \\\nOP(const Scalar& s) const { \\\n  return Derived::PlainObject::Constant(rows(), cols(), s).R_OP(*this); \\\n} \\\nfriend inline const Cmp ## RCOMPARATOR ## ReturnType \\\nOP(const Scalar& s, const Derived& d) { \\\n  return d.R_OP(Derived::PlainObject::Constant(d.rows(), d.cols(), s)); \\\n}\n\n\n\n/** \\returns an expression of the coefficient-wise \\< operator of *this and \\a other\n  *\n  * Example: \\include Cwise_less.cpp\n  * Output: \\verbinclude Cwise_less.out\n  *\n  * \\sa all(), any(), operator>(), operator<=()\n  */\nEIGEN_MAKE_CWISE_COMP_OP(operator<, LT)\n\n/** \\returns an expression of the coefficient-wise \\<= operator of *this and \\a other\n  *\n  * Example: \\include Cwise_less_equal.cpp\n  * Output: \\verbinclude Cwise_less_equal.out\n  *\n  * \\sa all(), any(), operator>=(), operator<()\n  */\nEIGEN_MAKE_CWISE_COMP_OP(operator<=, LE)\n\n/** \\returns an expression of the coefficient-wise \\> operator of *this and \\a other\n  *\n  * Example: \\include Cwise_greater.cpp\n  * Output: \\verbinclude Cwise_greater.out\n  *\n  * \\sa all(), any(), operator>=(), operator<()\n  */\nEIGEN_MAKE_CWISE_COMP_R_OP(operator>, operator<, LT)\n\n/** \\returns an expression of the coefficient-wise \\>= operator of *this and \\a other\n  *\n  * Example: \\include Cwise_greater_equal.cpp\n  * Output: \\verbinclude Cwise_greater_equal.out\n  *\n  * \\sa all(), any(), operator>(), operator<=()\n  
*/\nEIGEN_MAKE_CWISE_COMP_R_OP(operator>=, operator<=, LE)\n\n/** \\returns an expression of the coefficient-wise == operator of *this and \\a other\n  *\n  * \\warning this performs an exact comparison, which is generally a bad idea with floating-point types.\n  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is\n  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and\n  * isMuchSmallerThan().\n  *\n  * Example: \\include Cwise_equal_equal.cpp\n  * Output: \\verbinclude Cwise_equal_equal.out\n  *\n  * \\sa all(), any(), isApprox(), isMuchSmallerThan()\n  */\nEIGEN_MAKE_CWISE_COMP_OP(operator==, EQ)\n\n/** \\returns an expression of the coefficient-wise != operator of *this and \\a other\n  *\n  * \\warning this performs an exact comparison, which is generally a bad idea with floating-point types.\n  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is\n  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and\n  * isMuchSmallerThan().\n  *\n  * Example: \\include Cwise_not_equal.cpp\n  * Output: \\verbinclude Cwise_not_equal.out\n  *\n  * \\sa all(), any(), isApprox(), isMuchSmallerThan()\n  */\nEIGEN_MAKE_CWISE_COMP_OP(operator!=, NEQ)\n\n\n#undef EIGEN_MAKE_CWISE_COMP_OP\n#undef EIGEN_MAKE_CWISE_COMP_R_OP\n\n// scalar addition\n#ifndef EIGEN_PARSED_BY_DOXYGEN\nEIGEN_MAKE_SCALAR_BINARY_OP(operator+,sum)\n#else\n/** \\returns an expression of \\c *this with each coeff incremented by the constant \\a scalar\n  *\n  * \\tparam T is the scalar type of \\a scalar. 
It must be compatible with the scalar type of the given expression.\n  *\n  * Example: \\include Cwise_plus.cpp\n  * Output: \\verbinclude Cwise_plus.out\n  *\n  * \\sa operator+=(), operator-()\n  */\ntemplate<typename T>\nconst CwiseBinaryOp<internal::scalar_sum_op<Scalar,T>,Derived,Constant<T> > operator+(const T& scalar) const;\n/** \\returns an expression of \\a expr with each coeff incremented by the constant \\a scalar\n  *\n  * \\tparam T is the scalar type of \\a scalar. It must be compatible with the scalar type of the given expression.\n  */\ntemplate<typename T> friend\nconst CwiseBinaryOp<internal::scalar_sum_op<T,Scalar>,Constant<T>,Derived> operator+(const T& scalar, const StorageBaseType& expr);\n#endif\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\nEIGEN_MAKE_SCALAR_BINARY_OP(operator-,difference)\n#else\n/** \\returns an expression of \\c *this with each coeff decremented by the constant \\a scalar\n  *\n  * \\tparam T is the scalar type of \\a scalar. It must be compatible with the scalar type of the given expression.\n  *\n  * Example: \\include Cwise_minus.cpp\n  * Output: \\verbinclude Cwise_minus.out\n  *\n  * \\sa operator+=(), operator-()\n  */\ntemplate<typename T>\nconst CwiseBinaryOp<internal::scalar_difference_op<Scalar,T>,Derived,Constant<T> > operator-(const T& scalar) const;\n/** \\returns an expression of the constant matrix of value \\a scalar decremented by the coefficients of \\a expr\n  *\n  * \\tparam T is the scalar type of \\a scalar. It must be compatible with the scalar type of the given expression.\n  */\ntemplate<typename T> friend\nconst CwiseBinaryOp<internal::scalar_difference_op<T,Scalar>,Constant<T>,Derived> operator-(const T& scalar, const StorageBaseType& expr);\n#endif\n\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n  EIGEN_MAKE_SCALAR_BINARY_OP_ONTHELEFT(operator/,quotient)\n#else\n  /**\n    * \\brief Component-wise division of the scalar \\a s by array elements of \\a a.\n    *\n    * \\tparam Scalar is the scalar type of \\a x. 
It must be compatible with the scalar type of the given array expression (\\c Derived::Scalar).\n    */\n  template<typename T> friend\n  inline const CwiseBinaryOp<internal::scalar_quotient_op<T,Scalar>,Constant<T>,Derived>\n  operator/(const T& s,const StorageBaseType& a);\n#endif\n\n/** \\returns an expression of the coefficient-wise ^ operator of *this and \\a other\n *\n * \\warning this operator is for expression of bool only.\n *\n * Example: \\include Cwise_boolean_xor.cpp\n * Output: \\verbinclude Cwise_boolean_xor.out\n *\n * \\sa operator&&(), select()\n */\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\ninline const CwiseBinaryOp<internal::scalar_boolean_xor_op, const Derived, const OtherDerived>\noperator^(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const\n{\n  EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value),\n                      THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);\n  return CwiseBinaryOp<internal::scalar_boolean_xor_op, const Derived, const OtherDerived>(derived(),other.derived());\n}\n\n// NOTE disabled until we agree on argument order\n#if 0\n/** \\cpp11 \\returns an expression of the coefficient-wise polygamma function.\n  *\n  * \\specialfunctions_module\n  *\n  * It returns the \\a n -th derivative of the digamma(psi) evaluated at \\c *this.\n  *\n  * \\warning Be careful with the order of the parameters: x.polygamma(n) is equivalent to polygamma(n,x)\n  *\n  * \\sa Eigen::polygamma()\n  */\ntemplate<typename DerivedN>\ninline const CwiseBinaryOp<internal::scalar_polygamma_op<Scalar>, const DerivedN, const Derived>\npolygamma(const EIGEN_CURRENT_STORAGE_BASE_CLASS<DerivedN> &n) const\n{\n  return CwiseBinaryOp<internal::scalar_polygamma_op<Scalar>, const DerivedN, const Derived>(n.derived(), this->derived());\n}\n#endif\n\n/** \\returns an expression of the coefficient-wise zeta function.\n  *\n  * \\specialfunctions_module\n  *\n  * It 
returns the Riemann zeta function of two arguments \\c *this and \\a q:\n  *\n  * \\param *this is the exposent, it must be > 1\n  * \\param q is the shift, it must be > 0\n  *\n  * \\note This function supports only float and double scalar types. To support other scalar types, the user has\n  * to provide implementations of zeta(T,T) for any scalar type T to be supported.\n  *\n  * This method is an alias for zeta(*this,q);\n  *\n  * \\sa Eigen::zeta()\n  */\ntemplate<typename DerivedQ>\ninline const CwiseBinaryOp<internal::scalar_zeta_op<Scalar>, const Derived, const DerivedQ>\nzeta(const EIGEN_CURRENT_STORAGE_BASE_CLASS<DerivedQ> &q) const\n{\n  return CwiseBinaryOp<internal::scalar_zeta_op<Scalar>, const Derived, const DerivedQ>(this->derived(), q.derived());\n}\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/plugins/ArrayCwiseUnaryOps.h",
    "content": "\n\ntypedef CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived> AbsReturnType;\ntypedef CwiseUnaryOp<internal::scalar_arg_op<Scalar>, const Derived> ArgReturnType;\ntypedef CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived> Abs2ReturnType;\ntypedef CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived> SqrtReturnType;\ntypedef CwiseUnaryOp<internal::scalar_rsqrt_op<Scalar>, const Derived> RsqrtReturnType;\ntypedef CwiseUnaryOp<internal::scalar_sign_op<Scalar>, const Derived> SignReturnType;\ntypedef CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived> InverseReturnType;\ntypedef CwiseUnaryOp<internal::scalar_boolean_not_op<Scalar>, const Derived> BooleanNotReturnType;\n\ntypedef CwiseUnaryOp<internal::scalar_exp_op<Scalar>, const Derived> ExpReturnType;\ntypedef CwiseUnaryOp<internal::scalar_expm1_op<Scalar>, const Derived> Expm1ReturnType;\ntypedef CwiseUnaryOp<internal::scalar_log_op<Scalar>, const Derived> LogReturnType;\ntypedef CwiseUnaryOp<internal::scalar_log1p_op<Scalar>, const Derived> Log1pReturnType;\ntypedef CwiseUnaryOp<internal::scalar_log10_op<Scalar>, const Derived> Log10ReturnType;\ntypedef CwiseUnaryOp<internal::scalar_cos_op<Scalar>, const Derived> CosReturnType;\ntypedef CwiseUnaryOp<internal::scalar_sin_op<Scalar>, const Derived> SinReturnType;\ntypedef CwiseUnaryOp<internal::scalar_tan_op<Scalar>, const Derived> TanReturnType;\ntypedef CwiseUnaryOp<internal::scalar_acos_op<Scalar>, const Derived> AcosReturnType;\ntypedef CwiseUnaryOp<internal::scalar_asin_op<Scalar>, const Derived> AsinReturnType;\ntypedef CwiseUnaryOp<internal::scalar_atan_op<Scalar>, const Derived> AtanReturnType;\ntypedef CwiseUnaryOp<internal::scalar_tanh_op<Scalar>, const Derived> TanhReturnType;\ntypedef CwiseUnaryOp<internal::scalar_sinh_op<Scalar>, const Derived> SinhReturnType;\ntypedef CwiseUnaryOp<internal::scalar_cosh_op<Scalar>, const Derived> CoshReturnType;\ntypedef 
CwiseUnaryOp<internal::scalar_square_op<Scalar>, const Derived> SquareReturnType;\ntypedef CwiseUnaryOp<internal::scalar_cube_op<Scalar>, const Derived> CubeReturnType;\ntypedef CwiseUnaryOp<internal::scalar_round_op<Scalar>, const Derived> RoundReturnType;\ntypedef CwiseUnaryOp<internal::scalar_floor_op<Scalar>, const Derived> FloorReturnType;\ntypedef CwiseUnaryOp<internal::scalar_ceil_op<Scalar>, const Derived> CeilReturnType;\ntypedef CwiseUnaryOp<internal::scalar_isnan_op<Scalar>, const Derived> IsNaNReturnType;\ntypedef CwiseUnaryOp<internal::scalar_isinf_op<Scalar>, const Derived> IsInfReturnType;\ntypedef CwiseUnaryOp<internal::scalar_isfinite_op<Scalar>, const Derived> IsFiniteReturnType;\n\n/** \\returns an expression of the coefficient-wise absolute value of \\c *this\n  *\n  * Example: \\include Cwise_abs.cpp\n  * Output: \\verbinclude Cwise_abs.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_abs\">Math functions</a>, abs2()\n  */\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const AbsReturnType\nabs() const\n{\n  return AbsReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise phase angle of \\c *this\n  *\n  * Example: \\include Cwise_arg.cpp\n  * Output: \\verbinclude Cwise_arg.out\n  *\n  * \\sa abs()\n  */\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const ArgReturnType\narg() const\n{\n  return ArgReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise squared absolute value of \\c *this\n  *\n  * Example: \\include Cwise_abs2.cpp\n  * Output: \\verbinclude Cwise_abs2.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_abs2\">Math functions</a>, abs(), square()\n  */\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const Abs2ReturnType\nabs2() const\n{\n  return Abs2ReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise exponential of *this.\n  *\n  * This function computes the coefficient-wise exponential. 
The function MatrixBase::exp() in the\n  * unsupported module MatrixFunctions computes the matrix exponential.\n  *\n  * Example: \\include Cwise_exp.cpp\n  * Output: \\verbinclude Cwise_exp.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_exp\">Math functions</a>, pow(), log(), sin(), cos()\n  */\nEIGEN_DEVICE_FUNC\ninline const ExpReturnType\nexp() const\n{\n  return ExpReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise exponential of *this minus 1.\n  *\n  * In exact arithmetic, \\c x.expm1() is equivalent to \\c x.exp() - 1,\n  * however, with finite precision, this function is much more accurate when \\c x is close to zero.\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_expm1\">Math functions</a>, exp()\n  */\nEIGEN_DEVICE_FUNC\ninline const Expm1ReturnType\nexpm1() const\n{\n  return Expm1ReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise logarithm of *this.\n  *\n  * This function computes the coefficient-wise logarithm. 
The function MatrixBase::log() in the\n  * unsupported module MatrixFunctions computes the matrix logarithm.\n  *\n  * Example: \\include Cwise_log.cpp\n  * Output: \\verbinclude Cwise_log.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_log\">Math functions</a>, log()\n  */\nEIGEN_DEVICE_FUNC\ninline const LogReturnType\nlog() const\n{\n  return LogReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise logarithm of 1 plus \\c *this.\n  *\n  * In exact arithmetic, \\c x.log() is equivalent to \\c (x+1).log(),\n  * however, with finite precision, this function is much more accurate when \\c x is close to zero.\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_log1p\">Math functions</a>, log()\n  */\nEIGEN_DEVICE_FUNC\ninline const Log1pReturnType\nlog1p() const\n{\n  return Log1pReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise base-10 logarithm of *this.\n  *\n  * This function computes the coefficient-wise base-10 logarithm.\n  *\n  * Example: \\include Cwise_log10.cpp\n  * Output: \\verbinclude Cwise_log10.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_log10\">Math functions</a>, log()\n  */\nEIGEN_DEVICE_FUNC\ninline const Log10ReturnType\nlog10() const\n{\n  return Log10ReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise square root of *this.\n  *\n  * This function computes the coefficient-wise square root. 
The function MatrixBase::sqrt() in the\n  * unsupported module MatrixFunctions computes the matrix square root.\n  *\n  * Example: \\include Cwise_sqrt.cpp\n  * Output: \\verbinclude Cwise_sqrt.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_sqrt\">Math functions</a>, pow(), square()\n  */\nEIGEN_DEVICE_FUNC\ninline const SqrtReturnType\nsqrt() const\n{\n  return SqrtReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise inverse square root of *this.\n  *\n  * This function computes the coefficient-wise inverse square root.\n  *\n  * Example: \\include Cwise_sqrt.cpp\n  * Output: \\verbinclude Cwise_sqrt.out\n  *\n  * \\sa pow(), square()\n  */\nEIGEN_DEVICE_FUNC\ninline const RsqrtReturnType\nrsqrt() const\n{\n  return RsqrtReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise signum of *this.\n  *\n  * This function computes the coefficient-wise signum.\n  *\n  * Example: \\include Cwise_sign.cpp\n  * Output: \\verbinclude Cwise_sign.out\n  *\n  * \\sa pow(), square()\n  */\nEIGEN_DEVICE_FUNC\ninline const SignReturnType\nsign() const\n{\n  return SignReturnType(derived());\n}\n\n\n/** \\returns an expression of the coefficient-wise cosine of *this.\n  *\n  * This function computes the coefficient-wise cosine. The function MatrixBase::cos() in the\n  * unsupported module MatrixFunctions computes the matrix cosine.\n  *\n  * Example: \\include Cwise_cos.cpp\n  * Output: \\verbinclude Cwise_cos.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_cos\">Math functions</a>, sin(), acos()\n  */\nEIGEN_DEVICE_FUNC\ninline const CosReturnType\ncos() const\n{\n  return CosReturnType(derived());\n}\n\n\n/** \\returns an expression of the coefficient-wise sine of *this.\n  *\n  * This function computes the coefficient-wise sine. 
The function MatrixBase::sin() in the\n  * unsupported module MatrixFunctions computes the matrix sine.\n  *\n  * Example: \\include Cwise_sin.cpp\n  * Output: \\verbinclude Cwise_sin.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_sin\">Math functions</a>, cos(), asin()\n  */\nEIGEN_DEVICE_FUNC\ninline const SinReturnType\nsin() const\n{\n  return SinReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise tan of *this.\n  *\n  * Example: \\include Cwise_tan.cpp\n  * Output: \\verbinclude Cwise_tan.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_tan\">Math functions</a>, cos(), sin()\n  */\nEIGEN_DEVICE_FUNC\ninline const TanReturnType\ntan() const\n{\n  return TanReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise arc tan of *this.\n  *\n  * Example: \\include Cwise_atan.cpp\n  * Output: \\verbinclude Cwise_atan.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_atan\">Math functions</a>, tan(), asin(), acos()\n  */\nEIGEN_DEVICE_FUNC\ninline const AtanReturnType\natan() const\n{\n  return AtanReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise arc cosine of *this.\n  *\n  * Example: \\include Cwise_acos.cpp\n  * Output: \\verbinclude Cwise_acos.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_acos\">Math functions</a>, cos(), asin()\n  */\nEIGEN_DEVICE_FUNC\ninline const AcosReturnType\nacos() const\n{\n  return AcosReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise arc sine of *this.\n  *\n  * Example: \\include Cwise_asin.cpp\n  * Output: \\verbinclude Cwise_asin.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_asin\">Math functions</a>, sin(), acos()\n  */\nEIGEN_DEVICE_FUNC\ninline const AsinReturnType\nasin() const\n{\n  return AsinReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise 
hyperbolic tan of *this.\n  *\n  * Example: \\include Cwise_tanh.cpp\n  * Output: \\verbinclude Cwise_tanh.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_tanh\">Math functions</a>, tan(), sinh(), cosh()\n  */\nEIGEN_DEVICE_FUNC\ninline const TanhReturnType\ntanh() const\n{\n  return TanhReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise hyperbolic sin of *this.\n  *\n  * Example: \\include Cwise_sinh.cpp\n  * Output: \\verbinclude Cwise_sinh.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_sinh\">Math functions</a>, sin(), tanh(), cosh()\n  */\nEIGEN_DEVICE_FUNC\ninline const SinhReturnType\nsinh() const\n{\n  return SinhReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise hyperbolic cos of *this.\n  *\n  * Example: \\include Cwise_cosh.cpp\n  * Output: \\verbinclude Cwise_cosh.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_cosh\">Math functions</a>, tan(), sinh(), cosh()\n  */\nEIGEN_DEVICE_FUNC\ninline const CoshReturnType\ncosh() const\n{\n  return CoshReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise inverse of *this.\n  *\n  * Example: \\include Cwise_inverse.cpp\n  * Output: \\verbinclude Cwise_inverse.out\n  *\n  * \\sa operator/(), operator*()\n  */\nEIGEN_DEVICE_FUNC\ninline const InverseReturnType\ninverse() const\n{\n  return InverseReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise square of *this.\n  *\n  * Example: \\include Cwise_square.cpp\n  * Output: \\verbinclude Cwise_square.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_squareE\">Math functions</a>, abs2(), cube(), pow()\n  */\nEIGEN_DEVICE_FUNC\ninline const SquareReturnType\nsquare() const\n{\n  return SquareReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise cube of *this.\n  *\n  * Example: \\include Cwise_cube.cpp\n  * Output: 
\\verbinclude Cwise_cube.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_cube\">Math functions</a>, square(), pow()\n  */\nEIGEN_DEVICE_FUNC\ninline const CubeReturnType\ncube() const\n{\n  return CubeReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise round of *this.\n  *\n  * Example: \\include Cwise_round.cpp\n  * Output: \\verbinclude Cwise_round.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_round\">Math functions</a>, ceil(), floor()\n  */\nEIGEN_DEVICE_FUNC\ninline const RoundReturnType\nround() const\n{\n  return RoundReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise floor of *this.\n  *\n  * Example: \\include Cwise_floor.cpp\n  * Output: \\verbinclude Cwise_floor.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_floor\">Math functions</a>, ceil(), round()\n  */\nEIGEN_DEVICE_FUNC\ninline const FloorReturnType\nfloor() const\n{\n  return FloorReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise ceil of *this.\n  *\n  * Example: \\include Cwise_ceil.cpp\n  * Output: \\verbinclude Cwise_ceil.out\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_ceil\">Math functions</a>, floor(), round()\n  */\nEIGEN_DEVICE_FUNC\ninline const CeilReturnType\nceil() const\n{\n  return CeilReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise isnan of *this.\n  *\n  * Example: \\include Cwise_isNaN.cpp\n  * Output: \\verbinclude Cwise_isNaN.out\n  *\n  * \\sa isfinite(), isinf()\n  */\nEIGEN_DEVICE_FUNC\ninline const IsNaNReturnType\nisNaN() const\n{\n  return IsNaNReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise isinf of *this.\n  *\n  * Example: \\include Cwise_isInf.cpp\n  * Output: \\verbinclude Cwise_isInf.out\n  *\n  * \\sa isnan(), isfinite()\n  */\nEIGEN_DEVICE_FUNC\ninline const IsInfReturnType\nisInf() const\n{\n  return 
IsInfReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise isfinite of *this.\n  *\n  * Example: \\include Cwise_isFinite.cpp\n  * Output: \\verbinclude Cwise_isFinite.out\n  *\n  * \\sa isnan(), isinf()\n  */\nEIGEN_DEVICE_FUNC\ninline const IsFiniteReturnType\nisFinite() const\n{\n  return IsFiniteReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise ! operator of *this\n  *\n  * \\warning this operator is for expression of bool only.\n  *\n  * Example: \\include Cwise_boolean_not.cpp\n  * Output: \\verbinclude Cwise_boolean_not.out\n  *\n  * \\sa operator!=()\n  */\nEIGEN_DEVICE_FUNC\ninline const BooleanNotReturnType\noperator!() const\n{\n  EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value),\n                      THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);\n  return BooleanNotReturnType(derived());\n}\n\n\n// --- SpecialFunctions module ---\n\ntypedef CwiseUnaryOp<internal::scalar_lgamma_op<Scalar>, const Derived> LgammaReturnType;\ntypedef CwiseUnaryOp<internal::scalar_digamma_op<Scalar>, const Derived> DigammaReturnType;\ntypedef CwiseUnaryOp<internal::scalar_erf_op<Scalar>, const Derived> ErfReturnType;\ntypedef CwiseUnaryOp<internal::scalar_erfc_op<Scalar>, const Derived> ErfcReturnType;\n\n/** \\cpp11 \\returns an expression of the coefficient-wise ln(|gamma(*this)|).\n  *\n  * \\specialfunctions_module\n  *\n  * Example: \\include Cwise_lgamma.cpp\n  * Output: \\verbinclude Cwise_lgamma.out\n  *\n  * \\note This function supports only float and double scalar types in c++11 mode. 
To support other scalar types,\n  * or float/double in non c++11 mode, the user has to provide implementations of lgamma(T) for any scalar\n  * type T to be supported.\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_lgamma\">Math functions</a>, digamma()\n  */\nEIGEN_DEVICE_FUNC\ninline const LgammaReturnType\nlgamma() const\n{\n  return LgammaReturnType(derived());\n}\n\n/** \\returns an expression of the coefficient-wise digamma (psi, derivative of lgamma).\n  *\n  * \\specialfunctions_module\n  *\n  * \\note This function supports only float and double scalar types. To support other scalar types,\n  * the user has to provide implementations of digamma(T) for any scalar\n  * type T to be supported.\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_digamma\">Math functions</a>, Eigen::digamma(), Eigen::polygamma(), lgamma()\n  */\nEIGEN_DEVICE_FUNC\ninline const DigammaReturnType\ndigamma() const\n{\n  return DigammaReturnType(derived());\n}\n\n/** \\cpp11 \\returns an expression of the coefficient-wise Gauss error\n  * function of *this.\n  *\n  * \\specialfunctions_module\n  *\n  * Example: \\include Cwise_erf.cpp\n  * Output: \\verbinclude Cwise_erf.out\n  *\n  * \\note This function supports only float and double scalar types in c++11 mode. 
To support other scalar types,\n  * or float/double in non c++11 mode, the user has to provide implementations of erf(T) for any scalar\n  * type T to be supported.\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_erf\">Math functions</a>, erfc()\n  */\nEIGEN_DEVICE_FUNC\ninline const ErfReturnType\nerf() const\n{\n  return ErfReturnType(derived());\n}\n\n/** \\cpp11 \\returns an expression of the coefficient-wise Complementary error\n  * function of *this.\n  *\n  * \\specialfunctions_module\n  *\n  * Example: \\include Cwise_erfc.cpp\n  * Output: \\verbinclude Cwise_erfc.out\n  *\n  * \\note This function supports only float and double scalar types in c++11 mode. To support other scalar types,\n  * or float/double in non c++11 mode, the user has to provide implementations of erfc(T) for any scalar\n  * type T to be supported.\n  *\n  * \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_erfc\">Math functions</a>, erf()\n  */\nEIGEN_DEVICE_FUNC\ninline const ErfcReturnType\nerfc() const\n{\n  return ErfcReturnType(derived());\n}\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/plugins/BlockMethods.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2010 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n\n/// \\internal expression type of a column */\ntypedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, 1, !IsRowMajor> ColXpr;\ntypedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, 1, !IsRowMajor> ConstColXpr;\n/// \\internal expression type of a row */\ntypedef Block<Derived, 1, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> RowXpr;\ntypedef const Block<const Derived, 1, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> ConstRowXpr;\n/// \\internal expression type of a block of whole columns */\ntypedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, Dynamic, !IsRowMajor> ColsBlockXpr;\ntypedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, Dynamic, !IsRowMajor> ConstColsBlockXpr;\n/// \\internal expression type of a block of whole rows */\ntypedef Block<Derived, Dynamic, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> RowsBlockXpr;\ntypedef const Block<const Derived, Dynamic, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> ConstRowsBlockXpr;\n/// \\internal expression type of a block of whole columns */\ntemplate<int N> struct NColsBlockXpr { typedef Block<Derived, internal::traits<Derived>::RowsAtCompileTime, N, !IsRowMajor> Type; };\ntemplate<int N> struct ConstNColsBlockXpr { typedef const Block<const Derived, internal::traits<Derived>::RowsAtCompileTime, N, !IsRowMajor> Type; };\n/// \\internal expression type of a block of whole rows */\ntemplate<int N> 
struct NRowsBlockXpr { typedef Block<Derived, N, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> Type; };\ntemplate<int N> struct ConstNRowsBlockXpr { typedef const Block<const Derived, N, internal::traits<Derived>::ColsAtCompileTime, IsRowMajor> Type; };\n/// \\internal expression of a block */\ntypedef Block<Derived> BlockXpr;\ntypedef const Block<const Derived> ConstBlockXpr;\n/// \\internal expression of a block of fixed sizes */\ntemplate<int Rows, int Cols> struct FixedBlockXpr { typedef Block<Derived,Rows,Cols> Type; };\ntemplate<int Rows, int Cols> struct ConstFixedBlockXpr { typedef Block<const Derived,Rows,Cols> Type; };\n\ntypedef VectorBlock<Derived> SegmentReturnType;\ntypedef const VectorBlock<const Derived> ConstSegmentReturnType;\ntemplate<int Size> struct FixedSegmentReturnType { typedef VectorBlock<Derived, Size> Type; };\ntemplate<int Size> struct ConstFixedSegmentReturnType { typedef const VectorBlock<const Derived, Size> Type; };\n\n#endif // not EIGEN_PARSED_BY_DOXYGEN\n\n/// \\returns an expression of a block in \\c *this with either dynamic or fixed sizes.\n///\n/// \\param  startRow  the first row in the block\n/// \\param  startCol  the first column in the block\n/// \\param  blockRows number of rows in the block, specified at either run-time or compile-time\n/// \\param  blockCols number of columns in the block, specified at either run-time or compile-time\n/// \\tparam NRowsType the type of the value handling the number of rows in the block, typically Index.\n/// \\tparam NColsType the type of the value handling the number of columns in the block, typically Index.\n///\n/// Example using runtime (aka dynamic) sizes: \\include MatrixBase_block_int_int_int_int.cpp\n/// Output: \\verbinclude MatrixBase_block_int_int_int_int.out\n///\n/// \\newin{3.4}:\n///\n/// The number of rows \\a blockRows and columns \\a blockCols can also be specified at compile-time by passing Eigen::fix<N>,\n/// or Eigen::fix<N>(n) as arguments. 
In the later case, \\c n plays the role of a runtime fallback value in case \\c N equals Eigen::Dynamic.\n/// Here is an example with a fixed number of rows \\c NRows and dynamic number of columns \\c cols:\n/// \\code\n/// mat.block(i,j,fix<NRows>,cols)\n/// \\endcode\n///\n/// This function thus fully covers the features offered by the following overloads block<NRows,NCols>(Index, Index),\n/// and block<NRows,NCols>(Index, Index, Index, Index) that are thus obsolete. Indeed, this generic version avoids\n/// redundancy, it preserves the argument order, and prevents the need to rely on the template keyword in templated code.\n///\n/// but with less redundancy and more consistency as it does not modify the argument order\n/// and seamlessly enable hybrid fixed/dynamic sizes.\n///\n/// \\note Even in the case that the returned expression has dynamic size, in the case\n/// when it is applied to a fixed-size matrix, it inherits a fixed maximal size,\n/// which means that evaluating it does not cause a dynamic memory allocation.\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa class Block, fix, fix<N>(int)\n///\ntemplate<typename NRowsType, typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline typename FixedBlockXpr<...,...>::Type\n#endif\nblock(Index startRow, Index startCol, NRowsType blockRows, NColsType blockCols)\n{\n  return typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type(\n            derived(), startRow, startCol, internal::get_runtime_value(blockRows), internal::get_runtime_value(blockCols));\n}\n\n/// This is the const version of block(Index,Index,NRowsType,NColsType)\ntemplate<typename NRowsType, typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline const typename 
ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline const typename ConstFixedBlockXpr<...,...>::Type\n#endif\nblock(Index startRow, Index startCol, NRowsType blockRows, NColsType blockCols) const\n{\n  return typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type(\n            derived(), startRow, startCol, internal::get_runtime_value(blockRows), internal::get_runtime_value(blockCols));\n}\n\n\n\n/// \\returns a expression of a top-right corner of \\c *this with either dynamic or fixed sizes.\n///\n/// \\param cRows the number of rows in the corner\n/// \\param cCols the number of columns in the corner\n/// \\tparam NRowsType the type of the value handling the number of rows in the block, typically Index.\n/// \\tparam NColsType the type of the value handling the number of columns in the block, typically Index.\n///\n/// Example with dynamic sizes: \\include MatrixBase_topRightCorner_int_int.cpp\n/// Output: \\verbinclude MatrixBase_topRightCorner_int_int.out\n///\n/// The number of rows \\a blockRows and columns \\a blockCols can also be specified at compile-time by passing Eigen::fix<N>,\n/// or Eigen::fix<N>(n) as arguments. 
See \\link block(Index,Index,NRowsType,NColsType) block() \\endlink for the details.\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<typename NRowsType, typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline typename FixedBlockXpr<...,...>::Type\n#endif\ntopRightCorner(NRowsType cRows, NColsType cCols)\n{\n  return typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n            (derived(), 0, cols() - internal::get_runtime_value(cCols), internal::get_runtime_value(cRows), internal::get_runtime_value(cCols));\n}\n\n/// This is the const version of topRightCorner(NRowsType, NColsType).\ntemplate<typename NRowsType, typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline const typename ConstFixedBlockXpr<...,...>::Type\n#endif\ntopRightCorner(NRowsType cRows, NColsType cCols) const\n{\n  return typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n            (derived(), 0, cols() - internal::get_runtime_value(cCols), internal::get_runtime_value(cRows), internal::get_runtime_value(cCols));\n}\n\n/// \\returns an expression of a fixed-size top-right corner of \\c *this.\n///\n/// \\tparam CRows the number of rows in the corner\n/// \\tparam CCols the number of columns in the corner\n///\n/// Example: \\include MatrixBase_template_int_int_topRightCorner.cpp\n/// Output: \\verbinclude MatrixBase_template_int_int_topRightCorner.out\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa class Block, 
block<int,int>(Index,Index)\n///\ntemplate<int CRows, int CCols>\nEIGEN_DEVICE_FUNC\ninline typename FixedBlockXpr<CRows,CCols>::Type topRightCorner()\n{\n  return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - CCols);\n}\n\n/// This is the const version of topRightCorner<int, int>().\ntemplate<int CRows, int CCols>\nEIGEN_DEVICE_FUNC\ninline const typename ConstFixedBlockXpr<CRows,CCols>::Type topRightCorner() const\n{\n  return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - CCols);\n}\n\n/// \\returns an expression of a top-right corner of \\c *this.\n///\n/// \\tparam CRows number of rows in corner as specified at compile-time\n/// \\tparam CCols number of columns in corner as specified at compile-time\n/// \\param  cRows number of rows in corner as specified at run-time\n/// \\param  cCols number of columns in corner as specified at run-time\n///\n/// This function is mainly useful for corners where the number of rows is specified at compile-time\n/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time\n/// information should not contradict. 
In other words, \\a cRows should equal \\a CRows unless\n/// \\a CRows is \\a Dynamic, and the same for the number of columns.\n///\n/// Example: \\include MatrixBase_template_int_int_topRightCorner_int_int.cpp\n/// Output: \\verbinclude MatrixBase_template_int_int_topRightCorner_int_int.out\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa class Block\n///\ntemplate<int CRows, int CCols>\ninline typename FixedBlockXpr<CRows,CCols>::Type topRightCorner(Index cRows, Index cCols)\n{\n  return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - cCols, cRows, cCols);\n}\n\n/// This is the const version of topRightCorner<int, int>(Index, Index).\ntemplate<int CRows, int CCols>\ninline const typename ConstFixedBlockXpr<CRows,CCols>::Type topRightCorner(Index cRows, Index cCols) const\n{\n  return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, cols() - cCols, cRows, cCols);\n}\n\n\n\n/// \\returns an expression of a top-left corner of \\c *this  with either dynamic or fixed sizes.\n///\n/// \\param cRows the number of rows in the corner\n/// \\param cCols the number of columns in the corner\n/// \\tparam NRowsType the type of the value handling the number of rows in the block, typically Index.\n/// \\tparam NColsType the type of the value handling the number of columns in the block, typically Index.\n///\n/// Example: \\include MatrixBase_topLeftCorner_int_int.cpp\n/// Output: \\verbinclude MatrixBase_topLeftCorner_int_int.out\n///\n/// The number of rows \\a blockRows and columns \\a blockCols can also be specified at compile-time by passing Eigen::fix<N>,\n/// or Eigen::fix<N>(n) as arguments. 
See \\link block(Index,Index,NRowsType,NColsType) block() \\endlink for the details.\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<typename NRowsType, typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline typename FixedBlockXpr<...,...>::Type\n#endif\ntopLeftCorner(NRowsType cRows, NColsType cCols)\n{\n  return typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n            (derived(), 0, 0, internal::get_runtime_value(cRows), internal::get_runtime_value(cCols));\n}\n\n/// This is the const version of topLeftCorner(Index, Index).\ntemplate<typename NRowsType, typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline const typename ConstFixedBlockXpr<...,...>::Type\n#endif\ntopLeftCorner(NRowsType cRows, NColsType cCols) const\n{\n  return typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n            (derived(), 0, 0, internal::get_runtime_value(cRows), internal::get_runtime_value(cCols));\n}\n\n/// \\returns an expression of a fixed-size top-left corner of \\c *this.\n///\n/// The template parameters CRows and CCols are the number of rows and columns in the corner.\n///\n/// Example: \\include MatrixBase_template_int_int_topLeftCorner.cpp\n/// Output: \\verbinclude MatrixBase_template_int_int_topLeftCorner.out\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<int CRows, int CCols>\nEIGEN_DEVICE_FUNC\ninline typename FixedBlockXpr<CRows,CCols>::Type 
topLeftCorner()\n{\n  return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0);\n}\n\n/// This is the const version of topLeftCorner<int, int>().\ntemplate<int CRows, int CCols>\nEIGEN_DEVICE_FUNC\ninline const typename ConstFixedBlockXpr<CRows,CCols>::Type topLeftCorner() const\n{\n  return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0);\n}\n\n/// \\returns an expression of a top-left corner of \\c *this.\n///\n/// \\tparam CRows number of rows in corner as specified at compile-time\n/// \\tparam CCols number of columns in corner as specified at compile-time\n/// \\param  cRows number of rows in corner as specified at run-time\n/// \\param  cCols number of columns in corner as specified at run-time\n///\n/// This function is mainly useful for corners where the number of rows is specified at compile-time\n/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time\n/// information should not contradict. In other words, \\a cRows should equal \\a CRows unless\n/// \\a CRows is \\a Dynamic, and the same for the number of columns.\n///\n/// Example: \\include MatrixBase_template_int_int_topLeftCorner_int_int.cpp\n/// Output: \\verbinclude MatrixBase_template_int_int_topLeftCorner_int_int.out\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa class Block\n///\ntemplate<int CRows, int CCols>\ninline typename FixedBlockXpr<CRows,CCols>::Type topLeftCorner(Index cRows, Index cCols)\n{\n  return typename FixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0, cRows, cCols);\n}\n\n/// This is the const version of topLeftCorner<int, int>(Index, Index).\ntemplate<int CRows, int CCols>\ninline const typename ConstFixedBlockXpr<CRows,CCols>::Type topLeftCorner(Index cRows, Index cCols) const\n{\n  return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), 0, 0, cRows, cCols);\n}\n\n\n\n/// \\returns an expression of a bottom-right corner of \\c *this  with either dynamic or fixed sizes.\n///\n/// 
\\param cRows the number of rows in the corner\n/// \\param cCols the number of columns in the corner\n/// \\tparam NRowsType the type of the value handling the number of rows in the block, typically Index.\n/// \\tparam NColsType the type of the value handling the number of columns in the block, typically Index.\n///\n/// Example: \\include MatrixBase_bottomRightCorner_int_int.cpp\n/// Output: \\verbinclude MatrixBase_bottomRightCorner_int_int.out\n///\n/// The number of rows \\a blockRows and columns \\a blockCols can also be specified at compile-time by passing Eigen::fix<N>,\n/// or Eigen::fix<N>(n) as arguments. See \\link block(Index,Index,NRowsType,NColsType) block() \\endlink for the details.\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<typename NRowsType, typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline typename FixedBlockXpr<...,...>::Type\n#endif\nbottomRightCorner(NRowsType cRows, NColsType cCols)\n{\n  return typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n            (derived(), rows() - internal::get_runtime_value(cRows), cols() - internal::get_runtime_value(cCols),\n                        internal::get_runtime_value(cRows), internal::get_runtime_value(cCols));\n}\n\n/// This is the const version of bottomRightCorner(NRowsType, NColsType).\ntemplate<typename NRowsType, typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline const typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline const typename ConstFixedBlockXpr<...,...>::Type\n#endif\nbottomRightCorner(NRowsType cRows, NColsType cCols) const\n{\n  return typename 
ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n            (derived(), rows() - internal::get_runtime_value(cRows), cols() - internal::get_runtime_value(cCols),\n                        internal::get_runtime_value(cRows), internal::get_runtime_value(cCols));\n}\n\n/// \\returns an expression of a fixed-size bottom-right corner of \\c *this.\n///\n/// The template parameters CRows and CCols are the number of rows and columns in the corner.\n///\n/// Example: \\include MatrixBase_template_int_int_bottomRightCorner.cpp\n/// Output: \\verbinclude MatrixBase_template_int_int_bottomRightCorner.out\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<int CRows, int CCols>\nEIGEN_DEVICE_FUNC\ninline typename FixedBlockXpr<CRows,CCols>::Type bottomRightCorner()\n{\n  return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, cols() - CCols);\n}\n\n/// This is the const version of bottomRightCorner<int, int>().\ntemplate<int CRows, int CCols>\nEIGEN_DEVICE_FUNC\ninline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomRightCorner() const\n{\n  return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, cols() - CCols);\n}\n\n/// \\returns an expression of a bottom-right corner of \\c *this.\n///\n/// \\tparam CRows number of rows in corner as specified at compile-time\n/// \\tparam CCols number of columns in corner as specified at compile-time\n/// \\param  cRows number of rows in corner as specified at run-time\n/// \\param  cCols number of columns in corner as specified at run-time\n///\n/// This function is mainly useful for corners where the number of rows is specified at compile-time\n/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time\n/// information should not contradict. 
In other words, \\a cRows should equal \\a CRows unless\n/// \\a CRows is \\a Dynamic, and the same for the number of columns.\n///\n/// Example: \\include MatrixBase_template_int_int_bottomRightCorner_int_int.cpp\n/// Output: \\verbinclude MatrixBase_template_int_int_bottomRightCorner_int_int.out\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa class Block\n///\ntemplate<int CRows, int CCols>\ninline typename FixedBlockXpr<CRows,CCols>::Type bottomRightCorner(Index cRows, Index cCols)\n{\n  return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols);\n}\n\n/// This is the const version of bottomRightCorner<int, int>(Index, Index).\ntemplate<int CRows, int CCols>\ninline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomRightCorner(Index cRows, Index cCols) const\n{\n  return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols);\n}\n\n\n\n/// \\returns an expression of a bottom-left corner of \\c *this  with either dynamic or fixed sizes.\n///\n/// \\param cRows the number of rows in the corner\n/// \\param cCols the number of columns in the corner\n/// \\tparam NRowsType the type of the value handling the number of rows in the block, typically Index.\n/// \\tparam NColsType the type of the value handling the number of columns in the block, typically Index.\n///\n/// Example: \\include MatrixBase_bottomLeftCorner_int_int.cpp\n/// Output: \\verbinclude MatrixBase_bottomLeftCorner_int_int.out\n///\n/// The number of rows \\a blockRows and columns \\a blockCols can also be specified at compile-time by passing Eigen::fix<N>,\n/// or Eigen::fix<N>(n) as arguments. 
See \\link block(Index,Index,NRowsType,NColsType) block() \\endlink for the details.\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<typename NRowsType, typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline typename FixedBlockXpr<...,...>::Type\n#endif\nbottomLeftCorner(NRowsType cRows, NColsType cCols)\n{\n  return typename FixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n            (derived(), rows() - internal::get_runtime_value(cRows), 0,\n                        internal::get_runtime_value(cRows), internal::get_runtime_value(cCols));\n}\n\n/// This is the const version of bottomLeftCorner(NRowsType, NColsType).\ntemplate<typename NRowsType, typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline typename ConstFixedBlockXpr<...,...>::Type\n#endif\nbottomLeftCorner(NRowsType cRows, NColsType cCols) const\n{\n  return typename ConstFixedBlockXpr<internal::get_fixed_value<NRowsType>::value,internal::get_fixed_value<NColsType>::value>::Type\n            (derived(), rows() - internal::get_runtime_value(cRows), 0,\n                        internal::get_runtime_value(cRows), internal::get_runtime_value(cCols));\n}\n\n/// \\returns an expression of a fixed-size bottom-left corner of \\c *this.\n///\n/// The template parameters CRows and CCols are the number of rows and columns in the corner.\n///\n/// Example: \\include MatrixBase_template_int_int_bottomLeftCorner.cpp\n/// Output: \\verbinclude MatrixBase_template_int_int_bottomLeftCorner.out\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa 
block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<int CRows, int CCols>\nEIGEN_DEVICE_FUNC\ninline typename FixedBlockXpr<CRows,CCols>::Type bottomLeftCorner()\n{\n  return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, 0);\n}\n\n/// This is the const version of bottomLeftCorner<int, int>().\ntemplate<int CRows, int CCols>\nEIGEN_DEVICE_FUNC\ninline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomLeftCorner() const\n{\n  return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - CRows, 0);\n}\n\n/// \\returns an expression of a bottom-left corner of \\c *this.\n///\n/// \\tparam CRows number of rows in corner as specified at compile-time\n/// \\tparam CCols number of columns in corner as specified at compile-time\n/// \\param  cRows number of rows in corner as specified at run-time\n/// \\param  cCols number of columns in corner as specified at run-time\n///\n/// This function is mainly useful for corners where the number of rows is specified at compile-time\n/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time\n/// information should not contradict. 
In other words, \\a cRows should equal \\a CRows unless\n/// \\a CRows is \\a Dynamic, and the same for the number of columns.\n///\n/// Example: \\include MatrixBase_template_int_int_bottomLeftCorner_int_int.cpp\n/// Output: \\verbinclude MatrixBase_template_int_int_bottomLeftCorner_int_int.out\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa class Block\n///\ntemplate<int CRows, int CCols>\ninline typename FixedBlockXpr<CRows,CCols>::Type bottomLeftCorner(Index cRows, Index cCols)\n{\n  return typename FixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, 0, cRows, cCols);\n}\n\n/// This is the const version of bottomLeftCorner<int, int>(Index, Index).\ntemplate<int CRows, int CCols>\ninline const typename ConstFixedBlockXpr<CRows,CCols>::Type bottomLeftCorner(Index cRows, Index cCols) const\n{\n  return typename ConstFixedBlockXpr<CRows,CCols>::Type(derived(), rows() - cRows, 0, cRows, cCols);\n}\n\n\n\n/// \\returns a block consisting of the top rows of \\c *this.\n///\n/// \\param n the number of rows in the block\n/// \\tparam NRowsType the type of the value handling the number of rows in the block, typically Index.\n///\n/// Example: \\include MatrixBase_topRows_int.cpp\n/// Output: \\verbinclude MatrixBase_topRows_int.out\n///\n/// The number of rows \\a n can also be specified at compile-time by passing Eigen::fix<N>,\n/// or Eigen::fix<N>(n) as arguments.\n/// See \\link block(Index,Index,NRowsType,NColsType) block() \\endlink for the details.\n///\nEIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<typename NRowsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type\n#else\ninline typename NRowsBlockXpr<...>::Type\n#endif\ntopRows(NRowsType n)\n{\n  return typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type\n            (derived(), 0, 0, 
internal::get_runtime_value(n), cols());\n}\n\n/// This is the const version of topRows(NRowsType).\ntemplate<typename NRowsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type\n#else\ninline const typename ConstNRowsBlockXpr<...>::Type\n#endif\ntopRows(NRowsType n) const\n{\n  return typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type\n            (derived(), 0, 0, internal::get_runtime_value(n), cols());\n}\n\n/// \\returns a block consisting of the top rows of \\c *this.\n///\n/// \\tparam N the number of rows in the block as specified at compile-time\n/// \\param n the number of rows in the block as specified at run-time\n///\n/// The compile-time and run-time information should not contradict. In other words,\n/// \\a n should equal \\a N unless \\a N is \\a Dynamic.\n///\n/// Example: \\include MatrixBase_template_int_topRows.cpp\n/// Output: \\verbinclude MatrixBase_template_int_topRows.out\n///\nEIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename NRowsBlockXpr<N>::Type topRows(Index n = N)\n{\n  return typename NRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols());\n}\n\n/// This is the const version of topRows<int>().\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename ConstNRowsBlockXpr<N>::Type topRows(Index n = N) const\n{\n  return typename ConstNRowsBlockXpr<N>::Type(derived(), 0, 0, n, cols());\n}\n\n\n\n/// \\returns a block consisting of the bottom rows of \\c *this.\n///\n/// \\param n the number of rows in the block\n/// \\tparam NRowsType the type of the value handling the number of rows in the block, typically Index.\n///\n/// Example: \\include MatrixBase_bottomRows_int.cpp\n/// Output: \\verbinclude MatrixBase_bottomRows_int.out\n///\n/// The number of rows \\a n can also be specified at compile-time 
by passing Eigen::fix<N>,\n/// or Eigen::fix<N>(n) as arguments.\n/// See \\link block(Index,Index,NRowsType,NColsType) block() \\endlink for the details.\n///\nEIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<typename NRowsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type\n#else\ninline typename NRowsBlockXpr<...>::Type\n#endif\nbottomRows(NRowsType n)\n{\n  return typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type\n            (derived(), rows() - internal::get_runtime_value(n), 0, internal::get_runtime_value(n), cols());\n}\n\n/// This is the const version of bottomRows(NRowsType).\ntemplate<typename NRowsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type\n#else\ninline const typename ConstNRowsBlockXpr<...>::Type\n#endif\nbottomRows(NRowsType n) const\n{\n  return typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type\n            (derived(), rows() - internal::get_runtime_value(n), 0, internal::get_runtime_value(n), cols());\n}\n\n/// \\returns a block consisting of the bottom rows of \\c *this.\n///\n/// \\tparam N the number of rows in the block as specified at compile-time\n/// \\param n the number of rows in the block as specified at run-time\n///\n/// The compile-time and run-time information should not contradict. 
In other words,\n/// \\a n should equal \\a N unless \\a N is \\a Dynamic.\n///\n/// Example: \\include MatrixBase_template_int_bottomRows.cpp\n/// Output: \\verbinclude MatrixBase_template_int_bottomRows.out\n///\nEIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename NRowsBlockXpr<N>::Type bottomRows(Index n = N)\n{\n  return typename NRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols());\n}\n\n/// This is the const version of bottomRows<int>().\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename ConstNRowsBlockXpr<N>::Type bottomRows(Index n = N) const\n{\n  return typename ConstNRowsBlockXpr<N>::Type(derived(), rows() - n, 0, n, cols());\n}\n\n\n\n/// \\returns a block consisting of a range of rows of \\c *this.\n///\n/// \\param startRow the index of the first row in the block\n/// \\param n the number of rows in the block\n/// \\tparam NRowsType the type of the value handling the number of rows in the block, typically Index.\n///\n/// Example: \\include DenseBase_middleRows_int.cpp\n/// Output: \\verbinclude DenseBase_middleRows_int.out\n///\n/// The number of rows \\a n can also be specified at compile-time by passing Eigen::fix<N>,\n/// or Eigen::fix<N>(n) as arguments.\n/// See \\link block(Index,Index,NRowsType,NColsType) block() \\endlink for the details.\n///\nEIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<typename NRowsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type\n#else\ninline typename NRowsBlockXpr<...>::Type\n#endif\nmiddleRows(Index startRow, NRowsType n)\n{\n  return typename NRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type\n            (derived(), startRow, 0, internal::get_runtime_value(n), cols());\n}\n\n/// This is the const 
version of middleRows(Index,NRowsType).\ntemplate<typename NRowsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline const typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type\n#else\ninline const typename ConstNRowsBlockXpr<...>::Type\n#endif\nmiddleRows(Index startRow, NRowsType n) const\n{\n  return typename ConstNRowsBlockXpr<internal::get_fixed_value<NRowsType>::value>::Type\n            (derived(), startRow, 0, internal::get_runtime_value(n), cols());\n}\n\n/// \\returns a block consisting of a range of rows of \\c *this.\n///\n/// \\tparam N the number of rows in the block as specified at compile-time\n/// \\param startRow the index of the first row in the block\n/// \\param n the number of rows in the block as specified at run-time\n///\n/// The compile-time and run-time information should not contradict. In other words,\n/// \\a n should equal \\a N unless \\a N is \\a Dynamic.\n///\n/// Example: \\include DenseBase_template_int_middleRows.cpp\n/// Output: \\verbinclude DenseBase_template_int_middleRows.out\n///\nEIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename NRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N)\n{\n  return typename NRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols());\n}\n\n/// This is the const version of middleRows<int>().\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename ConstNRowsBlockXpr<N>::Type middleRows(Index startRow, Index n = N) const\n{\n  return typename ConstNRowsBlockXpr<N>::Type(derived(), startRow, 0, n, cols());\n}\n\n\n\n/// \\returns a block consisting of the left columns of \\c *this.\n///\n/// \\param n the number of columns in the block\n/// \\tparam NColsType the type of the value handling the number of columns in the block, typically Index.\n///\n/// Example: \\include MatrixBase_leftCols_int.cpp\n/// Output: \\verbinclude 
MatrixBase_leftCols_int.out\n///\n/// The number of columns \\a n can also be specified at compile-time by passing Eigen::fix<N>,\n/// or Eigen::fix<N>(n) as arguments.\n/// See \\link block(Index,Index,NRowsType,NColsType) block() \\endlink for the details.\n///\nEIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline typename NColsBlockXpr<...>::Type\n#endif\nleftCols(NColsType n)\n{\n  return typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type\n            (derived(), 0, 0, rows(), internal::get_runtime_value(n));\n}\n\n/// This is the const version of leftCols(NColsType).\ntemplate<typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline const typename ConstNColsBlockXpr<...>::Type\n#endif\nleftCols(NColsType n) const\n{\n  return typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type\n            (derived(), 0, 0, rows(), internal::get_runtime_value(n));\n}\n\n/// \\returns a block consisting of the left columns of \\c *this.\n///\n/// \\tparam N the number of columns in the block as specified at compile-time\n/// \\param n the number of columns in the block as specified at run-time\n///\n/// The compile-time and run-time information should not contradict. 
In other words,\n/// \\a n should equal \\a N unless \\a N is \\a Dynamic.\n///\n/// Example: \\include MatrixBase_template_int_leftCols.cpp\n/// Output: \\verbinclude MatrixBase_template_int_leftCols.out\n///\nEIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename NColsBlockXpr<N>::Type leftCols(Index n = N)\n{\n  return typename NColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n);\n}\n\n/// This is the const version of leftCols<int>().\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename ConstNColsBlockXpr<N>::Type leftCols(Index n = N) const\n{\n  return typename ConstNColsBlockXpr<N>::Type(derived(), 0, 0, rows(), n);\n}\n\n\n\n/// \\returns a block consisting of the right columns of \\c *this.\n///\n/// \\param n the number of columns in the block\n/// \\tparam NColsType the type of the value handling the number of columns in the block, typically Index.\n///\n/// Example: \\include MatrixBase_rightCols_int.cpp\n/// Output: \\verbinclude MatrixBase_rightCols_int.out\n///\n/// The number of columns \\a n can also be specified at compile-time by passing Eigen::fix<N>,\n/// or Eigen::fix<N>(n) as arguments.\n/// See \\link block(Index,Index,NRowsType,NColsType) block() \\endlink for the details.\n///\nEIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline typename NColsBlockXpr<...>::Type\n#endif\nrightCols(NColsType n)\n{\n  return typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type\n            (derived(), 0, cols() - internal::get_runtime_value(n), rows(), internal::get_runtime_value(n));\n}\n\n/// This is the const version of rightCols(NColsType).\ntemplate<typename 
NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline const typename ConstNColsBlockXpr<...>::Type\n#endif\nrightCols(NColsType n) const\n{\n  return typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type\n            (derived(), 0, cols() - internal::get_runtime_value(n), rows(), internal::get_runtime_value(n));\n}\n\n/// \\returns a block consisting of the right columns of \\c *this.\n///\n/// \\tparam N the number of columns in the block as specified at compile-time\n/// \\param n the number of columns in the block as specified at run-time\n///\n/// The compile-time and run-time information should not contradict. In other words,\n/// \\a n should equal \\a N unless \\a N is \\a Dynamic.\n///\n/// Example: \\include MatrixBase_template_int_rightCols.cpp\n/// Output: \\verbinclude MatrixBase_template_int_rightCols.out\n///\nEIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename NColsBlockXpr<N>::Type rightCols(Index n = N)\n{\n  return typename NColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n);\n}\n\n/// This is the const version of rightCols<int>().\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename ConstNColsBlockXpr<N>::Type rightCols(Index n = N) const\n{\n  return typename ConstNColsBlockXpr<N>::Type(derived(), 0, cols() - n, rows(), n);\n}\n\n\n\n/// \\returns a block consisting of a range of columns of \\c *this.\n///\n/// \\param startCol the index of the first column in the block\n/// \\param numCols the number of columns in the block\n/// \\tparam NColsType the type of the value handling the number of columns in the block, typically Index.\n///\n/// Example: \\include DenseBase_middleCols_int.cpp\n/// Output: \\verbinclude DenseBase_middleCols_int.out\n///\n/// The number of columns 
\\a n can also be specified at compile-time by passing Eigen::fix<N>,\n/// or Eigen::fix<N>(n) as arguments.\n/// See \\link block(Index,Index,NRowsType,NColsType) block() \\endlink for the details.\n///\nEIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline typename NColsBlockXpr<...>::Type\n#endif\nmiddleCols(Index startCol, NColsType numCols)\n{\n  return typename NColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type\n            (derived(), 0, startCol, rows(), internal::get_runtime_value(numCols));\n}\n\n/// This is the const version of middleCols(Index,NColsType).\ntemplate<typename NColsType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline const typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type\n#else\ninline const typename ConstNColsBlockXpr<...>::Type\n#endif\nmiddleCols(Index startCol, NColsType numCols) const\n{\n  return typename ConstNColsBlockXpr<internal::get_fixed_value<NColsType>::value>::Type\n            (derived(), 0, startCol, rows(), internal::get_runtime_value(numCols));\n}\n\n/// \\returns a block consisting of a range of columns of \\c *this.\n///\n/// \\tparam N the number of columns in the block as specified at compile-time\n/// \\param startCol the index of the first column in the block\n/// \\param n the number of columns in the block as specified at run-time\n///\n/// The compile-time and run-time information should not contradict. 
In other words,\n/// \\a n should equal \\a N unless \\a N is \\a Dynamic.\n///\n/// Example: \\include DenseBase_template_int_middleCols.cpp\n/// Output: \\verbinclude DenseBase_template_int_middleCols.out\n///\nEIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename NColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N)\n{\n  return typename NColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n);\n}\n\n/// This is the const version of middleCols<int>().\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename ConstNColsBlockXpr<N>::Type middleCols(Index startCol, Index n = N) const\n{\n  return typename ConstNColsBlockXpr<N>::Type(derived(), 0, startCol, rows(), n);\n}\n\n\n\n/// \\returns a fixed-size expression of a block of \\c *this.\n///\n/// The template parameters \\a NRows and \\a NCols are the number of\n/// rows and columns in the block.\n///\n/// \\param startRow the first row in the block\n/// \\param startCol the first column in the block\n///\n/// Example: \\include MatrixBase_block_int_int.cpp\n/// Output: \\verbinclude MatrixBase_block_int_int.out\n///\n/// \\note The usage of of this overload is discouraged from %Eigen 3.4, better used the generic\n/// block(Index,Index,NRowsType,NColsType), here is the one-to-one equivalence:\n/// \\code\n/// mat.template block<NRows,NCols>(i,j)  <-->  mat.block(i,j,fix<NRows>,fix<NCols>)\n/// \\endcode\n///\n/// \\note since block is a templated member, the keyword template has to be used\n/// if the matrix type is also a template parameter: \\code m.template block<3,3>(1,1); \\endcode\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<int NRows, int NCols>\nEIGEN_DEVICE_FUNC\ninline typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol)\n{\n  return typename 
FixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol);\n}\n\n/// This is the const version of block<>(Index, Index). */\ntemplate<int NRows, int NCols>\nEIGEN_DEVICE_FUNC\ninline const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol) const\n{\n  return typename ConstFixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol);\n}\n\n/// \\returns an expression of a block of \\c *this.\n///\n/// \\tparam NRows number of rows in block as specified at compile-time\n/// \\tparam NCols number of columns in block as specified at compile-time\n/// \\param  startRow  the first row in the block\n/// \\param  startCol  the first column in the block\n/// \\param  blockRows number of rows in block as specified at run-time\n/// \\param  blockCols number of columns in block as specified at run-time\n///\n/// This function is mainly useful for blocks where the number of rows is specified at compile-time\n/// and the number of columns is specified at run-time, or vice versa. The compile-time and run-time\n/// information should not contradict. 
In other words, \\a blockRows should equal \\a NRows unless\n/// \\a NRows is \\a Dynamic, and the same for the number of columns.\n///\n/// Example: \\include MatrixBase_template_int_int_block_int_int_int_int.cpp\n/// Output: \\verbinclude MatrixBase_template_int_int_block_int_int_int_int.cpp\n///\n/// \\note The usage of of this overload is discouraged from %Eigen 3.4, better used the generic\n/// block(Index,Index,NRowsType,NColsType), here is the one-to-one complete equivalence:\n/// \\code\n/// mat.template block<NRows,NCols>(i,j,rows,cols)     <-->  mat.block(i,j,fix<NRows>(rows),fix<NCols>(cols))\n/// \\endcode\n/// If we known that, e.g., NRows==Dynamic and NCols!=Dynamic, then the equivalence becomes:\n/// \\code\n/// mat.template block<Dynamic,NCols>(i,j,rows,NCols)  <-->  mat.block(i,j,rows,fix<NCols>)\n/// \\endcode\n///\nEIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), class Block\n///\ntemplate<int NRows, int NCols>\ninline typename FixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol,\n                                                  Index blockRows, Index blockCols)\n{\n  return typename FixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol, blockRows, blockCols);\n}\n\n/// This is the const version of block<>(Index, Index, Index, Index).\ntemplate<int NRows, int NCols>\ninline const typename ConstFixedBlockXpr<NRows,NCols>::Type block(Index startRow, Index startCol,\n                                                              Index blockRows, Index blockCols) const\n{\n  return typename ConstFixedBlockXpr<NRows,NCols>::Type(derived(), startRow, startCol, blockRows, blockCols);\n}\n\n/// \\returns an expression of the \\a i-th column of \\c *this. 
Note that the numbering starts at 0.\n///\n/// Example: \\include MatrixBase_col.cpp\n/// Output: \\verbinclude MatrixBase_col.out\n///\nEIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(column-major)\n/**\n  * \\sa row(), class Block */\nEIGEN_DEVICE_FUNC\ninline ColXpr col(Index i)\n{\n  return ColXpr(derived(), i);\n}\n\n/// This is the const version of col().\nEIGEN_DEVICE_FUNC\ninline ConstColXpr col(Index i) const\n{\n  return ConstColXpr(derived(), i);\n}\n\n/// \\returns an expression of the \\a i-th row of \\c *this. Note that the numbering starts at 0.\n///\n/// Example: \\include MatrixBase_row.cpp\n/// Output: \\verbinclude MatrixBase_row.out\n///\nEIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(row-major)\n/**\n  * \\sa col(), class Block */\nEIGEN_DEVICE_FUNC\ninline RowXpr row(Index i)\n{\n  return RowXpr(derived(), i);\n}\n\n/// This is the const version of row(). */\nEIGEN_DEVICE_FUNC\ninline ConstRowXpr row(Index i) const\n{\n  return ConstRowXpr(derived(), i);\n}\n\n/// \\returns an expression of a segment (i.e. 
a vector block) in \\c *this with either dynamic or fixed sizes.\n///\n/// \\only_for_vectors\n///\n/// \\param start the first coefficient in the segment\n/// \\param n the number of coefficients in the segment\n/// \\tparam NType the type of the value handling the number of coefficients in the segment, typically Index.\n///\n/// Example: \\include MatrixBase_segment_int_int.cpp\n/// Output: \\verbinclude MatrixBase_segment_int_int.out\n///\n/// The number of coefficients \\a n can also be specified at compile-time by passing Eigen::fix<N>,\n/// or Eigen::fix<N>(n) as arguments.\n/// See \\link block(Index,Index,NRowsType,NColsType) block() \\endlink for the details.\n///\n/// \\note Even in the case that the returned expression has dynamic size, in the case\n/// when it is applied to a fixed-size vector, it inherits a fixed maximal size,\n/// which means that evaluating it does not cause a dynamic memory allocation.\n///\n/// \\sa block(Index,Index,NRowsType,NColsType), fix<N>, fix<N>(int), class Block\n///\ntemplate<typename NType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type\n#else\ninline typename FixedSegmentReturnType<...>::Type\n#endif\nsegment(Index start, NType n)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type\n            (derived(), start, internal::get_runtime_value(n));\n}\n\n\n/// This is the const version of segment(Index,NType).\ntemplate<typename NType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type\n#else\ninline const typename ConstFixedSegmentReturnType<...>::Type\n#endif\nsegment(Index start, NType n) const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type\n            
(derived(), start, internal::get_runtime_value(n));\n}\n\n/// \\returns an expression of the first coefficients of \\c *this with either dynamic or fixed sizes.\n///\n/// \\only_for_vectors\n///\n/// \\param n the number of coefficients in the segment\n/// \\tparam NType the type of the value handling the number of coefficients in the segment, typically Index.\n///\n/// Example: \\include MatrixBase_start_int.cpp\n/// Output: \\verbinclude MatrixBase_start_int.out\n///\n/// The number of coefficients \\a n can also be specified at compile-time by passing Eigen::fix<N>,\n/// or Eigen::fix<N>(n) as arguments.\n/// See \\link block(Index,Index,NRowsType,NColsType) block() \\endlink for the details.\n///\n/// \\note Even in the case that the returned expression has dynamic size, in the case\n/// when it is applied to a fixed-size vector, it inherits a fixed maximal size,\n/// which means that evaluating it does not cause a dynamic memory allocation.\n///\n/// \\sa class Block, block(Index,Index)\n///\ntemplate<typename NType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type\n#else\ninline typename FixedSegmentReturnType<...>::Type\n#endif\nhead(NType n)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type\n              (derived(), 0, internal::get_runtime_value(n));\n}\n\n/// This is the const version of head(NType).\ntemplate<typename NType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type\n#else\ninline const typename ConstFixedSegmentReturnType<...>::Type\n#endif\nhead(NType n) const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type\n            (derived(), 0, internal::get_runtime_value(n));\n}\n\n/// 
\\returns an expression of a last coefficients of \\c *this with either dynamic or fixed sizes.\n///\n/// \\only_for_vectors\n///\n/// \\param n the number of coefficients in the segment\n/// \\tparam NType the type of the value handling the number of coefficients in the segment, typically Index.\n///\n/// Example: \\include MatrixBase_end_int.cpp\n/// Output: \\verbinclude MatrixBase_end_int.out\n///\n/// The number of coefficients \\a n can also be specified at compile-time by passing Eigen::fix<N>,\n/// or Eigen::fix<N>(n) as arguments.\n/// See \\link block(Index,Index,NRowsType,NColsType) block() \\endlink for the details.\n///\n/// \\note Even in the case that the returned expression has dynamic size, in the case\n/// when it is applied to a fixed-size vector, it inherits a fixed maximal size,\n/// which means that evaluating it does not cause a dynamic memory allocation.\n///\n/// \\sa class Block, block(Index,Index)\n///\ntemplate<typename NType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type\n#else\ninline typename FixedSegmentReturnType<...>::Type\n#endif\ntail(NType n)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return typename FixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type\n            (derived(), this->size() - internal::get_runtime_value(n), internal::get_runtime_value(n));\n}\n\n/// This is the const version of tail(Index).\ntemplate<typename NType>\nEIGEN_DEVICE_FUNC\n#ifndef EIGEN_PARSED_BY_DOXYGEN\ninline const typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type\n#else\ninline const typename ConstFixedSegmentReturnType<...>::Type\n#endif\ntail(NType n) const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return typename ConstFixedSegmentReturnType<internal::get_fixed_value<NType>::value>::Type\n            (derived(), this->size() - internal::get_runtime_value(n), 
internal::get_runtime_value(n));\n}\n\n/// \\returns a fixed-size expression of a segment (i.e. a vector block) in \\c *this\n///\n/// \\only_for_vectors\n///\n/// \\tparam N the number of coefficients in the segment as specified at compile-time\n/// \\param start the index of the first element in the segment\n/// \\param n the number of coefficients in the segment as specified at compile-time\n///\n/// The compile-time and run-time information should not contradict. In other words,\n/// \\a n should equal \\a N unless \\a N is \\a Dynamic.\n///\n/// Example: \\include MatrixBase_template_int_segment.cpp\n/// Output: \\verbinclude MatrixBase_template_int_segment.out\n///\n/// \\sa segment(Index,NType), class Block\n///\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename FixedSegmentReturnType<N>::Type segment(Index start, Index n = N)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return typename FixedSegmentReturnType<N>::Type(derived(), start, n);\n}\n\n/// This is the const version of segment<int>(Index).\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename ConstFixedSegmentReturnType<N>::Type segment(Index start, Index n = N) const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return typename ConstFixedSegmentReturnType<N>::Type(derived(), start, n);\n}\n\n/// \\returns a fixed-size expression of the first coefficients of \\c *this.\n///\n/// \\only_for_vectors\n///\n/// \\tparam N the number of coefficients in the segment as specified at compile-time\n/// \\param  n the number of coefficients in the segment as specified at run-time\n///\n/// The compile-time and run-time information should not contradict. 
In other words,\n/// \\a n should equal \\a N unless \\a N is \\a Dynamic.\n///\n/// Example: \\include MatrixBase_template_int_start.cpp\n/// Output: \\verbinclude MatrixBase_template_int_start.out\n///\n/// \\sa head(NType), class Block\n///\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename FixedSegmentReturnType<N>::Type head(Index n = N)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return typename FixedSegmentReturnType<N>::Type(derived(), 0, n);\n}\n\n/// This is the const version of head<int>().\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename ConstFixedSegmentReturnType<N>::Type head(Index n = N) const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return typename ConstFixedSegmentReturnType<N>::Type(derived(), 0, n);\n}\n\n/// \\returns a fixed-size expression of the last coefficients of \\c *this.\n///\n/// \\only_for_vectors\n///\n/// \\tparam N the number of coefficients in the segment as specified at compile-time\n/// \\param  n the number of coefficients in the segment as specified at run-time\n///\n/// The compile-time and run-time information should not contradict. In other words,\n/// \\a n should equal \\a N unless \\a N is \\a Dynamic.\n///\n/// Example: \\include MatrixBase_template_int_end.cpp\n/// Output: \\verbinclude MatrixBase_template_int_end.out\n///\n/// \\sa tail(NType), class Block\n///\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename FixedSegmentReturnType<N>::Type tail(Index n = N)\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return typename FixedSegmentReturnType<N>::Type(derived(), size() - n);\n}\n\n/// This is the const version of tail<int>.\ntemplate<int N>\nEIGEN_DEVICE_FUNC\ninline typename ConstFixedSegmentReturnType<N>::Type tail(Index n = N) const\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return typename ConstFixedSegmentReturnType<N>::Type(derived(), size() - n);\n}\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/plugins/CommonCwiseBinaryOps.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2016 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n// This file is a base class plugin containing common coefficient wise functions.\n\n/** \\returns an expression of the difference of \\c *this and \\a other\n  *\n  * \\note If you want to substract a given scalar from all coefficients, see Cwise::operator-().\n  *\n  * \\sa class CwiseBinaryOp, operator-=()\n  */\nEIGEN_MAKE_CWISE_BINARY_OP(operator-,difference)\n\n/** \\returns an expression of the sum of \\c *this and \\a other\n  *\n  * \\note If you want to add a given scalar to all coefficients, see Cwise::operator+().\n  *\n  * \\sa class CwiseBinaryOp, operator+=()\n  */\nEIGEN_MAKE_CWISE_BINARY_OP(operator+,sum)\n\n/** \\returns an expression of a custom coefficient-wise operator \\a func of *this and \\a other\n  *\n  * The template parameter \\a CustomBinaryOp is the type of the functor\n  * of the custom operator (see class CwiseBinaryOp for an example)\n  *\n  * Here is an example illustrating the use of custom functors:\n  * \\include class_CwiseBinaryOp.cpp\n  * Output: \\verbinclude class_CwiseBinaryOp.out\n  *\n  * \\sa class CwiseBinaryOp, operator+(), operator-(), cwiseProduct()\n  */\ntemplate<typename CustomBinaryOp, typename OtherDerived>\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const CwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>\nbinaryExpr(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other, const CustomBinaryOp& func = CustomBinaryOp()) const\n{\n  return CwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>(derived(), other.derived(), 
func);\n}\n\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\nEIGEN_MAKE_SCALAR_BINARY_OP(operator*,product)\n#else\n/** \\returns an expression of \\c *this scaled by the scalar factor \\a scalar\n  *\n  * \\tparam T is the scalar type of \\a scalar. It must be compatible with the scalar type of the given expression.\n  */\ntemplate<typename T>\nconst CwiseBinaryOp<internal::scalar_product_op<Scalar,T>,Derived,Constant<T> > operator*(const T& scalar) const;\n/** \\returns an expression of \\a expr scaled by the scalar factor \\a scalar\n  *\n  * \\tparam T is the scalar type of \\a scalar. It must be compatible with the scalar type of the given expression.\n  */\ntemplate<typename T> friend\nconst CwiseBinaryOp<internal::scalar_product_op<T,Scalar>,Constant<T>,Derived> operator*(const T& scalar, const StorageBaseType& expr);\n#endif\n\n\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\nEIGEN_MAKE_SCALAR_BINARY_OP_ONTHERIGHT(operator/,quotient)\n#else\n/** \\returns an expression of \\c *this divided by the scalar value \\a scalar\n  *\n  * \\tparam T is the scalar type of \\a scalar. 
It must be compatible with the scalar type of the given expression.\n  */\ntemplate<typename T>\nconst CwiseBinaryOp<internal::scalar_quotient_op<Scalar,T>,Derived,Constant<T> > operator/(const T& scalar) const;\n#endif\n\n/** \\returns an expression of the coefficient-wise boolean \\b and operator of \\c *this and \\a other\n  *\n  * \\warning this operator is for expression of bool only.\n  *\n  * Example: \\include Cwise_boolean_and.cpp\n  * Output: \\verbinclude Cwise_boolean_and.out\n  *\n  * \\sa operator||(), select()\n  */\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\ninline const CwiseBinaryOp<internal::scalar_boolean_and_op, const Derived, const OtherDerived>\noperator&&(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const\n{\n  EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value),\n                      THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);\n  return CwiseBinaryOp<internal::scalar_boolean_and_op, const Derived, const OtherDerived>(derived(),other.derived());\n}\n\n/** \\returns an expression of the coefficient-wise boolean \\b or operator of \\c *this and \\a other\n  *\n  * \\warning this operator is for expression of bool only.\n  *\n  * Example: \\include Cwise_boolean_or.cpp\n  * Output: \\verbinclude Cwise_boolean_or.out\n  *\n  * \\sa operator&&(), select()\n  */\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\ninline const CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>\noperator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const\n{\n  EIGEN_STATIC_ASSERT((internal::is_same<bool,Scalar>::value && internal::is_same<bool,typename OtherDerived::Scalar>::value),\n                      THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);\n  return CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>(derived(),other.derived());\n}\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/plugins/CommonCwiseUnaryOps.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n// This file is a base class plugin containing common coefficient wise functions.\n\n#ifndef EIGEN_PARSED_BY_DOXYGEN\n\n/** \\internal the return type of conjugate() */\ntypedef typename internal::conditional<NumTraits<Scalar>::IsComplex,\n                    const CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, const Derived>,\n                    const Derived&\n                  >::type ConjugateReturnType;\n/** \\internal the return type of real() const */\ntypedef typename internal::conditional<NumTraits<Scalar>::IsComplex,\n                    const CwiseUnaryOp<internal::scalar_real_op<Scalar>, const Derived>,\n                    const Derived&\n                  >::type RealReturnType;\n/** \\internal the return type of real() */\ntypedef typename internal::conditional<NumTraits<Scalar>::IsComplex,\n                    CwiseUnaryView<internal::scalar_real_ref_op<Scalar>, Derived>,\n                    Derived&\n                  >::type NonConstRealReturnType;\n/** \\internal the return type of imag() const */\ntypedef CwiseUnaryOp<internal::scalar_imag_op<Scalar>, const Derived> ImagReturnType;\n/** \\internal the return type of imag() */\ntypedef CwiseUnaryView<internal::scalar_imag_ref_op<Scalar>, Derived> NonConstImagReturnType;\n\ntypedef CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const Derived> NegativeReturnType;\n\n#endif // not EIGEN_PARSED_BY_DOXYGEN\n\n/// \\returns an expression of the opposite of \\c 
*this\n///\nEIGEN_DOC_UNARY_ADDONS(operator-,opposite)\n///\nEIGEN_DEVICE_FUNC\ninline const NegativeReturnType\noperator-() const { return NegativeReturnType(derived()); }\n\n\ntemplate<class NewType> struct CastXpr { typedef typename internal::cast_return_type<Derived,const CwiseUnaryOp<internal::scalar_cast_op<Scalar, NewType>, const Derived> >::type Type; };\n\n/// \\returns an expression of \\c *this with the \\a Scalar type casted to\n/// \\a NewScalar.\n///\n/// The template parameter \\a NewScalar is the type we are casting the scalars to.\n///\nEIGEN_DOC_UNARY_ADDONS(cast,conversion function)\n///\n/// \\sa class CwiseUnaryOp\n///\ntemplate<typename NewType>\nEIGEN_DEVICE_FUNC\ntypename CastXpr<NewType>::Type\ncast() const\n{\n  return typename CastXpr<NewType>::Type(derived());\n}\n\n/// \\returns an expression of the complex conjugate of \\c *this.\n///\nEIGEN_DOC_UNARY_ADDONS(conjugate,complex conjugate)\n///\n/// \\sa <a href=\"group__CoeffwiseMathFunctions.html#cwisetable_conj\">Math functions</a>, MatrixBase::adjoint()\nEIGEN_DEVICE_FUNC\ninline ConjugateReturnType\nconjugate() const\n{\n  return ConjugateReturnType(derived());\n}\n\n/// \\returns a read-only expression of the real part of \\c *this.\n///\nEIGEN_DOC_UNARY_ADDONS(real,real part function)\n///\n/// \\sa imag()\nEIGEN_DEVICE_FUNC\ninline RealReturnType\nreal() const { return RealReturnType(derived()); }\n\n/// \\returns an read-only expression of the imaginary part of \\c *this.\n///\nEIGEN_DOC_UNARY_ADDONS(imag,imaginary part function)\n///\n/// \\sa real()\nEIGEN_DEVICE_FUNC\ninline const ImagReturnType\nimag() const { return ImagReturnType(derived()); }\n\n/// \\brief Apply a unary operator coefficient-wise\n/// \\param[in]  func  Functor implementing the unary operator\n/// \\tparam  CustomUnaryOp Type of \\a func\n/// \\returns An expression of a custom coefficient-wise unary operator \\a func of *this\n///\n/// The function \\c ptr_fun() from the C++ standard library can be used 
to make functors out of normal functions.\n///\n/// Example:\n/// \\include class_CwiseUnaryOp_ptrfun.cpp\n/// Output: \\verbinclude class_CwiseUnaryOp_ptrfun.out\n///\n/// Genuine functors allow for more possibilities, for instance it may contain a state.\n///\n/// Example:\n/// \\include class_CwiseUnaryOp.cpp\n/// Output: \\verbinclude class_CwiseUnaryOp.out\n///\nEIGEN_DOC_UNARY_ADDONS(unaryExpr,unary function)\n///\n/// \\sa unaryViewExpr, binaryExpr, class CwiseUnaryOp\n///\ntemplate<typename CustomUnaryOp>\nEIGEN_DEVICE_FUNC\ninline const CwiseUnaryOp<CustomUnaryOp, const Derived>\nunaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const\n{\n  return CwiseUnaryOp<CustomUnaryOp, const Derived>(derived(), func);\n}\n\n/// \\returns an expression of a custom coefficient-wise unary operator \\a func of *this\n///\n/// The template parameter \\a CustomUnaryOp is the type of the functor\n/// of the custom unary operator.\n///\n/// Example:\n/// \\include class_CwiseUnaryOp.cpp\n/// Output: \\verbinclude class_CwiseUnaryOp.out\n///\nEIGEN_DOC_UNARY_ADDONS(unaryViewExpr,unary function)\n///\n/// \\sa unaryExpr, binaryExpr class CwiseUnaryOp\n///\ntemplate<typename CustomViewOp>\nEIGEN_DEVICE_FUNC\ninline const CwiseUnaryView<CustomViewOp, const Derived>\nunaryViewExpr(const CustomViewOp& func = CustomViewOp()) const\n{\n  return CwiseUnaryView<CustomViewOp, const Derived>(derived(), func);\n}\n\n/// \\returns a non const expression of the real part of \\c *this.\n///\nEIGEN_DOC_UNARY_ADDONS(real,real part function)\n///\n/// \\sa imag()\nEIGEN_DEVICE_FUNC\ninline NonConstRealReturnType\nreal() { return NonConstRealReturnType(derived()); }\n\n/// \\returns a non const expression of the imaginary part of \\c *this.\n///\nEIGEN_DOC_UNARY_ADDONS(imag,imaginary part function)\n///\n/// \\sa real()\nEIGEN_DEVICE_FUNC\ninline NonConstImagReturnType\nimag() { return NonConstImagReturnType(derived()); }\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/plugins/IndexedViewMethods.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2017 Gael Guennebaud <gael.guennebaud@inria.fr>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n#if !defined(EIGEN_PARSED_BY_DOXYGEN)\n\n// This file is automatically included twice to generate const and non-const versions\n\n#ifndef EIGEN_INDEXED_VIEW_METHOD_2ND_PASS\n#define EIGEN_INDEXED_VIEW_METHOD_CONST const\n#define EIGEN_INDEXED_VIEW_METHOD_TYPE  ConstIndexedViewType\n#else\n#define EIGEN_INDEXED_VIEW_METHOD_CONST\n#define EIGEN_INDEXED_VIEW_METHOD_TYPE IndexedViewType\n#endif\n\n#ifndef EIGEN_INDEXED_VIEW_METHOD_2ND_PASS\nprotected:\n\n// define some aliases to ease readability\n\ntemplate<typename Indices>\nstruct IvcRowType : public internal::IndexedViewCompatibleType<Indices,RowsAtCompileTime> {};\n\ntemplate<typename Indices>\nstruct IvcColType : public internal::IndexedViewCompatibleType<Indices,ColsAtCompileTime> {};\n\ntemplate<typename Indices>\nstruct IvcType : public internal::IndexedViewCompatibleType<Indices,SizeAtCompileTime> {};\n\ntypedef typename internal::IndexedViewCompatibleType<Index,1>::type IvcIndex;\n\ntemplate<typename Indices>\ntypename IvcRowType<Indices>::type\nivcRow(const Indices& indices) const {\n  return internal::makeIndexedViewCompatible(indices, internal::variable_if_dynamic<Index,RowsAtCompileTime>(derived().rows()),Specialized);\n}\n\ntemplate<typename Indices>\ntypename IvcColType<Indices>::type\nivcCol(const Indices& indices) const {\n  return internal::makeIndexedViewCompatible(indices, internal::variable_if_dynamic<Index,ColsAtCompileTime>(derived().cols()),Specialized);\n}\n\ntemplate<typename Indices>\ntypename IvcColType<Indices>::type\nivcSize(const Indices& indices) const {\n  return 
internal::makeIndexedViewCompatible(indices, internal::variable_if_dynamic<Index,SizeAtCompileTime>(derived().size()),Specialized);\n}\n\ntemplate<typename RowIndices, typename ColIndices>\nstruct valid_indexed_view_overload {\n  // Here we use is_convertible to Index instead of is_integral in order to treat enums as Index.\n  // In c++11 we could use is_integral<T> && is_enum<T> if is_convertible appears to be too permissive.\n  enum { value = !(internal::is_convertible<RowIndices,Index>::value && internal::is_convertible<ColIndices,Index>::value) };\n};\n\npublic:\n\n#endif\n\ntemplate<typename RowIndices, typename ColIndices>\nstruct EIGEN_INDEXED_VIEW_METHOD_TYPE {\n  typedef IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,\n                      typename IvcRowType<RowIndices>::type,\n                      typename IvcColType<ColIndices>::type> type;\n};\n\n// This is the generic version\n\ntemplate<typename RowIndices, typename ColIndices>\ntypename internal::enable_if<valid_indexed_view_overload<RowIndices,ColIndices>::value\n  && internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::ReturnAsIndexedView,\n  typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type >::type\noperator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST\n{\n  return typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type\n            (derived(), ivcRow(rowIndices), ivcCol(colIndices));\n}\n\n// The following overload returns a Block<> object\n\ntemplate<typename RowIndices, typename ColIndices>\ntypename internal::enable_if<valid_indexed_view_overload<RowIndices,ColIndices>::value\n  && internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::ReturnAsBlock,\n  typename internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::BlockType>::type\noperator()(const RowIndices& rowIndices, const ColIndices& colIndices) 
EIGEN_INDEXED_VIEW_METHOD_CONST\n{\n  typedef typename internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::BlockType BlockType;\n  typename IvcRowType<RowIndices>::type actualRowIndices = ivcRow(rowIndices);\n  typename IvcColType<ColIndices>::type actualColIndices = ivcCol(colIndices);\n  return BlockType(derived(),\n                   internal::first(actualRowIndices),\n                   internal::first(actualColIndices),\n                   internal::size(actualRowIndices),\n                   internal::size(actualColIndices));\n}\n\n// The following overload returns a Scalar\n\ntemplate<typename RowIndices, typename ColIndices>\ntypename internal::enable_if<valid_indexed_view_overload<RowIndices,ColIndices>::value\n  && internal::traits<typename EIGEN_INDEXED_VIEW_METHOD_TYPE<RowIndices,ColIndices>::type>::ReturnAsScalar,\n  CoeffReturnType >::type\noperator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST\n{\n  return Base::operator()(internal::eval_expr_given_size(rowIndices,rows()),internal::eval_expr_given_size(colIndices,cols()));\n}\n\n#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE\n\n// The folowing three overloads are needed to handle raw Index[N] arrays.\n\ntemplate<typename RowIndicesT, std::size_t RowIndicesN, typename ColIndices>\nIndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const RowIndicesT (&)[RowIndicesN],typename IvcColType<ColIndices>::type>\noperator()(const RowIndicesT (&rowIndices)[RowIndicesN], const ColIndices& colIndices) EIGEN_INDEXED_VIEW_METHOD_CONST\n{\n  return IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const RowIndicesT (&)[RowIndicesN],typename IvcColType<ColIndices>::type>\n                    (derived(), rowIndices, ivcCol(colIndices));\n}\n\ntemplate<typename RowIndices, typename ColIndicesT, std::size_t ColIndicesN>\nIndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,typename IvcRowType<RowIndices>::type, const ColIndicesT 
(&)[ColIndicesN]>\noperator()(const RowIndices& rowIndices, const ColIndicesT (&colIndices)[ColIndicesN]) EIGEN_INDEXED_VIEW_METHOD_CONST\n{\n  return IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,typename IvcRowType<RowIndices>::type,const ColIndicesT (&)[ColIndicesN]>\n                    (derived(), ivcRow(rowIndices), colIndices);\n}\n\ntemplate<typename RowIndicesT, std::size_t RowIndicesN, typename ColIndicesT, std::size_t ColIndicesN>\nIndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const RowIndicesT (&)[RowIndicesN], const ColIndicesT (&)[ColIndicesN]>\noperator()(const RowIndicesT (&rowIndices)[RowIndicesN], const ColIndicesT (&colIndices)[ColIndicesN]) EIGEN_INDEXED_VIEW_METHOD_CONST\n{\n  return IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const RowIndicesT (&)[RowIndicesN],const ColIndicesT (&)[ColIndicesN]>\n                    (derived(), rowIndices, colIndices);\n}\n\n#endif // EIGEN_HAS_STATIC_ARRAY_TEMPLATE\n\n// Overloads for 1D vectors/arrays\n\ntemplate<typename Indices>\ntypename internal::enable_if<\n  IsRowMajor && (!(internal::get_compile_time_incr<typename IvcType<Indices>::type>::value==1 || internal::is_integral<Indices>::value)),\n  IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,IvcIndex,typename IvcType<Indices>::type> >::type\noperator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,IvcIndex,typename IvcType<Indices>::type>\n            (derived(), IvcIndex(0), ivcCol(indices));\n}\n\ntemplate<typename Indices>\ntypename internal::enable_if<\n  (!IsRowMajor) && (!(internal::get_compile_time_incr<typename IvcType<Indices>::type>::value==1 || internal::is_integral<Indices>::value)),\n  IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,typename IvcType<Indices>::type,IvcIndex> >::type\noperator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST\n{\n  
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,typename IvcType<Indices>::type,IvcIndex>\n            (derived(), ivcRow(indices), IvcIndex(0));\n}\n\ntemplate<typename Indices>\ntypename internal::enable_if<\n  (internal::get_compile_time_incr<typename IvcType<Indices>::type>::value==1) && (!internal::is_integral<Indices>::value) && (!Symbolic::is_symbolic<Indices>::value),\n  VectorBlock<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,internal::array_size<Indices>::value> >::type\noperator()(const Indices& indices) EIGEN_INDEXED_VIEW_METHOD_CONST\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  typename IvcType<Indices>::type actualIndices = ivcSize(indices);\n  return VectorBlock<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,internal::array_size<Indices>::value>\n            (derived(), internal::first(actualIndices), internal::size(actualIndices));\n}\n\ntemplate<typename IndexType>\ntypename internal::enable_if<Symbolic::is_symbolic<IndexType>::value, CoeffReturnType >::type\noperator()(const IndexType& id) EIGEN_INDEXED_VIEW_METHOD_CONST\n{\n  return Base::operator()(internal::eval_expr_given_size(id,size()));\n}\n\n#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE\n\ntemplate<typename IndicesT, std::size_t IndicesN>\ntypename internal::enable_if<IsRowMajor,\n  IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,IvcIndex,const IndicesT (&)[IndicesN]> >::type\noperator()(const IndicesT (&indices)[IndicesN]) EIGEN_INDEXED_VIEW_METHOD_CONST\n{\n  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,IvcIndex,const IndicesT (&)[IndicesN]>\n            (derived(), IvcIndex(0), indices);\n}\n\ntemplate<typename IndicesT, std::size_t IndicesN>\ntypename internal::enable_if<!IsRowMajor,\n  IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const IndicesT (&)[IndicesN],IvcIndex> >::type\noperator()(const IndicesT (&indices)[IndicesN]) EIGEN_INDEXED_VIEW_METHOD_CONST\n{\n  
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)\n  return IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const IndicesT (&)[IndicesN],IvcIndex>\n            (derived(), indices, IvcIndex(0));\n}\n\n#endif // EIGEN_HAS_STATIC_ARRAY_TEMPLATE\n\n#undef EIGEN_INDEXED_VIEW_METHOD_CONST\n#undef EIGEN_INDEXED_VIEW_METHOD_TYPE\n\n#ifndef EIGEN_INDEXED_VIEW_METHOD_2ND_PASS\n#define EIGEN_INDEXED_VIEW_METHOD_2ND_PASS\n#include \"IndexedViewMethods.h\"\n#undef EIGEN_INDEXED_VIEW_METHOD_2ND_PASS\n#endif\n\n#else // EIGEN_PARSED_BY_DOXYGEN\n\n/**\n  * \\returns a generic submatrix view defined by the rows and columns indexed \\a rowIndices and \\a colIndices respectively.\n  *\n  * Each parameter must either be:\n  *  - An integer indexing a single row or column\n  *  - Eigen::all indexing the full set of respective rows or columns in increasing order\n  *  - An ArithmeticSequence as returned by the Eigen::seq and Eigen::seqN functions\n  *  - Any %Eigen's vector/array of integers or expressions\n  *  - Plain C arrays: \\c int[N]\n  *  - And more generally any type exposing the following two member functions:\n  * \\code\n  * <integral type> operator[](<integral type>) const;\n  * <integral type> size() const;\n  * \\endcode\n  * where \\c <integral \\c type>  stands for any integer type compatible with Eigen::Index (i.e. \\c std::ptrdiff_t).\n  *\n  * The last statement implies compatibility with \\c std::vector, \\c std::valarray, \\c std::array, many of the Range-v3's ranges, etc.\n  *\n  * If the submatrix can be represented using a starting position \\c (i,j) and positive sizes \\c (rows,columns), then this\n  * method will returns a Block object after extraction of the relevant information from the passed arguments. 
This is the case\n  * when all arguments are either:\n  *  - An integer\n  *  - Eigen::all\n  *  - An ArithmeticSequence with compile-time increment strictly equal to 1, as returned by Eigen::seq(a,b), and Eigen::seqN(a,N).\n  *\n  * Otherwise a more general IndexedView<Derived,RowIndices',ColIndices'> object will be returned, after conversion of the inputs\n  * to more suitable types \\c RowIndices' and \\c ColIndices'.\n  *\n  * For 1D vectors and arrays, you better use the operator()(const Indices&) overload, which behave the same way but taking a single parameter.\n  *\n  * \\sa operator()(const Indices&), class Block, class IndexedView, DenseBase::block(Index,Index,Index,Index)\n  */\ntemplate<typename RowIndices, typename ColIndices>\nIndexedView_or_Block\noperator()(const RowIndices& rowIndices, const ColIndices& colIndices);\n\n/** This is an overload of operator()(const RowIndices&, const ColIndices&) for 1D vectors or arrays\n  *\n  * \\only_for_vectors\n  */\ntemplate<typename Indices>\nIndexedView_or_VectorBlock\noperator()(const Indices& indices);\n\n#endif  // EIGEN_PARSED_BY_DOXYGEN\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/plugins/MatrixCwiseBinaryOps.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n// This file is a base class plugin containing matrix specifics coefficient wise functions.\n\n/** \\returns an expression of the Schur product (coefficient wise product) of *this and \\a other\n  *\n  * Example: \\include MatrixBase_cwiseProduct.cpp\n  * Output: \\verbinclude MatrixBase_cwiseProduct.out\n  *\n  * \\sa class CwiseBinaryOp, cwiseAbs2\n  */\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)\ncwiseProduct(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const\n{\n  return EIGEN_CWISE_BINARY_RETURN_TYPE(Derived,OtherDerived,product)(derived(), other.derived());\n}\n\n/** \\returns an expression of the coefficient-wise == operator of *this and \\a other\n  *\n  * \\warning this performs an exact comparison, which is generally a bad idea with floating-point types.\n  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is\n  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and\n  * isMuchSmallerThan().\n  *\n  * Example: \\include MatrixBase_cwiseEqual.cpp\n  * Output: \\verbinclude MatrixBase_cwiseEqual.out\n  *\n  * \\sa cwiseNotEqual(), isApprox(), isMuchSmallerThan()\n  */\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\ninline const CwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>\ncwiseEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const\n{\n  return 
CwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());\n}\n\n/** \\returns an expression of the coefficient-wise != operator of *this and \\a other\n  *\n  * \\warning this performs an exact comparison, which is generally a bad idea with floating-point types.\n  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is\n  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and\n  * isMuchSmallerThan().\n  *\n  * Example: \\include MatrixBase_cwiseNotEqual.cpp\n  * Output: \\verbinclude MatrixBase_cwiseNotEqual.out\n  *\n  * \\sa cwiseEqual(), isApprox(), isMuchSmallerThan()\n  */\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\ninline const CwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>\ncwiseNotEqual(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const\n{\n  return CwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());\n}\n\n/** \\returns an expression of the coefficient-wise min of *this and \\a other\n  *\n  * Example: \\include MatrixBase_cwiseMin.cpp\n  * Output: \\verbinclude MatrixBase_cwiseMin.out\n  *\n  * \\sa class CwiseBinaryOp, max()\n  */\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const OtherDerived>\ncwiseMin(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const\n{\n  return CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const OtherDerived>(derived(), other.derived());\n}\n\n/** \\returns an expression of the coefficient-wise min of *this and scalar \\a other\n  *\n  * \\sa class CwiseBinaryOp, min()\n  */\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_min_op<Scalar,Scalar>, const Derived, const ConstantReturnType>\ncwiseMin(const Scalar &other) const\n{\n  return 
cwiseMin(Derived::Constant(rows(), cols(), other));\n}\n\n/** \\returns an expression of the coefficient-wise max of *this and \\a other\n  *\n  * Example: \\include MatrixBase_cwiseMax.cpp\n  * Output: \\verbinclude MatrixBase_cwiseMax.out\n  *\n  * \\sa class CwiseBinaryOp, min()\n  */\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const OtherDerived>\ncwiseMax(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const\n{\n  return CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const OtherDerived>(derived(), other.derived());\n}\n\n/** \\returns an expression of the coefficient-wise max of *this and scalar \\a other\n  *\n  * \\sa class CwiseBinaryOp, min()\n  */\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_max_op<Scalar,Scalar>, const Derived, const ConstantReturnType>\ncwiseMax(const Scalar &other) const\n{\n  return cwiseMax(Derived::Constant(rows(), cols(), other));\n}\n\n\n/** \\returns an expression of the coefficient-wise quotient of *this and \\a other\n  *\n  * Example: \\include MatrixBase_cwiseQuotient.cpp\n  * Output: \\verbinclude MatrixBase_cwiseQuotient.out\n  *\n  * \\sa class CwiseBinaryOp, cwiseProduct(), cwiseInverse()\n  */\ntemplate<typename OtherDerived>\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>\ncwiseQuotient(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const\n{\n  return CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());\n}\n\ntypedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,Scalar,internal::cmp_EQ>, const Derived, const ConstantReturnType> CwiseScalarEqualReturnType;\n\n/** \\returns an expression of the coefficient-wise == operator of \\c *this and a scalar \\a s\n  *\n  * \\warning this performs an exact 
comparison, which is generally a bad idea with floating-point types.\n  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is\n  * generally a far better idea to use a fuzzy comparison as provided by isApprox() and\n  * isMuchSmallerThan().\n  *\n  * \\sa cwiseEqual(const MatrixBase<OtherDerived> &) const\n  */\nEIGEN_DEVICE_FUNC\ninline const CwiseScalarEqualReturnType\ncwiseEqual(const Scalar& s) const\n{\n  return CwiseScalarEqualReturnType(derived(), Derived::Constant(rows(), cols(), s), internal::scalar_cmp_op<Scalar,Scalar,internal::cmp_EQ>());\n}\n"
  },
  {
    "path": "app/src/main/cpp/Eigen/src/plugins/MatrixCwiseUnaryOps.h",
    "content": "// This file is part of Eigen, a lightweight C++ template library\n// for linear algebra.\n//\n// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>\n// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>\n//\n// This Source Code Form is subject to the terms of the Mozilla\n// Public License v. 2.0. If a copy of the MPL was not distributed\n// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n// This file is included into the body of the base classes supporting matrix specific coefficient-wise functions.\n// This include MatrixBase and SparseMatrixBase.\n\n\ntypedef CwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived> CwiseAbsReturnType;\ntypedef CwiseUnaryOp<internal::scalar_abs2_op<Scalar>, const Derived> CwiseAbs2ReturnType;\ntypedef CwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived> CwiseSqrtReturnType;\ntypedef CwiseUnaryOp<internal::scalar_sign_op<Scalar>, const Derived> CwiseSignReturnType;\ntypedef CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived> CwiseInverseReturnType;\n\n/// \\returns an expression of the coefficient-wise absolute value of \\c *this\n///\n/// Example: \\include MatrixBase_cwiseAbs.cpp\n/// Output: \\verbinclude MatrixBase_cwiseAbs.out\n///\nEIGEN_DOC_UNARY_ADDONS(cwiseAbs,absolute value)\n///\n/// \\sa cwiseAbs2()\n///\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const CwiseAbsReturnType\ncwiseAbs() const { return CwiseAbsReturnType(derived()); }\n\n/// \\returns an expression of the coefficient-wise squared absolute value of \\c *this\n///\n/// Example: \\include MatrixBase_cwiseAbs2.cpp\n/// Output: \\verbinclude MatrixBase_cwiseAbs2.out\n///\nEIGEN_DOC_UNARY_ADDONS(cwiseAbs2,squared absolute value)\n///\n/// \\sa cwiseAbs()\n///\nEIGEN_DEVICE_FUNC\nEIGEN_STRONG_INLINE const CwiseAbs2ReturnType\ncwiseAbs2() const { return CwiseAbs2ReturnType(derived()); }\n\n/// \\returns an expression of the coefficient-wise square root of *this.\n///\n/// 
Example: \\include MatrixBase_cwiseSqrt.cpp\n/// Output: \\verbinclude MatrixBase_cwiseSqrt.out\n///\nEIGEN_DOC_UNARY_ADDONS(cwiseSqrt,square-root)\n///\n/// \\sa cwisePow(), cwiseSquare()\n///\nEIGEN_DEVICE_FUNC\ninline const CwiseSqrtReturnType\ncwiseSqrt() const { return CwiseSqrtReturnType(derived()); }\n\n/// \\returns an expression of the coefficient-wise signum of *this.\n///\n/// Example: \\include MatrixBase_cwiseSign.cpp\n/// Output: \\verbinclude MatrixBase_cwiseSign.out\n///\nEIGEN_DOC_UNARY_ADDONS(cwiseSign,sign function)\n///\nEIGEN_DEVICE_FUNC\ninline const CwiseSignReturnType\ncwiseSign() const { return CwiseSignReturnType(derived()); }\n\n\n/// \\returns an expression of the coefficient-wise inverse of *this.\n///\n/// Example: \\include MatrixBase_cwiseInverse.cpp\n/// Output: \\verbinclude MatrixBase_cwiseInverse.out\n///\nEIGEN_DOC_UNARY_ADDONS(cwiseInverse,inverse)\n///\n/// \\sa cwiseProduct()\n///\nEIGEN_DEVICE_FUNC\ninline const CwiseInverseReturnType\ncwiseInverse() const { return CwiseInverseReturnType(derived()); }\n\n\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/aten/aten_op_template.h",
    "content": "#pragma once\n#include <unordered_map>\n#include <string>\n#include <ATen/ATen.h>\n#include <caffe2/core/context.h>\n#include <caffe2/core/operator.h>\n#include <google/protobuf/text_format.h>\n#include <iostream>\n\n// a map from descriptor strings (see [DESCRIPTORS])\n// to the key in the switch statement that implements them\nstatic std::unordered_map<std::string, int> op_to_key = {\n  ${mappings}\n};\n\nnamespace caffe2 {\n\nusing at::Half; // for AT_FORALL_SCALAR_TYPES\n\nstd::function<void(void*)> deleterFor(at::Tensor t) {\n  // return a closure that holds a handle to t until it is called\n  // to keep the aten memory alive\n  return [t](void * ptr) mutable {\n    t.reset();\n  };\n}\n\ntemplate <class Context>\nclass ATenOp : public Operator<Context> {\n public:\n  ATenOp(const OperatorDef& operator_def, Workspace* ws)\n  : Operator<Context>(operator_def, ws) {\n    VLOG(2) << \"ATen OpDef: \" << ProtoDebugString(operator_def) << \"\\n\";\n    switch(findImplementation(operator_def)) {\n      ${implementations}\n      default:\n        CAFFE_THROW(\"Unexpected key value for aten operator\");\n    }\n  }\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    return run_op();\n  }\nprivate:\n  // actual operator implementation is initialized in ctor.\n  std::function<bool()> run_op;\n  at::Backend backend() const;\n\n  TypeMeta typeMetaFor(const at::Tensor & t) {\n    return typeMetaFor(t.type().scalarType());\n  }\n  TypeMeta typeMetaFor(at::ScalarType st) {\n    #define DEFINE_CASE(ctype,aten_name,_) \\\n      case at::k##aten_name: \\\n        return TypeMeta::Make<ctype>();\n    switch(st) {\n      AT_FORALL_SCALAR_TYPES(DEFINE_CASE)\n      default:\n        CAFFE_THROW(\"Unknown ATen Type\");\n    }\n    #undef DEFINE_CASE\n  }\n\n  at::Type & typeFor(const Tensor<Context> & ten) {\n    return at::getType(backend(), atScalarTypeFor(ten.meta()));\n  }\n  at::Tensor tensorWrapping(Tensor<Context> & ten) {\n    return 
typeFor(ten).tensorFromBlob(ten.raw_mutable_data(), ten.dims());\n  }\n  at::ScalarType atScalarTypeFor(const TypeMeta & meta) {\n    #define DEFINE_IF(ctype,aten_name,_) \\\n    if(meta.Match<ctype>()) { \\\n      return at::k##aten_name; \\\n    }\n    AT_FORALL_SCALAR_TYPES(DEFINE_IF)\n    #undef DEFINE_IF\n    CAFFE_THROW(\"Unknown type meta\"); // TODO: improve error message...\n  }\n  void assignTo(Tensor<Context> * dst, const at::Tensor & src_) {\n    at::Tensor src = src_.contiguous();\n    auto at_sizes = src.sizes();\n    std::vector<int64_t> dims(at_sizes.begin(),at_sizes.end());\n    dst->Resize(dims);\n    dst->ShareExternalPointer(src.data_ptr(), typeMetaFor(src), 0, deleterFor(src));\n  }\n\n  // the AT_FORALL_SCALAR_TYPES macro just gives a 'i' or 'd' argument\n  // for each type to specify if it is stored as a integer or a double.\n  // We need this workaround here to extract the value in the scalar losslessly\n  // because in some cases like 'sum' Torch promotes float to double\n  // and will complain if we downcast it with toFloat, causing it\n  // to lose precision\n  double extract_d(const at::Scalar & s) {\n    return s.toDouble();\n  }\n  int64_t extract_i(const at::Scalar & s) {\n    return s.toLong();\n  }\n\n  void assignTo(Tensor<Context> * dst, at::Type & inferred_type, at::Scalar scalar) {\n    switch(inferred_type.scalarType()) {\n      #define DEFINE_CASE(ctype,aten_name,native) \\\n        case at::k##aten_name: { \\\n          auto value = extract_##native(scalar); \\\n          assignToValue<ctype>(dst, at::convert<ctype,decltype(value)>(value)); \\\n        } break;\n      AT_FORALL_SCALAR_TYPES(DEFINE_CASE)\n      #undef DEFINE_CASE\n      default:\n        CAFFE_THROW(\"Unknown ATen Type\");\n    }\n  }\n  template<typename T>\n  void assignToValue(Tensor<Context> * dst, T v) {\n    dst->Resize(std::vector<TIndex>());\n    math::Set(1, v, dst->template mutable_data<T>(), &context_);\n  }\n  int findImplementation(const 
OperatorDef& operator_def) {\n    CAFFE_ENFORCE(HasArgument(\"operator\"));\n    std::string op = OperatorBase::GetSingleArgument<std::string>(\"operator\", \"\");\n    // construct descriptor string ([DESCRIPTORS]) given the attributes\n    // and inputs of this operator_def, and look up the implementation key\n    // for this variant\n    std::stringstream descriptor;\n    descriptor << op << \"-\" << InputSize();\n    std::vector<std::string> attrs;\n    for(size_t i = 0; i < operator_def.arg_size(); i++) {\n      auto & attr = operator_def.arg(i);\n      if(attr.name() == \"operator\" || attr.name() == \"type\" )\n        continue;\n      attrs.push_back(attr.name());\n    }\n    std::sort(attrs.begin(), attrs.end());\n    for(auto & a : attrs)\n      descriptor << \"-\" << a;\n    std::string descriptor_s = descriptor.str();\n    if(op_to_key.count(descriptor_s) == 0) {\n      std::stringstream ss;\n      ss << \"Attempting to run unknown ATen operator configuration: \"\n         << descriptor_s;\n      CAFFE_THROW(ss.str());\n    }\n    return op_to_key.at(descriptor_s);\n  }\n  at::Scalar readScalarAttribute(const std::string & name) {\n    if(OperatorBase::HasSingleArgumentOfType<int64_t>(name)) {\n      return OperatorBase::GetSingleArgument<int64_t>(name, 0);\n    } else {\n      CAFFE_ENFORCE(OperatorBase::HasSingleArgumentOfType<float>(name));\n      return OperatorBase::GetSingleArgument<float>(name, 0);\n    }\n  }\n  template<typename T>\n  T readAttribute(const std::string & name) {\n    CAFFE_ENFORCE(OperatorBase::HasSingleArgumentOfType<int64_t>(name));\n    return OperatorBase::GetSingleArgument<T>(name, 0);\n  }\n  std::vector<int64_t> readIntList(const std::string & name) {\n    CAFFE_ENFORCE(OperatorBase::HasArgument(name));\n    return OperatorBase::GetRepeatedArgument<int64_t>(name, {});\n  }\n  at::ScalarType stringToScalarType(const std::string & name) {\n    #define DEFINE_IF(type,aten) \\\n      if(#type == name) \\\n        return 
at::k##aten;\n    DEFINE_IF(float16, Half)\n    DEFINE_IF(float, Float)\n    DEFINE_IF(double, Double)\n    DEFINE_IF(uint8, Byte)\n    DEFINE_IF(int8, Char)\n    DEFINE_IF(int16, Short)\n    DEFINE_IF(int32, Int)\n    DEFINE_IF(int64, Long)\n    CAFFE_THROW(\"unsupported type annotation: \", name);\n  }\n  at::Type & stringToType(const std::string & name) {\n    return at::getType(backend(), stringToScalarType(name));\n  }\n  at::Type * readTypeAttribute(const std::string & name) {\n    CAFFE_ENFORCE(OperatorBase::HasSingleArgumentOfType<std::string>(name));\n    return &stringToType(OperatorBase::GetSingleArgument<std::string>(name, \"\"));\n  }\n};\n\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/gloo/allreduce_ops.h",
    "content": "#pragma once\n\n#include <algorithm>\n\n#include \"caffe2/contrib/gloo/common.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\n#include <gloo/algorithm.h>\n#include <gloo/common/error.h>\n#include <gloo/context.h>\n\nnamespace caffe2 {\nnamespace gloo {\n\ntemplate <class Context>\nclass AllreduceOp final : public Operator<Context> {\n  enum Mode { RING_FULL, RING_CHUNKED, HALVING_DOUBLING };\n\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  AllreduceOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        ws_(ws),\n        status_blob_(\n            OperatorBase::GetSingleArgument<std::string>(\"status_blob\", \"\")),\n        gpu_direct_(\n            OperatorBase::GetSingleArgument<bool>(\"gpu_direct\", false)) {\n    if (status_blob_ != \"\") {\n      ws_->CreateBlob(status_blob_);\n    }\n  }\n\n  virtual ~AllreduceOp() {}\n\n  bool RunOnDevice() override {\n    std::call_once(once_, [&] { initialize(); });\n\n    // If any parameter has changed in between runs, the initialized\n    // algorithm is invalid and cannot be used.\n    update(current_);\n    CAFFE_ENFORCE(current_ == init_, \"Inputs/outputs have changed\");\n\n    try {\n      algorithm_->run();\n    } catch (::gloo::IoException& ioe) {\n      LOG(ERROR) << \"Caught gloo IO exception: \" << ioe.what();\n      if (status_blob_ != \"\") {\n        signalFailure(ws_->GetBlob(status_blob_), ioe);\n        return false;\n      } else {\n        throw ioe;\n      }\n    }\n    return true;\n  }\n\n protected:\n  void initialize() {\n    Mode mode = HALVING_DOUBLING;\n    auto bytes = Input(1).nbytes();\n\n    // Store which inputs/outputs this instance initialized with\n    update(init_);\n\n    // Verify inputs == ouputs\n    CAFFE_ENFORCE_EQ(init_.inputs.size(), init_.outputs.size());\n    for (auto i = 0; i < init_.inputs.size(); i++) {\n      CAFFE_ENFORCE_EQ(init_.inputs[i], init_.outputs[i]);\n    }\n\n  
  // Verify tensors all have same size\n    size_t size = Input(1).size();\n    for (auto i = 2; i < InputSize(); i++) {\n      CAFFE_ENFORCE_EQ(Input(i).size(), size);\n    }\n\n    // Verify tensors all have same type\n    TypeMeta meta = Input(1).meta();\n    for (auto i = 2; i < InputSize(); i++) {\n      CAFFE_ENFORCE(Input(i).meta() == meta);\n    }\n\n    switch (mode) {\n      case RING_FULL:\n        initializeRingFull();\n        return;\n      case RING_CHUNKED:\n        initializeRingChunked();\n        return;\n      case HALVING_DOUBLING:\n        initializeHalvingDoubling();\n        return;\n    }\n\n    CAFFE_ENFORCE(false, \"Unreachable code\");\n  }\n\n  void initializeHalvingDoubling();\n  void initializeRingFull();\n  void initializeRingChunked();\n\n  std::once_flag once_;\n  std::unique_ptr<::gloo::Algorithm> algorithm_;\n\n  // Captures the parameters passed to Gloo when first initialized.\n  // An instance is updated every time this op runs and is compared\n  // to the reference instance for equality. 
If any parameter has\n  // changed from run to run, the initialized algorithm is invalid.\n  struct GlooParameters {\n    std::shared_ptr<::gloo::Context> context;\n    std::vector<const void*> inputs;\n    std::vector<void*> outputs;\n    size_t size;\n    TypeMeta meta;\n\n    template <typename T>\n    std::vector<const T*> getInputs() {\n      std::vector<const T*> result;\n      result.reserve(inputs.size());\n      for (auto& input : inputs) {\n        result.push_back(reinterpret_cast<T*>(input));\n      }\n      return result;\n    }\n\n    template <typename T>\n    std::vector<T*> getOutputs() {\n      std::vector<T*> result;\n      result.reserve(outputs.size());\n      for (auto& output : outputs) {\n        result.push_back(reinterpret_cast<T*>(output));\n      }\n      return result;\n    }\n\n    template <typename T>\n    bool IsType() const {\n      return meta.Match<T>();\n    }\n\n    bool operator==(GlooParameters const& other) const {\n      return context == other.context && inputs == other.inputs &&\n          outputs == other.outputs && size == other.size;\n    }\n  };\n\n  void update(GlooParameters& params) {\n    params.context = OperatorBase::Input<std::shared_ptr<::gloo::Context>>(0);\n    params.inputs.resize(InputSize() - 1);\n    params.outputs.resize(OutputSize());\n    for (auto i = 0; i < params.inputs.size(); i++) {\n      params.inputs[i] = Input(i + 1).template raw_data();\n      params.outputs[i] = Output(i)->template raw_mutable_data();\n    }\n    params.size = Output(0)->size();\n    params.meta = Output(0)->meta();\n  }\n\n  GlooParameters init_;\n  GlooParameters current_;\n  Workspace* ws_;\n  std::string status_blob_;\n  const bool gpu_direct_;\n};\n\n} // namespace gloo\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/gloo/barrier_ops.h",
    "content": "#pragma once\n\n#include \"caffe2/contrib/gloo/common.h\"\n#include \"caffe2/core/operator.h\"\n\n#include <gloo/algorithm.h>\n#include <gloo/barrier_all_to_one.h>\n#include <gloo/common/error.h>\n#include <gloo/context.h>\n\nnamespace caffe2 {\nnamespace gloo {\n\ntemplate <class Context>\nclass BarrierOp final : public Operator<Context> {\n public:\n  BarrierOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        ws_(ws),\n        status_blob_(\n            OperatorBase::GetSingleArgument<std::string>(\"status_blob\", \"\")) {\n    if (status_blob_ != \"\") {\n      ws_->CreateBlob(status_blob_);\n    }\n  }\n\n  virtual ~BarrierOp() {}\n\n  bool RunOnDevice() override {\n    auto context = OperatorBase::Input<std::shared_ptr<::gloo::Context>>(0);\n    std::call_once(once_, [&] {\n      initContext_ = context;\n      // Use an all-to-one barrier synchronizing against rank 0\n      algorithm_.reset(new ::gloo::BarrierAllToOne(initContext_, 0));\n    });\n\n    // If any parameter has changed in between runs, the initialized\n    // algorithm is invalid and cannot be used.\n    CAFFE_ENFORCE(context == initContext_, \"Context has changed\");\n\n    try {\n      algorithm_->run();\n    } catch (::gloo::IoException& ioe) {\n      LOG(ERROR) << \"Caught gloo IO exception: \" << ioe.what();\n      if (status_blob_ != \"\") {\n        signalFailure(ws_->GetBlob(status_blob_), ioe);\n        return false;\n      } else {\n        throw ioe;\n      }\n    }\n    return true;\n  }\n\n protected:\n  std::once_flag once_;\n  std::shared_ptr<::gloo::Context> initContext_;\n  std::unique_ptr<::gloo::Algorithm> algorithm_;\n  Workspace* ws_;\n  std::string status_blob_;\n};\n} // namespace gloo\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/gloo/broadcast_ops.h",
    "content": "#pragma once\n\n#include <algorithm>\n\n#include \"caffe2/contrib/gloo/common.h\"\n#include \"caffe2/core/operator.h\"\n\n#include <gloo/algorithm.h>\n#include <gloo/common/error.h>\n#include <gloo/context.h>\n\nnamespace caffe2 {\nnamespace gloo {\n\ntemplate <class Context>\nclass BroadcastOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  BroadcastOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        root_(OperatorBase::template GetSingleArgument<int>(\"root\", 0)),\n        ws_(ws),\n        status_blob_(\n            OperatorBase::GetSingleArgument<std::string>(\"status_blob\", \"\")) {\n    if (status_blob_ != \"\") {\n      ws_->CreateBlob(status_blob_);\n    }\n  }\n\n  virtual ~BroadcastOp() {}\n\n  bool RunOnDevice() override {\n    std::call_once(once_, [&] { initialize(); });\n\n    // If any parameter has changed in between runs, the initialized\n    // algorithm is invalid and cannot be used.\n    update(current_);\n    CAFFE_ENFORCE(current_ == init_, \"Inputs/outputs have changed\");\n\n    try {\n      algorithm_->run();\n    } catch (::gloo::IoException& ioe) {\n      LOG(ERROR) << \"Caught gloo IO exception: \" << ioe.what();\n      if (status_blob_ != \"\") {\n        signalFailure(ws_->GetBlob(status_blob_), ioe);\n        return false;\n      } else {\n        throw ioe;\n      }\n    }\n    return true;\n  }\n\n protected:\n  void initialize() {\n    // Store which inputs/outputs this instance initialized with\n    update(init_);\n\n    // Verify inputs == ouputs\n    CAFFE_ENFORCE_EQ(init_.inputs.size(), init_.outputs.size());\n    for (auto i = 0; i < init_.inputs.size(); i++) {\n      CAFFE_ENFORCE_EQ(init_.inputs[i], init_.outputs[i]);\n    }\n\n    // Verify tensors all have same size\n    size_t size = Input(1).size();\n    for (auto i = 2; i < InputSize(); i++) {\n      CAFFE_ENFORCE_EQ(Input(i).size(), size);\n    }\n\n    // 
Verify tensors all have same size\n    TypeMeta meta = Input(1).meta();\n    for (auto i = 2; i < InputSize(); i++) {\n      CAFFE_ENFORCE(Input(i).meta() == meta);\n    }\n\n    // Finally initialize the algorithm\n    initializeAlgorithm();\n  }\n\n  void initializeAlgorithm();\n\n  const int root_;\n  std::once_flag once_;\n  std::unique_ptr<::gloo::Algorithm> algorithm_;\n\n  // Captures the parameters passed to Gloo when first initialized.\n  // An instance is updated every time this op runs and is compared\n  // to the reference instance for equality. If any parameter has\n  // changed from run to run, the initialized algorithm is invalid.\n  struct GlooParameters {\n    std::shared_ptr<::gloo::Context> context;\n    std::vector<const void*> inputs;\n    std::vector<void*> outputs;\n    size_t size;\n    TypeMeta meta;\n\n    template <typename T>\n    std::vector<const T*> getInputs() {\n      std::vector<const T*> result;\n      result.reserve(inputs.size());\n      for (auto& input : inputs) {\n        result.push_back(reinterpret_cast<T*>(input));\n      }\n      return result;\n    }\n\n    template <typename T>\n    std::vector<T*> getOutputs() {\n      std::vector<T*> result;\n      result.reserve(outputs.size());\n      for (auto& output : outputs) {\n        result.push_back(reinterpret_cast<T*>(output));\n      }\n      return result;\n    }\n\n    template <typename T>\n    bool IsType() const {\n      return meta.Match<T>();\n    }\n\n    bool operator==(GlooParameters const& other) const {\n      return context == other.context && inputs == other.inputs &&\n          outputs == other.outputs && size == other.size && meta == other.meta;\n    }\n  };\n\n  void update(GlooParameters& params) {\n    params.context = OperatorBase::Input<std::shared_ptr<::gloo::Context>>(0);\n    params.inputs.resize(InputSize() - 1);\n    params.outputs.resize(OutputSize());\n    for (auto i = 0; i < params.inputs.size(); i++) {\n      params.inputs[i] = Input(i + 
1).template raw_data();\n      params.outputs[i] = Output(i)->template raw_mutable_data();\n    }\n    params.size = Output(0)->size();\n    params.meta = Output(0)->meta();\n  }\n\n  GlooParameters init_;\n  GlooParameters current_;\n  Workspace* ws_;\n  std::string status_blob_;\n};\n\n} // namespace gloo\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/gloo/common.h",
    "content": "#pragma once\n\n#include <exception>\n\n#include \"caffe2/core/blob.h\"\n\n#include <gloo/config.h>\n#include <gloo/transport/device.h>\n\nnamespace caffe2 {\nnamespace gloo {\n\nvoid signalFailure(Blob* status_blob, std::exception& exception);\n\nstruct createDeviceAttr {\n    // \"tcp\" or \"ibverbs\"\n    std::string transport;\n\n    // E.g. \"eth0\" (tcp), or \"mlx5_0\" (ibverbs).\n    // This may be empty to make Gloo figure it out.\n    std::string interface;\n};\n\nstd::shared_ptr<::gloo::transport::Device> createDevice(\n    const createDeviceAttr attr);\n\n#if defined(GLOO_USE_MPI) && GLOO_USE_MPI\nvoid mpiInitialize();\nvoid mpiFinalize();\n#endif\n\n} // namespace gloo\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/gloo/common_world_ops.h",
    "content": "#pragma once\n\n#include \"caffe2/contrib/gloo/common.h\"\n#include \"caffe2/contrib/gloo/store_handler.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/distributed/store_handler.h\"\n\n#include <gloo/common/error.h>\n#include <gloo/config.h>\n#include <gloo/rendezvous/context.h>\n#include <gloo/rendezvous/prefix_store.h>\n\n#if defined(GLOO_USE_MPI) && GLOO_USE_MPI\n#include <gloo/mpi/context.h>\n#endif\n\nnamespace caffe2 {\nnamespace gloo {\n\ntemplate <class Context>\nclass CreateCommonWorld final : public Operator<Context> {\n public:\n  using CommonWorld = std::shared_ptr<::gloo::Context>;\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  CreateCommonWorld(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        size_(OperatorBase::template GetSingleArgument<int>(\"size\", 0)),\n        rank_(OperatorBase::template GetSingleArgument<int>(\"rank\", 0)),\n        sync_(OperatorBase::template GetSingleArgument<bool>(\"sync\", false)),\n        transport_(OperatorBase::template GetSingleArgument<std::string>(\n                       \"transport\", \"tcp\")),\n        interface_(OperatorBase::template GetSingleArgument<std::string>(\n                       \"interface\", \"\")),\n        mpi_rendezvous_(OperatorBase::template GetSingleArgument<bool>(\n                       \"mpi_rendezvous\", false)),\n        status_blob_(\n            OperatorBase::GetSingleArgument<std::string>(\"status_blob\", \"\")),\n        timeout_ms_(OperatorBase::GetSingleArgument<int>(\"timeout_ms\", -1)),\n        ws_(ws) {\n    CAFFE_ENFORCE(\n        operator_def.has_name(), \"CreateCommonWorld operator requires name\");\n    CAFFE_ENFORCE(rank_ >= 0 && rank_ < size_);\n    name_ = operator_def.name();\n    if (status_blob_ != \"\") {\n      ws_->CreateBlob(status_blob_);\n    }\n    initialize();\n\n#if defined(GLOO_USE_MPI) && GLOO_USE_MPI\n    if (mpi_rendezvous_) {\n      mpiInitialize();\n    }\n#endif\n  }\n\n  
virtual ~CreateCommonWorld() {\n#if defined(GLOO_USE_MPI) && GLOO_USE_MPI\n    if (mpi_rendezvous_) {\n      mpiFinalize();\n    }\n#endif\n  }\n\n  CommonWorld rendezvousWithMPI() {\n#if defined(GLOO_USE_MPI) && GLOO_USE_MPI\n    auto context = std::make_shared<::gloo::mpi::Context>(MPI_COMM_WORLD);\n    if (timeout_ms_ != -1) {\n      context->setTimeout(std::chrono::milliseconds(timeout_ms_));\n    }\n    context->connectFullMesh(device_);\n    return context;\n#else\n    CAFFE_THROW(\n      \"Gloo was not compiled with MPI support. \",\n      \"Please recompile with -DUSE_MPI=1.\");\n#endif\n  }\n\n  CommonWorld rendezvousWithStore(\n      const std::unique_ptr<StoreHandler>& handler) {\n    // Use PrefixStore to isolate different CreateCommonWorld instances\n    StoreHandlerWrapper wrapper(*handler);\n    ::gloo::rendezvous::PrefixStore store(name_, wrapper);\n    auto context = std::make_shared<::gloo::rendezvous::Context>(rank_, size_);\n    if (timeout_ms_ != -1) {\n      context->setTimeout(std::chrono::milliseconds(timeout_ms_));\n    }\n    context->connectFullMesh(store, device_);\n    return context;\n  }\n\n  bool RunOnDevice() override {\n    try {\n      CommonWorld context;\n      if (mpi_rendezvous_) {\n        context = rendezvousWithMPI();\n      } else {\n        CAFFE_ENFORCE_EQ(InputSize(), 1, \"Expected store handler input\");\n        const auto& handler =\n            OperatorBase::Input<std::unique_ptr<StoreHandler>>(STORE_HANDLER);\n        context = rendezvousWithStore(handler);\n      }\n\n      // Switch pairs to synchronous mode if configured to do so\n      if (sync_) {\n        for (int i = 0; i < context->size; i++) {\n          auto& pair = context->getPair(i);\n          if (pair) {\n            pair->setSync(true, false);\n          }\n        }\n      }\n\n      *OperatorBase::Output<CommonWorld>(COMM) = std::move(context);\n    } catch (::gloo::IoException& ioe) {\n      LOG(ERROR) << \"Caught gloo IO exception: \" << 
ioe.what();\n      return handleException(ioe);\n    } catch (::caffe2::StoreHandlerTimeoutException& te) {\n      LOG(ERROR) << \"Caught store handler timeout exception: \" << te.what();\n      return handleException(te);\n    }\n    return true;\n  }\n\n private:\n  bool handleException(std::exception& ex) {\n    if (status_blob_ != \"\") {\n      signalFailure(ws_->GetBlob(status_blob_), ex);\n      return false;\n    } else {\n      throw ex;\n    }\n  }\n\n  void initialize() {\n    // Share single device between all common worlds.\n    static std::once_flag once;\n    static std::shared_ptr<::gloo::transport::Device> device;\n    std::call_once(once, [&]() {\n        createDeviceAttr attr;\n        attr.transport = transport_;\n        attr.interface = interface_;\n        device = createDevice(attr);\n      });\n    device_ = device;\n\n    // Context specific initialization.\n    initializeForContext();\n  }\n\n  void initializeForContext();\n\n  const int size_;\n  const int rank_;\n  const bool sync_;\n  const std::string transport_;\n  const std::string interface_;\n  const bool mpi_rendezvous_;\n  const std::string status_blob_;\n  const int timeout_ms_;\n  Workspace* ws_;\n\n  std::string name_;\n  std::shared_ptr<::gloo::transport::Device> device_;\n\n  INPUT_TAGS(STORE_HANDLER);\n  OUTPUT_TAGS(COMM);\n};\n\ntemplate <class Context>\nclass CloneCommonWorld final : public Operator<Context> {\n public:\n  using CommonWorld = std::shared_ptr<::gloo::Context>;\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  CloneCommonWorld(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        sync_(OperatorBase::template GetSingleArgument<bool>(\"sync\", false)),\n        ws_(ws),\n        status_blob_(\n            OperatorBase::GetSingleArgument<std::string>(\"status_blob\", \"\")) {\n    if (status_blob_ != \"\") {\n      ws_->CreateBlob(status_blob_);\n    }\n  }\n\n  virtual ~CloneCommonWorld() {}\n\n  bool RunOnDevice() 
override {\n    try {\n      auto existing = OperatorBase::Input<CommonWorld>(EXISTING_COMM);\n      ::gloo::rendezvous::ContextFactory factory(existing);\n      auto clone = factory.makeContext(existing->getDevice());\n\n      // Switch pairs to synchronous mode if configured to do so\n      if (sync_) {\n        for (int i = 0; i < clone->size; i++) {\n          auto& pair = clone->getPair(i);\n          if (pair) {\n            pair->setSync(true, false);\n          }\n        }\n      }\n\n      *OperatorBase::Output<CommonWorld>(CLONED_COMM) = std::move(clone);\n    } catch (::gloo::IoException& ioe) {\n      LOG(ERROR) << \"Caught gloo IO exception: \" << ioe.what();\n      return handleException(ioe);\n    }\n    return true;\n  }\n\n private:\n  bool handleException(std::exception& ex) {\n    if (status_blob_ != \"\") {\n      signalFailure(ws_->GetBlob(status_blob_), ex);\n      return false;\n    } else {\n      throw ex;\n    }\n  }\n\n  const bool sync_;\n  Workspace* ws_;\n  std::string status_blob_;\n\n  INPUT_TAGS(EXISTING_COMM);\n  OUTPUT_TAGS(CLONED_COMM);\n};\n\nclass DestroyCommonWorld final : public Operator<CPUContext> {\n public:\n  DestroyCommonWorld(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<CPUContext>(operator_def, ws) {\n    cw_name_ = operator_def.input(0);\n  }\n\n  bool RunOnDevice() override {\n    if (OperatorBase::InputBlob(0).GetRaw() == nullptr) {\n      return true;\n    }\n    const auto& context =\n        OperatorBase::Input<std::shared_ptr<::gloo::Context>>(0);\n\n    if (context) {\n      LOG(INFO) << \"Closing connections: \" << cw_name_;\n      context->closeConnections();\n    }\n    return true;\n  }\n\n private:\n  std::string cw_name_;\n};\n\n} // namespace gloo\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/gloo/context.h",
    "content": "#pragma once\n\n#include <gloo/context.h>\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/gloo/store_handler.h",
    "content": "#pragma once\n\n#include \"caffe2/distributed/store_handler.h\"\n\n#include <gloo/rendezvous/store.h>\n\nnamespace caffe2 {\nnamespace gloo {\n\nclass StoreHandlerWrapper : public ::gloo::rendezvous::Store {\n public:\n  explicit StoreHandlerWrapper(StoreHandler& handler) : handler_(handler) {}\n\n  virtual ~StoreHandlerWrapper() {}\n\n  virtual void set(const std::string& key, const std::vector<char>& data)\n      override;\n\n  virtual std::vector<char> get(const std::string& key) override;\n\n  virtual void wait(const std::vector<std::string>& keys) override {\n    wait(keys, ::gloo::rendezvous::Store::kDefaultTimeout);\n  }\n\n  virtual void wait(\n      const std::vector<std::string>& keys,\n      const std::chrono::milliseconds& timeout) override;\n\n protected:\n  StoreHandler& handler_;\n};\n\n} // namespace gloo\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/nccl/cuda_nccl_gpu.h",
    "content": "#pragma once\n\n#include <cstddef>\n\n#include \"caffe2/core/common_gpu.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/core/logging.h\"\n\n#include <nccl.h>\n#include <unordered_map>\n\n#define NCCL_VERSION_MIN(major, minor, patch) \\\n  ((NCCL_MAJOR > major) || \\\n    ((NCCL_MAJOR == major) && ((NCCL_MINOR > minor) || \\\n      ((NCCL_MINOR == minor) && (NCCL_PATCH >= patch)) )))\n\nnamespace caffe2 {\n\nnamespace nccl {\n\n#define CAFFE_NCCL_CHECK(condition)    \\\n  do {                                 \\\n    ncclResult_t status = (condition); \\\n    CAFFE_ENFORCE_EQ(                  \\\n        status,                        \\\n        ncclSuccess,                   \\\n        \" \",                           \\\n        \"Error at: \",                  \\\n        __FILE__,                      \\\n        __LINE__,                      \\\n        \": \",                          \\\n        ncclGetErrorString(status));   \\\n  } while (0)\n\nstruct NCCLElement {\n  const TensorCUDA* src{nullptr};\n  TensorCUDA* dst{nullptr};\n  int device{0};\n};\n\nstruct NCCLExecution {\n  int stream_gpu_id{0};\n  cudaStream_t stream{nullptr};\n  std::vector<NCCLElement> elements;\n  size_t root{0};\n};\n\ntemplate <typename T>\nclass NCCL {\n public:\n  static void AllReduce(const NCCLExecution& ex);\n  static void Broadcast(const NCCLExecution& ex);\n  static void Reduce(const NCCLExecution& ex);\n  static void AllGather(const NCCLExecution& ex);\n  static void ReduceScatter(const NCCLExecution& ex);\n};\n}\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/nervana/nervana.h",
    "content": "#ifndef CAFFE2_FB_NERVANA_INIT_H_\n#define CAFFE2_FB_NERVANA_INIT_H_\n\n#include \"caffe2/core/init.h\"\n#include \"caffe2/core/flags.h\"\n\n#include \"nervana_c_api.h\"\n\n/**\n * A flag that specifies the nervana cubin path.\n */\nCAFFE2_DECLARE_string(nervana_cubin_path);\n\nnamespace caffe2 {\n\n/**\n * An empty class to be used in identifying the engine in the math functions.\n */\nclass NervanaEngine {};\n\n/**\n * Returns whether the nervana kernels are loaded or not.\n */\nbool NervanaKernelLoaded();\n\n/**\n * An initialization function that is run once by caffe2::GlobalInit()\n * that initializes the nervana kernels.\n */\nbool Caffe2InitializeNervanaKernels();\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_FB_NERVANA_INIT_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/nervana/nervana_c_api.h",
    "content": "/*\n * Copyright 2015 Baidu USA, Inc. All rights reserved.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *    http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <cuda.h>\n\n#ifdef __cplusplus\nextern \"C\" {\n#else\n#include <stdbool.h>\n#endif\n\n/** Load all the sgemm and hgemm cubins from the given path\n * \\param [in] base_path path to the kernel cubins\n * \\return true on success and false if an error was encountered\n */\nbool nervana_loadKernels(const char* const base_path);\n\n/** Unload all currently loaded cubins\n * \\return true on success and false if an error was encountered\n */\nbool nervana_unloadKernels();\n\n/** Return the number of bytes required for the random state\n *  used in stochastic rounding.\n *  \\return bytes required for random state\n */\n size_t nervana_randStateSizeBytes();\n\n/** Perform BLAS sgemm on alpha * A * B + beta * C, with the\n *  additional options of stochastic rounding and applying a\n *  rectified linear unit (relu) to the result.  
This routine expects\n *  all matrices to be in row-major order.\n *  \\param [in] A Pointer to the data for matrix A\n *  \\param [in] B Pointer to the data for matrix B\n *  \\param [in, out] C Pointer to the data for matrix C\n *  \\param [in] m number of rows of C\n *  \\param [in] n number of columns of C\n *  \\param [in] k inner dimension of multiplication\n *  \\param [in] lda leading dimension of two-dimensional array A\n *  \\param [in] ldb leading dimension of two-dimensional array B\n *  \\param [in] ldc leading dimension of two-dimensional array C\n *  \\param [in] alpha scalar used for multiplication\n *  \\param [in] beta scalar used for multiplication\n *  \\param [in, out] rand_state pointer to memory used for random state\n *              use nervana_randStateSizeBytes to allocate the correct size\n *              if stochastic_round is false, this can be NULL\n *  \\param [in] stochastic_round true if stochastic rounding should be used\n *  \\param [in] apply_relu true if a relu should be applied to the result\n *  \\param [in] stream The cudaStream on which the kernel should be launched\n *  \\param [in] grid Choose a specific grid configuration: 0=32x128, 1=128x32, 2=128x64, 3=128x128\n */\n bool nervana_sgemm(float *A, float *B, float *C,\n                    bool a_t, bool b_t,\n                    int m, int n, int k,\n                    int lda, int ldb, int ldc,\n                    float alpha, float beta,\n                    unsigned int *rand_state,\n                    bool stochastic_round, bool apply_relu,\n                    CUstream stream, int grid=-1\n                    );\n\n/** Perform BLAS hgemm on alpha * A * B + beta * C, with the\n *  additional options of stochastic rounding and applying a\n *  rectified linear unit (relu) to the result.  
This routine expects\n *  all matrices to be in row-major order.\n *  \\param [in] A Pointer to the data for matrix A\n *  \\param [in] B Pointer to the data for matrix B\n *  \\param [in, out] C Pointer to the data for matrix C\n *  \\param [in] m number of rows of C\n *  \\param [in] n number of columns of C\n *  \\param [in] k inner dimension of multiplication\n *  \\param [in] lda leading dimension of two-dimensional array A\n *  \\param [in] ldb leading dimension of two-dimensional array B\n *  \\param [in] ldc leading dimension of two-dimensional array C\n *  \\param [in] alpha scalar used for multiplication\n *  \\param [in] beta scalar used for multiplication\n *  \\param [in, out] rand_state pointer to memory used for random state\n *              use nervana_randStateSizeBytes to allocate the correct size\n *              if stochastic_round is false, this can be NULL\n *  \\param [in] stochastic_round true if stochastic rounding should be used\n *  \\param [in] apply_relu true if a relu should be applied to the result\n *  \\param [in] stream The cudaStream on which the kernel should be launched\n *  \\param [in] grid Choose a specific grid configuration: 0=32x128, 1=128x32, 2=128x64, 3=128x128\n */\n bool nervana_hgemm(short *A, short *B, short *C,\n                    bool a_t, bool b_t,\n                    int m, int n, int k,\n                    int lda, int ldb, int ldc,\n                    float alpha, float beta,\n                    unsigned int *rand_state,\n                    bool stochastic_round, bool apply_relu,\n                    CUstream stream, int grid=-1\n                    );\n\n bool nervana_sgemm_colmajor(float *A, float *B, float *C,\n                             bool a_t, bool b_t,\n                             int m, int n, int k,\n                             int lda, int ldb, int ldc,\n                             float alpha, float beta,\n                             unsigned int *rand_state,\n                            
 bool stochastic_round, bool apply_relu,\n                             CUstream stream, int grid=-1\n                             );\n\n bool nervana_hgemm_colmajor(short *A, short *B, short *C,\n                             bool a_t, bool b_t,\n                             int m, int n, int k,\n                             int lda, int ldb, int ldc,\n                             float alpha, float beta,\n                             unsigned int *rand_state,\n                             bool stochastic_round, bool apply_relu,\n                             CUstream stream, int grid=-1\n                             );\n\n#ifdef __cplusplus\n}\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/observers/time_observer.h",
    "content": "#ifndef CAFFE2_CONTRIB_OBSERVERS_TIME_OBSERVER_H_\n#define CAFFE2_CONTRIB_OBSERVERS_TIME_OBSERVER_H_\n\n#include <unordered_map>\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/observer.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/timer.h\"\n\nnamespace caffe2 {\n\ntemplate <class T>\nclass TimeObserver final : public ObserverBase<T> {\n public:\n  explicit TimeObserver<T>(T* subject) : ObserverBase<T>(subject) {}\n  inline float average_time() const {\n    return total_time_ / iterations_;\n  }\n  float average_time_children() const {\n    float sum = 0.0f;\n    for (auto* op : this->subject_->GetOperators()) {\n      auto* observer =\n          dynamic_cast_if_rtti<TimeObserver<OperatorBase>*>(op->GetObserver());\n      sum += observer->average_time();\n    }\n    return sum / this->subject_->GetOperators().size();\n  }\n  ~TimeObserver() {}\n\n private:\n  Timer timer_;\n  float start_time_ = 0.0f;\n  float total_time_ = 0.0f;\n  int iterations_ = 0;\n\n  bool Start() override;\n  bool Stop() override;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_CONTRIB_OBSERVERS_TIME_OBSERVER_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/prof/htrace_conf.h",
    "content": "#pragma once\n\n#include \"caffe2/core/flags.h\"\n\nCAFFE2_DECLARE_string(caffe2_htrace_span_log_path);\n\nnamespace caffe2 {\n\nconst string defaultHTraceConf(const string& net_name);\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/prof/prof_dag_net.h",
    "content": "#pragma once\n\n#include \"caffe2/core/net_dag.h\"\n#include \"caffe2/proto/prof_dag.pb.h\"\n\nnamespace caffe2 {\n\nstruct Stats {\n  float sum;\n  float sqrsum;\n  size_t cnt;\n};\n\n/**\n * This net type is identical to DAGNet, except that it\n * measures the time taken for each and every operator.\n *\n * To collect statistics from stable runs, this net ignores the first run.\n * Thus, at least two runs are required for this net to print operator metrics.\n */\nclass ProfDAGNet : public DAGNetBase {\n public:\n  ProfDAGNet(const std::shared_ptr<const NetDef>& net_def, Workspace* ws);\n  ~ProfDAGNet();\n  bool SupportsAsync() override {\n    return false;\n  }\n  bool RunAsync() override;\n  ProfDAGProtos GetOperatorStats();\n\n protected:\n  bool RunAt(const std::vector<int>& chain) override;\n  void PrintStats();\n  void ValidateOpTensorDevices();\n  ProfDAGProto ProtoMsg(std::pair<std::string, Stats> op_stat) const;\n  std::vector<Stats> time_per_op_;\n  CaffeMap<std::string, Stats> time_per_op_type_;\n  int runs_ = 0;\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/prof/prof_dag_stats_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_\n#define CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_\n\n#include \"caffe2/contrib/prof/prof_dag_net.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\n// This operator outputs the ProfDAGNet stats\ntemplate <typename T, class Context, class Engine = DefaultEngine>\nclass GetProfDagStatsOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  GetProfDagStatsOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        net_name_(\n            OperatorBase::GetSingleArgument<std::string>(\"net_name\", \"\")) {\n    ws_ = ws;\n  }\n  ~GetProfDagStatsOp() {}\n\n  bool RunOnDevice() override {\n    // Read operator statistics for net_name_\n    CAFFE_ENFORCE(!net_name_.empty(), \"You need to provide net_name\");\n    auto* net = ws_->GetNet(net_name_);\n\n    auto prof_dag_net = dynamic_cast_if_rtti<ProfDAGNet*>(net);\n    CAFFE_ENFORCE(prof_dag_net);\n    auto stats = prof_dag_net->GetOperatorStats();\n\n    // Write protobuf message to the output blob\n    std::string serialized_data;\n    CAFFE_ENFORCE(stats.SerializeToString(&serialized_data));\n    Output(0)->Resize(1);\n    Output(0)->template mutable_data<std::string>()[0] = serialized_data;\n\n    return true;\n  }\n\n protected:\n  std::string net_name_;\n  Workspace* ws_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/shm_mutex/shm_mutex.h",
    "content": "/*\n * This implements a machine-wide mutex to be used\n * to synchronize CUDA calls (memory allocation and frees) and\n * NCCL calls. This prevents a potential deadlock that\n * can occur.\n *\n * The implementation has a few caveats:\n *   - it assumes that PID are not reused\n *   - there is a possible race between the creation (shm_open followed\n *     by ftruncate) and the spin on 'isInitialized' (if the memory region is\n *     not all zeroes).\n *\n * There are two implementations of the mutex and they vary mostly by how\n * they wait:\n *   - The ShmTicketMutex_t is a simple ticket based lock and processes will\n *     queue up and only attempt to grab the lock when it is their turn\n *   - The ShmTTSetMutex_t is a simple test-test-and-set mutex. It is possibly\n *     faster for low contention.\n *\n * Use both as you would use any std::mutex. Both mutexes support try_lock as\n * well.\n */\n#pragma once\n\n#include <fcntl.h>\n#include <signal.h>\n#include <sys/mman.h>\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <unistd.h>\n#include <climits>\n\n#include <atomic>\n#include <mutex>\n#include <string>\n#include <unordered_set>\n\n#include \"caffe2/core/logging.h\"\n\nconst int kTicketDelay = 1000;\nconst int kTimeout = 1000;\n\nclass ShmProcessMutexCheck {\n public:\n  static ShmProcessMutexCheck& getInstance();\n  ShmProcessMutexCheck(const ShmProcessMutexCheck&) = delete;\n  ShmProcessMutexCheck& operator=(const ShmProcessMutexCheck&) = delete;\n\n  bool addLock(const std::string& name);\n  bool removeLock(const std::string& name);\n\n protected:\n  ShmProcessMutexCheck() = default;\n  std::mutex m_;\n  std::unordered_set<std::string> shmLocks_;\n};\n\ntemplate <class Derived>\nstruct shm_traits;\n\nusing ShmBaseHeader = struct {\n  std::atomic<bool> isInitialized;\n  std::atomic<int> countMapped;\n  std::atomic<pid_t> owner;\n};\n\ntemplate <class Impl>\nclass ShmProcessMutex {\n public:\n  using header_t = typename 
shm_traits<Impl>::header_t;\n\n  explicit ShmProcessMutex(const char* name)\n      : name_(name), check_(ShmProcessMutexCheck::getInstance()) {\n    CAFFE_ENFORCE(check_.addLock(name_), \"Creating duplicate lock: \", name_);\n    myPid_ = getpid();\n    // Try to open and map the shared memory location\n    int fd = -1;\n    while (true) {\n      fd = shm_open(name, O_RDWR, 0);\n      if (fd == -1) {\n        CAFFE_ENFORCE(\n            errno == ENOENT,\n            \"shm_open failed with not ENOENT: \",\n            strerror(errno));\n\n        // Create new object\n        fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0700);\n        if (fd == -1 && errno == EEXIST) {\n          // Some other process created first; loop around to re-open\n          continue;\n        }\n        CAFFE_ENFORCE(\n            fd != -1, \"shm_open failed with create: \", strerror(errno));\n        // At this point, we are the creator of the shared object.\n        // Initialize the header_ (it's all 0 right now)\n        auto rv = ftruncate(fd, sizeof(header_t));\n        CAFFE_ENFORCE(rv != -1, \"ftruncate: \", strerror(errno));\n\n        // Map memory and initialize\n        header_ = (header_t*)mmap(\n            nullptr,\n            sizeof(header_t),\n            PROT_READ | PROT_WRITE,\n            MAP_SHARED,\n            fd,\n            0);\n        CAFFE_ENFORCE(header_ != MAP_FAILED, \"mmap: \", strerror(errno));\n\n        header_->countMapped = 1;\n        header_->owner = 0;\n        header_->isInitialized.store(true, std::memory_order_release);\n        close(fd);\n        break;\n      } else {\n        // Object exists, we just map it\n        header_ = (header_t*)mmap(\n            nullptr,\n            sizeof(header_t),\n            PROT_READ | PROT_WRITE,\n            MAP_SHARED,\n            fd,\n            0);\n        CAFFE_ENFORCE(header_ != MAP_FAILED, \"mmap: \", strerror(errno));\n\n        // Wait for memory to be initialized\n        while 
(header_->isInitialized.load(std::memory_order_acquire) == 0) {\n          // Spin; should be done soon\n        }\n        // Now check if we can register ourself by incrementing countMapped.\n        // If we are \"locked-out\" (shared object being destroyed), retry\n        if (header_->countMapped.fetch_add(1, std::memory_order_relaxed) < 0) {\n          header_->countMapped.fetch_sub(1, std::memory_order_relaxed);\n          int rv = munmap(header_, sizeof(header_t));\n          CAFFE_ENFORCE(rv == 0, \"munmap (to retry) failed: \", strerror(errno));\n          close(fd);\n          continue;\n        }\n        close(fd);\n        break;\n      }\n    }\n  }\n\n  ~ShmProcessMutex() {\n    if (header_ != nullptr) {\n      // We are participating in a lock. Destroy\n      internalDestroy();\n    }\n  }\n\n  // Copy and assignment operator are implicitly deleted\n\n  ShmProcessMutex(ShmProcessMutex&& toMove) noexcept\n      : header_(toMove.header_),\n        myPid_(toMove.myPid_),\n        name_(toMove.name_),\n        check_(toMove.check_) {\n    toMove.header_ = nullptr;\n    toMove.myPid_ = -1;\n  }\n\n  ShmProcessMutex& operator=(ShmProcessMutex&& toMove) {\n    CAFFE_ENFORCE(toMove.myPid_ == this->myPid_);\n    if (&toMove != this) {\n      internalDestroy();\n      header_ = toMove.header_;\n      name_ = toMove.name_;\n      toMove.header_ = nullptr;\n      toMove.myPid_ = -1;\n    }\n    return *this;\n  }\n\n  void lock() {\n    pid_t expectedPid = 0;\n    while (not header_->owner.compare_exchange_weak(\n        expectedPid,\n        myPid_,\n        std::memory_order_relaxed,\n        std::memory_order_relaxed)) {\n      if (expectedPid == 0) {\n        continue;\n      }\n      // Someone else has the lock. We check if that process is\n      // still alive\n      if (kill(expectedPid, 0) < 0 && errno == ESRCH) {\n        // The process no longer exists. 
Try to \"steal\" the lock\n        continue;\n      }\n      while (true) {\n        if (static_cast<Impl*>(this)->waitForLock()) {\n          return;\n        }\n        expectedPid = header_->owner.load(std::memory_order_relaxed);\n        if (expectedPid == 0 || (kill(expectedPid, 0) < 0 && errno == ESRCH)) {\n          break;\n        }\n      }\n    }\n  }\n\n  bool try_lock() {\n    pid_t expectedPid = 0;\n    bool firstTry = true;\n    while (not header_->owner.compare_exchange_weak(\n        expectedPid,\n        myPid_,\n        std::memory_order_relaxed,\n        std::memory_order_relaxed)) {\n      if (expectedPid == 0) {\n        continue;\n      }\n      // Someone else has the lock. We check if that process is\n      // still alive\n      if (firstTry && kill(expectedPid, 0) < 0 && errno == ESRCH) {\n        firstTry = false;\n        // The process no longer exists. Try to \"steal\" the lock once\n        continue;\n      }\n      return false;\n    }\n    return true;\n  }\n\n  void unlock() noexcept {\n    header_->owner.store(0, std::memory_order_relaxed);\n    static_cast<Impl*>(this)->subUnlock();\n  }\n\n protected:\n  header_t* header_;\n  pid_t myPid_;\n  std::string name_;\n\n  ShmProcessMutexCheck& check_;\n\n private:\n  void internalDestroy() {\n    CAFFE_ENFORCE(header_ != nullptr, \"Internal error\");\n    CAFFE_ENFORCE(check_.removeLock(name_), \"Double free of lock: \", name_);\n    // Unmap the memory. If we are the last one, \"lock\" the\n    // shared memory and free it if successful\n    int oldCount = header_->countMapped.fetch_sub(1, std::memory_order_relaxed);\n    bool doUnlink = false;\n    if (oldCount == 1) {\n      // We were the last one. 
We attempt to lock out\n      // future processes by exchanging with something very negative\n      // This simplifies the checks when checking for lock out\n      oldCount = 0;\n      if (header_->countMapped.compare_exchange_strong(\n              oldCount,\n              INT_MIN,\n              std::memory_order_relaxed,\n              std::memory_order_relaxed)) {\n        doUnlink = true;\n      }\n    }\n    int rv = munmap(header_, sizeof(header_t));\n    CAFFE_ENFORCE(rv == 0, \"munmap failed: \", strerror(errno));\n    if (doUnlink) {\n      rv = shm_unlink(name_.c_str());\n      CAFFE_ENFORCE(rv == 0, \"shm_unlink failed: \", strerror(errno));\n    }\n  }\n};\n\ntemplate <class T>\nclass ShmTTSetMutex : public ShmProcessMutex<ShmTTSetMutex<T>> {\n public:\n  friend class ShmProcessMutex<ShmTTSetMutex<T>>;\n  explicit ShmTTSetMutex(const char* name, int timeout = kTimeout)\n      : ShmProcessMutex<ShmTTSetMutex>(name), timeout_(timeout) {}\n\n protected:\n  bool waitForLock() {\n    int delay = timeout_;\n    pid_t expectedPid = 0;\n    while (--delay > 0 &&\n           this->header_->owner.load(std::memory_order_relaxed)) {\n      // Empty loop\n      __asm__ __volatile__(\"\");\n    }\n    return this->header_->owner.compare_exchange_strong(\n        expectedPid, this->myPid_, std::memory_order_relaxed);\n  }\n\n  void subUnlock() noexcept {}\n  int timeout_;\n};\n\ntemplate <class T>\nclass ShmTicketMutex : public ShmProcessMutex<ShmTicketMutex<T>> {\n public:\n  friend class ShmProcessMutex<ShmTicketMutex<T>>;\n  explicit ShmTicketMutex(const char* name, int delay = kTicketDelay)\n      : ShmProcessMutex<ShmTicketMutex>(name), delay_(delay) {}\n\n protected:\n  bool waitForLock() {\n    pid_t expectedPid = 0;\n    int slot = this->header_->ticket.fetch_add(1, std::memory_order_relaxed);\n    for (;;) {\n      int spintime =\n          (slot - this->header_->now.load(std::memory_order_relaxed)) * delay_;\n      for (int i = 0; i < spintime; i++) {\n     
   // Empty loop\n        __asm__ __volatile__(\"\");\n      }\n      if (this->header_->now.load(std::memory_order_relaxed) == slot) {\n        break;\n      }\n    }\n    return this->header_->owner.compare_exchange_strong(\n        expectedPid, this->myPid_, std::memory_order_relaxed);\n  }\n\n  void subUnlock() noexcept {\n    this->header_->now.fetch_add(1, std::memory_order_relaxed);\n  }\n\n  int delay_;\n};\n\ntemplate <class T>\nstruct shm_traits<ShmTTSetMutex<T>> {\n  using header_t = T;\n};\n\ntemplate <class T>\nstruct shm_traits<ShmTicketMutex<T>> {\n  using header_t = T;\n};\n\nusing TicketStruct = struct : ShmBaseHeader {\n  std::atomic<unsigned> ticket;\n  std::atomic<unsigned> now;\n};\n\ntemplate class ShmTicketMutex<TicketStruct>;\ntemplate class ShmTTSetMutex<ShmBaseHeader>;\n\nusing ShmTicketMutex_t = ShmTicketMutex<TicketStruct>;\nusing ShmTTSetMutex_t = ShmTTSetMutex<ShmBaseHeader>;\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/torch/torch_op.h",
    "content": "#pragma once\n#include <unordered_map>\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/tensor.h\"\n\nextern \"C\" {\n#include <TH/THStorage.h>\n#include <TH/THTensor.h>\n#include <lua.h>\n#include <luaT.h>\n#include <lualib.h>\n}\n\nnamespace caffe2 {\n\nnamespace torch {\n\ntemplate <typename Context>\nstruct TyTraits {};\n\ntemplate <>\nstruct TyTraits<CPUContext> {\n  static const char* moduleTy;\n  static const char* prelude;\n  static const char* tensorTy;\n  using Tensor = THFloatTensor;\n};\n\ntemplate <typename Context>\nclass Torch final {\n public:\n  using Traits = TyTraits<Context>;\n  Torch() {\n    L_ = luaL_newstate();\n    luaL_openlibs(L_);\n    luaL_loadstring(L_, Traits::prelude);\n    int err = lua_pcall(L_, 0, 0, 0);\n    CAFFE_ENFORCE_EQ(err, 0, lua_tostring(L_, -1));\n  };\n\n  ~Torch() {\n    lua_close(L_);\n  }\n\n  lua_State* L() {\n    return L_;\n  }\n\n  static const char* tensorTy(const Blob& blob) {\n    CAFFE_ENFORCE(blob.template IsType<Tensor<Context>>());\n    const auto& tc = blob.template Get<Tensor<Context>>();\n    CAFFE_ENFORCE(\n        tc.template IsType<float>() + tc.meta().name(), \", \", tc.size());\n    return Traits::tensorTy;\n  }\n\n  void setContext(Context* /*context*/) {}\n\n  void setTensor(typename Traits::Tensor* t, Blob* blob) {\n    CAFFE_ENFORCE_EQ(tensorTy(*blob), Traits::tensorTy);\n    auto* tc = blob->template GetMutable<Tensor<Context>>();\n    CAFFE_ENFORCE_EQ(THFloatTensor_nElement(t), tc->size());\n    THFloatStorage* storage = THFloatStorage_newWithData(\n        tc->template mutable_data<float>(), tc->size());\n    THFloatStorage_clearFlag(storage, TH_STORAGE_FREEMEM);\n    THFloatStorage* original = t->storage;\n    t->storage = storage;\n    THFloatStorage_free(original);\n  }\n\n  std::vector<TIndex> tensorShape(typename Traits::Tensor* t) {\n    auto* size = t->size;\n    return std::vector<TIndex>(size, size + 
THFloatTensor_nDimension(t));\n  }\n\n  typename Traits::Tensor* newTensorAs(const Tensor<Context>& tc) {\n    THLongStorage* thshape = THLongStorage_newWithSize(tc.ndim());\n    for (uint32_t i = 0; i < tc.ndim(); ++i) {\n      THLongStorage_set(thshape, i, tc.dim(i));\n    }\n    THFloatTensor* d = THFloatTensor_newWithSize(thshape, nullptr);\n    THLongStorage_free(thshape);\n    return d;\n  }\n\n  typename Traits::Tensor* blobToTensor(Blob* blob) {\n    CAFFE_ENFORCE_EQ(tensorTy(*blob), Traits::tensorTy);\n    auto* tc = blob->template GetMutable<Tensor<Context>>();\n\n    size_t size = tc->size();\n    THLongStorage* thshape = THLongStorage_newWithSize(tc->ndim());\n    for (int i = 0; i < tc->ndim(); ++i) {\n      THLongStorage_set(thshape, i, tc->dim(i));\n    }\n    THFloatStorage* storage =\n        THFloatStorage_newWithData(tc->template mutable_data<float>(), size);\n    THFloatStorage_clearFlag(storage, TH_STORAGE_FREEMEM);\n    auto* th = THFloatTensor_newWithStorage(storage, 0, thshape, nullptr);\n    THFloatStorage_free(storage);\n    THLongStorage_free(thshape);\n    CAFFE_ENFORCE_EQ(\n        THFloatTensor_storage(th)->data, tc->template mutable_data<float>());\n    return th;\n  }\n\n  std::vector<typename Traits::Tensor*> pushTable(\n      const std::vector<Blob*>& blobs) {\n    if (blobs.empty()) {\n      lua_pushnil(L());\n      return {};\n    }\n\n    if (blobs.size() == 1) {\n      auto* th = blobToTensor(blobs[0]);\n      luaT_pushudata(L(), th, tensorTy(*blobs[0]));\n      return {th};\n    }\n\n    std::vector<typename Traits::Tensor*> res;\n    lua_createtable(L(), blobs.size(), 0);\n    int index = 1;\n    for (auto* blob : blobs) {\n      auto* th = blobToTensor(blob);\n      res.push_back(th);\n      luaT_pushudata(L(), th, tensorTy(*blob));\n      lua_rawseti(L(), -2, index++);\n    }\n    return res;\n  }\n\n  void verifyOutput(Blob* dst, typename Traits::Tensor* torchDst) {\n    if (!luaT_isudata(L(), -1, Traits::tensorTy)) {\n    
  LOG(FATAL) << \"Unsupported Torch tensor type \" << luaT_typename(L(), -1);\n    }\n\n    // Invariant: dst has the same size as src, and has the same data\n    // values as src.\n    auto* src = static_cast<typename Traits::Tensor*>(\n        luaT_toudata(L(), -1, Traits::tensorTy));\n    auto* thDst = static_cast<typename Traits::Tensor*>(torchDst);\n    auto* tcDst = dst->template GetMutable<Tensor<Context>>();\n    CAFFE_ENFORCE(src->storage->data);\n    CAFFE_ENFORCE(src->storage->size);\n    CAFFE_ENFORCE_EQ(src->storage->data, thDst->storage->data);\n    CAFFE_ENFORCE_EQ(src->storage->data, tcDst->template data<float>());\n    CAFFE_ENFORCE_EQ(src->storage->size, thDst->storage->size);\n    CAFFE_ENFORCE_EQ(src->storage->size, tcDst->size());\n  }\n\n  void verifyOutputs(\n      const std::vector<Blob*>& blobs,\n      const std::vector<typename Traits::Tensor*>& tensors) {\n    CAFFE_ENFORCE_EQ(tensors.size(), blobs.size());\n\n    if (blobs.empty()) {\n      return;\n    }\n\n    if (blobs.size() == 1) {\n      verifyOutput(blobs[0], tensors[0]);\n      return;\n    }\n\n    CAFFE_ENFORCE(lua_istable(L(), -1));\n    lua_pushnil(L());\n    for (auto i = 0; i < blobs.size(); ++i) {\n      CAFFE_ENFORCE(lua_next(L(), -2));\n      verifyOutput(blobs[i], tensors[i]);\n      lua_pop(L(), 1);\n    }\n    lua_pop(L(), 1);\n  }\n\nprivate:\n  lua_State* L_;\n};\n}\n\ntemplate <typename Context>\nclass TorchOpBase : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  using OperatorBase::Outputs;\n  using OperatorBase::Inputs;\n  TorchOpBase(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {\n    lua_State* L = state_.L();\n    CAFFE_ENFORCE_EQ(lua_gettop(L), 0);\n    const auto initString = \"return \" +\n        OperatorBase::GetSingleArgument<std::string>(\"init\", \"\") + \":\" +\n        torch::Torch<Context>::Traits::moduleTy + \"()\";\n    CAFFE_ENFORCE_EQ(luaL_loadstring(L, 
initString.c_str()), 0);\n    int err = lua_pcall(L, 0, 1, 0);\n    CAFFE_ENFORCE_EQ(err, 0, lua_tostring(L, -1));\n    // Get number of parameters\n    uint32_t numParams = 0;\n    lua_getfield(L, -1, \"parameters\");\n    lua_pushvalue(L, -2);\n    CAFFE_ENFORCE_EQ(lua_pcall(L, 1, LUA_MULTRET, 0), 0);\n    if (lua_gettop(L) == 1) {\n      numParams = 0;\n    } else {\n      CAFFE_ENFORCE_EQ(lua_gettop(L), 3);\n      numParams = lua_objlen(L, -2);\n      lua_pop(L, 2);\n    }\n    CAFFE_ENFORCE_EQ(\n        numParams, OperatorBase::GetSingleArgument<int>(\"num_params\", 0));\n    // TODO: free parameters?\n    self_ = luaL_ref(L, LUA_REGISTRYINDEX);\n  }\n\n  void reshapeBlobs(\n      const std::vector<Blob*>& inputBlobs,\n      const std::vector<Blob*>& paramBlobs,\n      const std::vector<Blob*>& outputBlobs) {\n    auto cacheEqual = [=]() {\n      if (cachedInputSizes_.size() != inputBlobs.size()) {\n        return false;\n      }\n\n      for (auto i = 0; i < inputBlobs.size(); ++i) {\n        const auto& current =\n            inputBlobs[i]->template Get<Tensor<Context>>().dims();\n        const auto& cached = cachedInputSizes_[i];\n        if (current != cached) {\n          return false;\n        }\n      }\n      return true;\n    };\n\n    if (cacheEqual()) {\n      return;\n    }\n    LOG(INFO) << \"Cached blobs not equal, running :updateOutput to reshape\";\n    lua_State* L = state_.L();\n    CAFFE_ENFORCE_EQ(lua_gettop(L), 0);\n    lua_rawgeti(L, LUA_REGISTRYINDEX, self_);\n    lua_getfield(L, -1, \"updateOutput\");\n    lua_pushvalue(L, -2); // self\n    if (inputBlobs.size() == 1) {\n      const auto& tc = inputBlobs[0]->template Get<Tensor<Context>>();\n      auto* inputData = state_.newTensorAs(tc);\n      luaT_pushudata(L, inputData, torch::Torch<Context>::Traits::tensorTy);\n    } else if (inputBlobs.size() > 1) {\n      lua_createtable(L, inputBlobs.size(), 0);\n      for (auto i = 0; i < inputBlobs.size(); ++i) {\n        const auto* blob = 
inputBlobs[i];\n        const auto& tc = blob->template Get<Tensor<Context>>();\n        auto* inputData = state_.newTensorAs(tc);\n        luaT_pushudata(L, inputData, torch::Torch<Context>::Traits::tensorTy);\n        lua_rawseti(L, -2, i + 1);\n      }\n    }\n    int err = lua_pcall(L, 2, 0, 0);\n    CAFFE_ENFORCE_EQ(err, 0, lua_tostring(L, -1));\n    if (paramBlobs.size() != 0) {\n      lua_getfield(L, -1, \"parameters\");\n      lua_pushvalue(L, -2);\n      int err2 = lua_pcall(L, 1, LUA_MULTRET, 0);\n      CAFFE_ENFORCE_EQ(err2, 0);\n      CAFFE_ENFORCE_EQ(lua_gettop(L), 3);\n      lua_pushnil(L);\n      int i = 0;\n      while (lua_next(L, -3) && i < paramBlobs.size()) {\n        CAFFE_ENFORCE(\n            luaT_isudata(L, -1, torch::Torch<Context>::Traits::tensorTy));\n        auto* param =\n            static_cast<typename torch::Torch<Context>::Traits::Tensor*>(\n                luaT_toudata(L, -1, torch::Torch<Context>::Traits::tensorTy));\n        auto paramShape = state_.tensorShape(param);\n        auto* blob = paramBlobs[i];\n        auto* tc = blob->template GetMutable<Tensor<Context>>();\n        if (tc->size() == 0) {\n          tc->Resize(paramShape);\n          tc->template mutable_data<float>();\n        } else {\n          CAFFE_ENFORCE(tc->dims() == paramShape);\n        }\n        lua_pop(L, 1);\n        i++;\n      }\n      CAFFE_ENFORCE_EQ(i, paramBlobs.size());\n      lua_pop(L, 2);\n    }\n    lua_getfield(L, -1, \"output\");\n    if (outputBlobs.size() == 0) {\n    } else if (outputBlobs.size() == 1) {\n      CAFFE_ENFORCE(\n          luaT_isudata(L, -1, torch::Torch<Context>::Traits::tensorTy));\n      auto* output =\n          static_cast<typename torch::Torch<Context>::Traits::Tensor*>(\n              luaT_toudata(L, -1, torch::Torch<Context>::Traits::tensorTy));\n      auto outputShape = state_.tensorShape(output);\n      auto* blob = outputBlobs[0];\n      auto* tc = blob->template GetMutable<Tensor<Context>>();\n      
tc->Resize(outputShape);\n      tc->template mutable_data<float>();\n    } else {\n      lua_pushnil(L);\n      auto i = 0;\n      while (lua_next(L, -2) && i < outputBlobs.size()) {\n        CAFFE_ENFORCE(\n            luaT_isudata(L, -1, torch::Torch<Context>::Traits::tensorTy));\n        auto* output =\n            static_cast<typename torch::Torch<Context>::Traits::Tensor*>(\n                luaT_toudata(L, -1, torch::Torch<Context>::Traits::tensorTy));\n        auto outputShape = state_.tensorShape(output);\n        auto* blob = outputBlobs[i];\n        auto* tc = blob->template GetMutable<Tensor<Context>>();\n        if (tc->size() == 0) {\n          tc->Resize(outputShape);\n          tc->template mutable_data<float>();\n        } else {\n          CAFFE_ENFORCE(tc->dims() == outputShape);\n        }\n        ++i;\n      }\n      CAFFE_ENFORCE_EQ(i, outputBlobs.size());\n    }\n    lua_pop(L, 2);\n    CAFFE_ENFORCE_EQ(lua_gettop(L), 0);\n\n    cachedInputSizes_.clear();\n    for (const auto* blob : inputBlobs) {\n      const auto& dims = blob->template Get<Tensor<Context>>().dims();\n      cachedInputSizes_.push_back(dims);\n    }\n  }\n\n protected:\n  torch::Torch<Context> state_;\n  int self_{0};\n  std::vector<std::vector<TIndex>> cachedInputSizes_;\n};\n\ntemplate <typename Context>\nclass TorchOp : public TorchOpBase<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  using OperatorBase::Outputs;\n  using OperatorBase::Inputs;\n  using TorchOpBase<Context>::state_;\n  using TorchOpBase<Context>::self_;\n\n  using TorchOpBase<Context>::TorchOpBase;\n\n  bool RunOnDevice() final {\n    const auto numInputs =\n        OperatorBase::GetSingleArgument<int>(\"num_inputs\", 1);\n    const auto numParams =\n        OperatorBase::GetSingleArgument<int>(\"num_params\", 0);\n    const auto numOutputs =\n        OperatorBase::GetSingleArgument<int>(\"num_outputs\", 1);\n    CAFFE_ENFORCE_EQ(InputSize(), numInputs + numParams);\n    
CAFFE_ENFORCE_EQ(OutputSize(), numOutputs);\n\n    std::vector<Blob*> inputBlobs;\n    for (auto i = 0; i < numInputs; ++i) {\n      inputBlobs.push_back(const_cast<Blob*>(Inputs()[i]));\n    }\n    std::vector<Blob*> paramBlobs;\n    for (auto i = numInputs; i < numInputs + numParams; ++i) {\n      paramBlobs.push_back(const_cast<Blob*>(Inputs()[i]));\n    }\n    // Outputs must already be pre-sized\n    this->reshapeBlobs(inputBlobs, paramBlobs, Outputs());\n\n    lua_State* L = state_.L();\n    CAFFE_ENFORCE_EQ(lua_gettop(L), 0);\n    state_.setContext(&context_);\n\n    // Deserialize self table\n    lua_rawgeti(L, LUA_REGISTRYINDEX, self_);\n\n    auto torchOutputs = state_.pushTable(Outputs());\n    // set the output field\n    lua_setfield(L, -2, \"output\");\n    // set the parameters\n    if (numParams != 0) {\n      // get the parameters into the stack\n      lua_getfield(L, -1, \"parameters\");\n      lua_pushvalue(L, -2);\n      int err = lua_pcall(L, 1, 1, 0);\n      CAFFE_ENFORCE_EQ(err, 0);\n      // iterate the parameters table to put tblobs inside\n      lua_pushnil(L);\n      auto i = 0;\n      while (lua_next(L, -2) && i < numParams) {\n        CAFFE_ENFORCE(\n            luaT_isudata(L, -1, state_.tensorTy(*paramBlobs[i])),\n            luaT_typename(L, -1));\n        auto* udata = luaT_toudata(L, -1, state_.tensorTy(*paramBlobs[i]));\n        state_.setTensor(\n            static_cast<typename torch::Torch<Context>::Traits::Tensor*>(udata),\n            const_cast<Blob*>(paramBlobs[i]));\n        i++;\n        lua_pop(L, 1);\n      }\n      CAFFE_ENFORCE_EQ(i, numParams);\n      lua_pop(L, 1); // pop the parameter table\n    }\n    // call updateOutput\n    // | self\n    lua_getfield(L, -1, \"updateOutput\");\n    // | self | updateOutput\n    lua_pushvalue(L, -2);\n    // | self | updateOutput | self\n    auto torchInputs = state_.pushTable(inputBlobs);\n    // | self | updateOutput | self | inputs\n    int err = lua_pcall(L, 2, 1, 0); // 
doesn't need the output\n    CAFFE_ENFORCE_EQ(err, 0, lua_tostring(L, -1));\n    state_.verifyOutputs(Outputs(), torchOutputs);\n    lua_pop(L, 2);\n    CAFFE_ENFORCE_EQ(lua_gettop(L), 0);\n    return true;\n  }\n};\n\ntemplate <typename Context>\nclass TorchInitOp : public TorchOpBase<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  using OperatorBase::Outputs;\n  using OperatorBase::Inputs;\n  using TorchOpBase<Context>::TorchOpBase;\n\n  bool RunOnDevice() final {\n    const auto numInputs =\n        OperatorBase::GetSingleArgument<int>(\"num_inputs\", 1);\n    const auto numParams =\n        OperatorBase::GetSingleArgument<int>(\"num_params\", 0);\n    const auto numOutputs =\n        OperatorBase::GetSingleArgument<int>(\"num_outputs\", 1);\n    std::vector<Blob*> inputBlobs;\n    for (auto i = 0; i < numInputs; ++i) {\n      inputBlobs.push_back(const_cast<Blob*>(Inputs()[i]));\n    }\n    std::vector<Blob*> paramBlobs;\n    for (auto i = numInputs; i < numInputs + numParams; ++i) {\n      paramBlobs.push_back(const_cast<Blob*>(Inputs()[i]));\n    }\n    this->reshapeBlobs(inputBlobs, paramBlobs, Outputs());\n    return true;\n  }\n};\n\ntemplate <typename Context>\nclass TorchGradientOp : public TorchOpBase<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  using OperatorBase::Outputs;\n  using OperatorBase::Inputs;\n  using TorchOpBase<Context>::state_;\n  using TorchOpBase<Context>::self_;\n  using TorchOpBase<Context>::TorchOpBase;\n\n  bool RunOnDevice() final {\n    const auto numInputs =\n        OperatorBase::GetSingleArgument<int>(\"num_inputs\", 1);\n    const auto numParams =\n        OperatorBase::GetSingleArgument<int>(\"num_params\", 0);\n    const auto numOutputs =\n        OperatorBase::GetSingleArgument<int>(\"num_outputs\", 1);\n    lua_State* L = state_.L();\n    CAFFE_ENFORCE_EQ(lua_gettop(L), 0);\n    // inputs, params, outputs, grad outputs\n    CAFFE_ENFORCE_EQ(InputSize(), numInputs + numParams + 2 * numOutputs);\n    
// grad inputs, grad params\n    CAFFE_ENFORCE_EQ(OutputSize(), numInputs + numParams);\n    state_.setContext(&context_);\n\n    std::vector<Blob*> outputBlobs;\n    for (auto i = numInputs + numParams; i < numInputs + numParams + numOutputs;\n         ++i) {\n      outputBlobs.push_back(const_cast<Blob*>(Inputs()[i]));\n    }\n    std::vector<Blob*> inputBlobs;\n    for (auto i = 0; i < numInputs; ++i) {\n      inputBlobs.push_back(const_cast<Blob*>(Inputs()[i]));\n    }\n    std::vector<Blob*> gradOutputBlobs;\n    for (auto i = numInputs + numParams + numOutputs;\n         i < numInputs + numParams + numOutputs + numOutputs;\n         ++i) {\n      gradOutputBlobs.push_back(const_cast<Blob*>(Inputs()[i]));\n    }\n    std::vector<Blob*> gradInputBlobs;\n    for (auto i = 0; i < numInputs; ++i) {\n      gradInputBlobs.push_back(Outputs()[i]);\n    }\n    std::vector<Blob*> paramBlobs;\n    for (auto i = numInputs; i < numInputs + numParams; ++i) {\n      paramBlobs.push_back(const_cast<Blob*>(Inputs()[i]));\n    }\n    std::vector<Blob*> gradParamBlobs;\n    for (auto i = numInputs; i < numInputs + numParams; ++i) {\n      gradParamBlobs.push_back(Outputs()[i]);\n    }\n\n    // Ensure shapes are correct.\n    for (auto i = 0; i < OutputSize(); ++i) {\n      Output(i)->ResizeLike(Input(i));\n      Output(i)->template mutable_data<float>();\n    }\n\n    lua_rawgeti(L, LUA_REGISTRYINDEX, self_);\n    state_.pushTable(outputBlobs);\n    lua_setfield(L, -2, \"output\");\n\n    const auto& torchGradInputs = state_.pushTable(gradInputBlobs);\n    lua_setfield(L, -2, \"gradInput\");\n    if (numParams != 0) {\n      // get the parameters into the stack\n      lua_getfield(L, -1, \"parameters\");\n      lua_pushvalue(L, -2);\n      int err = lua_pcall(L, 1, LUA_MULTRET, 0);\n      CAFFE_ENFORCE_EQ(err, 0, lua_tostring(L, -1));\n      // iterate the parameters table to put tblobs inside\n      lua_pushnil(L);\n      auto i = 0;\n      while (lua_next(L, -3) && i < 
numParams) {\n        CAFFE_ENFORCE(luaT_isudata(L, -1, state_.tensorTy(*paramBlobs[i])));\n        auto* udata = luaT_toudata(L, -1, state_.tensorTy(*paramBlobs[i]));\n        state_.setTensor(\n            static_cast<typename torch::Torch<Context>::Traits::Tensor*>(udata),\n            const_cast<Blob*>(paramBlobs[i]));\n        i++;\n        lua_pop(L, 1);\n      }\n      CAFFE_ENFORCE_EQ(i, numParams);\n      // iterate the grad of params\n      lua_pushnil(L);\n      i = 0;\n      while (lua_next(L, -2) && i < numParams) {\n        CAFFE_ENFORCE(luaT_isudata(L, -1, state_.tensorTy(*gradParamBlobs[i])));\n        auto* udata = luaT_toudata(L, -1, state_.tensorTy(*gradParamBlobs[i]));\n        state_.setTensor(\n            static_cast<typename torch::Torch<Context>::Traits::Tensor*>(udata),\n            const_cast<Blob*>(gradParamBlobs[i]));\n        i++;\n        lua_pop(L, 1);\n      }\n      CAFFE_ENFORCE_EQ(i, numParams);\n      lua_pop(L, 2); // pop the parameters\n    }\n    lua_getfield(L, -1, \"zeroGradParameters\");\n    lua_pushvalue(L, -2);\n    CAFFE_ENFORCE_EQ(lua_pcall(L, 1, 0, 0), 0);\n    state_.pushTable(inputBlobs);\n    state_.pushTable(gradOutputBlobs);\n    // call\n    lua_getfield(L, -3, \"accGradParameters\");\n    lua_pushvalue(L, -4);\n    lua_pushvalue(L, -4);\n    lua_pushvalue(L, -4);\n    lua_pushnumber(L, 1);\n    int err = lua_pcall(L, 4, 0, 0); // doesn't need the output\n    CAFFE_ENFORCE_EQ(err, 0, lua_tostring(L, -1));\n    lua_getfield(L, -3, \"updateGradInput\");\n    lua_pushvalue(L, -4);\n    lua_pushvalue(L, -4);\n    lua_pushvalue(L, -4);\n    err = lua_pcall(L, 3, 1, 0); // doesn't need the output\n    CAFFE_ENFORCE_EQ(err, 0, lua_tostring(L, -1));\n    state_.verifyOutputs(gradInputBlobs, torchGradInputs);\n    lua_pop(L, 4);\n    CAFFE_ENFORCE_EQ(lua_gettop(L), 0);\n    return true;\n  }\n};\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/contrib/warpctc/ctc_op.h",
    "content": "#pragma once\n\n#include <ctc.h>\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n#include \"caffe2/core/common_cudnn.h\"\n\n#define CTC_CHECK(condition)           \\\n  do {                                 \\\n    ctcStatus_t status = condition;    \\\n    CAFFE_ENFORCE_EQ(                  \\\n        status,                        \\\n        CTC_STATUS_SUCCESS,            \\\n        \" Error at: \",                 \\\n        __FILE__,                      \\\n        \":\",                           \\\n        __LINE__,                      \\\n        \": \",                          \\\n        ::ctcGetStatusString(status)); \\\n  } while (0)\n\nnamespace caffe2 {\n\nnamespace detail {\n\ntemplate <typename Context>\nctcComputeInfo workspaceInfo(const Context& context);\n\n}\n\ntemplate <typename T, typename Context>\nclass CTCOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  CTCOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    // inputs\n    const auto& inputs = Input(INPUTS);\n    const auto minibatchSize = inputs.dim(1);\n    const auto alphabetSize = inputs.dim(2);\n    const auto& labels = OperatorBase::template Input<TensorCPU>(LABELS);\n    const auto& labelLengths =\n        OperatorBase::template Input<TensorCPU>(LABEL_LENGTHS);\n    const auto& inputLengths =\n        OperatorBase::template Input<TensorCPU>(INPUT_LENGTHS);\n\n    // outputs\n    auto* costs = OperatorBase::template Output<TensorCPU>(COSTS);\n    costs->ResizeLike(labelLengths);\n    auto* gradients = Output(GRADIENTS);\n    gradients->ResizeLike(inputs);\n    auto* workspace = Output(WORKSPACE);\n\n    size_t workspaceSizeBytes;\n    CTC_CHECK(get_workspace_size(\n        labelLengths.template data<int>(),\n        inputLengths.template data<int>(),\n        alphabetSize,\n        
minibatchSize,\n        detail::workspaceInfo(context_),\n        &workspaceSizeBytes));\n    workspace->Resize(workspaceSizeBytes);\n    CTC_CHECK(compute_ctc_loss(\n        inputs.template data<T>(),\n        gradients->template mutable_data<T>(),\n        labels.template data<int>(),\n        labelLengths.template data<int>(),\n        inputLengths.template data<int>(),\n        alphabetSize,\n        minibatchSize,\n        costs->template mutable_data<T>(),\n        workspace->template mutable_data<uint8_t>(),\n        detail::workspaceInfo(context_)));\n    return true;\n  }\n\nprivate:\n  INPUT_TAGS(INPUTS, LABELS, LABEL_LENGTHS, INPUT_LENGTHS);\n  OUTPUT_TAGS(GRADIENTS, COSTS, WORKSPACE);\n};\n}\n\n#undef CTC_CHECK\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/allocator.h",
    "content": "#ifndef CAFFE2_CORE_ALLOCATOR_H_\n#define CAFFE2_CORE_ALLOCATOR_H_\n\n#include <unordered_map>\n\n#include \"caffe2/core/logging.h\"\n\nCAFFE2_DECLARE_bool(caffe2_report_cpu_memory_usage);\nCAFFE2_DECLARE_bool(caffe2_cpu_allocator_do_zero_fill);\n\nnamespace caffe2 {\n\n// Use 32-byte alignment should be enough for computation up to AVX512.\nconstexpr size_t gCaffe2Alignment = 32;\n\nusing MemoryDeleter = void (*)(void*);\n\n// A helper function that is basically doing nothing.\nvoid NoDelete(void*);\n\n// A virtual allocator class to do memory allocation and deallocation.\nstruct CPUAllocator {\n  CPUAllocator() {}\n  virtual ~CPUAllocator() noexcept {}\n  virtual std::pair<void*, MemoryDeleter> New(size_t nbytes) = 0;\n  virtual MemoryDeleter GetDeleter() = 0;\n};\n\n// A virtual struct that is used to report Caffe2's memory allocation and\n// deallocation status\nclass MemoryAllocationReporter {\n public:\n  MemoryAllocationReporter() : allocated_(0) {}\n  void New(void* ptr, size_t nbytes);\n  void Delete(void* ptr);\n\n private:\n  std::mutex mutex_;\n  std::unordered_map<void*, size_t> size_table_;\n  size_t allocated_;\n};\n\nstruct DefaultCPUAllocator final : CPUAllocator {\n  DefaultCPUAllocator() {}\n  ~DefaultCPUAllocator() override {}\n  std::pair<void*, MemoryDeleter> New(size_t nbytes) override {\n    void* data = nullptr;\n#ifdef __ANDROID__\n    data = memalign(gCaffe2Alignment, nbytes);\n#elif defined(_MSC_VER)\n    data = _aligned_malloc(nbytes, gCaffe2Alignment);\n#else\n    CAFFE_ENFORCE_EQ(posix_memalign(&data, gCaffe2Alignment, nbytes), 0);\n#endif\n    CAFFE_ENFORCE(data);\n    if (FLAGS_caffe2_cpu_allocator_do_zero_fill) {\n      memset(data, 0, nbytes);\n    }\n    return {data, Delete};\n  }\n\n#ifdef _MSC_VER\n  static void Delete(void* data) {\n    _aligned_free(data);\n  }\n#else\n  static void Delete(void* data) {\n    free(data);\n  }\n#endif\n\n  MemoryDeleter GetDeleter() override {\n    return Delete;\n  }\n};\n\n// 
Get the CPU Alloctor.\nCPUAllocator* GetCPUAllocator();\n// Sets the CPU allocator to the given allocator: the caller gives away the\n// ownership of the pointer.\nvoid SetCPUAllocator(CPUAllocator* alloc);\n\n} // namespace caffe2\n\n#endif // CAFFE2_CORE_ALLOCATOR_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/asan.h",
    "content": "#pragma once\n\n// Detect address sanitizer as some stuff doesn't work with it\n\n#undef CAFFE2_ASAN_ENABLED\n\n// for clang\n#if defined(__has_feature)\n#if ((__has_feature(address_sanitizer)))\n#define CAFFE2_ASAN_ENABLED 1\n#endif\n#endif\n\n// for gcc\n#if defined(__SANITIZE_ADDRESS__)\n#if __SANITIZE_ADDRESS__\n#if !defined(CAFFE2_ASAN_ENABLED)\n#define CAFFE2_ASAN_ENABLED 1\n#endif\n#endif\n#endif\n\n#if !defined(CAFFE2_ASAN_ENABLED)\n#define CAFFE2_ASAN_ENABLED 0\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/blob.h",
    "content": "#ifndef CAFFE2_CORE_BLOB_H_\n#define CAFFE2_CORE_BLOB_H_\n\n#include <cstddef>\n#include <sstream>\n#include <typeinfo>\n#include <type_traits>\n#include <vector>\n\n#include \"caffe2/core/blob_serializer_base.h\"\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/typeid.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nnamespace caffe2 {\n\n/**\n * @brief Blob is a general container that hosts a typed pointer.\n *\n * A Blob hosts a pointer as well as its type, and takes charge of deleting it\n * properly when the blob is deallocated or re-allocated with a new type. A blob\n * could contain anything, although the most common case is to contain a Tensor.\n */\nclass Blob {\n public:\n  /**\n   * Initializes an empty Blob.\n   */\n  Blob() : meta_(), pointer_(nullptr) {}\n  ~Blob() { Reset(); }\n\n  Blob(Blob&& other) noexcept\n      : meta_(std::move(other.meta_)),\n        pointer_(std::move(other.pointer_)),\n        destroy_(std::move(other.destroy_)) {\n    other.meta_ = {};\n    other.pointer_ = nullptr;\n    other.destroy_ = nullptr;\n  }\n\n  Blob& operator=(Blob&& other) noexcept {\n    meta_ = std::move(other.meta_);\n    pointer_ = std::move(other.pointer_);\n    destroy_ = std::move(other.destroy_);\n    other.meta_ = {};\n    other.pointer_ = nullptr;\n    other.destroy_ = nullptr;\n    return *this;\n  }\n\n  /**\n   * Checks if the content stored in the blob is of type T.\n   */\n  template <class T>\n  bool IsType() const { return meta_.Match<T>(); }\n\n  /**\n   * Returns the meta info of the blob.\n   */\n  inline const TypeMeta& meta() const { return meta_; }\n\n  /**\n   * Returns a printable typename of the blob.\n   */\n  inline const char* TypeName() const { return meta_.name(); }\n\n  /**\n   * @brief Gets the const reference of the stored object. 
The code checks if\n   * the stored object is of the desired type.\n   */\n  template <class T>\n  const T& Get() const {\n    CAFFE_ENFORCE(IsType<T>(),\n        \"wrong type for the Blob instance. Blob contains \",\n        meta_.name(), \" while caller expects \", TypeMeta::Name<T>());\n    return *static_cast<const T*>(pointer_);\n  }\n\n  const void* GetRaw() const {\n    return pointer_;\n  }\n  void* GetRaw() {\n    return pointer_;\n  }\n\n  /**\n   * @brief Gets a mutable pointer to the stored object.\n   *\n   * If the current object is not of the right type, a new object is created\n   * and the old object is freed. Note that type T should have a default\n   * constructor. Otherwise, create the object yourself first, and use\n   * Reset().\n   */\n  template <class T>\n  T* GetMutable(bool* is_new_object=nullptr) {\n    if (IsType<T>()) {\n      if (is_new_object) *is_new_object = false;\n      return static_cast<T*>(pointer_);\n    } else {\n      if (is_new_object) *is_new_object = true;\n      VLOG(1) << \"Create new mutable object \" << TypeMeta::Name<T>();\n      return Reset<T>(new T());\n    }\n  }\n\n  /**\n   * Sets the underlying object to the allocated one. The Blob then takes over\n   * the ownership of the passed in pointer. If there is already an object in\n   * the Blob, the old object is freed.\n   *\n   * This is used when the underlying class T does not have a default ctor, or\n   * complex initializations needs to be done outside the blob.\n   */\n  template <class T>\n  T* Reset(T* allocated) {\n    if (pointer_ && destroy_) {\n      destroy_(pointer_);\n    }\n    meta_ = TypeMeta::Make<T>();\n    pointer_ = static_cast<void*>(allocated);\n    destroy_ = &Destroy<T>;\n    return allocated;\n  }\n\n  /**\n   * Sets the underlying object to the allocated one, but does not take over\n   * the ownership of the passed in pointer. 
If there is already an object in\n   * the Blob, the old object is freed.\n   *\n   * Unlike Reset, this does not take over the ownership of the pointer and the\n   * caller is responsible for making sure that the lifetime of the allocated\n   * blob outlasts the lifetime of any access to this blob, until another Reset\n   * call is made or the blob is destructed.\n   */\n  template <class T>\n  typename std::remove_const<T>::type* ShareExternal(\n      typename std::remove_const<T>::type* allocated) {\n    return static_cast<T*>(ShareExternal(\n        static_cast<void*>(allocated),\n        TypeMeta::Make<typename std::remove_const<T>::type>()));\n  }\n\n  void* ShareExternal(void* allocated, const TypeMeta& meta) {\n    if (pointer_ && destroy_) {\n      destroy_(pointer_);\n    }\n    meta_ = meta;\n    pointer_ = static_cast<void*>(allocated);\n    destroy_ = nullptr;\n    return allocated;\n  }\n\n  /**\n   * Resets the Blob to an empty one.\n   */\n  inline void Reset() {\n    if (pointer_ && destroy_) {\n      destroy_(pointer_);\n    }\n    pointer_ = nullptr;\n    meta_ = TypeMeta();\n    destroy_ = nullptr;\n  }\n\n  /**\n   * Serializes the current blob, if possible. Note that this serialization uses\n   * the registration mechanism and one has to implement specific serialization\n   * approaches for specific classes. Acceptor should take care of writing data\n   * to the actual storage.\n   */\n  void Serialize(\n      const string& name,\n      BlobSerializerBase::SerializationAcceptor acceptor,\n      int chunk_size = kDefaultChunkSize) const;\n\n  /**\n   * @brief Convenience function to serialize a blob to a string.\n   *\n   * This is a conveinence function to serialize small Blobs that produce\n   * manageable serialized strings. 
To serialize big blobs such as\n   * large sparse tensors, use the fully-functional interface in\n   * blob_serializer_base.h.\n   *\n   * NOTE: this function doesn't do chunking and might break with big tensors.\n   */\n  string Serialize(const string& name) const;\n\n  /**\n   * @brief Swaps the underlying storage of two blobs.\n   */\n  void swap(Blob& rhs) {\n    using std::swap;\n    swap(meta_, rhs.meta_);\n    swap(pointer_, rhs.pointer_);\n    swap(destroy_, rhs.destroy_);\n  }\n\n  /**\n   * Deserializes from a string containing either BlobProto or TensorProto. If\n   * the deserialization fails, the content in the blob should no longer be\n   * trusted.\n   */\n  void Deserialize(const string& content);\n  void Deserialize(const BlobProto& proto);\n\n private:\n  /**\n   * @brief A destroy call that is used to properly deconstruct objects.\n   */\n  template <class T>\n  static void Destroy(void* pointer) {\n    delete static_cast<T*>(pointer);\n  }\n  typedef void (*DestroyCall)(void *);\n  TypeMeta meta_;\n  void* pointer_ = nullptr;\n  DestroyCall destroy_ = nullptr;\n\n  DISABLE_COPY_AND_ASSIGN(Blob);\n};\n\ninline void swap(Blob& lhs, Blob& rhs) {\n  lhs.swap(rhs);\n}\n\n}  // namespace caffe2\n#endif  // CAFFE2_CORE_BLOB_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/blob_serialization.h",
    "content": "#ifndef CAFFE2_CORE_BLOB_SERIALIZATION_H_\n#define CAFFE2_CORE_BLOB_SERIALIZATION_H_\n\n#include <limits>\n#include <future>\n\n#include <google/protobuf/repeated_field.h>\n\n#include \"caffe2/core/blob.h\"\n#include \"caffe2/core/blob_serializer_base.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/core/typeid.h\"\n#include \"caffe2/core/types.h\"\n#include \"caffe2/utils/simple_queue.h\"\n\nCAFFE2_DECLARE_int(caffe2_tensor_chunk_size);\nCAFFE2_DECLARE_int(caffe2_max_tensor_serializer_threads);\nCAFFE2_DECLARE_bool(caffe2_serialize_fp16_as_bytes);\n\nnamespace caffe2 {\n\nconstexpr auto kTensorBlobType = \"Tensor\";\n// String used to separate chunk id from the blob name when storing in DB\nconstexpr auto kChunkIdSeparator = \"#%\";\n\n// The Blob serialization registry and serializer creator functions.\nCAFFE_DECLARE_TYPED_REGISTRY(\n    BlobSerializerRegistry,\n    CaffeTypeId,\n    BlobSerializerBase);\n#define REGISTER_BLOB_SERIALIZER(id, ...) \\\n  CAFFE_REGISTER_TYPED_CLASS(BlobSerializerRegistry, id, __VA_ARGS__)\n// Creates an operator with the given operator definition.\ninline unique_ptr<BlobSerializerBase> CreateSerializer(CaffeTypeId id) {\n  return BlobSerializerRegistry()->Create(id);\n}\n\n/**\n * @brief TensorSerializer is the serializer for Tensors.\n *\n * TensorSerializer takes in a blob that contains a Tensor, and serializes it\n * into a TensorProto protocol buffer.\n */\ntemplate <class Context>\nclass TensorSerializer : public BlobSerializerBase {\n public:\n  TensorSerializer() : context_() {}\n  ~TensorSerializer() override {}\n  /**\n   * Serializes a Blob. 
Note that this blob has to contain Tensor<Context>,\n   * otherwise this function produces a fatal error.\n   */\n  void Serialize(\n      const Blob& blob,\n      const string& name,\n      SerializationAcceptor acceptor) override;\n  void SerializeWithChunkSize(\n      const Blob& blob,\n      const string& name,\n      SerializationAcceptor acceptor,\n      int chunk_size) override;\n\n  void Serialize(const Tensor<Context>& tensor, const string& name,\n                 TensorProto* proto, size_t chunkBegin, int32_t chunkSize);\n\n private:\n  // A utility function to store the device context detauls.\n  void StoreDeviceDetail(const Tensor<Context>& input, TensorProto* proto);\n  Context context_;\n};\n\n/**\n * @brief BlobDeserializerBase is an abstract class that deserializes a blob\n * from a BlobProto or a TensorProto.\n */\nclass BlobDeserializerBase {\n public:\n  virtual ~BlobDeserializerBase() {}\n\n  // Deserializes from a BlobProto object.\n  virtual void Deserialize(const BlobProto& proto, Blob* blob) = 0;\n};\n\nCAFFE_DECLARE_REGISTRY(BlobDeserializerRegistry, BlobDeserializerBase);\n#define REGISTER_BLOB_DESERIALIZER(name, ...) \\\n  CAFFE_REGISTER_CLASS(BlobDeserializerRegistry, name, __VA_ARGS__)\n// Creates an operator with the given operator definition.\ninline unique_ptr<BlobDeserializerBase> CreateDeserializer(const string& type) {\n  return BlobDeserializerRegistry()->Create(type);\n}\n\n/**\n * @brief TensorDeserializer is the deserializer for Tensors.\n *\n * The device that the deserialized Tensor will live under is determined by the\n * device_detail field. 
If you want to specify the device of the deserialized\n * tensor, change the TensorProto's corresponding fields before calling\n * Deserialize.\n */\ntemplate <class Context>\nclass TensorDeserializer : public BlobDeserializerBase {\n public:\n  void Deserialize(const BlobProto& proto, Blob* blob) override;\n  void Deserialize(const TensorProto& proto, Tensor<Context>* tensor);\n};\n\n////////////////////////////////////////////////////////////////////////////////\n// Implementations\n////////////////////////////////////////////////////////////////////////////////\n\nnamespace detail {\ntemplate <typename SrcType, typename DstType, class Context>\ninline void CopyToProtoAsIs(\n    const size_t size,\n    const SrcType* src,\n    google::protobuf::RepeatedField<DstType>* field,\n    Context* context) {\n  static_assert(\n      sizeof(SrcType) == sizeof(DstType),\n      \"The source type and dest type cannot be copied as-is. Did \"\n      \"you mean CopyToProtoWithCast?\");\n  field->Reserve(size);\n  for (int i = 0; i < size; ++i) {\n    field->Add(0);\n  }\n  context->template Copy<SrcType, Context, CPUContext>(\n      size, src, reinterpret_cast<SrcType*>(field->mutable_data()));\n  // Make sure that we finish the copy into the protobuf.\n  context->FinishDeviceComputation();\n}\n\ntemplate <typename SrcType, typename DstType, class Context>\ninline void CopyToProtoWithCast(\n    const size_t size,\n    const SrcType* src,\n    google::protobuf::RepeatedField<DstType>* field,\n    Context* context) {\n  // TODO: we are having one unnecessary copy here if the context is already\n  // CPUContext. 
Remove it if it is performance critical.\n  unique_ptr<SrcType[]> buffer(new SrcType[size]);\n  context->template Copy<SrcType, Context, CPUContext>(\n      size, src, buffer.get());\n  context->FinishDeviceComputation();\n  field->Reserve(size);\n  for (int i = 0; i < size; ++i) {\n    field->Add(static_cast<DstType>(buffer[i]));\n  }\n}\n\ntemplate <typename SrcType, typename DstType, class Context>\ninline void CopyFromProtoAsIs(\n    const size_t size,\n    const google::protobuf::RepeatedField<SrcType>& field,\n    DstType* dst,\n    Context* context) {\n  static_assert(\n      sizeof(SrcType) == sizeof(DstType),\n      \"The source type and dest type cannot be copied as-is. Did \"\n      \"you mean CopyFromProtoWithCast?\");\n  CAFFE_ENFORCE_EQ(size, field.size(), \"Incorrect proto field size.\");\n  context->template Copy<DstType, CPUContext, Context>(\n      size, reinterpret_cast<const DstType*>(field.data()), dst);\n}\n\ntemplate <typename SrcType, typename DstType, class Context>\ninline void CopyFromProtoWithCast(\n    const size_t size,\n    const google::protobuf::RepeatedField<SrcType>& field,\n    DstType* dst,\n    Context* context) {\n  CAFFE_ENFORCE_EQ(size, field.size(), \"Incorrect proto field size.\");\n  // TODO: we are having one unnecessary copy here if the context is already\n  // CPUContext. 
Remove it if it is performance critical.\n  unique_ptr<DstType[]> buffer(new DstType[size]);\n  const SrcType* src = field.data();\n  for (int i = 0; i < size; ++i) {\n    buffer[i] = static_cast<DstType>(src[i]);\n  }\n  context->template Copy<DstType, CPUContext, Context>(size, buffer.get(), dst);\n}\n\n}  // namespace detail\n\ntemplate <class Context>\nvoid TensorSerializer<Context>::Serialize(\n    const Blob& blob,\n    const string& name,\n    BlobSerializerBase::SerializationAcceptor acceptor) {\n  this->SerializeWithChunkSize(blob, name, acceptor, kDefaultChunkSize);\n}\n\ntemplate <class Context>\nvoid TensorSerializer<Context>::SerializeWithChunkSize(\n    const Blob& blob,\n    const string& name,\n    BlobSerializerBase::SerializationAcceptor acceptor,\n    int chunk_size) {\n  CAFFE_ENFORCE(blob.IsType<Tensor<Context>>());\n  const auto& tensor = blob.template Get<Tensor<Context>>();\n  if (chunk_size == kNoChunking) {\n    chunk_size = tensor.size() + 1; // to account for empty tensors\n  } else if (chunk_size == kDefaultChunkSize) {\n    chunk_size = FLAGS_caffe2_tensor_chunk_size;\n  }\n\n  auto processChunk = [&](int64_t chunkStart) {\n    BlobProto blob_proto;\n    blob_proto.set_name(name);\n    blob_proto.set_type(kTensorBlobType);\n    TensorProto& proto = *blob_proto.mutable_tensor();\n    proto.set_name(name);\n    this->Serialize(\n        tensor, name, blob_proto.mutable_tensor(), chunkStart, chunk_size);\n    acceptor(\n        MakeString(name, kChunkIdSeparator, chunkStart / chunk_size),\n        blob_proto.SerializeAsString());\n  };\n\n#ifndef __ANDROID__\n  std::vector<std::future<void>> futures;\n  // Poorman's IOBound ThreadPool\n  SimpleQueue<size_t> chunkQueue;\n  auto task = [&]() {\n    size_t chunkStart;\n    while (chunkQueue.Pop(&chunkStart)) {\n      processChunk(chunkStart);\n    }\n  };\n  if (tensor.size() > chunk_size) {\n    for (int i = 0; i < FLAGS_caffe2_max_tensor_serializer_threads; ++i) {\n      
futures.emplace_back(std::async(std::launch::async, task));\n    }\n  }\n#endif\n\n  VLOG(1) << \"Serializing blob \" << name;\n  // Serialize whole vector. If vector is empty, it's shape still needs to be\n  // serialized in empty proto\n  for (size_t chunkBegin = 0;\n       chunkBegin < std::max(tensor.size(), static_cast<TIndex>(1));\n       chunkBegin += chunk_size) {\n    VLOG(2) << \"Starting a chunk at \" << chunkBegin;\n#ifndef __ANDROID__\n    if (tensor.size() > chunk_size) {\n      chunkQueue.Push(chunkBegin);\n    } else {\n      // Sync mode for small tensors\n      processChunk(chunkBegin);\n    }\n#else\n    // Since Android does not have std::future, we will always do sync mode\n    processChunk(chunkBegin);\n#endif\n  }\n\n#ifndef __ANDROID__\n  chunkQueue.NoMoreJobs();\n  for (auto& fut : futures) {\n    fut.get();\n  }\n#endif\n}\n\ntemplate <class Context>\nvoid TensorSerializer<Context>::Serialize(\n    const Tensor<Context>& input,\n    const string& /*name*/,\n    TensorProto* proto_ptr,\n    size_t chunkBegin,\n    int32_t chunkSize) {\n  CAFFE_ENFORCE(\n      chunkBegin <= input.size(),\n      \"Chunk begin is out of tensor: \",\n      chunkBegin,\n      ' ',\n      input.size());\n  if (chunkBegin + chunkSize > input.size()) {\n    chunkSize = input.size() - chunkBegin;\n  }\n\n  CAFFE_ENFORCE(\n      input.raw_data() || chunkSize == 0,\n      \"The input does not have data input yet. This is probably because you \"\n      \"created a tensor of non-zero shape but never filled its data via \"\n      \"mutable_data() calls. 
This means that it makes no sense to serialize \"\n      \"the tensor content.\");\n\n  TensorProto& proto = *proto_ptr;\n  proto.mutable_segment()->set_begin(chunkBegin);\n  proto.mutable_segment()->set_end(chunkBegin + chunkSize);\n\n  for (int i = 0; i < input.ndim(); ++i) {\n    proto.add_dims(input.dim(i));\n  }\n  const TensorProto::DataType data_type = TypeMetaToDataType(input.meta());\n  proto.set_data_type(data_type);\n  StoreDeviceDetail(input, &proto);\n\n  // A lot of copypaste is error prone. Should we create a macro for this?\n  switch (data_type) {\n  case TensorProto_DataType_FLOAT:\n    detail::CopyToProtoAsIs(\n        chunkSize,\n        input.template data<float>() + chunkBegin,\n        proto.mutable_float_data(),\n        &this->context_);\n    break;\n  case TensorProto_DataType_INT32:\n    detail::CopyToProtoAsIs(\n        chunkSize,\n        input.template data<int>() + chunkBegin,\n        proto.mutable_int32_data(),\n        &this->context_);\n    break;\n  case TensorProto_DataType_BYTE:\n    LOG(FATAL) << \"This should not happen. 
When serializing, \"\n                  \"BYTE is deprecated and moved to UINT8.\";\n    break;\n  case TensorProto_DataType_STRING:\n    {\n      proto.mutable_string_data()->Reserve(chunkSize);\n      const string* content = input.template data<string>();\n      for (int i = chunkBegin; i < chunkBegin + chunkSize; ++i) {\n        proto.add_string_data(content[i]);\n      }\n      break;\n    }\n  case TensorProto_DataType_BOOL:\n    detail::CopyToProtoWithCast(\n        chunkSize,\n        input.template data<bool>() + chunkBegin,\n        proto.mutable_int32_data(),\n        &this->context_);\n    break;\n  case TensorProto_DataType_UINT8:\n    detail::CopyToProtoWithCast(\n        chunkSize,\n        input.template data<uint8_t>() + chunkBegin,\n        proto.mutable_int32_data(),\n        &this->context_);\n    break;\n  case TensorProto_DataType_INT8:\n    detail::CopyToProtoWithCast(\n        chunkSize,\n        input.template data<int8_t>() + chunkBegin,\n        proto.mutable_int32_data(),\n        &this->context_);\n    break;\n  case TensorProto_DataType_UINT16:\n    detail::CopyToProtoWithCast(\n        chunkSize,\n        input.template data<uint16_t>() + chunkBegin,\n        proto.mutable_int32_data(),\n        &this->context_);\n    break;\n  case TensorProto_DataType_INT16:\n    detail::CopyToProtoWithCast(\n        chunkSize,\n        input.template data<int16_t>() + chunkBegin,\n        proto.mutable_int32_data(),\n        &this->context_);\n    break;\n  case TensorProto_DataType_INT64:\n    detail::CopyToProtoAsIs(\n        chunkSize,\n        input.template data<int64_t>() + chunkBegin,\n        proto.mutable_int64_data(),\n        &this->context_);\n    break;\n  case TensorProto_DataType_FLOAT16: {\n    if (FLAGS_caffe2_serialize_fp16_as_bytes) {\n      const int kValue = 1;\n      CAFFE_ENFORCE_EQ(\n          reinterpret_cast<const char*>(&kValue)[0],\n          1,\n          \"Serialization of FLOAT16 on big endian platform \"\n          
\"is not written yet.\");\n      unique_ptr<char[]> buffer(new char[2 * chunkSize]);\n      this->context_.template Copy<char, Context, CPUContext>(\n          2 * chunkSize,\n          reinterpret_cast<const char*>(\n              input.template data<float16>() + chunkBegin),\n          buffer.get());\n      this->context_.FinishDeviceComputation();\n      proto.set_byte_data(buffer.release(), 2 * chunkSize);\n    } else {\n      detail::CopyToProtoWithCast(\n          chunkSize,\n          reinterpret_cast<const uint16_t*>(input.template data<float16>()) +\n              chunkBegin,\n          proto.mutable_int32_data(),\n          &this->context_);\n    }\n  } break;\n  case TensorProto_DataType_DOUBLE:\n    detail::CopyToProtoAsIs(\n        chunkSize,\n        input.template data<double>() + chunkBegin,\n        proto.mutable_double_data(),\n        &this->context_);\n    break;\n  case TensorProto_DataType_UNDEFINED:\n    LOG(FATAL) << \"TensorSerializer does not have a serialization \"\n                  \"implementation for \" << input.meta().name();\n    break;\n    // Note: we intentially do not provide \"default:\" so if any new data types\n    // are added, the compiler should warn the user to add the case here.\n  }\n}\n\ntemplate <class Context>\nvoid TensorDeserializer<Context>::Deserialize(\n    const BlobProto& blob_proto,\n    Blob* blob) {\n  Deserialize(blob_proto.tensor(), blob->GetMutable<Tensor<Context>>());\n}\n\ntemplate <class Context>\nvoid TensorDeserializer<Context>::Deserialize(\n    const TensorProto& proto,\n    Tensor<Context>* tensor) {\n  // We create a local context for deserializing. 
Since Caffe2 contexts are\n  // usually lightweighted, this should not involve too much overhead.\n  Context context(proto.device_detail());\n  context.SwitchToDevice(0);\n  vector<TIndex> dims;\n  for (const TIndex d : proto.dims()) {\n    dims.push_back(d);\n  }\n  tensor->Resize(dims);\n\n  int64_t chunkBegin = 0;\n  auto chunkEnd = tensor->size();\n  if (proto.has_segment()) {\n    chunkBegin = proto.segment().begin();\n    chunkEnd = proto.segment().end();\n  }\n  CAFFE_ENFORCE(\n      0 <= chunkBegin && chunkBegin <= chunkEnd && chunkEnd <= tensor->size(),\n      \"Invalid chunk \",\n      chunkBegin,\n      ' ',\n      chunkEnd,\n      \" with total tensor size \",\n      tensor->size());\n  auto chunkSize = chunkEnd - chunkBegin;\n\n  switch (proto.data_type()) {\n    case TensorProto_DataType_FLOAT:\n      detail::CopyFromProtoAsIs(\n          chunkSize,\n          proto.float_data(),\n          tensor->template mutable_data<float>() + chunkBegin,\n          &context);\n      break;\n    case TensorProto_DataType_INT32:\n      detail::CopyFromProtoAsIs(\n          chunkSize,\n          proto.int32_data(),\n          tensor->template mutable_data<int>() + chunkBegin,\n          &context);\n      break;\n    case TensorProto_DataType_BYTE:\n      // Since BYTE stores the data in a string field instead of a repreated\n      // field we will have it special cased.\n      CAFFE_ENFORCE_EQ(\n          chunkSize, proto.byte_data().size(), \"Incorrect proto field size.\");\n      context.template Copy<uint8_t, Context, CPUContext>(\n          chunkSize,\n          reinterpret_cast<const uint8_t*>(proto.byte_data().data()),\n          tensor->template mutable_data<uint8_t>() + chunkBegin);\n      break;\n    case TensorProto_DataType_STRING:\n      // Special handing of string because it is a non-fundamental type.\n      {\n        string* content = tensor->template mutable_data<string>();\n        for (int i = 0; i < chunkSize; ++i) {\n          content[i + 
chunkBegin] = proto.string_data(i);\n        }\n      }\n      break;\n    case TensorProto_DataType_BOOL:\n      detail::CopyFromProtoWithCast(\n          chunkSize,\n          proto.int32_data(),\n          tensor->template mutable_data<bool>() + chunkBegin,\n          &context);\n      break;\n    case TensorProto_DataType_UINT8:\n      detail::CopyFromProtoWithCast(\n          chunkSize,\n          proto.int32_data(),\n          tensor->template mutable_data<uint8_t>() + chunkBegin,\n          &context);\n      break;\n    case TensorProto_DataType_INT8:\n      detail::CopyFromProtoWithCast(\n          chunkSize,\n          proto.int32_data(),\n          tensor->template mutable_data<int8_t>() + chunkBegin,\n          &context);\n      break;\n    case TensorProto_DataType_UINT16:\n      detail::CopyFromProtoWithCast(\n          chunkSize,\n          proto.int32_data(),\n          tensor->template mutable_data<uint16_t>() + chunkBegin,\n          &context);\n      break;\n    case TensorProto_DataType_INT16:\n      detail::CopyFromProtoWithCast(\n          chunkSize,\n          proto.int32_data(),\n          tensor->template mutable_data<int16_t>() + chunkBegin,\n          &context);\n      break;\n    case TensorProto_DataType_INT64:\n      detail::CopyFromProtoAsIs(\n          chunkSize,\n          proto.int64_data(),\n          tensor->template mutable_data<int64_t>() + chunkBegin,\n          &context);\n      break;\n    case TensorProto_DataType_FLOAT16:\n      if (proto.has_byte_data()) {\n        const int kValue = 1;\n        CAFFE_ENFORCE_EQ(\n            reinterpret_cast<const char*>(&kValue)[0],\n            1,\n            \"Serialization of FLOAT16 on big endian platform \"\n            \"is not written yet.\");\n        CAFFE_ENFORCE_EQ(\n            2 * chunkSize,\n            proto.byte_data().size(),\n            \"Incorrect proto field size.\");\n        context.template Copy<float16, Context, CPUContext>(\n            chunkSize,\n            
reinterpret_cast<const float16*>(proto.byte_data().data()),\n            tensor->template mutable_data<float16>() + chunkBegin);\n      } else {\n        // Backward compatibility with models which used int32_data field\n        detail::CopyFromProtoWithCast(\n            chunkSize,\n            proto.int32_data(),\n            reinterpret_cast<uint16_t*>(\n                tensor->template mutable_data<float16>()) +\n                chunkBegin,\n            &context);\n      }\n      break;\n    case TensorProto_DataType_DOUBLE:\n      detail::CopyFromProtoAsIs(\n          chunkSize,\n          proto.double_data(),\n          tensor->template mutable_data<double>() + chunkBegin,\n          &context);\n      break;\n    case TensorProto_DataType_UNDEFINED:\n      CAFFE_THROW(\"Cannot deserialize from a TensorProto UNDEFINED data type.\");\n  }\n  context.FinishDeviceComputation();\n}\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_CORE_BLOB_SERIALIZATION_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/blob_serializer_base.h",
    "content": "#pragma once\n\n#include <string>\n#include <functional>\n\nnamespace caffe2 {\n\nclass Blob;\n\nconstexpr int kDefaultChunkSize = -1;\nconstexpr int kNoChunking = 0;\n\n/**\n * @brief BlobSerializerBase is an abstract class that serializes a blob to a\n * string.\n *\n * This class exists purely for the purpose of registering type-specific\n * serialization code. If you need to serialize a specific type, you should\n * write your own Serializer class, and then register it using\n * REGISTER_BLOB_SERIALIZER. For a detailed example, see TensorSerializer for\n * details.\n */\nclass BlobSerializerBase {\n public:\n  virtual ~BlobSerializerBase() {}\n  using SerializationAcceptor =\n     std::function<void(const std::string& blobName, const std::string& data)>;\n  /**\n   * @brief The virtual function that returns a serialized string for the input\n   * blob.\n   * @param blob\n   *     the input blob to be serialized.\n   * @param name\n   *     the blob name to be used in the serialization implementation. It is up\n   *     to the implementation whether this name field is going to be used or\n   *     not.\n   * @param acceptor\n   *     a lambda which accepts key value pairs to save them to storage.\n   *     serailizer can use it to save blob in several chunks\n   *     acceptor should be thread-safe\n   */\n  virtual void Serialize(const Blob& blob, const std::string& name,\n                        SerializationAcceptor acceptor) = 0;\n\n  virtual void SerializeWithChunkSize(\n      const Blob& blob,\n      const std::string& name,\n      SerializationAcceptor acceptor,\n      int /*chunk_size*/) {\n    // Base implementation.\n    Serialize(blob, name, acceptor);\n  }\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/blob_stats.h",
    "content": "#pragma once\n\n#include \"caffe2/core/blob.h\"\n#include \"caffe2/core/registry.h\"\n#include \"caffe2/core/typeid.h\"\n\n#include <unordered_map>\n\nnamespace caffe2 {\n\nstruct BlobStatGetter {\n  virtual size_t sizeBytes(const Blob& blob) const = 0;\n  virtual ~BlobStatGetter() {}\n};\n\nstruct BlobStatRegistry {\n private:\n  std::unordered_map<CaffeTypeId, std::unique_ptr<BlobStatGetter>> map_;\n  void doRegister(CaffeTypeId id, std::unique_ptr<BlobStatGetter>&& v);\n\n public:\n  template <typename T, typename Getter>\n  struct Registrar {\n    Registrar() {\n      BlobStatRegistry::instance().doRegister(\n          TypeMeta::Id<T>(), std::unique_ptr<Getter>(new Getter));\n    }\n  };\n\n  const BlobStatGetter* get(CaffeTypeId id);\n  static BlobStatRegistry& instance();\n};\n\n#define REGISTER_BLOB_STAT_GETTER(Type, BlobStatGetterClass)    \\\n  static BlobStatRegistry::Registrar<Type, BlobStatGetterClass> \\\n      CAFFE_ANONYMOUS_VARIABLE(BlobStatRegistry)\n\nnamespace BlobStat {\n\n/**\n * Return size in bytes of the blob, if available for a blob of given type.\n * If not available, return 0.\n */\nsize_t sizeBytes(const Blob& blob);\n}\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/common.h",
    "content": "#ifndef CAFFE2_CORE_COMMON_H_\n#define CAFFE2_CORE_COMMON_H_\n\n#include <algorithm>\n#include <map>\n#include <memory>\n#include <numeric>\n#include <set>\n#include <sstream>\n#include <string>\n#include <type_traits>\n#include <vector>\n\n#ifdef __APPLE__\n#include <TargetConditionals.h>\n#endif\n\n#if defined(_MSC_VER)\n#include <io.h>\n#else\n#include <unistd.h>\n#endif\n\n// Macros used during the build of this caffe2 instance. This header file\n// is automatically generated by the cmake script during build.\n#include \"caffe2/core/macros.h\"\n\nnamespace caffe2 {\n\n// Data type for caffe2 Index/Size. We use size_t to be safe here as well as for\n// large matrices that are common in sparse math.\ntypedef int64_t TIndex;\n\n// Note(Yangqing): NVCC does not play well with unordered_map on some platforms,\n// forcing us to use std::map instead of unordered_map. This may affect speed\n// in some cases, but in most of the computation code we do not access map very\n// often, so it should be fine for us. I am putting a CaffeMap alias so we can\n// change it more easily if things work out for unordered_map down the road.\ntemplate <typename Key, typename Value>\nusing CaffeMap = std::map<Key, Value>;\n// using CaffeMap = std::unordered_map;\n\n// Using statements for common classes that we refer to in caffe2 very often.\n// Note that we only place it inside caffe2 so the global namespace is not\n// polluted.\n/* using override */\nusing std::set;\nusing std::string;\nusing std::unique_ptr;\nusing std::vector;\n\n// Just in order to mark things as not implemented. Do not use in final code.\n#define CAFFE_NOT_IMPLEMENTED CAFFE_THROW(\"Not Implemented.\")\n\n// suppress an unused variable.\n#ifdef _MSC_VER\n#define CAFFE2_UNUSED\n#define CAFFE2_USED\n#else\n#define CAFFE2_UNUSED __attribute__((__unused__))\n#define CAFFE2_USED __attribute__((__used__))\n#endif //_MSC_VER\n\n// Disable the copy and assignment operator for a class. 
Note that this will\n// disable the usage of the class in std containers.\n#ifndef DISABLE_COPY_AND_ASSIGN\n#define DISABLE_COPY_AND_ASSIGN(classname)                              \\\nprivate:                                                                       \\\n  classname(const classname&) = delete;                                        \\\n  classname& operator=(const classname&) = delete\n#endif\n\n// Define enabled when building for iOS or Android devices\n#if !defined(CAFFE2_MOBILE)\n#if defined(__ANDROID__)\n#define CAFFE2_ANDROID 1\n#define CAFFE2_MOBILE 1\n#elif (defined(__APPLE__) &&                                            \\\n       (TARGET_IPHONE_SIMULATOR || TARGET_OS_SIMULATOR || TARGET_OS_IPHONE))\n#define CAFFE2_IOS 1\n#define CAFFE2_MOBILE 1\n#elif (defined(__APPLE__) && TARGET_OS_MAC)\n#define CAFFE2_IOS 1\n#define CAFFE2_MOBILE 0\n#else\n#define CAFFE2_MOBILE 0\n#endif // ANDROID / IOS / MACOS\n#endif // CAFFE2_MOBILE\n\n// Define alignment macro that is cross platform\n#if defined(_MSC_VER)\n#define CAFFE2_ALIGNED(x) __declspec(align(x))\n#else\n#define CAFFE2_ALIGNED(x) __attribute__((aligned(x)))\n#endif\n\n/**\n * Macro for marking functions as having public visibility.\n * Ported from folly/CPortability.h\n */\n#ifndef __GNUC_PREREQ\n#if defined __GNUC__ && defined __GNUC_MINOR__\n#define __GNUC_PREREQ(maj, min) \\\n  ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))\n#else\n#define __GNUC_PREREQ(maj, min) 0\n#endif\n#endif\n\n#if defined(__GNUC__)\n#if __GNUC_PREREQ(4, 9)\n#define CAFFE2_EXPORT [[gnu::visibility(\"default\")]]\n#else\n#define CAFFE2_EXPORT __attribute__((__visibility__(\"default\")))\n#endif\n#else\n#define CAFFE2_EXPORT\n#endif\n\n// make_unique is a C++14 feature. If we don't have 14, we will emulate\n// its behavior. 
This is copied from folly/Memory.h\n#if __cplusplus >= 201402L ||                                              \\\n    (defined __cpp_lib_make_unique && __cpp_lib_make_unique >= 201304L) || \\\n    (defined(_MSC_VER) && _MSC_VER >= 1900)\n/* using override */\nusing std::make_unique;\n#else\n\ntemplate<typename T, typename... Args>\ntypename std::enable_if<!std::is_array<T>::value, std::unique_ptr<T>>::type\nmake_unique(Args&&... args) {\n  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));\n}\n\n// Allows 'make_unique<T[]>(10)'. (N3690 s20.9.1.4 p3-4)\ntemplate<typename T>\ntypename std::enable_if<std::is_array<T>::value, std::unique_ptr<T>>::type\nmake_unique(const size_t n) {\n  return std::unique_ptr<T>(new typename std::remove_extent<T>::type[n]());\n}\n\n// Disallows 'make_unique<T[10]>()'. (N3690 s20.9.1.4 p5)\ntemplate<typename T, typename... Args>\ntypename std::enable_if<\n  std::extent<T>::value != 0, std::unique_ptr<T>>::type\nmake_unique(Args&&...) = delete;\n\n#endif\n\n// to_string implementation for Android related stuff.\n#ifndef __ANDROID__\nusing std::to_string;\nusing std::stoi;\n#else\ntemplate <typename T>\nstd::string to_string(T value)\n{\n  std::ostringstream os;\n  os << value;\n  return os.str();\n}\n\ninline int stoi(const string& str)\n{\n  std::stringstream ss;\n  int n = 0;\n  ss << str;\n  ss >> n;\n  return n;\n}\n#endif\n\n// dynamic cast reroute: if RTTI is disabled, go to reinterpret_cast\ntemplate <typename Dst, typename Src>\ninline Dst dynamic_cast_if_rtti(Src ptr) {\n#ifdef __GXX_RTTI\n  return dynamic_cast<Dst>(ptr);\n#else\n  return reinterpret_cast<Dst>(ptr);\n#endif\n}\n\n// SkipIndices are used in operator_fallback_gpu.h and operator_fallback_mkl.h\n// as utilty functions that marks input / output indices to skip when we use a\n// CPU operator as the fallback of GPU/MKL operator option.\ntemplate <int... 
values>\nclass SkipIndices {\n private:\n  template <int V>\n  static inline bool ContainsInternal(const int i) {\n    return (i == V);\n  }\n  template <int First, int Second, int... Rest>\n  static inline bool ContainsInternal(const int i) {\n    return (i == First) && ContainsInternal<Second, Rest...>(i);\n  }\n\n public:\n  static inline bool Contains(const int i) {\n    return ContainsInternal<values...>(i);\n  }\n};\n\ntemplate <>\nclass SkipIndices<> {\n public:\n  static inline bool Contains(const int /*i*/) {\n    return false;\n  }\n};\n\n// A global variable to mark if Caffe2 has cuda linked to the current runtime.\n// Do not directly use this variable, but instead use the HasCudaRuntime()\n// function below.\nextern bool g_caffe2_has_cuda_linked;\n\n// HasCudaRuntime() tells the program whether the binary has Cuda runtime\n// linked. This function should not be used in static initialization functions\n// as the underlying boolean variable is going to be switched on when one\n// loads libcaffe2_gpu.so.\ninline bool HasCudaRuntime() {\n  return g_caffe2_has_cuda_linked;\n}\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_CORE_COMMON_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/common_cudnn.h",
    "content": "#ifndef CAFFE2_CORE_COMMON_CUDNN_H_\n#define CAFFE2_CORE_COMMON_CUDNN_H_\n\n#include <array>\n#include <mutex>\n\n#include <cudnn.h>\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/common_gpu.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/types.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nstatic_assert(\n    CUDNN_VERSION >= 5000,\n    \"Caffe2 requires cudnn version 5.0 or above.\");\n\n#define CUDNN_VERSION_MIN(major, minor, patch) \\\n  (CUDNN_VERSION >= ((major) * 1000 + (minor) * 100 + (patch)))\n\nnamespace caffe2 {\n\nnamespace internal {\n/**\n * A helper function to obtain cudnn error strings.\n */\ninline const char* cudnnGetErrorString(cudnnStatus_t status) {\n  switch (status) {\n    case CUDNN_STATUS_SUCCESS:\n      return \"CUDNN_STATUS_SUCCESS\";\n    case CUDNN_STATUS_NOT_INITIALIZED:\n      return \"CUDNN_STATUS_NOT_INITIALIZED\";\n    case CUDNN_STATUS_ALLOC_FAILED:\n      return \"CUDNN_STATUS_ALLOC_FAILED\";\n    case CUDNN_STATUS_BAD_PARAM:\n      return \"CUDNN_STATUS_BAD_PARAM\";\n    case CUDNN_STATUS_INTERNAL_ERROR:\n      return \"CUDNN_STATUS_INTERNAL_ERROR\";\n    case CUDNN_STATUS_INVALID_VALUE:\n      return \"CUDNN_STATUS_INVALID_VALUE\";\n    case CUDNN_STATUS_ARCH_MISMATCH:\n      return \"CUDNN_STATUS_ARCH_MISMATCH\";\n    case CUDNN_STATUS_MAPPING_ERROR:\n      return \"CUDNN_STATUS_MAPPING_ERROR\";\n    case CUDNN_STATUS_EXECUTION_FAILED:\n      return \"CUDNN_STATUS_EXECUTION_FAILED\";\n    case CUDNN_STATUS_NOT_SUPPORTED:\n      return \"CUDNN_STATUS_NOT_SUPPORTED\";\n    case CUDNN_STATUS_LICENSE_ERROR:\n      return \"CUDNN_STATUS_LICENSE_ERROR\";\n    default:\n      return \"Unknown cudnn error number\";\n  }\n}\n} // namespace internal\n\n// A macro that wraps around a cudnn statement so we can check if the cudnn\n// execution finishes or not.\n#define CUDNN_ENFORCE(condition)                          
\\\n  do {                                                    \\\n    cudnnStatus_t status = condition;                     \\\n    CAFFE_ENFORCE_EQ(                                     \\\n        status,                                           \\\n        CUDNN_STATUS_SUCCESS,                             \\\n        \", Error at: \",                                   \\\n        __FILE__,                                         \\\n        \":\",                                              \\\n        __LINE__,                                         \\\n        \": \",                                             \\\n        ::caffe2::internal::cudnnGetErrorString(status)); \\\n  } while (0)\n#define CUDNN_CHECK(condition)                              \\\n  do {                                                      \\\n    cudnnStatus_t status = condition;                       \\\n    CHECK(status == CUDNN_STATUS_SUCCESS)                   \\\n        << ::caffe2::internal::cudnnGetErrorString(status); \\\n  } while (0)\n\n// report the version of cuDNN Caffe2 was compiled with\ninline size_t cudnnCompiledVersion() {\n  return CUDNN_VERSION;\n}\n// report the runtime version of cuDNN\ninline size_t cudnnRuntimeVersion() {\n  return cudnnGetVersion();\n}\n\n// Check compatibility of compiled and runtime cuDNN versions\ninline void CheckCuDNNVersions() {\n  // Version format is major*1000 + minor*100 + patch\n  // Major, minor and patch versions must all match\n  bool version_match = cudnnCompiledVersion() == cudnnRuntimeVersion();\n  CAFFE_ENFORCE(version_match,\n                \"cuDNN compiled (\", cudnnCompiledVersion(), \") and \"\n                \"runtime (\", cudnnRuntimeVersion(), \") versions mismatch\");\n}\n\n/**\n * cudnnTypeWrapper is a wrapper class that allows us to refer to the cudnn type\n * in a template function. 
The class is specialized explicitly for different\n * data types below.\n */\ntemplate <typename T>\nclass cudnnTypeWrapper;\n\ntemplate <>\nclass cudnnTypeWrapper<float> {\n public:\n  static const cudnnDataType_t type = CUDNN_DATA_FLOAT;\n  typedef const float ScalingParamType;\n  typedef float BNParamType;\n  static ScalingParamType* kOne() {\n    static ScalingParamType v = 1.0;\n    return &v;\n  }\n  static const ScalingParamType* kZero() {\n    static ScalingParamType v = 0.0;\n    return &v;\n  }\n};\n\ntemplate <>\nclass cudnnTypeWrapper<double> {\n public:\n  static const cudnnDataType_t type = CUDNN_DATA_DOUBLE;\n  typedef const double ScalingParamType;\n  typedef double BNParamType;\n  static ScalingParamType* kOne() {\n    static ScalingParamType v = 1.0;\n    return &v;\n  }\n  static ScalingParamType* kZero() {\n    static ScalingParamType v = 0.0;\n    return &v;\n  }\n};\n\ntemplate <>\nclass cudnnTypeWrapper<float16> {\n public:\n  static const cudnnDataType_t type = CUDNN_DATA_HALF;\n  typedef const float ScalingParamType;\n  typedef float BNParamType;\n  static ScalingParamType* kOne() {\n    static ScalingParamType v = 1.0;\n    return &v;\n  }\n  static ScalingParamType* kZero() {\n    static ScalingParamType v = 0.0;\n    return &v;\n  }\n};\n\n/**\n * A wrapper function to convert the Caffe storage order to cudnn storage order\n * enum values.\n */\ninline cudnnTensorFormat_t GetCudnnTensorFormat(const StorageOrder& order) {\n  switch (order) {\n    case StorageOrder::NHWC:\n      return CUDNN_TENSOR_NHWC;\n    case StorageOrder::NCHW:\n      return CUDNN_TENSOR_NCHW;\n    default:\n      LOG(FATAL) << \"Unknown cudnn equivalent for order: \" << order;\n  }\n  // Just to suppress compiler warnings\n  return CUDNN_TENSOR_NCHW;\n}\n\n/**\n * cudnnTensorDescWrapper is the placeholder that wraps around a\n * cudnnTensorDescriptor_t, allowing us to do descriptor change as-needed during\n * runtime.\n */\nclass cudnnTensorDescWrapper {\n public:\n 
 cudnnTensorDescWrapper() {\n    CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&desc_));\n  }\n  ~cudnnTensorDescWrapper() noexcept {\n    CUDNN_CHECK(cudnnDestroyTensorDescriptor(desc_));\n  }\n\n  inline cudnnTensorDescriptor_t Descriptor(\n      const cudnnTensorFormat_t format,\n      const cudnnDataType_t type,\n      const vector<int>& dims,\n      bool* changed) {\n    if (type_ == type && format_ == format && dims_ == dims) {\n      // if not changed, simply return the current descriptor.\n      if (changed)\n        *changed = false;\n      return desc_;\n    }\n    CAFFE_ENFORCE_EQ(\n        dims.size(), 4, \"Currently only 4-dimensional descriptor supported.\");\n    format_ = format;\n    type_ = type;\n    dims_ = dims;\n    CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(\n        desc_,\n        format,\n        type,\n        dims_[0],\n        (format == CUDNN_TENSOR_NCHW ? dims_[1] : dims_[3]),\n        (format == CUDNN_TENSOR_NCHW ? dims_[2] : dims_[1]),\n        (format == CUDNN_TENSOR_NCHW ? 
dims_[3] : dims_[2])));\n    if (changed)\n      *changed = true;\n    return desc_;\n  }\n\n  template <typename T>\n  inline cudnnTensorDescriptor_t Descriptor(\n      const StorageOrder& order,\n      const vector<int>& dims) {\n    return Descriptor(\n        GetCudnnTensorFormat(order), cudnnTypeWrapper<T>::type, dims, nullptr);\n  }\n\n private:\n  cudnnTensorDescriptor_t desc_;\n  cudnnTensorFormat_t format_;\n  cudnnDataType_t type_;\n  vector<int> dims_;\n  DISABLE_COPY_AND_ASSIGN(cudnnTensorDescWrapper);\n};\n\nclass cudnnFilterDescWrapper {\n public:\n  cudnnFilterDescWrapper() {\n    CUDNN_ENFORCE(cudnnCreateFilterDescriptor(&desc_));\n  }\n  ~cudnnFilterDescWrapper() noexcept {\n    CUDNN_CHECK(cudnnDestroyFilterDescriptor(desc_));\n  }\n\n  inline cudnnFilterDescriptor_t Descriptor(\n      const StorageOrder& order,\n      const cudnnDataType_t type,\n      const vector<int>& dims,\n      bool* changed) {\n    if (type_ == type && order_ == order && dims_ == dims) {\n      // if not changed, simply return the current descriptor.\n      if (changed)\n        *changed = false;\n      return desc_;\n    }\n    CAFFE_ENFORCE_EQ(\n        dims.size(), 4, \"Currently only 4-dimensional descriptor supported.\");\n    order_ = order;\n    type_ = type;\n    dims_ = dims;\n    CUDNN_ENFORCE(cudnnSetFilter4dDescriptor(\n        desc_,\n        type,\n        GetCudnnTensorFormat(order),\n        dims_[0],\n        // TODO - confirm that this is correct for NHWC\n        (order == StorageOrder::NCHW ? dims_[1] : dims_[3]),\n        (order == StorageOrder::NCHW ? dims_[2] : dims_[1]),\n        (order == StorageOrder::NCHW ? 
dims_[3] : dims_[2])));\n    if (changed)\n      *changed = true;\n    return desc_;\n  }\n\n  template <typename T>\n  inline cudnnFilterDescriptor_t Descriptor(\n      const StorageOrder& order,\n      const vector<int>& dims) {\n    return Descriptor(order, cudnnTypeWrapper<T>::type, dims, nullptr);\n  }\n\n private:\n  cudnnFilterDescriptor_t desc_;\n  StorageOrder order_;\n  cudnnDataType_t type_;\n  vector<int> dims_;\n  DISABLE_COPY_AND_ASSIGN(cudnnFilterDescWrapper);\n};\n\nclass CuDNNWrapper;\n/**\n * CuDNNHandles wraps around cudnnHandle_t so they can be\n * properly destructed when threads exit.\n */\nclass CuDNNHandles {\n  friend class CuDNNWrapper;\n\n private:\n  CuDNNHandles() {\n    for (int i = 0; i < CAFFE2_COMPILE_TIME_MAX_GPUS; ++i) {\n      cudnn_handle_[i] = nullptr;\n    }\n  }\n\n  ~CuDNNHandles() noexcept {\n    for (int i = 0; i < CAFFE2_COMPILE_TIME_MAX_GPUS; ++i) {\n      if (cudnn_handle_[i]) {\n        CUDNN_CHECK(cudnnDestroy(cudnn_handle_[i]));\n      }\n    }\n  }\n\n  cudnnHandle_t cudnn_handle_[CAFFE2_COMPILE_TIME_MAX_GPUS];\n};\n\n/**\n * CuDNNWorkspace is a wrapper around a raw cuda pointer that holds the cudnn\n * scratch space. This struct is meant to be only used in CuDNNWrapper to\n * provide a program-wide scratch space for CuDNN. The reason behind it is that\n * cudnn function calls are usually very efficient, hence one probably does not\n * want to run multiple cudnn calls at the same time. 
As a result, one should\n * not need more than one cudnn workspace per device.\n */\nstruct CuDNNWorkspace {\n  ~CuDNNWorkspace() noexcept {}\n\n  void* get(size_t nbytes) {\n    if (nbytes_ < nbytes) {\n      reset();\n      auto data_and_deleter = CUDAContext::New(nbytes);\n      data_ = {data_and_deleter.first, data_and_deleter.second};\n      nbytes_ = nbytes;\n    }\n    CAFFE_ENFORCE_GE(nbytes_, nbytes);\n    return data_.get();\n  }\n\n  void reset() {\n    data_ = nullptr;\n    nbytes_ = 0;\n  }\n\n private:\n  std::unique_ptr<void, MemoryDeleter> data_{nullptr, NoDelete};\n  size_t nbytes_{0};\n};\n\n// CuDNNState is the owner of the CuDNNWorkspace, and serializes all\n// executions of operations that use the state onto it's own stream\n// (so multiple Net workers can reuse the same workspace from\n// different threads and CUDA streams).\nclass CuDNNState {\n public:\n  explicit CuDNNState(size_t gpu_id) : gpu_id_(gpu_id) {\n    DeviceGuard g(gpu_id_);\n    CUDNN_ENFORCE(cudnnCreate(&cudnn_handle_));\n    CUDA_ENFORCE(cudaEventCreate(&before_));\n    CUDA_ENFORCE(cudaEventCreate(&after_));\n    CUDA_ENFORCE(cudaStreamCreate(&stream_));\n    CUDNN_ENFORCE(cudnnSetStream(cudnn_handle_, stream_));\n  }\n\n  ~CuDNNState() noexcept {\n    DeviceGuard g(gpu_id_);\n    CUDNN_CHECK(cudnnDestroy(cudnn_handle_));\n    CUDA_CHECK(cudaStreamDestroy(stream_));\n    CUDA_CHECK(cudaEventDestroy(after_));\n    CUDA_CHECK(cudaEventDestroy(before_));\n  }\n\n  cudnnHandle_t& cudnn_handle() {\n    return cudnn_handle_;\n  }\n\n  CuDNNWorkspace& workspace() {\n    return workspace_;\n  }\n\n  template <typename F>\n  void execute(cudaStream_t stream, F&& f) {\n    CUDA_ENFORCE(cudaEventRecord(before_, stream));\n    CUDA_ENFORCE(cudaStreamWaitEvent(stream_, before_, 0));\n    f(this);\n    CUDA_ENFORCE(cudaEventRecord(after_, stream_));\n    CUDA_ENFORCE(cudaStreamWaitEvent(stream, after_, 0));\n  }\n\n private:\n  cudnnHandle_t cudnn_handle_{nullptr};\n  cudaEvent_t 
before_{nullptr};\n  cudaEvent_t after_{nullptr};\n  cudaStream_t stream_{nullptr};\n  CuDNNWorkspace workspace_;\n  size_t gpu_id_{0};\n  DISABLE_COPY_AND_ASSIGN(CuDNNState);\n};\n\n/**\n * CuDNNWrapper is a class that wraps the cudnn handles and cudnn workspaces.\n *\n * The wrapper ensures that for each thread and each gpu, there is one\n * identical cudnn handle, which is also associated with the thread-local\n * per-device cuda stream. The wrapper also hosts the device-specific cudnn\n * workspace (scratch space for some cudnn functions).\n *\n */\nclass CuDNNWrapper {\n public:\n  /**\n   * Creates a cudnn wrapper associated with a CUDAContext object. Note that\n   * the CUDAContext object should outlive the CuDNNWrapper.\n   */\n  explicit CuDNNWrapper(CUDAContext* context) : context_(context) {}\n\n  /**\n   * Returns the inline cudnn handle that executes on the current\n   * thread's cuda_stream.\n   */\n  cudnnHandle_t& inline_cudnn_handle() {\n    int gpu_id = context_->cuda_gpu_id();\n    auto& cudnn_handle_ = tls_cudnn_handles_.cudnn_handle_[gpu_id];\n    if (!cudnn_handle_) {\n      context_->SwitchToDevice();\n      CUDNN_ENFORCE(cudnnCreate(&cudnn_handle_));\n    }\n    CUDNN_ENFORCE(cudnnSetStream(cudnn_handle_, context_->cuda_stream()));\n    return cudnn_handle_;\n  }\n\n  // Executes the closure F on the CuDNNState associated with state_idx\n  template <typename F>\n  void with_cudnn_state(size_t state_idx, F&& f) {\n    CAFFE_ENFORCE(\n        state_idx < CAFFE2_COMPILE_TIME_MAX_CUDNN_STATES, \"Invalid state_idx\");\n    auto& sync_state = cudnn_states()[context_->cuda_gpu_id()][state_idx];\n\n    DeviceGuard dg(context_->cuda_gpu_id());\n\n    // We need to serialize execution on the CuDNNState as we can't\n    // allow multiple threads to race through the cudaEventRecord\n    // calls (so a worker thread might wait on another worker thread's\n    // execution)\n    std::lock_guard<std::mutex> g(sync_state.mutex);\n    if 
(!sync_state.state.get()) {\n      sync_state.state.reset(new CuDNNState(context_->cuda_gpu_id()));\n    }\n    CHECK_NOTNULL(sync_state.state.get())->execute(context_->cuda_stream(), f);\n  }\n\n protected:\n  // Pointer to an external cuda context that the cudnn wrapper will use.\n  CUDAContext* context_;\n  static thread_local CuDNNHandles tls_cudnn_handles_;\n\n  static constexpr size_t CAFFE2_COMPILE_TIME_MAX_CUDNN_STATES = 4;\n\n  struct SyncedCuDNNState {\n    std::mutex mutex;\n    std::unique_ptr<CuDNNState> state;\n  };\n\n  using PerGPUCuDNNStates = std::array<\n      std::array<SyncedCuDNNState, CAFFE2_COMPILE_TIME_MAX_CUDNN_STATES>,\n      CAFFE2_COMPILE_TIME_MAX_GPUS>;\n  static PerGPUCuDNNStates& cudnn_states();\n\n  DISABLE_COPY_AND_ASSIGN(CuDNNWrapper);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_CORE_COMMON_CUDNN_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/common_gpu.h",
    "content": "#ifndef CAFFE2_CORE_COMMON_GPU_H_\n#define CAFFE2_CORE_COMMON_GPU_H_\n\n#include <assert.h>\n#include <cuda.h>\n#include <cuda_runtime.h>\n\n// Disable strict aliasing errors for CUDA 9.\n// The cuda_fp16.h header in CUDA 9 RC triggers this diagnostic.\n// It is included by cusparse.h as well, so guarding the\n// inclusion of that header here is not enough.\n#if CUDA_VERSION >= 9000\n#ifdef __GNUC__\n#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)\n#pragma GCC diagnostic push\n#endif\n#pragma GCC diagnostic ignored \"-Wstrict-aliasing\"\n#endif // __GNUC__\n#endif // CUDA_VERSION >= 9000\n\n#include <cublas_v2.h>\n#include <curand.h>\n#include <driver_types.h>\n\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/common.h\"\n\n// This is a macro defined for cuda fp16 support. In default, cuda fp16 is\n// supported by NVCC 7.5, but it is also included in the Tegra X1 platform with\n// a (custom?) NVCC 7.0. As a result, we would normally just check the cuda\n// version here, but would also allow a use to pass in the flag\n// CAFFE_HAS_CUDA_FP16 manually.\n\n#ifndef CAFFE_HAS_CUDA_FP16\n#if CUDA_VERSION >= 7050\n#define CAFFE_HAS_CUDA_FP16\n#endif  // CUDA_VERSION >= 7050\n#endif  // CAFFE_HAS_CUDA_FP16\n\n#ifdef CAFFE_HAS_CUDA_FP16\n#include <cuda_fp16.h>\n#endif\n\n// Re-enable strict aliasing diagnostic if it was disabled.\n#if CUDA_VERSION >= 9000\n#ifdef __GNUC__\n#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)\n#pragma GCC diagnostic pop\n#endif\n#endif // __GNUC__\n#endif // CUDA_VERSION >= 9000\n\n/**\n * The maximum number of GPUs that caffe2 recognizes.\n */\n#define CAFFE2_COMPILE_TIME_MAX_GPUS 16\n/**\n * The maximum number of peers that each gpu can have when doing p2p setup.\n * Currently, according to NVidia documentation, each device can support a\n * system-wide maximum of eight peer connections.\n * When Caffe2 sets up peer access resources, if we have more than 8 gpus,\n * we will enable peer access in 
groups of 8.\n */\n#define CAFFE2_CUDA_MAX_PEER_SIZE 8\n\nnamespace caffe2 {\n\n#if CUDA_VERSION >= 9000\n/**\n * Empty class to identify TensorCore-based math\n */\nclass TensorCoreEngine {};\n#endif\n\n/**\n * A runtime function to report the cuda version that Caffe2 is built with.\n */\ninline int CudaVersion() { return CUDA_VERSION; }\n\n/**\n * Returns the number of devices.\n */\nint NumCudaDevices();\n\n/**\n * Check if the current running session has a cuda gpu present.\n *\n * Note that this is different from having caffe2 built with cuda. Building\n * Caffe2 with cuda only guarantees that this function exists. If there are no\n * cuda gpus present in the machine, or there are hardware configuration\n * problems like an insufficient driver, this function will still return false,\n * meaning that there is no usable GPU present.\n *\n * In the open source build, it is possible that Caffe2's GPU code is\n * dynamically loaded, and as a result a library could be only linked to the\n * CPU code, but want to test if cuda is later available or not. In this case,\n * one should use HasCudaRuntime() from common.h.\n */\ninline bool HasCudaGPU() { return NumCudaDevices() > 0; }\n\n/**\n * Sets the default GPU id for Caffe2.\n *\n * If an operator is set to run on Cuda GPU but no gpu id is given, we will use\n * the default gpu id to run the operator. Before this function is explicitly\n * called, GPU 0 will be the default GPU id.\n */\nvoid SetDefaultGPUID(const int deviceid);\n\n/**\n * Gets the default GPU id for Caffe2.\n */\nint GetDefaultGPUID();\n\n/**\n * Gets the current GPU id. This is a simple wrapper around cudaGetDevice().\n */\nint CaffeCudaGetDevice();\n\n/**\n * Gets the current GPU id. This is a simple wrapper around cudaGetDevice().\n */\nvoid CaffeCudaSetDevice(const int id);\n\n/**\n * Gets the GPU id that the current pointer is located at.\n */\nint GetGPUIDForPointer(const void* ptr);\n\n/**\n * Gets the device property for the given device. 
This function is thread safe.\n */\nconst cudaDeviceProp& GetDeviceProperty(const int device);\n\n/**\n * Runs a device query function and prints out the results to LOG(INFO).\n */\nvoid DeviceQuery(const int deviceid);\n\n/**\n * Return a peer access pattern by returning a matrix (in the format of a\n * nested vector) of boolean values specifying whether peer access is possible.\n *\n * This function returns false if anything wrong happens during the query of\n * the GPU access pattern.\n */\nbool GetCudaPeerAccessPattern(vector<vector<bool> >* pattern);\n\n/**\n * Return the availability of TensorCores for math\n */\nbool TensorCoreAvailable();\n\n/**\n * Return a human readable cublas error string.\n */\nconst char* cublasGetErrorString(cublasStatus_t error);\n\n/**\n * Return a human readable curand error string.\n */\nconst char* curandGetErrorString(curandStatus_t error);\n\n// CUDA: various checks for different function calls.\n#define CUDA_ENFORCE(condition, ...)     \\\n  do {                              \\\n    cudaError_t error = condition;  \\\n    CAFFE_ENFORCE_EQ(               \\\n        error,                      \\\n        cudaSuccess,                \\\n        \"Error at: \",               \\\n        __FILE__,                   \\\n        \":\",                        \\\n        __LINE__,                   \\\n        \": \",                       \\\n        cudaGetErrorString(error), ##__VA_ARGS__); \\\n  } while (0)\n#define CUDA_CHECK(condition)                                 \\\n  do {                                                        \\\n    cudaError_t error = condition;                            \\\n    CHECK(error == cudaSuccess) << cudaGetErrorString(error); \\\n  } while (0)\n\n#define CUDA_DRIVERAPI_ENFORCE(condition)                            \\\n  do {                                                               \\\n    CUresult result = condition;                                     \\\n    if (result != 
CUDA_SUCCESS) {                                    \\\n      const char* msg;                                               \\\n      cuGetErrorName(result, &msg);                                  \\\n      CAFFE_THROW(\"Error at: \", __FILE__, \":\", __LINE__, \": \", msg); \\\n    }                                                                \\\n  } while (0)\n#define CUDA_DRIVERAPI_CHECK(condition)                                 \\\n  do {                                                                  \\\n    CUresult result = condition;                                        \\\n    if (result != CUDA_SUCCESS) {                                       \\\n      const char* msg;                                                  \\\n      cuGetErrorName(result, &msg);                                     \\\n      LOG(FATAL) << \"Error at: \" << __FILE__ << \":\" << __LINE__ << \": \" \\\n                 << msg;                                                \\\n    }                                                                   \\\n  } while (0)\n\n#define CUBLAS_ENFORCE(condition)                \\\n  do {                                           \\\n    cublasStatus_t status = condition;           \\\n    CAFFE_ENFORCE_EQ(                            \\\n        status,                                  \\\n        CUBLAS_STATUS_SUCCESS,                   \\\n        \"Error at: \",                            \\\n        __FILE__,                                \\\n        \":\",                                     \\\n        __LINE__,                                \\\n        \": \",                                    \\\n        ::caffe2::cublasGetErrorString(status)); \\\n  } while (0)\n#define CUBLAS_CHECK(condition)                    \\\n  do {                                             \\\n    cublasStatus_t status = condition;             \\\n    CHECK(status == CUBLAS_STATUS_SUCCESS)         \\\n        << 
::caffe2::cublasGetErrorString(status); \\\n  } while (0)\n\n#define CURAND_ENFORCE(condition)                \\\n  do {                                           \\\n    curandStatus_t status = condition;           \\\n    CAFFE_ENFORCE_EQ(                            \\\n        status,                                  \\\n        CURAND_STATUS_SUCCESS,                   \\\n        \"Error at: \",                            \\\n        __FILE__,                                \\\n        \":\",                                     \\\n        __LINE__,                                \\\n        \": \",                                    \\\n        ::caffe2::curandGetErrorString(status)); \\\n  } while (0)\n#define CURAND_CHECK(condition)                    \\\n  do {                                             \\\n    curandStatus_t status = condition;             \\\n    CHECK(status == CURAND_STATUS_SUCCESS)         \\\n        << ::caffe2::curandGetErrorString(status); \\\n  } while (0)\n\n#define CUDA_1D_KERNEL_LOOP(i, n)                                              \\\n  for (int i = blockIdx.x * blockDim.x + threadIdx.x;                          \\\n       i < (n);                                                                \\\n       i += blockDim.x * gridDim.x)\n\n// CUDA_KERNEL_ASSERT is a macro that wraps an assert() call inside cuda\n// kernels. This is not supported by Apple platforms so we special case it.\n// See http://docs.nvidia.com/cuda/cuda-c-programming-guide/#assertion\n#ifdef __APPLE__\n#define CUDA_KERNEL_ASSERT(...)\n#else  // __APPLE__\n#define CUDA_KERNEL_ASSERT(...) assert(__VA_ARGS__)\n#endif  // __APPLE__\n\n// The following helper functions are here so that you can write a kernel call\n// when you are not particularly interested in maxing out the kernels'\n// performance. 
Usually, this will give you a reasonable speed, but if you\n// really want to find the best performance, it is advised that you tune the\n// size of the blocks and grids more reasonably.\n// A legacy note: this is derived from the old good Caffe days, when I simply\n// hard-coded the number of threads and wanted to keep backward compatibility\n// for different computation capabilities.\n// For more info on CUDA compute capabilities, visit the NVidia website at:\n//    http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities\n\n// The number of cuda threads to use. 512 is used for backward compatibility,\n// and it is observed that setting it to 1024 usually does not bring much\n// performance gain (which makes sense, because warp size being 32 means that\n// blindly setting a huge block for a random kernel isn't optimal).\nconstexpr int CAFFE_CUDA_NUM_THREADS = 512;\n// The maximum number of blocks to use in the default kernel call. We set it to\n// 4096 which would work for compute capability 2.x (where 65536 is the limit).\n// This number is very carelessly chosen. Ideally, one would like to look at\n// the hardware at runtime, and pick the number of blocks that makes most\n// sense for the specific runtime environment. This is a todo item.\nconstexpr int CAFFE_MAXIMUM_NUM_BLOCKS = 4096;\n\n/**\n * @brief Compute the number of blocks needed to run N threads.\n */\ninline int CAFFE_GET_BLOCKS(const int N) {\n  return std::min((N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS,\n                  CAFFE_MAXIMUM_NUM_BLOCKS);\n}\n\nclass DeviceGuard {\n public:\n  explicit DeviceGuard(int newDevice) : previous_(CaffeCudaGetDevice()) {\n    if (previous_ != newDevice) {\n      CaffeCudaSetDevice(newDevice);\n    }\n  }\n\n  ~DeviceGuard() noexcept {\n    CaffeCudaSetDevice(previous_);\n  }\n\n private:\n  int previous_;\n};\n\n}  // namespace caffe2\n#endif  // CAFFE2_CORE_COMMON_GPU_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/common_omp.h",
    "content": "#ifndef CAFFE2_CORE_COMMON_OMP_H_\n#define CAFFE2_CORE_COMMON_OMP_H_\n\n#ifdef _OPENMP\n#include <omp.h>\n#endif // _OPENMP\n\n#endif // CAFFE2_CORE_COMMON_OMP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/context.h",
    "content": "#ifndef CAFFE2_CORE_CONTEXT_H_\n#define CAFFE2_CORE_CONTEXT_H_\n\n#include <cstdlib>\n#include <ctime>\n#include <random>\n#include <unordered_map>\n\n#include \"caffe2/core/allocator.h\"\n#include \"caffe2/core/event.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/typeid.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n#include \"caffe2/utils/math.h\"\n\nCAFFE2_DECLARE_bool(caffe2_report_cpu_memory_usage);\n\nnamespace caffe2 {\n\n/**\n * The CPU Context, representing the bare minimum of what a Context class in\n * Caffe2 should implement.\n *\n * See operator.h, especially Operator<Context>, for how Context are used in\n * actual operator implementations that are associated with specific devices.\n * In general, the Context class is passed in as a template argument, and\n * the operator can use the functions defined in the context to execute whatever\n * computation it has.\n *\n * A Context defines all the necessities to run an operator on a specific\n * device. Specific Context classes have the freedom to choose what functions it\n * implements, but there are a few functions that you should consider\n * implementing if you want to write your own context class:\n * - void SwitchToDevice(): any necessary code to switch to the device before\n *     running anything.\n * - void WaitEvent(const Event& ev): make the current context to wait on\n *     an event. For example, for cuda, this is the equivalent of\n *     cudaStreamWaitEvent. For CPU context, it essentially synchronizes the\n *     event.\n * - void Record(Event* ev): record the async activities on the current context\n *     to the event. For example, for cuda, this is the equivalent of\n *     cudaEventRecord on the current stream. For CPU context, it is always\n *     synchronous.\n * - void FinishDeviceComputation(): any wrapping-up work after all the\n *     computation of the operator is done. If there are errors during the\n *     execution, throw exception. 
For example, in a CUDAContext, this function\n *     carries out a stream synchronization and spots potential errors for\n *     the cuda kernel calls.\n * - static std::pair<void*, MemoryDeleter> New(size_t nbytes): allocates\n       memory and returns a deleter.\n * - template <class SrcContext, class DstContext> void CopyBytes(...): does\n *     cross context memory copy.\n * - template <typename T, class SrcContext, class DstContext> void Copy(...):\n *     usually a simple wrapper around the above CopyBytes function.\n *\n * We intentionally did not create a base class for the various possible Context\n * classes there might be, since they are intended to be specified during\n * compile time using templates rather than via polymorphism. You should also\n * not have classes derived from existing context classes.\n */\nclass CPUContext final {\n public:\n  typedef std::mt19937 rand_gen_type;\n  CPUContext() : random_seed_(math::randomNumberSeed()) {}\n  explicit CPUContext(const DeviceOption& option)\n      : random_seed_(\n            option.has_random_seed() ? 
option.random_seed()\n                                     : math::randomNumberSeed()) {\n    CAFFE_ENFORCE_EQ(option.device_type(), CPU);\n  }\n\n  ~CPUContext() noexcept {}\n\n  inline void SwitchToDevice(int /*stream_id*/) {}\n  inline void SwitchToDevice() {\n    SwitchToDevice(0);\n  }\n\n  inline void WaitEvent(const Event& ev) {\n    ev.Wait(CPU, this);\n  }\n  inline void Record(Event* ev) const {\n    CAFFE_ENFORCE(ev, \"Event must not be null.\");\n    ev->Record(CPU, this);\n  }\n\n  inline void FinishDeviceComputation() {}\n\n  inline rand_gen_type& RandGenerator() {\n    if (!random_generator_.get()) {\n      random_generator_.reset(new rand_gen_type(random_seed_));\n    }\n    return *random_generator_.get();\n  }\n\n  static std::pair<void*, MemoryDeleter> New(size_t nbytes) {\n    auto data_and_deleter = GetCPUAllocator()->New(nbytes);\n    if (FLAGS_caffe2_report_cpu_memory_usage) {\n      reporter_.New(data_and_deleter.first, nbytes);\n      data_and_deleter.second = ReportAndDelete;\n    }\n    return data_and_deleter;\n  }\n\n  // Two copy functions that deals with cross-device copies.\n  template <class SrcContext, class DstContext>\n  inline void CopyBytes(size_t nbytes, const void* src, void* dst);\n\n  template <typename T, class SrcContext, class DstContext>\n  inline void Copy(size_t n, const T* src, T* dst) {\n    if (std::is_fundamental<T>::value) {\n      CopyBytes<SrcContext, DstContext>(n * sizeof(T),\n                                     static_cast<const void*>(src),\n                                     static_cast<void*>(dst));\n    } else {\n      for (int i = 0; i < n; ++i) {\n        dst[i] = src[i];\n      }\n    }\n  }\n\n  template <class SrcContext, class DstContext>\n  inline void\n  CopyItems(const TypeMeta& meta, size_t n, const void* src, void* dst) {\n    if (meta.copy()) {\n      meta.copy()(src, dst, n);\n    } else {\n      CopyBytes<SrcContext, DstContext>(n * meta.itemsize(), src, dst);\n    }\n  }\n\n 
protected:\n  // TODO(jiayq): instead of hard-coding a generator, make it more flexible.\n  int random_seed_{1701};\n  std::unique_ptr<rand_gen_type> random_generator_;\n  static MemoryAllocationReporter reporter_;\n\n private:\n  static void ReportAndDelete(void* ptr) {\n    reporter_.Delete(ptr);\n    GetCPUAllocator()->GetDeleter()(ptr);\n  }\n};\n\ntemplate<>\ninline void CPUContext::CopyBytes<CPUContext, CPUContext>(\n    size_t nbytes, const void* src, void* dst) {\n  if (nbytes == 0) {\n    return;\n  }\n  CAFFE_ENFORCE(src);\n  CAFFE_ENFORCE(dst);\n  memcpy(dst, src, nbytes);\n}\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_CORE_CONTEXT_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/context_gpu.h",
    "content": "#ifndef CAFFE2_CORE_CONTEXT_GPU_H_\n#define CAFFE2_CORE_CONTEXT_GPU_H_\n\n#include <ctime>\n#include <mutex>\n\n#include \"caffe2/core/common_gpu.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/core/types.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n#include \"caffe2/core/logging.h\"\n\nnamespace caffe2 {\n\nenum class CudaMemoryPoolType {\n  NONE = 0,\n  CUB = 1,\n};\n\n/**\n * Gets the current memory pool type used by Caffe2.\n *\n * The memory pool is set up during caffe2's global initialization time.\n */\nCudaMemoryPoolType GetCudaMemoryPoolType();\n\n/**\n * A struct to host thread-local cuda objects.\n *\n * In Caffe2, each thread has its own non-default cuda stream as well as\n * related objects such as cublas and curand handles. This is achieved by\n * having the ThreadLocalCUDAObjects wrapper that takes care of allocating\n * and deallocating these objects at the thread scope. This class is solely\n * used inside CUDAContext and should not be used externally.\n */\nclass ThreadLocalCUDAObjects {\n  friend class CUDAContext;\n private:\n  ThreadLocalCUDAObjects() {\n    for (int i = 0; i < CAFFE2_COMPILE_TIME_MAX_GPUS; ++i) {\n      cuda_streams_[i] = vector<cudaStream_t>();\n      cublas_handles_[i] = vector<cublasHandle_t>();\n    }\n  }\n\n  cudaStream_t GetStream(int gpu, int stream_id) {\n    vector<cudaStream_t> &gpu_streams = cuda_streams_[gpu];\n    if (gpu_streams.size() <= stream_id) {\n      gpu_streams.resize(stream_id + 1, nullptr);\n    }\n    if (!gpu_streams[stream_id]) {\n      DeviceGuard guard(gpu);\n      CUDA_ENFORCE(cudaStreamCreateWithFlags(\n          &gpu_streams[stream_id], cudaStreamNonBlocking));\n    }\n    return gpu_streams[stream_id];\n  }\n\n  cublasHandle_t GetHandle(int gpu, int stream_id) {\n    DeviceGuard guard(gpu);\n    vector<cublasHandle_t> &gpu_handles = cublas_handles_[gpu];\n    if (gpu_handles.size() <= stream_id) {\n      gpu_handles.resize(stream_id 
+ 1, nullptr);\n    }\n    if (!gpu_handles[stream_id]) {\n      CUBLAS_ENFORCE(cublasCreate(&gpu_handles[stream_id]));\n      // The default is CUBLAS_POINTER_MODE_HOST. You can override\n      // it after obtaining the cublas handle, but do that with\n      // caution.\n      CUBLAS_ENFORCE(cublasSetPointerMode(\n          gpu_handles[stream_id], CUBLAS_POINTER_MODE_HOST));\n      CUBLAS_ENFORCE(\n          cublasSetStream(gpu_handles[stream_id], GetStream(gpu, stream_id)));\n    }\n    return gpu_handles[stream_id];\n  }\n\n  ~ThreadLocalCUDAObjects() noexcept {\n    for (int i = 0; i < CAFFE2_COMPILE_TIME_MAX_GPUS; ++i) {\n      for (auto& handle : cublas_handles_[i]) {\n        if (handle) {\n          CUBLAS_CHECK(cublasDestroy(handle));\n        }\n      }\n      for (auto& stream : cuda_streams_[i]) {\n        if (stream) {\n          CUDA_CHECK(cudaStreamDestroy(stream));\n        }\n      }\n    }\n  }\n  vector<cudaStream_t> cuda_streams_[CAFFE2_COMPILE_TIME_MAX_GPUS];\n  vector<cublasHandle_t> cublas_handles_[CAFFE2_COMPILE_TIME_MAX_GPUS];\n};\n\nclass CUDAContext final {\n public:\n  // The default cuda context constructor.\n  explicit CUDAContext(const int gpu_id = -1);\n  explicit CUDAContext(const DeviceOption& option);\n\n  ~CUDAContext() {\n    if (curand_generator_) {\n      CURAND_ENFORCE(curandDestroyGenerator(curand_generator_));\n    }\n    FinishDeviceComputation();\n  }\n\n  inline void SwitchToDevice(int stream_id) {\n    set_stream_id(stream_id);\n    CaffeCudaSetDevice(gpu_id_);\n  }\n  inline void SwitchToDevice() {\n    SwitchToDevice(0);\n  }\n\n  inline void WaitEvent(const Event& ev) {\n    ev.Wait(CUDA, this);\n  }\n\n  inline void Record(Event* ev) const {\n    CAFFE_ENFORCE(ev, \"Event must not be null.\");\n    ev->Record(CUDA, this);\n  }\n\n  void FinishDeviceComputation() {\n    cudaStreamSynchronize(cuda_objects_.GetStream(gpu_id_, stream_id_));\n    cudaError_t error = cudaGetLastError();\n    if (error != cudaSuccess) {\n  
    CAFFE_THROW(\"Encountered CUDA error: \", cudaGetErrorString(error));\n    }\n  }\n\n  inline int cuda_gpu_id() const { return gpu_id_; }\n\n  inline cudaStream_t cuda_stream() {\n    return cuda_stream(gpu_id_, stream_id_);\n  }\n\n  inline cudaStream_t cuda_stream() const {\n    return cuda_stream(gpu_id_, stream_id_);\n  }\n\n  static cudaStream_t cuda_stream(int gpu_id, int stream_id) {\n    return cuda_objects_.GetStream(gpu_id, stream_id);\n  }\n\n  cublasHandle_t cublas_handle() {\n    return cuda_objects_.GetHandle(gpu_id_, stream_id_);\n  }\n\n  curandGenerator_t& curand_generator() {\n    if (!curand_generator_) {\n      DeviceGuard guard(gpu_id_);\n      CURAND_ENFORCE(\n          curandCreateGenerator(&curand_generator_, CURAND_RNG_PSEUDO_DEFAULT));\n      CURAND_ENFORCE(\n          curandSetPseudoRandomGeneratorSeed(curand_generator_, random_seed_));\n      CHECK_NOTNULL(curand_generator_);\n    }\n    CURAND_ENFORCE(curandSetStream(curand_generator_, cuda_stream()));\n    return curand_generator_;\n  }\n\n  static std::pair<void*, MemoryDeleter> New(size_t nbytes);\n\n  // Get a mutex to lock out cudaMalloc / cudaFree calls when\n  // NCCL kernels are being launched. Should remove threat of\n  // deadlocks\n  static std::mutex& mutex();\n\n  // Functions to query memory stats. 
Only available if flag\n  // --caffe2_gpu_memory_tracking is enabled.\n  static std::vector<long> TotalMemoryByGpu();\n  static std::vector<long> MaxMemoryByGpu();\n\n  template <class SrcContext, class DstContext>\n  inline void CopyBytes(size_t nbytes, const void* src, void* dst) {\n    CUDA_ENFORCE(cudaMemcpyAsync(\n        dst,\n        src,\n        nbytes,\n        cudaMemcpyDefault,\n        cuda_objects_.GetStream(gpu_id_, stream_id_)));\n  }\n\n  template <typename T, class SrcContext, class DstContext>\n  inline void Copy(int n, const T* src, T* dst) {\n    CopyBytes<SrcContext, DstContext>(n * sizeof(T),\n                                 static_cast<const void*>(src),\n                                 static_cast<void*>(dst));\n  }\n\n  template <class SrcContext, class DstContext>\n  inline void\n  CopyItems(const TypeMeta& meta, size_t n, const void* src, void* dst) {\n    CAFFE_ENFORCE(!meta.copy(), \"CUDAContext requires fundamental types.\");\n    CopyBytes<SrcContext, DstContext>(n * meta.itemsize(), src, dst);\n  }\n\n protected:\n  static void Delete(void* data);\n  void set_stream_id(int stream_id) {\n    stream_id_ = stream_id;\n  }\n\n  int gpu_id_;\n  int stream_id_ = 0;\n  int random_seed_;\n  curandGenerator_t curand_generator_{nullptr};\n  static thread_local ThreadLocalCUDAObjects cuda_objects_;\n};\n\n// For the CPU context, we also allow a (probably expensive) function\n// to copy the data from a cuda context. Inside the function, we create\n// a temporary CUDAContext object to carry out the copy. 
From the caller's\n// side, these functions are synchronous with respect to the host, similar\n// to a normal CPUContext::CopyBytes<CPUContext, CPUContext> call.\ntemplate<>\ninline void CPUContext::CopyBytes<CUDAContext, CPUContext>(\n    size_t nbytes, const void* src, void* dst) {\n  CUDAContext context(GetGPUIDForPointer(src));\n  context.CopyBytes<CUDAContext, CPUContext>(nbytes, src, dst);\n}\ntemplate<>\ninline void CPUContext::CopyBytes<CPUContext, CUDAContext>(\n    size_t nbytes, const void* src, void* dst) {\n  CUDAContext context(GetGPUIDForPointer(dst));\n  context.CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst);\n}\n\n/**\n * An allocator that does the CPU memory allocation with pinned memory.\n *\n * This is needed because if we want to do any asynchronous cuda memcpy,\n * the underlying CPU memory also needs to be allocated into pinned memory\n * space. As a result, whenever Caffe2 is built with GPU and there is\n * GPU present during runtime, at global initialization time we will set\n * the CPU memory allocator to allocate pinned memory.\n */\nstruct PinnedCPUAllocator final : CPUAllocator {\n  PinnedCPUAllocator() {}\n  ~PinnedCPUAllocator() override {}\n  std::pair<void*, MemoryDeleter> New(size_t nbytes) override {\n    void* data;\n    std::lock_guard<std::mutex> lock(CUDAContext::mutex());\n    CUDA_ENFORCE(cudaMallocHost(&data, nbytes));\n    memset(data, 0, nbytes);\n    return {data, Delete};\n  }\n\n  MemoryDeleter GetDeleter() override {\n    return Delete;\n  }\n\n private:\n  static void Delete(void* data) {\n    // Caffe2 uses a lazy way to figure out if one is actually going to use GPUs\n    // or not. 
If a CUDAContext::New() call is made, inside the CUDAContext\n    // function we will switch the cpu side allocator to a PinnedCPUAllocator.\n    // But, if one calls CPUContext::New() before any cuda allocations,\n    // PinnedCPUAllocator can still delete the corresponding memory.\n    std::lock_guard<std::mutex> lock(CUDAContext::mutex());\n    cudaError_t err = cudaFreeHost(data);\n    if (err == cudaErrorInvalidValue) {\n      free(data);\n      // Calling cudaGetLastError will reset the cuda error.\n      cudaGetLastError();\n    } else {\n      // For all other errors, still do a cuda check.\n      CUDA_ENFORCE(err);\n    }\n  }\n};\n\n// For simplicity, we will typedef Tensor<CPUContext> to TensorCPU.\ntypedef Tensor<CUDAContext> TensorCUDA;\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_CORE_CONTEXT_GPU_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/db.h",
    "content": "#ifndef CAFFE2_CORE_DB_H_\n#define CAFFE2_CORE_DB_H_\n\n#include <mutex>\n\n#include \"caffe2/core/blob_serialization.h\"\n#include \"caffe2/core/registry.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nnamespace caffe2 {\nnamespace db {\n\n/**\n * The mode of the database, whether we are doing a read, write, or creating\n * a new database.\n */\nenum Mode { READ, WRITE, NEW };\n\n/**\n * An abstract class for the cursor of the database while reading.\n */\nclass Cursor {\n public:\n  Cursor() { }\n  virtual ~Cursor() { }\n  /**\n   * Seek to a specific key (or if the key does not exist, seek to the\n   * immediate next). This is optional for dbs, and in default, SupportsSeek()\n   * returns false meaning that the db cursor does not support it.\n   */\n  virtual void Seek(const string& key) = 0;\n  virtual bool SupportsSeek() { return false; }\n  /**\n   * Seek to the first key in the database.\n   */\n  virtual void SeekToFirst() = 0;\n  /**\n   * Go to the next location in the database.\n   */\n  virtual void Next() = 0;\n  /**\n   * Returns the current key.\n   */\n  virtual string key() = 0;\n  /**\n   * Returns the current value.\n   */\n  virtual string value() = 0;\n  /**\n   * Returns whether the current location is valid - for example, if we have\n   * reached the end of the database, return false.\n   */\n  virtual bool Valid() = 0;\n\n  DISABLE_COPY_AND_ASSIGN(Cursor);\n};\n\n/**\n * An abstract class for the current database transaction while writing.\n */\nclass Transaction {\n public:\n  Transaction() { }\n  virtual ~Transaction() { }\n  /**\n   * Puts the key value pair to the database.\n   */\n  virtual void Put(const string& key, const string& value) = 0;\n  /**\n   * Commits the current writes.\n   */\n  virtual void Commit() = 0;\n\n  DISABLE_COPY_AND_ASSIGN(Transaction);\n};\n\n/**\n * An abstract class for accessing a database of key-value pairs.\n */\nclass DB {\n public:\n  DB(const string& /*source*/, Mode mode) : mode_(mode) 
{}\n  virtual ~DB() { }\n  /**\n   * Closes the database.\n   */\n  virtual void Close() = 0;\n  /**\n   * Returns a cursor to read the database. The caller takes the ownership of\n   * the pointer.\n   */\n  virtual std::unique_ptr<Cursor> NewCursor() = 0;\n  /**\n   * Returns a transaction to write data to the database. The caller takes the\n   * ownership of the pointer.\n   */\n  virtual std::unique_ptr<Transaction> NewTransaction() = 0;\n\n protected:\n  Mode mode_;\n\n  DISABLE_COPY_AND_ASSIGN(DB);\n};\n\n// Database classes are registered by their names so we can do optional\n// dependencies.\nCAFFE_DECLARE_REGISTRY(Caffe2DBRegistry, DB, const string&, Mode);\n#define REGISTER_CAFFE2_DB(name, ...) \\\n  CAFFE_REGISTER_CLASS(Caffe2DBRegistry, name, __VA_ARGS__)\n\n/**\n * Returns a database object of the given database type, source and mode. The\n * caller takes the ownership of the pointer. If the database type is not\n * supported, a nullptr is returned. The caller is responsible for examining the\n * validity of the pointer.\n */\ninline unique_ptr<DB> CreateDB(\n    const string& db_type, const string& source, Mode mode) {\n  auto result = Caffe2DBRegistry()->Create(db_type, source, mode);\n  VLOG(1) << ((!result) ? \"not found db \" : \"found db \") << db_type;\n  return result;\n}\n\n/**\n * Returns whether or not a database exists given the database type and path.\n */\ninline bool DBExists(const string& db_type, const string& full_db_name) {\n  // Warning! We assume that creating a DB throws an exception if the DB\n  // does not exist. If the DB constructor does not follow this design\n  // pattern,\n  // the returned output (the existence tensor) can be wrong.\n  try {\n    std::unique_ptr<DB> db(\n        caffe2::db::CreateDB(db_type, full_db_name, caffe2::db::READ));\n    return true;\n  } catch (...) 
{\n    return false;\n  }\n}\n\n/**\n * A reader wrapper for DB that also allows us to serialize it.\n */\nclass DBReader {\n public:\n\n  friend class DBReaderSerializer;\n  DBReader() {}\n\n  DBReader(\n      const string& db_type,\n      const string& source,\n      const int32_t num_shards = 1,\n      const int32_t shard_id = 0) {\n    Open(db_type, source, num_shards, shard_id);\n  }\n\n  explicit DBReader(const DBReaderProto& proto) {\n    Open(proto.db_type(), proto.source());\n    if (proto.has_key()) {\n      CAFFE_ENFORCE(cursor_->SupportsSeek(),\n          \"Encountering a proto that needs seeking but the db type \"\n          \"does not support it.\");\n      cursor_->Seek(proto.key());\n    }\n    num_shards_ = 1;\n    shard_id_ = 0;\n  }\n\n  explicit DBReader(std::unique_ptr<DB> db)\n      : db_type_(\"<memory-type>\"),\n        source_(\"<memory-source>\"),\n        db_(std::move(db)) {\n    CAFFE_ENFORCE(db_.get(), \"Passed null db\");\n    cursor_ = db_->NewCursor();\n  }\n\n  void Open(\n      const string& db_type,\n      const string& source,\n      const int32_t num_shards = 1,\n      const int32_t shard_id = 0) {\n    // Note(jiayq): resetting is needed when we re-open e.g. leveldb where no\n    // concurrent access is allowed.\n    cursor_.reset();\n    db_.reset();\n    db_type_ = db_type;\n    source_ = source;\n    db_ = CreateDB(db_type_, source_, READ);\n    CAFFE_ENFORCE(db_, \"Cannot open db: \", source_, \" of type \", db_type_);\n    InitializeCursor(num_shards, shard_id);\n  }\n\n  void Open(\n      unique_ptr<DB>&& db,\n      const int32_t num_shards = 1,\n      const int32_t shard_id = 0) {\n    cursor_.reset();\n    db_.reset();\n    db_ = std::move(db);\n    CAFFE_ENFORCE(db_.get(), \"Passed null db\");\n    InitializeCursor(num_shards, shard_id);\n  }\n\n public:\n  /**\n   * Read a set of key and value from the db and move to next. 
Thread safe.\n   *\n   * The string objects key and value must be created by the caller and\n   * explicitly passed in to this function. This saves one additional object\n   * copy.\n   *\n   * If the cursor reaches its end, the reader will go back to the head of\n   * the db. This function can be used to enable multiple input ops to read\n   * the same db.\n   *\n   * Note(jiayq): we loosen the definition of a const function here a little\n   * bit: the state of the cursor is actually changed. However, this allows\n   * us to pass in a DBReader to an Operator without the need of a duplicated\n   * output blob.\n   */\n  void Read(string* key, string* value) const {\n    CAFFE_ENFORCE(cursor_ != nullptr, \"Reader not initialized.\");\n    std::unique_lock<std::mutex> mutex_lock(reader_mutex_);\n    *key = cursor_->key();\n    *value = cursor_->value();\n\n    // In sharded mode, each read skips num_shards_ records\n    for (int s = 0; s < num_shards_; s++) {\n      cursor_->Next();\n      if (!cursor_->Valid()) {\n        MoveToBeginning();\n        break;\n      }\n    }\n  }\n\n  /**\n   * @brief Seeks to the first key. Thread safe.\n   */\n  void SeekToFirst() const {\n    CAFFE_ENFORCE(cursor_ != nullptr, \"Reader not initialized.\");\n    std::unique_lock<std::mutex> mutex_lock(reader_mutex_);\n    MoveToBeginning();\n  }\n\n  /**\n   * Returns the underlying cursor of the db reader.\n   *\n   * Note that if you directly use the cursor, the read will not be thread\n   * safe, because there is no mechanism to stop multiple threads from\n   * accessing the same cursor. You should consider using Read() explicitly.\n   */\n  inline Cursor* cursor() const {\n    LOG(ERROR) << \"Usually for a DBReader you should use Read() to be \"\n                  \"thread safe. 
Consider refactoring your code.\";\n    return cursor_.get();\n  }\n\n private:\n  void InitializeCursor(const int32_t num_shards, const int32_t shard_id) {\n    CAFFE_ENFORCE(num_shards >= 1);\n    CAFFE_ENFORCE(shard_id >= 0);\n    CAFFE_ENFORCE(shard_id < num_shards);\n    num_shards_ = num_shards;\n    shard_id_ = shard_id;\n    cursor_ = db_->NewCursor();\n    SeekToFirst();\n  }\n\n  void MoveToBeginning() const {\n    cursor_->SeekToFirst();\n    for (auto s = 0; s < shard_id_; s++) {\n      cursor_->Next();\n      CAFFE_ENFORCE(\n          cursor_->Valid(), \"Db has less rows than shard id: \", s, shard_id_);\n    }\n  }\n\n  string db_type_;\n  string source_;\n  unique_ptr<DB> db_;\n  unique_ptr<Cursor> cursor_;\n  mutable std::mutex reader_mutex_;\n  uint32_t num_shards_;\n  uint32_t shard_id_;\n\n  DISABLE_COPY_AND_ASSIGN(DBReader);\n};\n\nclass DBReaderSerializer : public BlobSerializerBase {\n public:\n  /**\n   * Serializes a DBReader. Note that this blob has to contain DBReader,\n   * otherwise this function produces a fatal error.\n   */\n  void Serialize(\n      const Blob& blob,\n      const string& name,\n      BlobSerializerBase::SerializationAcceptor acceptor) override;\n};\n\nclass DBReaderDeserializer : public BlobDeserializerBase {\n public:\n  void Deserialize(const BlobProto& proto, Blob* blob) override;\n};\n\n}  // namespace db\n}  // namespace caffe2\n\n#endif  // CAFFE2_CORE_DB_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/event.h",
    "content": "#ifndef CAFFE2_CORE_EVENT_H_\n#define CAFFE2_CORE_EVENT_H_\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nnamespace caffe2 {\n\nconstexpr int MaxDeviceTypes = DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES;\nclass Event;\n\n// For the following functions, void* shall be interpreted as the corresponding\n// context object corresponding to the device type associated with the\n// functions.\ntypedef void (*EventCreateFunction)(const DeviceOption& option, Event*);\ntypedef void (*EventRecordFunction)(const void*, Event*);\ntypedef void (*EventWaitFunction)(const Event*, void*);\ntypedef void (*EventFinishFunction)(const Event*);\n\nclass Event {\n public:\n  explicit Event(const DeviceOption& option)\n      : event_(), type_(option.device_type()) {\n    CAFFE_ENFORCE_LT(type_, MaxDeviceTypes);\n    CAFFE_ENFORCE(event_creator_[type_]);\n    event_creator_[type_](option, this);\n  }\n\n  // Nothing needs to be done in the destructor, as the event creator should\n  // set the proper destruction process for the unique_ptr.\n  ~Event() {}\n\n  void Record(int recorder_type, const void* context) {\n    CAFFE_ENFORCE_EQ(\n        recorder_type,\n        type_,\n        \"You are trying to record with a wrong device type.\");\n    CAFFE_ENFORCE(event_recorder_[recorder_type]);\n    event_recorder_[recorder_type](context, this);\n  }\n\n  void Wait(int waiter_type, void* context) const {\n    CAFFE_ENFORCE(event_waiter_[waiter_type][type_]);\n    event_waiter_[waiter_type][type_](this, context);\n  }\n\n  void Finish() const {\n    CAFFE_ENFORCE(event_finisher_[type_]);\n    event_finisher_[type_](this);\n  }\n\n  // event_ is going to be accessed by the EventCreate/Record/Wait/Finish\n  // functions, but one should not use it outside the own Event functionalities.\n  // In the future we may move it to a private member.\n  std::shared_ptr<void> event_;\n\n private:\n  int type_;\n  static 
EventCreateFunction event_creator_[MaxDeviceTypes];\n  static EventRecordFunction event_recorder_[MaxDeviceTypes];\n  static EventWaitFunction event_waiter_[MaxDeviceTypes][MaxDeviceTypes];\n  static EventFinishFunction event_finisher_[MaxDeviceTypes];\n\n  template <int d>\n  friend struct EventCreateFunctionRegisterer;\n  template <int d>\n  friend struct EventRecordFunctionRegisterer;\n  template <int w, int d>\n  friend struct EventWaitFunctionRegisterer;\n  template <int d>\n  friend struct EventFinishFunctionRegisterer;\n};\n\ntemplate <int d>\nstruct EventCreateFunctionRegisterer {\n  explicit EventCreateFunctionRegisterer(EventCreateFunction f) {\n    static_assert(d < MaxDeviceTypes, \"\");\n    Event::event_creator_[d] = f;\n  }\n};\n#define REGISTER_EVENT_CREATE_FUNCTION(d, f)                     \\\n  namespace {                                                    \\\n  static EventCreateFunctionRegisterer<d> g_event_create_##d(f); \\\n  }\n\ntemplate <int d>\nstruct EventRecordFunctionRegisterer {\n  explicit EventRecordFunctionRegisterer(EventRecordFunction f) {\n    static_assert(d < MaxDeviceTypes, \"\");\n    Event::event_recorder_[d] = f;\n  }\n};\n#define REGISTER_EVENT_RECORD_FUNCTION(d, f)                     \\\n  namespace {                                                    \\\n  static EventRecordFunctionRegisterer<d> g_event_record_##d(f); \\\n  }\n\ntemplate <int waiter_type, int event_type>\nstruct EventWaitFunctionRegisterer {\n  explicit EventWaitFunctionRegisterer(EventWaitFunction f) {\n    static_assert(waiter_type < MaxDeviceTypes, \"\");\n    static_assert(event_type < MaxDeviceTypes, \"\");\n    Event::event_waiter_[waiter_type][event_type] = f;\n  }\n};\n#define REGISTER_EVENT_WAIT_FUNCTION(w, d, f)                           \\\n  namespace {                                                           \\\n  static EventWaitFunctionRegisterer<w, d> g_event_record_##w##_##d(f); \\\n  }\n\ntemplate <int d>\nstruct 
EventFinishFunctionRegisterer {\n  explicit EventFinishFunctionRegisterer(EventFinishFunction f) {\n    static_assert(d < MaxDeviceTypes, \"\");\n    Event::event_finisher_[d] = f;\n  }\n};\n#define REGISTER_EVENT_FINISH_FUNCTION(d, f)                     \\\n  namespace {                                                    \\\n  static EventFinishFunctionRegisterer<d> g_event_finish_##d(f); \\\n  }\n\n} // namespace caffe2\n\n#endif // CAFFE2_CORE_EVENT_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/flags.h",
    "content": "/**\n * @file flags.h\n * @brief Commandline flags support for Caffe2.\n *\n * This is a portable commandline flags tool for caffe2, so we can optionally\n * choose to use gflags or a lightweighted custom implementation if gflags is\n * not possible on a certain platform. If you have gflags installed, set the\n * macro CAFFE2_USE_GFLAGS will seamlessly route everything to gflags.\n *\n * To define a flag foo of type bool default to true, do the following in the\n * *global* namespace:\n *     CAFFE2_DEFINE_bool(foo, true, \"An example.\");\n *\n * To use it in another .cc file, you can use CAFFE2_DECLARE_* as follows:\n *     CAFFE2_DECLARE_bool(foo);\n *\n * In both cases, you can then access the flag via caffe2::FLAGS_foo.\n */\n\n#ifndef CAFFE2_CORE_FLAGS_H_\n#define CAFFE2_CORE_FLAGS_H_\n\n#include \"caffe2/core/registry.h\"\n\nnamespace caffe2 {\n/**\n * Sets the usage message when a commandline tool is called with \"--help\".\n */\nvoid SetUsageMessage(const string& str);\n\n/**\n * Returns the usage message for the commandline tool set by SetUsageMessage.\n */\nconst char* UsageMessage();\n\n/**\n * Parses the commandline flags.\n *\n * This command parses all the commandline arguments passed in via pargc\n * and argv. Once it is finished, partc and argv will contain the remaining\n * commandline args that caffe2 does not deal with. 
Note that following\n * convention, argv[0] contains the binary name and is not parsed.\n */\nbool ParseCaffeCommandLineFlags(int* pargc, char*** pargv);\n/**\n * Checks if the commandline flags has already been passed.\n */\nbool CommandLineFlagsHasBeenParsed();\n\n}  // namespace caffe2\n\n\n////////////////////////////////////////////////////////////////////////////////\n// Below are gflags and non-gflags specific implementations.\n////////////////////////////////////////////////////////////////////////////////\n\n#ifdef CAFFE2_USE_GFLAGS\n\n#include <gflags/gflags.h>\n\n// gflags before 2.0 uses namespace google and after 2.1 uses namespace gflags.\n// Using GFLAGS_GFLAGS_H_ to capture this change.\n#ifndef GFLAGS_GFLAGS_H_\nnamespace gflags = google;\n#endif  // GFLAGS_GFLAGS_H_\n\n#define CAFFE2_GFLAGS_DEF_WRAPPER(type, name, default_value, help_str)         \\\n  DEFINE_##type(name, default_value, help_str);                                \\\n  namespace caffe2 {                                                           \\\n    using ::FLAGS_##name;                                                      \\\n  }\n\n#define CAFFE2_DEFINE_int(name, default_value, help_str)                       \\\n  CAFFE2_GFLAGS_DEF_WRAPPER(int32, name, default_value, help_str)\n#define CAFFE2_DEFINE_int64(name, default_value, help_str)                     \\\n  CAFFE2_GFLAGS_DEF_WRAPPER(int64, name, default_value, help_str)              \n#define CAFFE2_DEFINE_double(name, default_value, help_str)                    \\\n  CAFFE2_GFLAGS_DEF_WRAPPER(double, name, default_value, help_str)\n#define CAFFE2_DEFINE_bool(name, default_value, help_str)                      \\\n  CAFFE2_GFLAGS_DEF_WRAPPER(bool, name, default_value, help_str)\n#define CAFFE2_DEFINE_string(name, default_value, help_str) \\\n  CAFFE2_GFLAGS_DEF_WRAPPER(string, name, default_value, help_str)\n\n// DECLARE_typed_var should be used in header files and in the global namespace.\n#define 
CAFFE2_GFLAGS_DECLARE_WRAPPER(type, name)                             \\\n  DECLARE_##type(name);                                                       \\\n  namespace caffe2 {                                                          \\\n    using ::FLAGS_##name;                                                     \\\n  }  // namespace caffe2\n\n#define CAFFE2_DECLARE_int(name) CAFFE2_GFLAGS_DECLARE_WRAPPER(int32, name)\n#define CAFFE2_DECLARE_int64(name) CAFFE2_GFLAGS_DECLARE_WRAPPER(int64, name)\n#define CAFFE2_DECLARE_double(name) CAFFE2_GFLAGS_DECLARE_WRAPPER(double, name)\n#define CAFFE2_DECLARE_bool(name) CAFFE2_GFLAGS_DECLARE_WRAPPER(bool, name)\n#define CAFFE2_DECLARE_string(name) CAFFE2_GFLAGS_DECLARE_WRAPPER(string, name)\n\n#else   // CAFFE2_USE_GFLAGS\n\nnamespace caffe2 {\n\nclass Caffe2FlagParser {\n public:\n  Caffe2FlagParser() {}\n  bool success() { return success_; }\n\n protected:\n  template <typename T>\n  bool Parse(const string& content, T* value);\n  bool success_;\n};\n\nCAFFE_DECLARE_REGISTRY(Caffe2FlagsRegistry, Caffe2FlagParser, const string&);\n\n}  // namespace caffe2\n\n// The macros are defined outside the caffe2 namespace. 
In your code, you should\n// write the CAFFE2_DEFINE_* and CAFFE2_DECLARE_* macros outside any namespace\n// as well.\n\n#define CAFFE2_DEFINE_typed_var(type, name, default_value, help_str)           \\\n  namespace caffe2 {                                                           \\\n    type FLAGS_##name = default_value;                                         \\\n    namespace {                                                                \\\n      class Caffe2FlagParser_##name : public Caffe2FlagParser {                \\\n       public:                                                                 \\\n        explicit Caffe2FlagParser_##name(const string& content) {              \\\n          success_ = Caffe2FlagParser::Parse<type>(content, &FLAGS_##name);    \\\n        }                                                                      \\\n      };                                                                       \\\n    }                                                                          \\\n    RegistererCaffe2FlagsRegistry g_Caffe2FlagsRegistry_##name(                \\\n      #name, Caffe2FlagsRegistry(),                                            \\\n      RegistererCaffe2FlagsRegistry::DefaultCreator<Caffe2FlagParser_##name>,  \\\n      \"(\" #type \", default \" #default_value \") \" help_str);                    \\\n  }\n\n#define CAFFE2_DEFINE_int(name, default_value, help_str)                       \\\n  CAFFE2_DEFINE_typed_var(int, name, default_value, help_str)\n#define CAFFE2_DEFINE_int64(name, default_value, help_str) \\\n  CAFFE2_DEFINE_typed_var(int64_t, name, default_value, help_str)\n#define CAFFE2_DEFINE_double(name, default_value, help_str) \\\n  CAFFE2_DEFINE_typed_var(double, name, default_value, help_str)\n#define CAFFE2_DEFINE_bool(name, default_value, help_str)                      \\\n  CAFFE2_DEFINE_typed_var(bool, name, default_value, help_str)\n#define CAFFE2_DEFINE_string(name, default_value, help_str)         
           \\\n  CAFFE2_DEFINE_typed_var(string, name, default_value, help_str)\n\n// DECLARE_typed_var should be used in header files and in the global namespace.\n#define CAFFE2_DECLARE_typed_var(type, name)                                   \\\n  namespace caffe2 {                                                           \\\n    extern type FLAGS_##name;                                                  \\\n  }  // namespace caffe2\n\n#define CAFFE2_DECLARE_int(name) CAFFE2_DECLARE_typed_var(int, name)\n#define CAFFE2_DECLARE_int64(name) CAFFE2_DECLARE_typed_var(int64_t, name)\n#define CAFFE2_DECLARE_double(name) CAFFE2_DECLARE_typed_var(double, name)\n#define CAFFE2_DECLARE_bool(name) CAFFE2_DECLARE_typed_var(bool, name)\n#define CAFFE2_DECLARE_string(name) CAFFE2_DECLARE_typed_var(string, name)\n\n#endif  // CAFFE2_USE_GFLAGS\n\n#endif  // CAFFE2_CORE_FLAGS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/graph.h",
    "content": "#pragma once\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n#include \"caffe2/utils/proto_utils.h\"\n#include \"caffe2/utils/string_utils.h\"\n\n#include <algorithm>\n#include <unordered_map>\n#include <unordered_set>\n\nnamespace caffe2 {\n\nnamespace transform {\n\n/**\n *  Graph representation of an operator.\n */\nstruct Node {\n public:\n  // Empty constructor for resize\n  Node() {}\n\n  // Alternate constructor\n  Node(\n      const OperatorDef& op,\n      bool active,\n      std::map<int, std::vector<string>> parents,\n      std::map<int, std::vector<string>> children)\n      : op(op), active(active), parents(parents), children(children) {}\n\n  // The OperatorDef which this node represents.\n  OperatorDef op;\n\n  // Keeps track of if an operator has been deleted through a transformation.\n  bool active = true;\n\n  // Stores a pair (idx, blob_list),\n  //  idx = index of the child\n  //  blob_list = a list of strings, containing the blobs that connect the nodes\n  std::map<int, std::vector<string>> parents;\n  std::map<int, std::vector<string>> children;\n};\n\n/**\n *  Graph representation of a Netdef.\n */\nstruct Graph {\n public:\n  /**\n   * Given a subgraph, gets all of the parents of the subgraph, as well as\n   * their associated blob names. Sorted by blob names.\n   *\n   * <string, int> := (name of blob writing into subgraph,\n   *                  index of node that writes into subgraph using that blob)\n   */\n  const std::vector<std::pair<string, int>> GetSubgraphInput(\n      const std::vector<int>& subgraph);\n\n  /**\n   * Given a subgraph, gets all of the children of the subgraph, as well as\n   * their associated blob names. 
Sorted by blob names.\n   *\n   * <string, int> := (name of blob reading from subgraph,\n   *                  index of node that reads from subgraph using that blob)\n   */\n  const std::vector<std::pair<string, int>> GetSubgraphOutput(\n      const std::vector<int>& subgraph);\n\n  /**\n   * Graph generation.\n   * Given a netdef, returns a Graph.\n   *\n   * Each node represents an operator.\n   * An edge exists between two nodes if the parent op writes to a blob, which\n   * is the input of the child blob, with no other op writing to the blob in\n   * between the execution order.\n   *\n   * Time Complexity: O(E), where E is the number of blobs\n   */\n  explicit Graph(const NetDef& net_def);\n\n  /**\n   * Generates a NetDef Representation for the current graph.\n   * Nodes are visited in topological order, which is proper Opdef ordering.\n   * TODO(benz):\n   * There exists conflicts with repeated blob names, where topological sorting\n   * is not sufficient for correct netdef representation, unless blobs are\n   * renamed.\n   * For example, if after a transformation, We have operator ancestry:\n   * A --> B --> C, and also A --> D --> E, where B -> C and D -> E uses the\n   * same blob name, then A, B, D, E, C is a correct topological ordering,\n   * but D will write to the blob that C reads from, instead of B.\n   * Currently believe that there will always be ambiguity unless blobs are\n   * renamed.\n   * This is solved by performing SSA on all transformed blob names.\n   */\n  NetDef GetNetDef();\n\n  /**\n   * Deactivate a subgraph, and get rid of all edges into this subgraph.\n   */\n  void DeactivateSubgraph(std::vector<int> subgraph);\n\n  const size_t size() const {\n    return nodes_.size();\n  }\n\n  void push_node(const Node& new_node) {\n    return nodes_.push_back(new_node);\n  }\n\n  void resize_nodes(size_t new_size) {\n    nodes_.resize(new_size);\n  }\n\n  // Index safe, less verbose way to access nodes\n  inline const Node& node(size_t 
idx) const {\n    return nodes_.at(idx);\n  }\n\n  inline Node& node(size_t idx) {\n    return nodes_.at(idx);\n  }\n\n  inline bool is_node_active(size_t idx) {\n    return node(idx).active;\n  }\n\n  inline const std::set<string>& external_input() const {\n    return external_input_;\n  }\n\n  inline const std::set<string>& external_output() const {\n    return external_output_;\n  }\n\n private:\n  const std::vector<std::pair<string, int>> GetSubgraphPerimeterHelper(\n      bool from_children,\n      const std::vector<int>& match);\n\n  // Stores the netdef representation. Is updated upon calls to GetNetDef.\n  NetDef netdef_;\n\n  // Stores which blobs the graph reads from, and writes to.\n  std::set<string> external_input_;\n  std::set<string> external_output_;\n\n  // Keeps track of all the Operators currently within graph, even if inactive.\n  std::vector<Node> nodes_;\n};\n\n} // namespace transform\n\n// Adds an operator def to a netdef.\n// Returns the ptr, if you want to add anything extra (such as device_option)\nOperatorDef* AddOp(\n    NetDef* netdef_ptr,\n    string op_type,\n    std::vector<string> inputs,\n    std::vector<string> outputs);\n\n/**\n * This allows for the use of * and | to match operator types,\n * engines, or any other property that is represented by strings.\n *\n * For example, if we wanted to match an operator to Conv or FC, we can give:\n * \"Conv|FC\" as the type() of that op.\n */\nbool MatchStrings(string p, string s);\n\n/**\n * This ensures that each named arg that exists in the pattern exists in g_op,\n * is equal in value.\n */\nbool MatchArguments(const OperatorDef& p_op, const OperatorDef& g_op);\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/init.h",
    "content": "#ifndef CAFFE2_CORE_INIT_H_\n#define CAFFE2_CORE_INIT_H_\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/flags.h\"\n#include \"caffe2/core/logging.h\"\n\nnamespace caffe2 {\n\nnamespace internal {\nclass Caffe2InitializeRegistry {\n public:\n  typedef bool (*InitFunction)(int*, char***);\n  // Registry() is defined in .cpp file to make registration work across\n  // multiple shared libraries loaded with RTLD_LOCAL\n  static Caffe2InitializeRegistry* Registry();\n\n  void Register(InitFunction function, bool run_early,\n                const char* description) {\n    if (run_early) {\n      early_init_functions_.emplace_back(function, description);\n    } else {\n      init_functions_.emplace_back(function, description);\n    }\n  }\n\n  bool RunRegisteredEarlyInitFunctions(int* pargc, char*** pargv) {\n    return RunRegisteredInitFunctionsInternal(\n        early_init_functions_, pargc, pargv);\n  }\n\n  bool RunRegisteredInitFunctions(int* pargc, char*** pargv) {\n    return RunRegisteredInitFunctionsInternal(init_functions_, pargc, pargv);\n  }\n\n private:\n  // Run all registered initialization functions. 
This has to be called AFTER\n  // all static initialization are finished and main() has started, since we are\n  // using logging.\n  bool RunRegisteredInitFunctionsInternal(\n      vector<std::pair<InitFunction, const char*>>& functions,\n      int* pargc, char*** pargv) {\n    for (const auto& init_pair : functions) {\n      VLOG(1) << \"Running init function: \" << init_pair.second;\n      if (!(*init_pair.first)(pargc, pargv)) {\n        LOG(ERROR) << \"Initialization function failed.\";\n        return false;\n      }\n    }\n    return true;\n  }\n\n  Caffe2InitializeRegistry() {}\n  vector<std::pair<InitFunction, const char*> > early_init_functions_;\n  vector<std::pair<InitFunction, const char*> > init_functions_;\n};\n}  // namespace internal\n\nclass InitRegisterer {\n public:\n  InitRegisterer(internal::Caffe2InitializeRegistry::InitFunction function,\n                 bool run_early, const char* description) {\n    internal::Caffe2InitializeRegistry::Registry()\n        ->Register(function, run_early, description);\n  }\n};\n\n#define REGISTER_CAFFE2_INIT_FUNCTION(name, function, description)             \\\n  namespace {                                                                  \\\n  ::caffe2::InitRegisterer g_caffe2_initregisterer_##name(                     \\\n      function, false, description);                                           \\\n  }  // namespace\n\n#define REGISTER_CAFFE2_EARLY_INIT_FUNCTION(name, function, description)       \\\n  namespace {                                                                  \\\n  ::caffe2::InitRegisterer g_caffe2_initregisterer_##name(                     \\\n      function, true, description);                                            \\\n  }  // namespace\n\n/**\n * @brief Initialize the global environment of caffe2.\n *\n * Caffe2 uses a registration pattern for initialization functions. 
Custom\n * initialization functions should take the signature\n *     bool (*func)(int*, char***)\n * where the pointers to argc and argv are passed in. Caffe2 then runs the\n * initialization in three phases:\n * (1) Functions registered with REGISTER_CAFFE2_EARLY_INIT_FUNCTION. Note that\n *     since it is possible the logger is not initialized yet, any logging in\n *     such early init functions may not be printed correctly.\n * (2) Parses Caffe-specific commandline flags, and initializes caffe logging.\n * (3) Functions registered with REGISTER_CAFFE2_INIT_FUNCTION.\n * If there is something wrong at each stage, the function returns false. If\n * the global initialization has already been run, the function returns false\n * as well.\n */\nbool GlobalInit(int* pargc, char*** argv);\n\n}  // namespace caffe2\n#endif  // CAFFE2_CORE_INIT_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/logging.h",
    "content": "#ifndef CAFFE2_CORE_LOGGING_H_\n#define CAFFE2_CORE_LOGGING_H_\n\n#include <climits>\n#include <exception>\n#include <functional>\n#include <limits>\n#include <sstream>\n\n#include \"caffe2/core/flags.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\n// CAFFE2_LOG_THRESHOLD is a compile time flag that would allow us to turn off\n// logging at compile time so no logging message below that level is produced\n// at all. The value should be between INT_MIN and CAFFE_FATAL.\n#ifndef CAFFE2_LOG_THRESHOLD\n// If we have not defined the compile time log threshold, we keep all the\n// log cases.\n#define CAFFE2_LOG_THRESHOLD INT_MIN\n#endif // CAFFE2_LOG_THRESHOLD\n\n// Below are different implementations for glog and non-glog cases.\n#ifdef CAFFE2_USE_GOOGLE_GLOG\n#include \"caffe2/core/logging_is_google_glog.h\"\n#else // !CAFFE2_USE_GOOGLE_GLOG\n#include \"caffe2/core/logging_is_not_google_glog.h\"\n#endif // CAFFE2_USE_GOOGLE_GLOG\n\nCAFFE2_DECLARE_int(caffe2_log_level);\nCAFFE2_DECLARE_bool(caffe2_use_fatal_for_enforce);\n\nnamespace caffe2 {\n// Functions that we use for initialization.\nbool InitCaffeLogging(int* argc, char** argv);\n\nconstexpr bool IsUsingGoogleLogging() {\n#ifdef CAFFE2_USE_GOOGLE_GLOG\n  return true;\n#else\n  return false;\n#endif\n}\n\n/**\n * A utility to allow one to show log info to stderr after the program starts.\n *\n * This is similar to calling GLOG's --logtostderr, or setting caffe2_log_level\n * to smaller than INFO. You are recommended to only use this in a few sparse\n * cases, such as when you want to write a tutorial or something. Normally, use\n * the commandline flags to set the log level.\n */\nvoid ShowLogInfoToStderr();\n\ninline void MakeStringInternal(std::stringstream& /*ss*/) {}\n\ntemplate <typename T>\ninline void MakeStringInternal(std::stringstream& ss, const T& t) {\n  ss << t;\n}\n\ntemplate <typename T, typename... Args>\ninline void\nMakeStringInternal(std::stringstream& ss, const T& t, const Args&... 
args) {\n  MakeStringInternal(ss, t);\n  MakeStringInternal(ss, args...);\n}\n\ntemplate <typename... Args>\nstring MakeString(const Args&... args) {\n  std::stringstream ss;\n  MakeStringInternal(ss, args...);\n  return string(ss.str());\n}\n\n// Specializations for already-a-string types.\ntemplate <>\ninline string MakeString(const string& str) {\n  return str;\n}\ninline string MakeString(const char* c_str) {\n  return string(c_str);\n}\n\ntemplate <class Container>\ninline string Join(const string& delimiter, const Container& v) {\n  std::stringstream s;\n  int cnt = static_cast<int64_t>(v.size()) - 1;\n  for (auto i = v.begin(); i != v.end(); ++i, --cnt) {\n    s << (*i) << (cnt ? delimiter : \"\");\n  }\n  return s.str();\n}\n\n// Obtains the base name from a full path.\nstring StripBasename(const std::string& full_path);\n\n// Replace all occurrences of \"from\" substring to \"to\" string.\n// Returns number of replacements\nsize_t ReplaceAll(string& s, const char* from, const char* to);\n\nvoid SetStackTraceFetcher(std::function<string(void)> fetcher);\n\nvoid SetOperatorLogger(std::function<void(const OperatorDef&)> tracer);\nstd::function<void(const OperatorDef&)> GetOperatorLogger();\n\nclass EnforceNotMet : public std::exception {\n public:\n  EnforceNotMet(\n      const char* file,\n      const int line,\n      const char* condition,\n      const string& msg,\n      const void* caller=nullptr);\n  void AppendMessage(const string& msg);\n  string msg() const;\n  inline const vector<string>& msg_stack() const {\n    return msg_stack_;\n  }\n\n  const char* what() const noexcept override;\n\n  const void* caller() const noexcept;\n\n private:\n  vector<string> msg_stack_;\n  string full_msg_;\n  string stack_trace_;\n  const void* caller_;\n};\n\n#define CAFFE_ENFORCE(condition, ...)                                         
\\\n  do {                                                                        \\\n    if (!(condition)) {                                                       \\\n      throw ::caffe2::EnforceNotMet(                                          \\\n          __FILE__, __LINE__, #condition, ::caffe2::MakeString(__VA_ARGS__)); \\\n    }                                                                         \\\n  } while (false)\n\n#define CAFFE_ENFORCE_WITH_CALLER(condition, ...)                             \\\n  do {                                                                        \\\n    if (!(condition)) {                                                       \\\n      throw ::caffe2::EnforceNotMet(                                          \\\n          __FILE__, __LINE__, #condition, ::caffe2::MakeString(__VA_ARGS__), this); \\\n    }                                                                         \\\n  } while (false)\n\n#define CAFFE_THROW(...)         \\\n  throw ::caffe2::EnforceNotMet( \\\n      __FILE__, __LINE__, \"\", ::caffe2::MakeString(__VA_ARGS__))\n\n/**\n * Rich logging messages\n *\n * CAFFE_ENFORCE_THAT can be used with one of the \"checker functions\" that\n * capture input argument values and add it to the exception message. E.g.\n * `CAFFE_ENFORCE_THAT(Equals(foo(x), bar(y)), \"Optional additional message\")`\n * would evaluate both foo and bar only once and if the results are not equal -\n * include them in the exception message.\n *\n * Some of the basic checker functions like Equals or Greater are already\n * defined below. Other header might define customized checkers by adding\n * functions to caffe2::enforce_detail namespace. 
For example:\n *\n *   namespace caffe2 { namespace enforce_detail {\n *   inline EnforceFailMessage IsVector(const vector<TIndex>& shape) {\n *     if (shape.size() == 1) { return EnforceOK(); }\n *     return MakeString(\"Shape \", shape, \" is not a vector\");\n *   }\n *   }}\n *\n * With further usages like `CAFFE_ENFORCE_THAT(IsVector(Input(0).dims()))`\n *\n * Convenient wrappers for binary operations like CAFFE_ENFORCE_EQ are provided\n * too. Please use them instead of CHECK_EQ and friends for failures in\n * user-provided input.\n */\n\nnamespace enforce_detail {\n\nstruct EnforceOK {};\n\nclass EnforceFailMessage {\n public:\n#ifdef _MSC_VER\n  // MSVC + NVCC ignores constexpr and will issue a warning if included.\n  /* implicit */ EnforceFailMessage(EnforceOK) : msg_(nullptr) {}\n#else\n  constexpr /* implicit */ EnforceFailMessage(EnforceOK) : msg_(nullptr) {}\n#endif\n  EnforceFailMessage(EnforceFailMessage&&) = default;\n  EnforceFailMessage(const EnforceFailMessage&) = delete;\n  EnforceFailMessage& operator=(EnforceFailMessage&&) = delete;\n  EnforceFailMessage& operator=(const EnforceFailMessage&) = delete;\n\n  // Catch all wrong usages like CAFFE_ENFORCE_THAT(x < y)\n  template <class... Args>\n  /* implicit */ EnforceFailMessage(Args...) {\n    static_assert(\n        // This stands for an \"impossible\" condition. Plain `false` doesn't\n        // trick compiler enough.\n        sizeof...(Args) == std::numeric_limits<std::size_t>::max(),\n        \"CAFFE_ENFORCE_THAT has to be used with one of special check functions \"\n        \"like `Equals`. 
Use CAFFE_ENFORCE for simple boolean checks.\");\n  }\n\n  /* implicit */ EnforceFailMessage(std::string&& msg) {\n    msg_ = new std::string(std::move(msg));\n  }\n  inline bool bad() const {\n    return msg_ != nullptr;\n  }\n  std::string get_message_and_free(std::string&& extra) const {\n    std::string r;\n    if (extra.empty()) {\n      r = std::move(*msg_);\n    } else {\n      r = ::caffe2::MakeString(std::move(*msg_), \". \", std::move(extra));\n    }\n    delete msg_;\n    return r;\n  }\n\n private:\n  std::string* msg_;\n};\n\n#define BINARY_COMP_HELPER(name, op)                         \\\n  template <typename T1, typename T2>                        \\\n  inline EnforceFailMessage name(const T1& x, const T2& y) { \\\n    if (x op y) {                                            \\\n      return EnforceOK();                                    \\\n    }                                                        \\\n    return MakeString(x, \" vs \", y);                         \\\n  }\nBINARY_COMP_HELPER(Equals, ==)\nBINARY_COMP_HELPER(NotEquals, !=)\nBINARY_COMP_HELPER(Greater, >)\nBINARY_COMP_HELPER(GreaterEquals, >=)\nBINARY_COMP_HELPER(Less, <)\nBINARY_COMP_HELPER(LessEquals, <=)\n#undef BINARY_COMP_HELPER\n\n#define CAFFE_ENFORCE_THAT_IMPL(condition, expr, ...)                   
\\\n  do {                                                                  \\\n    using namespace ::caffe2::enforce_detail;                           \\\n    const EnforceFailMessage& CAFFE_ENFORCE_THAT_IMPL_r_ = (condition); \\\n    if (CAFFE_ENFORCE_THAT_IMPL_r_.bad()) {                             \\\n      throw ::caffe2::EnforceNotMet(                                    \\\n          __FILE__,                                                     \\\n          __LINE__,                                                     \\\n          expr,                                                         \\\n          CAFFE_ENFORCE_THAT_IMPL_r_.get_message_and_free(              \\\n              ::caffe2::MakeString(__VA_ARGS__)));                      \\\n    }                                                                   \\\n  } while (false)\n\n#define CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(condition, expr, ...)      \\\n  do {                                                                 \\\n    using namespace ::caffe2::enforce_detail;                          \\\n    const EnforceFailMessage& CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER_r_ = \\\n        (condition);                                                   \\\n    if (CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER_r_.bad()) {                \\\n      throw ::caffe2::EnforceNotMet(                                   \\\n          __FILE__,                                                    \\\n          __LINE__,                                                    \\\n          expr,                                                        \\\n          CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER_r_.get_message_and_free( \\\n              ::caffe2::MakeString(__VA_ARGS__)),                      \\\n          this);                                                       \\\n    }                                                                  \\\n  } while (false)\n}\n\n#define CAFFE_ENFORCE_THAT(condition, ...) 
\\\n  CAFFE_ENFORCE_THAT_IMPL((condition), #condition, __VA_ARGS__)\n\n#define CAFFE_ENFORCE_EQ(x, y, ...) \\\n  CAFFE_ENFORCE_THAT_IMPL(Equals((x), (y)), #x \" == \" #y, __VA_ARGS__)\n#define CAFFE_ENFORCE_NE(x, y, ...) \\\n  CAFFE_ENFORCE_THAT_IMPL(NotEquals((x), (y)), #x \" != \" #y, __VA_ARGS__)\n#define CAFFE_ENFORCE_LE(x, y, ...) \\\n  CAFFE_ENFORCE_THAT_IMPL(LessEquals((x), (y)), #x \" <= \" #y, __VA_ARGS__)\n#define CAFFE_ENFORCE_LT(x, y, ...) \\\n  CAFFE_ENFORCE_THAT_IMPL(Less((x), (y)), #x \" < \" #y, __VA_ARGS__)\n#define CAFFE_ENFORCE_GE(x, y, ...) \\\n  CAFFE_ENFORCE_THAT_IMPL(GreaterEquals((x), (y)), #x \" >= \" #y, __VA_ARGS__)\n#define CAFFE_ENFORCE_GT(x, y, ...) \\\n  CAFFE_ENFORCE_THAT_IMPL(Greater((x), (y)), #x \" > \" #y, __VA_ARGS__)\n#define CAFFE_ENFORCE_EQ_WITH_CALLER(x, y, ...) \\\n  CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(Equals((x), (y)), #x \" == \" #y, __VA_ARGS__)\n#define CAFFE_ENFORCE_NE_WITH_CALLER(x, y, ...) \\\n  CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(NotEquals((x), (y)), #x \" != \" #y, __VA_ARGS__)\n#define CAFFE_ENFORCE_LE_WITH_CALLER(x, y, ...) \\\n  CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(LessEquals((x), (y)), #x \" <= \" #y, __VA_ARGS__)\n#define CAFFE_ENFORCE_LT_WITH_CALLER(x, y, ...) \\\n  CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(Less((x), (y)), #x \" < \" #y, __VA_ARGS__)\n#define CAFFE_ENFORCE_GE_WITH_CALLER(x, y, ...) \\\n  CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(GreaterEquals((x), (y)), #x \" >= \" #y, __VA_ARGS__)\n#define CAFFE_ENFORCE_GT_WITH_CALLER(x, y, ...) \\\n  CAFFE_ENFORCE_THAT_IMPL_WITH_CALLER(Greater((x), (y)), #x \" > \" #y, __VA_ARGS__)\n} // namespace caffe2\n\n#endif // CAFFE2_CORE_LOGGING_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/logging_is_google_glog.h",
    "content": "#ifndef CAFFE2_CORE_LOGGING_IS_GOOGLE_GLOG_H_\n#define CAFFE2_CORE_LOGGING_IS_GOOGLE_GLOG_H_\n\n#include <iomanip>  // because some of the caffe2 code uses e.g. std::setw\n// Using google glog. For glog 0.3.2 versions, stl_logging.h needs to be before\n// logging.h to actually use stl_logging. Because template magic.\n// In addition, we do not do stl logging in .cu files because nvcc does not like\n// it. Some mobile platforms do not like stl_logging, so we add an\n// overload in that case as well.\n\n#if !defined(__CUDACC__) && !defined(CAFFE2_USE_MINIMAL_GOOGLE_GLOG)\n#include <glog/stl_logging.h>\n#else // !defined(__CUDACC__) && !!defined(CAFFE2_USE_MINIMAL_GOOGLE_GLOG)\n\n// here, we need to register a fake overload for vector/string - here,\n// we just ignore the entries in the logs.\n\n#define INSTANTIATE_FOR_CONTAINER(container)                                \\\n  template <class... Types>                                                 \\\n  std::ostream& operator<<(std::ostream& out, const container<Types...>&) { \\\n    return out;                                                             \\\n  }\n\nINSTANTIATE_FOR_CONTAINER(std::vector)\nINSTANTIATE_FOR_CONTAINER(std::map)\nINSTANTIATE_FOR_CONTAINER(std::set)\n#undef INSTANTIATE_FOR_CONTAINER\n\n#endif\n\n#include <glog/logging.h>\n\n\n#endif  // CAFFE2_CORE_LOGGING_IS_GOOGLE_GLOG_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/logging_is_not_google_glog.h",
    "content": "#ifndef CAFFE2_CORE_LOGGING_IS_NOT_GOOGLE_GLOG_H_\n#define CAFFE2_CORE_LOGGING_IS_NOT_GOOGLE_GLOG_H_\n\n#include <chrono>\n#include <climits>\n#include <ctime>\n#include <iomanip>\n#include <string>\n#include <fstream>\n#include <set>\n#include <sstream>\n#include <vector>\n\n#include \"caffe2/core/flags.h\"\n\n// Log severity level constants.\nconst int FATAL   = 3;\n#if !defined(_MSC_VER) || !defined(ERROR)\n// Windows defines the ERROR macro already, and as a result we will\n// simply use that one. The downside is that one will now mix LOG(INFO)\n// and LOG(ERROR) because ERROR is defined to be zero. Anyway, the\n// recommended way is to use glog so fixing this is a low-pri item.\nconst int ERROR   = 2;\n#endif\nconst int WARNING = 1;\nconst int INFO    = 0;\nconst char CAFFE2_SEVERITY_PREFIX[] = \"FEWIV\";\n\nnamespace caffe2 {\nclass MessageLogger {\n public:\n  MessageLogger(const char *file, int line, int severity);\n  ~MessageLogger();\n  // Return the stream associated with the logger object.\n  std::stringstream &stream() { return stream_; }\n\n private:\n  // When there is a fatal log, we simply abort.\n  void DealWithFatal() { abort(); }\n\n  const char* tag_;\n  std::stringstream stream_;\n  int severity_;\n};\n\n// This class is used to explicitly ignore values in the conditional\n// logging macros.  This avoids compiler warnings like \"value computed\n// is not used\" and \"statement has no effect\".\nclass LoggerVoidify {\n public:\n  LoggerVoidify() { }\n  // This has to be an operator with a precedence lower than << but\n  // higher than ?:\n  void operator&(const std::ostream &s) { }\n};\n\n// Log a message and terminate.\ntemplate<class T>\nvoid LogMessageFatal(const char *file, int line, const T &message) {\n  MessageLogger(file, line, FATAL).stream() << message;\n}\n\n// Helpers for CHECK_NOTNULL(). 
Two are necessary to support both raw pointers\n// and smart pointers.\ntemplate <typename T>\nT& CheckNotNullCommon(const char *file, int line, const char *names, T& t) {\n  if (t == nullptr) {\n    LogMessageFatal(file, line, std::string(names));\n  }\n  return t;\n}\n\ntemplate <typename T>\nT* CheckNotNull(const char *file, int line, const char *names, T* t) {\n  return CheckNotNullCommon(file, line, names, t);\n}\n\ntemplate <typename T>\nT& CheckNotNull(const char *file, int line, const char *names, T& t) {\n  return CheckNotNullCommon(file, line, names, t);\n}\n}  // namespace caffe2\n\n// ---------------------- Logging Macro definitions --------------------------\n\n\nstatic_assert(CAFFE2_LOG_THRESHOLD <= FATAL,\n              \"CAFFE2_LOG_THRESHOLD should at most be FATAL.\");\n// If n is under the compile time caffe log threshold, The _CAFFE_LOG(n)\n// should not generate anything in optimized code.\n#define LOG(n) \\\n  if (n >= CAFFE2_LOG_THRESHOLD) \\\n    ::caffe2::MessageLogger((char*)__FILE__, __LINE__, n).stream()\n#define VLOG(n) LOG((-n))\n\n#define LOG_IF(n, condition)                    \\\n  if (n >= CAFFE2_LOG_THRESHOLD && (condition)) \\\n  ::caffe2::MessageLogger((char*)__FILE__, __LINE__, n).stream()\n#define VLOG_IF(n, condition) LOG_IF((-n), (condition))\n\n// Log only if condition is met.  Otherwise evaluates to void.\n#define FATAL_IF(condition) \\\n  condition ? 
(void) 0 : ::caffe2::LoggerVoidify() & \\\n      ::caffe2::MessageLogger((char*)__FILE__, __LINE__, FATAL).stream()\n\n// Check for a given boolean condition.\n#define CHECK(condition) FATAL_IF(condition) \\\n        << \"Check failed: \" #condition \" \"\n\n#ifndef NDEBUG\n// Debug only version of CHECK\n#define DCHECK(condition) FATAL_IF(condition) \\\n        << \"Check failed: \" #condition \" \"\n#else\n// Optimized version - generates no code.\n#define DCHECK(condition) if(false) CHECK(condition)\n#endif  // NDEBUG\n\n#define CHECK_OP(val1, val2, op) FATAL_IF((val1 op val2)) \\\n  << \"Check failed: \" #val1 \" \" #op \" \" #val2 \" \"\n\n// Check_op macro definitions\n#define CHECK_EQ(val1, val2) CHECK_OP(val1, val2, ==)\n#define CHECK_NE(val1, val2) CHECK_OP(val1, val2, !=)\n#define CHECK_LE(val1, val2) CHECK_OP(val1, val2, <=)\n#define CHECK_LT(val1, val2) CHECK_OP(val1, val2, <)\n#define CHECK_GE(val1, val2) CHECK_OP(val1, val2, >=)\n#define CHECK_GT(val1, val2) CHECK_OP(val1, val2, >)\n\n#ifndef NDEBUG\n// Debug only versions of CHECK_OP macros.\n#define DCHECK_EQ(val1, val2) CHECK_OP(val1, val2, ==)\n#define DCHECK_NE(val1, val2) CHECK_OP(val1, val2, !=)\n#define DCHECK_LE(val1, val2) CHECK_OP(val1, val2, <=)\n#define DCHECK_LT(val1, val2) CHECK_OP(val1, val2, <)\n#define DCHECK_GE(val1, val2) CHECK_OP(val1, val2, >=)\n#define DCHECK_GT(val1, val2) CHECK_OP(val1, val2, >)\n#else  // !NDEBUG\n// These versions generate no code in optimized mode.\n#define DCHECK_EQ(val1, val2) if(false) CHECK_OP(val1, val2, ==)\n#define DCHECK_NE(val1, val2) if(false) CHECK_OP(val1, val2, !=)\n#define DCHECK_LE(val1, val2) if(false) CHECK_OP(val1, val2, <=)\n#define DCHECK_LT(val1, val2) if(false) CHECK_OP(val1, val2, <)\n#define DCHECK_GE(val1, val2) if(false) CHECK_OP(val1, val2, >=)\n#define DCHECK_GT(val1, val2) if(false) CHECK_OP(val1, val2, >)\n#endif  // NDEBUG\n\n// Check that a pointer is not null.\n#define CHECK_NOTNULL(val) \\\n  ::caffe2::CheckNotNull( \\\n    
  __FILE__, __LINE__, \"Check failed: '\" #val \"' Must be non NULL\", (val))\n\n#ifndef NDEBUG\n// Debug only version of CHECK_NOTNULL\n#define DCHECK_NOTNULL(val) \\\n  ::caffe2::CheckNotNull( \\\n      __FILE__, __LINE__, \"Check failed: '\" #val \"' Must be non NULL\", (val))\n#else  // !NDEBUG\n// Optimized version - generates no code.\n#define DCHECK_NOTNULL(val) if (false) CHECK_NOTNULL(val)\n#endif  // NDEBUG\n\n// ---------------------- Support for std objects --------------------------\n// These are adapted from glog to support a limited set of logging capability\n// for STL objects.\n\nnamespace caffe2 {\n// Forward declare these two, and define them after all the container streams\n// operators so that we can recurse from pair -> container -> container -> pair\n// properly.\ntemplate<class First, class Second>\nstd::ostream& operator<<(\n    std::ostream& out, const std::pair<First, Second>& p);\ntemplate <class Iter>\nvoid PrintSequence(std::ostream& ss, Iter begin, Iter end);\n\n#define INSTANTIATE_FOR_CONTAINER(container) \\\ntemplate <class... 
Types> \\\nstd::ostream& operator<<( \\\n    std::ostream& out, const container<Types...>& seq) { \\\n  PrintSequence(out, seq.begin(), seq.end()); \\\n  return out; \\\n}\n\nINSTANTIATE_FOR_CONTAINER(std::vector)\nINSTANTIATE_FOR_CONTAINER(std::map)\nINSTANTIATE_FOR_CONTAINER(std::set)\n#undef INSTANTIATE_FOR_CONTAINER\n\ntemplate<class First, class Second>\ninline std::ostream& operator<<(\n    std::ostream& out, const std::pair<First, Second>& p) {\n  out << '(' << p.first << \", \" << p.second << ')';\n  return out;\n}\n\ntemplate <class Iter>\ninline void PrintSequence(std::ostream& out, Iter begin, Iter end) {\n  // Output at most 100 elements -- appropriate if used for logging.\n  for (int i = 0; begin != end && i < 100; ++i, ++begin) {\n    if (i > 0) out << ' ';\n    out << *begin;\n  }\n  if (begin != end) {\n    out << \" ...\";\n  }\n}\n}  // namespace caffe2\n\n#endif  // CAFFE2_CORE_LOGGING_IS_NOT_GOOGLE_GLOG_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/macros.h",
    "content": "// Automatically generated header file for caffe2 macros. These\n// macros are used to build the Caffe2 binary, and if you are\n// building a dependent library, they will need to be set as well\n// for your program to link correctly.\n\n#pragma once\n\n// Caffe2 version. The plan is to increment the minor version every other week\n// as a track point for bugs, until we find a proper versioning cycle.\n\n#define CAFFE2_VERSION_MAJOR 0\n#define CAFFE2_VERSION_MINOR 8\n#define CAFFE2_VERSION_PATCH 1\n#define CAFFE2_GIT_VERSION \"\"\n\nstatic_assert(\n    CAFFE2_VERSION_MINOR < 100,\n    \"Programming error: you set a minor version that is too big.\");\nstatic_assert(\n    CAFFE2_VERSION_PATCH < 100,\n    \"Programming error: you set a patch version that is too big.\");\n\n#define CAFFE2_VERSION                                         \\\n  (CAFFE2_VERSION_MAJOR * 10000 + CAFFE2_VERSION_MINOR * 100 + \\\n   CAFFE2_VERSION_PATCH)\n\n/* #undef CAFFE2_ANDROID */\n/* #undef CAFFE2_FORCE_FALLBACK_CUDA_MPI */\n/* #undef CAFFE2_HAS_MKL_DNN */\n/* #undef CAFFE2_HAS_MKL_SGEMM_PACK */\n/* #undef CAFFE2_PERF_WITH_AVX */\n/* #undef CAFFE2_PERF_WITH_AVX2 */\n/* #undef CAFFE2_THREADPOOL_MAIN_IMBALANCE */\n/* #undef CAFFE2_THREADPOOL_STATS */\n#define CAFFE2_UNIQUE_LONG_TYPEMETA\n/* #undef CAFFE2_USE_ACCELERATE */\n#define CAFFE2_USE_EIGEN_FOR_BLAS\n/* #undef CAFFE2_USE_FBCODE */\n/* #undef CAFFE2_USE_GFLAGS */\n/* #undef CAFFE2_USE_GOOGLE_GLOG */\n/* #undef CAFFE2_USE_LITE_PROTO */\n/* #undef CAFFE2_USE_MKL */\n/* #undef CAFFE2_USE_NVTX */\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/memonger.h",
    "content": "#ifndef CAFFE2_CORE_MEMONGER_H_\n#define CAFFE2_CORE_MEMONGER_H_\n\n#include <unordered_set>\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/workspace.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nnamespace caffe2 {\nnamespace memonger {\n\nNetDef optimize_inference_net(\n    const NetDef& net,\n    const std::set<string>& static_blobs);\n\nNetDef compute_blob_recycling_for_dag(\n    const NetDef& net,\n    const std::vector<string>& heads,\n    const std::vector<int>& op_indices,\n    const std::unordered_set<string>& shareable_blob_names,\n    const string& namescope,\n    const std::unordered_set<string>& dont_share_blob_names,\n    const std::unordered_map<string, vector<int>>& blob_shapes);\n\n} // memonger\n} // caffe2\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/net.h",
    "content": "#ifndef CAFFE2_CORE_NET_H_\n#define CAFFE2_CORE_NET_H_\n\n#include <atomic>\n#include <climits>\n#include <cstddef>\n#include <thread>  // NOLINT\n#include <typeinfo>\n#include <vector>\n#include <unordered_map>\n\n#include \"caffe2/core/blob.h\"\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/observer.h\"\n#include \"caffe2/core/operator_schema.h\"\n#include \"caffe2/core/registry.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/core/workspace.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n#include \"caffe2/utils/simple_queue.h\"\n\nnamespace caffe2 {\n\nclass NetBase;\ntypedef ObserverBase<NetBase> NetObserver;\ntypedef std::function<std::unique_ptr<NetObserver>(NetBase*)>\n    NetObserverCreator;\n\nclass OperatorBase;\nclass Workspace;\n// Net is a thin struct that owns all the operators together with the operator\n// contexts.\nclass NetBase {\n public:\n  NetBase(const std::shared_ptr<const NetDef>& net_def, Workspace* ws);\n  virtual ~NetBase() noexcept {}\n  virtual bool RunAsync() = 0;\n  virtual bool SupportsAsync() = 0;\n  inline const vector<const Event*>& events() const {\n    return events_;\n  }\n\n  inline bool Run() {\n    if (!RunAsync()) {\n      return false;\n    }\n    for (const Event* event : events_) {\n      event->Finish();\n    }\n    return true;\n  }\n\n  /**\n   * Benchmarks a network.\n   *\n   * This function returns a vector of float recording the number of milli-\n   * seconds spent during the benchmark. 
The 0-th item is the time spent per\n   * each network run, and if a net instantiation supports run_individual,\n   * the remainder of the vector returns the number of milliseconds spent per\n   * opeartor.\n   */\n  virtual vector<float> TEST_Benchmark(\n      const int /*warmup_runs*/,\n      const int /*main_runs*/,\n      const bool /*run_individual*/) {\n    LOG(ERROR) << \"Benchmark not implemented for this net type.\";\n    return vector<float>();\n  }\n\n  inline const vector<string>& external_output() const {\n    return external_output_;\n  }\n\n  inline const vector<string>& external_input() const {\n    return external_input_;\n  }\n\n  /* Used to attach Observers to operators of a Net\n   *\n   * Returns pointers to objects owned with unique_ptrs.\n   * Use with caution.\n   */\n  virtual vector<OperatorBase*> GetOperators() const = 0;\n\n  void SetObserver(std::unique_ptr<NetObserver> observer) {\n    observer_ = std::move(observer);\n  }\n\n  void RemoveObserver() {\n    observer_ = nullptr;\n  }\n\n  NetObserver* GetObserver() {\n    return observer_.get();\n  }\n\n  const string& Name() const {\n    return name_;\n  }\n\n protected:\n  vector<string> external_input_;\n  vector<string> external_output_;\n  string name_;\n  std::unique_ptr<NetObserver> observer_;\n  vector<const Event*> events_;\n\n  DISABLE_COPY_AND_ASSIGN(NetBase);\n};\n\nCAFFE_DECLARE_REGISTRY(\n    NetRegistry,\n    NetBase,\n    const std::shared_ptr<const NetDef>&,\n    Workspace*);\n#define REGISTER_NET_CREATOR(key, ...) \\\n  CAFFE_REGISTER_CREATOR(NetRegistry, key, __VA_ARGS__)\n#define REGISTER_NET(name, ...) \\\n  CAFFE_REGISTER_CLASS(NetRegistry, name, __VA_ARGS__)\n\n/**\n * @brief Creates a network, accessing / creating blobs in the given workspace.\n *\n * Note that this is different from Workspace::CreateNet. 
The latter adds the\n * created net object to the workspace's net map, while this function returns\n * a standalone net object.\n */\nunique_ptr<NetBase> CreateNet(const NetDef& net_def, Workspace* ws);\nunique_ptr<NetBase> CreateNet(\n    const std::shared_ptr<const NetDef>& net_def,\n    Workspace* ws);\n\nvoid SetGlobalNetObserverCreator(NetObserverCreator creator);\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_CORE_NET_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/net_async_dag_gpu.h",
    "content": "#ifndef CAFFE2_CORE_NET_ASYNC_DAG_GPU_H_\n#define CAFFE2_CORE_NET_ASYNC_DAG_GPU_H_\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/net_dag.h\"\n#include \"caffe2/core/workspace.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nnamespace caffe2 {\n\n// Run an event-driven graph - before each operator chain, wait on each parent\n// operator for the chain source, then execute each operator. Due to the chain\n// construction mechanism, operators in the same chain implicitly runs on the\n// same stream.\n// AsyncDAGNet is only registered in gpu mode, because CPU code is always sync\n// and a CPU only AsyncDAG net is essentially a DAG net.\nclass AsyncDAGNet : public DAGNetBase {\n public:\n  AsyncDAGNet(const std::shared_ptr<const NetDef>& net_def, Workspace* ws);\n  bool SupportsAsync() override {\n    return true;\n  }\n  bool RunAt(const std::vector<int>& chain) override;\n  bool RunAsync() override;\n\n protected:\n  // Tracks whether a given op has had an event recorded in each\n  // RunAt() iteration.\n  std::vector<int32_t> eventRecorded_;\n  DISABLE_COPY_AND_ASSIGN(AsyncDAGNet);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_CORE_NET_ASYNC_DAG_GPU_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/net_dag.h",
    "content": "#ifndef CAFFE2_CORE_NET_DAG_H_\n#define CAFFE2_CORE_NET_DAG_H_\n\n#include <atomic>\n#include <climits>\n#include <cstddef>\n#include <thread> // NOLINT\n#include <typeinfo>\n#include <unordered_map>\n#include <vector>\n\n#include \"caffe2/core/blob.h\"\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/net.h\"\n#include \"caffe2/core/observer.h\"\n#include \"caffe2/core/operator_schema.h\"\n#include \"caffe2/core/registry.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/core/workspace.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n#include \"caffe2/utils/simple_queue.h\"\n\nnamespace caffe2 {\n\nnamespace internal {\nstruct OperatorNode {\n  unique_ptr<OperatorBase> operator_;\n  vector<int> children_;\n  vector<int> parents_;\n  std::atomic<int> runtime_parent_count_;\n  bool is_chain_start_ = false;\n};\n\nstruct OpGraphNode {\n  vector<int> children_;\n  vector<int> parents_;\n  int visited_inputs = 0;\n  int num_orig_parents;\n};\n}\n\nclass DAGNetBase : public NetBase {\n public:\n  using ExecutionChains = std::unordered_map<int, std::vector<int>>;\n  DAGNetBase(const std::shared_ptr<const NetDef>& net_def, Workspace* ws);\n  ~DAGNetBase() override;\n  bool RunAsync() override;\n  // WorkerFunction() is a function wrapper to allow us to run worker threads.\n  // It checks out one ready-to-run operator from the job queue, runs it,\n  // notifies all its children, and for any children that is ready, enqueues\n  // it to the job queue.\n  void WorkerFunction();\n  vector<float> TEST_Benchmark(\n      const int warmup_runs,\n      const int main_runs,\n      const bool run_individual) override;\n\n  const ExecutionChains& TEST_execution_chains() const {\n    return execution_chains_;\n  }\n\n  vector<OperatorBase*> GetOperators() const override {\n    vector<OperatorBase*> op_list;\n    for (auto& op_node : operator_nodes_) {\n      op_list.push_back(op_node.operator_.get());\n    }\n    return 
op_list;\n  }\n\n protected:\n  virtual bool RunAt(const std::vector<int>& chain) = 0;\n\n  vector<internal::OperatorNode> operator_nodes_;\n  ExecutionChains execution_chains_;\n  vector<int> initial_frontier_;\n  std::unique_ptr<SimpleQueue<int>> job_queue_;\n  std::vector<std::thread> workers_;\n  int num_workers_;\n  int num_workers_first_iteration_;\n  int remaining_ops_;\n\n  bool success_;\n  int iter_;\n  std::mutex remaining_ops_mutex_;\n  std::condition_variable cv_;\n  std::mutex run_in_progress_;\n\n  DISABLE_COPY_AND_ASSIGN(DAGNetBase);\n};\n\nclass DAGNet : public DAGNetBase {\n public:\n  using DAGNetBase::DAGNetBase;\n\n protected:\n  bool RunAt(const std::vector<int>& chain) override;\n  bool SupportsAsync() override {\n    return false;\n  }\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_CORE_NET_DAG_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/net_simple.h",
    "content": "#ifndef CAFFE2_CORE_NET_SIMPLE_H_\n#define CAFFE2_CORE_NET_SIMPLE_H_\n\n#include <vector>\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/net.h\"\n#include \"caffe2/core/registry.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/core/workspace.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nnamespace caffe2 {\n\n// This is the very basic structure you need to run a network - all it\n// does is simply to run everything in sequence. If you want more fancy control\n// such as a DAG-like execution, check out other better net implementations.\nclass SimpleNet : public NetBase {\n public:\n  SimpleNet(const std::shared_ptr<const NetDef>& net_def, Workspace* ws);\n  bool SupportsAsync() override {\n    return false;\n  }\n  bool RunAsync() override;\n  vector<float> TEST_Benchmark(\n      const int warmup_runs,\n      const int main_runs,\n      const bool run_individual) override;\n\n  /*\n   * This returns a list of pointers to objects stored in unique_ptrs.\n   * Used by Observers.\n   *\n   * Think carefully before using.\n   */\n  vector<OperatorBase*> GetOperators() const override {\n    vector<OperatorBase*> op_list;\n    for (auto& op : operators_) {\n      op_list.push_back(op.get());\n    }\n    return op_list;\n  }\n\n protected:\n  vector<unique_ptr<OperatorBase>> operators_;\n\n  DISABLE_COPY_AND_ASSIGN(SimpleNet);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_CORE_NET_SIMPLE_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/net_simple_async.h",
    "content": "#ifndef CAFFE2_CORE_NET_SIMPLE_ASYNC_H_\n#define CAFFE2_CORE_NET_SIMPLE_ASYNC_H_\n\n#include <vector>\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/net.h\"\n#include \"caffe2/core/registry.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/core/workspace.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nnamespace caffe2 {\n\n// This is the very basic structure you need to run a network - all it\n// does is simply to run everything in sequence. If you want more fancy control\n// such as a DAG-like execution, check out other better net implementations.\nclass AsyncSimpleNet : public NetBase {\n public:\n  AsyncSimpleNet(const std::shared_ptr<const NetDef>& net_def, Workspace* ws);\n\n  bool SupportsAsync() override {\n    return true;\n  }\n  bool RunAsync() override;\n\n  vector<float> TEST_Benchmark(\n      const int warmup_runs,\n      const int main_runs,\n      const bool run_individual) override;\n\n  /*\n   * This returns a list of pointers to objects stored in unique_ptrs.\n   * Used by Observers.\n   *\n   * Think carefully before using.\n   */\n  vector<OperatorBase*> GetOperators() const override {\n    vector<OperatorBase*> op_list;\n    for (auto& op : operators_) {\n      op_list.push_back(op.get());\n    }\n    return op_list;\n  }\n\n protected:\n  vector<unique_ptr<OperatorBase>> operators_;\n\n  DISABLE_COPY_AND_ASSIGN(AsyncSimpleNet);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_CORE_NET_SIMPLE_ASYNC_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/observer.h",
    "content": "#pragma once\n\nnamespace caffe2 {\n\n/**\n *  Use this to implement a Observer using the Observer Pattern template.\n */\n\ntemplate <class T>\nclass ObserverBase {\n public:\n  explicit ObserverBase(T* subject) : subject_(subject) {}\n\n  virtual bool Start() {\n    return false;\n  }\n  virtual bool Stop() {\n    return false;\n  }\n\n  virtual ~ObserverBase() noexcept {};\n\n  T* subject() const {\n    return subject_;\n  }\n\n protected:\n  T* subject_;\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/operator.h",
    "content": "#ifndef CAFFE2_CORE_OPERATOR_H_\n#define CAFFE2_CORE_OPERATOR_H_\n\n#include <array>\n#include <climits>\n#include <cstddef>\n#include <exception>\n#include <typeinfo>\n#include <vector>\n\n#include \"caffe2/core/blob.h\"\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/net.h\"\n#include \"caffe2/core/observer.h\"\n#include \"caffe2/core/operator_gradient.h\"\n#include \"caffe2/core/operator_schema.h\"\n#include \"caffe2/core/registry.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/core/workspace.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n#include \"caffe2/utils/proto_utils.h\"\n\nnamespace caffe2 {\n\nclass OperatorBase {\n public:\n  explicit OperatorBase(const OperatorDef& operator_def, Workspace* ws);\n  virtual ~OperatorBase() noexcept {}\n\n  /** @brief Checks if the operator has an argument of the given name.\n   */\n  inline bool HasArgument(const string& name) const {\n    CAFFE_ENFORCE(operator_def_, \"operator_def was null!\");\n    return ArgumentHelper::HasArgument(*operator_def_, name);\n  }\n\n  // Functions that deal with arguments. 
Basically, this allows us to map an\n  // argument name to a specific type of argument that we are trying to access.\n  template <typename T>\n  inline T GetSingleArgument(const string& name, const T& default_value) const {\n    CAFFE_ENFORCE(operator_def_, \"operator_def was null!\");\n    return ArgumentHelper::GetSingleArgument<OperatorDef, T>(\n        *operator_def_, name, default_value);\n  }\n  template <typename T>\n  inline bool HasSingleArgumentOfType(const string& name) const {\n    CAFFE_ENFORCE(operator_def_, \"operator_def was null!\");\n    return ArgumentHelper::HasSingleArgumentOfType<OperatorDef, T>(\n        *operator_def_, name);\n  }\n  template <typename T>\n  inline vector<T> GetRepeatedArgument(\n      const string& name,\n      const vector<T>& default_value = {}) const {\n    CAFFE_ENFORCE(operator_def_, \"operator_def was null!\");\n    return ArgumentHelper::GetRepeatedArgument<OperatorDef, T>(\n        *operator_def_, name, default_value);\n  }\n\n  // Get the inputs and outputs as specific types.\n  template <typename T>\n  inline const T& Input(int idx) {\n    DCHECK_LT(idx, inputs_.size());\n    try {\n      return inputs_.at(idx)->template Get<T>();\n    } catch (::caffe2::EnforceNotMet& enf) {\n      if (has_debug_def()) {\n        enf.AppendMessage(\".\\nOffending Blob name: \");\n        enf.AppendMessage(debug_def().input(idx));\n        enf.AppendMessage(\".\\n\");\n      }\n      throw enf;\n    }\n  }\n\n  template <typename T>\n  inline T* Output(int idx) {\n    return outputs_.at(idx)->template GetMutable<T>();\n  }\n\n  inline const Blob& InputBlob(int idx) {\n    return *inputs_.at(idx);\n  }\n\n  inline Blob* OutputBlob(int idx) {\n    return outputs_.at(idx);\n  }\n\n  template <typename T>\n  inline bool InputIsType(int idx) {\n    return inputs_.at(idx)->template IsType<T>();\n  }\n\n  template <typename T>\n  inline bool OutputIsType(int idx) {\n    return outputs_.at(idx)->template IsType<T>();\n  }\n\n  inline int 
InputSize() { return inputs_.size(); }\n  inline int OutputSize() { return outputs_.size(); }\n  inline const vector<const Blob*>& Inputs() const { return inputs_; }\n  inline const vector<Blob*>& Outputs() { return outputs_; }\n  vector<TensorShape> InputTensorShapes();\n\n  virtual void WaitEvent(const Event& ev) {\n    CAFFE_NOT_IMPLEMENTED;\n  }\n\n  inline void Wait(const OperatorBase& other) {\n    WaitEvent(other.event());\n  }\n\n  virtual void Record() {\n    CAFFE_NOT_IMPLEMENTED;\n  }\n\n  virtual bool Run(int /* unused */ /*stream_id*/ = 0) {\n    CAFFE_NOT_IMPLEMENTED;\n  }\n\n  // RunAsync, if implemenented by the specific operators, will schedule the\n  // computation on the corresponding context and record the event in its\n  // event_ member object. If the specific operator does not support RunAsync,\n  // it will simply be synchronous as a fallback.\n  virtual bool RunAsync(int stream_id = 0) {\n    return Run(stream_id);\n  }\n\n  virtual void AddRelatedBlobInfo(EnforceNotMet* err) {\n    if (!has_debug_def()) {\n      return;\n    }\n\n    bool found_input;\n    if (err->caller() != nullptr) {\n      for (int i = 0; i < inputs_.size(); i++) {\n        if (inputs_[i]->GetRaw() == err->caller()) {\n          found_input = true;\n          err->AppendMessage(\n              \"\\n** while accessing input: \" + debug_def().input(i));\n          break;\n        }\n      }\n      for (int i = 0; i < outputs_.size(); i++) {\n        if (outputs_[i]->GetRaw() == err->caller()) {\n          if (found_input) {\n            err->AppendMessage(\"\\n OR \");\n          }\n          err->AppendMessage(\n              \"\\n** while accessing output: \" + debug_def().output(i));\n          break;\n        }\n      }\n    }\n  }\n\n  inline const OperatorDef& debug_def() const {\n    CAFFE_ENFORCE(has_debug_def(), \"operator_def was null!\");\n    return *operator_def_;\n  }\n\n  inline void set_debug_def(\n      const std::shared_ptr<const OperatorDef>& 
operator_def) {\n    operator_def_ = operator_def;\n  }\n\n  inline bool has_debug_def() const {\n    return operator_def_ != nullptr;\n  }\n\n public:\n  void SetObserver(std::unique_ptr<ObserverBase<OperatorBase>> observer) {\n    observer_ = std::move(observer);\n  }\n\n  void RemoveObserver() {\n    observer_ = nullptr;\n  }\n\n  void RecordLastFailedOpNetPosition() {\n    if (net_position_ != kNoNetPositionSet) {\n      VLOG(1) << \"Operator with id \" << net_position_ << \" failed\";\n      operator_ws_->last_failed_op_net_position = net_position_;\n    } else {\n      VLOG(1) << \"Failed operator doesn't have id set\";\n    }\n  }\n\n  int net_position() const {\n    return net_position_;\n  }\n\n  void set_net_position(int idx) {\n    net_position_ = idx;\n  }\n\n  const DeviceOption& device_option() {\n    return device_option_;\n  }\n\n  const Event& event() const {\n    return event_;\n  }\n\n  const std::string& type() {\n    CAFFE_ENFORCE(operator_def_.get() != nullptr);\n    return operator_def_->type();\n  }\n\n public:\n  static constexpr int kNoNetPositionSet = -1;\n\n  ObserverBase<OperatorBase>* GetObserver() {\n    return observer_.get();\n  }\n\n  const ObserverBase<OperatorBase>* GetObserver() const {\n    return observer_.get();\n  }\n\n private:\n  Workspace* operator_ws_;\n  std::shared_ptr<const OperatorDef> operator_def_;\n  DeviceOption device_option_;\n  vector<const Blob*> inputs_;\n  vector<Blob*> outputs_;\n\n  int net_position_{kNoNetPositionSet};\n\n protected:\n  std::unique_ptr<ObserverBase<OperatorBase>> observer_;\n  // An event used by asynchronous execution.\n  Event event_;\n\n  DISABLE_COPY_AND_ASSIGN(OperatorBase);\n};\n\n// If your operator does not need any specialized contructor or destructor,\n// you can simply use this to save two lines of code.\n#define USE_SIMPLE_BASE_CTOR_DTOR(name)                                        \\\n  name(const OperatorDef& operator_def, Workspace* ws)                         \\\n      : 
OperatorBase(operator_def, ws) {}                                      \\\n  virtual ~name() noexcept {}\n\n// OP_SINGLE_ARG provides a shorter initialization choice for initialization of\n// member variables for the class constructors.\n#define OP_SINGLE_ARG(type, name, variable, default)                           \\\n  variable(OperatorBase::GetSingleArgument<type>(name, (default)))\n\n// INPUT_TAGS and OUTPUT_TAGS are optional features to name the indices of the\n// operator's inputs and outputs, in order to avoid confusion. For example, for\n// a fully convolution layer that has input, weight and bias, you can define its\n// input tags as:\n//     INPUT_TAGS(INPUT, WEIGHT, BIAS);\n// And in the code, instead of doing\n//     auto& weight = Input(1);\n// you can now do\n//     auto& weight = Input(WEIGHT);\n// to make it more clear.\n#define INPUT_TAGS(first_input, ...)                                           \\\n  enum _InputTags { first_input = 0, __VA_ARGS__ }\n#define OUTPUT_TAGS(first_input, ...)                                          \\\n  enum _OutputTags { first_input = 0, __VA_ARGS__ }\n\n\n// Operator is the class that you usually want to derive, if your operator will\n// run on different devices. 
You should then implement the RunOnDevice()\n// function.\ntemplate <class Context>\nclass Operator : public OperatorBase {\n public:\n  explicit Operator(const OperatorDef& operator_def, Workspace* ws)\n      : OperatorBase(operator_def, ws),\n        context_(operator_def.device_option()) {\n    // In the constructor, we switch to the device so that the child class\n    // constructors will run on that device.\n    context_.SwitchToDevice(0);\n  }\n  ~Operator() noexcept override {}\n\n  inline const Tensor<Context>& Input(int idx) {\n    return OperatorBase::template Input<Tensor<Context> >(idx); }\n  inline Tensor<Context>* Output(int idx) {\n    return OperatorBase::template Output<Tensor<Context>>(idx);\n  }\n\n  void WaitEvent(const Event& ev) final {\n    context_.SwitchToDevice();\n    context_.WaitEvent(ev);\n  }\n\n  void Record() final {\n    context_.SwitchToDevice();\n    context_.Record(&event_);\n  }\n\n  // The run function of Operator switches to the device, and then carries out\n  // the actual computation with RunOnDevice(). You should implement RunOnDevice\n  // instead of Run().\n  bool Run(int stream_id = 0) final {\n    try {\n      if (observer_) {\n        observer_->Start();\n      }\n      context_.SwitchToDevice(stream_id);\n      bool result = RunOnDevice();\n      if (!result) {\n        this->RecordLastFailedOpNetPosition();\n      }\n      context_.FinishDeviceComputation(); // throws on error\n      if (observer_) {\n        observer_->Stop();\n      }\n\n      return result;\n    } catch (EnforceNotMet& err) {\n      if (has_debug_def()) {\n        err.AppendMessage(\n            \"Error from operator: \\n\" + ProtoDebugString(debug_def()));\n        AddRelatedBlobInfo(&err);\n      }\n      this->RecordLastFailedOpNetPosition();\n      throw;\n    } catch (...) 
{\n      this->RecordLastFailedOpNetPosition();\n      throw;\n    }\n  }\n\n  bool RunAsync(int stream_id = 0) final {\n    try {\n      context_.SwitchToDevice(stream_id);\n      auto result = RunOnDevice();\n      if (!result) {\n        this->RecordLastFailedOpNetPosition();\n      }\n      context_.Record(&event_);\n      return result;\n    } catch (EnforceNotMet& err) {\n      if (has_debug_def()) {\n        err.AppendMessage(\n            \"Error from operator: \\n\" + ProtoDebugString(debug_def()));\n        AddRelatedBlobInfo(&err);\n      }\n      this->RecordLastFailedOpNetPosition();\n      throw;\n    } catch (...) {\n      this->RecordLastFailedOpNetPosition();\n      throw;\n    }\n  }\n\n  virtual bool RunOnDevice() = 0;\n\n protected:\n  Context context_;\n};\n\n#define USE_OPERATOR_BASE_FUNCTIONS                                 \\\n  /* using override */ using OperatorBase::HasArgument;             \\\n  /* using override */ using OperatorBase::GetSingleArgument;       \\\n  /* using override */ using OperatorBase::HasSingleArgumentOfType; \\\n  /* using override */ using OperatorBase::GetRepeatedArgument;     \\\n  /* using override */ using OperatorBase::InputIsType;             \\\n  /* using override */ using OperatorBase::InputSize;               \\\n  /* using override */ using OperatorBase::OutputSize\n\n#define USE_OPERATOR_FUNCTIONS(context)                   \\\n  USE_OPERATOR_BASE_FUNCTIONS;                            \\\n  /* using override */ using Operator<context>::context_; \\\n  /* using override */ using Operator<context>::Input;    \\\n  /* using override */ using Operator<context>::Output\n\n#define USE_OPERATOR_CONTEXT_FUNCTIONS USE_OPERATOR_FUNCTIONS(Context)\n\n#define USE_SIMPLE_CTOR_DTOR(name)                                             \\\n  name(const OperatorDef& operator_def, Workspace* ws)                         \\\n      : Operator<Context>(operator_def, ws) {}                                 \\\n  virtual ~name() 
noexcept {}\n\n// Helpers to implement runtime op polymorphism. Often it's convenient to make\n// an op work on different input types (e.g. i32 vs i64 indices) or special-case\n// it for particular input size (e.g. ScatterWeightedSum for block size of 1\n// doesn't need to call Eigen).\n//\n// DispatchHelper provides compile-time generation of nested \"if\" statements,\n// e.g. `DispatchHelper<FixedValues<1, 4>>::call(this, block_size);`\n// unrolls into:\n//   if (block_size == 1) {\n//     return DoRunWithValue<1>();\n//   } else if (block_size = 4) {\n//     return DoRunWithValue<4>();\n//   } else {\n//     return DoRunWithValue<-1>();\n//   }`\n//\n// DoRunWithValue implementation can use template arguments to do \"if\"\n// statements\n// or proxy to functions in math.h which often provide fixed size\n// implementation.\n//\n// Similarly `TensorTypes<int32_t, int64_t>(this, Input(0))` provides branching\n// based on type of the first input and calls DoRunWithType.\n//\n// Note, that the same instance of Op class is used as the method, not class is\n// templated. We might consider adding static class-level polymorphism later.\n//\n// Convenient macro USE_DISPATCH_HELPER is provided for declaring friendship in\n// case DoRunWithValue or DoRunWithType are declared non-public.\n\n#define USE_DISPATCH_HELPER                           \\\n  template <typename FirstArg, typename... ExtraArgs> \\\n  friend struct DispatchHelper\n\ntemplate <int... Values>\nstruct FixedValues {};\n\ntemplate <typename... Types>\nstruct TensorTypes {};\n\n// Special tag that can be listed in TensorTypes to denote that a special\n// implementation in 'RunWithOtherType' needs to be called instead of failing\n// Obviously this needs to be the last item in lists, e.g.\n// TensorTypes<float, double, GenericTensorImplementation>\nstruct GenericTensorImplementation {};\n\n// Same as TensorTypes but call DoRunWithType2\ntemplate <typename... 
Types>\nstruct TensorTypes2 {};\n\ntemplate <typename Sizes, typename... ExtraArgs>\nstruct DispatchHelper;\n\ntemplate <int FirstVal, int... Values, typename... ExtraArgs>\nstruct DispatchHelper<FixedValues<FirstVal, Values...>, ExtraArgs...> {\n  template <typename Op>\n  static bool call(Op* op, int value) {\n    if (FirstVal == value) {\n      return op->template DoRunWithValue<ExtraArgs..., FirstVal>();\n    }\n    return DispatchHelper<FixedValues<Values...>, ExtraArgs...>::template call<\n        Op>(op, value);\n  }\n};\n\ntemplate <typename... ExtraArgs>\nstruct DispatchHelper<FixedValues<>, ExtraArgs...> {\n  template <typename Op>\n  static bool call(Op* op, TIndex /*size*/) {\n    return op->template DoRunWithValue<ExtraArgs..., -1>();\n  }\n};\n\n#define CAFFE2_DEFINE_TENSOR_TYPES_DISPATCHER(                                 \\\n    TensorTypes, DoRunWithType, DoRunWithOtherType)                            \\\n  template <typename FirstType, typename... Types, typename... ExtraArgs>      \\\n  struct DispatchHelper<TensorTypes<FirstType, Types...>, ExtraArgs...> {      \\\n    template <typename Op>                                                     \\\n    static bool call(Op* op, const TypeMeta& meta) {                           \\\n      static_assert(                                                           \\\n          !std::is_same<GenericTensorImplementation, FirstType>::value,        \\\n          \"GenericTensorImplementation must be the last in TensorTypes list\"); \\\n      if (meta.Match<FirstType>()) {                                           \\\n        return op->template DoRunWithType<ExtraArgs..., FirstType>();          \\\n      }                                                                        \\\n      return DispatchHelper<TensorTypes<Types...>, ExtraArgs...>::             \\\n          template call<Op>(op, meta);                                         \\\n    }                                                             
             \\\n    template <typename Op, typename Context>                                   \\\n    static bool call(Op* op, const Tensor<Context>& tensor) {                  \\\n      return call<Op>(op, tensor.meta());                                      \\\n    }                                                                          \\\n    template <typename Op>                                                     \\\n    static bool call(Op* op, const Blob& blob) {                               \\\n      return call<Op>(op, blob.meta());                                        \\\n    }                                                                          \\\n  };                                                                           \\\n                                                                               \\\n  template <typename... ExtraArgs>                                             \\\n  struct DispatchHelper<TensorTypes<>, ExtraArgs...> {                         \\\n    template <typename Op>                                                     \\\n    static bool call(Op* /* unused */, const TypeMeta& meta) {                 \\\n      CAFFE_THROW(\"Unsupported type of tensor: \", meta.name());                \\\n    }                                                                          \\\n    template <typename Op, typename Context>                                   \\\n    static bool call(Op* op, const Tensor<Context>& tensor) {                  \\\n      return call<Op>(op, tensor.meta());                                      \\\n    }                                                                          \\\n    template <typename Op>                                                     \\\n    static bool call(Op* op, const Blob& blob) {                               \\\n      return call<Op>(op, blob.meta());                                        \\\n    }                                                                   
       \\\n  };                                                                           \\\n                                                                               \\\n  template <typename... ExtraArgs>                                             \\\n  struct DispatchHelper<                                                       \\\n      TensorTypes<GenericTensorImplementation>,                                \\\n      ExtraArgs...> {                                                          \\\n    template <typename Op>                                                     \\\n    static bool call(Op* op, const TypeMeta&) {                                \\\n      return op->template DoRunWithOtherType<ExtraArgs...>();                  \\\n    }                                                                          \\\n    template <typename Op, typename Context>                                   \\\n    static bool call(Op* op, const Tensor<Context>& tensor) {                  \\\n      return call<Op>(op, tensor.meta());                                      \\\n    }                                                                          \\\n    template <typename Op>                                                     \\\n    static bool call(Op* op, const Blob& blob) {                               \\\n      return call<Op>(op, blob.meta());                                        \\\n    }                                                                          \\\n  };\nCAFFE2_DEFINE_TENSOR_TYPES_DISPATCHER(\n    TensorTypes,\n    DoRunWithType,\n    DoRunWithOtherType)\nCAFFE2_DEFINE_TENSOR_TYPES_DISPATCHER(\n    TensorTypes2,\n    DoRunWithType2,\n    DoRunWithOtherType2)\n#undef CAFFE2_DEFINE_TENSOR_TYPES_DISPATCHER\n\n// The device type registry. 
This works in two phases:\n// (1) gDeviceTypeRegistry() maps the device types values to the actual operator\n//     registry function.\n// (2) Then, one can call the operator registry function to further create the\n//     operators.\ntypedef Registry<std::string, OperatorBase, const OperatorDef&, Workspace*>\n    OperatorRegistry;\ntypedef Registry<std::string, OperatorBase, const OperatorDef&, Workspace*>* (\n    *RegistryFunction)();\nstd::map<int32_t, OperatorRegistry*>* gDeviceTypeRegistry();\n\nstruct DeviceTypeRegisterer {\n  explicit DeviceTypeRegisterer(int32_t type, RegistryFunction func) {\n    if (gDeviceTypeRegistry()->count(type)) {\n      std::cerr << \"Device type \" << type\n                << \"registered twice. This should not happen. Did you have \"\n                   \"duplicated numbers assigned to different devices?\";\n      std::exit(1);\n    }\n    // Calling the registry function to get the actual registry pointer.\n    gDeviceTypeRegistry()->emplace(type, func());\n  }\n};\n\n#define CAFFE_REGISTER_DEVICE_TYPE(type, registry_function) \\\n  namespace {                                               \\\n  static DeviceTypeRegisterer CAFFE_ANONYMOUS_VARIABLE(     \\\n      DeviceType)(type, &registry_function);                \\\n  }\n\n// The operator registry. Since we are not expecting a great number of devices,\n// we will simply have an if-then type command and allocate the actual\n// generation to device-specific registerers.\n// Note that although we have CUDA and CUDNN here, the registerers themselves do\n// not depend on specific cuda or cudnn libraries. This means that we will be\n// able to compile it even when there is no cuda available - we simply do not\n// link any cuda or cudnn operators.\nCAFFE_DECLARE_REGISTRY(\n    CPUOperatorRegistry,\n    OperatorBase,\n    const OperatorDef&,\n    Workspace*);\n#define REGISTER_CPU_OPERATOR_CREATOR(key, ...) 
\\\n  CAFFE_REGISTER_CREATOR(CPUOperatorRegistry, key, __VA_ARGS__)\n#define REGISTER_CPU_OPERATOR(name, ...)                           \\\n  extern void CAFFE2_PLEASE_ADD_OPERATOR_SCHEMA_FOR_##name();      \\\n  static void CAFFE2_UNUSED CAFFE_ANONYMOUS_VARIABLE_CPU##name() { \\\n    CAFFE2_PLEASE_ADD_OPERATOR_SCHEMA_FOR_##name();                \\\n  }                                                                \\\n  CAFFE_REGISTER_CLASS(CPUOperatorRegistry, name, __VA_ARGS__)\n#define REGISTER_CPU_OPERATOR_STR(str_name, ...) \\\n  CAFFE_REGISTER_TYPED_CLASS(CPUOperatorRegistry, str_name, __VA_ARGS__)\n\n#define REGISTER_CPU_OPERATOR_WITH_ENGINE(name, engine, ...) \\\n  CAFFE_REGISTER_CLASS(CPUOperatorRegistry, name##_ENGINE_##engine, __VA_ARGS__)\n\nCAFFE_DECLARE_REGISTRY(\n    CUDAOperatorRegistry,\n    OperatorBase,\n    const OperatorDef&,\n    Workspace*);\n#define REGISTER_CUDA_OPERATOR_CREATOR(key, ...) \\\n  CAFFE_REGISTER_CREATOR(CUDAOperatorRegistry, key, __VA_ARGS__)\n#define REGISTER_CUDA_OPERATOR(name, ...)                           \\\n  extern void CAFFE2_PLEASE_ADD_OPERATOR_SCHEMA_FOR_##name();       \\\n  static void CAFFE2_UNUSED CAFFE_ANONYMOUS_VARIABLE_CUDA##name() { \\\n    CAFFE2_PLEASE_ADD_OPERATOR_SCHEMA_FOR_##name();                 \\\n  }                                                                 \\\n  CAFFE_REGISTER_CLASS(CUDAOperatorRegistry, name, __VA_ARGS__)\n#define REGISTER_CUDA_OPERATOR_STR(str_name, ...) \\\n  CAFFE_REGISTER_TYPED_CLASS(CUDAOperatorRegistry, str_name, __VA_ARGS__)\n\n#define REGISTER_CUDA_OPERATOR_WITH_ENGINE(name, engine, ...) \\\n  CAFFE_REGISTER_CLASS(                                       \\\n      CUDAOperatorRegistry, name##_ENGINE_##engine, __VA_ARGS__)\n\n// Macros for cudnn since we use it often\n#define REGISTER_CUDNN_OPERATOR(name, ...) 
\\\n  REGISTER_CUDA_OPERATOR_WITH_ENGINE(name, CUDNN, __VA_ARGS__)\n\n// StaticLinkingProtector is a helper class that ensures that the Caffe2\n// library is linked correctly with whole archives (in the case of static\n// linking). What happens is that when CreateOperator is called for the first\n// time, it instantiates an OperatorLinkingProtector object to check if the\n// operator registry is empty. If it is empty, this means that we are not\n// properly linking the library.\n//\n// You should not need to use this class.\nstruct StaticLinkingProtector {\n  StaticLinkingProtector() {\n    const int registered_ops = CPUOperatorRegistry()->Keys().size();\n    // Note: this is a check failure instead of an exception, because if\n    // the linking is wrong, Caffe2 won't be able to run properly anyway,\n    // so it's better to fail loud.\n    // If Caffe2 is properly linked with whole archive, there should be more\n    // than zero registered ops.\n    if (registered_ops == 0) {\n      LOG(FATAL) <<\n        \"You might have made a build error: the Caffe2 library does not seem \"\n        \"to be linked with whole-static library option. To do so, use \"\n        \"-Wl,-force_load (clang) or -Wl,--whole-archive (gcc) to link the \"\n        \"Caffe2 library.\";\n    }\n  }\n};\n\n// An exception that can be thrown by an operator constructor that notifies\n// that it does not support the given setting. This can be usually used for\n// specific engines that only implement a subset of the features required by\n// the original operator schema.\n// TODO(jiayq): make more feature-complete exception message.\nclass UnsupportedOperatorFeature : public std::exception {\n public:\n  UnsupportedOperatorFeature(const string& msg) : msg_(msg) {}\n  const char* what() const noexcept override {\n    return msg_.c_str();\n  }\n\n private:\n  string msg_;\n};\n\n// A helper macro that should ONLY be used in the operator constructor to check\n// if needed features are met. 
If not, throws the UnsupportedOperatorFeature\n// exception with the given message.\n#define OPERATOR_NEEDS_FEATURE(condition, ...)                           \\\n  if (!(condition)) {                                                    \\\n    throw UnsupportedOperatorFeature(::caffe2::MakeString(__VA_ARGS__)); \\\n  }\n\n// Creates an operator with the given operator definition.\n// Throws on error and never returns nullptr\nunique_ptr<OperatorBase> CreateOperator(\n    const OperatorDef& operator_def,\n    Workspace* ws,\n    int net_position = OperatorBase::kNoNetPositionSet);\n\n// User can set the preferred engines as a list of engine names, in\n// descending order of preference.\nusing EnginePrefType = std::vector<std::string>;\n// {device_type -> {operator_name -> EnginePrefType}}\nusing PerOpEnginePrefType =\n    CaffeMap<int, CaffeMap<std::string, EnginePrefType>>;\n// {device_type -> EnginePrefType}\nusing GlobalEnginePrefType = CaffeMap<int, EnginePrefType>;\nvoid SetPerOpEnginePref(const PerOpEnginePrefType& per_op_engine_pref);\nvoid SetGlobalEnginePref(const GlobalEnginePrefType& global_engine_pref);\nvoid SetEnginePref(\n    const PerOpEnginePrefType& per_op_engine_pref,\n    const GlobalEnginePrefType& global_engine_pref);\nvoid SetOpEnginePref(\n    const std::string& op_type,\n    const CaffeMap<int, EnginePrefType>& op_pref);\n\nTensorShapes InferBlobShapesAndTypesFromWorkspace(\n    Workspace* ws,\n    const vector<std::unique_ptr<NetDef>>& nets);\n\nTensorShapes InferBlobShapesAndTypesFromMap(\n    const CaffeMap<std::string, std::vector<TIndex>>& blob_dimensions,\n    const vector<std::unique_ptr<NetDef>>& nets);\n\nstd::map<string, std::pair<DeviceOption, DeviceOption>> ValidateTensorDevices(\n    OperatorBase& op,\n    const OperatorDef& op_def);\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_CORE_OPERATOR_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/operator_gradient.h",
    "content": "#ifndef CAFFE2_CORE_OPERATOR_GRADIENT_H_\n#define CAFFE2_CORE_OPERATOR_GRADIENT_H_\n\n#include \"caffe2/core/operator_schema.h\"\n#include \"caffe2/core/registry.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n#include \"caffe2/utils/proto_utils.h\"\n\nnamespace caffe2 {\n\n/* @brief A struct that abstracts on top of dense and sparse blobs.\n *\n * For a dense blob, its gradient name should be written into dense_, and for\n * a sparse blob, its gradient name should be written into indice_ for\n * the sparse indices and value_ for the values.\n */\nstruct GradientWrapper {\n  string dense_;\n  string indices_;\n  string values_;\n\n  inline bool IsDense() const {\n    return dense_.size();\n  }\n  inline bool IsSparse() const {\n    return (indices_.size() || values_.size());\n  }\n  inline bool IsEmpty() const {\n    return (!IsDense() && !IsSparse());\n  }\n};\n\n/**\n * A struct that holds the gradient operators and related gradient maps.\n */\nstruct GradientOpsMeta {\n  vector<OperatorDef> ops_;\n  vector<GradientWrapper> g_input_;\n\n  GradientOpsMeta() {}\n  GradientOpsMeta(\n      const vector<OperatorDef>& ops,\n      const vector<GradientWrapper>& v)\n      : ops_(ops), g_input_(v) {}\n};\n\nclass GradientMakerBase {\n public:\n  GradientMakerBase(\n      const OperatorDef& def,\n      const vector<GradientWrapper>& g_output)\n      : def_(def), g_output_(g_output), g_input_(def.input_size()){};\n  virtual ~GradientMakerBase() {}\n  virtual bool CopyDeviceOption() const {\n    return true;\n  }\n  virtual bool CopyEngine() const {\n    return true;\n  }\n  virtual bool CopyArguments() const {\n    return true;\n  }\n\n  virtual void VerifyOp() const {\n    auto* schema = OpSchemaRegistry::Schema(def_.type());\n    if (schema) {\n      CAFFE_ENFORCE(\n          schema->Verify(def_),\n          \"(GradientMaker) Operator def did not pass schema checking: \",\n          ProtoDebugString(def_));\n    }\n  }\n\n  /**\n   * @brief Returns the 
gradient ops meta.\n   *\n   * If your gradient op generator only use standard input and output\n   * manipulations, you can simply implement GetGradientDefs() that\n   * returns vector<OperatorDef>. In that, you can call GI, GI_V and GI_I\n   * that will automatically create the gradient registration for you.\n   *\n   * If you need to do custom gradient name registration, overload this\n   * function directly.\n   */\n  virtual GradientOpsMeta Get() {\n    VerifyOp();\n    vector<OperatorDef> new_defs = GetGradientDefs();\n    for (auto& opdef : new_defs) {\n      opdef.set_is_gradient_op(true);\n    }\n    return GradientOpsMeta(new_defs, g_input_);\n  };\n\n  const OperatorDef& Def() const {\n    return def_;\n  }\n\n protected:\n  virtual vector<OperatorDef> GetGradientDefs() {\n    CAFFE_NOT_IMPLEMENTED;\n  }\n\n  // Helper functions to return names for the gradient computation.\n  // I(idx), O(idx): return the input and output names.\n  // GO(idx): return the name of the gradient for output idx.\n  // GI(idx), GI_I(idx), GI_V(idx): return the name of the gradient for\n  //     input idx, and also registers that name into the gradient\n  //     registry to be returned.\n  string I(const int i) {\n    CAFFE_ENFORCE((i >= 0) && (i < def_.input().size()));\n    return def_.input(i);\n  }\n  string O(const int i) {\n    CAFFE_ENFORCE((i >= 0) && (i < def_.output().size()));\n    return def_.output(i);\n  }\n  string GI(const int i) {\n    CAFFE_ENFORCE(\n        !g_input_.at(i).IsSparse(),\n        \"Input \",\n        def_.input(i),\n        \" already set to sparse.\");\n    g_input_.at(i).dense_ = GradientName(def_.input(i));\n    return GradientName(def_.input(i));\n  }\n  string GI_I(const int i) {\n    CAFFE_ENFORCE(\n        !g_input_.at(i).IsDense(),\n        \"Input \",\n        def_.input(i),\n        \" already set to dense.\");\n    g_input_.at(i).indices_ = GradientSliceIndices(def_.input(i));\n    return GradientSliceIndices(def_.input(i));\n  }\n  
string GI_V(const int i) {\n    CAFFE_ENFORCE(\n        !g_input_.at(i).IsDense(),\n        \"Input \",\n        def_.input(i),\n        \" already set to dense.\");\n    g_input_.at(i).values_ = GradientSliceValues(def_.input(i));\n    return GradientSliceValues(def_.input(i));\n  }\n  string GO(const int i) {\n    CAFFE_ENFORCE(\n        g_output_.at(i).IsDense(),\n        \"Gradient of output \",\n        def_.output(i),\n        (g_output_.at(i).IsSparse() ? \" is sparse (expected dense).\"\n                                    : \" is not provided!\"));\n    return g_output_.at(i).dense_;\n  }\n  string GO_I(const int i) {\n    CAFFE_ENFORCE(\n        g_output_.at(i).IsSparse(),\n        \"Gradient of output \",\n        def_.output(i),\n        (g_output_.at(i).IsDense() ? \" is dense (expected sparse).\"\n                                   : \" is not provided!\"));\n    return g_output_.at(i).indices_;\n  }\n  string GO_V(const int i) {\n    CAFFE_ENFORCE(\n        g_output_.at(i).IsSparse(),\n        \"Gradient of output \",\n        def_.output(i),\n        (g_output_.at(i).IsDense() ? 
\" is dense (expected sparse).\"\n                                   : \" is not provided!\"));\n    return g_output_.at(i).values_;\n  }\n  const GradientWrapper& GradOut(int i) {\n    return g_output_.at(i);\n  }\n\n  // Function to add a gradient pair to map.\n  void SetDense(const int i, const string& name) {\n    CAFFE_ENFORCE(\n        !g_input_.at(i).IsSparse(),\n        \"Input \",\n        def_.input(i),\n        \" already set to sparse.\");\n    g_input_.at(i).dense_ = name;\n  }\n  void SetSparse(const int i, const string& indices, const string& values) {\n    CAFFE_ENFORCE(\n        !g_input_.at(i).IsDense(),\n        \"Input \",\n        def_.input(i),\n        \" already set to dense.\");\n    g_input_.at(i).indices_ = indices;\n    g_input_.at(i).values_ = values;\n  }\n\n  /**\n   * @brief a helper function to allow one to create one single operator\n   * def, which is usually the case for many simple operators.\n   */\n  template <class... Args>\n  inline static vector<OperatorDef> SingleGradientDef(const Args&... args) {\n    return vector<OperatorDef>{CreateOperatorDef(args...)};\n  }\n\n public:\n  /**\n    * Returns map that returns the parameters that the gradients are for.\n    */\n  static CaffeMap<string, string> MatchGradsToParams(const OperatorDef& op) {\n    // NOTE: how to go beyond string-matching?\n    CaffeMap<string, string> m;\n    for (auto& out : op.output()) {\n      if (IsGradientBlob(out)) {\n        m[out] = out.substr(0, out.length() - 5);\n      }\n    }\n    return m;\n  }\n\n private:\n  // Utility functions for gradient name computation. 
We don't expose them\n  // in order to discourage the use of such names explicitly.\n  static string GradientName(const string& name) {\n    return name + \"_grad\";\n  }\n\n  static bool IsGradientBlob(const string& name) {\n    return name.length() > 5 && name.find(\"_grad\") == name.length() - 5;\n  }\n\n  static string GradientNameToParam(const string& name) {\n    CHECK(IsGradientBlob(name));\n    return name.substr(0, name.length() - 5);\n  }\n\n  static string GradientSliceIndices(const string& name) {\n    return name + \"_grad_indices\";\n  }\n\n  static string GradientSliceValues(const string& name) {\n    return name + \"_grad_values\";\n  }\n\n protected:\n  // We make the member variables protected in case someone wants to write\n  // a fully custom Get() function.\n  const OperatorDef& def_;\n  const vector<GradientWrapper>& g_output_;\n  vector<GradientWrapper> g_input_;\n};\n\n/**\n * @brief A helper class to indicate that the operator does not need gradient\n * computation.\n *\n * Use the macro NO_GRADIENT to register operators that do not have gradients.\n * Note that this is different fron SHOULD_NOT_DO_GRADIENT: the latter means\n * that the gradient computation should not flow through it at all, and throws\n * an error if it is called.\n */\nclass NoGradient : public GradientMakerBase {\n  using GradientMakerBase::GradientMakerBase;\n  vector<OperatorDef> GetGradientDefs() override {\n    return vector<OperatorDef>();\n  }\n};\n\n/**\n * @brief A helper class to indicate that the operator should have no gradient.\n *\n * This is used when the operator definition is designed to not have a gradient.\n * Calling a gradient on this operator def will cause Caffe2 to quit.\n */\nstruct ThrowInTheTowelIfGradientIsCalled : public GradientMakerBase {\n  using GradientMakerBase::GradientMakerBase;\n  GradientOpsMeta Get() override {\n    CAFFE_ENFORCE(\n        false, \"One should not call gradient for operator \", def_.type(), \".\");\n  }\n};\n\n/**\n 
* @brief A helper class to indicate that the gradient mechanism is not ready.\n *\n * This should only be used sparsely when the gradient does exist, but we have\n * not implemented it yet and are using this as a lazy excuse. Eventually, a\n * gradient operator should be implemented.\n */\nstruct GradientNotImplementedYet : public GradientMakerBase {\n  using GradientMakerBase::GradientMakerBase;\n  GradientOpsMeta Get() override {\n    CAFFE_ENFORCE(\n        false,\n        \"Operator \",\n        def_.type(),\n        \" should have a gradient but is not implemented yet.\");\n  }\n};\n\nCAFFE_DECLARE_REGISTRY(\n    GradientRegistry,\n    GradientMakerBase,\n    const OperatorDef&,\n    const vector<GradientWrapper>&);\n\n#define REGISTER_GRADIENT(name, ...) \\\n  CAFFE_REGISTER_CLASS(GradientRegistry, name, __VA_ARGS__)\n#define REGISTER_GRADIENT_STR(str_name, ...) \\\n  CAFFE_REGISTER_TYPED_CLASS(GradientRegistry, str_name, __VA_ARGS__)\n\n// NO_GRADIENT means that the operator does not need any gradient computation.\n#define NO_GRADIENT(name) REGISTER_GRADIENT(name, NoGradient)\n\n// SHOULD_NOT_DO_GRADIENT means that the operator is not designed to have\n// gradient operators. If you attempt to call the gradient, a log fatal will\n// occur.\n#define SHOULD_NOT_DO_GRADIENT(name) \\\n  REGISTER_GRADIENT(name, ThrowInTheTowelIfGradientIsCalled)\n\n#define GRADIENT_NOT_IMPLEMENTED_YET(name) \\\n  REGISTER_GRADIENT(name, GradientNotImplementedYet)\n\n/**\n * @brief Gets the GradientOpsMeta for the given operator def.\n */\nGradientOpsMeta GetGradientForOp(\n    const OperatorDef& def,\n    const vector<GradientWrapper>& g_output);\n\n} // namespace caffe2\n\n#endif // CAFFE2_CORE_OPERATOR_GRADIENT_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/operator_schema.h",
    "content": "#ifndef CAFFE2_CORE_OPERATOR_SCHEMA_H_\n#define CAFFE2_CORE_OPERATOR_SCHEMA_H_\n\n#include <climits>\n#include <functional>\n#include <initializer_list>\n#include <ostream>\n#include <set>\n#include <vector>\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/registry.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nnamespace caffe2 {\n\n// A const value returned by OpSchema::CalculateOutput() if the number of\n// output cannot be determined.\nconstexpr int kCannotComputeNumOutputs = -1;\n\n/**\n * @brief A class to record the schema of an op.\n *\n * OpSchema records the common interface of an op specified by its name. This\n * is optional for each operator implemented in Caffe2 but is strongly\n * recommended.\n *\n * To register an OpSchema, one can use the macro OPERATOR_SCHEMA(name) and\n * then append the various functions in the class. For example, for an op\n * that itakes in two inputs, one output, and the first input and output\n * could be in-place, can be written as\n *\n *     OPERATOR_SCHEMA(name)\n *         .NumInputs(2).NumOutputs(1).AllowInplace({{0, 0}});\n */\nclass OpSchema {\n public:\n  OpSchema() : file_(\"unknown\"), line_(0) {}\n  OpSchema(const string& file, const int line) : file_(file), line_(line) {}\n\n  /**\n   * @brief Returns the file that the op schema is registered from.\n   */\n  inline const string& file() const {\n    return file_;\n  }\n\n  /**\n   * @brief Returns the line in file that the op schema is registered from.\n   */\n  inline int line() const {\n    return line_;\n  }\n\n  /**\n   * @brief Returns the docstring of the op schema.\n   */\n  inline const char* doc() const {\n    return doc_.empty() ? 
nullptr : doc_.c_str();\n  }\n\n  /**\n   * @brief Verifies if an operator definition protobuf matches the pattern\n   * specified in the schema.\n   */\n  bool Verify(const OperatorDef& def) const;\n\n  // Functions to set the property of the operator schemas.\n  // Sets the number of inputs, either a fixed number or a min and a max.\n\n  /**\n   * @brief A single input.\n   */\n  OpSchema& NumInputs(int n);\n  /**\n   * @brief Input could be in range [min, max], inclusive.\n   */\n  OpSchema& NumInputs(int min, int max);\n  /**\n   * @brief Input could be one of the values specified in allowed_input_nums.\n   */\n  OpSchema& NumInputs(set<int> allowed_input_nums);\n  /**\n   * @brief Input is checked with a specified function.\n   */\n  OpSchema& NumInputs(std::function<bool(int)> func);\n\n  // Sets the number of outputs, either a fixed number, a min and a max,\n  // or a function that takes in the input number and produces an output\n  // number. Use only one function in the set below.\n  /**\n   * @brief A single output.\n   */\n  OpSchema& NumOutputs(int n);\n  /**\n   * @brief Output could be in range [min, max], inclusive.\n   */\n  OpSchema& NumOutputs(int min, int max);\n  /**\n   * @brief Output could be one of the values specified in allowed_output_nums.\n   */\n  OpSchema& NumOutputs(set<int> allowed_output_nums);\n  /**\n   * @brief Output is checked with a specified function.\n   */\n  OpSchema& NumOutputs(std::function<bool(int)> func);\n\n  /**\n   * @brief Relationship between inputs and outputs is checked with a specified\n   * function.\n   */\n  OpSchema& NumInputsOutputs(std::function<bool(int, int)> func);\n\n  // Set the function that can calculate the number of output based on the\n  // number of input. 
Use only one function in the set below.\n  /**\n   * @brief Set the output calculator to a user-defined function.\n   */\n  OpSchema& OutputCalculator(std::function<int(int)> calc);\n  /**\n   * @brief Set the number of outputs to be the same as the number of inputs.\n   */\n  OpSchema& SameNumberOfOutput();\n\n  // Sets the rule to allow optional in-place operation.\n  OpSchema& AllowInplace(std::function<bool(int, int)> inplace);\n  OpSchema& AllowInplace(set<std::pair<int, int>> inplace);\n  OpSchema& AllowOneToOneInplace();\n  // Sets the rule to enforce in-place opeartion.\n  OpSchema& EnforceInplace(std::function<bool(int, int)> inplace);\n  OpSchema& EnforceInplace(set<std::pair<int, int>> inplace);\n  OpSchema& EnforceOneToOneInplace();\n\n  // Functions to deal with type and shape inference. Basically, this registers\n  // a function that takes in an OperatorDef and a series of input type and\n  // shape specified by TensorProto objects (whose data fields are empty), and\n  // produces a series of output type and shape.\n  typedef std::function<\n      vector<TensorShape>(const OperatorDef&, const vector<TensorShape>&)>\n      TensorInferenceFunctionType;\n  /**\n   * @brief Sets the tensor inference function, which is a std::function object\n   * defined in operator_schema.h.\n   */\n  OpSchema& TensorInferenceFunction(TensorInferenceFunctionType function);\n  /**\n   * @brief Sets the tensor inference function to produce the same output as\n   * the input.\n   */\n  OpSchema& IdenticalTypeAndShape();\n  OpSchema& IdenticalTypeAndShapeOfInput(int idx);\n  OpSchema& IdenticalTypeAndShapeOfInputDim(int idx, int dim);\n  OpSchema& ScalarType(::caffe2::TensorProto_DataType dt);\n\n  /**\n   * @brief A function to allow one to infer the type and shape from the op\n   * schema.\n   */\n  inline vector<TensorShape> InferTensor(\n      const OperatorDef& def,\n      const vector<TensorShape> input_type_shape) const {\n    return tensor_inference_function_(def, 
input_type_shape);\n  }\n\n  /*\n   * @brief A struct to store various cost information about\n   * an operator such as FLOPs and total memory use.\n   */\n  struct Cost {\n    uint64_t flops; // Floating point operations.\n    uint64_t bytes_moved; // Total memory used.\n  };\n  /**\n   * @brief Registers a function that takes in an OperatorDef\n   * and a series of input shapes and returns the total \"cost\"\n   * required to run the operator via struct by value.\n   */\n  typedef std::function<\n      struct Cost(const OperatorDef&, const vector<TensorShape>&)>\n      CostInferenceFunctionType;\n\n  /**\n   * @brief Register the Cost inference function.\n   */\n  OpSchema& CostInferenceFunction(CostInferenceFunctionType&& function);\n  bool HasCostInferenceFunction() const {\n    return !!cost_inference_function_;\n  }\n  inline struct Cost InferCost(\n      const OperatorDef& def,\n      const vector<TensorShape>& input_tensor_shape) const {\n    CAFFE_ENFORCE(\n        cost_inference_function_, \"Cost inference function not defined.\");\n    return (*cost_inference_function_)(def, input_tensor_shape);\n  }\n\n  // Functions to do documentation for the operator schema.\n  OpSchema& SetDoc(const string& doc);\n\n  struct Argument {\n    Argument(const char* name, const char* description, bool required)\n        : name_{name}, description_{description}, required_{required} {}\n\n    const char* name() const {\n      return name_;\n    }\n\n    const char* description() const {\n      return description_;\n    }\n\n    bool is_required() const {\n      return required_;\n    }\n\n   private:\n    const char* name_;\n    const char* description_;\n    const bool required_;\n  };\n\n  OpSchema&\n  Arg(const char* name, const char* description, bool required = false);\n\n#define DECLARE_STANDARD_ARG(name, str) \\\n  static const char* Arg_##name;        \\\n  OpSchema& Arg##name(const char* description);\n\n  DECLARE_STANDARD_ARG(IsTest, is_test)\n\n#undef 
DECLARE_STANDARD_ARG\n\n  OpSchema& Input(const int n, const char* name, const char* description);\n  OpSchema& Output(const int n, const char* name, const char* description);\n  // Calls the passed function with `this` as an argument. Useful for\n  // adding docs for temlated/macro ops.\n  OpSchema& FillUsing(std::function<void(OpSchema&)> populator);\n\n  // Remove from documentation\n  OpSchema& Private();\n\n  // This op can pass data across devices\n  OpSchema& InputsCanCrossDevices();\n\n  /**\n   * @brief A function to allow one to get the number of outputs based on the\n   * number of inputs, if this schema supports it.\n   */\n  int CalculateOutput(int num_input) const;\n\n  int min_input() const {\n    return min_input_;\n  }\n\n  int max_input() const {\n    return max_input_;\n  }\n\n  int min_output() const {\n    return min_output_;\n  }\n\n  int max_output() const {\n    return max_output_;\n  }\n\n  bool num_inputs_allowed(int x) const {\n    return num_inputs_allowed_(x);\n  }\n\n  bool num_outputs_allowed(int x) const {\n    return num_outputs_allowed_(x);\n  }\n\n  bool num_inputs_outputs_allowed(int x, int y) const {\n    return num_inputs_outputs_allowed_(x, y);\n  }\n\n  int inf() const {\n    return std::numeric_limits<int>::max();\n  }\n\n  friend std::ostream& operator<<(std::ostream& out, const OpSchema& schema);\n\n  const std::vector<Argument>& args() const {\n    return args_;\n  }\n\n  const std::vector<std::pair<const char*, const char*>>& input_desc() const {\n    return input_desc_;\n  }\n  const std::vector<std::pair<const char*, const char*>>& output_desc() const {\n    return output_desc_;\n  }\n  bool private_op() {\n    return private_;\n  }\n  bool inputs_can_cross_devices() const {\n    return inputs_can_cross_devices_;\n  }\n\n  /**\n   * @brief Returns the required device location of inputs and outputs.\n   */\n  using DeviceInferenceFunctionType = std::function<\n      std::pair<std::vector<DeviceOption>, 
std::vector<DeviceOption>>(\n          const OperatorDef& def)>;\n\n  OpSchema& DeviceInferenceFunction(DeviceInferenceFunctionType function);\n\n  /**\n   * @brief Infer required device location of an op's inputs and outputs\n   */\n  inline std::pair<std::vector<DeviceOption>, std::vector<DeviceOption>>\n  InferDevice(const OperatorDef& def) const {\n    return device_inference_function_(def);\n  }\n\n private:\n  string file_;\n  string doc_;\n  std::vector<Argument> args_{};\n  std::vector<std::pair<const char*, const char*>> input_desc_{};\n  std::vector<std::pair<const char*, const char*>> output_desc_{};\n  int line_ = 0;\n  int min_input_ = 0;\n  int max_input_ = std::numeric_limits<int>::max();\n  int min_output_ = 0;\n  int max_output_ = std::numeric_limits<int>::max();\n  bool private_ = false;\n  bool inputs_can_cross_devices_ = false;\n  std::function<bool(int)> num_inputs_allowed_ = [](int) { return true; };\n  std::function<bool(int)> num_outputs_allowed_ = [](int) { return true; };\n  std::function<bool(int, int)> num_inputs_outputs_allowed_ = [](int, int) {\n    return true;\n  };\n  std::function<int(int)> calculate_output_;\n  // In default, any in-place operation is neither allowed nor enforced.\n  std::function<bool(int, int)> inplace_allowed_ = [](int, int) {\n    return false;\n  };\n  std::function<bool(int, int)> inplace_enforced_ = [](int, int) {\n    return false;\n  };\n  TensorInferenceFunctionType tensor_inference_function_ =\n      [](const OperatorDef& def, const vector<TensorShape>&) {\n        vector<TensorShape> out;\n        for (int i = 0; i < def.output_size(); i++) {\n          TensorShape ts;\n          ts.set_unknown_shape(true);\n          out.push_back(ts);\n        }\n        return out;\n      };\n  std::unique_ptr<CostInferenceFunctionType> cost_inference_function_ = nullptr;\n  DeviceInferenceFunctionType device_inference_function_ =\n      [](const OperatorDef& def) {\n        auto op_device =\n            
def.has_device_option() ? def.device_option() : DeviceOption();\n        vector<DeviceOption> in_dev(def.input_size(), op_device);\n        vector<DeviceOption> out_dev(def.output_size(), op_device);\n        return std::make_pair(in_dev, out_dev);\n      };\n};\n\n/**\n * @brief A registry to hold all the operator schemas.\n */\nclass OpSchemaRegistry {\n public:\n  static OpSchema&\n  NewSchema(const string& key, const string& file, const int line) {\n    auto& m = map();\n    if (m.count(key)) {\n      const auto& schema = m[key];\n      std::ios_base::Init init;\n      std::cerr << \"Trying to register schema with name \" << key\n                << \" from file \" << file << \" line \" << line\n                << \", but it is already registered from file \" << schema.file()\n                << \" line \" << schema.line();\n      abort();\n    }\n    m.emplace(std::make_pair(key, OpSchema(file, line)));\n    return m[key];\n  }\n\n  static const OpSchema* Schema(const string& key) {\n    auto& m = map();\n    if (m.count(key)) {\n      return &m[key];\n    } else {\n      return nullptr;\n    }\n  }\n\n private:\n  // OpSchemaRegistry should not need to be instantiated.\n  OpSchemaRegistry() = delete;\n\n  /**\n   * @brief Returns the underlying string to OpSchema map.\n   *\n   * You should not manually manipulate the map object returned. 
Instead, use\n   * the macros defined such as OPERATOR_SCHEMA to register your operator\n   * schema.\n   *\n   * We wrap it inside a function to avoid the statia initialization order\n   * fiasco.\n   */\n  static CaffeMap<string, OpSchema>& map();\n};\n\n// Helper function for creating simple tensorproto with dimension and type\ntemplate <typename T_I = int>\ninline TensorShape CreateTensorShape(\n    vector<T_I> dims,\n    ::caffe2::TensorProto_DataType dt) {\n  TensorShape ts;\n  for (int d : dims) {\n    ts.add_dims(d);\n  }\n  ts.set_data_type(dt);\n  return ts;\n}\n\n// Helper function\ninline vector<TIndex> GetDimsVector(const TensorShape& shape) {\n  vector<TIndex> dims;\n  for (auto d : shape.dims()) {\n    dims.push_back(d);\n  }\n  return dims;\n}\n\n// Helper function for infer op inputs and outputs device information.\ninline std::pair<std::vector<DeviceOption>, std::vector<DeviceOption>>\nInferOpInputOutputDevice(const OperatorDef& op) {\n  auto op_schema = OpSchemaRegistry::Schema(op.type());\n  CAFFE_ENFORCE(\n      op_schema, \"Device inference failed. No schema for: \", op.type());\n  // TODO(wyiming) : add try catch here.\n  return op_schema->InferDevice(op);\n}\n\n} // namespace caffe2\n\n#ifndef CAFFE2_NO_OPERATOR_SCHEMA\n\n#define OPERATOR_SCHEMA(name)                            \\\n  void CAFFE2_PLEASE_ADD_OPERATOR_SCHEMA_FOR_##name(){}; \\\n  static OpSchema* CAFFE_ANONYMOUS_VARIABLE(name) =      \\\n      &OpSchemaRegistry::NewSchema(#name, __FILE__, __LINE__)\n#define OPERATOR_SCHEMA_STR(name)                                  \\\n  static OpSchema* CAFFE_ANONYMOUS_VARIABLE(schema_registration) = \\\n      &OpSchemaRegistry::NewSchema(name, __FILE__, __LINE__)\n\n#else // CAFFE2_NO_OPERATOR_SCHEMA\n\n#define OPERATOR_SCHEMA(name)                            \\\n  void CAFFE2_PLEASE_ADD_OPERATOR_SCHEMA_FOR_##name(){}; \\\n  static OpSchema* CAFFE_ANONYMOUS_VARIABLE(name) =      \\\n      1 ? 
nullptr : &OpSchemaRegistry::NewSchema(#name, __FILE__, __LINE__)\n#define OPERATOR_SCHEMA_STR(name)                                  \\\n  static OpSchema* CAFFE_ANONYMOUS_VARIABLE(schema_registration) = \\\n      1 ? nullptr : &OpSchemaRegistry::NewSchema(name, __FILE__, __LINE__)\n\n#endif // CAFFE2_NO_OPERATOR_SCHEMA\n\n#endif // CAFFE2_CORE_OPERATOR_SCHEMA_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/plan_executor.h",
    "content": "#pragma once\n\n#include <functional>\n#ifndef CAFFE2_MOBILE\n#include \"caffe2/core/stats.h\"\n#endif // CAFFE2_MOBILE\n\nnamespace caffe2 {\n\nclass Workspace;\nclass PlanDef;\n\ntypedef std::function<bool(int)> ShouldContinue;\n\nbool RunPlanOnWorkspace(Workspace* ws, const PlanDef& plan, ShouldContinue);\n\n#ifndef CAFFE2_MOBILE\nstruct PlanExecutionTime {\n  CAFFE_STAT_CTOR(PlanExecutionTime);\n  CAFFE_EXPORTED_STAT(plan_execution_time_ns);\n};\n#endif // CAFFE2_MOBILE\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/predictor.h",
    "content": "#pragma once\n\n#include \"caffe2/core/net.h\"\n#include \"caffe2/core/tensor.h\"\n\nnamespace caffe2 {\n\nclass Predictor {\n public:\n  using TensorVector = std::vector<TensorCPU*>;\n  // Runs the `init_net` once, then saves the `run_net` to be executed\n  // in `::run`\n  Predictor(\n      const NetDef& init_net,\n      const NetDef& run_net,\n      Workspace* parent = nullptr);\n  ~Predictor();\n\n  // Executes `run_net` on the inputs.\n  // The first `inputs.size()` inputs from run_net::external_inputs\n  // are shared with the data in `inputs`.\n\n  // Precondition:\n  //   inputs.size() <= run_net_.external_inputs.size()\n\n  // Postcondition:\n  //   outputs->size() == run_net.external_inputs.size()\n\n  // Returns true on success\n  bool run(const TensorVector& inputs, TensorVector* outputs);\n\n  const NetDef& def() const {\n    return run_net_;\n  };\n\n  Workspace* ws() {\n    return &ws_;\n  };\n\n private:\n  NetDef run_net_;\n  Workspace ws_;\n};\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/qtensor.h",
    "content": "#ifndef CAFFE2_CORE_QTENSOR_H_\n#define CAFFE2_CORE_QTENSOR_H_\n\n#include <algorithm>\n#include <climits>\n#include <cstddef>\n#include <vector>\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/core/typeid.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass QTensor {\n public:\n  QTensor() {}\n  virtual ~QTensor() {}\n  /**\n   * @brief Creates a quantized tensor of the given dimension.\n   *\n   * Note that the actual data allocation is not going to be carried out until\n   * the first time mutable_data() is called.\n   *\n   * The underlying storage of the quantized tensor interleaves elements\n   * by bit depth.\n   *\n   * Labeled memory for tensor of size 6, precision 3\n   *   [ E1[0] E2[0] E3[0] E4[0] E5[0] E6[0] ] // Least significant Bits\n   *   [ E1[1] E2[1] E3[1] E4[1] E5[1] E6[1] ]\n   *   [ E1[2] E2[2] E3[2] E4[2] E5[2] E6[2] ]\n   *\n   * In the case of sign bits (see enable_sign argument), an extra bit\n   * per element is added:\n   *\n   * Labeled memory for tensor of size 6, precision 3, sign bit enabled\n   *   [ E1[0] E2[0] E3[0] E4[0] E5[0] E6[0] ]\n   *   [ E1[1] E2[1] E3[1] E4[1] E5[1] E6[1] ]\n   *   [ E1[2] E2[2] E3[2] E4[2] E5[2] E6[2] ]\n   *   [ E1[s] E2[s] E3[s] E4[s] E5[s] E6[s] ]\n   *   Where 's' is 1 if E is negative\n   *\n   * The reason for this layout is the ability to efficiently multiply\n   * many low precision integers as a sum of popcnt(A & B) * 1 << bit.\n   * Explained here: https://arxiv.org/abs/1606.06160\n   */\n  explicit QTensor(\n      const std::vector<int>& dims,\n      const unsigned char precision,\n      const bool signbit = false)\n      : precision_(precision), signed_(signbit) {\n    Resize(dims);\n  }\n\n  void Resize(std::vector<int> dim_source) {\n    if (dims_ != dim_source) {\n      size_t source_size = std::accumulate(\n          dim_source.begin(), dim_source.end(), 1, std::multiplies<int>());\n    
  if ((source_size * (precision_ + signed_)) > capacity_) {\n        data_.reset();\n        capacity_ = 0;\n      }\n      dims_ = dim_source;\n      size_ = source_size;\n    }\n  }\n\n  void\n  SetBitAtIndex(const unsigned char bit, const size_t index, const bool value) {\n    // Get the mutable data at bit depth `bit`.\n    unsigned char* d = mutable_data();\n\n    CAFFE_ENFORCE(\n        bit < precision_ + signed_,\n        \"Attempted to a set a bit that is not allocated.\");\n    CAFFE_ENFORCE(bit * aligned_size() < capacity_);\n\n    auto idx = (aligned_size() * bit) / CHAR_BIT;\n    d = &d[idx];\n\n    idx = index / CHAR_BIT;\n    auto shift = CHAR_BIT - (index % CHAR_BIT) - 1;\n\n    if (value) {\n      d[idx] |= 1 << shift;\n    } else {\n      d[idx] &= ~(1 << shift);\n    }\n  }\n\n  bool GetBitAtIndex(const unsigned char bit, const size_t index) const {\n    // Get the data at bit depth `bit`\n    const unsigned char* d = data();\n    auto idx = (aligned_size() * bit) / CHAR_BIT;\n    d = &d[idx];\n\n    idx = index / CHAR_BIT;\n    auto shift = CHAR_BIT - (index % CHAR_BIT) - 1;\n\n    return d[idx] & (1 << shift);\n  }\n\n  void SetPrecision(const unsigned char precision) {\n    precision_ = precision;\n    data_.reset();\n  }\n\n  void SetSigned(const bool make_signed = true) {\n    signed_ = make_signed;\n    data_.reset();\n  }\n\n  void SetScale(const double scale) {\n    scale_ = scale;\n  }\n\n  void SetBias(const double bias) {\n    bias_ = bias;\n  }\n\n  unsigned char* mutable_data() {\n    if (!data_) {\n      auto ptr_and_deleter = Context::New(nbytes());\n      data_.reset(\n          static_cast<unsigned char*>(ptr_and_deleter.first),\n          ptr_and_deleter.second);\n      capacity_ = nbytes() * CHAR_BIT;\n    }\n    CAFFE_ENFORCE(capacity_ == nbytes() * CHAR_BIT);\n    return data_.get();\n  }\n\n  inline const unsigned char* data() const {\n    return data_.get();\n  }\n\n  inline size_t size() const {\n    return size_;\n  }\n\n  
inline unsigned char alignment() const {\n    return alignment_;\n  }\n\n  inline unsigned char precision() const {\n    return precision_;\n  }\n\n  inline const vector<int>& dims() const {\n    return dims_;\n  }\n\n  inline bool is_signed() const {\n    return signed_;\n  }\n\n  /**\n   * Returns the number of dimensions of the data.\n   */\n  inline int ndim() const {\n    return dims_.size();\n  }\n\n  inline size_t aligned_size() const {\n    return alignment_ * ((size_ + alignment_ - 1) / alignment_);\n  }\n\n  inline size_t nbytes() const {\n    return (aligned_size() * (precision_ + signed_)) / CHAR_BIT;\n  }\n\n  inline double scale() const {\n    return scale_;\n  }\n\n  inline double bias() const {\n    return bias_;\n  }\n\n  /**\n   * Returns the i-th dimension of the qtensor in int.\n   */\n  inline int dim32(const int i) const {\n    DCHECK_LT(i, dims_.size()) << \"Exceeding ndim limit \" << dims_.size();\n    DCHECK_GE(i, 0) << \"Cannot have negative index\";\n    CAFFE_ENFORCE_LT(dims_[i], std::numeric_limits<int>::max());\n    return static_cast<int>(dims_[i]);\n  }\n\n  /**\n   * Returns the 'canonical' version of a (usually)  user-specified axis,\n   * allowing for negative indexing (e.g., -1 for the last axis).\n   *\n   * @param axis_index the axis index.\n   *        If 0 <= index < ndim(), return index.\n   *        If -ndim <= index <= -1, return (ndim() - (-index)),\n   *        e.g., the last axis index (ndim() - 1) if index == -1,\n   *        the second to last if index == -2, etc.\n   *        Dies on out of range index.\n   */\n  inline int canonical_axis_index(int axis_index) const {\n    CAFFE_ENFORCE_GE(axis_index, -ndim());\n    CAFFE_ENFORCE_LT(axis_index, ndim());\n    if (axis_index < 0) {\n      return axis_index + ndim();\n    }\n    return axis_index;\n  }\n\n  /**\n   * Return product of all dimensions starting from K.\n   */\n  inline TIndex size_from_dim(int k) const {\n    TIndex r = 1;\n    for (int i = k; i < 
dims_.size(); ++i) {\n      r *= dims_[i];\n    }\n    return r;\n  }\n\n  /**\n   * Product of all dims up to.\n   */\n  inline TIndex size_to_dim(int k) const {\n    CAFFE_ENFORCE(k < dims_.size());\n    TIndex r = 1;\n    for (int i = 0; i < k; ++i) {\n      r *= dims_[i];\n    }\n    return r;\n  }\n\n protected:\n  std::vector<int> dims_;\n  size_t size_ = 0;\n\n  // Precision in bits.\n  unsigned char precision_ = CHAR_BIT;\n  // Bit alignment.\n  unsigned char alignment_ = CHAR_BIT;\n\n  // Allocated data.\n  std::shared_ptr<unsigned char> data_;\n\n  // value = scale_ * (x + bias_)\n  double scale_;\n  double bias_;\n  bool signed_ = false;\n\n  // Capacity in bits.\n  size_t capacity_ = 0;\n};\n\n} // namespace caffe2\n#endif // CAFFE2_CORE_QTENSOR_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/qtensor_serialization.h",
    "content": "#ifndef CAFFE2_CORE_QTENSOR_SERIALIZATION_H_\n#define CAFFE2_CORE_QTENSOR_SERIALIZATION_H_\n\n#include \"caffe2/core/blob_serialization.h\"\n#include \"caffe2/core/qtensor.h\"\n\nnamespace caffe2 {\n\nconstexpr auto kQTensorBlobQType = \"QTensor\";\n\ntemplate <class Context>\nclass QTensorSerializer : public BlobSerializerBase {\n public:\n  QTensorSerializer() : context_() {}\n  ~QTensorSerializer() {}\n  /**\n   * Serializes a Blob. Note that this blob has to contain QTensor<Context>.\n   */\n  void Serialize(\n      const Blob& blob,\n      const string& name,\n      SerializationAcceptor acceptor) override;\n\n private:\n  Context context_;\n};\n\ntemplate <class Context>\nclass QTensorDeserializer : public BlobDeserializerBase {\n public:\n  void Deserialize(const BlobProto& proto, Blob* blob) override;\n  void Deserialize(const QTensorProto& proto, QTensor<Context>* tensor);\n};\n\ntemplate <class Context>\nvoid QTensorSerializer<Context>::Serialize(\n    const Blob& blob,\n    const string& name,\n    BlobSerializerBase::SerializationAcceptor acceptor) {\n  const auto& qtensor = blob.template Get<QTensor<Context>>();\n  BlobProto blob_proto;\n  blob_proto.set_name(name);\n  blob_proto.set_type(kQTensorBlobQType);\n  QTensorProto& proto = *blob_proto.mutable_qtensor();\n  proto.set_name(name);\n  for (int i = 0; i < qtensor.ndim(); ++i) {\n    proto.add_dims(qtensor.dim32(i));\n  }\n  proto.set_precision(qtensor.precision());\n  proto.set_scale(qtensor.scale());\n  proto.set_bias(qtensor.bias());\n  proto.set_is_signed(qtensor.is_signed());\n  detail::CopyToProtoWithCast(\n      qtensor.nbytes(), qtensor.data(), proto.mutable_data(), &this->context_);\n  acceptor(name, blob_proto.SerializeAsString());\n}\n\ntemplate <class Context>\nvoid QTensorDeserializer<Context>::Deserialize(\n    const BlobProto& blob_proto,\n    Blob* blob) {\n  Deserialize(blob_proto.qtensor(), blob->GetMutable<QTensor<Context>>());\n}\n\ntemplate <class Context>\nvoid 
QTensorDeserializer<Context>::Deserialize(\n    const QTensorProto& proto,\n    QTensor<Context>* qtensor) {\n  Context context{};\n  vector<int> dims;\n  for (const int d : proto.dims()) {\n    dims.push_back(d);\n  }\n  qtensor->Resize(dims);\n  qtensor->SetPrecision(proto.precision());\n  qtensor->SetScale(proto.scale());\n  qtensor->SetBias(proto.bias());\n  qtensor->SetSigned(proto.is_signed());\n\n  detail::CopyFromProtoWithCast(\n      qtensor->nbytes(), proto.data(), qtensor->mutable_data(), &context);\n}\n\n} // namespace caffe2\n\n#endif // CAFFE2_CORE_QTENSOR_SERIALIZATION_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/registry.h",
    "content": "/**\n * Simple registry implementation in Caffe2 that uses static variables to\n * register object creators during program initialization time.\n */\n#ifndef CAFFE2_CORE_REGISTRY_H_\n#define CAFFE2_CORE_REGISTRY_H_\n\n#include <algorithm>\n#include <cstdio>\n#include <cstdlib>\n#include <functional>\n#include <memory>\n#include <mutex>\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/typeid.h\"\n\nnamespace caffe2 {\n\ntemplate <typename KeyType>\ninline void PrintOffendingKey(const KeyType& key) {\n  printf(\"[key type printing not supported]\\n\");\n}\n\ntemplate <>\ninline void PrintOffendingKey(const string& key) {\n  printf(\"Offending key: %s.\\n\", key.c_str());\n}\n\n/**\n * @brief A template class that allows one to register classes by keys.\n *\n * The keys are usually a string specifying the name, but can be anything that\n * can be used in a std::map.\n *\n * You should most likely not use the Registry class explicitly, but use the\n * helper macros below to declare specific registries as well as registering\n * objects.\n */\ntemplate <class SrcType, class ObjectType, class... 
Args>\nclass Registry {\n public:\n  typedef std::function<std::unique_ptr<ObjectType> (Args ...)> Creator;\n\n  Registry() : registry_() {}\n\n  void Register(const SrcType& key, Creator creator) {\n    // The if statement below is essentially the same as the following line:\n    // CHECK_EQ(registry_.count(key), 0) << \"Key \" << key\n    //                                   << \" registered twice.\";\n    // However, CHECK_EQ depends on google logging, and since registration is\n    // carried out at static initialization time, we do not want to have an\n    // explicit dependency on glog's initialization function.\n    std::lock_guard<std::mutex> lock(register_mutex_);\n    if (registry_.count(key) != 0) {\n      printf(\"Key already registered.\\n\");\n      PrintOffendingKey(key);\n      std::exit(1);\n    }\n    registry_[key] = creator;\n  }\n\n  void Register(const SrcType& key, Creator creator, const string& help_msg) {\n    Register(key, creator);\n    help_message_[key] = help_msg;\n  }\n\n  inline bool Has(const SrcType& key) { return (registry_.count(key) != 0); }\n\n  unique_ptr<ObjectType> Create(const SrcType& key, Args ... 
args) {\n    if (registry_.count(key) == 0) {\n      // Returns nullptr if the key is not registered.\n      return nullptr;\n    }\n    return registry_[key](args...);\n  }\n\n  /**\n   * Returns the keys currently registered as a vector.\n   */\n  vector<SrcType> Keys() {\n    vector<SrcType> keys;\n    for (const auto& it : registry_) {\n      keys.push_back(it.first);\n    }\n    return keys;\n  }\n\n  const CaffeMap<SrcType, string>& HelpMessage() const {\n    return help_message_;\n  }\n\n  const char* HelpMessage(const SrcType& key) const {\n    auto it = help_message_.find(key);\n    if (it == help_message_.end()) {\n      return nullptr;\n    }\n    return it->second.c_str();\n  }\n\n private:\n  CaffeMap<SrcType, Creator> registry_;\n  CaffeMap<SrcType, string> help_message_;\n  std::mutex register_mutex_;\n\n  DISABLE_COPY_AND_ASSIGN(Registry);\n};\n\ntemplate <class SrcType, class ObjectType, class... Args>\nclass Registerer {\n public:\n  Registerer(const SrcType& key,\n             Registry<SrcType, ObjectType, Args...>* registry,\n             typename Registry<SrcType, ObjectType, Args...>::Creator creator,\n             const string& help_msg=\"\") {\n    registry->Register(key, creator, help_msg);\n  }\n\n  template <class DerivedType>\n  static unique_ptr<ObjectType> DefaultCreator(Args ... args) {\n    // TODO(jiayq): old versions of NVCC does not handle make_unique well\n    // so we are forced to use a unique_ptr constructor here. 
Check if it is\n    // fine to use make_unique in the future.\n    // return make_unique<DerivedType>(args...);\n    return std::unique_ptr<ObjectType>(new DerivedType(args...));\n  }\n};\n\n/**\n * CAFFE_ANONYMOUS_VARIABLE(str) introduces an identifier starting with\n * str and ending with a number that varies with the line.\n * Pretty much a copy from 'folly/Preprocessor.h'\n */\n#define CAFFE_CONCATENATE_IMPL(s1, s2) s1##s2\n#define CAFFE_CONCATENATE(s1, s2) CAFFE_CONCATENATE_IMPL(s1, s2)\n#ifdef __COUNTER__\n#define CAFFE_ANONYMOUS_VARIABLE(str) CAFFE_CONCATENATE(str, __COUNTER__)\n#else\n#define CAFFE_ANONYMOUS_VARIABLE(str) CAFFE_CONCATENATE(str, __LINE__)\n#endif\n\n/**\n * CAFFE_DECLARE_TYPED_REGISTRY is a macro that expands to a function\n * declaration, as well as creating a convenient typename for its corresponding\n * registerer.\n */\n#define CAFFE_DECLARE_TYPED_REGISTRY(RegistryName, SrcType, ObjectType, ...) \\\n  Registry<SrcType, ObjectType, ##__VA_ARGS__>* RegistryName();              \\\n  typedef Registerer<SrcType, ObjectType, ##__VA_ARGS__>                     \\\n      Registerer##RegistryName;\n\n#define CAFFE_DEFINE_TYPED_REGISTRY(RegistryName, SrcType, ObjectType, ...) \\\n  Registry<SrcType, ObjectType, ##__VA_ARGS__>* RegistryName() {            \\\n    static Registry<SrcType, ObjectType, ##__VA_ARGS__>* registry =         \\\n        new Registry<SrcType, ObjectType, ##__VA_ARGS__>();                 \\\n    return registry;                                                        \\\n  }\n\n// Note(Yangqing): The __VA_ARGS__ below allows one to specify a templated\n// creator with comma in its templated arguments.\n#define CAFFE_REGISTER_TYPED_CREATOR(RegistryName, key, ...)                  
\\\n  namespace {                                                                 \\\n  static Registerer##RegistryName CAFFE_ANONYMOUS_VARIABLE(g_##RegistryName)( \\\n      key, RegistryName(), __VA_ARGS__);                                      \\\n  }\n\n#define CAFFE_REGISTER_TYPED_CLASS(RegistryName, key, ...)                    \\\n  namespace {                                                                 \\\n  static Registerer##RegistryName CAFFE_ANONYMOUS_VARIABLE(g_##RegistryName)( \\\n      key,                                                                    \\\n      RegistryName(),                                                         \\\n      Registerer##RegistryName::DefaultCreator<__VA_ARGS__>,                  \\\n      TypeMeta::Name<__VA_ARGS__>());                                         \\\n  }\n\n// CAFFE_DECLARE_REGISTRY and CAFFE_DEFINE_REGISTRY are hard-wired to use string\n// as the key\n// type, because that is the most commonly used cases.\n#define CAFFE_DECLARE_REGISTRY(RegistryName, ObjectType, ...) \\\n  CAFFE_DECLARE_TYPED_REGISTRY(                               \\\n      RegistryName, std::string, ObjectType, ##__VA_ARGS__)\n\n#define CAFFE_DEFINE_REGISTRY(RegistryName, ObjectType, ...) \\\n  CAFFE_DEFINE_TYPED_REGISTRY(                               \\\n      RegistryName, std::string, ObjectType, ##__VA_ARGS__)\n\n// CAFFE_REGISTER_CREATOR and CAFFE_REGISTER_CLASS are hard-wired to use string\n// as the key\n// type, because that is the most commonly used cases.\n#define CAFFE_REGISTER_CREATOR(RegistryName, key, ...) \\\n  CAFFE_REGISTER_TYPED_CREATOR(RegistryName, #key, __VA_ARGS__)\n\n#define CAFFE_REGISTER_CLASS(RegistryName, key, ...) \\\n  CAFFE_REGISTER_TYPED_CLASS(RegistryName, #key, __VA_ARGS__)\n\n}  // namespace caffe2\n#endif  // CAFFE2_CORE_REGISTRY_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/scope_guard.h",
    "content": "/**\n * Copyright 2016 Facebook\n * @author Tudor Bosman (tudorb@fb.com)\n */\n\n#pragma once\n\n#include <cstddef>\n#include <functional>\n#include <new>\n#include <type_traits>\n#include <utility>\n\nnamespace caffe2 {\n\n// Copied from folly/ScopeGuard.h\n\n/**\n * ScopeGuard is a general implementation of the \"Initialization is\n * Resource Acquisition\" idiom.  Basically, it guarantees that a function\n * is executed upon leaving the currrent scope unless otherwise told.\n *\n * The MakeGuard() function is used to create a new ScopeGuard object.\n * It can be instantiated with a lambda function, a std::function<void()>,\n * a functor, or a void(*)() function pointer.\n *\n *\n * Usage example: Add a friend to memory iff it is also added to the db.\n *\n * void User::addFriend(User& newFriend) {\n *   // add the friend to memory\n *   friends_.push_back(&newFriend);\n *\n *   // If the db insertion that follows fails, we should\n *   // remove it from memory.\n *   // (You could also declare this as \"auto guard = MakeGuard(...)\")\n *   ScopeGuard guard = MakeGuard([&] { friends_.pop_back(); });\n *\n *   // this will throw an exception upon error, which\n *   // makes the ScopeGuard execute UserCont::pop_back()\n *   // once the Guard's destructor is called.\n *   db_->addFriend(GetName(), newFriend.GetName());\n *\n *   // an exception was not thrown, so don't execute\n *   // the Guard.\n *   guard.dismiss();\n * }\n *\n * Examine ScopeGuardTest.cpp for some more sample usage.\n *\n * Stolen from:\n *   Andrei's and Petru Marginean's CUJ article:\n *     http://drdobbs.com/184403758\n *   and the loki library:\n *     http://loki-lib.sourceforge.net/index.php?n=Idioms.ScopeGuardPointer\n *   and triendl.kj article:\n *     http://www.codeproject.com/KB/cpp/scope_guard.aspx\n */\nclass ScopeGuardImplBase {\n public:\n  void dismiss() noexcept {\n    dismissed_ = true;\n  }\n\n protected:\n  ScopeGuardImplBase() noexcept : dismissed_(false) 
{}\n\n  static ScopeGuardImplBase makeEmptyScopeGuard() noexcept {\n    return ScopeGuardImplBase{};\n  }\n\n  template <typename T>\n  static const T& asConst(const T& t) noexcept {\n    return t;\n  }\n\n  bool dismissed_;\n};\n\ntemplate <typename FunctionType>\nclass ScopeGuardImpl : public ScopeGuardImplBase {\n public:\n  explicit ScopeGuardImpl(FunctionType& fn) noexcept(\n      std::is_nothrow_copy_constructible<FunctionType>::value)\n      : ScopeGuardImpl(\n            asConst(fn),\n            makeFailsafe(std::is_nothrow_copy_constructible<FunctionType>{},\n                         &fn)) {}\n\n  explicit ScopeGuardImpl(const FunctionType& fn) noexcept(\n      std::is_nothrow_copy_constructible<FunctionType>::value)\n      : ScopeGuardImpl(\n            fn,\n            makeFailsafe(std::is_nothrow_copy_constructible<FunctionType>{},\n                         &fn)) {}\n\n  explicit ScopeGuardImpl(FunctionType&& fn) noexcept(\n      std::is_nothrow_move_constructible<FunctionType>::value)\n      : ScopeGuardImpl(\n            std::move_if_noexcept(fn),\n            makeFailsafe(std::is_nothrow_move_constructible<FunctionType>{},\n                         &fn)) {}\n\n  ScopeGuardImpl(ScopeGuardImpl&& other) noexcept(\n      std::is_nothrow_move_constructible<FunctionType>::value)\n      : function_(std::move_if_noexcept(other.function_)) {\n    // If the above line attempts a copy and the copy throws, other is\n    // left owning the cleanup action and will execute it (or not) depending\n    // on the value of other.dismissed_. 
The following lines only execute\n    // if the move/copy succeeded, in which case *this assumes ownership of\n    // the cleanup action and dismisses other.\n    dismissed_ = other.dismissed_;\n    other.dismissed_ = true;\n  }\n\n  ~ScopeGuardImpl() noexcept {\n    if (!dismissed_) {\n      execute();\n    }\n  }\n\n private:\n  static ScopeGuardImplBase makeFailsafe(std::true_type, const void*) noexcept {\n    return makeEmptyScopeGuard();\n  }\n\n  template <typename Fn>\n  static auto makeFailsafe(std::false_type, Fn* fn) noexcept\n      -> ScopeGuardImpl<decltype(std::ref(*fn))> {\n    return ScopeGuardImpl<decltype(std::ref(*fn))>{std::ref(*fn)};\n  }\n\n  template <typename Fn>\n  explicit ScopeGuardImpl(Fn&& fn, ScopeGuardImplBase&& failsafe)\n      : ScopeGuardImplBase{}, function_(std::forward<Fn>(fn)) {\n    failsafe.dismiss();\n  }\n\n  void* operator new(std::size_t) = delete;\n\n  void execute() noexcept { function_(); }\n\n  FunctionType function_;\n};\n\ntemplate <typename FunctionType>\nScopeGuardImpl<typename std::decay<FunctionType>::type>\nMakeGuard(FunctionType&& fn) noexcept(\n    std::is_nothrow_constructible<typename std::decay<FunctionType>::type,\n                                  FunctionType>::value) {\n  return ScopeGuardImpl<typename std::decay<FunctionType>::type>(\n      std::forward<FunctionType>(fn));\n}\n\n/**\n * This is largely unneeded if you just use auto for your guards.\n */\ntypedef ScopeGuardImplBase&& ScopeGuard;\n\n}  // namespaces\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/static_tracepoint.h",
    "content": "/*\n * Copyright 2017 Facebook, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *   http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__))\n#include <caffe2/core/static_tracepoint_elfx86.h>\n\n#define CAFFE_SDT(name, ...)                                         \\\n  CAFFE_SDT_PROBE_N(                                                 \\\n    caffe2, name, CAFFE_SDT_NARG(0, ##__VA_ARGS__), ##__VA_ARGS__)\n#else\n#define CAFFE_SDT(name, ...) do {} while(0)\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/static_tracepoint_elfx86.h",
    "content": "/*\n * Copyright 2017 Facebook, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *   http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n// Default constraint for the probe arguments as operands.\n#ifndef CAFFE_SDT_ARG_CONSTRAINT\n#define CAFFE_SDT_ARG_CONSTRAINT      \"nor\"\n#endif\n\n// Instruction to emit for the probe.\n#define CAFFE_SDT_NOP                 nop\n\n// Note section properties.\n#define CAFFE_SDT_NOTE_NAME           \"stapsdt\"\n#define CAFFE_SDT_NOTE_TYPE           3\n\n// Size of address depending on platform.\n#ifdef __LP64__\n#define CAFFE_SDT_ASM_ADDR            .8byte\n#else\n#define CAFFE_SDT_ASM_ADDR            .4byte\n#endif\n\n// Assembler helper Macros.\n#define CAFFE_SDT_S(x)                #x\n#define CAFFE_SDT_ASM_1(x)            CAFFE_SDT_S(x) \"\\n\"\n#define CAFFE_SDT_ASM_2(a, b)         CAFFE_SDT_S(a) \",\" CAFFE_SDT_S(b) \"\\n\"\n#define CAFFE_SDT_ASM_3(a, b, c)      CAFFE_SDT_S(a) \",\" CAFFE_SDT_S(b) \",\"    \\\n                                      CAFFE_SDT_S(c) \"\\n\"\n#define CAFFE_SDT_ASM_STRING(x)       CAFFE_SDT_ASM_1(.asciz CAFFE_SDT_S(x))\n\n// Helper to determine the size of an argument.\n#define CAFFE_SDT_ISARRAY(x)  (__builtin_classify_type(x) == 14)\n#define CAFFE_SDT_ARGSIZE(x)  (CAFFE_SDT_ISARRAY(x) ? 
sizeof(void*) : sizeof(x))\n\n// Format of each probe arguments as operand.\n// Size of the arugment tagged with CAFFE_SDT_Sn, with \"n\" constraint.\n// Value of the argument tagged with CAFFE_SDT_An, with configured constraint.\n#define CAFFE_SDT_ARG(n, x)                                                    \\\n  [CAFFE_SDT_S##n] \"n\"                ((size_t)CAFFE_SDT_ARGSIZE(x)),          \\\n  [CAFFE_SDT_A##n] CAFFE_SDT_ARG_CONSTRAINT (x)\n\n// Templates to append arguments as operands.\n#define CAFFE_SDT_OPERANDS_0()        [__sdt_dummy] \"g\" (0)\n#define CAFFE_SDT_OPERANDS_1(_1)      CAFFE_SDT_ARG(1, _1)\n#define CAFFE_SDT_OPERANDS_2(_1, _2)                                           \\\n  CAFFE_SDT_OPERANDS_1(_1), CAFFE_SDT_ARG(2, _2)\n#define CAFFE_SDT_OPERANDS_3(_1, _2, _3)                                       \\\n  CAFFE_SDT_OPERANDS_2(_1, _2), CAFFE_SDT_ARG(3, _3)\n#define CAFFE_SDT_OPERANDS_4(_1, _2, _3, _4)                                   \\\n  CAFFE_SDT_OPERANDS_3(_1, _2, _3), CAFFE_SDT_ARG(4, _4)\n#define CAFFE_SDT_OPERANDS_5(_1, _2, _3, _4, _5)                               \\\n  CAFFE_SDT_OPERANDS_4(_1, _2, _3, _4), CAFFE_SDT_ARG(5, _5)\n#define CAFFE_SDT_OPERANDS_6(_1, _2, _3, _4, _5, _6)                           \\\n  CAFFE_SDT_OPERANDS_5(_1, _2, _3, _4, _5), CAFFE_SDT_ARG(6, _6)\n#define CAFFE_SDT_OPERANDS_7(_1, _2, _3, _4, _5, _6, _7)                       \\\n  CAFFE_SDT_OPERANDS_6(_1, _2, _3, _4, _5, _6), CAFFE_SDT_ARG(7, _7)\n#define CAFFE_SDT_OPERANDS_8(_1, _2, _3, _4, _5, _6, _7, _8)                   \\\n  CAFFE_SDT_OPERANDS_7(_1, _2, _3, _4, _5, _6, _7), CAFFE_SDT_ARG(8, _8)\n\n// Templates to reference the arguments from operands in note section.\n#define CAFFE_SDT_ARGFMT(no)        %n[CAFFE_SDT_S##no]@%[CAFFE_SDT_A##no]\n#define CAFFE_SDT_ARG_TEMPLATE_0    /*No arguments*/\n#define CAFFE_SDT_ARG_TEMPLATE_1    CAFFE_SDT_ARGFMT(1)\n#define CAFFE_SDT_ARG_TEMPLATE_2    CAFFE_SDT_ARG_TEMPLATE_1 CAFFE_SDT_ARGFMT(2)\n#define 
CAFFE_SDT_ARG_TEMPLATE_3    CAFFE_SDT_ARG_TEMPLATE_2 CAFFE_SDT_ARGFMT(3)\n#define CAFFE_SDT_ARG_TEMPLATE_4    CAFFE_SDT_ARG_TEMPLATE_3 CAFFE_SDT_ARGFMT(4)\n#define CAFFE_SDT_ARG_TEMPLATE_5    CAFFE_SDT_ARG_TEMPLATE_4 CAFFE_SDT_ARGFMT(5)\n#define CAFFE_SDT_ARG_TEMPLATE_6    CAFFE_SDT_ARG_TEMPLATE_5 CAFFE_SDT_ARGFMT(6)\n#define CAFFE_SDT_ARG_TEMPLATE_7    CAFFE_SDT_ARG_TEMPLATE_6 CAFFE_SDT_ARGFMT(7)\n#define CAFFE_SDT_ARG_TEMPLATE_8    CAFFE_SDT_ARG_TEMPLATE_7 CAFFE_SDT_ARGFMT(8)\n\n// Structure of note section for the probe.\n#define CAFFE_SDT_NOTE_CONTENT(provider, name, arg_template)                   \\\n  CAFFE_SDT_ASM_1(990: CAFFE_SDT_NOP)                                          \\\n  CAFFE_SDT_ASM_3(     .pushsection .note.stapsdt,\"\",\"note\")                   \\\n  CAFFE_SDT_ASM_1(     .balign 4)                                              \\\n  CAFFE_SDT_ASM_3(     .4byte 992f-991f, 994f-993f, CAFFE_SDT_NOTE_TYPE)       \\\n  CAFFE_SDT_ASM_1(991: .asciz CAFFE_SDT_NOTE_NAME)                             \\\n  CAFFE_SDT_ASM_1(992: .balign 4)                                              \\\n  CAFFE_SDT_ASM_1(993: CAFFE_SDT_ASM_ADDR 990b)                                \\\n  CAFFE_SDT_ASM_1(     CAFFE_SDT_ASM_ADDR 0) /*Reserved for Semaphore address*/\\\n  CAFFE_SDT_ASM_1(     CAFFE_SDT_ASM_ADDR 0) /*Reserved for Semaphore name*/   \\\n  CAFFE_SDT_ASM_STRING(provider)                                               \\\n  CAFFE_SDT_ASM_STRING(name)                                                   \\\n  CAFFE_SDT_ASM_STRING(arg_template)                                           \\\n  CAFFE_SDT_ASM_1(994: .balign 4)                                              \\\n  CAFFE_SDT_ASM_1(     .popsection)\n\n// Main probe Macro.\n#define CAFFE_SDT_PROBE(provider, name, n, arglist)                            \\\n    __asm__ __volatile__ (                                                     \\\n      CAFFE_SDT_NOTE_CONTENT(provider, name, CAFFE_SDT_ARG_TEMPLATE_##n)    
   \\\n      :: CAFFE_SDT_OPERANDS_##n arglist                                        \\\n    )                                                                          \\\n\n// Helper Macros to handle variadic arguments.\n#define CAFFE_SDT_NARG_(_0, _1, _2, _3, _4, _5, _6, _7, _8, N, ...) N\n#define CAFFE_SDT_NARG(...)                                                    \\\n  CAFFE_SDT_NARG_(__VA_ARGS__, 8, 7, 6, 5, 4, 3, 2, 1, 0)\n#define CAFFE_SDT_PROBE_N(provider, name, N, ...)                              \\\n  CAFFE_SDT_PROBE(provider, name, N, (__VA_ARGS__))\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/stats.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <memory>\n#include <mutex>\n#include <string>\n#include <unordered_map>\n#include <vector>\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/static_tracepoint.h\"\n\nnamespace caffe2 {\n\nclass StatValue {\n  std::atomic<int64_t> v_{0};\n\n public:\n  int64_t increment(int64_t inc) {\n    return v_ += inc;\n  }\n\n  int64_t reset(int64_t value = 0) {\n    return v_.exchange(value);\n  }\n\n  int64_t get() const {\n    return v_.load();\n  }\n};\n\nstruct ExportedStatValue {\n  std::string key;\n  int64_t value;\n  std::chrono::time_point<std::chrono::high_resolution_clock> ts;\n};\n\n/**\n * @brief Holds names and values of counters exported from a StatRegistry.\n */\nusing ExportedStatList = std::vector<ExportedStatValue>;\nusing ExportedStatMap = std::unordered_map<std::string, int64_t>;\n\nExportedStatMap toMap(const ExportedStatList& stats);\n\n/**\n * @brief Holds a map of atomic counters keyed by name.\n *\n * The StatRegistry singleton, accessed through StatRegistry::get(), holds\n * counters registered through the macro CAFFE_EXPORTED_STAT. 
Example of usage:\n *\n * struct MyCaffeClass {\n *   MyCaffeClass(const std::string& instanceName): stats_(instanceName) {}\n *   void run(int numRuns) {\n *     try {\n *       CAFFE_EVENT(stats_, num_runs, numRuns);\n *       tryRun(numRuns);\n *       CAFFE_EVENT(stats_, num_successes);\n *     } catch (std::exception& e) {\n *       CAFFE_EVENT(stats_, num_failures, 1, \"arg_to_usdt\", e.what());\n *     }\n *     CAFFE_EVENT(stats_, usdt_only, 1, \"arg_to_usdt\");\n *   }\n *  private:\n *   struct MyStats {\n *     CAFFE_STAT_CTOR(MyStats);\n *     CAFFE_EXPORTED_STAT(num_runs);\n *     CAFFE_EXPORTED_STAT(num_successes);\n *     CAFFE_EXPORTED_STAT(num_failures);\n *     CAFFE_STAT(usdt_only);\n *   } stats_;\n * };\n *\n * int main() {\n *   MyCaffeClass a(\"first\");\n *   MyCaffeClass b(\"second\");\n *   for (int i = 0; i < 10; ++i) {\n *     a.run(10);\n *     b.run(5);\n *   }\n *   ExportedStatList finalStats;\n *   StatRegistry::get().publish(finalStats);\n * }\n *\n * For every new instance of MyCaffeClass, a new counter is created with\n * the instance name as prefix. Everytime run() is called, the corresponding\n * counter will be incremented by the given value, or 1 if value not provided.\n *\n * Counter values can then be exported into an ExportedStatList. In the\n * example above, considering \"tryRun\" never throws, `finalStats` will be\n * populated as follows:\n *\n *   first/num_runs       100\n *   first/num_successes   10\n *   first/num_failures     0\n *   second/num_runs       50\n *   second/num_successes  10\n *   second/num_failures    0\n *\n * The event usdt_only is not present in ExportedStatList because it is declared\n * as CAFFE_STAT, which does not create a counter.\n *\n * Additionally, for each call to CAFFE_EVENT, a USDT probe is generated.\n * The probe will be set up with the following arguments:\n *   - Probe name: field name (e.g. \"num_runs\")\n *   - Arg #0: instance name (e.g. 
\"first\", \"second\")\n *   - Arg #1: For CAFFE_EXPORTED_STAT, value of the updated counter\n *             For CAFFE_STAT, -1 since no counter is available\n *   - Args ...: Arguments passed to CAFFE_EVENT, including update value\n *             when provided.\n *\n * It is also possible to create additional StatRegistry instances beyond\n * the singleton. These instances are not automatically populated with\n * CAFFE_EVENT. Instead, they can be populated from an ExportedStatList\n * structure by calling StatRegistry::update().\n *\n */\nclass StatRegistry {\n  std::mutex mutex_;\n  std::unordered_map<std::string, std::unique_ptr<StatValue>> stats_;\n\n public:\n  /**\n   * Retrieve the singleton StatRegistry, which gets populated\n   * through the CAFFE_EVENT macro.\n   */\n  static StatRegistry& get();\n\n  /**\n   * Add a new counter with given name. If a counter for this name already\n   * exists, returns a pointer to it.\n   */\n  StatValue* add(const std::string& name);\n\n  /**\n   * Populate an ExportedStatList with current counter values.\n   * If `reset` is true, resets all counters to zero. It is guaranteed that no\n   * count is lost.\n   */\n  void publish(ExportedStatList& exported, bool reset = false);\n\n  ExportedStatList publish(bool reset = false) {\n    ExportedStatList stats;\n    publish(stats, reset);\n    return stats;\n  }\n\n  /**\n   * Update values of counters contained in the given ExportedStatList to\n   * the values provided, creating counters that don't exist.\n   */\n  void update(const ExportedStatList& data);\n\n  ~StatRegistry();\n};\n\nstruct Stat {\n  std::string groupName;\n  std::string name;\n  Stat(const std::string& gn, const std::string& n) : groupName(gn), name(n) {}\n\n  template <typename... Unused>\n  int64_t increment(Unused...) 
{\n    return -1;\n  }\n};\n\nclass ExportedStat : public Stat {\n  StatValue* value_;\n\n public:\n  ExportedStat(const std::string& gn, const std::string& n)\n      : Stat(gn, n), value_(StatRegistry::get().add(gn + \"/\" + n)) {}\n\n  int64_t increment(int64_t value = 1) {\n    return value_->increment(value);\n  }\n\n  template <typename T, typename Unused1, typename... Unused>\n  int64_t increment(T value, Unused1, Unused...) {\n    return increment(value);\n  }\n};\n\nclass AvgExportedStat : public ExportedStat {\n private:\n  ExportedStat count_;\n\n public:\n  AvgExportedStat(const std::string& gn, const std::string& n)\n      : ExportedStat(gn, n + \"/sum\"), count_(gn, n + \"/count\") {}\n\n  int64_t increment(int64_t value = 1) {\n    count_.increment();\n    return ExportedStat::increment(value);\n  }\n\n  template <typename T, typename Unused1, typename... Unused>\n  int64_t increment(T value, Unused1, Unused...) {\n    return increment(value);\n  }\n};\n\nclass DetailedExportedStat : public ExportedStat {\n private:\n  std::vector<ExportedStat> details_;\n\n public:\n  DetailedExportedStat(const std::string& gn, const std::string& n)\n      : ExportedStat(gn, n) {}\n\n  void setDetails(const std::vector<std::string>& detailNames) {\n    details_.clear();\n    for (const auto& detailName : detailNames) {\n      details_.emplace_back(groupName, name + \"/\" + detailName);\n    }\n  }\n\n  template <typename T, typename... Unused>\n  int64_t increment(T value, size_t detailIndex, Unused...) 
{\n    if (detailIndex < details_.size()) {\n      details_[detailIndex].increment(value);\n    }\n    return ExportedStat::increment(value);\n  }\n};\n\nnamespace detail {\n\ntemplate <class T>\nstruct _ScopeGuard {\n  T f_;\n  std::chrono::high_resolution_clock::time_point start_;\n\n  explicit _ScopeGuard(T f)\n      : f_(f), start_(std::chrono::high_resolution_clock::now()) {}\n  ~_ScopeGuard() {\n    using namespace std::chrono;\n    auto duration = high_resolution_clock::now() - start_;\n    int64_t nanos = duration_cast<nanoseconds>(duration).count();\n    f_(nanos);\n  }\n\n  // Using implicit cast to bool so that it can be used in an 'if' condition\n  // within CAFFE_DURATION macro below.\n  /* implicit */ operator bool() {\n    return true;\n  }\n};\n\ntemplate <class T>\n_ScopeGuard<T> ScopeGuard(T f) {\n  return _ScopeGuard<T>(f);\n}\n}\n\n#define CAFFE_STAT_CTOR(ClassName)                 \\\n  ClassName(std::string name) : groupName(name) {} \\\n  std::string groupName\n\n#define CAFFE_EXPORTED_STAT(name) \\\n  ExportedStat name {             \\\n    groupName, #name              \\\n  }\n\n#define CAFFE_AVG_EXPORTED_STAT(name) \\\n  AvgExportedStat name {              \\\n    groupName, #name                  \\\n  }\n\n#define CAFFE_DETAILED_EXPORTED_STAT(name) \\\n  DetailedExportedStat name {              \\\n    groupName, #name                       \\\n  }\n\n#define CAFFE_STAT(name) \\\n  Stat name {            \\\n    groupName, #name     \\\n  }\n\n#define CAFFE_EVENT(stats, field, ...)                              
\\\n  {                                                                 \\\n    auto __caffe_event_value_ = stats.field.increment(__VA_ARGS__); \\\n    CAFFE_SDT(                                                      \\\n        field,                                                      \\\n        stats.field.groupName.c_str(),                              \\\n        __caffe_event_value_,                                       \\\n        ##__VA_ARGS__);                                             \\\n  }\n\n#define CAFFE_DURATION(stats, field, ...)                \\\n  if (auto g = detail::ScopeGuard([&](int64_t nanos) {   \\\n        CAFFE_EVENT(stats, field, nanos, ##__VA_ARGS__); \\\n      }))\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/tensor.h",
    "content": "#ifndef CAFFE2_CORE_TENSOR_H_\n#define CAFFE2_CORE_TENSOR_H_\n\n#include <cstddef>\n#include <cstdint>\n#include <fstream>\n#include <sstream>\n#include <type_traits>\n#include <typeinfo>\n#include <vector>\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/flags.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/typeid.h\"\n#include \"caffe2/core/logging.h\"\n\n// A global boolean variable to control whether we free memory when a Tensor\n// is shrinked to a smaller size. As a result, a Tensor is always going to\n// keep the memory allocated for its maximum capacity reshaped to so far.\nCAFFE2_DECLARE_bool(caffe2_keep_on_shrink);\n\n// Since we can have high variance in blob memory allocated across different\n// inputs in the same run, we will shrink the blob only if the memory gain\n// is larger than this flag in bytes.\nCAFFE2_DECLARE_int64(caffe2_max_keep_on_shrink_memory);\n\nnamespace caffe2 {\n\n/**\n * A utility function to convert vector<int> to vector<TIndex>.\n */\ninline vector<TIndex> ToVectorTIndex(const std::vector<int>& src) {\n  return vector<TIndex>(src.begin(), src.end());\n}\n\n/**\n * Return product of all dimensions starting from K\n */\ninline TIndex size_from_dim_(int k, vector<TIndex> dims) {\n  TIndex r = 1;\n  for (int i = k; i < dims.size(); ++i) {\n    r *= dims[i];\n  }\n  return r;\n}\n\n// Product of all dims up to\ninline TIndex size_to_dim_(int k, vector<TIndex> dims) {\n  CAFFE_ENFORCE(k < dims.size());\n  TIndex r = 1;\n  for (int i = 0; i < k; ++i) {\n    r *= dims[i];\n  }\n  return r;\n}\n\ninline int canonical_axis_index_(int axis_index, int ndims) {\n  CAFFE_ENFORCE_GE(axis_index, -ndims);\n  CAFFE_ENFORCE_LT(axis_index, ndims);\n  if (axis_index < 0) {\n    return axis_index + ndims;\n  }\n  return axis_index;\n}\n\n/**\n * @brief Tensor is the basic class in Caffe2 that stores a contiguous memory\n * with its shape information.\n *\n * The Tensor class is essentially a wrapper around a 
device-specific memory\n * (the device is specified by the Context template argument), and deals with\n * the allocation and de-allocation of such memory. We make a simplified\n * assumption that the memory is always contiguous.\n */\ntemplate <class Context>\nclass Tensor {\n public:\n  /**\n   * Initializes an empty tensor.\n   */\n  Tensor() {}\n\n  /**\n   * @brief Creates a tensor of the given dimension.\n   *\n   * Note that the actual data allocation is not going to be carried out until\n   * the first time mutable_data() is called.\n   */\n  explicit Tensor(const vector<TIndex>& dims) { Resize(dims); }\n  explicit Tensor(const vector<int>& dims) { Resize(dims); }\n\n  /**\n   * @brief Creates a tensor from a source tensor, copying over the content.\n   *\n   * Note that the source tensor can be from a different device context. The\n   * second argument provides a device context object (either Context or\n   * SrcContext) that will be responsible for copying the underlying data.\n   * If you do not wish to pass in a Context object, an equivalent constructor\n   * function exists that will create an implicit context object for copy, but\n   * be noted that this will cause a potential performance hit.\n   */\n  template <class SrcContext, class ContextForCopy>\n  Tensor(const Tensor<SrcContext>& src, ContextForCopy* context) {\n    CopyFrom(src, context);\n  }\n\n  /**\n   * @brief Creates a tensor from a source tensor, copying over the content.\n   *\n   * Note that this may have a potential performance hit, since a temporary\n   * context object will be created for the memory copy. 
Prefer explicitly\n   * providing a context for copy if you can.\n   */\n  template <class SrcContext>\n  Tensor(const Tensor<SrcContext>& src) {\n    CopyFrom(src);\n  }\n\n  /**\n   * @brief Creates a tensor, and fills its contents with the given values.\n   */\n  template <typename T>\n  Tensor(const vector<TIndex>& dims, const vector<T>& values, Context* context)\n      : meta_(TypeMeta::Make<T>()) {\n    Resize(dims);\n    CAFFE_ENFORCE_EQ_WITH_CALLER(values.size(), size_);\n    context->template Copy<T, CPUContext, Context>(size_, values.data(), mutable_data<T>());\n  }\n\n  /**\n   * @brief Creates a scalar tensor, and fills its content with the given value.\n   */\n  template <typename T,\n            typename = typename std::enable_if<std::is_scalar<T>::value>::type>\n  Tensor(const T& value, Context* context) {\n    Resize(vector<TIndex>{});\n    context->template Copy<T, CPUContext, Context>(size_, &value, mutable_data<T>());\n  }\n\n  /**\n   * @brief Copies the data from a source tensor, with a contex provided to\n   * carry out the underlying memcpy operation.\n   */\n  template <class SrcContext, class ContextForCopy>\n  void CopyFrom(const Tensor<SrcContext>& src, ContextForCopy* context) {\n    if ((void*)&src == (void*)this) {\n      return;\n    }\n    meta_ = src.meta();\n    Resize(src.dims());\n    if (size() > 0) {\n      if (meta_.copy()) {\n        meta_.copy()(src.raw_data(), raw_mutable_data(), size());\n      } else {\n        context->template CopyBytes<SrcContext, Context>(\n            nbytes(), src.raw_data(), raw_mutable_data());\n      }\n    }\n  }\n\n  /**\n   * @brief Copies the data from a source tensor.\n   *\n   * Note that this may have a potential performance hit, since a temporary\n   * context object will be created for the memory copy. 
Prefer explicitly\n   * providing a context for copy if you can.\n   */\n  template <class SrcContext>\n  inline void CopyFrom(const Tensor<SrcContext>& src) {\n    SrcContext tmp_context;\n    CopyFrom(src, &tmp_context);\n  }\n\n  virtual ~Tensor() noexcept {}\n\n  /**\n   * @brief Extends the outer-most dimension of this tensor by num elements,\n   * preserving the existing data.\n   *\n   * The underlying data may be reallocated in order to accommodate the new\n   * elements, in which case this tensors' capacity is grown at a factor of\n   * growthPct. This ensures that Extend runs on an amortized O(1) time\n   * complexity.\n   */\n  template <class ContextForCopy>\n  void Extend(TIndex num, float growthPct, ContextForCopy* context) {\n    CAFFE_ENFORCE_GE_WITH_CALLER(dims_.size(), 1);\n    auto newDims = dims_;\n    newDims[0] += num;\n    if (!data_) {\n      Resize(newDims);\n      return;\n    }\n    auto newSize = std::accumulate(\n        newDims.begin(),\n        newDims.end(),\n        static_cast<TIndex>(1),\n        std::multiplies<TIndex>());\n    if (newSize * meta_.itemsize() <= capacity_) {\n      dims_ = newDims;\n      size_ = newSize;\n      return;\n    }\n    auto newCapacity = dims_;\n    newCapacity[0] = std::max<size_t>(\n        newDims[0], std::ceil(dims_[0] * (growthPct + 100) / 100));\n    Reserve(newCapacity, context);\n    dims_ = newDims;\n    size_ = newSize;\n  }\n\n  template <class T, class ContextForCopy>\n  void Reserve(const std::vector<T>& newCapacity, ContextForCopy* context) {\n    auto newSize = std::accumulate(\n        newCapacity.begin(),\n        newCapacity.end(),\n        static_cast<TIndex>(1),\n        std::multiplies<TIndex>());\n    if (newSize * meta_.itemsize() <= capacity_) {\n      return;\n    }\n    auto oldData = std::move(data_);\n    auto oldSize = size_;\n    auto oldDims = dims_;\n    Resize(newCapacity);\n    auto* newData = raw_mutable_data(meta_);\n    context->template CopyItems<ContextForCopy, 
ContextForCopy>(\n        meta_, oldSize, oldData.get(), newData);\n    dims_ = oldDims;\n    size_ = oldSize;\n    reserved_ = true;\n  }\n\n  /**\n   * @brief Shrinks the outer-most dimension to given size, keeping the data.\n   *\n   * This method guarantees that no re-allocations are carried out, which means\n   * that the extra capacity after the end of the shurnk tensor is maintained.\n   */\n  void Shrink(TIndex outer_dim) {\n    CAFFE_ENFORCE_WITH_CALLER(dims_.size() >= 1, \"Tensor must be at least 1D\");\n    CAFFE_ENFORCE_WITH_CALLER(\n        outer_dim <= dims_[0],\n        \"New outer dimension must be smaller than current.\");\n    dims_[0] = outer_dim;\n    size_ = std::accumulate(\n        dims_.begin(),\n        dims_.end(),\n        static_cast<TIndex>(1),\n        std::multiplies<TIndex>());\n  }\n\n  /**\n   * @brief Resizes a tensor.\n   *\n   * Resize takes in a vector of ints specifying the dimensions of the tensor.\n   * You can pass in an empty vector to specify that it is a scalar (i.e.\n   * containing one single item).\n   *\n   * The underlying storage may be deleted after calling Resize: if the new\n   * shape leads to a different number of items in the tensor, the old memory\n   * is deleted and new memory will be allocated next time you call\n   * mutable_data(). However, if the shape is different but the total number of\n   * items is the same, the underlying storage is kept.\n   */\n  template <typename... Ts>\n  void Resize(Ts... dim_source) {\n    bool size_changed = SetDims(dim_source...);\n    if (size_changed) {\n      // If needed, we will free the data. 
the next mutable_data() call\n      // will create the data storage.\n      int64_t new_size = size_ * meta_.itemsize();\n      bool reset_tensor = false;\n      if (reserved_) {\n        // If tensor is reserved then don't claim its memeory unless capacity_\n        // is smaller than new size\n        reset_tensor = capacity_ < new_size;\n      } else {\n        reset_tensor = capacity_ < new_size || !FLAGS_caffe2_keep_on_shrink ||\n            capacity_ - new_size > FLAGS_caffe2_max_keep_on_shrink_memory;\n      }\n\n      if (reset_tensor) {\n        FreeMemory();\n      }\n    }\n  }\n\n  /**\n   * Resize the tensor like the source tensor. Note that this is just a\n   * sugar wrapper that essentially calls Resize(src_tensor.dims()).\n   */\n  template <class OtherContext>\n  inline void ResizeLike(const Tensor<OtherContext>& src_tensor) {\n    // Note: need casting for different context types.\n    if (static_cast<void*>(this) != static_cast<const void*>(&src_tensor)) {\n      Resize(src_tensor.dims());\n    }\n  }\n\n  /**\n   * Resizes the tensor without touching underlying storage.\n   * This requires the total size of the tensor to remains constant.\n   */\n  inline void Reshape(const vector<TIndex>& dims) {\n    TIndex new_size = 1;\n    for (auto d : dims) {\n      CAFFE_ENFORCE_GE_WITH_CALLER(d, 0);\n      new_size *= d;\n    }\n    CAFFE_ENFORCE_WITH_CALLER(\n        new_size == size_,\n        \"New size and old size are not equal. You cannot use Reshape, \"\n        \"but should use Resize.\"\n        // TODO(jiayq): remove the following warning after pending diffs\n        // stabilize.\n        \" The old caffe2 mixes Reshape and Resize but this behavior has \"\n        \"been changed. 
If you find this error, most likely you will need \"\n        \"to change corresponding code from Reshape to Resize.\");\n    dims_ = dims;\n  }\n\n  inline void Reshape(const vector<int>& dims) {\n    Reshape(ToVectorTIndex(dims));\n  }\n\n  /**\n   * Release whatever memory the tensor was holding but keep size and type\n   * information. Subsequent call to mutable_data will trigger new memory\n   * allocation.\n   */\n  inline void FreeMemory() {\n    data_.reset();\n    capacity_ = 0;\n    // If reserved is true and we changed tensor memory then it is fine\n    // to switch it to false, if Resize is called from Reserve and it triggers\n    // FreeMemory() then reserved_ will be set to true at end of Reserve()\n    reserved_ = false;\n  }\n\n  /**\n   * A utility function to print the debug string for the tensor. Note that this\n   * is very slow since it involves quite some string operations, so do not use\n   * it in your performance-critical code.\n   */\n  string DebugString() const {\n    std::stringstream ss;\n    ss << \"A Tensor of item size \" << itemsize() << \" and type \"\n       << meta_.name() << \" and dimension (\";\n    for (int d : dims_) {\n      ss << d << \",\";\n    }\n    ss << \").\";\n    return ss.str();\n  }\n\n  void swap(Tensor<Context>& other) {\n    std::swap(dims_, other.dims_);\n    std::swap(size_, other.size_);\n    std::swap(meta_, other.meta_);\n    std::swap(data_, other.data_);\n    std::swap(shares_data_, other.shares_data_);\n    std::swap(capacity_, other.capacity_);\n    std::swap(reserved_, other.reserved_);\n  }\n\n  /**\n   * @brief Shares the data with another tensor.\n   *\n   * To share data between two tensors, the sizes of the two tensors must be\n   * equal already. The reason we do not implicitly do a Resize to make the two\n   * tensors have the same shape is that we want to allow tensors of different\n   * shapes but the same number of items to still be able to share data. This\n   * allows one to e.g. 
have a n-dimensional Tensor and a flattened version\n   * sharing the same underlying storage.\n   *\n   * The source tensor should already have its data allocated.\n   */\n  void ShareData(const Tensor& src) {\n    meta_ = src.meta();\n    CAFFE_ENFORCE_EQ_WITH_CALLER(\n        src.size_,\n        size_,\n        \"Size mismatch - did you call reshape before sharing the data?\");\n    // It is possible that the source tensor hasn't called mutable_data() yet,\n    // in which case ShareData() doesn't make much sense since we don't really\n    // know what to share yet.\n    CAFFE_ENFORCE_WITH_CALLER(\n        src.data_.get() || src.size_ == 0,\n        \"Source tensor has no content and has size > 0\");\n    // Finally, do sharing.\n    data_ = src.data_;\n    capacity_ = src.capacity_;\n    shares_data_ = true;\n  }\n\n  /**\n   * @brief Shares the data with an externally managed pointer.\n   *\n   * This is similar to ShareData() but the source is a pointer with an advanced\n   * deleter option. In default, no deletion takes place, and one needs to make\n   * sure that the external memory is deallocated only after the tensor finishes\n   * using it. 
If a Deleter object is passed in, when this tensor is reallocated\n   * or freed, the deleter function is going to be called.\n   */\n  template <typename T, typename Deleter = MemoryDeleter>\n  void ShareExternalPointer(T* src, size_t capacity = 0, Deleter d = nullptr) {\n    ShareExternalPointer(src, TypeMeta::Make<T>(), capacity, d);\n  }\n\n  template <typename Deleter = MemoryDeleter>\n  void ShareExternalPointer(\n      void* src,\n      const TypeMeta& meta,\n      size_t capacity = 0,\n      Deleter d = nullptr) {\n    meta_ = meta;\n    CAFFE_ENFORCE_WITH_CALLER(\n        meta_.id(),\n        \"To share with a raw external pointer you need to have meta \"\n        \"already set.\");\n    CAFFE_ENFORCE_WITH_CALLER(\n        size_ >= 0,\n        \"To share data with a raw pointer, you need to set shape first.\");\n    // Check if the deleter is a MemoryDeleter and is a simple nullptr.\n    if (std::is_same<MemoryDeleter, Deleter>::value &&\n        reinterpret_cast<MemoryDeleter*>(&d)[0] == nullptr) {\n      // Use aliasing constructor trick to avoid calling the destructor.\n      data_ = std::shared_ptr<void>(std::shared_ptr<void>(), src);\n    } else {\n      data_.reset(src, d);\n    }\n    // Sets capacity. If not specified, we will implicitly assume that\n    // the capacity is the current size.\n    if (capacity) {\n      capacity_ = capacity;\n    } else {\n      capacity_ = nbytes();\n    }\n    shares_data_ = true;\n  }\n\n  bool shares_data() const {\n    return shares_data_;\n  }\n\n  /**\n   * Returns a const raw void* pointer of the underlying storage. mutable_data()\n   * or raw_mutable_data() must have been called prior to this function call.\n   */\n  inline const void* raw_data() const {\n    CAFFE_ENFORCE_WITH_CALLER(data_.get() || size_ == 0);\n    return data_.get();\n  }\n\n  /**\n   * Returns a typed pointer of the underlying storage. 
mutable_data() or\n   * raw_mutable_data() must have been called prior to this function call, and\n   * the data type must be of the correct type. If you want to get a void*\n   * pointer instead, use raw_data().\n   */\n  template <typename T>\n  inline const T* data() const {\n    CAFFE_ENFORCE_WITH_CALLER(\n        data_.get() || size_ == 0,\n        \"The tensor is of non-zero shape, but its data is not allocated yet. \"\n        \"Caffe2 uses a lazy allocation, so you will need to call \"\n        \"mutable_data() or raw_mutable_data() to actually allocate memory.\");\n    CAFFE_ENFORCE_WITH_CALLER(\n        IsType<T>(),\n        \"Tensor type mismatch, caller expects elements to be \",\n        TypeMeta::Name<T>(),\n        \" while tensor contains \",\n        meta_.name());\n    return static_cast<T*>(data_.get());\n  }\n\n  /**\n   * Returns a mutable raw pointer of the underlying storage. Since we will need\n   * to know the type of the data for allocation, a TypeMeta object is passed in\n   * to specify the necessary information. This is conceptually equivalent of\n   * calling mutable_data<T>() where the TypeMeta parameter meta is derived from\n   * the type T. This function differs from mutable_data<T>() in the sense that\n   * the type T can be specified during runtime via the TypeMeta object.\n   *\n   * If the existing data does not match the desired type, it will be deleted\n   * and a new storage will be created.\n   */\n  inline void* raw_mutable_data(const TypeMeta& meta) {\n    // For 0-size tensors it's fine to return any pointer (including nullptr)\n    if (meta_ == meta && (data_.get() || size_ == 0)) {\n      return data_.get();\n    } else {\n      bool had_special_dtor = meta_.dtor() != nullptr;\n      meta_ = meta;\n      CAFFE_ENFORCE_WITH_CALLER(\n          size_ >= 0,\n          \"Tensor is not initialized. 
You probably need to call Resize() \"\n          \"before calling mutable_data()\");\n\n      // We can reuse the existing buffer if the current data does not have\n      // a special destructor and the new data doesn't have a special\n      // constructor.\n      if (size_ == 0 ||\n          (meta.ctor() == nullptr && !had_special_dtor &&\n           capacity_ >= size_ * meta_.itemsize())) {\n        return data_.get();\n      }\n      if (meta.ctor()) {\n        // For types that need placement new, we will call it, as well as\n        // making sure that when the data is freed, it calls the right\n        // destruction procedure.\n        auto size = size_;\n        auto dtor = meta_.dtor();\n        auto ptr_and_deleter = Context::New(size_ * meta_.itemsize());\n        auto deleter = ptr_and_deleter.second;\n        data_.reset(\n            ptr_and_deleter.first, [size, dtor, deleter](void* ptr) -> void {\n              dtor(ptr, size);\n              deleter(ptr);\n            });\n        meta_.ctor()(data_.get(), size_);\n      } else {\n        // For fundamental type, new and delete is easier.\n        auto ptr_and_deleter = Context::New(size_ * meta_.itemsize());\n        data_.reset(ptr_and_deleter.first, ptr_and_deleter.second);\n      }\n      capacity_ = size_ * meta_.itemsize();\n      return data_.get();\n    }\n  }\n\n  /**\n   * Returns a mutable raw pointer of the underlying storage. 
This can only be\n   * used when you know for sure that the underlying storage of the tensor is\n   * already created via an earlier raw_mutable_data(meta) call or a\n   * mutable_data<T>() call.\n   *\n   * If the existing data does not match the desired type, it will be deleted\n   * and a new storage will be created.\n   */\n  inline void* raw_mutable_data() {\n    CAFFE_ENFORCE_WITH_CALLER(\n        meta_.id() != 0,\n        \"Calling raw_mutable_data() without meta, but the current meta is \"\n        \"of unknown type.\");\n    return raw_mutable_data(meta_);\n  }\n\n  /**\n   * Returns a typed pointer of the underlying storage.\n   *\n   * For fundamental types, we reuse possible existing storage if there\n   * is sufficient capacity.\n   */\n   template <typename T>\n    inline T* mutable_data() {\n      if ((size_ == 0 || data_.get()) && IsType<T>()) {\n        return static_cast<T*>(data_.get());\n      }\n      return static_cast<T*>(raw_mutable_data(TypeMeta::Make<T>()));\n    }\n\n\n  /**\n   * Returns the number of dimensions of the data.\n   */\n  inline int ndim() const { return dims_.size(); }\n  /**\n   * Returns the size (i.e. 
the number of items) of the tensor.\n   */\n  inline TIndex size() const { return size_; }\n  /**\n   * Return the number of bytes each item takes in the tensor.\n   */\n  inline size_t itemsize() const { return meta_.itemsize(); }\n  /**\n   * Returns the total number of bytes of the storage.\n   *\n   * This is equivalent to calling size() * itemsize().\n   */\n  inline size_t nbytes() const { return size_ * meta_.itemsize(); }\n\n  inline size_t capacity_nbytes() const {\n    return capacity_;\n  }\n  /**\n   * Returns the dimensions of the tensor as a vector.\n   */\n  inline const vector<TIndex>& dims() const { return dims_; }\n\n  inline TIndex size_from_dim(int k) const {\n    return size_from_dim_(k, dims_);\n  }\n\n  inline TIndex size_to_dim(int k) const {\n    return size_to_dim_(k, dims_);\n  }\n\n  /**\n  * Returns the 'canonical' version of a (usually)  user-specified axis,\n  * allowing for negative indexing (e.g., -1 for the last axis).\n  *\n  * @param axis_index the axis index.\n  *        If 0 <= index < ndim(), return index.\n  *        If -ndim <= index <= -1, return (ndim() - (-index)),\n  *        e.g., the last axis index (ndim() - 1) if index == -1,\n  *        the second to last if index == -2, etc.\n  *        Dies on out of range index.\n  */\n  inline int canonical_axis_index(int axis_index) const {\n    return canonical_axis_index_(axis_index, ndim());\n  }\n\n  /**\n   * Checks if the tensor content is of the given data type.\n   */\n  template <typename T>\n  inline bool IsType() const { return meta_.Match<T>(); }\n  /**\n   * Returns the TypeMeta object associated with the current data type.\n   */\n  inline const TypeMeta& meta() const { return meta_; }\n\n  /**\n   * Returns the i-th dimension of the tensor in int.\n   *\n   * This function returns an int value instead of TIndex, which depending on\n   * the typedef could be int64. 
If you want int64 dim values, make sure you\n   * call dim() instead.\n   */\n  inline int dim32(const int i) const {\n    #ifndef NDEBUG\n    CAFFE_ENFORCE_LT_WITH_CALLER(i, dims_.size(), \"Exceeding ndim limit\");\n    CAFFE_ENFORCE_GE_WITH_CALLER(i, 0, \"Cannot have negative dimension index\");\n    #endif\n    CAFFE_ENFORCE_LT_WITH_CALLER(dims_[i], std::numeric_limits<int>::max());\n    return static_cast<int>(dims_[i]);\n  }\n\n  /**\n   * Returns the i-th dimension of the tensor. Note that the passed in index\n   * must be between 0 (inclusive) and the number of dimensions, otherwise\n   * this function will produce a fatal message.\n   */\n  inline TIndex dim(const int i) const {\n    #ifndef NDEBUG\n    CAFFE_ENFORCE_LT_WITH_CALLER(i, dims_.size(), \"Exceeding ndim limit\");\n    CAFFE_ENFORCE_GE_WITH_CALLER(i, 0, \"Cannot have negative dimension index\");\n    #endif\n    return dims_[i];\n  }\n\n protected:\n  vector<TIndex> dims_;\n  TIndex size_ = -1;\n  TypeMeta meta_;\n  std::shared_ptr<void> data_;\n  bool shares_data_ = false;\n  size_t capacity_ = 0;\n  bool reserved_ = false;\n  // In case of chunk load we store how much data was already loaded\n\n private:\n  template <\n      typename T,\n      typename = typename std::enable_if<std::is_integral<T>::value>::type>\n  bool SetDims(const vector<T>& src) {\n    auto old_size = size_;\n    dims_.resize(src.size());\n    TIndex new_size = 1;\n    for (unsigned int i = 0; i < src.size(); ++i) {\n      new_size *= src[i];\n      dims_[i] = src[i];\n    }\n    size_ = new_size;\n    return size_ != old_size;\n  }\n\n  bool SetDims() {\n    auto old_size = size_;\n    dims_.resize(0);\n    size_ = 1;\n    return size_ != old_size;\n  }\n\n  // TODO(jiayq): maybe rewrite the following functions with initializer list.\n  // NVCC does not play well with initializer lists last time, but worth\n  // another shot.\n  bool SetDims(const TIndex d0) {\n    auto old_size = size_;\n    dims_.resize(1);\n    dims_[0] 
= d0;\n    size_ = d0;\n    return size_ != old_size;\n  }\n\n  bool SetDims(const TIndex d0, const TIndex d1) {\n    auto old_size = size_;\n    dims_.resize(2);\n    dims_[0] = d0;\n    dims_[1] = d1;\n    size_ = d0 * d1;\n    return size_ != old_size;\n  }\n\n  bool SetDims(const TIndex d0, const TIndex d1, const TIndex d2) {\n    auto old_size = size_;\n    dims_.resize(3);\n    dims_[0] = d0;\n    dims_[1] = d1;\n    dims_[2] = d2;\n    size_ = d0 * d1 * d2;\n    return size_ != old_size;\n  }\n\n  bool\n  SetDims(const TIndex d0, const TIndex d1, const TIndex d2, const TIndex d3) {\n    auto old_size = size_;\n    dims_.resize(4);\n    dims_[0] = d0;\n    dims_[1] = d1;\n    dims_[2] = d2;\n    dims_[3] = d3;\n    size_ = d0 * d1 * d2 * d3;\n    return size_ != old_size;\n  }\n\n  // Note(jiayq): possibly a rule-of-three violation, but we explicitly\n  // discourage the use of = for Tensors.\n  Tensor& operator=(const Tensor& src) = delete;\n};\n\n// For simplicity, we will typedef Tensor<CPUContext> to TensorCPU.\ntypedef Tensor<CPUContext> TensorCPU;\n\nconstexpr int k_limit_default_ = 1000;\n\n// Type call registry\ntypedef TypeMeta (*TypeCall)(const void*);\nTypeCall GetTypeCallFunction(CaffeTypeId id);\nvoid RegisterTypeCallFunction(CaffeTypeId id, TypeCall c);\n\ntemplate <class Context>\nTypeMeta GetTensorType(const void* c) {\n  const Tensor<Context>* tc = static_cast<const Tensor<Context>*>(c);\n  return tc->meta();\n}\n\n// Shape call registry\ntypedef vector<TIndex> (*TensorInfoCall)(\n    const void*,\n    bool* shares_data,\n    size_t* capacity,\n    DeviceOption* device);\nTensorInfoCall GetTensorInfoFunction(CaffeTypeId id);\nvoid RegisterTensorInfoFunction(CaffeTypeId id, TensorInfoCall c);\n\ntemplate <class Context>\nvector<TIndex> GetTensorInfo(\n    const void* c,\n    bool* shares_data,\n    size_t* capacity,\n    DeviceOption* device) {\n  const Tensor<Context>* tc = static_cast<const Tensor<Context>*>(c);\n  *shares_data = 
tc->shares_data();\n  *capacity = tc->capacity_nbytes();\n  device->set_device_type(CPU);\n  device->set_cuda_gpu_id(0);\n  return tc->dims();\n}\n\nclass TensorPrinter {\n public:\n  explicit TensorPrinter(\n      const std::string& tensor_name = \"\",\n      const std::string& file_name = \"\",\n      int limit = k_limit_default_);\n  ~TensorPrinter();\n\n  template <class T>\n  void Print(const Tensor<CPUContext>& tensor);\n\n  template <class Context>\n  void PrintMeta(const Tensor<Context>& tensor);\n\n  string MetaStr(const Tensor<CPUContext>& tensor);\n\n private:\n  bool to_file_;\n  int limit_;\n  std::unique_ptr<std::ofstream> log_file_;\n  std::string tensor_name_;\n};\n\ntemplate <class T>\nvoid TensorPrinter::Print(const Tensor<CPUContext>& tensor) {\n  std::stringstream values_stream;\n  // One most likely doesn't want to print int64-number of items for visual\n  // inspection, so we cast down to int here.\n  int total_count = std::min(tensor.size(), TIndex(limit_));\n  const T* tensor_data = tensor.template data<T>();\n  for (int i = 0; i < total_count - 1; ++i) {\n    values_stream << tensor_data[i] << \",\";\n  }\n  // We do not add a comma after the last item.\n  values_stream << tensor_data[total_count - 1];\n  if (to_file_) {\n    (*log_file_) << MetaStr(tensor) << values_stream.str() << std::endl;\n  } else {\n    // Log to console.\n    LOG(INFO) << MetaStr(tensor) << values_stream.str();\n  }\n}\n\ntemplate <class Context>\nvoid TensorPrinter::PrintMeta(const Tensor<Context>& tensor) {\n  if (to_file_) {\n    (*log_file_) << MetaStr(tensor) << std::endl;\n  } else {\n    LOG(INFO) << MetaStr(tensor);\n  }\n}\n\n}  // namespace caffe2\n#endif  // CAFFE2_CORE_TENSOR_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/timer.h",
    "content": "#ifndef CAFFE2_CORE_TIMER_H_\n#define CAFFE2_CORE_TIMER_H_\n\n#include <chrono>\n\n#include \"caffe2/core/common.h\"\n\nnamespace caffe2 {\n\n/**\n * @brief A simple timer object for measuring time.\n *\n * This is a minimal class around a std::chrono::high_resolution_clock that\n * serves as a utility class for testing code.\n */\nclass Timer {\n public:\n  typedef std::chrono::high_resolution_clock clock;\n  typedef std::chrono::nanoseconds ns;\n  Timer() { Start(); }\n  /**\n   * @brief Starts a timer.\n   */\n  inline void Start() { start_time_ = clock::now(); }\n  inline float NanoSeconds() {\n    return std::chrono::duration_cast<ns>(clock::now() - start_time_).count();\n  }\n  /**\n   * @brief Returns the elapsed time in milliseconds.\n   */\n  inline float MilliSeconds() { return NanoSeconds() / 1000000.f; }\n  /**\n   * @brief Returns the elapsed time in microseconds.\n   */\n  inline float MicroSeconds() { return NanoSeconds() / 1000.f; }\n  /**\n   * @brief Returns the elapsed time in seconds.\n   */\n  inline float Seconds() { return NanoSeconds() / 1000000000.f; }\n\n protected:\n  std::chrono::time_point<clock> start_time_;\n  DISABLE_COPY_AND_ASSIGN(Timer);\n};\n}\n\n#endif  // CAFFE2_CORE_TIMER_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/transform.h",
    "content": "#pragma once\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/graph.h\"\n#include \"caffe2/core/workspace.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n#include \"caffe2/utils/proto_utils.h\"\n\nnamespace caffe2 {\n\n/**\n * The Transform Base Object\n *\n * A Transform is an operation which manipulates a Caffe2 NetDef.\n * You can consider it as a function: Transform.ApplyTo(NetDef) -> NetDef\n *\n * A Transform Operation does 4 things:\n *    1) Creates a Graph object from a NetDef, which stores connections.\n *    2) Pattern Matches on the Graph, to find subgraphs it wants to change.\n *    3) Replaces the subgraphs that it's matched with new operators.\n *    4) Creates a NetDef from the changed Graph, and returns it.\n *\n * The effect of a Transform is defined by its 3 protected virtual functions.\n *    1) PatternRule determines for an ordered subgraph and a node, whether to\n *        consider adding the node to the subgraph.\n *    2) ValidatorRule determines, for an ordered subgraph, whether it is a\n *        match.\n *    3) ReplaceRule mutates the graph, based on a matched subgraph.\n *\n * This is the base class for all derived classes to base off. 
To create your\n * own transform, write your implementations for PatternRule, ValidatorRule, and\n * ReplaceRule.\n */\nclass Transform {\n public:\n  Transform() {}\n\n  /**\n   * Apply a Transform onto a NetDef.\n   * Returns the transformed NetDef.\n   */\n  NetDef ApplyTo(const NetDef& orig_net_def);\n\n  virtual ~Transform() {}\n\n  /**\n   * Determines the type of subgraphs that PatternMatch will find.\n   *\n   * CONNECTED_SUBGRAPH will only match subgraphs that are connected.\n   * These subgraphs satisfy that every node of the match is connected to the\n   * subgraph of the nodes that come before it.\n   * For example, in the graph (1) --> (2) --> (3) --> (4),\n   *    This is capable of matching the subgraph [2, 3] and [4, 3]\n   *    This is not capable of matching the subgraph [2, 4].\n   *\n   *\n   * SORTED_WRT_EXECUTION_ORDER will match subgraphs that guarantee\n   * sorted execution order.\n   * The nodes don't have to be connected. It is faster than General.\n   * For example, in the graph (1) --> (2) --> (3) --> (4),\n   *    This is capable of matching the subgraph [2, 4], [3, 4].\n   *    This is not capable of matching the subgraph [3, 1], [4, 3].\n   *\n   *\n   * GENERAL can match any subgraph.\n   * For example, in the graph (1) --> (2) --> (3) --> (4),\n   *    This is capable of matching subgraphs [2, 4], [3, 4], [4, 2, 1].\n   *    There is no ordered subgraph of G that cannot be matched by this.\n   */\n  enum PatternMatchType {\n    CONNECTED_SUBGRAPH,\n    SORTED_WRT_EXECUTION_ORDER,\n    GENERAL\n  };\n\n  /**\n   * Generates all matches (stored as ordered subgraphs) and returns them.\n   *\n   * A match is stored as vector<int>, which is a mapping to OperatorDefs\n   * in Graph. 
The order matters.\n   */\n  std::vector<std::vector<int>> PatternMatch(const transform::Graph& graph);\n\n  /**\n   * Applies the replace rule onto each of the matches found.\n   */\n  void ReplacePattern(\n      const std::vector<std::vector<int>>& matches,\n      transform::Graph* graph);\n\n protected:\n  /**\n   * The PatternRule essentially answers:\n   * Given the current subgraph (ordered), should we append the new node at idx?\n   */\n  virtual bool PatternRule(\n      const transform::Graph& g,\n      const std::vector<int>& subgraph,\n      int /*idx*/) {\n    CAFFE_NOT_IMPLEMENTED;\n  }\n\n  /**\n   * The ValidatorRule essentially answers:\n   * Given a subgraph, can we accept it?\n   */\n  virtual bool ValidatorRule(\n      const transform::Graph& g,\n      const std::vector<int>& subgraph) {\n    CAFFE_NOT_IMPLEMENTED;\n  }\n\n  /**\n   * The ReplaceRule actually mutates the graph, and applies the transformation\n   * upon the subgraph.\n   */\n  virtual bool ReplaceRule(\n      const std::vector<int>& subgraph,\n      transform::Graph* g_ptr) {\n    CAFFE_NOT_IMPLEMENTED;\n  }\n\n  void SetPatternMatchType(PatternMatchType type) {\n    pattern_match_type_ = type;\n  }\n\n private:\n  /**\n   * A helper function for PatternMatch, which keeps track of the best subgraph\n   * so far.\n   */\n  void PatternMatchHelper(\n      const transform::Graph& graph,\n      const std::vector<bool>& matched,\n      std::vector<int>* subgraph_ptr,\n      std::vector<int>* best_subgraph_ptr);\n  /**\n   * Attempts to append each neighbor to the end of the subgraph.\n   */\n  void TryNeighbors(\n      const transform::Graph& graph,\n      const std::map<int, std::vector<string>>& neighbors,\n      const std::vector<bool>& matched,\n      std::vector<int>* subgraph_ptr,\n      std::vector<int>* best_subgraph_ptr);\n\n  PatternMatchType pattern_match_type_ = CONNECTED_SUBGRAPH;\n};\n\n// Creates a Transform based on a key, which should be defined in 
registry.\nunique_ptr<Transform> CreateTransform(string key);\n\nCAFFE_DECLARE_REGISTRY(TransformRegistry, Transform);\n#define REGISTER_TRANSFORM(name, ...) \\\n  CAFFE_REGISTER_CLASS(TransformRegistry, name, __VA_ARGS__)\n\n// Create a Transform object from registry,\n// and immediately apply it to a Netdef.\nNetDef ApplyTransform(const string& key, const NetDef& netdef);\n\n// Create a Transform object from registry, apply it to a NetDef.\n// Will only return the transformed net if it is faster than the old net.\n// This will run the init net first, will run the two nets warmup_runs times.\n// Then, we will take the average time of main_runs runs, and only keep the\n// transformed net if it is faster by a factor of improvement_threshold.\nNetDef ApplyTransformIfFaster(\n    const string& key,\n    const NetDef& netdef,\n    const NetDef& init_netdef,\n    const int warmup_runs,\n    const int main_runs,\n    const double improvement_threshold);\n\n} // namespace\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/typeid.h",
    "content": "#ifndef CAFFE2_CORE_TYPEID_H_\n#define CAFFE2_CORE_TYPEID_H_\n\n#include <cassert>\n#include <cstdlib>\n#include <iostream>\n#include <map>\n#include <mutex>\n#include <type_traits>\n#ifdef __GXX_RTTI\n#include <set>\n#include <typeinfo>\n#endif\n\n#include <exception>\n\n#include \"caffe2/core/common.h\"\n\nnamespace caffe2 {\n\ntypedef intptr_t CaffeTypeId;\nstd::map<CaffeTypeId, string>& gTypeNames();\n#ifdef __GXX_RTTI\nstd::set<string>& gRegisteredTypeNames();\n#endif // __GXX_RTTI\n\n// A utility function to demangle a function name.\nstring Demangle(const char* name);\n\n// A utility function to return an exception string by prepending its exception\n// type before its what() content.\nstring GetExceptionString(const std::exception& e);\n\nstd::mutex& gCaffe2TypeRegistrationMutex();\n\ntemplate <typename T>\nstruct TypeNameRegisterer {\n  explicit TypeNameRegisterer(CaffeTypeId id) {\n    std::lock_guard<std::mutex> guard(gCaffe2TypeRegistrationMutex());\n#ifdef __GXX_RTTI\n    string name = Demangle(typeid(T).name());\n    gTypeNames()[id] = name;\n    // If we are in RTTI mode, we will also use this opportunity to do sanity\n    // check if there are duplicated ids registered for the same type. This\n    // usually happens when one does not do RTLD_GLOBAL, which is often the\n    // case in Python. The way we do the check is to make sure that there are\n    // no duplicated names registered - this could be done by checking the\n    // uniqueness of names.\n    if (gRegisteredTypeNames().count(name)) {\n      std::cerr << \"Type name \" << name\n                << \" registered twice. This should \"\n                   \"not happen. 
Do you have duplicated CAFFE_KNOWN_TYPE?\"\n                << std::endl;\n      throw std::runtime_error(\"TypeNameRegisterer error with type \" + name);\n    }\n    gRegisteredTypeNames().insert(name);\n#else // __GXX_RTTI\n    gTypeNames()[id] = \"(RTTI disabled, cannot show name)\";\n#endif // __GXX_RTTI\n  }\n};\n\n/**\n * TypeMeta is a thin class that allows us to store the type of a container such\n * as a blob, or the data type of a tensor, with a unique run-time id. It also\n * stores some additional data such as the item size and the name of the type\n * for run-time inspection.\n */\nclass TypeMeta {\n public:\n  typedef void (*PlacementNew)(void*, size_t);\n  typedef void (*TypedCopy)(const void*, void*, size_t);\n  typedef void (*TypedDestructor)(void*, size_t);\n  /** Create a dummy TypeMeta object. To create a TypeMeta object for a specific\n   * type, use TypeMeta::Make<T>().\n   */\n  TypeMeta()\n      : id_(0), itemsize_(0), ctor_(nullptr), copy_(nullptr), dtor_(nullptr) {}\n\n  /**\n   * Copy constructor.\n   */\n  TypeMeta(const TypeMeta& src)\n      : id_(src.id_),\n        itemsize_(src.itemsize_),\n        ctor_(src.ctor_),\n        copy_(src.copy_),\n        dtor_(src.dtor_) {}\n  /**\n   * Assignment operator.\n   */\n  TypeMeta& operator=(const TypeMeta& src) {\n    if (this == &src)\n      return *this;\n    id_ = src.id_;\n    itemsize_ = src.itemsize_;\n    ctor_ = src.ctor_;\n    copy_ = src.copy_;\n    dtor_ = src.dtor_;\n    return *this;\n  }\n\n private:\n  // TypeMeta can only be created by Make, making sure that we do not\n  // create incorrectly mixed up TypeMeta objects.\n  TypeMeta(\n      CaffeTypeId i,\n      size_t s,\n      PlacementNew ctor,\n      TypedCopy copy,\n      TypedDestructor dtor)\n      : id_(i), itemsize_(s), ctor_(ctor), copy_(copy), dtor_(dtor) {}\n\n public:\n  /**\n   * Returns the type id.\n   */\n  inline const CaffeTypeId& id() const {\n    return id_;\n  }\n  /**\n   * Returns the size of the item.\n 
  */\n  inline const size_t& itemsize() const {\n    return itemsize_;\n  }\n  /**\n   * Returns the placement new function pointer for individual items.\n   */\n  inline PlacementNew ctor() const {\n    return ctor_;\n  }\n  /**\n   * Returns the typed copy function pointer for individual iterms.\n   */\n  inline TypedCopy copy() const {\n    return copy_;\n  }\n  /**\n   * Returns the destructor function pointer for individual items.\n   */\n  inline TypedDestructor dtor() const {\n    return dtor_;\n  }\n  /**\n   * Returns a printable name for the type.\n   */\n  inline const char* name() const {\n    auto it = gTypeNames().find(id_);\n    assert(it != gTypeNames().end());\n    return it->second.c_str();\n  }\n  inline bool operator==(const TypeMeta& m) const {\n    return (id_ == m.id_);\n  }\n  inline bool operator!=(const TypeMeta& m) const {\n    return (id_ != m.id_);\n  }\n\n  template <typename T>\n  inline bool Match() const {\n    return (id_ == Id<T>());\n  }\n\n  // Below are static functions that can be called by passing a specific type.\n\n  /**\n   * Returns the unique id for the given type T. The id is unique for the type T\n   * in the sense that for any two different types, their id are different; for\n   * the same type T, the id remains the same over different calls of the\n   * function. However, this is not guaranteed over different runs, as the id\n   * is generated during run-time. Do NOT serialize the id for storage.\n   */\n  template <typename T>\n  CAFFE2_EXPORT static CaffeTypeId Id();\n\n  /**\n   * Returns the item size of the type. 
This is equivalent to sizeof(T).\n   */\n  template <typename T>\n  static size_t ItemSize() {\n    return sizeof(T);\n  }\n\n  /**\n   * Returns the printable name of the type.\n   *\n   * Works for all types, not only the ones registered with CAFFE_KNOWN_TYPE\n   */\n  template <typename T>\n  static const char* Name() {\n#ifdef __GXX_RTTI\n    static const string name = Demangle(typeid(T).name());\n    return name.c_str();\n#else // __GXX_RTTI\n    return \"(RTTI disabled, cannot show name)\";\n#endif // __GXX_RTTI\n  }\n\n  /**\n   * Placement new function for the type.\n   */\n  template <typename T>\n  static void _Ctor(void* ptr, size_t n) {\n    T* typed_ptr = static_cast<T*>(ptr);\n    for (int i = 0; i < n; ++i) {\n      new (typed_ptr + i) T;\n    }\n  }\n\n  /**\n   * Typed copy function for classes.\n   */\n  template <typename T>\n  static void _Copy(const void* src, void* dst, size_t n) {\n    const T* typed_src = static_cast<const T*>(src);\n    T* typed_dst = static_cast<T*>(dst);\n    for (int i = 0; i < n; ++i) {\n      typed_dst[i] = typed_src[i];\n    }\n  }\n\n  /**\n   * A placeholder function for types that do not allow assignment.\n   */\n  template <typename T>\n  static void\n  _CopyNotAllowed(const void* /*src*/, void* /*dst*/, size_t /*n*/) {\n    std::cerr << \"Type \" << Name<T>() << \" does not allow assignment.\";\n    // This is an error by design, so we will quit loud.\n    abort();\n  }\n\n  /**\n   * Destructor for non-fundamental types.\n   */\n  template <typename T>\n  static void _Dtor(void* ptr, size_t n) {\n    T* typed_ptr = static_cast<T*>(ptr);\n    for (int i = 0; i < n; ++i) {\n      typed_ptr[i].~T();\n    }\n  }\n\n  /**\n   * Returns a TypeMeta object that corresponds to the typename T.\n   */\n  template <typename T>\n  static typename std::enable_if<\n      std::is_fundamental<T>::value || std::is_pointer<T>::value,\n      TypeMeta>::type\n  Make() {\n    return TypeMeta(Id<T>(), ItemSize<T>(), nullptr, nullptr, 
nullptr);\n  }\n\n  template <\n      typename T,\n      typename std::enable_if<\n          !(std::is_fundamental<T>::value || std::is_pointer<T>::value) &&\n          std::is_copy_assignable<T>::value>::type* = nullptr>\n  static TypeMeta Make() {\n    return TypeMeta(Id<T>(), ItemSize<T>(), _Ctor<T>, _Copy<T>, _Dtor<T>);\n  }\n\n  template <typename T>\n  static TypeMeta Make(\n      typename std::enable_if<\n          !(std::is_fundamental<T>::value || std::is_pointer<T>::value) &&\n          !std::is_copy_assignable<T>::value>::type* = 0) {\n    return TypeMeta(\n        Id<T>(), ItemSize<T>(), _Ctor<T>, _CopyNotAllowed<T>, _Dtor<T>);\n  }\n\n private:\n  CaffeTypeId id_;\n  size_t itemsize_;\n  PlacementNew ctor_;\n  TypedCopy copy_;\n  TypedDestructor dtor_;\n};\n\n/**\n * Register unique id for a type so it can be used in TypeMeta context, e.g. be\n * used as a type for Blob or for Tensor elements.\n *\n * CAFFE_KNOWN_TYPE does explicit instantiation of TypeMeta::Id<T> template\n * function and thus needs to be put in a single translation unit (.cpp file)\n * for a given type T. Other translation units that use type T as a type of the\n * caffe2::Blob or element type of caffe2::Tensor need to depend on the\n * translation unit that contains CAFFE_KNOWN_TYPE declaration via regular\n * linkage dependencies.\n *\n * NOTE: the macro needs to be invoked in ::caffe2 namespace\n */\n#define CAFFE_KNOWN_TYPE(T)                            \\\n  template <>                                          \\\n  CaffeTypeId TypeMeta::Id<T>() {                      \\\n    static bool type_id_bit[1];                        \\\n    static TypeNameRegisterer<T> registerer(           \\\n        reinterpret_cast<CaffeTypeId>(type_id_bit));   \\\n    return reinterpret_cast<CaffeTypeId>(type_id_bit); \\\n  }\n\n} // namespace caffe2\n\n#endif // CAFFE2_CORE_TYPEID_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/types.h",
    "content": "#ifndef CAFFE2_CORE_TYPES_H_\n#define CAFFE2_CORE_TYPES_H_\n\n#include <cstdint>\n#include <string>\n#include <type_traits>\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/typeid.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nnamespace caffe2 {\n\n// Storage orders that are often used in the image applications.\nenum StorageOrder {\n  UNKNOWN = 0,\n  NHWC = 1,\n  NCHW = 2,\n};\n\ninline StorageOrder StringToStorageOrder(const string& str) {\n  if (str == \"NHWC\" || str == \"nhwc\") {\n    return StorageOrder::NHWC;\n  } else if (str == \"NCHW\" || str == \"nchw\") {\n    return StorageOrder::NCHW;\n  } else {\n    LOG(ERROR) << \"Unknown storage order string: \" << str;\n    return StorageOrder::UNKNOWN;\n  }\n}\n\ninline constexpr char NameScopeSeparator() { return '/'; }\n\n// From TypeMeta to caffe2::DataType protobuffer enum.\nTensorProto::DataType TypeMetaToDataType(const TypeMeta& meta);\n\n// From caffe2::DataType protobuffer enum to TypeMeta\nconst TypeMeta& DataTypeToTypeMeta(const TensorProto::DataType& dt);\n\n}  // namespace caffe2\n\n///////////////////////////////////////////////////////////////////////////////\n// Half float definition. Currently half float operators are mainly on CUDA\n// gpus.\n// The reason we do not directly use the cuda __half data type is because that\n// requires compilation with nvcc. The float16 data type should be compatible\n// with the cuda __half data type, but will allow us to refer to the data type\n// without the need of cuda.\nstatic_assert(sizeof(unsigned short) == 2,\n              \"Short on this platform is not 16 bit.\");\nnamespace caffe2 {\ntypedef struct CAFFE2_ALIGNED(2) __f16 { uint16_t x; } float16;\n}  // namespace caffe2\n\n// Make __f16 a fundamental type.\nnamespace std {\ntemplate<>\nstruct is_fundamental<caffe2::__f16> : std::integral_constant<bool, true> {\n};\n}  // namespace std\n\n#endif  // CAFFE2_CORE_TYPES_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/core/workspace.h",
    "content": "#ifndef CAFFE2_CORE_WORKSPACE_H_\n#define CAFFE2_CORE_WORKSPACE_H_\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/observer.h\"\n\n#ifndef CAFFE2_MOBILE\n#error \"mobile build state not defined\"\n#endif\n\n#include <climits>\n#include <cstddef>\n#include <mutex>\n#include <typeinfo>\n#include <vector>\n\n#include \"caffe2/core/blob.h\"\n#include \"caffe2/core/registry.h\"\n#include \"caffe2/core/net.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n#include \"caffe2/utils/signal_handler.h\"\n#if CAFFE2_MOBILE\n#include \"caffe2/utils/threadpool/ThreadPool.h\"\n#endif // CAFFE2_MOBILE\n\nCAFFE2_DECLARE_bool(caffe2_print_blob_sizes_at_exit);\n\nnamespace caffe2 {\n\nclass NetBase;\n\nstruct StopOnSignal {\n  StopOnSignal()\n      : handler_(std::make_shared<SignalHandler>(\n            SignalHandler::Action::STOP,\n            SignalHandler::Action::STOP)) {}\n\n  StopOnSignal(const StopOnSignal& other) : handler_(other.handler_) {}\n\n  bool operator()(int /*iter*/) {\n    return handler_->CheckForSignals() != SignalHandler::Action::STOP;\n  }\n\n  std::shared_ptr<SignalHandler> handler_;\n};\n\n/**\n * Workspace is a class that holds all the related objects created during\n * runtime: (1) all blobs, and (2) all instantiated networks. 
It is the owner of\n * all these objects and deals with the scaffolding logistics.\n */\nclass Workspace {\n public:\n  typedef std::function<bool(int)> ShouldContinue;\n  typedef CaffeMap<string, unique_ptr<Blob> > BlobMap;\n  typedef CaffeMap<string, unique_ptr<NetBase> > NetMap;\n  /**\n   * Initializes an empty workspace.\n   */\n  Workspace() : root_folder_(\".\"), shared_(nullptr) {}\n\n  /**\n   * Initializes an empty workspace with the given root folder.\n   *\n   * For any operators that are going to interface with the file system, such\n   * as load operators, they will write things under this root folder given\n   * by the workspace.\n   */\n  explicit Workspace(const string& root_folder)\n      : root_folder_(root_folder), shared_(nullptr) {}\n\n  /**\n   * Initializes a workspace with a shared workspace.\n   *\n   * When we access a Blob, we will first try to access the blob that exists\n   * in the local workspace, and if not, access the blob that exists in the\n   * shared workspace. 
The caller keeps the ownership of the shared workspace\n   * and is responsible for making sure that its lifetime is longer than the\n   * created workspace.\n   */\n  explicit Workspace(const Workspace* shared)\n      : root_folder_(\".\"), shared_(shared) {}\n\n  /**\n   * Initializes workspace with parent workspace, blob name remapping\n   * (new name -> parent blob name), no other blobs are inherited from\n   * parent workspace\n   */\n  Workspace(\n      const Workspace* shared,\n      const std::unordered_map<string, string>& forwarded_blobs)\n      : root_folder_(\".\"), shared_(nullptr) {\n    CAFFE_ENFORCE(shared, \"Parent workspace must be specified\");\n    for (const auto& forwarded : forwarded_blobs) {\n      CAFFE_ENFORCE(\n          shared->HasBlob(forwarded.second), \"Invalid parent workspace blob\");\n      forwarded_blobs_[forwarded.first] =\n          std::make_pair(shared, forwarded.second);\n    }\n  }\n\n  /**\n   * Initializes a workspace with a root folder and a shared workspace.\n   */\n  Workspace(const string& root_folder, Workspace* shared)\n      : root_folder_(root_folder), shared_(shared) {}\n\n  ~Workspace() {\n    if (FLAGS_caffe2_print_blob_sizes_at_exit) {\n      PrintBlobSizes();\n    }\n  }\n\n  /**\n   * Add blob mappings from another workspace\n   */\n  void AddBlobMapping(\n      const Workspace* parent,\n      const std::unordered_map<string, string>& forwarded_blobs);\n\n  /**\n   * Return list of blobs owned by this Workspace, not including blobs\n   * shared from parent workspace.\n   */\n  vector<string> LocalBlobs() const;\n\n  /**\n   * Return a list of blob names. This may be a bit slow since it will involve\n   * creation of multiple temp variables. 
For best performance, simply use\n   * HasBlob() and GetBlob().\n   */\n  vector<string> Blobs() const;\n\n  /**\n   * Return the root folder of the workspace.\n   */\n  const string& RootFolder() { return root_folder_; }\n  /**\n   * Checks if a blob with the given name is present in the current workspace.\n   */\n  inline bool HasBlob(const string& name) const {\n    // First, check the local workspace,\n    // Then, check the forwarding map, then the parent workspace\n    if (blob_map_.count(name)) {\n      return true;\n    } else if (forwarded_blobs_.count(name)) {\n      const auto parent_ws = forwarded_blobs_.at(name).first;\n      const auto& parent_name = forwarded_blobs_.at(name).second;\n      return parent_ws->HasBlob(parent_name);\n    } else if (shared_) {\n      return shared_->HasBlob(name);\n    }\n    return false;\n  }\n\n  void PrintBlobSizes();\n\n  /**\n   * Creates a blob of the given name. The pointer to the blob is returned, but\n   * the workspace keeps ownership of the pointer. If a blob of the given name\n   * already exists, the creation is skipped and the existing blob is returned.\n   */\n  Blob* CreateBlob(const string& name);\n  /**\n   * Remove the blob of the given name. Return true if removed and false if\n   * not exist.\n   * Will NOT remove from the shared workspace.\n   */\n  bool RemoveBlob(const string& name);\n  /**\n   * Gets the blob with the given name as a const pointer. If the blob does not\n   * exist, a nullptr is returned.\n   */\n  const Blob* GetBlob(const string& name) const;\n  /**\n   * Gets the blob with the given name as a mutable pointer. If the blob does\n   * not exist, a nullptr is returned.\n   */\n  Blob* GetBlob(const string& name);\n\n  /**\n   * Creates a network with the given NetDef, and returns the pointer to the\n   * network. If there is anything wrong during the creation of the network, a\n   * nullptr is returned. 
The Workspace keeps ownership of the pointer.\n   *\n   * If there is already a net created in the workspace with the given name,\n   * CreateNet will overwrite it if overwrite=true is specified. Otherwise, an\n   * exception is thrown.\n   */\n  NetBase* CreateNet(const NetDef& net_def, bool overwrite = false);\n  NetBase* CreateNet(\n      const std::shared_ptr<const NetDef>& net_def,\n      bool overwrite = false);\n  /**\n   * Gets the pointer to a created net. The workspace keeps ownership of the\n   * network.\n   */\n  NetBase* GetNet(const string& net_name);\n  /**\n   * Deletes the instantiated network with the given name.\n   */\n  void DeleteNet(const string& net_name);\n  /**\n   * Finds and runs the instantiated network with the given name. If the network\n   * does not exist or there are errors running the network, the function\n   * returns false.\n   */\n  bool RunNet(const string& net_name);\n\n  /**\n   * Returns a list of names of the currently instantiated networks.\n   */\n  vector<string> Nets() const {\n    vector<string> names;\n    for (auto& entry : net_map_) {\n      names.push_back(entry.first);\n    }\n    return names;\n  }\n\n  /**\n   * Runs a plan that has multiple nets and execution steps.\n   */\n  bool RunPlan(const PlanDef& plan_def,\n               ShouldContinue should_continue = StopOnSignal{});\n\n#if CAFFE2_MOBILE\n  /*\n   * Returns a CPU threadpool instace for parallel execution of\n   * work. The threadpool is created lazily; if no operators use it,\n   * then no threadpool will be created.\n   */\n  ThreadPool* GetThreadPool();\n#endif\n\n  // RunOperatorOnce and RunNetOnce runs an operator or net once. 
The difference\n  // between RunNet and RunNetOnce lies in the fact that RunNet allows you to\n  // have a persistent net object, while RunNetOnce creates a net and discards\n  // it on the fly - this may make things like database read and random number\n  // generators repeat the same thing over multiple calls.\n  bool RunOperatorOnce(const OperatorDef& op_def);\n  bool RunNetOnce(const NetDef& net_def);\n\n public:\n  std::atomic<int> last_failed_op_net_position;\n\n private:\n  BlobMap blob_map_;\n  NetMap net_map_;\n  const string root_folder_;\n  const Workspace* shared_;\n  std::unordered_map<string, std::pair<const Workspace*, string>>\n      forwarded_blobs_;\n#if CAFFE2_MOBILE\n  std::unique_ptr<ThreadPool> thread_pool_;\n  std::mutex thread_pool_creation_mutex_;\n#endif // CAFFE2_MOBILE\n\n  DISABLE_COPY_AND_ASSIGN(Workspace);\n};\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_CORE_WORKSPACE_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/cuda_rtc/common_rtc.h",
    "content": "#ifndef CAFFE2_CUDA_RTC_COMMON_RTC_H_\n#define CAFFE2_CUDA_RTC_COMMON_RTC_H_\n\n#include <sstream>\n#include <string>\n\n#include <cuda.h>\n#include <nvrtc.h>\n\n#define NVRTC_CHECK(condition)                                                 \\\n  do {                                                                         \\\n    nvrtcResult result = condition;                                            \\\n    if (result != NVRTC_SUCCESS) {                                             \\\n      LOG(FATAL) << \"Error at: \" << __FILE__ << \":\" << __LINE__ << \": \"   \\\n                      << nvrtcGetErrorString(result);                          \\\n    }                                                                          \\\n  } while(0)\n\nnamespace caffe2 {\n\ntemplate <typename Derived>\nclass CudaRTCFunction {\n public:\n  CudaRTCFunction() : module_loaded_(false) {}\n  ~CudaRTCFunction() {\n    if (module_loaded_) {\n      CUDA_DRIVERAPI_ENFORCE(cuModuleUnload(module_));\n    }\n  }\n\n  // TODO: this function is nontrivial and since CudaRTCFunction uses CRTP, it\n  // may potentially increase the binary size. In that case, move common parts\n  // into a separate function.\n  template <typename... Args>\n  void Compile(Args... 
args) {\n    string src = static_cast<Derived*>(this)->GetSource(args...);\n    string name = static_cast<Derived*>(this)->KernelName(args...);\n    VLOG(1) << \"function name: \" << name;\n    VLOG(1) << \"function src:\\n\" << src;\n    // Actually do the compiling.\n    nvrtcProgram prog;\n    NVRTC_CHECK(nvrtcCreateProgram(\n        &prog, src.c_str(), nullptr, 0, nullptr, nullptr));\n    // Compile the program.\n    // TODO(Yangqing): how to find the current gpu architecture instead of hard\n    // coding it?\n    const char *nvrtc_opts[] = {\"--gpu-architecture=compute_35\",\n                                \"--use_fast_math\"};\n    nvrtcResult compile_result = nvrtcCompileProgram(\n        prog, 2, nvrtc_opts);\n    if (compile_result != NVRTC_SUCCESS) {\n      size_t log_size;\n      NVRTC_CHECK(nvrtcGetProgramLogSize(prog, &log_size));\n      vector<char> nvrtc_log(log_size);\n      NVRTC_CHECK(nvrtcGetProgramLog(prog, nvrtc_log.data()));\n      LOG(FATAL) << \"Compilation failure for nvrtc(\"\n                 << nvrtcGetErrorString(compile_result) << \"): \\n\"\n                 << nvrtc_log.data();\n    }\n    size_t ptx_size;\n    NVRTC_CHECK(nvrtcGetPTXSize(prog, &ptx_size));\n    vector<char> nvrtc_ptx(ptx_size);\n    NVRTC_CHECK(nvrtcGetPTX(prog, nvrtc_ptx.data()));\n    NVRTC_CHECK(nvrtcDestroyProgram(&prog));\n    // After compilation, load the module.\n    if (module_loaded_) {\n      CUDA_DRIVERAPI_ENFORCE(cuModuleUnload(module_));\n    }\n    CUDA_DRIVERAPI_ENFORCE(\n        cuModuleLoadDataEx(&module_, nvrtc_ptx.data(), 0, 0, 0));\n    module_loaded_ = true;\n    CUDA_DRIVERAPI_ENFORCE(\n        cuModuleGetFunction(&kernel_, module_, name.c_str()));\n  }\n\n  template <typename... Args>\n  void Launch(unsigned int gx, unsigned int gy, unsigned int gz,\n              unsigned int bx, unsigned int by, unsigned int bz,\n              unsigned int shared_mem, cudaStream_t stream,\n              Args... 
args) {\n    CAFFE_ENFORCE(\n        module_loaded_, \"Cannot call Launch before a module is loaded.\");\n    void * args_voidp[] = {&args...};\n    CUDA_DRIVERAPI_ENFORCE(cuLaunchKernel(\n        kernel_, gx, gy, gz, bx, by, bz, shared_mem, stream, args_voidp, 0));\n  }\n\n  void LaunchEx(unsigned int gx, unsigned int gy, unsigned int gz,\n                unsigned int bx, unsigned int by, unsigned int bz,\n                unsigned int shared_mem, cudaStream_t stream,\n                void** extra) {\n    CAFFE_ENFORCE(\n        module_loaded_, \"Cannot call Launch before a module is loaded.\");\n    CUDA_DRIVERAPI_ENFORCE(cuLaunchKernel(\n        kernel_, gx, gy, gz, bx, by, bz, shared_mem, stream, nullptr, extra));\n  }\n\n private:\n  bool module_loaded_;\n  CUmodule module_;\n  CUfunction kernel_;\n};\n\n// TODO: this is in no way unique and is just a hack right now.\ninline std::string GetUniqueName() {\n  static constexpr int len = 20;\n  static const char alpha[] =\n      \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\";\n\n  std::stringstream ss;\n  ss << \"_cuda_kernel_\";\n  for (int i = 0; i < len; ++i) {\n    ss << alpha[rand() % (sizeof(alpha) - 1)];\n  }\n  return ss.str();\n}\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_CUDA_RTC_COMMON_RTC_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/db/create_db_op.h",
    "content": "#ifndef CAFFE2_DB_CREATE_DB_OP_H_\n#define CAFFE2_DB_CREATE_DB_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/db.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass CreateDBOp final : public Operator<Context> {\n public:\n  CreateDBOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        db_type_(OperatorBase::template GetSingleArgument<string>(\n            \"db_type\",\n            \"leveldb\")),\n        db_name_(OperatorBase::template GetSingleArgument<string>(\"db\", \"\")),\n        num_shards_(\n            OperatorBase::template GetSingleArgument<int>(\"num_shards\", 1)),\n        shard_id_(\n            OperatorBase::template GetSingleArgument<int>(\"shard_id\", 0)) {\n    CAFFE_ENFORCE_GT(db_name_.size(), 0, \"Must specify a db name.\");\n  }\n\n  bool RunOnDevice() final {\n    OperatorBase::Output<db::DBReader>(0)->Open(\n        db_type_, db_name_, num_shards_, shard_id_);\n    return true;\n  }\n\n private:\n  string db_type_;\n  string db_name_;\n  uint32_t num_shards_;\n  uint32_t shard_id_;\n  DISABLE_COPY_AND_ASSIGN(CreateDBOp);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_DB_CREATE_DB_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/distributed/file_store_handler.h",
    "content": "#pragma once\n\n#include <caffe2/distributed/store_handler.h>\n\nnamespace caffe2 {\n\nclass FileStoreHandler : public StoreHandler {\n public:\n  explicit FileStoreHandler(const std::string& path, const std::string& prefix);\n  virtual ~FileStoreHandler();\n\n  virtual void set(const std::string& name, const std::string& data) override;\n\n  virtual std::string get(const std::string& name) override;\n\n  virtual int64_t add(const std::string& name, int64_t value) override;\n\n  virtual bool check(const std::vector<std::string>& names) override;\n\n  virtual void wait(\n      const std::vector<std::string>& names,\n      const std::chrono::milliseconds& timeout = kDefaultTimeout) override;\n\n protected:\n  std::string basePath_;\n\n  std::string realPath(const std::string& path);\n\n  std::string tmpPath(const std::string& name);\n\n  std::string objectPath(const std::string& name);\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/distributed/file_store_handler_op.h",
    "content": "#pragma once\n\n#include \"file_store_handler.h\"\n\n#include <caffe2/core/operator.h>\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass FileStoreHandlerCreateOp final : public Operator<Context> {\n public:\n  explicit FileStoreHandlerCreateOp(\n      const OperatorDef& operator_def,\n      Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        basePath_(\n            OperatorBase::template GetSingleArgument<std::string>(\"path\", \"\")),\n        prefix_(OperatorBase::template GetSingleArgument<std::string>(\n            \"prefix\",\n            \"\")) {\n    CAFFE_ENFORCE_NE(basePath_, \"\", \"path is a required argument\");\n  }\n\n  bool RunOnDevice() override {\n    auto ptr =\n        std::unique_ptr<StoreHandler>(new FileStoreHandler(basePath_, prefix_));\n    *OperatorBase::Output<std::unique_ptr<StoreHandler>>(HANDLER) =\n        std::move(ptr);\n    return true;\n  }\n\n private:\n  std::string basePath_;\n  std::string prefix_;\n\n  OUTPUT_TAGS(HANDLER);\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/distributed/redis_store_handler.h",
    "content": "#pragma once\n\n#include <caffe2/distributed/store_handler.h>\n\nextern \"C\" {\n#include <hiredis/hiredis.h>\n}\n\n#include <string>\n\nnamespace caffe2 {\n\nclass RedisStoreHandler : public StoreHandler {\n public:\n  explicit RedisStoreHandler(std::string& host, int port, std::string& prefix);\n  virtual ~RedisStoreHandler();\n\n  virtual void set(const std::string& name, const std::string& data) override;\n\n  virtual std::string get(const std::string& name) override;\n\n  virtual int64_t add(const std::string& name, int64_t value) override;\n\n  virtual bool check(const std::vector<std::string>& names) override;\n\n  virtual void wait(\n      const std::vector<std::string>& names,\n      const std::chrono::milliseconds& timeout = kDefaultTimeout) override;\n\n private:\n  std::string host_;\n  int port_;\n  std::string prefix_;\n\n  redisContext* redis_;\n\n  std::string compoundKey(const std::string& name);\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/distributed/redis_store_handler_op.h",
    "content": "#pragma once\n\n#include \"redis_store_handler.h\"\n\n#include <caffe2/core/operator.h>\n\n#include <string>\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass RedisStoreHandlerCreateOp final : public Operator<Context> {\n public:\n  explicit RedisStoreHandlerCreateOp(\n      const OperatorDef& operator_def,\n      Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        host_(\n            OperatorBase::template GetSingleArgument<std::string>(\"host\", \"\")),\n        port_(OperatorBase::template GetSingleArgument<int>(\"port\", 0)),\n        prefix_(OperatorBase::template GetSingleArgument<std::string>(\n            \"prefix\",\n            \"\")) {\n    CAFFE_ENFORCE_NE(host_, \"\", \"host is a required argument\");\n    CAFFE_ENFORCE_NE(port_, 0, \"port is a required argument\");\n  }\n\n  bool RunOnDevice() override {\n    auto ptr = std::unique_ptr<StoreHandler>(\n        new RedisStoreHandler(host_, port_, prefix_));\n    *OperatorBase::Output<std::unique_ptr<StoreHandler>>(HANDLER) =\n        std::move(ptr);\n    return true;\n  }\n\n private:\n  std::string host_;\n  int port_;\n  std::string prefix_;\n\n  OUTPUT_TAGS(HANDLER);\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/distributed/store_handler.h",
    "content": "#pragma once\n\n#include <chrono>\n#include <cstdint>\n#include <stdexcept>\n#include <string>\n#include <vector>\n\nnamespace caffe2 {\n\nclass StoreHandler {\n public:\n  static constexpr std::chrono::milliseconds kDefaultTimeout =\n      std::chrono::seconds(30);\n  static constexpr std::chrono::milliseconds kNoTimeout =\n      std::chrono::milliseconds::zero();\n\n  virtual ~StoreHandler();\n\n  /*\n   * Set data for the key if it doesn't exist.\n   * If the key exists the data should be the same as the existing key.\n   */\n  virtual void set(const std::string& name, const std::string& data) = 0;\n\n  /*\n   * Get the data for the key.\n   * The call should wait until the key is stored with default timeout\n   * and return data if set else fail.\n   */\n  virtual std::string get(const std::string& name) = 0;\n\n  /*\n   * Does an atomic add operation on the key and returns the latest updated\n   * value.\n   * Note: To access the current value for this counter call with value = 0\n   */\n  virtual int64_t add(const std::string& name, int64_t value) = 0;\n\n  /*\n   * Check if a keys exist in the store.\n   */\n  virtual bool check(const std::vector<std::string>& names) = 0;\n\n  /*\n   * Wait for Keys to be stored.\n   */\n  virtual void wait(\n      const std::vector<std::string>& names,\n      const std::chrono::milliseconds& timeout = kDefaultTimeout) = 0;\n};\n\nstruct StoreHandlerTimeoutException : public std::runtime_error {\n  StoreHandlerTimeoutException() = default;\n  explicit StoreHandlerTimeoutException(const std::string& msg)\n      : std::runtime_error(msg) {}\n};\n\n#define STORE_HANDLER_TIMEOUT(...)              \\\n  throw ::caffe2::StoreHandlerTimeoutException( \\\n      ::caffe2::MakeString(\"[\", __FILE__, \":\", __LINE__, \"] \", __VA_ARGS__));\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/distributed/store_ops.h",
    "content": "#pragma once\n\n#include \"store_handler.h\"\n\n#include <caffe2/core/operator.h>\n\nnamespace caffe2 {\n\nclass StoreSetOp final : public Operator<CPUContext> {\n public:\n  StoreSetOp(const OperatorDef& operator_def, Workspace* ws);\n  bool RunOnDevice() override;\n\n private:\n  std::string blobName_;\n\n  INPUT_TAGS(HANDLER, DATA);\n};\n\nclass StoreGetOp final : public Operator<CPUContext> {\n public:\n  StoreGetOp(const OperatorDef& operator_def, Workspace* ws);\n  bool RunOnDevice() override;\n\n private:\n  std::string blobName_;\n\n  INPUT_TAGS(HANDLER);\n  OUTPUT_TAGS(DATA);\n};\n\nclass StoreAddOp final : public Operator<CPUContext> {\n public:\n  StoreAddOp(const OperatorDef& operator_def, Workspace* ws);\n  bool RunOnDevice() override;\n\n private:\n  std::string blobName_;\n  int addValue_;\n\n  INPUT_TAGS(HANDLER);\n  OUTPUT_TAGS(VALUE);\n};\n\nclass StoreWaitOp final : public Operator<CPUContext> {\n public:\n  StoreWaitOp(const OperatorDef& operator_def, Workspace* ws);\n  bool RunOnDevice() override;\n\n private:\n  std::vector<std::string> blobNames_;\n\n  INPUT_TAGS(HANDLER);\n};\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/experiments/operators/fully_connected_op_decomposition.h",
    "content": "#ifndef CAFFE2_OPERATORS_FULLY_CONNECTED_OP_DECOMPOSITION_H_\n#define CAFFE2_OPERATORS_FULLY_CONNECTED_OP_DECOMPOSITION_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n/*\n * Although a FC_decomp is just like 2 small FC,\n * it is better to have it as one op for future analysis.\n * And if we have 2 FC with bias, it is not right.\n * TODO(wyiming): decompose the layer into 2 matrices\n * W(N * K) = U(N * middle) * trans(V(K * middle))\n * */\n// This is Caffe's InnerProductOp, with a name that fits its purpose better.\ntemplate <typename T, class Context, class Engine=DefaultEngine>\nclass FullyConnectedOpDecomp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  FullyConnectedOpDecomp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n  ~FullyConnectedOpDecomp() {}\n\n  bool RunOnDevice() override {\n    const auto& X = Input(0);\n    const auto& U = Input(1);\n    const auto& V = Input(2);\n    const auto& b = Input(3);\n    auto* Y = Output(0);\n    //auto* buffer_ptr = Output(1);\n    // Size M * middle;\n    //auto& multi_buffer_ = *buffer_ptr;\n    CAFFE_ENFORCE_GE(X.ndim(), 1);\n    CAFFE_ENFORCE_GE(U.ndim(), 2);\n    CAFFE_ENFORCE_GE(V.ndim(), 2);\n    if (X.ndim() > 2 || U.ndim() > 2 || V.ndim() > 2) {\n      VLOG(1) << \"Using legacy support for arbitrary input and weight \"\n                       \"dimensions.\";\n    }\n    CAFFE_ENFORCE_EQ(b.ndim(), 1);\n    // batch size\n    int M = X.ndim() > 1 ? 
X.dim32(0) : 1;\n    // Feature dimension\n    int K = X.size() / M;\n    // number of outputs.\n    int N = U.dim32(0);\n    int middle = U.dim32(0);\n    CAFFE_ENFORCE_EQ(K, V.dim32(0));\n    CAFFE_ENFORCE_EQ(N, b.dim32(0));\n    if (X.ndim() > 1) {\n      Y->Resize(M, N);\n      multi_buffer_.Resize(M, middle);\n    } else {\n      Y->Resize(N);\n      multi_buffer_.Resize(middle);\n    }\n  // The col buffer is stored in CHW order as well - kernel_dim, and the height\n  // and width.\n    //  multi_buffer_.Resize(M, middle);\n    T* multi_buffer_data = multi_buffer_.template mutable_data<T>();\n    //  X * V * tans(U)\n    math::Gemm<T, Context, Engine>(\n        CblasNoTrans, CblasNoTrans, M, middle, K, 1, X.template data<T>(),\n        V.template data<T>(), 0, multi_buffer_data,\n        &context_);\n    math::Gemm<T, Context, Engine>(\n        CblasNoTrans, CblasTrans, M, N, middle, 1, multi_buffer_data,\n        U.template data<T>(), 0, Y->template mutable_data<T>(),\n        &context_);\n    // Add bias term\n    if (bias_multiplier_.size() != M) {\n      // If the helper bias multiplier is not M, reshape and fill it with one.\n      bias_multiplier_.Resize(M);\n      math::Set<T, Context>(\n          M, static_cast<T>(1), bias_multiplier_.template mutable_data<T>(),\n          &context_);\n    }\n    math::Gemm<T, Context, Engine>(\n        CblasNoTrans, CblasNoTrans, M, N, 1, 1,\n        bias_multiplier_.template data<T>(), b.template data<T>(), 1,\n        Y->template mutable_data<T>(), &context_);\n    return true;\n  }\n\n protected:\n  Tensor<Context> bias_multiplier_;\n  Tensor<Context> multi_buffer_;\n};\n\ntemplate <typename T, class Context, class Engine=DefaultEngine>\nclass FullyConnectedDecompGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  FullyConnectedDecompGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n  ~FullyConnectedDecompGradientOp() 
{}\n\n  bool RunOnDevice() override {\n    const auto& X = Input(0);\n    const auto& U = Input(1);\n    const auto& V = Input(2);\n    const auto& dY = Input(3);\n    DCHECK_GE(X.ndim(), 1);\n    DCHECK_GE(U.ndim(), 2);\n    DCHECK_GE(V.ndim(), 2);\n    DCHECK_LE(dY.ndim(), 2);\n    // batch size\n    int M = X.ndim() > 1 ? X.dim32(0) : 1;\n    // Feature dimension\n    int K = X.size() / M;\n    // number of outputs.\n    int N = U.dim32(0);\n    int middle = U.dim32(1);\n    DCHECK_EQ(K, V.dim32(0));\n    if (dY.ndim() > 1) {\n      DCHECK_EQ(M, dY.dim32(0));\n      DCHECK_EQ(N, dY.dim32(1));\n    } else {\n      DCHECK_EQ(X.ndim(), 1);\n      DCHECK_EQ(N, dY.size());\n    }\n    auto* dU = Output(0);\n    auto* dV = Output(1);\n    auto* db = Output(2);\n    dU->ResizeLike(U);\n    dV->ResizeLike(V);\n    db->Resize(N);\n\n    // Compute dU\n    // first compute X * V\n    du_buffer_.Resize(N, middle);\n    T* du_buffer_data = du_buffer_.template mutable_data<T>();\n    math::Gemm<T, Context, Engine>(\n        CblasNoTrans, CblasNoTrans, M, middle, K, 1,\n        X.template data<T>(), V.template data<T>(),\n        0, du_buffer_data,\n        &context_);\n    math::Gemm<T, Context, Engine>(\n        CblasTrans, CblasNoTrans, N, middle, M, 1,\n        dY.template data<T>(), du_buffer_data,\n        0, dU->template mutable_data<T>(),\n        &context_);\n    // Compute dV\n    // first compute dY * U\n    dv_buffer_.Resize(M, middle);\n    T* dv_buffer_data = dv_buffer_.template mutable_data<T>();\n    math::Gemm<T, Context, Engine>(\n        CblasNoTrans, CblasNoTrans, M, middle, N, 1,\n        dY.template data<T>(), U.template data<T>(),\n        0, dv_buffer_data,\n        &context_);\n    math::Gemm<T, Context, Engine>(\n        CblasTrans, CblasNoTrans, K, middle, M, 1,\n        dY.template data<T>(), du_buffer_data,\n        0, dV->template mutable_data<T>(),\n        &context_);\n    if (bias_multiplier_.size() != M) {\n      // If the helper bias 
multiplier is not M, reshape and fill it with one.\n      bias_multiplier_.Resize(M);\n      math::Set<T, Context>(\n          M, static_cast<T>(1),\n          bias_multiplier_.template mutable_data<T>(),\n          &context_);\n    }\n    // Compute dB\n    math::Gemv<T, Context>(\n        CblasTrans, M, N, 1, dY.template data<T>(),\n        bias_multiplier_.template data<T>(), 0,\n        db->template mutable_data<T>(),\n        &context_);\n    // Compute dX if necessary.\n    if (OutputSize() == 4) {\n      auto* dX = Output(3);\n      dX->ResizeLike(X);\n      dx_buffer_.Resize(M, middle);\n      T* dx_buffer_data = dx_buffer_.template mutable_data<T>();\n      math::Gemm<T, Context, Engine>(\n          CblasNoTrans, CblasNoTrans, M, middle, N, 1,\n          dY.template data<T>(), U.template data<T>(),\n          0, dx_buffer_data,\n          &context_);\n      math::Gemm<T, Context, Engine>(\n          CblasNoTrans, CblasTrans, M, K, middle, 1,\n          dx_buffer_data, V.template data<T>(),\n          0, dX->template mutable_data<T>(),\n          &context_);\n    }\n\n    return true;\n  }\n\n protected:\n  Tensor<Context> bias_multiplier_;\n  Tensor<Context> du_buffer_;\n  Tensor<Context> dv_buffer_;\n  Tensor<Context> dx_buffer_;\n};\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/experiments/operators/fully_connected_op_prune.h",
    "content": "#ifndef CAFFE2_OPERATORS_FULLY_CONNECTED_OP_PRUNE_H_\n#define CAFFE2_OPERATORS_FULLY_CONNECTED_OP_PRUNE_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\n  namespace {\n\n    template<int N>\n      using Shape = std::array<int, N>;\n\n    template<int N>\n      const std::vector<TIndex>& shape(Shape<N> vs) {\n        static thread_local std::vector<TIndex> cache;\n        cache.resize(vs.size());\n        for (auto i = 0; i < vs.size(); ++i) {\n          cache[i] = vs[i];\n        }\n        return cache;\n      }\n\n    inline const std::vector<TIndex>& shape(int i) {\n      return shape<1>(Shape<1>({i}));\n    }\n\n    inline const std::vector<TIndex>& shape(int i, int j) {\n      return shape<2>(Shape<2>({i, j}));\n    }\n\n    template <typename T, class Context>\n      void MaskMatrix(const T* mask, T* mat,\n          int M, int N);\n\n    template <typename T, class Context>\n      void MaskMatrix_Inc(T* mask_seq, T* mat,\n          int M, int N, int seq_len, T target);\n\n    template <typename T, class Context>\n      void AggrDW(T* ag_dw, const T* dw, int N, int K, Context* context);\n\n    template <typename T>\n      int MatrixCompare_LT(const T* mat, float thres,\n                           T* mask_seq, int M, int N);\n\n    // TODO(wyiming): write an incremental Mask\n    // Incremental Mask: only give the new mask positions;\n    // Assuming that weights masked will not be mask again;\n    // The incremental mask can also be used to update mask matrix;\n    // But this will include template for bool and float;\n    template <>\n      void MaskMatrix<float, CPUContext>(\n          const float* mask, float* mat, int M, int N) {\n        int offset = 0;\n        for (int i = 0; i < M; ++i) {\n          for (int j = 0; j < N; ++j) {\n            mat[offset] = mask[offset]? 
mat[offset] : 0;\n            offset++;\n          }\n        }\n      }\n\n      template <>\n      void MaskMatrix_Inc<float, CPUContext>(\n          float* mask_seq,\n          float* mat,\n          int /*M*/,\n          int /*N*/,\n          int seq_len,\n          float target) {\n        for (int i = 0; i < seq_len; ++i) {\n          // assume that the mask_seq is smaller than size\n          // Although it seems that random access gets bad performance,\n          // we make sure that seq is in order;\n          mat[static_cast<int>(mask_seq[i])] = target;\n        }\n      }\n\n    template <>\n      void AggrDW<float, CPUContext>(\n          float* ag_dw, const float* dw,\n          int N, int K, CPUContext* context) {\n        math::Add<float, CPUContext>(N*K, dw, ag_dw, ag_dw, context);\n      }\n\n    template <>\n      int MatrixCompare_LT<float>(\n          const float* mat, float thres,\n          float* mask_seq, int M, int N) {\n        int seq_len = 0;\n        int offset = 0;\n        for (int i = 0 ; i < M; ++i) {\n          for (int j = 0; j < N; ++j) {\n            if (mat[offset] != 0 &&\n                (mat[offset] < thres && mat[offset] > -thres)) {\n              mask_seq[seq_len++] = static_cast<float>(offset);\n            }\n            offset++;\n          }\n        }\n        return seq_len;\n      }\n\n  }\n\n  // This is Caffe's InnerProductOp, with a name that fits its purpose better.\n  template <typename T, class Context, class Engine=DefaultEngine>\n    class FullyConnectedOpPrune final : public Operator<Context> {\n      public:\n        USE_OPERATOR_CONTEXT_FUNCTIONS;\n        FullyConnectedOpPrune(const OperatorDef& operator_def, Workspace* ws)\n          : Operator<Context>(operator_def, ws) {}\n        ~FullyConnectedOpPrune() {}\n\n        bool RunOnDevice() override {\n          const auto& X = Input(0);\n          const auto& W = Input(1);\n          const auto& Mask = Input(2);\n          const auto& b = Input(3);\n   
       auto* Y = Output(0);\n          CAFFE_ENFORCE_GE(X.ndim(), 1);\n          CAFFE_ENFORCE_GE(W.ndim(), 2);\n          if (X.ndim() > 2 || W.ndim() > 2) {\n            VLOG(1) << \"Using legacy support for arbitrary input and weight \"\n              \"dimensions.\";\n          }\n          CAFFE_ENFORCE_EQ(b.ndim(), 1);\n          // batch size\n          int M = X.ndim() > 1 ? X.dim32(0) : 1;\n          // Feature dimension\n          int K = X.size() / M;\n          // number of outputs.\n          int N = W.dim32(0);\n          CAFFE_ENFORCE_EQ(K, W.size() / W.dim32(0));\n          CAFFE_ENFORCE_EQ(N, b.dim32(0));\n          if (X.ndim() > 1) {\n            Y->Resize(M, N);\n          } else {\n            Y->Resize(N);\n          }\n          // W * x\n          math::Gemm<T, Context, Engine>(\n              CblasNoTrans, CblasTrans, M, N, K, 1, X.template data<T>(),\n              W.template data<T>(), 0, Y->template mutable_data<T>(),\n              &context_);\n          // Add bias term\n          if (bias_multiplier_.size() != M) {\n            // If the helper bias multiplier is not M,\n            // reshape and fill it with one.\n            bias_multiplier_.Resize(M);\n            math::Set<T, Context>(\n                M, static_cast<T>(1),\n                bias_multiplier_.template mutable_data<T>(),\n                &context_);\n          }\n          math::Gemm<T, Context, Engine>(\n              CblasNoTrans, CblasNoTrans, M, N, 1, 1,\n              bias_multiplier_.template data<T>(), b.template data<T>(), 1,\n              Y->template mutable_data<T>(), &context_);\n          if (OutputSize() == 2){\n            auto* Comp_rate = Output(1);\n            Comp_rate->Resize(vector<TIndex>());\n            T* comp_data = Comp_rate->template mutable_data<T>();\n            math::Sum<T, Context>(\n                Mask.size(), Mask.template data<T>(), comp_data, &context_);\n            math::Scale<T, Context>(\n                1, 
static_cast<T>(1.) / Mask.size(), comp_data, comp_data,\n                &context_);\n          }\n          return true;\n        }\n\n      protected:\n        Tensor<Context> bias_multiplier_;\n    };\n\n  template <typename T, class Context, class Engine=DefaultEngine>\n    class FullyConnectedPruneGradientOp : public Operator<Context> {\n      public:\n        int iter_offset;\n      public:\n        USE_OPERATOR_CONTEXT_FUNCTIONS;\n        FullyConnectedPruneGradientOp\n          (const OperatorDef& operator_def, Workspace* ws)\n          : Operator<Context>(operator_def, ws) { iter_offset = 0; }\n        ~FullyConnectedPruneGradientOp() {}\n\n        bool RunOnDevice() override {\n          const auto& X = Input(0);\n          //const auto& W = Input(1);\n          auto* W_ptr = Output(2);\n          auto& W = *W_ptr;\n          //const auto& Mask = Input(2);\n          auto* Mask_ptr = Output(3);\n          auto& Mask = *Mask_ptr;\n          const auto& dY = Input(3);\n          //const auto& Ag_dW = Input(4);\n          auto* Ag_dW_ptr = Output(4);\n          auto& Ag_dW = *Ag_dW_ptr;\n          // it is also the Input(5)\n          auto* mask_seq_auto = Output(5);\n          // how about get threshold\n          auto& thres = Input(6);\n          //TODO(wyiming): check comp_lb is a float\n          auto& comp_lb = Input(7);\n          DCHECK_GE(X.ndim(), 1);\n          DCHECK_GE(W.ndim(), 2);\n          DCHECK_LE(dY.ndim(), 2);\n          // batch size\n          int M = X.ndim() > 1 ? 
X.dim32(0) : 1;\n          // Feature dimension\n          int K = X.size() / M;\n          // number of outputs.\n          int N = W.dim32(0);\n          // TODO(wyiming): add this window_size to workspace?\n          int window_size = 100;\n          // TODO(wyiming): this threshold should be\n          // based on distribution of the layer weight\n          float thr = 0.01;\n          DCHECK_EQ(Mask.dim32(0), W.dim32(0));\n          DCHECK_EQ(Mask.dim32(1), W.dim32(1));\n          DCHECK_EQ(Ag_dW.dim32(0), W.dim32(0));\n          DCHECK_EQ(Ag_dW.dim32(1), W.dim32(1));\n          DCHECK_EQ(K, W.size() / W.dim32(0));\n          if (dY.ndim() > 1) {\n            DCHECK_EQ(M, dY.dim32(0));\n            DCHECK_EQ(N, dY.dim32(1));\n          } else {\n            DCHECK_EQ(X.ndim(), 1);\n            DCHECK_EQ(N, dY.size());\n          }\n          auto* dW = Output(0);\n          auto* db = Output(1);\n          dW->ResizeLike(W);\n          db->Resize(N);\n\n          // Compute dW\n          math::Gemm<T, Context, Engine>(\n              CblasTrans, CblasNoTrans, N, K, M, 1,\n              dY.template data<T>(), X.template data<T>(),\n              0, dW->template mutable_data<T>(),\n              &context_);\n\n          comp_r_buf_.Resize(vector<TIndex>());\n          T* comp_data = comp_r_buf_.template mutable_data<T>();\n          math::Sum<T, Context>(\n              Mask.size(), Mask.template data<T>(), comp_data, &context_);\n          math::Scale<T, Context>(\n              1, static_cast<T>(1.) / Mask.size(), comp_data, comp_data,\n              &context_);\n          // update W size window\n          // Notice here we need to maintain state in OP.\n          // This is new in Caffe2.\n          // And this is something we might need to discuss in the future.\n          // at most mask half of the matrix at time\n          // 1. 
mask dw with previous mask\n          MaskMatrix<T, Context>(Mask.template mutable_data<T>(),\n              dW->template mutable_data<T>(), N, K);\n          if(*comp_data > *(comp_lb.template data<T>())){\n            iter_offset++;\n            if (iter_offset % window_size == 0) {\n              // TODO(wyiming):do the prune here;\n              sum_buffer_.ResizeLike(W);\n              math::Add<T, Context>(W.size(),\n                  W.template mutable_data<T>(),\n                  Ag_dW.template mutable_data<T>(),\n                  sum_buffer_.template mutable_data<T>(),\n                  &context_);\n              mask_seq_auto->ResizeLike(W);\n              T* mask_seq = mask_seq_auto->template mutable_data<T>();\n              math::Set<T, Context>(N*K, static_cast<T>(0),\n                  mask_seq_auto->template mutable_data<T>(), &context_);\n              // 2. find dw below thres but not eq 0\n              int seq_len = MatrixCompare_LT<T>(\n                  Ag_dW_ptr->template mutable_data<T>(),\n                  *thres.template data<T>(), mask_seq, N, K);\n              // 3. 
use the mask_seq to update W and dw\n              MaskMatrix_Inc<T, Context>(mask_seq,\n                                         dW->template mutable_data<T>(),\n                                         N, K, seq_len, 0);\n              MaskMatrix_Inc<T, Context>(mask_seq,\n                                         W.template mutable_data<T>(),\n                                         N, K, seq_len, 0);\n              MaskMatrix_Inc<T, Context>(mask_seq,\n                                         Mask.template mutable_data<T>(),\n                                         N, K, seq_len, 0);\n              math::Set<T, Context>(N*K, static_cast<T>(0),\n                  Ag_dW.template mutable_data<T>(),\n                  &context_);\n            } else {\n              // add dW to Aggregate dW.\n              AggrDW<T, Context>(\n                  Ag_dW.template mutable_data<T>(),\n                  dW->template mutable_data<T>(),\n                  N, K, &context_);\n            }\n          }\n          if (bias_multiplier_.size() != M) {\n            // If the helper bias multiplier is not M,\n            // reshape and fill it with one.\n            bias_multiplier_.Resize(M);\n            math::Set<T, Context>(\n                M, static_cast<T>(1),\n                bias_multiplier_.template mutable_data<T>(),\n                &context_);\n          }\n          // Compute dB\n          math::Gemv<T, Context>(\n              CblasTrans, M, N, 1, dY.template data<T>(),\n              bias_multiplier_.template data<T>(), 0,\n              db->template mutable_data<T>(),\n              &context_);\n          // Compute dX if necessary.\n          if (OutputSize() == 7) {\n            auto* dX = Output(6);\n            dX->ResizeLike(X);\n            math::Gemm<T, Context, Engine>(\n                CblasNoTrans, CblasNoTrans, M, K, N, 1,\n                dY.template data<T>(), W.template data<T>(),\n                0, dX->template mutable_data<T>(),\n               
 &context_);\n          }\n\n          return true;\n        }\n\n      protected:\n        Tensor<Context> bias_multiplier_;\n        Tensor<Context> sum_buffer_;\n        Tensor<Context> comp_r_buf_;\n    };\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/experiments/operators/fully_connected_op_sparse.h",
    "content": "#ifndef CAFFE2_OPERATORS_FULLY_CONNECTED_OP_SPARSE_H_\n#define CAFFE2_OPERATORS_FULLY_CONNECTED_OP_SPARSE_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n#ifdef CAFFE2_USE_MKL\n#include <mkl.h>\n#endif  // CAFFE2_USE_MKL\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate<int N>\nusing Shape = std::array<int, N>;\n\ntemplate<int N>\nconst std::vector<TIndex>& shape(Shape<N> vs) {\n  static thread_local std::vector<TIndex> cache;\n  cache.resize(vs.size());\n  for (auto i = 0; i < vs.size(); ++i) {\n    cache[i] = vs[i];\n  }\n  return cache;\n}\n\ninline const std::vector<TIndex>& shape(int i) {\n  return shape<1>(Shape<1>({i}));\n}\n\ninline const std::vector<TIndex>& shape(int i, int j) {\n  return shape<2>(Shape<2>({i, j}));\n}\n\ntemplate <typename T, class Context>\nvoid Sparse_mm(const T* acsr, const int* ia, const int* ja,\n              int m, int k, int n, const T* b, T* c, Context* context);\n\ntemplate<typename T, class Context>\nvoid trans_mat(const T* o, T* t, int m, int n, Context* context);\n\ntemplate <>\nvoid trans_mat<float, CPUContext>(\n    const float* o,\n    float* t,\n    int m,\n    int n,\n    CPUContext* /*context*/) {\n  for(int i = 0; i < m; ++i){\n    for(int j = 0; j < n; ++j){\n      t[j*m+i]=o[i*n+j];\n    }\n  }\n}\n\n// C = A(sparse) * B\n// No transpose;\ntemplate <>\nvoid Sparse_mm<float, CPUContext>(\n    const float* acsr,\n    const int* ia,\n    const int* ja,\n    int m,\n    int k,\n    int n,\n    const float* b,\n    float* c,\n    CPUContext* /*context*/) {\n  float alpha = 1.0, beta = 0.;\n  mkl_scsrmm(\"N\", &m, &n, &k, &alpha, \"GLNC\",\n             acsr, ja, ia, ia+1, b, &n, &beta, c, &n);\n}\n\n}\n\n// This is Caffe's InnerProductOp, with a name that fits its purpose better.\ntemplate <typename T, class Context, class Engine=DefaultEngine>\nclass FullyConnectedOp_SPARSE final : public Operator<Context> {\n public:\n  
USE_OPERATOR_CONTEXT_FUNCTIONS;\n  FullyConnectedOp_SPARSE(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n  ~FullyConnectedOp_SPARSE() {}\n\n  bool RunOnDevice() override {\n    const auto& Xt = Input(0); // transposed X\n    const auto& Wcsr = Input(1);\n    const auto& iw = Input(2);\n    const auto& jw = Input(3);\n    // Notice that we do not need to transpose b\n    const auto& b = Input(4);\n    auto* Yt = Output(0); //transposed Y\n    // here we assume X is k-by-m\n    CAFFE_ENFORCE_EQ(Xt.ndim(), 2);\n    CAFFE_ENFORCE_EQ(b.ndim(), 1);\n    // batch size\n    int K = Xt.ndim() > 1 ? Xt.dim32(0) : 1;\n    // Feature dimension\n    int M = Xt.size() / K;\n    // number of outputs.\n    int N = iw.dim32(0)-1;\n    CAFFE_ENFORCE_EQ(N, b.dim32(0));\n    Yt->Resize(shape(N, M));\n\n    // Y' = W * X';\n    Sparse_mm<T, Context>(\n      Wcsr.template data<T>(), iw.template data<int>(),\n      jw.template data<int>(), N, K, M, Xt.template data<T>(),\n      Yt->template mutable_data<T>(), &context_);\n    // Add bias term\n    if (bias_multiplier_.size() != M) {\n      // If the helper bias multiplier is not M, reshape and fill it with one.\n      bias_multiplier_.Resize(shape(M));\n      math::Set<T, Context>(\n          M, static_cast<T>(1), bias_multiplier_.template mutable_data<T>(),\n          &context_);\n    }\n    math::Gemm<T, Context, Engine>(\n        CblasNoTrans, CblasNoTrans, N, M, 1, 1,\n        b.template data<T>(), bias_multiplier_.template data<T>(), 1,\n        Yt->template mutable_data<T>(), &context_);\n    return true;\n  }\n\n protected:\n  Tensor<Context> bias_multiplier_;\n};\n\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/experiments/operators/funhash_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_FUNHASH_OP_H_\n#define CAFFE2_OPERATORS_FUNHASH_OP_H_\n\n#include <xxhash.h>\n#include <array>\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\n#define SIGN_MAGIC 0x9e3779b97f4a7c15\n#define INDEX_MAGIC 0xf39cc0605cedc834\n\n#define USE_SIGN\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass FunHashOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  FunHashOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        num_outputs_(\n            OperatorBase::GetSingleArgument<TIndex>(\"num_outputs\", -1)),\n        num_segments_(\n            OperatorBase::GetSingleArgument<TIndex>(\"num_segments\", -1)),\n        seed_(OperatorBase::GetSingleArgument<uint64_t>(\"seed\", 0)) {\n    CAFFE_ENFORCE(\n        OperatorBase::HasArgument(\"num_outputs\"),\n        \"Argument `num_outputs` is missing.\");\n    // If alpha is provided, use adaptive hashing parameterized by alpha.\n    adaptive_ = (InputSize() == 5);\n  }\n\n  bool RunOnDevice() override {\n    const auto& val = Input(0);\n    const auto& key = Input(1);\n    const auto& seg = Input(2);\n    const auto& weight = Input(3);\n\n    TIndex num_alpha = 1;\n    if (adaptive_) {\n      const auto& alpha = Input(4);\n      num_alpha = alpha.dim(0);\n    }\n\n    const auto* seg_data = seg.template data<int>();\n\n    TIndex num_weight = weight.dim(0);\n    TIndex num_nz_ent = seg.dim(0);\n\n    TIndex n_segments = num_segments_;\n    if (num_segments_ == -1) {\n      for (TIndex i = 0; i < num_nz_ent; ++i) {\n        if (seg_data[i] > n_segments) {\n          n_segments = seg_data[i];\n        }\n      }\n      ++n_segments;\n    }\n\n    auto* output = Output(0);\n    output->Resize(n_segments, num_outputs_);\n\n    T* output_data = output->template mutable_data<T>();\n\n    memset(output_data, 0, sizeof(T) * n_segments * 
num_outputs_);\n\n    const auto* weight_data = weight.template data<T>();\n    const auto* alpha_data = adaptive_ ? Input(4).template data<T>() : 0;\n    const auto* val_data = val.template data<T>();\n    const auto* key_data = key.template data<TIndex>();\n\n    for (TIndex j = 0; j < num_nz_ent; ++j) {\n      TIndex cur_seg = seg_data[j];\n      TIndex cur_key = key_data[j];\n      T cur_val = val_data[j];\n      TIndex output_stride = cur_seg * num_outputs_;\n      for (TIndex i = 0; i < num_outputs_; ++i) {\n        T sum = 0;\n        for (TIndex k = 0; k < num_alpha; ++k) {\n          uint64_t hash;\n          // The hash function takes as input four integers:\n          // 1. feature index\n          // 2. output index\n          // 3. alpha index\n          // 4. magic number: SIGN_MAGIC for sign (-1/+1)\n          //                  INDEX_MAGIC for weight index\n          hash_data[0] = cur_key;\n          hash_data[1] = i;\n          hash_data[2] = k;\n\n          hash_data[3] = INDEX_MAGIC;\n          hash = XXH64(hash_data.data(), hash_data.size(), seed_);\n          TIndex index = hash % num_weight;\n\n          T cur_weight = weight_data[index];\n#ifdef USE_SIGN\n          hash_data[3] = SIGN_MAGIC;\n          hash = XXH64(hash_data.data(), hash_data.size(), seed_);\n          if (hash % 2) {\n            cur_weight = -cur_weight;\n          }\n#endif // USE_SIGN\n\n          if (adaptive_) {\n            sum += cur_weight * alpha_data[k];\n          } else {\n            sum += cur_weight;\n          }\n        }\n        output_data[output_stride + i] += sum * cur_val;\n      }\n    }\n\n    return true;\n  }\n\n protected:\n  TIndex num_outputs_;\n  TIndex num_segments_;\n  uint64_t seed_;\n  std::array<uint64_t, 4> hash_data;\n  bool adaptive_;\n};\n\ntemplate <typename T, class Context>\nclass FunHashGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  FunHashGradientOp(const OperatorDef& operator_def, 
Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        num_outputs_(\n            OperatorBase::GetSingleArgument<TIndex>(\"num_outputs\", -1)),\n        seed_(OperatorBase::GetSingleArgument<uint64_t>(\"seed\", 0)) {\n    adaptive_ = (InputSize() == 6);\n  }\n\n  bool RunOnDevice() override {\n    const auto& grad_out = Input(0);\n    const auto& val = Input(1);\n    const auto& key = Input(2);\n    const auto& seg = Input(3);\n    const auto& weight = Input(4);\n\n    TIndex num_alpha = 1;\n    T* grad_alpha_data = 0;\n\n    if (adaptive_) {\n      const auto& alpha = Input(5);\n      num_alpha = alpha.dim(0);\n      auto* grad_alpha = Output(1);\n      grad_alpha->ResizeLike(alpha);\n      grad_alpha_data = grad_alpha->template mutable_data<T>();\n      memset(grad_alpha_data, 0, sizeof(T) * num_alpha);\n    }\n\n    const auto* seg_data = seg.template data<int>();\n\n    TIndex num_weight = weight.dim(0);\n    TIndex num_nz_ent = seg.dim(0);\n\n    auto* grad_weight = Output(0);\n    grad_weight->ResizeLike(weight);\n    T* grad_weight_data = grad_weight->template mutable_data<T>();\n\n    const auto* grad_out_data = grad_out.template data<T>();\n    const auto* weight_data = weight.template data<T>();\n    const auto* alpha_data = adaptive_ ? 
Input(5).template data<T>() : 0;\n    const auto* val_data = val.template data<T>();\n    const auto* key_data = key.template data<TIndex>();\n\n    memset(grad_weight_data, 0, sizeof(T) * num_weight);\n\n    for (TIndex j = 0; j < num_nz_ent; ++j) {\n      TIndex cur_seg = seg_data[j];\n      TIndex cur_key = key_data[j];\n      T cur_val = val_data[j];\n      TIndex grad_out_stride = cur_seg * num_outputs_;\n      for (TIndex i = 0; i < num_outputs_; ++i) {\n        T grad_out_scale = grad_out_data[grad_out_stride + i] * cur_val;\n        for (TIndex k = 0; k < num_alpha; ++k) {\n          uint64_t hash;\n          hash_data[0] = cur_key;\n          hash_data[1] = i;\n          hash_data[2] = k;\n\n          hash_data[3] = INDEX_MAGIC;\n          hash = XXH64(hash_data.data(), hash_data.size(), seed_);\n          TIndex index = hash % num_weight;\n\n          T cur_grad_out_scale = grad_out_scale;\n#ifdef USE_SIGN\n          hash_data[3] = SIGN_MAGIC;\n          hash = XXH64(hash_data.data(), hash_data.size(), seed_);\n          if (hash % 2) {\n            cur_grad_out_scale = -cur_grad_out_scale;\n          }\n#endif // USE_SIGN\n\n          if (adaptive_) {\n            grad_alpha_data[k] += cur_grad_out_scale * weight_data[index];\n            grad_weight_data[index] += alpha_data[k] * cur_grad_out_scale;\n          } else {\n            grad_weight_data[index] += cur_grad_out_scale;\n          }\n        }\n      }\n    }\n    return true;\n  }\n\n protected:\n  TIndex num_outputs_;\n  uint64_t seed_;\n  std::array<uint64_t, 4> hash_data;\n  bool adaptive_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_FUNHASH_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/experiments/operators/sparse_funhash_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_SPARSE_FUNHASH_OP_H_\n#define CAFFE2_OPERATORS_SPARSE_FUNHASH_OP_H_\n\n#include <xxhash.h>\n#include <array>\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\n#define HASH_MAGIC 0x9e3779b97f4a7c15\n\n#define USE_SIGN\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass SparseFunHashOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SparseFunHashOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        num_outputs_(\n            OperatorBase::GetSingleArgument<TIndex>(\"num_outputs\", -1)),\n        num_segments_(\n            OperatorBase::GetSingleArgument<TIndex>(\"num_segments\", -1)),\n        seed_(OperatorBase::GetSingleArgument<uint64_t>(\"seed\", 0)) {\n    CAFFE_ENFORCE(\n        OperatorBase::HasArgument(\"num_outputs\"),\n        \"Argument `num_outputs` is missing.\");\n    // If alpha is provided, use adaptive hashing parameterized by alpha.\n    adaptive_ = (InputSize() == 5);\n  }\n\n  bool RunOnDevice() override {\n    const auto& val = Input(0);\n    const auto& key = Input(1);\n    const auto& seg = Input(2);\n    const auto& weight = Input(3);\n\n    TIndex num_alpha = 1;\n    if (adaptive_) {\n      const auto& alpha = Input(4);\n      num_alpha = alpha.dim(0);\n    }\n\n    const auto* seg_data = seg.template data<int>();\n\n    TIndex num_weight = weight.dim(0);\n    TIndex num_nz_ent = seg.dim(0);\n\n    TIndex n_segments = num_segments_;\n    if (num_segments_ == -1) {\n      for (TIndex i = 0; i < num_nz_ent; ++i) {\n        if (seg_data[i] > n_segments) {\n          n_segments = seg_data[i];\n        }\n      }\n      ++n_segments;\n    }\n\n    auto* output = Output(0);\n    output->Resize(n_segments, num_outputs_);\n\n    T* output_data = output->template mutable_data<T>();\n\n    memset(output_data, 0, sizeof(T) * n_segments * num_outputs_);\n\n  
  const auto* weight_data = weight.template data<T>();\n    const auto* alpha_data = adaptive_ ? Input(4).template data<T>() : 0;\n    const auto* val_data = val.template data<T>();\n    const auto* key_data = key.template data<TIndex>();\n\n    for (TIndex j = 0; j < num_nz_ent; ++j) {\n      TIndex cur_seg = seg_data[j];\n      TIndex cur_key = key_data[j];\n      T cur_val = val_data[j];\n      TIndex output_stride = cur_seg * num_outputs_;\n      for (TIndex i = 0; i < num_outputs_; ++i) {\n        T sum = 0;\n        for (TIndex k = 0; k < num_alpha; ++k) {\n          // The hash function takes as input three integers:\n          // 1. feature index\n          // 2. output index\n          // 3. alpha index\n          // 4. magic number to improve hashing\n          hash_data[0] = cur_key;\n          hash_data[1] = i;\n          hash_data[2] = k;\n          hash_data[3] = HASH_MAGIC;\n\n          uint64_t hash = XXH64(hash_data.data(), hash_data.size(), seed_);\n\n#ifdef USE_SIGN\n          // Use the least significant bit for sign, the rest for weights.\n          TIndex index = (hash >> 1) % num_weight;\n          T cur_weight = weight_data[index];\n          if (hash & 1) {\n            cur_weight = -cur_weight;\n          }\n#else\n          TIndex index = hash % num_weight;\n          T cur_weight = weight_data[index];\n#endif\n\n          if (adaptive_) {\n            sum += cur_weight * alpha_data[k];\n          } else {\n            sum += cur_weight;\n          }\n        }\n        output_data[output_stride + i] += sum * cur_val;\n      }\n    }\n\n    return true;\n  }\n\n protected:\n  TIndex num_outputs_;\n  TIndex num_segments_;\n  uint64_t seed_;\n  std::array<uint64_t, 4> hash_data;\n  bool adaptive_;\n};\n\ntemplate <typename T, class Context>\nclass SparseFunHashGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SparseFunHashGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : 
Operator<Context>(operator_def, ws),\n        num_outputs_(\n            OperatorBase::GetSingleArgument<TIndex>(\"num_outputs\", -1)),\n        seed_(OperatorBase::GetSingleArgument<uint64_t>(\"seed\", 0)) {\n    adaptive_ = (InputSize() == 6);\n  }\n\n  bool RunOnDevice() override {\n    const auto& grad_out = Input(0);\n    const auto& val = Input(1);\n    const auto& key = Input(2);\n    const auto& seg = Input(3);\n    const auto& weight = Input(4);\n\n    TIndex num_alpha = 1;\n    T* grad_alpha_data = 0;\n\n    if (adaptive_) {\n      const auto& alpha = Input(5);\n      num_alpha = alpha.dim(0);\n      auto* grad_alpha = Output(2);\n      grad_alpha->ResizeLike(alpha);\n      grad_alpha_data = grad_alpha->template mutable_data<T>();\n      memset(grad_alpha_data, 0, sizeof(T) * num_alpha);\n    }\n\n    const auto* seg_data = seg.template data<int>();\n\n    TIndex num_weight = weight.dim(0);\n    TIndex num_nz_ent = seg.dim(0);\n\n    TIndex grad_weight_size = num_nz_ent * num_outputs_ * num_alpha;\n    auto* grad_weight_val = Output(0);\n    grad_weight_val->Resize(grad_weight_size);\n    T* grad_weight_val_data = grad_weight_val->template mutable_data<T>();\n\n    auto* grad_weight_ind = Output(1);\n    grad_weight_ind->Resize(grad_weight_size);\n    auto* grad_weight_ind_data =\n        grad_weight_ind->template mutable_data<TIndex>();\n\n    const auto* grad_out_data = grad_out.template data<T>();\n    const auto* weight_data = weight.template data<T>();\n    const auto* alpha_data = adaptive_ ? 
Input(5).template data<T>() : 0;\n    const auto* val_data = val.template data<T>();\n    const auto* key_data = key.template data<TIndex>();\n\n    TIndex w_ind = 0;\n    for (TIndex j = 0; j < num_nz_ent; ++j) {\n      TIndex cur_seg = seg_data[j];\n      TIndex cur_key = key_data[j];\n      T cur_val = val_data[j];\n      TIndex grad_out_stride = cur_seg * num_outputs_;\n      for (TIndex i = 0; i < num_outputs_; ++i) {\n        T grad_out_scale = grad_out_data[grad_out_stride + i] * cur_val;\n        for (TIndex k = 0; k < num_alpha; ++k) {\n          hash_data[0] = cur_key;\n          hash_data[1] = i;\n          hash_data[2] = k;\n          hash_data[3] = HASH_MAGIC;\n\n          uint64_t hash = XXH64(hash_data.data(), hash_data.size(), seed_);\n\n          T cur_grad_out_scale = grad_out_scale;\n#ifdef USE_SIGN\n          TIndex index = (hash >> 1) % num_weight;\n          if (hash & 1) {\n            cur_grad_out_scale = -cur_grad_out_scale;\n          }\n#else\n          TIndex index = hash % num_weight;\n#endif\n\n          if (adaptive_) {\n            grad_alpha_data[k] += cur_grad_out_scale * weight_data[index];\n            grad_weight_val_data[w_ind] = alpha_data[k] * cur_grad_out_scale;\n          } else {\n            grad_weight_val_data[w_ind] = cur_grad_out_scale;\n          }\n          grad_weight_ind_data[w_ind] = index;\n          ++w_ind;\n        }\n      }\n    }\n    return true;\n  }\n\n protected:\n  TIndex num_outputs_;\n  uint64_t seed_;\n  std::array<uint64_t, 4> hash_data;\n  bool adaptive_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_SPARSE_FUNHASH_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/experiments/operators/sparse_matrix_reshape_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_SPARSE_MATRIX_RESHAPE_H_\n#define CAFFE2_OPERATORS_SPARSE_MATRIX_RESHAPE_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass SparseMatrixReshapeOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SparseMatrixReshapeOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {\n    CAFFE_ENFORCE(\n        OperatorBase::HasArgument(\"old_shape\"),\n        \"Argument `old_shape` is missing.\");\n    CAFFE_ENFORCE(\n        OperatorBase::HasArgument(\"new_shape\"),\n        \"Argument `new_shape` is missing.\");\n\n    vector<TIndex> old_shape =\n        OperatorBase::GetRepeatedArgument<TIndex>(\"old_shape\");\n    vector<TIndex> new_shape =\n        OperatorBase::GetRepeatedArgument<TIndex>(\"new_shape\");\n\n    CAFFE_ENFORCE(\n        old_shape.size() == 2,\n        \"Argument `old_shape` must contain exactly two integers.\");\n    CAFFE_ENFORCE(\n        new_shape.size() == 2,\n        \"Argument `new_shape` must contain exactly two integers.\");\n\n    CAFFE_ENFORCE(\n        old_shape[1] > 0,\n        \"The second dimension in argument `old_shape` must be positive.\");\n\n    old_stride_ = old_shape[1];\n\n    if (old_shape[0] == -1) {\n      CAFFE_ENFORCE(\n          new_shape[1] > 0,\n          \"The second dimension in `new_shape` must be positive.\");\n    } else {\n      CAFFE_ENFORCE(\n          old_shape[0] > 0,\n          \"The first dimension in `old_shape` must be positive.\");\n\n      TIndex matrix_size = old_shape[0] * old_shape[1];\n\n      if (new_shape[0] == -1) {\n        CAFFE_ENFORCE(\n            new_shape[1] > 0,\n            \"Only one dimension in argument `new_shape` can be -1.\");\n        CAFFE_ENFORCE(\n            matrix_size % new_shape[1] == 0,\n            \"Argument `new_shape` does not agree with 
`old_shape`.\");\n      } else {\n        CAFFE_ENFORCE(\n            new_shape[0] > 0 && (new_shape[1] == -1 || new_shape[1] > 0),\n            \"Dimensions in argument `new_shape` must be positive or -1.\");\n        if (new_shape[1] == -1) {\n          CAFFE_ENFORCE(\n              matrix_size % new_shape[0] == 0,\n              \"Argument `new_shape` does not agree with `old_shape`.\");\n          new_shape[1] = matrix_size / new_shape[0];\n        } else {\n          CAFFE_ENFORCE(\n              new_shape[0] * new_shape[1] == matrix_size,\n              \"Argument `new_shape` does not agree with `old_shape`.\");\n        }\n      }\n    }\n    new_stride_ = new_shape[1];\n  }\n\n  bool RunOnDevice() override {\n    auto& old_col = Input(0);\n    CAFFE_ENFORCE(old_col.ndim() == 1, \"Row index tensor must be 1-D.\");\n    auto& old_row = Input(1);\n    CAFFE_ENFORCE(old_row.ndim() == 1, \"Column index tensor must be 1-D.\");\n\n    const auto nnz = old_col.size();\n    CAFFE_ENFORCE(\n        old_row.size() == nnz,\n        \"Column and row tensors must have the same size.\");\n\n    auto* new_col = Output(0);\n    auto* new_row = Output(1);\n    new_col->Resize(nnz);\n    new_row->Resize(nnz);\n\n    const auto* old_col_data = old_col.template data<TIndex>();\n    const auto* old_row_data = old_row.template data<int>();\n\n    auto* new_col_data = new_col->template mutable_data<TIndex>();\n    auto* new_row_data = new_row->template mutable_data<int>();\n\n    for (int i = 0; i < nnz; ++i) {\n      TIndex offset = old_row_data[i] * old_stride_ + old_col_data[i];\n      new_row_data[i] = offset / new_stride_;\n      new_col_data[i] = offset % new_stride_;\n    }\n\n    return true;\n  }\n\n private:\n  TIndex old_stride_;\n  TIndex new_stride_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_SPARSE_MATRIX_RESHAPE_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/experiments/operators/tt_contraction_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_TT_CONTRACTION_OP_H_\n#define CAFFE2_OPERATORS_TT_CONTRACTION_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context, class Engine = DefaultEngine>\nclass TTContractionOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  TTContractionOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        K_(OperatorBase::GetSingleArgument<TIndex>(\"K\", 0)),\n        M_(OperatorBase::GetSingleArgument<TIndex>(\"M\", 0)),\n        N_(OperatorBase::GetSingleArgument<TIndex>(\"N\", 0)) {\n    CAFFE_ENFORCE(OperatorBase::HasArgument(\"K\"), \"Argument `K` is missing.\");\n    CAFFE_ENFORCE(OperatorBase::HasArgument(\"M\"), \"Argument `M` is missing.\");\n    CAFFE_ENFORCE(OperatorBase::HasArgument(\"N\"), \"Argument `N` is missing.\");\n  }\n\n  bool RunOnDevice() override {\n    const auto& A = Input(0);\n    const auto& B = Input(1);\n    auto* C = Output(0);\n\n    CAFFE_ENFORCE(A.ndim() == 2, A.ndim());\n\n    TIndex A_size = A.size_from_dim(0);\n    TIndex B_size = B.size_from_dim(0);\n\n    CAFFE_ENFORCE(\n        K_ * M_ == A_size,\n        \"Argument `K` and `M` do not agree with the size of A.\");\n\n    CAFFE_ENFORCE(\n        B_size % (K_ * N_) == 0,\n        \"Argument `K` and `N` do not agree with the size of B.\");\n\n    TIndex D_ = B_size / (K_ * N_);\n\n    TIndex C_size = D_ * M_ * N_;\n    C->Resize(vector<TIndex>{C_size});\n\n    TIndex B_stride = K_ * N_;\n    TIndex C_stride = M_ * N_;\n\n    const T* A_data = A.template data<T>();\n    const T* B_data = B.template data<T>();\n    T* C_data = C->template mutable_data<T>();\n\n    for (TIndex B_index = 0; B_index < B_size; B_index += B_stride) {\n      math::Gemm<T, Context, Engine>(\n          CblasTrans,\n          CblasNoTrans,\n          M_, N_, K_, 1,\n          
A_data,\n          B_data + B_index,\n          0,\n          C_data,\n          &context_);\n      C_data += C_stride;\n    }\n\n    return true;\n  }\n\n protected:\n  TIndex K_;\n  TIndex M_;\n  TIndex N_;\n};\n\ntemplate <typename T, class Context, class Engine = DefaultEngine>\nclass TTContractionGradientOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  TTContractionGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        K_(OperatorBase::GetSingleArgument<TIndex>(\"K\", 0)),\n        M_(OperatorBase::GetSingleArgument<TIndex>(\"M\", 0)),\n        N_(OperatorBase::GetSingleArgument<TIndex>(\"N\", 0)) {}\n\n  bool RunOnDevice() override {\n    const auto& G = Input(0);\n    const auto& A = Input(1);\n    const auto& B = Input(2);\n    auto* dA = Output(0);\n    auto* dB = Output(1);\n\n    TIndex G_size = G.size_from_dim(0);\n    TIndex D_ = G_size / (M_ * N_);\n\n    TIndex dB_size = D_ * K_ * N_;\n\n    dA->Resize(A.dims());\n    dB->Resize(B.dims());\n\n    TIndex B_stride = K_ * N_;\n    TIndex G_stride = M_ * N_;\n\n    const T* G_data = G.template data<T>();\n    const T* A_data = A.template data<T>();\n    const T* B_data = B.template data<T>();\n\n    T* dA_data = dA->template mutable_data<T>();\n    T* dB_data = dB->template mutable_data<T>();\n\n    const T* G_ptr = G_data;\n    for (TIndex B_index = 0; B_index < dB_size; B_index += B_stride) {\n      math::Gemm<T, Context, Engine>(\n          CblasNoTrans,\n          CblasTrans,\n          K_, M_, N_, 1,\n          B_data + B_index,\n          G_ptr,\n          B_index == 0 ? 
0 : 1,\n          dA_data,\n          &context_);\n      G_ptr += G_stride;\n    }\n\n    G_ptr = G_data;\n    for (TIndex B_index = 0; B_index < dB_size; B_index += B_stride) {\n      math::Gemm<T, Context, Engine>(\n          CblasNoTrans,\n          CblasNoTrans,\n          K_, N_, M_, 1,\n          A_data,\n          G_ptr,\n          0,\n          dB_data + B_index,\n          &context_);\n      G_ptr += G_stride;\n    }\n\n    return true;\n  }\n\n protected:\n  TIndex K_;\n  TIndex M_;\n  TIndex N_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_TT_CONTRACTION_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/experiments/operators/tt_pad_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_TT_PAD_OP_H_\n#define CAFFE2_OPERATORS_TT_PAD_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context, class Engine = DefaultEngine>\nclass TTPadOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  TTPadOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        scale_(OperatorBase::GetSingleArgument<TIndex>(\"scale\", 0)) {\n    CAFFE_ENFORCE(\n        OperatorBase::HasArgument(\"scale\"), \"Argument `scale` is missing.\");\n  }\n\n  bool RunOnDevice() override {\n    const auto& X = Input(0);\n    auto* X_pad = Output(0);\n    CAFFE_ENFORCE(&X == X_pad);\n\n    CAFFE_ENFORCE(X.ndim() == 2, X.ndim());\n\n    auto X_dim0 = X.dim(0);\n    auto X_dim1 = X.dim(1);\n\n    auto* X_orig_dim0 = Output(1);\n    X_orig_dim0->Resize(1);\n    *X_orig_dim0->template mutable_data<TIndex>() = X_dim0;\n\n    if (X_dim0 % scale_ != 0) {\n      TIndex padded_dim0 = (X_dim0 / scale_ + 1) * scale_;\n      auto dim0_diff = padded_dim0 - X_dim0;\n      // set growthPct to the upper bound percentage: (100 * scale_ / X_dim0)\n      X_pad->template Extend(dim0_diff, 100 * scale_ / X_dim0, &context_);\n\n      auto* X_pad_data = X_pad->template mutable_data<T>();\n      TIndex X_size = X_dim0 * X_dim1;\n      memset(X_pad_data + X_size, 0, dim0_diff * X_dim1 * sizeof(T));\n    }\n\n    return true;\n  }\n\n protected:\n  TIndex scale_;\n};\n\ntemplate <typename T, class Context, class Engine = DefaultEngine>\nclass TTPadGradientOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  TTPadGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    const auto& G = Input(0);\n    auto* output = Output(0);\n    CAFFE_ENFORCE(&G == output);\n\n 
   auto old_dim0 = *Input(1).template data<TIndex>();\n    auto new_dim0 = G.dim(0);\n    auto dim1 = G.dim(1);\n\n    if (old_dim0 < new_dim0) {\n      output->Shrink(old_dim0);\n    }\n\n    return true;\n  }\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_TT_PAD_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/image/image_input_op.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#ifndef CAFFE2_IMAGE_IMAGE_INPUT_OP_H_\n#define CAFFE2_IMAGE_IMAGE_INPUT_OP_H_\n\n#include <opencv2/opencv.hpp>\n\n#include <iostream>\n#include <algorithm>\n\n#include \"caffe/proto/caffe.pb.h\"\n#include \"caffe2/core/db.h\"\n#include \"caffe2/utils/cast.h\"\n#include \"caffe2/utils/math.h\"\n#include \"caffe2/utils/thread_pool.h\"\n#include \"caffe2/operators/prefetch_op.h\"\n#include \"caffe2/image/transform_gpu.h\"\n\nnamespace caffe2 {\n\nclass CUDAContext;\n\ntemplate <class Context>\nclass ImageInputOp final\n    : public PrefetchOperator<Context> {\n  // SINGLE_LABEL: single integer label for multi-class classification\n  // MULTI_LABEL_SPARSE: sparse active label indices for multi-label classification\n  // MULTI_LABEL_DENSE: dense label embedding vector for label embedding regression\n  // MULTI_LABEL_WEIGHTED_SPARSE: sparse active label indices with per-label weights\n  // for multi-label classification\n  enum LABEL_TYPE {\n    SINGLE_LABEL = 0,\n    MULTI_LABEL_SPARSE = 1,\n    MULTI_LABEL_DENSE = 2,\n    MULTI_LABEL_WEIGHTED_SPARSE = 3\n  };\n\n  // INCEPTION_STYLE: Random crop with size 8% - 100% image area and aspect\n  // ratio in [3/4, 4/3]. 
Reference: GoogleNet paper\n  enum SCALE_JITTER_TYPE {\n    NO_SCALE_JITTER = 0,\n    INCEPTION_STYLE = 1\n    // TODO(zyan3): ResNet-style random scale jitter\n  };\n\n public:\n  using OperatorBase::OutputSize;\n  using PrefetchOperator<Context>::context_;\n  using PrefetchOperator<Context>::prefetch_thread_;\n  explicit ImageInputOp(const OperatorDef& operator_def,\n                                    Workspace* ws);\n  ~ImageInputOp() {\n    PrefetchOperator<Context>::Finalize();\n  }\n\n  bool Prefetch() override;\n  bool CopyPrefetched() override;\n\n private:\n  using BoundingBox = struct {\n    bool valid;\n    int ymin;\n    int xmin;\n    int height;\n    int width;\n  };\n\n  // Structure to store per-image information\n  // This can be modified by the DecodeAnd* so needs\n  // to be privatized per launch.\n  using PerImageArg = struct {\n    BoundingBox bounding_params;\n  };\n\n  bool GetImageAndLabelAndInfoFromDBValue(\n      const string& value, cv::Mat* img, PerImageArg& info, int item_id,\n      std::mt19937* randgen);\n  void DecodeAndTransform(\n      const std::string& value, float *image_data, int item_id,\n      const int channels, std::size_t thread_index);\n  void DecodeAndTransposeOnly(\n      const std::string& value, uint8_t *image_data, int item_id,\n      const int channels, std::size_t thread_index);\n\n  unique_ptr<db::DBReader> owned_reader_;\n  const db::DBReader* reader_;\n  CPUContext cpu_context_;\n  TensorCPU prefetched_image_;\n  TensorCPU prefetched_label_;\n  vector<TensorCPU> prefetched_additional_outputs_;\n  Tensor<Context> prefetched_image_on_device_;\n  Tensor<Context> prefetched_label_on_device_;\n  vector<Tensor<Context>> prefetched_additional_outputs_on_device_;\n  // Default parameters for images\n  PerImageArg default_arg_;\n  int batch_size_;\n  LABEL_TYPE label_type_;\n  int num_labels_;\n\n  bool color_;\n  bool color_jitter_;\n  float img_saturation_;\n  float img_brightness_;\n  float img_contrast_;\n  bool 
color_lighting_;\n  float color_lighting_std_;\n  std::vector<std::vector<float>> color_lighting_eigvecs_;\n  std::vector<float> color_lighting_eigvals_;\n  SCALE_JITTER_TYPE scale_jitter_type_;\n  int scale_;\n  // Minsize is similar to scale except that it will only\n  // force the image to scale up if it is too small. In other words,\n  // it ensures that both dimensions of the image are at least minsize_\n  int minsize_;\n  bool warp_;\n  int crop_;\n  std::vector<float> mean_;\n  std::vector<float> std_;\n  Tensor<Context> mean_gpu_;\n  Tensor<Context> std_gpu_;\n  bool mirror_;\n  bool is_test_;\n  bool use_caffe_datum_;\n  bool gpu_transform_;\n  bool mean_std_copied_ = false;\n\n  // thread pool for parse + decode\n  int num_decode_threads_;\n  int additional_inputs_offset_;\n  int additional_inputs_count_;\n  std::shared_ptr<TaskThreadPool> thread_pool_;\n\n  // Output type for GPU transform path\n  TensorProto_DataType output_type_;\n\n  // random minsize\n  vector<int> random_scale_;\n  bool random_scaling_;\n\n\n  // Working variables\n  std::vector<std::mt19937> randgen_per_thread_;\n};\n\ntemplate <class Context>\nImageInputOp<Context>::ImageInputOp(\n    const OperatorDef& operator_def,\n    Workspace* ws)\n    : PrefetchOperator<Context>(operator_def, ws),\n      reader_(nullptr),\n      prefetched_additional_outputs_(OutputSize() - 2),\n      prefetched_additional_outputs_on_device_(OutputSize() - 2),\n      batch_size_(\n          OperatorBase::template GetSingleArgument<int>(\"batch_size\", 0)),\n      label_type_(static_cast<LABEL_TYPE>(\n          OperatorBase::template GetSingleArgument<int>(\"label_type\", 0))),\n      num_labels_(\n          OperatorBase::template GetSingleArgument<int>(\"num_labels\", 0)),\n      color_(OperatorBase::template GetSingleArgument<int>(\"color\", 1)),\n      color_jitter_(\n          OperatorBase::template GetSingleArgument<int>(\"color_jitter\", 0)),\n      img_saturation_(OperatorBase::template 
GetSingleArgument<float>(\n          \"img_saturation\",\n          0.4)),\n      img_brightness_(OperatorBase::template GetSingleArgument<float>(\n          \"img_brightness\",\n          0.4)),\n      img_contrast_(\n          OperatorBase::template GetSingleArgument<float>(\"img_contrast\", 0.4)),\n      color_lighting_(\n          OperatorBase::template GetSingleArgument<int>(\"color_lighting\", 0)),\n      color_lighting_std_(OperatorBase::template GetSingleArgument<float>(\n          \"color_lighting_std\",\n          0.1)),\n      scale_jitter_type_(static_cast<SCALE_JITTER_TYPE>(\n          OperatorBase::template GetSingleArgument<int>(\n              \"scale_jitter_type\",\n              0))),\n      scale_(OperatorBase::template GetSingleArgument<int>(\"scale\", -1)),\n      minsize_(OperatorBase::template GetSingleArgument<int>(\"minsize\", -1)),\n      warp_(OperatorBase::template GetSingleArgument<int>(\"warp\", 0)),\n      crop_(OperatorBase::template GetSingleArgument<int>(\"crop\", -1)),\n      mirror_(OperatorBase::template GetSingleArgument<int>(\"mirror\", 0)),\n      is_test_(OperatorBase::template GetSingleArgument<int>(\n          OpSchema::Arg_IsTest,\n          0)),\n      use_caffe_datum_(\n          OperatorBase::template GetSingleArgument<int>(\"use_caffe_datum\", 0)),\n      gpu_transform_(OperatorBase::template GetSingleArgument<int>(\n          \"use_gpu_transform\",\n          0)),\n      num_decode_threads_(\n          OperatorBase::template GetSingleArgument<int>(\"decode_threads\", 4)),\n      thread_pool_(std::make_shared<TaskThreadPool>(num_decode_threads_)),\n      // output type only supported with CUDA and use_gpu_transform for now\n      output_type_(\n          cast::GetCastDataType(ArgumentHelper(operator_def), \"output_type\")),\n      random_scale_(\n          OperatorBase::template GetRepeatedArgument<int>(\"random_scale\", {-1,-1})) {\n  if ((random_scale_[0] == -1) || (random_scale_[1] == -1)) {\n    random_scaling_ = 
false;\n  } else {\n    random_scaling_ = true;\n    minsize_ = random_scale_[0];\n  }\n\n  mean_ = OperatorBase::template GetRepeatedArgument<float>(\n    \"mean_per_channel\",\n    {OperatorBase::template GetSingleArgument<float>(\"mean\", 0.)});\n\n  std_ = OperatorBase::template GetRepeatedArgument<float>(\n    \"std_per_channel\",\n    {OperatorBase::template GetSingleArgument<float>(\"std\", 1.)});\n\n  vector<int> additional_output_sizes =\n      OperatorBase::template GetRepeatedArgument<int>(\n          \"output_sizes\", vector<int>(OutputSize() - 2, 1));\n  additional_inputs_count_ = OutputSize() - 2;\n\n  default_arg_.bounding_params = {\n    false,\n    OperatorBase::template GetSingleArgument<int>(\"bounding_ymin\", -1),\n    OperatorBase::template GetSingleArgument<int>(\"bounding_xmin\", -1),\n    OperatorBase::template GetSingleArgument<int>(\"bounding_height\", -1),\n    OperatorBase::template GetSingleArgument<int>(\"bounding_width\", -1),\n  };\n\n  if (operator_def.input_size() == 0) {\n    LOG(ERROR) << \"You are using an old ImageInputOp format that creates \"\n                       \"a local db reader. 
Consider moving to the new style \"\n                       \"that takes in a DBReader blob instead.\";\n    string db_name =\n        OperatorBase::template GetSingleArgument<string>(\"db\", \"\");\n    CAFFE_ENFORCE_GT(db_name.size(), 0, \"Must specify a db name.\");\n    owned_reader_.reset(new db::DBReader(\n        OperatorBase::template GetSingleArgument<string>(\n            \"db_type\", \"leveldb\"),\n        db_name));\n    reader_ = owned_reader_.get();\n  }\n\n  // hard-coded PCA eigenvectors and eigenvalues, based on RBG channel order\n  color_lighting_eigvecs_.push_back(\n    std::vector<float>{-144.7125, 183.396, 102.2295});\n  color_lighting_eigvecs_.push_back(\n    std::vector<float>{-148.104, -1.1475, -207.57});\n  color_lighting_eigvecs_.push_back(\n    std::vector<float>{-148.818, -177.174, 107.1765});\n\n  color_lighting_eigvals_ = std::vector<float>{0.2175, 0.0188, 0.0045};\n\n  CAFFE_ENFORCE_GT(batch_size_, 0, \"Batch size should be nonnegative.\");\n  if (use_caffe_datum_) {\n    CAFFE_ENFORCE_EQ(label_type_, SINGLE_LABEL,\n      \"Caffe datum only supports single integer label\");\n  }\n  if (label_type_ !=  SINGLE_LABEL) {\n    CAFFE_ENFORCE_GT(num_labels_, 0,\n      \"Number of labels must be set for using either sparse label indices or dense label embedding.\");\n  }\n  if (label_type_ == MULTI_LABEL_WEIGHTED_SPARSE) {\n    additional_inputs_offset_ = 3;\n  } else {\n    additional_inputs_offset_ = 2;\n  }\n  CAFFE_ENFORCE((scale_ > 0) != (minsize_ > 0),\n                \"Must provide one and only one of scaling or minsize\");\n  CAFFE_ENFORCE_GT(crop_, 0, \"Must provide the cropping value.\");\n  CAFFE_ENFORCE_GE(\n    scale_ > 0 ? scale_ : minsize_,\n    crop_, \"The scale/minsize value must be no smaller than the crop value.\");\n\n  CAFFE_ENFORCE_EQ(\n      mean_.size(),\n      std_.size(),\n      \"The mean and std. 
dev vectors must be of the same size.\");\n  CAFFE_ENFORCE(mean_.size() == 1 || mean_.size() == 3,\n                \"The mean and std. dev vectors must be of size 1 or 3\");\n  CAFFE_ENFORCE(\n      !use_caffe_datum_ || OutputSize() == 2,\n      \"There can only be 2 outputs if the Caffe datum format is used\");\n  CAFFE_ENFORCE(\n      additional_output_sizes.size() == OutputSize() - 2,\n      \"If the output sizes are specified, they must be specified for all \"\n      \"additional outputs\");\n\n  CAFFE_ENFORCE(random_scale_.size() == 2,\n      \"Must provide [scale_min, scale_max]\");\n  CAFFE_ENFORCE_GE(random_scale_[1], random_scale_[0],\n      \"random scale must provide a range [min, max]\");\n\n  if (default_arg_.bounding_params.ymin < 0\n      || default_arg_.bounding_params.xmin < 0\n      || default_arg_.bounding_params.height < 0\n      || default_arg_.bounding_params.width < 0) {\n    default_arg_.bounding_params.valid = false;\n  } else {\n    default_arg_.bounding_params.valid = true;\n  }\n\n  if (mean_.size() == 1) {\n    // We are going to extend to 3 using the first value\n    mean_.resize(3, mean_[0]);\n    std_.resize(3, std_[0]);\n  }\n\n  LOG(INFO) << \"Creating an image input op with the following setting: \";\n  LOG(INFO) << \"    Using \" << num_decode_threads_ << \" CPU threads;\";\n  if (gpu_transform_) {\n    LOG(INFO) << \"    Performing transformation on GPU\";\n  }\n  LOG(INFO) << \"    Outputting in batches of \" << batch_size_ << \" images;\";\n  LOG(INFO) << \"    Treating input image as \"\n            << (color_ ? 
\"color \" : \"grayscale \") << \"image;\";\n  if (default_arg_.bounding_params.valid) {\n    LOG(INFO) << \"    Applying a default bounding box of Y [\"\n              << default_arg_.bounding_params.ymin << \"; \"\n              << default_arg_.bounding_params.ymin +\n      default_arg_.bounding_params.height\n              << \") x X [\"\n              << default_arg_.bounding_params.xmin << \"; \"\n              << default_arg_.bounding_params.xmin +\n      default_arg_.bounding_params.width\n              << \")\";\n  }\n  if (scale_ > 0 && !random_scaling_) {\n    LOG(INFO) << \"    Scaling image to \" << scale_\n              << (warp_ ? \" with \" : \" without \") << \"warping;\";\n  } else {\n    if (random_scaling_) {\n      // randomly set min_size_ for each image\n      LOG(INFO) << \"    Randomly scaling shortest side between \"\n                << random_scale_[0] << \" and \"\n                << random_scale_[1];\n    } else {\n      // Here, minsize_ > 0\n      LOG(INFO) << \"    Ensuring minimum image size of \" << minsize_\n                << (warp_ ? \" with \" : \" without \") << \"warping;\";\n    }\n  }\n  LOG(INFO) << \"    \" << (is_test_ ? \"Central\" : \"Random\")\n            << \" cropping image to \" << crop_\n            << (mirror_ ? 
\" with \" : \" without \") << \"random mirroring;\";\n  LOG(INFO) << \"Label Type: \" << label_type_;\n  LOG(INFO) << \"Num Labels: \" << num_labels_;\n\n  auto mit = mean_.begin();\n  auto sit = std_.begin();\n\n  for (int i = 0;\n       mit != mean_.end() && sit != std_.end();\n       ++mit, ++sit, ++i) {\n    LOG(INFO) << \"    Default [Channel \" << i << \"] Subtract mean \" << *mit\n              << \" and divide by std \" << *sit << \".\";\n    // We actually will use the inverse of std, so inverse it here\n    *sit = 1.f / *sit;\n  }\n  LOG(INFO) << \"    Outputting images as \"\n            << OperatorBase::template GetSingleArgument<string>(\"output_type\", \"unknown\") << \".\";\n\n  std::mt19937 meta_randgen(time(nullptr));\n  for (int i = 0; i < num_decode_threads_; ++i) {\n    randgen_per_thread_.emplace_back(meta_randgen());\n  }\n  prefetched_image_.Resize(\n      TIndex(batch_size_),\n      TIndex(crop_),\n      TIndex(crop_),\n      TIndex(color_ ? 3 : 1));\n  if (label_type_ != SINGLE_LABEL) {\n    prefetched_label_.Resize(TIndex(batch_size_), TIndex(num_labels_));\n  } else {\n    prefetched_label_.Resize(vector<TIndex>(1, batch_size_));\n  }\n\n  for (int i = 0; i < additional_output_sizes.size(); ++i) {\n    prefetched_additional_outputs_[i].Resize(\n        TIndex(batch_size_), TIndex(additional_output_sizes[i]));\n  }\n}\n\n// Inception-stype scale jittering\ntemplate <class Context>\nbool RandomSizedCropping(\n  cv::Mat* img,\n  const int crop,\n  std::mt19937* randgen\n) {\n  cv::Mat scaled_img;\n  bool inception_scale_jitter = false;\n  int im_height = img->rows, im_width = img->cols;\n  int area = im_height * im_width;\n  std::uniform_real_distribution<> area_dis(0.08, 1.0);\n  std::uniform_real_distribution<> aspect_ratio_dis(3.0 / 4.0, 4.0 / 3.0);\n\n  cv::Mat cropping;\n  for (int i = 0; i < 10; ++i) {\n    int target_area = int(ceil(area_dis(*randgen) * area));\n    float aspect_ratio = aspect_ratio_dis(*randgen);\n    int nh = 
floor(std::sqrt(((float)target_area / aspect_ratio)));\n    int nw = floor(std::sqrt(((float)target_area * aspect_ratio)));\n    if (nh >= 1 && nh <= im_height && nw >=1 && nw <= im_width) {\n      int height_offset = std::uniform_int_distribution<>(\n        0, im_height - nh)(*randgen);\n      int width_offset = std::uniform_int_distribution<>(\n        0,im_width - nw)(*randgen);\n      cv::Rect ROI(width_offset, height_offset, nw, nh);\n      cropping = (*img)(ROI);\n      cv::resize(\n          cropping,\n          scaled_img,\n          cv::Size(crop, crop),\n          0,\n          0,\n          cv::INTER_AREA);\n      *img = scaled_img;\n      inception_scale_jitter = true;\n      break;\n    }\n  }\n  return inception_scale_jitter;\n}\n\ntemplate <class Context>\nbool ImageInputOp<Context>::GetImageAndLabelAndInfoFromDBValue(\n    const string& value,\n    cv::Mat* img,\n    PerImageArg& info,\n    int item_id,\n    std::mt19937* randgen) {\n  //\n  // recommend using --caffe2_use_fatal_for_enforce=1 when using ImageInputOp\n  // as this function runs on a worker thread and the exceptions from\n  // CAFFE_ENFORCE are silently dropped by the thread worker functions\n  //\n  cv::Mat src;\n\n  // Use the default information for images\n  info = default_arg_;\n  if (use_caffe_datum_) {\n    // The input is a caffe datum format.\n    caffe::Datum datum;\n    CAFFE_ENFORCE(datum.ParseFromString(value));\n\n    prefetched_label_.mutable_data<int>()[item_id] = datum.label();\n    if (datum.encoded()) {\n      // encoded image in datum.\n      src = cv::imdecode(\n          cv::Mat(\n              1,\n              datum.data().size(),\n              CV_8UC1,\n              const_cast<char*>(datum.data().data())),\n          color_ ? 
CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);\n    } else {\n      // Raw image in datum.\n      CAFFE_ENFORCE(datum.channels() == 3 || datum.channels() == 1);\n\n      int src_c = datum.channels();\n      src.create(\n          datum.height(), datum.width(), (src_c == 3) ? CV_8UC3 : CV_8UC1);\n\n      if (src_c == 1) {\n        memcpy(src.ptr<uchar>(0), datum.data().data(), datum.data().size());\n      } else {\n        // Datum stores things in CHW order, let's do HWC for images to make\n        // things more consistent with conventional image storage.\n        for (int c = 0; c < 3; ++c) {\n          const char* datum_buffer =\n              datum.data().data() + datum.height() * datum.width() * c;\n          uchar* ptr = src.ptr<uchar>(0) + c;\n          for (int h = 0; h < datum.height(); ++h) {\n            for (int w = 0; w < datum.width(); ++w) {\n              *ptr = *(datum_buffer++);\n              ptr += 3;\n            }\n          }\n        }\n      }\n    }\n  } else {\n    // The input is a caffe2 format.\n    TensorProtos protos;\n    CAFFE_ENFORCE(protos.ParseFromString(value));\n    const TensorProto& image_proto = protos.protos(0);\n    const TensorProto& label_proto = protos.protos(1);\n    vector<TensorProto> additional_output_protos;\n    int start = additional_inputs_offset_;\n    int end = start + additional_inputs_count_;\n    for (int i = start; i < end; ++i) {\n      additional_output_protos.push_back(protos.protos(i));\n    }\n\n    if (protos.protos_size() == end + 1) {\n      // We have bounding box information\n      const TensorProto& bounding_proto = protos.protos(end);\n      DCHECK_EQ(bounding_proto.data_type(), TensorProto::INT32);\n      DCHECK_EQ(bounding_proto.int32_data_size(), 4);\n      info.bounding_params.valid = true;\n      info.bounding_params.ymin = bounding_proto.int32_data(0);\n      info.bounding_params.xmin = bounding_proto.int32_data(1);\n      info.bounding_params.height = bounding_proto.int32_data(2);\n    
  info.bounding_params.width = bounding_proto.int32_data(3);\n    }\n\n    if (image_proto.data_type() == TensorProto::STRING) {\n      // encoded image string.\n      DCHECK_EQ(image_proto.string_data_size(), 1);\n      const string& encoded_image_str = image_proto.string_data(0);\n      int encoded_size = encoded_image_str.size();\n      // We use a cv::Mat to wrap the encoded str so we do not need a copy.\n      src = cv::imdecode(\n          cv::Mat(\n              1,\n              &encoded_size,\n              CV_8UC1,\n              const_cast<char*>(encoded_image_str.data())),\n          color_ ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);\n    } else if (image_proto.data_type() == TensorProto::BYTE) {\n      // raw image content.\n      int src_c = (image_proto.dims_size() == 3) ? image_proto.dims(2) : 1;\n      CAFFE_ENFORCE(src_c == 3 || src_c == 1);\n\n      src.create(\n          image_proto.dims(0),\n          image_proto.dims(1),\n          (src_c == 3) ? CV_8UC3 : CV_8UC1);\n      memcpy(\n          src.ptr<uchar>(0),\n          image_proto.byte_data().data(),\n          image_proto.byte_data().size());\n    } else {\n      LOG(FATAL) << \"Unknown image data type.\";\n    }\n\n    if (label_proto.data_type() == TensorProto::FLOAT) {\n      if (label_type_ == SINGLE_LABEL) {\n        DCHECK_EQ(label_proto.float_data_size(), 1);\n        prefetched_label_.mutable_data<float>()[item_id] =\n            label_proto.float_data(0);\n      } else if (label_type_ == MULTI_LABEL_SPARSE) {\n        float* label_data = prefetched_label_.mutable_data<float>() +\n          item_id * num_labels_;\n        memset(label_data, 0, sizeof(float) * num_labels_);\n        for (int i = 0; i < label_proto.float_data_size(); ++i) {\n          label_data[(int)label_proto.float_data(i)] = 1.0;\n        }\n      } else if (label_type_ == MULTI_LABEL_WEIGHTED_SPARSE) {\n        const TensorProto& weight_proto = protos.protos(2);\n        float* label_data =\n            
prefetched_label_.mutable_data<float>() + item_id * num_labels_;\n        memset(label_data, 0, sizeof(float) * num_labels_);\n        for (int i = 0; i < label_proto.float_data_size(); ++i) {\n          label_data[(int)label_proto.float_data(i)] =\n              weight_proto.float_data(i);\n        }\n      } else if (label_type_ == MULTI_LABEL_DENSE) {\n        CAFFE_ENFORCE(label_proto.float_data_size() == num_labels_);\n        float* label_data = prefetched_label_.mutable_data<float>() +\n          item_id * num_labels_;\n        for (int i = 0; i < label_proto.float_data_size(); ++i) {\n          label_data[i] = label_proto.float_data(i);\n        }\n      } else {\n        LOG(ERROR) << \"Unknown label type:\" << label_type_;\n      }\n    } else if (label_proto.data_type() == TensorProto::INT32) {\n      if (label_type_ == SINGLE_LABEL) {\n        DCHECK_EQ(label_proto.int32_data_size(), 1);\n        prefetched_label_.mutable_data<int>()[item_id] =\n            label_proto.int32_data(0);\n      } else if (label_type_ == MULTI_LABEL_SPARSE) {\n        int* label_data = prefetched_label_.mutable_data<int>() +\n          item_id * num_labels_;\n        memset(label_data, 0, sizeof(int) * num_labels_);\n        for (int i = 0; i < label_proto.int32_data_size(); ++i) {\n          label_data[label_proto.int32_data(i)] = 1;\n        }\n      } else if (label_type_ == MULTI_LABEL_WEIGHTED_SPARSE) {\n        const TensorProto& weight_proto = protos.protos(2);\n        float* label_data =\n            prefetched_label_.mutable_data<float>() + item_id * num_labels_;\n        memset(label_data, 0, sizeof(float) * num_labels_);\n        for (int i = 0; i < label_proto.int32_data_size(); ++i) {\n          label_data[label_proto.int32_data(i)] = weight_proto.float_data(i);\n        }\n      } else if (label_type_ == MULTI_LABEL_DENSE) {\n        CAFFE_ENFORCE(label_proto.int32_data_size() == num_labels_);\n        int* label_data = prefetched_label_.mutable_data<int>() 
+\n          item_id * num_labels_;\n        for (int i = 0; i < label_proto.int32_data_size(); ++i) {\n          label_data[i] = label_proto.int32_data(i);\n        }\n      } else {\n        LOG(ERROR) << \"Unknown label type:\" << label_type_;\n      }\n    } else {\n      LOG(FATAL) << \"Unsupported label data type.\";\n    }\n\n    for (int i = 0; i < additional_output_protos.size(); ++i) {\n      auto additional_output_proto = additional_output_protos[i];\n\n      if (additional_output_proto.data_type() == TensorProto::FLOAT) {\n        float* additional_output =\n            prefetched_additional_outputs_[i].template mutable_data<float>() +\n            item_id * additional_output_proto.float_data_size();\n\n        for (int j = 0; j < additional_output_proto.float_data_size(); ++j) {\n          additional_output[j] = additional_output_proto.float_data(j);\n        }\n      } else if (additional_output_proto.data_type() == TensorProto::INT32) {\n        int* additional_output =\n            prefetched_additional_outputs_[i].template mutable_data<int>() +\n            item_id * additional_output_proto.int32_data_size();\n\n        for (int j = 0; j < additional_output_proto.int32_data_size(); ++j) {\n          additional_output[j] = additional_output_proto.int32_data(j);\n        }\n      } else if (additional_output_proto.data_type() == TensorProto::INT64) {\n        int64_t* additional_output =\n            prefetched_additional_outputs_[i].template mutable_data<int64_t>() +\n            item_id * additional_output_proto.int64_data_size();\n\n        for (int j = 0; j < additional_output_proto.int64_data_size(); ++j) {\n          additional_output[j] = additional_output_proto.int64_data(j);\n        }\n      }\n      else {\n        LOG(FATAL) << \"Unsupported output type.\";\n      }\n    }\n  }\n\n  //\n  // convert source to the color format requested from Op\n  //\n  int out_c = color_ ? 
3 : 1;\n  if (out_c == src.channels()) {\n    *img = src;\n  } else {\n    cv::cvtColor(src, *img, (out_c == 1) ? CV_BGR2GRAY : CV_GRAY2BGR);\n  }\n\n  // Note(Yangqing): I believe that the mat should be created continuous.\n  CAFFE_ENFORCE(img->isContinuous());\n\n  // Sanity check now that we decoded everything\n\n  // Ensure that the bounding box is legit\n  if (info.bounding_params.valid\n      && (src.rows < info.bounding_params.ymin + info.bounding_params.height\n        || src.cols < info.bounding_params.xmin + info.bounding_params.width\n     )) {\n    info.bounding_params.valid = false;\n  }\n\n  // Apply the bounding box if requested\n  if (info.bounding_params.valid) {\n    // If we reach here, we know the parameters are sane\n    cv::Rect bounding_box(info.bounding_params.xmin, info.bounding_params.ymin,\n                          info.bounding_params.width, info.bounding_params.height);\n    *img = (*img)(bounding_box);\n\n    /*\n    LOG(INFO) << \"Did bounding with ymin:\"\n              << info.bounding_params.ymin << \" xmin:\" << info.bounding_params.xmin\n              << \" height:\" << info.bounding_params.height\n              << \" width:\" << info.bounding_params.width << \"\\n\";\n    LOG(INFO) << \"Bounded matrix: \" << img;\n    */\n  } else {\n    // LOG(INFO) << \"No bounding\\n\";\n  }\n\n  cv::Mat scaled_img;\n  bool inception_scale_jitter = false;\n  if (scale_jitter_type_ == INCEPTION_STYLE) {\n    if (!is_test_) {\n      // Inception-stype scale jittering is only used for training\n      inception_scale_jitter = RandomSizedCropping<Context>(img, crop_, randgen);\n      // if a random crop is still not found, do simple random cropping later\n    }\n  }\n\n  if ((scale_jitter_type_ == NO_SCALE_JITTER) ||\n    (scale_jitter_type_ == INCEPTION_STYLE && !inception_scale_jitter)) {\n      int scaled_width, scaled_height;\n      int scale_to_use = scale_ > 0 ? 
scale_ : minsize_;\n\n      // set the random minsize\n      if (random_scaling_) {\n        scale_to_use = std::uniform_int_distribution<>(random_scale_[0],\n                                                       random_scale_[1])(*randgen);\n      }\n\n      if (warp_) {\n        scaled_width = scale_to_use;\n        scaled_height = scale_to_use;\n      } else if (img->rows > img->cols) {\n        scaled_width = scale_to_use;\n        scaled_height =\n            static_cast<float>(img->rows) * scale_to_use / img->cols;\n      } else {\n        scaled_height = scale_to_use;\n        scaled_width =\n            static_cast<float>(img->cols) * scale_to_use / img->rows;\n      }\n      if ((scale_ > 0 &&\n           (scaled_height != img->rows || scaled_width != img->cols))\n          || (scaled_height > img->rows || scaled_width > img->cols)) {\n        // We rescale in all cases if we are using scale_\n        // but only to make the image bigger if using minsize_\n        /*\n        LOG(INFO) << \"Scaling to \" << scaled_width << \" x \" << scaled_height\n                  << \" From \" << img->cols << \" x \" << img->rows;\n        */\n        cv::resize(\n            *img,\n            scaled_img,\n            cv::Size(scaled_width, scaled_height),\n            0,\n            0,\n            cv::INTER_AREA);\n        *img = scaled_img;\n      }\n  }\n  // TODO(Yangqing): return false if any error happens.\n  return true;\n}\n\n// assume HWC order and color channels BGR\ntemplate <class Context>\nvoid Saturation(\n  float* img,\n  const int img_size,\n  const float alpha_rand,\n  std::mt19937* randgen\n) {\n  float alpha = 1.0f +\n    std::uniform_real_distribution<float>(-alpha_rand, alpha_rand)(*randgen);\n  // BGR to Gray scale image: R -> 0.299, G -> 0.587, B -> 0.114\n  int p = 0;\n  for (int h = 0; h < img_size; ++h) {\n    for (int w = 0; w < img_size; ++w) {\n      float gray_color = img[3 * p] * 0.114f + img[3 * p + 1] * 0.587f +\n        img[3 * p + 
2] * 0.299f;\n      for (int c = 0; c < 3; ++c) {\n        img[3 * p + c] = img[3 * p + c] * alpha + gray_color * (1.0f - alpha);\n      }\n      p++;\n    }\n  }\n}\n\n// assume HWC order and color channels BGR\ntemplate <class Context>\nvoid Brightness(\n  float* img,\n  const int img_size,\n  const float alpha_rand,\n  std::mt19937* randgen\n) {\n  float alpha = 1.0f +\n    std::uniform_real_distribution<float>(-alpha_rand, alpha_rand)(*randgen);\n  int p = 0;\n  for (int h = 0; h < img_size; ++h) {\n    for (int w = 0; w < img_size; ++w) {\n      for (int c = 0; c < 3; ++c) {\n        img[p++] *= alpha;\n      }\n    }\n  }\n}\n\n// assume HWC order and color channels BGR\ntemplate <class Context>\nvoid Contrast(\n  float* img,\n  const int img_size,\n  const float alpha_rand,\n  std::mt19937* randgen\n){\n  float gray_mean = 0;\n  int p = 0;\n  for (int h = 0; h < img_size; ++h) {\n    for (int w = 0; w < img_size; ++w) {\n      // BGR to Gray scale image: R -> 0.299, G -> 0.587, B -> 0.114\n      gray_mean += img[3 * p] * 0.114f + img[3 * p + 1] * 0.587f +\n        img[3 * p + 2] * 0.299f;\n      p++;\n    }\n  }\n  gray_mean /= (img_size * img_size);\n\n  float alpha = 1.0f +\n    std::uniform_real_distribution<float>(-alpha_rand, alpha_rand)(*randgen);\n  p = 0;\n  for (int h = 0; h < img_size; ++h) {\n    for (int w = 0; w < img_size; ++w) {\n      for (int c = 0; c < 3; ++c) {\n        img[p] = img[p] * alpha + gray_mean * (1.0f - alpha);\n        p++;\n      }\n    }\n  }\n}\n\n// assume HWC order and color channels BGR\ntemplate <class Context>\nvoid ColorJitter(\n  float* img,\n  const int img_size,\n  const float saturation,\n  const float brightness,\n  const float contrast,\n  std::mt19937* randgen\n) {\n  std::srand (unsigned(std::time(0)));\n  std::vector<int> jitter_order{0, 1, 2};\n  // obtain a time-based seed:\n  unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();\n  std::shuffle(jitter_order.begin(), 
jitter_order.end(),\n    std::default_random_engine(seed));\n\n  for (int i = 0; i < 3; ++i) {\n    if (jitter_order[i] == 0) {\n      Saturation<Context>(img, img_size, saturation, randgen);\n    } else if (jitter_order[i] == 1) {\n      Brightness<Context>(img, img_size, brightness, randgen);\n    } else {\n      Contrast<Context>(img, img_size, contrast, randgen);\n    }\n  }\n}\n\n// assume HWC order and color channels BGR\ntemplate <class Context>\nvoid ColorLighting(\n  float* img,\n  const int img_size,\n  const float alpha_std,\n  const std::vector<std::vector<float>>& eigvecs,\n  const std::vector<float>& eigvals,\n  std::mt19937* randgen\n) {\n  std::normal_distribution<float> d(0, alpha_std);\n  std::vector<float> alphas(3);\n  for (int i = 0; i < 3; ++i) {\n    alphas[i] = d(*randgen);\n  }\n\n  std::vector<float> delta_rgb(3, 0.0);\n  for (int i = 0; i < 3; ++i) {\n    for (int j = 0; j < 3; ++j) {\n      delta_rgb[i] += eigvecs[i][j] * eigvals[j] * alphas[j];\n    }\n  }\n\n  int p = 0;\n  for (int h = 0; h < img_size; ++h) {\n    for (int w = 0; w < img_size; ++w) {\n      for (int c = 0; c < 3; ++c) {\n        img[p++] += delta_rgb[2 - c];\n      }\n    }\n  }\n\n}\n\n// assume HWC order and color channels BGR\n// mean subtraction and scaling.\ntemplate <class Context>\nvoid ColorNormalization(\n  float* img,\n  const int img_size,\n  const int channels,\n  const std::vector<float>& mean,\n  const std::vector<float>& std\n) {\n  int p = 0;\n  for (int h = 0; h < img_size; ++h) {\n    for (int w = 0; w < img_size; ++w) {\n      for (int c = 0; c < channels; ++c) {\n        img[p] = (img[p] - mean[c]) * std[c];\n        p++;\n      }\n    }\n  }\n}\n\n// Factored out image transformation\ntemplate <class Context>\nvoid TransformImage(\n    const cv::Mat& scaled_img,\n    const int channels,\n    float* image_data,\n    const bool color_jitter,\n    const float saturation,\n    const float brightness,\n    const float contrast,\n    const bool 
color_lighting,\n    const float color_lighting_std,\n    const std::vector<std::vector<float>>& color_lighting_eigvecs,\n    const std::vector<float>& color_lighting_eigvals,\n    const int crop,\n    const bool mirror,\n    const std::vector<float>& mean,\n    const std::vector<float>& std,\n    std::mt19937* randgen,\n    std::bernoulli_distribution* mirror_this_image,\n    bool is_test = false) {\n  CAFFE_ENFORCE_GE(\n      scaled_img.rows, crop, \"Image height must be bigger than crop.\");\n  CAFFE_ENFORCE_GE(\n      scaled_img.cols, crop, \"Image width must be bigger than crop.\");\n\n  // find the cropped region, and copy it to the destination matrix\n  int width_offset, height_offset;\n  if (is_test) {\n    width_offset = (scaled_img.cols - crop) / 2;\n    height_offset = (scaled_img.rows - crop) / 2;\n  } else {\n    width_offset =\n      std::uniform_int_distribution<>(0, scaled_img.cols - crop)(*randgen);\n    height_offset =\n      std::uniform_int_distribution<>(0, scaled_img.rows - crop)(*randgen);\n  }\n\n  float* image_data_ptr = image_data;\n  if (!is_test && mirror && (*mirror_this_image)(*randgen)) {\n    // Copy mirrored image.\n    for (int h = height_offset; h < height_offset + crop; ++h) {\n      for (int w = width_offset + crop - 1; w >= width_offset; --w) {\n        const uint8_t* cv_data = scaled_img.ptr(h) + w * channels;\n        for (int c = 0; c < channels; ++c) {\n          *(image_data_ptr++) = static_cast<float>(cv_data[c]);\n        }\n      }\n    }\n  } else {\n    // Copy normally.\n    for (int h = height_offset; h < height_offset + crop; ++h) {\n      for (int w = width_offset; w < width_offset + crop; ++w) {\n        const uint8_t* cv_data = scaled_img.ptr(h) + w * channels;\n        for (int c = 0; c < channels; ++c) {\n          *(image_data_ptr++) = static_cast<float>(cv_data[c]);\n        }\n      }\n    }\n  }\n\n  if (color_jitter && channels == 3 && !is_test) {\n    ColorJitter<Context>(image_data, crop, saturation, 
brightness, contrast,\n      randgen);\n  }\n  if (color_lighting && channels == 3 && !is_test) {\n    ColorLighting<Context>(image_data, crop, color_lighting_std,\n      color_lighting_eigvecs, color_lighting_eigvals, randgen);\n  }\n\n  // Color normalization\n  // Mean subtraction and scaling.\n  ColorNormalization<Context>(image_data, crop, channels, mean, std);\n}\n\n// Only crop / transose the image\n// leave in uint8_t dataType\ntemplate <class Context>\nvoid CropTransposeImage(const cv::Mat& scaled_img, const int channels,\n                        uint8_t *cropped_data, const int crop,\n                        const bool mirror, std::mt19937 *randgen,\n                        std::bernoulli_distribution *mirror_this_image,\n                        bool is_test = false) {\n  CAFFE_ENFORCE_GE(\n      scaled_img.rows, crop, \"Image height must be bigger than crop.\");\n  CAFFE_ENFORCE_GE(\n      scaled_img.cols, crop, \"Image width must be bigger than crop.\");\n\n  // find the cropped region, and copy it to the destination matrix\n  int width_offset, height_offset;\n  if (is_test) {\n    width_offset = (scaled_img.cols - crop) / 2;\n    height_offset = (scaled_img.rows - crop) / 2;\n  } else {\n    width_offset =\n      std::uniform_int_distribution<>(0, scaled_img.cols - crop)(*randgen);\n    height_offset =\n      std::uniform_int_distribution<>(0, scaled_img.rows - crop)(*randgen);\n  }\n\n  if (mirror && (*mirror_this_image)(*randgen)) {\n    // Copy mirrored image.\n    for (int h = height_offset; h < height_offset + crop; ++h) {\n      for (int w = width_offset + crop - 1; w >= width_offset; --w) {\n        const uint8_t* cv_data = scaled_img.ptr(h) + w*channels;\n        for (int c = 0; c < channels; ++c) {\n          *(cropped_data++) = cv_data[c];\n        }\n      }\n    }\n  } else {\n    // Copy normally.\n    for (int h = height_offset; h < height_offset + crop; ++h) {\n      for (int w = width_offset; w < width_offset + crop; ++w) {\n        
const uint8_t* cv_data = scaled_img.ptr(h) + w*channels;\n        for (int c = 0; c < channels; ++c) {\n          *(cropped_data++) = cv_data[c];\n        }\n      }\n    }\n  }\n}\n\n// Parse datum, decode image, perform transform\n// Intended as entry point for binding to thread pool\ntemplate <class Context>\nvoid ImageInputOp<Context>::DecodeAndTransform(\n      const std::string& value, float *image_data, int item_id,\n      const int channels, std::size_t thread_index) {\n\n  CAFFE_ENFORCE((int)thread_index < num_decode_threads_);\n\n  std::bernoulli_distribution mirror_this_image(0.5f);\n  std::mt19937* randgen = &(randgen_per_thread_[thread_index]);\n\n  cv::Mat img;\n  // Decode the image\n  PerImageArg info;\n  CHECK(GetImageAndLabelAndInfoFromDBValue(value, &img, info, item_id,\n    randgen));\n\n  // Factor out the image transformation\n  TransformImage<Context>(img, channels, image_data,\n    color_jitter_, img_saturation_, img_brightness_, img_contrast_,\n    color_lighting_, color_lighting_std_, color_lighting_eigvecs_,\n    color_lighting_eigvals_, crop_, mirror_, mean_, std_,\n    randgen, &mirror_this_image, is_test_);\n}\n\ntemplate <class Context>\nvoid ImageInputOp<Context>::DecodeAndTransposeOnly(\n    const std::string& value, uint8_t *image_data, int item_id,\n    const int channels, std::size_t thread_index) {\n\n  CAFFE_ENFORCE((int)thread_index < num_decode_threads_);\n\n  std::bernoulli_distribution mirror_this_image(0.5f);\n  std::mt19937* randgen = &(randgen_per_thread_[thread_index]);\n\n  cv::Mat img;\n  // Decode the image\n  PerImageArg info;\n  CHECK(GetImageAndLabelAndInfoFromDBValue(value, &img, info, item_id,\n    randgen));\n\n  // Factor out the image transformation\n  CropTransposeImage<Context>(img, channels, image_data, crop_, mirror_,\n                              randgen, &mirror_this_image, is_test_);\n}\n\n\ntemplate <class Context>\nbool ImageInputOp<Context>::Prefetch() {\n  if (!owned_reader_.get()) {\n    // if we 
are not owning the reader, we will get the reader pointer from\n    // input. Otherwise the constructor should have already set the reader\n    // pointer.\n    reader_ = &OperatorBase::Input<db::DBReader>(0);\n  }\n  const int channels = color_ ? 3 : 1;\n  // Call mutable_data() once to allocate the underlying memory.\n  if (gpu_transform_) {\n    // we'll transfer up in int8, then convert later\n    prefetched_image_.mutable_data<uint8_t>();\n  } else {\n    prefetched_image_.mutable_data<float>();\n  }\n\n  prefetched_label_.mutable_data<int>();\n  // Prefetching handled with a thread pool of \"decode_threads\" threads.\n\n  for (int item_id = 0; item_id < batch_size_; ++item_id) {\n    std::string key, value;\n    cv::Mat img;\n\n    // read data\n    reader_->Read(&key, &value);\n\n    // determine label type based on first item\n    if( item_id == 0 ) {\n      if( use_caffe_datum_ ) {\n        prefetched_label_.mutable_data<int>();\n      } else {\n        TensorProtos protos;\n        CAFFE_ENFORCE(protos.ParseFromString(value));\n        TensorProto_DataType labeldt = protos.protos(1).data_type();\n        if( labeldt == TensorProto::INT32 ) {\n          prefetched_label_.mutable_data<int>();\n        } else if ( labeldt == TensorProto::FLOAT) {\n          prefetched_label_.mutable_data<float>();\n        } else {\n          LOG(FATAL) << \"Unsupported label type.\";\n        }\n\n        for (int i = 0; i < additional_inputs_count_; ++i) {\n          int index = additional_inputs_offset_ + i;\n          TensorProto additional_output_proto = protos.protos(index);\n\n          if (additional_output_proto.data_type() == TensorProto::FLOAT) {\n            prefetched_additional_outputs_[i].template mutable_data<float>();\n          } else if (\n              additional_output_proto.data_type() == TensorProto::INT32) {\n            prefetched_additional_outputs_[i].template mutable_data<int>();\n          } else if (\n              
additional_output_proto.data_type() == TensorProto::INT64) {\n            prefetched_additional_outputs_[i].template mutable_data<int64_t>();\n          } else {\n            LOG(FATAL) << \"Unsupported output type.\";\n          }\n        }\n      }\n    }\n\n    // launch into thread pool for processing\n    // TODO: support color jitter and color lighting in gpu_transform\n    if (gpu_transform_) {\n      // output of decode will still be int8\n      uint8_t* image_data = prefetched_image_.mutable_data<uint8_t>() +\n          crop_ * crop_ * channels * item_id;\n      thread_pool_->runTaskWithID(std::bind(\n          &ImageInputOp<Context>::DecodeAndTransposeOnly,\n          this,\n          std::string(value),\n          image_data,\n          item_id,\n          channels,\n          std::placeholders::_1));\n    } else {\n      float* image_data = prefetched_image_.mutable_data<float>() +\n          crop_ * crop_ * channels * item_id;\n      thread_pool_->runTaskWithID(std::bind(\n          &ImageInputOp<Context>::DecodeAndTransform,\n          this,\n          std::string(value),\n          image_data,\n          item_id,\n          channels,\n          std::placeholders::_1));\n    }\n  }\n  thread_pool_->waitWorkComplete();\n\n  // If the context is not CPUContext, we will need to do a copy in the\n  // prefetch function as well.\n  if (!std::is_same<Context, CPUContext>::value) {\n    prefetched_image_on_device_.CopyFrom(prefetched_image_, &context_);\n    prefetched_label_on_device_.CopyFrom(prefetched_label_, &context_);\n\n    for (int i = 0; i < prefetched_additional_outputs_on_device_.size(); ++i) {\n      prefetched_additional_outputs_on_device_[i].CopyFrom(\n          prefetched_additional_outputs_[i], &context_);\n    }\n  }\n  return true;\n}\n\ntemplate <class Context>\nbool ImageInputOp<Context>::CopyPrefetched() {\n  auto* image_output = OperatorBase::Output<Tensor<Context> >(0);\n  auto* label_output = OperatorBase::Output<Tensor<Context> 
>(1);\n  vector<Tensor<Context>*> additional_outputs_output;\n\n  for (int i = 2; i < OutputSize(); ++i) {\n    additional_outputs_output.push_back(\n        OperatorBase::Output<Tensor<Context>>(i));\n  }\n\n  // Note(jiayq): The if statement below should be optimized away by the\n  // compiler since std::is_same is a constexpr.\n  if (std::is_same<Context, CPUContext>::value) {\n    image_output->CopyFrom(prefetched_image_, &context_);\n    label_output->CopyFrom(prefetched_label_, &context_);\n\n    for (int i = 0; i < additional_outputs_output.size(); ++i) {\n      additional_outputs_output[i]->CopyFrom(\n          prefetched_additional_outputs_[i], &context_);\n    }\n  } else {\n    // TODO: support color jitter and color lighting in gpu_transform\n    if (gpu_transform_) {\n      if (!mean_std_copied_) {\n        mean_gpu_.Resize(mean_.size());\n        std_gpu_.Resize(std_.size());\n\n        context_.template Copy<float, CPUContext, Context>(\n          mean_.size(), mean_.data(), mean_gpu_.template mutable_data<float>());\n        context_.template Copy<float, CPUContext, Context>(\n          std_.size(), std_.data(), std_gpu_.template mutable_data<float>());\n        mean_std_copied_ = true;\n      }\n      // GPU transform kernel allows explicitly setting output type\n      if (output_type_ == TensorProto_DataType_FLOAT) {\n        TransformOnGPU<uint8_t,float,Context>(prefetched_image_on_device_,\n                                              image_output, mean_gpu_,\n                                              std_gpu_, &context_);\n      } else if (output_type_ == TensorProto_DataType_FLOAT16) {\n        TransformOnGPU<uint8_t,float16,Context>(prefetched_image_on_device_,\n                                                image_output, mean_gpu_,\n                                                std_gpu_, &context_);\n      }  else {\n        return false;\n      }\n    } else {\n      image_output->CopyFrom(prefetched_image_on_device_, &context_);\n  
  }\n    label_output->CopyFrom(prefetched_label_on_device_, &context_);\n\n    for (int i = 0; i < additional_outputs_output.size(); ++i) {\n      additional_outputs_output[i]->CopyFrom(\n          prefetched_additional_outputs_on_device_[i], &context_);\n    }\n  }\n  return true;\n}\n}  // namespace caffe2\n\n#endif  // CAFFE2_IMAGE_IMAGE_INPUT_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/image/transform_gpu.h",
    "content": "#ifndef CAFFE2_IMAGE_TRANSFORM_GPU_H_\n#define CAFFE2_IMAGE_TRANSFORM_GPU_H_\n\n/**\n *\n * Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * 1. Redistributions of source code must retain the above copyright notice, this\n *    list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright notice,\n *    this list of conditions and the following disclaimer in the documentation\n *    and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n **/\n\n#include \"caffe2/core/context.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T_IN, typename T_OUT, class Context>\nbool TransformOnGPU(Tensor<Context>& X, Tensor<Context>* Y,\n                    Tensor<Context>& mean, Tensor<Context>& std,\n                    Context* context);\n\n}  // namespace caffe2\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mkl/mkl_utils.h",
    "content": "#ifndef CAFFE2_UTILS_MKL_UTILS_H_\n#define CAFFE2_UTILS_MKL_UTILS_H_\n\n#include \"caffe2/core/macros.h\"  // For caffe2 macros.\n\n#ifdef CAFFE2_USE_MKL\n\n#include \"caffe2/mkl/utils/mkl_version_check.h\"\n\n// MKLDNN_CHECK should be used in places where exceptions should not be thrown,\n// such as in destructors.\n#define MKLDNN_CHECK(condition)   \\\n  do {                            \\\n    dnnError_t error = condition; \\\n    CAFFE_ENFORCE_EQ(             \\\n        error,                    \\\n        E_SUCCESS,                \\\n        \"Error at : \",            \\\n        __FILE__,                 \\\n        \":\",                      \\\n        __LINE__,                 \\\n        \", error number: \",       \\\n        error);                   \\\n  } while (0)\n\n#define MKLDNN_SAFE_CALL(condition) \\\n  do {                              \\\n    dnnError_t error = condition;   \\\n    CAFFE_ENFORCE_EQ(               \\\n        error,                      \\\n        E_SUCCESS,                  \\\n        \"Error at : \",              \\\n        __FILE__,                   \\\n        \":\",                        \\\n        __LINE__,                   \\\n        \", error number: \",         \\\n        error);                     \\\n  } while (0)\n\n#define CHECK_INPUT_FILTER_DIMS(X, filter, condition) \\\n  do {                                                \\\n    if (cached_input_dims_ != X.dims() ||             \\\n        cached_filter_dims_ != filter.dims()) {       \\\n      cached_input_dims_ = X.dims();                  \\\n      cached_filter_dims_ = filter.dims();            \\\n      condition = true;                               \\\n    } else {                                          \\\n      condition = false;                              \\\n    }                                                 \\\n  } while (0)\n\n#define CHECK_INPUT_DIMS(X, condition)    \\\n  do {                                  
  \\\n    if (cached_input_dims_ != X.dims()) { \\\n      cached_input_dims_ = X.dims();      \\\n      condition = true;                   \\\n    } else {                              \\\n      condition = false;                  \\\n    }                                     \\\n  } while (0)\n\n// All caffe2 mkl related headers\n\n#ifdef CAFFE2_HAS_MKL_DNN\n#include \"caffe2/mkl/utils/mkl_context.h\"\n#include \"caffe2/mkl/utils/mkl_dnn_cppwrapper.h\"\n#include \"caffe2/mkl/utils/mkl_memory.h\"\n#include \"caffe2/mkl/utils/mkl_operator.h\"\n#endif // CAFFE2_HAS_MKL_DNN\n\n#ifdef CAFFE2_HAS_MKL_SGEMM_PACK\n#include \"caffe2/mkl/utils/sgemm_pack.h\"\n#endif // CAFFE2_HAS_MKL_SGEMM_PACK\n\n#endif // CAFFE2_USE_MKL\n#endif // CAFFE2_UTILS_MKL_UTILS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mkl/operators/operator_fallback_mkl.h",
    "content": "#ifndef CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_\n#define CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/mkl/mkl_utils.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\n#ifdef CAFFE2_HAS_MKL_DNN\nnamespace caffe2 {\nnamespace mkl {\n\n/**\n * @brief A templated class to allow one to wrap a CPU operator as an MKL\n * operator.\n *\n * This class can be used when one does not have the MKL implementation ready\n * yet for an operator. Essentially, what this op does is to automatically\n * deal with data copy for you. Plausibly, this causes a lot of overhead and\n * is not optimal, so you should use this operator mostly for quick prototyping\n * purpose.\n *\n * All the input and output of the original operator should be TensorCPU.\n *\n * Example usage: if you have a class MyMagicOp that is CPU based, and you use\n * the registration code\n *     REGISTER_CPU_OPERATOR(MyMagic, MyMagicOp);\n * to register the CPU side, you can create its corresponding MKL operator\n * (with performance hits of course) via\n *     REGISTER_MKL_OPERATOR(MyMagic,\n *                            MKLFallbackOp<MyMagicOp>);\n *\n * Advanced usage: if you want to have some specific outputs never copied, you\n * can use the SkipOutputCopy template argument to do that. 
For example, if\n * MyMagic produces two outputs and the first output is always going to live on\n * the CPU, you can do\n *     REGISTER_MKL_OPERATOR(MyMagic,\n *                            MKLFallbackOp<MyMagicOp, SkipIndices<0>>);\n */\ntemplate <class CPUOp, typename SkipOutputCopy = SkipIndices<>>\nclass MKLFallbackOp final : public Operator<MKLContext> {\n public:\n  USE_OPERATOR_FUNCTIONS(MKLContext);\n  MKLFallbackOp(const OperatorDef& def, Workspace* ws)\n      : Operator<MKLContext>(def, ws) {\n    CAFFE_ENFORCE_EQ(def.device_option().device_type(), MKLDNN);\n    OperatorDef base_def_(def);\n    // base_def_ runs on CPU, so we will set its device option to CPU.\n    // Copy to allow random_seed to be correctly propagated.\n    base_def_.mutable_device_option()->CopyFrom(def.device_option());\n    base_def_.mutable_device_option()->set_device_type(CPU);\n    // Set up the symbols for the local workspace.\n    for (const string& name : def.input()) {\n      local_input_blobs_.push_back(local_ws_.CreateBlob(name));\n      CHECK_NOTNULL(local_input_blobs_.back());\n    }\n    base_op_.reset(new CPUOp(base_def_, &local_ws_));\n    for (const string& name : def.output()) {\n      local_output_blobs_.push_back(local_ws_.GetBlob(name));\n      CHECK_NOTNULL(local_output_blobs_.back());\n    }\n  }\n\n  bool RunOnDevice() override {\n    for (int i = 0; i < InputSize(); ++i) {\n      if (OperatorBase::InputIsType<MKLMemory<float>>(i)) {\n        OperatorBase::Input<MKLMemory<float>>(i).CopyTo(\n            local_input_blobs_[i]->template GetMutable<TensorCPU>());\n      } else if (OperatorBase::InputIsType<MKLMemory<double>>(i)) {\n        OperatorBase::Input<MKLMemory<double>>(i).CopyTo(\n            local_input_blobs_[i]->template GetMutable<TensorCPU>());\n      } else {\n        VLOG(1) << \"Input \" << i << \" is not MKLMemory. 
Skipping copy.\";\n        // Note(jiayq): This removes a const but conceptually\n        // local_input_blobs will only be used as const blob input for the\n        // base op so we are still fine.\n        local_input_blobs_[i]->ShareExternal(\n            const_cast<void*>(OperatorBase::Inputs()[i]->GetRaw()),\n            OperatorBase::Inputs()[i]->meta());\n      }\n    }\n\n    if (!base_op_->Run()) {\n      LOG(ERROR) << \"Base op run failed in MKLFallbackOp. Def: \"\n                 << ProtoDebugString(this->debug_def());\n      return false;\n    }\n\n    for (int i = 0; i < OutputSize(); ++i) {\n      if (SkipOutputCopy::Contains(i)) {\n        VLOG(1) << \"Copy output: index \" << i << \" skipped.\";\n        continue;\n      }\n      CAFFE_ENFORCE(\n          local_output_blobs_[i]->template IsType<TensorCPU>(),\n          \"MKL fallback op currently does not support non-TensorCPU \"\n          \"output type who needs copying.\");\n      const auto& src = local_output_blobs_[i]->template Get<TensorCPU>();\n      if (src.template IsType<float>()) {\n        Blob* dst = OperatorBase::OutputBlob(i);\n        if (!dst->template IsType<MKLMemory<float>>() ||\n            dst->Get<MKLMemory<float>>().dims() != src.dims()) {\n          dst->Reset(new MKLMemory<float>(src.dims()));\n        }\n        dst->GetMutable<MKLMemory<float>>()->CopyFrom(src);\n      } else if (src.template IsType<double>()) {\n        Blob* dst = OperatorBase::OutputBlob(i);\n        if (!dst->template IsType<MKLMemory<double>>() ||\n            dst->Get<MKLMemory<double>>().dims() != src.dims()) {\n          dst->Reset(new MKLMemory<double>(src.dims()));\n        }\n        dst->GetMutable<MKLMemory<double>>()->CopyFrom(src);\n      } else {\n        CAFFE_THROW(\"MKLMemory only supports float and double.\");\n      }\n    }\n    return true;\n  }\n\n protected:\n  Workspace local_ws_;\n  vector<Blob*> local_input_blobs_;\n  vector<Blob*> local_output_blobs_;\n  
std::unique_ptr<CPUOp> base_op_;\n};\n}\n\n} // namespace caffe2\n\n#endif // CAFFE2_HAS_MKL_DNN\n#endif // CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mkl/utils/mkl_context.h",
    "content": "#ifndef CAFFE2_UTILS_MKL_CONTEXT_H_\n#define CAFFE2_UTILS_MKL_CONTEXT_H_\n\n#include <cstdlib>\n#include <ctime>\n#include <random>\n\n#include \"caffe2/core/context.h\"\n\nnamespace caffe2 {\n\n/**\n * The MKL Context, which is largely the same as the CPUContext. We instantiate\n * this mainly in order to have a first-class MKL device.\n *\n * Note that although New() and Delete() are implemented, we expect MKLContext\n * operators to mainly perform input and output via MKLMemory. As a result,\n * most likely MKLContext::New and ::Delete won't be used as often.\n */\nclass MKLContext final {\n public:\n  MKLContext() : random_seed_(math::randomNumberSeed()) {}\n  explicit MKLContext(const DeviceOption& option)\n      : random_seed_(\n            option.has_random_seed() ? option.random_seed()\n                                     : math::randomNumberSeed()) {\n    CAFFE_ENFORCE_EQ(option.device_type(), MKLDNN);\n  }\n\n  ~MKLContext() {}\n\n  inline void SwitchToDevice(int /*stream_id*/ = 0) {}\n  inline void WaitEvent(const Event& ev) {\n    ev.Wait(MKLDNN, this);\n  }\n  inline void Record(Event* ev) const {\n    CAFFE_ENFORCE(ev, \"Event must not be null.\");\n    ev->Record(MKLDNN, this);\n  }\n  inline void FinishDeviceComputation() {}\n\n  inline std::mt19937& RandGenerator() {\n    if (!random_generator_.get()) {\n      random_generator_.reset(new std::mt19937(random_seed_));\n    }\n    return *random_generator_.get();\n  }\n\n  inline static std::pair<void*, MemoryDeleter> New(size_t nbytes) {\n    return GetCPUAllocator()->New(nbytes);\n  }\n\n  // Two copy functions that deal with cross-device copies.\n  template <class SrcContext, class DstContext>\n  inline void CopyBytes(size_t nbytes, const void* src, void* dst);\n\n  template <typename T, class SrcContext, class DstContext>\n  inline void Copy(size_t n, const T* src, T* dst) {\n    if (std::is_fundamental<T>::value) {\n      CopyBytes<SrcContext, DstContext>(\n          n * 
sizeof(T),\n          static_cast<const void*>(src),\n          static_cast<void*>(dst));\n    } else {\n      for (int i = 0; i < n; ++i) {\n        dst[i] = src[i];\n      }\n    }\n  }\n\n  template <class SrcContext, class DstContext>\n  inline void\n  CopyItems(const TypeMeta& meta, size_t n, const void* src, void* dst) {\n    if (meta.copy()) {\n      meta.copy()(src, dst, n);\n    } else {\n      CopyBytes<SrcContext, DstContext>(n * meta.itemsize(), src, dst);\n    }\n  }\n\n protected:\n  // TODO(jiayq): instead of hard-coding a generator, make it more flexible.\n  int random_seed_{1701};\n  std::unique_ptr<std::mt19937> random_generator_;\n};\n\ntemplate <>\ninline void MKLContext::CopyBytes<MKLContext, MKLContext>(\n    size_t nbytes,\n    const void* src,\n    void* dst) {\n  memcpy(dst, src, nbytes);\n}\n\ntemplate <>\ninline void MKLContext::CopyBytes<CPUContext, MKLContext>(\n    size_t nbytes,\n    const void* src,\n    void* dst) {\n  memcpy(dst, src, nbytes);\n}\n\ntemplate <>\ninline void MKLContext::CopyBytes<MKLContext, CPUContext>(\n    size_t nbytes,\n    const void* src,\n    void* dst) {\n  memcpy(dst, src, nbytes);\n}\n} // namespace caffe2\n\n#endif // CAFFE2_UTILS_MKL_CONTEXT_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mkl/utils/mkl_dnn_cppwrapper.h",
    "content": "// Do not directly include this file. Include caffe2/mkl/mkl_utils.h instead.\n#ifndef CAFFE2_UTILS_MKL_MKL_DNN_CPPWRAPPER_H\n#define CAFFE2_UTILS_MKL_MKL_DNN_CPPWRAPPER_H\n\n#include <stdarg.h>\n#include <stddef.h>\n\n#include <mkl.h>\n\n#define C2_MKL_TEMPLATE_PREFIX \\\n  template <typename T>        \\\n  inline\n#define C2_MKL_SPEC_PREFIX \\\n  template <>              \\\n  inline\n\nnamespace caffe2 {\nnamespace mkl {\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnLayoutCreate(\n    dnnLayout_t* pLayout,\n    size_t dimension,\n    const size_t size[],\n    const size_t strides[]);\nC2_MKL_SPEC_PREFIX dnnError_t dnnLayoutCreate<float>(\n    dnnLayout_t* pLayout,\n    size_t dimension,\n    const size_t size[],\n    const size_t strides[]) {\n  return dnnLayoutCreate_F32(pLayout, dimension, size, strides);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnLayoutCreate<double>(\n    dnnLayout_t* pLayout,\n    size_t dimension,\n    const size_t size[],\n    const size_t strides[]) {\n  return dnnLayoutCreate_F64(pLayout, dimension, size, strides);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnLayoutCreateFromPrimitive(\n    dnnLayout_t* pLayout,\n    const dnnPrimitive_t primitive,\n    dnnResourceType_t type);\nC2_MKL_SPEC_PREFIX dnnError_t dnnLayoutCreateFromPrimitive<float>(\n    dnnLayout_t* pLayout,\n    const dnnPrimitive_t primitive,\n    dnnResourceType_t type) {\n  return dnnLayoutCreateFromPrimitive_F32(pLayout, primitive, type);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnLayoutCreateFromPrimitive<double>(\n    dnnLayout_t* pLayout,\n    const dnnPrimitive_t primitive,\n    dnnResourceType_t type) {\n  return dnnLayoutCreateFromPrimitive_F64(pLayout, primitive, type);\n}\n\nC2_MKL_TEMPLATE_PREFIX size_t dnnLayoutGetMemorySize(const dnnLayout_t layout);\nC2_MKL_SPEC_PREFIX size_t\ndnnLayoutGetMemorySize<float>(const dnnLayout_t layout) {\n  return dnnLayoutGetMemorySize_F32(layout);\n}\nC2_MKL_SPEC_PREFIX size_t\ndnnLayoutGetMemorySize<double>(const dnnLayout_t 
layout) {\n  return dnnLayoutGetMemorySize_F64(layout);\n}\n\nC2_MKL_TEMPLATE_PREFIX int dnnLayoutCompare(\n    const dnnLayout_t l1,\n    const dnnLayout_t l2);\nC2_MKL_SPEC_PREFIX int dnnLayoutCompare<float>(\n    const dnnLayout_t l1,\n    const dnnLayout_t l2) {\n  return dnnLayoutCompare_F32(l1, l2);\n}\nC2_MKL_SPEC_PREFIX int dnnLayoutCompare<double>(\n    const dnnLayout_t l1,\n    const dnnLayout_t l2) {\n  return dnnLayoutCompare_F64(l1, l2);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t\ndnnAllocateBuffer(void** pPtr, dnnLayout_t layout);\nC2_MKL_SPEC_PREFIX dnnError_t\ndnnAllocateBuffer<float>(void** pPtr, dnnLayout_t layout) {\n  return dnnAllocateBuffer_F32(pPtr, layout);\n}\nC2_MKL_SPEC_PREFIX dnnError_t\ndnnAllocateBuffer<double>(void** pPtr, dnnLayout_t layout) {\n  return dnnAllocateBuffer_F64(pPtr, layout);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnReleaseBuffer(void* ptr);\nC2_MKL_SPEC_PREFIX dnnError_t dnnReleaseBuffer<float>(void* ptr) {\n  return dnnReleaseBuffer_F32(ptr);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnReleaseBuffer<double>(void* ptr) {\n  return dnnReleaseBuffer_F64(ptr);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnLayoutDelete(dnnLayout_t layout);\nC2_MKL_SPEC_PREFIX dnnError_t dnnLayoutDelete<float>(dnnLayout_t layout) {\n  return dnnLayoutDelete_F32(layout);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnLayoutDelete<double>(dnnLayout_t layout) {\n  return dnnLayoutDelete_F64(layout);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t\ndnnPrimitiveAttributesCreate(dnnPrimitiveAttributes_t* attributes);\nC2_MKL_SPEC_PREFIX dnnError_t\ndnnPrimitiveAttributesCreate<float>(dnnPrimitiveAttributes_t* attributes) {\n  return dnnPrimitiveAttributesCreate_F32(attributes);\n}\nC2_MKL_SPEC_PREFIX dnnError_t\ndnnPrimitiveAttributesCreate<double>(dnnPrimitiveAttributes_t* attributes) {\n  return dnnPrimitiveAttributesCreate_F64(attributes);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t\ndnnPrimitiveAttributesDestroy(dnnPrimitiveAttributes_t attributes);\nC2_MKL_SPEC_PREFIX 
dnnError_t\ndnnPrimitiveAttributesDestroy<float>(dnnPrimitiveAttributes_t attributes) {\n  return dnnPrimitiveAttributesDestroy_F32(attributes);\n}\nC2_MKL_SPEC_PREFIX dnnError_t\ndnnPrimitiveAttributesDestroy<double>(dnnPrimitiveAttributes_t attributes) {\n  return dnnPrimitiveAttributesDestroy_F64(attributes);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnPrimitiveGetAttributes(\n    dnnPrimitive_t primitive,\n    dnnPrimitiveAttributes_t* attributes);\nC2_MKL_SPEC_PREFIX dnnError_t dnnPrimitiveGetAttributes<float>(\n    dnnPrimitive_t primitive,\n    dnnPrimitiveAttributes_t* attributes) {\n  return dnnPrimitiveGetAttributes_F32(primitive, attributes);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnPrimitiveGetAttributes<double>(\n    dnnPrimitive_t primitive,\n    dnnPrimitiveAttributes_t* attributes) {\n  return dnnPrimitiveGetAttributes_F64(primitive, attributes);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t\ndnnExecute(dnnPrimitive_t primitive, void* resources[]);\nC2_MKL_SPEC_PREFIX dnnError_t\ndnnExecute<float>(dnnPrimitive_t primitive, void* resources[]) {\n  return dnnExecute_F32(primitive, resources);\n}\nC2_MKL_SPEC_PREFIX dnnError_t\ndnnExecute<double>(dnnPrimitive_t primitive, void* resources[]) {\n  return dnnExecute_F64(primitive, resources);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t\ndnnExecuteAsync(dnnPrimitive_t primitive, void* resources[]);\nC2_MKL_SPEC_PREFIX dnnError_t\ndnnExecuteAsync<float>(dnnPrimitive_t primitive, void* resources[]) {\n  return dnnExecuteAsync_F32(primitive, resources);\n}\nC2_MKL_SPEC_PREFIX dnnError_t\ndnnExecuteAsync<double>(dnnPrimitive_t primitive, void* resources[]) {\n  return dnnExecuteAsync_F64(primitive, resources);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnWaitFor(dnnPrimitive_t primitive);\nC2_MKL_SPEC_PREFIX dnnError_t dnnWaitFor<float>(dnnPrimitive_t primitive) {\n  return dnnWaitFor_F32(primitive);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnWaitFor<double>(dnnPrimitive_t primitive) {\n  return 
dnnWaitFor_F64(primitive);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnDelete(dnnPrimitive_t primitive);\nC2_MKL_SPEC_PREFIX dnnError_t dnnDelete<float>(dnnPrimitive_t primitive) {\n  return dnnDelete_F32(primitive);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnDelete<double>(dnnPrimitive_t primitive) {\n  return dnnDelete_F64(primitive);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnConversionCreate(\n    dnnPrimitive_t* pConversion,\n    const dnnLayout_t from,\n    const dnnLayout_t to);\nC2_MKL_SPEC_PREFIX dnnError_t dnnConversionCreate<float>(\n    dnnPrimitive_t* pConversion,\n    const dnnLayout_t from,\n    const dnnLayout_t to) {\n  return dnnConversionCreate_F32(pConversion, from, to);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnConversionCreate<double>(\n    dnnPrimitive_t* pConversion,\n    const dnnLayout_t from,\n    const dnnLayout_t to) {\n  return dnnConversionCreate_F64(pConversion, from, to);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t\ndnnConversionExecute(dnnPrimitive_t conversion, void* from, void* to);\nC2_MKL_SPEC_PREFIX dnnError_t\ndnnConversionExecute<float>(dnnPrimitive_t conversion, void* from, void* to) {\n  return dnnConversionExecute_F32(conversion, from, to);\n}\nC2_MKL_SPEC_PREFIX dnnError_t\ndnnConversionExecute<double>(dnnPrimitive_t conversion, void* from, void* to) {\n  return dnnConversionExecute_F64(conversion, from, to);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnConvolutionCreateForward(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type);\nC2_MKL_SPEC_PREFIX dnnError_t dnnConvolutionCreateForward<float>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t srcSize[],\n    const 
size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnConvolutionCreateForward_F32(\n      pConvolution,\n      attributes,\n      algorithm,\n      dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\n\nC2_MKL_SPEC_PREFIX dnnError_t dnnConvolutionCreateForward<double>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnConvolutionCreateForward_F64(\n      pConvolution,\n      attributes,\n      algorithm,\n      dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnConvolutionCreateForwardBias(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type);\nC2_MKL_SPEC_PREFIX dnnError_t dnnConvolutionCreateForwardBias<float>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnConvolutionCreateForwardBias_F32(\n      pConvolution,\n      attributes,\n      algorithm,\n      dimension,\n      srcSize,\n      dstSize,\n      
filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnConvolutionCreateForwardBias<double>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnConvolutionCreateForwardBias_F64(\n      pConvolution,\n      attributes,\n      algorithm,\n      dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnConvolutionCreateBackwardData(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type);\nC2_MKL_SPEC_PREFIX dnnError_t dnnConvolutionCreateBackwardData<float>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnConvolutionCreateBackwardData_F32(\n      pConvolution,\n      attributes,\n      algorithm,\n      dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnConvolutionCreateBackwardData<double>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t 
srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnConvolutionCreateBackwardData_F64(\n      pConvolution,\n      attributes,\n      algorithm,\n      dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnConvolutionCreateBackwardFilter(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type);\nC2_MKL_SPEC_PREFIX dnnError_t dnnConvolutionCreateBackwardFilter<float>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnConvolutionCreateBackwardFilter_F32(\n      pConvolution,\n      attributes,\n      algorithm,\n      dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnConvolutionCreateBackwardFilter<double>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnConvolutionCreateBackwardFilter_F64(\n      pConvolution,\n      attributes,\n      algorithm,\n      
dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnConvolutionCreateBackwardBias(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t dstSize[]);\nC2_MKL_SPEC_PREFIX dnnError_t dnnConvolutionCreateBackwardBias<float>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t dstSize[]) {\n  return dnnConvolutionCreateBackwardBias_F32(\n      pConvolution, attributes, algorithm, dimension, dstSize);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnConvolutionCreateBackwardBias<double>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t dimension,\n    const size_t dstSize[]) {\n  return dnnConvolutionCreateBackwardBias_F64(\n      pConvolution, attributes, algorithm, dimension, dstSize);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnGroupsConvolutionCreateForward(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t groups,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type);\nC2_MKL_SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateForward<float>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t groups,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnGroupsConvolutionCreateForward_F32(\n      pConvolution,\n      attributes,\n  
    algorithm,\n      groups,\n      dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateForward<double>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t groups,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnGroupsConvolutionCreateForward_F64(\n      pConvolution,\n      attributes,\n      algorithm,\n      groups,\n      dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnGroupsConvolutionCreateForwardBias(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t groups,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type);\nC2_MKL_SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateForwardBias<float>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t groups,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnGroupsConvolutionCreateForwardBias_F32(\n      pConvolution,\n      attributes,\n      algorithm,\n      groups,\n      dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\nC2_MKL_SPEC_PREFIX 
dnnError_t dnnGroupsConvolutionCreateForwardBias<double>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t groups,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnGroupsConvolutionCreateForwardBias_F64(\n      pConvolution,\n      attributes,\n      algorithm,\n      groups,\n      dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardData(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t groups,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type);\nC2_MKL_SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardData<float>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t groups,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnGroupsConvolutionCreateBackwardData_F32(\n      pConvolution,\n      attributes,\n      algorithm,\n      groups,\n      dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardData<double>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t 
groups,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnGroupsConvolutionCreateBackwardData_F64(\n      pConvolution,\n      attributes,\n      algorithm,\n      groups,\n      dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardFilter(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t groups,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type);\nC2_MKL_SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardFilter<float>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t groups,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnGroupsConvolutionCreateBackwardFilter_F32(\n      pConvolution,\n      attributes,\n      algorithm,\n      groups,\n      dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardFilter<double>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t groups,\n    size_t dimension,\n    const size_t srcSize[],\n    const size_t dstSize[],\n    const size_t filterSize[],\n    const size_t convolutionStrides[],\n    const int 
inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnGroupsConvolutionCreateBackwardFilter_F64(\n      pConvolution,\n      attributes,\n      algorithm,\n      groups,\n      dimension,\n      srcSize,\n      dstSize,\n      filterSize,\n      convolutionStrides,\n      inputOffset,\n      border_type);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardBias(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t groups,\n    size_t dimension,\n    const size_t dstSize[]);\nC2_MKL_SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardBias<float>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t groups,\n    size_t dimension,\n    const size_t dstSize[]) {\n  return dnnGroupsConvolutionCreateBackwardBias_F32(\n      pConvolution, attributes, algorithm, groups, dimension, dstSize);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardBias<double>(\n    dnnPrimitive_t* pConvolution,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t algorithm,\n    size_t groups,\n    size_t dimension,\n    const size_t dstSize[]) {\n  return dnnGroupsConvolutionCreateBackwardBias_F64(\n      pConvolution, attributes, algorithm, groups, dimension, dstSize);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnReLUCreateForward(\n    dnnPrimitive_t* pRelu,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float negativeSlope);\nC2_MKL_SPEC_PREFIX dnnError_t dnnReLUCreateForward<float>(\n    dnnPrimitive_t* pRelu,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float negativeSlope) {\n  return dnnReLUCreateForward_F32(pRelu, attributes, dataLayout, negativeSlope);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnReLUCreateForward<double>(\n    dnnPrimitive_t* pRelu,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n   
 float negativeSlope) {\n  return dnnReLUCreateForward_F64(pRelu, attributes, dataLayout, negativeSlope);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnReLUCreateBackward(\n    dnnPrimitive_t* pRelu,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t diffLayout,\n    const dnnLayout_t dataLayout,\n    float negativeSlope);\nC2_MKL_SPEC_PREFIX dnnError_t dnnReLUCreateBackward<float>(\n    dnnPrimitive_t* pRelu,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t diffLayout,\n    const dnnLayout_t dataLayout,\n    float negativeSlope) {\n  return dnnReLUCreateBackward_F32(\n      pRelu, attributes, diffLayout, dataLayout, negativeSlope);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnReLUCreateBackward<double>(\n    dnnPrimitive_t* pRelu,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t diffLayout,\n    const dnnLayout_t dataLayout,\n    float negativeSlope) {\n  return dnnReLUCreateBackward_F64(\n      pRelu, attributes, diffLayout, dataLayout, negativeSlope);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnLRNCreateForward(\n    dnnPrimitive_t* pLrn,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    size_t kernel_size,\n    float alpha,\n    float beta,\n    float k);\nC2_MKL_SPEC_PREFIX dnnError_t dnnLRNCreateForward<float>(\n    dnnPrimitive_t* pLrn,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    size_t kernel_size,\n    float alpha,\n    float beta,\n    float k) {\n  return dnnLRNCreateForward_F32(\n      pLrn, attributes, dataLayout, kernel_size, alpha, beta, k);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnLRNCreateForward<double>(\n    dnnPrimitive_t* pLrn,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    size_t kernel_size,\n    float alpha,\n    float beta,\n    float k) {\n  return dnnLRNCreateForward_F64(\n      pLrn, attributes, dataLayout, kernel_size, alpha, beta, k);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnLRNCreateBackward(\n    
dnnPrimitive_t* pLrn,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t diffLayout,\n    const dnnLayout_t dataLayout,\n    size_t kernel_size,\n    float alpha,\n    float beta,\n    float k);\nC2_MKL_SPEC_PREFIX dnnError_t dnnLRNCreateBackward<float>(\n    dnnPrimitive_t* pLrn,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t diffLayout,\n    const dnnLayout_t dataLayout,\n    size_t kernel_size,\n    float alpha,\n    float beta,\n    float k) {\n  return dnnLRNCreateBackward_F32(\n      pLrn, attributes, diffLayout, dataLayout, kernel_size, alpha, beta, k);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnLRNCreateBackward<double>(\n    dnnPrimitive_t* pLrn,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t diffLayout,\n    const dnnLayout_t dataLayout,\n    size_t kernel_size,\n    float alpha,\n    float beta,\n    float k) {\n  return dnnLRNCreateBackward_F64(\n      pLrn, attributes, diffLayout, dataLayout, kernel_size, alpha, beta, k);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnPoolingCreateForward(\n    dnnPrimitive_t* pPooling,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t op,\n    const dnnLayout_t srcLayout,\n    const size_t kernelSize[],\n    const size_t kernelStride[],\n    const int inputOffset[],\n    const dnnBorder_t border_type);\nC2_MKL_SPEC_PREFIX dnnError_t dnnPoolingCreateForward<float>(\n    dnnPrimitive_t* pPooling,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t op,\n    const dnnLayout_t srcLayout,\n    const size_t kernelSize[],\n    const size_t kernelStride[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnPoolingCreateForward_F32(\n      pPooling,\n      attributes,\n      op,\n      srcLayout,\n      kernelSize,\n      kernelStride,\n      inputOffset,\n      border_type);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnPoolingCreateForward<double>(\n    dnnPrimitive_t* pPooling,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t 
op,\n    const dnnLayout_t srcLayout,\n    const size_t kernelSize[],\n    const size_t kernelStride[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnPoolingCreateForward_F64(\n      pPooling,\n      attributes,\n      op,\n      srcLayout,\n      kernelSize,\n      kernelStride,\n      inputOffset,\n      border_type);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnPoolingCreateBackward(\n    dnnPrimitive_t* pPooling,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t op,\n    const dnnLayout_t srcLayout,\n    const size_t kernelSize[],\n    const size_t kernelStride[],\n    const int inputOffset[],\n    const dnnBorder_t border_type);\nC2_MKL_SPEC_PREFIX dnnError_t dnnPoolingCreateBackward<float>(\n    dnnPrimitive_t* pPooling,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t op,\n    const dnnLayout_t srcLayout,\n    const size_t kernelSize[],\n    const size_t kernelStride[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnPoolingCreateBackward_F32(\n      pPooling,\n      attributes,\n      op,\n      srcLayout,\n      kernelSize,\n      kernelStride,\n      inputOffset,\n      border_type);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnPoolingCreateBackward<double>(\n    dnnPrimitive_t* pPooling,\n    dnnPrimitiveAttributes_t attributes,\n    dnnAlgorithm_t op,\n    const dnnLayout_t srcLayout,\n    const size_t kernelSize[],\n    const size_t kernelStride[],\n    const int inputOffset[],\n    const dnnBorder_t border_type) {\n  return dnnPoolingCreateBackward_F64(\n      pPooling,\n      attributes,\n      op,\n      srcLayout,\n      kernelSize,\n      kernelStride,\n      inputOffset,\n      border_type);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnConcatCreate(\n    dnnPrimitive_t* pConcat,\n    dnnPrimitiveAttributes_t attributes,\n    const size_t N,\n    dnnLayout_t src[]);\nC2_MKL_SPEC_PREFIX dnnError_t dnnConcatCreate<float>(\n    dnnPrimitive_t* pConcat,\n    
dnnPrimitiveAttributes_t attributes,\n    const size_t N,\n    dnnLayout_t src[]) {\n  return dnnConcatCreate_F32(pConcat, attributes, N, src);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnConcatCreate<double>(\n    dnnPrimitive_t* pConcat,\n    dnnPrimitiveAttributes_t attributes,\n    const size_t N,\n    dnnLayout_t src[]) {\n  return dnnConcatCreate_F64(pConcat, attributes, N, src);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnSplitCreate(\n    dnnPrimitive_t* pSplit,\n    dnnPrimitiveAttributes_t attributes,\n    const size_t N,\n    dnnLayout_t src,\n    size_t dst[]);\nC2_MKL_SPEC_PREFIX dnnError_t dnnSplitCreate<float>(\n    dnnPrimitive_t* pSplit,\n    dnnPrimitiveAttributes_t attributes,\n    const size_t N,\n    dnnLayout_t src,\n    size_t dst[]) {\n  return dnnSplitCreate_F32(pSplit, attributes, N, src, dst);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnSplitCreate<double>(\n    dnnPrimitive_t* pSplit,\n    dnnPrimitiveAttributes_t attributes,\n    const size_t N,\n    dnnLayout_t src,\n    size_t dst[]) {\n  return dnnSplitCreate_F64(pSplit, attributes, N, src, dst);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnSumCreate(\n    dnnPrimitive_t* pSum,\n    dnnPrimitiveAttributes_t attributes,\n    const size_t nSummands,\n    dnnLayout_t layout,\n    T* coefficients);\nC2_MKL_SPEC_PREFIX dnnError_t dnnSumCreate<float>(\n    dnnPrimitive_t* pSum,\n    dnnPrimitiveAttributes_t attributes,\n    const size_t nSummands,\n    dnnLayout_t layout,\n    float* coefficients) {\n  return dnnSumCreate_F32(pSum, attributes, nSummands, layout, coefficients);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnSumCreate<double>(\n    dnnPrimitive_t* pSum,\n    dnnPrimitiveAttributes_t attributes,\n    const size_t nSummands,\n    dnnLayout_t layout,\n    double* coefficients) {\n  return dnnSumCreate_F64(pSum, attributes, nSummands, layout, coefficients);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnBatchNormalizationCreateForward(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t 
attributes,\n    const dnnLayout_t dataLayout,\n    float eps);\nC2_MKL_SPEC_PREFIX dnnError_t dnnBatchNormalizationCreateForward<float>(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float eps) {\n  return dnnBatchNormalizationCreateForward_F32(\n      pBatchNormalization, attributes, dataLayout, eps);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnBatchNormalizationCreateForward<double>(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float eps) {\n  return dnnBatchNormalizationCreateForward_F64(\n      pBatchNormalization, attributes, dataLayout, eps);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnBatchNormalizationCreateBackwardData(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float eps);\nC2_MKL_SPEC_PREFIX dnnError_t dnnBatchNormalizationCreateBackwardData<float>(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float eps) {\n  return dnnBatchNormalizationCreateBackwardData_F32(\n      pBatchNormalization, attributes, dataLayout, eps);\n}\n\nC2_MKL_SPEC_PREFIX dnnError_t dnnBatchNormalizationCreateBackwardData<double>(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float eps) {\n  return dnnBatchNormalizationCreateBackwardData_F64(\n      pBatchNormalization, attributes, dataLayout, eps);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnBatchNormalizationCreateBackwardScaleShift(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float eps);\nC2_MKL_SPEC_PREFIX dnnError_t\ndnnBatchNormalizationCreateBackwardScaleShift<float>(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t attributes,\n    const 
dnnLayout_t dataLayout,\n    float eps) {\n  return dnnBatchNormalizationCreateBackwardScaleShift_F32(\n      pBatchNormalization, attributes, dataLayout, eps);\n}\nC2_MKL_SPEC_PREFIX dnnError_t\ndnnBatchNormalizationCreateBackwardScaleShift<double>(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float eps) {\n  return dnnBatchNormalizationCreateBackwardScaleShift_F64(\n      pBatchNormalization, attributes, dataLayout, eps);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnBatchNormalizationCreateForward_v2(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float eps,\n    unsigned int flags);\nC2_MKL_SPEC_PREFIX dnnError_t dnnBatchNormalizationCreateForward_v2<float>(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float eps,\n    unsigned int flags) {\n  return dnnBatchNormalizationCreateForward_v2_F32(\n      pBatchNormalization, attributes, dataLayout, eps, flags);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnBatchNormalizationCreateForward_v2<double>(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float eps,\n    unsigned int flags) {\n  return dnnBatchNormalizationCreateForward_v2_F64(\n      pBatchNormalization, attributes, dataLayout, eps, flags);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnBatchNormalizationCreateBackward_v2(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float eps,\n    unsigned int flags);\nC2_MKL_SPEC_PREFIX dnnError_t dnnBatchNormalizationCreateBackward_v2<float>(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float eps,\n    unsigned int flags) {\n  return 
dnnBatchNormalizationCreateBackward_v2_F32(\n      pBatchNormalization, attributes, dataLayout, eps, flags);\n}\n\nC2_MKL_SPEC_PREFIX dnnError_t dnnBatchNormalizationCreateBackward_v2<double>(\n    dnnPrimitive_t* pBatchNormalization,\n    dnnPrimitiveAttributes_t attributes,\n    const dnnLayout_t dataLayout,\n    float eps,\n    unsigned int flags) {\n  return dnnBatchNormalizationCreateBackward_v2_F64(\n      pBatchNormalization, attributes, dataLayout, eps, flags);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnInnerProductCreateForward(\n    dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[],\n    size_t outputChannels);\nC2_MKL_SPEC_PREFIX dnnError_t dnnInnerProductCreateForward<float>(\n    dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[],\n    size_t outputChannels) {\n  return dnnInnerProductCreateForward_F32(\n      pInnerProduct, attributes, dimensions, srcSize, outputChannels);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnInnerProductCreateForward<double>(\n    dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[],\n    size_t outputChannels) {\n  return dnnInnerProductCreateForward_F64(\n      pInnerProduct, attributes, dimensions, srcSize, outputChannels);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnInnerProductCreateForwardBias(\n    dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[],\n    size_t outputChannels);\nC2_MKL_SPEC_PREFIX dnnError_t dnnInnerProductCreateForwardBias<float>(\n    dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[],\n    size_t outputChannels) {\n  return dnnInnerProductCreateForwardBias_F32(\n      pInnerProduct, attributes, dimensions, srcSize, 
outputChannels);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnInnerProductCreateForwardBias<double>(\n    dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[],\n    size_t outputChannels) {\n  return dnnInnerProductCreateForwardBias_F64(\n      pInnerProduct, attributes, dimensions, srcSize, outputChannels);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnInnerProductCreateBackwardData(\n    dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[],\n    size_t outputChannels);\nC2_MKL_SPEC_PREFIX dnnError_t dnnInnerProductCreateBackwardData<float>(\n    dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[],\n    size_t outputChannels) {\n  return dnnInnerProductCreateBackwardData_F32(\n      pInnerProduct, attributes, dimensions, srcSize, outputChannels);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnInnerProductCreateBackwardData<double>(\n    dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[],\n    size_t outputChannels) {\n  return dnnInnerProductCreateBackwardData_F64(\n      pInnerProduct, attributes, dimensions, srcSize, outputChannels);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnInnerProductCreateBackwardFilter(\n    dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[],\n    size_t outputChannels);\nC2_MKL_SPEC_PREFIX dnnError_t dnnInnerProductCreateBackwardFilter<float>(\n    dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[],\n    size_t outputChannels) {\n  return dnnInnerProductCreateBackwardFilter_F32(\n      pInnerProduct, attributes, dimensions, srcSize, outputChannels);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnInnerProductCreateBackwardFilter<double>(\n 
   dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[],\n    size_t outputChannels) {\n  return dnnInnerProductCreateBackwardFilter_F64(\n      pInnerProduct, attributes, dimensions, srcSize, outputChannels);\n}\n\nC2_MKL_TEMPLATE_PREFIX dnnError_t dnnInnerProductCreateBackwardBias(\n    dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[]);\nC2_MKL_SPEC_PREFIX dnnError_t dnnInnerProductCreateBackwardBias<float>(\n    dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[]) {\n  return dnnInnerProductCreateBackwardBias_F32(\n      pInnerProduct, attributes, dimensions, srcSize);\n}\nC2_MKL_SPEC_PREFIX dnnError_t dnnInnerProductCreateBackwardBias<double>(\n    dnnPrimitive_t* pInnerProduct,\n    dnnPrimitiveAttributes_t attributes,\n    size_t dimensions,\n    const size_t srcSize[]) {\n  return dnnInnerProductCreateBackwardBias_F64(\n      pInnerProduct, attributes, dimensions, srcSize);\n}\n\n} // namespace mkl\n} // namespace caffe2\n\n// Undef macros to make sure that things are clean.\n#undef C2_MKL_TEMPLATE_PREFIX\n#undef C2_MKL_SPEC_PREFIX\n\n#endif // CAFFE2_UTILS_MKL_MKL_DNN_CPPWRAPPER_H\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mkl/utils/mkl_memory.h",
    "content": "#ifndef CAFFE2_UTILS_MKL_MKL_MEMORY_H_\n#define CAFFE2_UTILS_MKL_MKL_MEMORY_H_\n\n#include <string>\n#include <vector>\n#include <mutex>\n\n#include \"caffe2/core/flags.h\" // for TIndex\n#include \"caffe2/core/tensor.h\" // for TIndex\n#include \"caffe2/mkl/utils/mkl_dnn_cppwrapper.h\"\n\n// A global boolean variable that controls the behavior when we call View() on\n// an MKLMemory: if it is set true, then the View() function will actually\n// change the underlying storage. If it is set false, an implicit copy is\n// triggered but the original storage is not affected.\nCAFFE2_DECLARE_bool(caffe2_mkl_implicit_layout_change);\n\nnamespace caffe2 {\nnamespace mkl {\n\ntemplate <typename T>\nclass PrimitiveWrapper {\n public:\n  PrimitiveWrapper() {}\n  // Creates a primitive wrapper from an existing primitive. The wrapper\n  // takes over ownership.\n  explicit PrimitiveWrapper(dnnPrimitive_t primitive) : primitive_(primitive) {}\n\n  template <typename Creator, typename FirstArg, typename... Args>\n  PrimitiveWrapper(Creator creator, FirstArg&& arg, Args&&... args) {\n    creator(&primitive_, arg, args...);\n  }\n\n  ~PrimitiveWrapper() {\n    if (primitive_) {\n      MKLDNN_CHECK(dnnDelete<T>(primitive_));\n    }\n  }\n\n  template <typename Creator, typename... Args>\n  void Reset(Creator creator, Args&&... 
args) {\n    if (primitive_) {\n      MKLDNN_SAFE_CALL(dnnDelete<T>(primitive_));\n    }\n    creator(&primitive_, args...);\n  }\n\n  operator dnnPrimitive_t() const {\n    return primitive_;\n  }\n\n private:\n  dnnPrimitive_t primitive_ = 0;\n  DISABLE_COPY_AND_ASSIGN(PrimitiveWrapper);\n};\n\ntemplate <typename T>\nclass LayoutWrapper {\n public:\n  LayoutWrapper() {}\n  // Create a user layout from a TensorCPU with the given shapes.\n  explicit LayoutWrapper(const TensorCPU& tensor) {\n    Reset(tensor);\n  }\n\n  // Create an internal layout from the primitive and type.\n  LayoutWrapper(const dnnPrimitive_t primitive, const dnnResourceType_t type) {\n    Reset(primitive, type);\n  }\n\n  // Create a user layout from the given dimension, size and strides.\n  LayoutWrapper(\n      const size_t dimension,\n      const size_t size[],\n      const size_t strides[]) {\n    Reset(dimension, size, strides);\n  }\n\n  // Destructs the layout wrapper.\n  ~LayoutWrapper() {\n    if (layout_)\n      MKLDNN_CHECK(dnnLayoutDelete<T>(layout_));\n  }\n\n  // Create a user layout from a TensorCPU with the given shapes.\n  void Reset(const TensorCPU& tensor) {\n    if (layout_)\n      MKLDNN_CHECK(dnnLayoutDelete<T>(layout_));\n    CAFFE_ENFORCE(tensor.size(), \"Cannot reset with an empty tensor.\");\n    size_t dimension = tensor.ndim();\n    size_t size[dimension];\n    size_t strides[dimension];\n    for (int i = 0; i < dimension; ++i) {\n      size[i] = tensor.dim(dimension - i - 1);\n      strides[i] = (i == 0) ? 
1 : strides[i - 1] * size[i - 1];\n    }\n    MKLDNN_SAFE_CALL(dnnLayoutCreate<T>(&layout_, dimension, size, strides));\n  }\n\n  // Create an internal layout from the primitive and type.\n  void Reset(const dnnPrimitive_t primitive, const dnnResourceType_t type) {\n    CAFFE_ENFORCE(primitive, \"Cannot reset with an unknwon primitive.\");\n    CAFFE_ENFORCE(\n        type != dnnResourceNumber,\n        \"Cannot reset with an unknown resource number.\");\n    if (layout_) {\n      MKLDNN_CHECK(dnnLayoutDelete<T>(layout_));\n    }\n    MKLDNN_SAFE_CALL(\n        dnnLayoutCreateFromPrimitive<T>(&layout_, primitive, type));\n  }\n\n  // Create a user layout from the given dimension, size and strides.\n  void\n  Reset(const size_t dimension, const size_t size[], const size_t strides[]) {\n    if (layout_)\n      MKLDNN_CHECK(dnnLayoutDelete<T>(layout_));\n    MKLDNN_SAFE_CALL(dnnLayoutCreate<T>(&layout_, dimension, size, strides));\n  }\n\n  operator dnnLayout_t() const {\n    return layout_;\n  }\n\n private:\n  dnnLayout_t layout_ = 0;\n  DISABLE_COPY_AND_ASSIGN(LayoutWrapper);\n};\n\n/**\n * @brief A wrapper around an opaque MKL internal resource that has certain\n * layouts and convertion primitives set up.\n *\n * Most of the MKLMemory functions are not thread safe.\n */\ntemplate <typename T>\nclass MKLMemory {\n public:\n  // Initializes an empty MKLMemory.\n  MKLMemory() {}\n  // Initialize an MKLMemory with the given size, strides, dnn\n  // primitive and type.\n  MKLMemory(\n      const size_t dimension,\n      const size_t size[],\n      const size_t strides[],\n      const dnnPrimitive_t primitive = nullptr,\n      const dnnResourceType_t type = dnnResourceNumber,\n      bool share_mem_if_possible = false) {\n    Reset(dimension, size, strides, primitive, type, share_mem_if_possible);\n  }\n\n  // Initialize an MKLMemory, with the given dimension assuming a C-contiguous\n  // storage.\n  template <typename IndexType>\n  explicit MKLMemory(\n      const 
vector<IndexType>& dims,\n      const dnnPrimitive_t primitive = nullptr,\n      const dnnResourceType_t type = dnnResourceNumber,\n      bool share_mem_if_possible = false) {\n    Reset(dims, primitive, type, share_mem_if_possible);\n  }\n\n  // Initialize an MKLMemory with the given size, strides, dnn\n  // primitive and type.\n  void Reset(\n      const size_t dimension,\n      const size_t size[],\n      const size_t strides[],\n      const dnnPrimitive_t primitive = nullptr,\n      const dnnResourceType_t type = dnnResourceNumber,\n      bool share_mem_if_possible = false) {\n    buffer_.reset();\n    dims_.resize(dimension);\n    for (int i = 0; i < dimension; ++i) {\n      dims_[i] = size[dimension - 1 - i];\n    }\n    user_layout_.Reset(dimension, size, strides);\n    if (primitive) {\n      layout_.Reset(primitive, type);\n    } else {\n      layout_.Reset(dimension, size, strides);\n    }\n    convert_in_.Reset(dnnConversionCreate<T>, user_layout_, layout_);\n    convert_out_.Reset(dnnConversionCreate<T>, layout_, user_layout_);\n    share_mem_if_possible_ = share_mem_if_possible;\n    layout_is_user_layout_ = dnnLayoutCompare<T>(layout_, user_layout_);\n    VLOG(2) << \"layout is user layout? 
\" << layout_is_user_layout_;\n    if (!share_mem_if_possible_) {\n      // If we are not going to share memory, we will simply allocate\n      // memory upfront.\n      buffer();\n    }\n  }\n\n  // Initialize an MKLMemory, with the given dimension assuming a C-contiguous\n  // storage.\n  template <typename IndexType>\n  void Reset(\n      const vector<IndexType>& dims,\n      const dnnPrimitive_t primitive = nullptr,\n      const dnnResourceType_t type = dnnResourceNumber,\n      bool share_mem_if_possible = false) {\n    buffer_.reset();\n    dims_.resize(dims.size());\n    for (int i = 0; i < dims.size(); ++i) {\n      dims_[i] = dims[i];\n    }\n    size_t dimension = dims.size();\n    vector<size_t> size(dimension);\n    vector<size_t> strides(dimension);\n    for (int i = 0; i < dimension; ++i) {\n      size[i] = dims[dimension - i - 1];\n      strides[i] = (i == 0) ? 1 : strides[i - 1] * size[i - 1];\n    }\n    user_layout_.Reset(dims.size(), size.data(), strides.data());\n    if (primitive) {\n      layout_.Reset(primitive, type);\n    } else {\n      layout_.Reset(dimension, size.data(), strides.data());\n    }\n    convert_in_.Reset(dnnConversionCreate<T>, user_layout_, layout_);\n    convert_out_.Reset(dnnConversionCreate<T>, layout_, user_layout_);\n    share_mem_if_possible_ = share_mem_if_possible;\n    layout_is_user_layout_ = dnnLayoutCompare<T>(layout_, user_layout_);\n    VLOG(2) << \"layout is user layout? 
\" << layout_is_user_layout_;\n    if (!share_mem_if_possible_) {\n      // If we are not going to share memory, we will simply allocate\n      // memory upfront.\n      buffer();\n    }\n  }\n\n  // Destructs the MKLMemory.\n  ~MKLMemory() {}\n\n  void CopyFrom(const void* ptr) {\n    if (share_mem_if_possible_ && layout_is_user_layout_) {\n      VLOG(2) << \"Sharing underlying memory and skip copy.\";\n      buffer_.reset(const_cast<void*>(ptr), [](void*) -> void {});\n    } else {\n      VLOG(2) << \"Copying external content.\";\n      MKLDNN_SAFE_CALL(dnnConversionExecute<T>(\n          convert_in_, const_cast<void*>(ptr), buffer()));\n    }\n  }\n\n  void CopyFrom(const TensorCPU& tensor) {\n    CAFFE_ENFORCE_EQ(\n        tensor.dims(),\n        dims_,\n        \"Dims does not match the expected dims of the resource.\");\n    CopyFrom(tensor.template data<T>());\n  }\n\n  void CopyFrom(const MKLMemory<T>& other) {\n    if (share_mem_if_possible_ && dnnLayoutCompare(other.layout_, layout_)) {\n      buffer_ = other.buffer_;\n    } else {\n      PrimitiveWrapper<T> convert(\n          dnnConversionCreate<T>, other.layout_, layout_);\n      MKLDNN_SAFE_CALL(\n          dnnConversionExecute<T>(convert, other.buffer_, buffer()));\n    }\n  }\n\n  bool ShareFromRaw(const void* ptr) {\n    if (share_mem_if_possible_ && layout_is_user_layout_) {\n      buffer_.reset(const_cast<void*>(ptr), [](void*) -> void {});\n      return true;\n    } else {\n      return false;\n    }\n  }\n\n  bool ShareFromTensor(const TensorCPU& tensor) {\n    CAFFE_ENFORCE_EQ(\n        tensor.dims(),\n        dims_,\n        \"Dims does not match the expected dims of the resource.\");\n    return ShareFromRaw(tensor.template data<T>());\n  }\n\n  bool ShareFrom(const MKLMemory<T>& other) {\n    if (share_mem_if_possible_ && dnnLayoutCompare<T>(other.layout_, layout_)) {\n      VLOG(2) << \"Sharing underlying memory.\";\n      buffer_ = other.buffer_;\n      if (!buffer_.get()) {\n        
VLOG(2) << \"Warning: the source MKLMemory has no content yet, so the \"\n                   \"sharing actually has no effect.\";\n      }\n      return true;\n    } else {\n      VLOG(2) << \"Not sharing underlying memory.\";\n      return false;\n    }\n  }\n\n  void CopyTo(void* ptr) const {\n    if (buffer_.get() == ptr) {\n      // This is already mapping to the same memory region. Skip copy.\n      VLOG(2) << \"CopyTo does not need actual copying, as we are sharing \"\n                 \"memory with the output.\";\n      return;\n    }\n    CAFFE_ENFORCE(\n        buffer_.get(), \"Canot copy out from an uninitialized MKLMemory.\");\n    VLOG(2) << \"Copy to external memory.\";\n    MKLDNN_SAFE_CALL(dnnConversionExecute<T>(convert_out_, buffer_.get(), ptr));\n  }\n\n  void CopyTo(TensorCPU* tensor) const {\n    if (tensor->size() > 0 && buffer_.get() == tensor->mutable_data<T>()) {\n      // This is already mapping to the same memory region. Skip copy.\n      VLOG(2) << \"CopyTo does not need actual copying, as we are sharing \"\n                 \"memory with the output.\";\n      return;\n    }\n    tensor->Resize(dims_);\n    CopyTo(tensor->mutable_data<T>());\n  }\n\n  // Copies to another MKL memory.\n  //\n  // This function\n  void CopyTo(\n      MKLMemory<T>* other,\n      const dnnPrimitive_t primitive = nullptr,\n      const dnnResourceType_t type = dnnResourceNumber) {\n    if (buffer_.get() == other->buffer_.get()) {\n      VLOG(2) << \"CopyTo does not need actual copying, as we are sharing \"\n                 \"memory with the output.\";\n      // This is already mapping to the same memory region. 
Skip copy.\n      return;\n    }\n    CAFFE_ENFORCE(\n        buffer_.get(), \"Canot copy out from an uninitialized MKLMemory.\");\n    // TODO(jiayq): if primitive creation is a big overhead and we will be\n    // consistently copying stuff with fixed src and dst layouts, consider\n    // making a cache for the primitive below.\n    VLOG(2) << \"CopyTo requires copying. Performing direct copy.\";\n    PrimitiveWrapper<T> convert(\n        dnnConversionCreate<T>, layout_, other->layout_);\n    if (dnnPrimitive_t(convert) == nullptr ||\n        dnnConversionExecute<T>(convert, buffer_.get(), other->buffer()) !=\n            E_SUCCESS) {\n      VLOG(2) << \"Direct copy failed, will need to allocate output.\";\n      // If CopyTo directly did not succeed, it could be because the target\n      // MKLMemory is not having the right layout. In this case we will reset\n      // the target and then do another copy.\n      other->Reset(dims_, primitive, type);\n      PrimitiveWrapper<T> convert2(\n          dnnConversionCreate<T>, layout_, other->layout_);\n      MKLDNN_SAFE_CALL(\n          dnnConversionExecute<T>(convert2, buffer_.get(), other->buffer()));\n    }\n  }\n\n  inline void* buffer() {\n    if (buffer_ == nullptr) {\n      CAFFE_ENFORCE(\n          layout_ != nullptr, \"Trying to allocate buffer but layout is empty.\");\n      void* allocated = nullptr;\n      MKLDNN_SAFE_CALL(dnnAllocateBuffer<T>(&allocated, layout_));\n      buffer_.reset(allocated, [](void* ptr) -> void {\n        MKLDNN_CHECK(dnnReleaseBuffer<T>(ptr));\n      });\n    }\n    return buffer_.get();\n  }\n\n  // MKLDNN does not use const void* even for the inputs, so we will\n  // have to use void* and rely on the underlying implementation to make\n  // sure that the buffer is actually not changed.\n  inline void* buffer() const {\n    CAFFE_ENFORCE(\n        buffer_ != nullptr, \"Trying to refer to an unallocated buffer.\");\n    return buffer_.get();\n  }\n\n  inline const vector<TIndex>& 
dims() const {\n    return dims_;\n  }\n\n  inline const int ndim() const { return dims_.size(); }\n\n  inline int dim32(const int i) const {\n    CAFFE_ENFORCE_LT(dims_.at(i), std::numeric_limits<int>::max());\n    return static_cast<int>(dims_[i]);\n  }\n\n  /**\n   * Returns the i-th dimension of the tensor. Note that the passed in index\n   * must be between 0 (inclusive) and the number of dimensions, otherwise\n   * this function will produce a fatal message.\n   */\n  inline TIndex dim(const int i) const {\n    return dims_.at(i);\n  }\n\n  inline const LayoutWrapper<T>& layout() const {\n    return layout_;\n  }\n\n  // Returns a view of the content. We mark this function const, but be noted\n  // that the returned std::shared_ptr is not const protected - user discretion\n  // is recommended for correctness.\n  std::shared_ptr<void> View(\n      dnnLayout_t layout_wanted,\n      dnnPrimitive_t primitive,\n      dnnResourceType_t type) const {\n    std::lock_guard<std::mutex> lock(buffer_lock_);\n    if (dnnLayoutCompare<T>(layout_wanted, layout_)) {\n      // If they are the same, return the original content.\n      VLOG(2) << \"Creating a view without the need of copying.\";\n      return std::shared_ptr<void>(buffer_);\n    } else {\n      void* temp_buffer;\n      VLOG(2) << \"Creating a view with copying.\";\n      MKLDNN_SAFE_CALL(dnnAllocateBuffer<T>(&temp_buffer, layout_wanted));\n      PrimitiveWrapper<T> convert(\n          dnnConversionCreate<T>, layout_, layout_wanted);\n      MKLDNN_SAFE_CALL(dnnConversionExecute<T>(\n          convert, buffer_.get(), temp_buffer));\n      if (FLAGS_caffe2_mkl_implicit_layout_change) {\n        VLOG(2) << \"Implicit layout change set. 
\"\n                   \"Changing the underlying storage.\";\n        // We will need to call Reset to set up all the member variables.\n        // This is not thread safe, so we might want to double check if this\n        // makes sense in actual use cases.\n        const_cast<MKLMemory<T>*>(this)->Reset(\n            dims_, primitive, type, share_mem_if_possible_);\n        CAFFE_ENFORCE(dnnLayoutCompare<T>(layout_wanted, layout_),\n                      \"You passed in a target layout that is not \"\n                      \"generated by the given primitive and type.\");\n        buffer_.reset(temp_buffer, [](void* ptr) -> void {\n                MKLDNN_CHECK(dnnReleaseBuffer<T>(ptr));\n            });\n        return std::shared_ptr<void>(buffer_);\n      } else {\n        return std::shared_ptr<void>(temp_buffer, [](void* ptr) -> void {\n                MKLDNN_CHECK(dnnReleaseBuffer<T>(ptr));\n            });\n      }\n    }\n  }\n\n private:\n  bool share_mem_if_possible_;\n  bool layout_is_user_layout_;\n  // The internal buffer in the specific dnn layout.\n  // It is marked mutable but any modification in a const function should\n  // be accompanied by the buffer lock, see the View() function.\n  mutable std::shared_ptr<void> buffer_;\n  // A mutex to control the access of buffer in the View() function.\n  mutable std::mutex buffer_lock_;\n  // The dimensions in the same order as Caffe2 does. This is used to\n  // interface with C2.\n  vector<TIndex> dims_;\n  // The user dnn layout.\n  LayoutWrapper<T> user_layout_;\n  // The internal dnn layout.\n  LayoutWrapper<T> layout_;\n  // The primitive to use to convert from user layout to internal layout\n  PrimitiveWrapper<T> convert_in_;\n  // The primitive to use to convert from internal layout to user layout\n  PrimitiveWrapper<T> convert_out_;\n\n  DISABLE_COPY_AND_ASSIGN(MKLMemory);\n};\n\n} // namespace mkl\n} // namespace caffe2\n\n#endif // CAFFE2_UTILS_MKL_MKL_MEMORY_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mkl/utils/mkl_operator.h",
    "content": "#ifndef CAFFE2_UTILS_MKL_OPERATOR_H_\n#define CAFFE2_UTILS_MKL_OPERATOR_H_\n\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/mkl/utils/mkl_dnn_cppwrapper.h\"\n#include \"caffe2/mkl/utils/mkl_memory.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nnamespace caffe2 {\n\nCAFFE_DECLARE_REGISTRY(\n    MKLOperatorRegistry,\n    OperatorBase,\n    const OperatorDef&,\n    Workspace*);\n#define REGISTER_MKL_OPERATOR_CREATOR(key, ...) \\\n  CAFFE_REGISTER_CREATOR(MKLOperatorRegistry, key, __VA_ARGS__)\n#define REGISTER_MKL_OPERATOR(name, ...) \\\n  CAFFE_REGISTER_CLASS(MKLOperatorRegistry, name, __VA_ARGS__)\n#define REGISTER_MKL_OPERATOR_STR(str_name, ...) \\\n  CAFFE_REGISTER_TYPED_CLASS(MKLOperatorRegistry, str_name, __VA_ARGS__)\n\n#define REGISTER_MKL_OPERATOR_WITH_ENGINE(name, engine, ...) \\\n  CAFFE_REGISTER_CLASS(MKLOperatorRegistry, name##_ENGINE_##engine, __VA_ARGS__)\n\nnamespace mkl {\n// MKLOperator is the base scaffolding of the operators that uses MKLDNN. It\n// provides a few operators that are useful to MKLDNN specific implementations.\ntemplate <typename T>\nclass MKLOperator : public OperatorBase {\n public:\n  explicit MKLOperator(const OperatorDef& operator_def, Workspace* ws)\n      : OperatorBase(operator_def, ws),\n        context_(operator_def.device_option()) {}\n  virtual ~MKLOperator() {}\n\n  inline const MKLMemory<T>& Input(int idx) {\n    return OperatorBase::template Input<MKLMemory<T>>(idx);\n  }\n  inline MKLMemory<T>* Output(int idx) {\n    return OperatorBase::template Output<MKLMemory<T>>(idx);\n  }\n\n  // The run function of Operator switches to the device, and then carries out\n  // the actual computation with RunOnDevice(). 
You should implement RunOnDevice\n  // instead of Run().\n  bool Run(int /* unused */ /*stream_id*/) final {\n    // Since MKLDNN does not need to do SwithToDevice and\n    // FinishDeviceComputation,\n    // it is always just a re-route to RunOnDevice().\n    try {\n      return RunOnDevice();\n    } catch (EnforceNotMet& err) {\n      err.AppendMessage(\n          \"Error from operator: \\n\" + ProtoDebugString(debug_def()));\n      throw;\n    }\n  }\n\n  // Waits for a previous event. Note that to properly wait and run\n  // asynchronously, WaitEvent, RunAsync and Record should all be executed\n  // on the same CPU thread.\n  void WaitEvent(const Event& ev) final {\n    context_.SwitchToDevice();\n    context_.WaitEvent(ev);\n  }\n\n  void Record() final {\n    context_.SwitchToDevice();\n    context_.Record(&event_);\n  }\n\n  virtual bool RunOnDevice() = 0;\n\n  inline void ExecutePrimitive() {\n    MKLDNN_SAFE_CALL(mkl::dnnExecute<T>(primitive_, resources_));\n  }\n\n protected:\n  MKLContext context_;\n  // The primitive used in the operator.\n  PrimitiveWrapper<T> primitive_;\n  // Size cache for all the input sizes.\n  vector<vector<TIndex>> input_size_cache_;\n  // An internal MKLMemory buffer. This is usually handy when we have a\n  // single output from the operator. 
If your operator has multiple outputs\n  // then you should allocate your own buffer.\n  MKLMemory<T> buffer_;\n  // The resources vector that we will need to use;\n  void* resources_[dnnResourceNumber];\n};\n} // namespace mkl\n\n#define USE_MKLOPERATOR_FUNCTIONS(T)                            \\\n  USE_OPERATOR_BASE_FUNCTIONS;                                  \\\n  /* using override */ using MKLOperator<T>::Input;             \\\n  /* using override */ using MKLOperator<T>::Output;            \\\n  /* using override */ using MKLOperator<T>::ExecutePrimitive;  \\\n  /* using override */ using MKLOperator<T>::primitive_;        \\\n  /* using override */ using MKLOperator<T>::input_size_cache_; \\\n  /* using override */ using MKLOperator<T>::buffer_;           \\\n  /* using override */ using MKLOperator<T>::resources_\n\n#define USE_SIMPLE_MKL_CTOR_DTOR(name, T)              \\\n  name(const OperatorDef& operator_def, Workspace* ws) \\\n      : MKLOperator<T>(operator_def, ws) {}            \\\n  virtual ~name() {}\n\n} // namespace caffe2\n\n#endif // CAFFE2_UTILS_MKL_OPERATOR_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mkl/utils/mkl_version_check.h",
    "content": "#ifndef CAFFE2_UTILS_MKL_MKL_VERSION_CHECK_H_\n#define CAFFE2_UTILS_MKL_MKL_VERSION_CHECK_H_\n#ifdef CAFFE2_USE_MKL\n\n#include <mkl.h>\n\n#if INTEL_MKL_VERSION >= 20170000\n#define CAFFE2_HAS_MKL_SGEMM_PACK\n#define CAFFE2_HAS_MKL_DNN\n#endif // INTEL_MKL_VERSION >= 20170000\n\n#endif // CAFFE2_USE_MKL\n#endif // CAFFE2_UTILS_MKL_MKL_VERSION_CHECK_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mkl/utils/sgemm_pack.h",
    "content": "#ifndef CAFFE2_UTILS_MKL_SGEMM_PACK_H_\n#define CAFFE2_UTILS_MKL_SGEMM_PACK_H_\n\n#include \"caffe2/core/logging.h\"\n\nnamespace caffe2 {\nnamespace mkl {\nstruct MKLPackedMatrix {\n  CBLAS_IDENTIFIER identifier_;\n  CBLAS_TRANSPOSE trans_;\n  int m_;\n  int n_;\n  int k_;\n  float alpha_;\n  int ld_;\n  float* data_ = nullptr;\n\n  MKLPackedMatrix(\n      const CBLAS_IDENTIFIER identifier,\n      const CBLAS_TRANSPOSE trans,\n      const int m,\n      const int n,\n      const int k,\n      const float alpha,\n      const float* src,\n      const int ld)\n      : identifier_(identifier),\n        trans_(trans),\n        m_(m),\n        n_(n),\n        k_(k),\n        alpha_(alpha),\n        ld_(ld) {\n    data_ = cblas_sgemm_alloc(identifier, m, n, k);\n    CAFFE_ENFORCE(data_, \"MKL runtime error: cannot allocate sgemm memory.\");\n    cblas_sgemm_pack(\n        CblasRowMajor, identifier, trans, m, n, k, alpha, src, ld, data_);\n  }\n\n  ~MKLPackedMatrix() {\n    if (data_) {\n      cblas_sgemm_free(data_);\n    }\n  }\n};\n\n} // namespace mkl\n} // namespace caffe2\n\n#endif // CAFFE2_UTILS_MKL_SGEMM_PACK_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/ios/ios_caffe.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#ifdef __cplusplus\n\n#include <string>\n#include <vector>\n#include \"caffe2/core/predictor.h\"\n#include \"caffe2/mobile/contrib/ios/ios_caffe_defines.h\"\n#include \"caffe2/mobile/contrib/ios/ios_caffe_predictor.h\"\n\nextern \"C\" {\n\nIOS_CAFFE_EXPORT Caffe2IOSPredictor* MakeCaffe2Predictor(const std::string& init_net_str,\n                                                         const std::string& predict_net_str,\n                                                         bool disableMultithreadProcessing,\n                                                         bool allowMetalOperators,\n                                                         std::string& errorMessage);\nIOS_CAFFE_EXPORT void GenerateStylizedImage(std::vector<float>& originalImage,\n                                            const std::string& init_net_str,\n                                            const std::string& predict_net_str,\n                                            int height,\n                                            int width,\n                                            std::vector<float>& dataOut);\n}\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/ios/ios_caffe_defines.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#define IOS_CAFFE_EXPORT __attribute__((visibility(\"default\")))\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/ios/ios_caffe_predictor.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include <string>\n#include \"caffe2/core/net.h\"\n#include \"caffe2/core/predictor.h\"\n#include \"caffe2/mobile/contrib/ios/ios_caffe_defines.h\"\n\nstruct Tensor {\n  std::vector<int64_t> dims;\n  uint8_t* data;\n};\n\nclass IOS_CAFFE_EXPORT Caffe2IOSPredictor final {\n public:\n  /**\n   @allowMetalOperators Allow converting eligible operators to Metal GPU framework accelerated\n   operators. Setting this flag to true doesn't gaurantee predictor will be using Metal operators;\n   Client code must check usingMetalOperators flag to determine predictor is using them.\n   */\n  static Caffe2IOSPredictor* NewCaffe2IOSPredictor(const caffe2::NetDef& init_net,\n                                                   const caffe2::NetDef& predict_net,\n                                                   bool disableMultithreadProcessing,\n                                                   bool allowMetalOperators);\n  void run(const Tensor& inData, Tensor& outData, std::string& errorMessage);\n  ~Caffe2IOSPredictor(){};\n\n  const bool usingMetalOperators;\n\n private:\n  Caffe2IOSPredictor(const caffe2::NetDef& init_net,\n                     const caffe2::NetDef& predict_net,\n                     bool disableMultithreadProcessing,\n                     bool usingMetalOperators);\n  caffe2::Predictor predictor_;\n};\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/ios/mpscnn/mpscnn.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n#include \"caffe2/core/net.h\"\n\nnamespace caffe2 {\nstatic constexpr const char* kMPSCNNReadCountArg = \"__mpscnn_read_count__\";\nstatic constexpr const char* kMPSCNNOutputIsTempImageArg = \"__mpscnn_output_is_temp_img__\";\n\n// We currently only try to convert a fixed set of operators that handle a subset of a full\n// CNN. We also only run when MPSCNN is available, provides a speedup.\n// On failure, returns false. On success, returns true, and sets the MPSCNN net in the output\n// parameter.\n\nbool tryConvertToMPSCNN(const NetDef& initNet, const NetDef& predictNet, NetDef* mpscnnPredictNet);\n\n// Exposed for testing.\nNetDef annotateDefWithReadCounts(const NetDef& net);\nNetDef rewriteForMetal(const NetDef& net);\nNetDef runMPSCNNFusion(const NetDef& net);\nvoid dumpDef(const NetDef& d);\nvoid mpscnnRecordExecutionFinish();\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/ios/mpscnn/mpscnn_context.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#import <Metal/MTLBuffer.h>\n#import <Metal/MTLDevice.h>\n#import <Metal/MTLLibrary.h>\n\n#include <array>\n#include <mutex>\n#include <string>\n#include <thread>\n#include <unordered_map>\n\nnamespace caffe2 {\n\nstruct MPSCNNContext {\n public:\n  id<MTLDevice> device;\n  id<MTLCommandQueue> commandQueue;\n  id<MTLLibrary> library;\n\n  id<MTLComputePipelineState> getPipelineState(NSString* kernel);\n  id<MTLComputePipelineState> getSpecializedPipelineState(NSString* kernel,\n                                                          const std::vector<ushort>& constants);\n\n private:\n  std::mutex pipelineCacheMutex_;\n  std::unordered_map<std::string, id<MTLComputePipelineState>> pipelineCache_;\n};\n\n// get the singleton instance.\nMPSCNNContext& getMPSCNNContext();\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/ios/mpscnn/mpscnn_graph_mask.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n#include \"caffe2/core/net.h\"\n#include \"mpscnn.h\"\n\nnamespace caffe2 {\n// We currently only try to convert a fixed set of operators that handle a subset of a full\n// CNN. We also only run when MPSCNN is available, provides a speedup.\n// On failure, returns false. On success, returns true, and sets the MPSCNN net in the output\n// parameter.\n// The rewrite function now supports insertion of copies in intermediate ops.\nbool tryConvertToMPSCNNIntermediateCopies(const NetDef& initNet,\n                                          const NetDef& predictNet,\n                                          NetDef* mpscnnPredictNet);\nNetDef setSpecialArgs(const NetDef& def);\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/ios/mpscnn/mpscnn_kernels.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n// @generated\n\nstatic const char* MPSCNN_KERNELS = R\"V0G0N(\n\n//  Copyright 2004-present Facebook. All Rights Reserved.\n\n#include <metal_stdlib>\n\nusing namespace metal;\n\nconstant ushort ushort_arg_0[[function_constant(0)]];\nconstant ushort ushort_arg_1[[function_constant(1)]];\nconstant ushort ushort_arg_2[[function_constant(2)]];\nconstant ushort ushort_arg_3[[function_constant(3)]];\nconstant ushort ushort_arg_4[[function_constant(4)]];\nconstant ushort ushort_arg_5[[function_constant(5)]];\nconstant ushort ushort_arg_6[[function_constant(6)]];\nconstant ushort ushort_arg_7[[function_constant(7)]];\nconstant ushort ushort_arg_8[[function_constant(8)]];\nconstant ushort ushort_arg_9[[function_constant(9)]];\n\ninline constexpr ushort divRoundUp(ushort x, ushort y) { return (x + (y - 1)) / y; }\n\nkernel void affine(constant half4* scale[[buffer(0)]],\n                   constant half4* shift[[buffer(1)]],\n                   texture2d_array<half, access::read> in[[texture(0)]],\n                   texture2d_array<half, access::write> out[[texture(1)]],\n                   ushort3 gid[[thread_position_in_grid]]) {\n    const ushort C = ushort_arg_0;\n    if (gid.x >= out.get_width() || gid.y >= out.get_height()) {\n        return;\n    }\n    const half4 scale_c = scale[gid.z % divRoundUp(C, 4)];\n    const half4 shift_c = shift[gid.z % divRoundUp(C, 4)];\n    ushort2 gid_(gid.x, gid.y);\n    const half4 x = in.read(gid_, gid.z);\n    const half4 y = scale_c * x + shift_c;\n    out.write(y, gid_, gid.z);\n}\n\nkernel void affine_nonarray(constant half4* scale[[buffer(0)]],\n                            constant half4* shift[[buffer(1)]],\n                            texture2d<half, access::read> in[[texture(0)]],\n                            texture2d<half, access::write> out[[texture(1)]],\n                            ushort2 gid[[thread_position_in_grid]]) {\n    if (gid.x >= 
out.get_width() || gid.y >= out.get_height()) {\n        return;\n    }\n    const half4 scale_c = scale[0];\n    const half4 shift_c = shift[0];\n    half4 x = in.read(gid);\n    const half4 y = scale_c * x + shift_c;\n    out.write(y, gid);\n}\n\nkernel void prelu_nonshared(constant half4* weights[[buffer(0)]],\n                            texture2d_array<half, access::read> in[[texture(0)]],\n                            texture2d_array<half, access::write> out[[texture(1)]],\n                            ushort3 gid[[thread_position_in_grid]]) {\n    const ushort C = ushort_arg_0;\n    const ushort S = ushort_arg_1;\n    const bool channel_shared = S == 1;\n    if (gid.x >= out.get_width() || gid.y >= out.get_height()) {\n        return;\n    }\n    half4 w = channel_shared ? half4(weights[0][0], weights[0][0], weights[0][0], weights[0][0])\n    : weights[gid.z % divRoundUp(C, 4)];\n    ushort2 gid_(gid.x, gid.y);\n    half4 x = in.read(gid_, gid.z);\n    half4 y = select(x * w, x, x > 0.0h);\n    out.write(y, gid_, gid.z);\n}\n\nkernel void prelu_nonshared_nonarray(constant half4* weights[[buffer(0)]],\n                                     texture2d<half, access::read> in[[texture(0)]],\n                                     texture2d<half, access::write> out[[texture(1)]],\n                                     ushort2 gid[[thread_position_in_grid]]) {\n    // const ushort C = ushort_arg_0;\n    const ushort S = ushort_arg_1;\n    const bool channel_shared = S == 1;\n    if (gid.x >= out.get_width() || gid.y >= out.get_height()) {\n        return;\n    }\n    half4 w = channel_shared ? 
half4(weights[0][0], weights[0][0], weights[0][0], weights[0][0])\n    : weights[0];\n    half4 x = in.read(gid);\n    half4 y = select(x * w, x, x > 0.0h);\n    out.write(y, gid);\n}\n\n// One block per texture.\n// 256 threads per block.\nusing AccT = float4;\n\nconstant const bool instance_norm_has_prelu = ushort_arg_1 > 0;\n\nkernel void instance_norm(\n                          constant half4* weights[[buffer(0)]],\n                          constant half4* bias[[buffer(1)]],\n                          constant half4* preluWeights[[ buffer(2), function_constant(instance_norm_has_prelu) ]],\n                          texture2d_array<half, access::read> in[[texture(0)]],\n                          texture2d_array<half, access::write> out[[texture(1)]],\n                          ushort3 gid[[thread_position_in_grid]],\n                          ushort tid[[thread_index_in_threadgroup]],\n                          ushort3 tcount[[threads_per_threadgroup]]) {\n    if (gid.z >= out.get_array_size()) {\n        return;\n    }\n    const ushort C = ushort_arg_0;\n    const ushort S = ushort_arg_1;\n    const bool channel_shared = S == 1;\n    const ushort c = gid.z % divRoundUp(C, 4);\n    constexpr ushort THREADGROUP_SIZE = 256;\n    \n    threadgroup AccT per_thread_state[THREADGROUP_SIZE];\n    // Each block handles a single texture.\n    per_thread_state[tid] = 0;\n    for (ushort y = gid.y; y < in.get_height(); y += tcount.y) {\n        for (ushort x = gid.x; x < in.get_width(); x += tcount.x) {\n            per_thread_state[tid] += static_cast<AccT>(in.read(ushort2(x, y), gid.z));\n        }\n    }\n    \n    threadgroup_barrier(mem_flags::mem_threadgroup);\n    \n    // 256 -> 32 reduction\n    if (tid < 32) {\n        per_thread_state[tid] += per_thread_state[tid + 32] + per_thread_state[tid + 64] +\n        per_thread_state[tid + 96] + per_thread_state[tid + 128] +\n        per_thread_state[tid + 160] + per_thread_state[tid + 192] +\n        
per_thread_state[tid + 224];\n    }\n    \n    threadgroup_barrier(mem_flags::mem_threadgroup);\n    \n    if (tid == 0) {\n        AccT sum = 0.0;\n        for (ushort i = 0; i < 32; ++i) {\n            sum += per_thread_state[i];\n        }\n        sum /= (in.get_width() * in.get_height());\n        per_thread_state[0] = sum;\n    }\n    threadgroup_barrier(mem_flags::mem_threadgroup);\n    // Broadcast to all threads.\n    const AccT mean = per_thread_state[0];\n    \n    threadgroup_barrier(mem_flags::mem_threadgroup);\n    \n    per_thread_state[tid] = 0;\n    for (ushort y = gid.y; y < in.get_height(); y += tcount.y) {\n        for (ushort x = gid.x; x < in.get_width(); x += tcount.x) {\n            AccT delta = static_cast<AccT>(in.read(ushort2(x, y), gid.z)) - mean;\n            per_thread_state[tid] += delta * delta;\n        }\n    }\n    \n    threadgroup_barrier(mem_flags::mem_threadgroup);\n    \n    // 256 -> 32 reduction\n    if (tid < 32) {\n        per_thread_state[tid] += per_thread_state[tid + 32] + per_thread_state[tid + 64] +\n        per_thread_state[tid + 96] + per_thread_state[tid + 128] +\n        per_thread_state[tid + 160] + per_thread_state[tid + 192] +\n        per_thread_state[tid + 224];\n    }\n    \n    threadgroup_barrier(mem_flags::mem_threadgroup);\n    \n    if (tid == 0) {\n        AccT sum = 0.0;\n        for (ushort i = 0; i < 32; ++i) {\n            sum += per_thread_state[i];\n        }\n        sum /= (in.get_width() * in.get_height());\n        per_thread_state[0] = 1.0 / sqrt(max(sum, AccT(1e-5, 1e-5, 1e-5, 1e-5)) + 1.0e-5);\n    }\n    \n    threadgroup_barrier(mem_flags::mem_threadgroup);\n    // Broadcast to all threads.\n    const AccT inv_var = per_thread_state[0];\n    \n    const AccT c_weights = static_cast<AccT>(weights[c]);\n    const AccT c_bias = static_cast<AccT>(bias[c]);\n    \n    const AccT scale = inv_var * c_weights;\n    const AccT shift = c_bias - mean * scale;\n    \n    half4 w;\n    if 
(instance_norm_has_prelu) {\n        w = channel_shared ? half4(preluWeights[0][0]) : preluWeights[c];\n    }\n    for (ushort y = gid.y; y < in.get_height(); y += tcount.y) {\n        for (ushort x = gid.x; x < in.get_width(); x += tcount.x) {\n            half4 scaled =\n            static_cast<half4>(static_cast<AccT>(in.read(ushort2(x, y), gid.z)) * scale + shift);\n            if (instance_norm_has_prelu) {\n                scaled = select(scaled * w, scaled, scaled > 0.0h);\n            }\n            out.write(scaled, ushort2(x, y), gid.z);\n        }\n    }\n}\n\n// One block per texture.\n// 256 threads per block.\nkernel void instance_norm_nonarray(\n                                   constant half4* weights[[buffer(0)]],\n                                   constant half4* bias[[buffer(1)]],\n                                   constant half4* preluWeights[[ buffer(2), function_constant(instance_norm_has_prelu) ]],\n                                   texture2d<half, access::read> in[[texture(0)]],\n                                   texture2d<half, access::write> out[[texture(1)]],\n                                   ushort3 gid[[thread_position_in_grid]],\n                                   ushort tid[[thread_index_in_threadgroup]],\n                                   ushort3 tcount[[threads_per_threadgroup]]) {\n    // const ushort C = ushort_arg_0;\n    const ushort S = ushort_arg_1;\n    const bool channel_shared = S == 1;\n    constexpr ushort THREADGROUP_SIZE = 256;\n    \n    threadgroup AccT per_thread_state[THREADGROUP_SIZE];\n    // Each block handles a single texture.\n    per_thread_state[tid] = 0;\n    for (ushort y = gid.y; y < in.get_height(); y += tcount.y) {\n        for (ushort x = gid.x; x < in.get_width(); x += tcount.x) {\n            per_thread_state[tid] += static_cast<AccT>(in.read(ushort2(x, y)));\n        }\n    }\n    \n    threadgroup_barrier(mem_flags::mem_threadgroup);\n    \n    // 256 -> 32 reduction\n    if (tid < 32) {\n   
     per_thread_state[tid] += per_thread_state[tid + 32] + per_thread_state[tid + 64] +\n        per_thread_state[tid + 96] + per_thread_state[tid + 128] +\n        per_thread_state[tid + 160] + per_thread_state[tid + 192] +\n        per_thread_state[tid + 224];\n    }\n    \n    threadgroup_barrier(mem_flags::mem_threadgroup);\n    \n    if (tid == 0) {\n        AccT sum = 0.0;\n        for (ushort i = 0; i < 32; ++i) {\n            sum += per_thread_state[i];\n        }\n        sum /= (in.get_width() * in.get_height());\n        per_thread_state[0] = sum;\n    }\n    threadgroup_barrier(mem_flags::mem_threadgroup);\n    // Broadcast to all threads.\n    const AccT mean = per_thread_state[0];\n    \n    threadgroup_barrier(mem_flags::mem_threadgroup);\n    \n    per_thread_state[tid] = 0;\n    for (ushort y = gid.y; y < in.get_height(); y += tcount.y) {\n        for (ushort x = gid.x; x < in.get_width(); x += tcount.x) {\n            AccT delta = static_cast<AccT>(in.read(ushort2(x, y))) - mean;\n            per_thread_state[tid] += delta * delta;\n        }\n    }\n    \n    threadgroup_barrier(mem_flags::mem_threadgroup);\n    \n    // 256 -> 32 reduction\n    if (tid < 32) {\n        per_thread_state[tid] += per_thread_state[tid + 32] + per_thread_state[tid + 64] +\n        per_thread_state[tid + 96] + per_thread_state[tid + 128] +\n        per_thread_state[tid + 160] + per_thread_state[tid + 192] +\n        per_thread_state[tid + 224];\n    }\n    \n    threadgroup_barrier(mem_flags::mem_threadgroup);\n    \n    if (tid == 0) {\n        AccT sum = 0.0;\n        for (ushort i = 0; i < 32; ++i) {\n            sum += per_thread_state[i];\n        }\n        sum /= (in.get_width() * in.get_height());\n        per_thread_state[0] = 1.0 / sqrt(max(sum, AccT(1e-5, 1e-5, 1e-5, 1e-5)) + 1.0e-5);\n    }\n    \n    threadgroup_barrier(mem_flags::mem_threadgroup);\n    // Broadcast to all threads.\n    const AccT inv_var = per_thread_state[0];\n    \n    const AccT 
c_weights = static_cast<AccT>(weights[0]);\n    const AccT c_bias = static_cast<AccT>(bias[0]);\n    \n    const AccT scale = inv_var * c_weights;\n    const AccT shift = c_bias - mean * scale;\n    \n    half4 w;\n    if (instance_norm_has_prelu) {\n        w = channel_shared ? half4(preluWeights[0][0]) : preluWeights[0];\n    }\n    for (ushort y = gid.y; y < in.get_height(); y += tcount.y) {\n        for (ushort x = gid.x; x < in.get_width(); x += tcount.x) {\n            half4 scaled = static_cast<half4>(static_cast<AccT>(in.read(ushort2(x, y))) * scale + shift);\n            if (instance_norm_has_prelu) {\n                scaled = select(scaled * w, scaled, scaled > 0.0h);\n            }\n            out.write(scaled, ushort2(x, y));\n        }\n    }\n}\n\nkernel void copy_nchw_to_metal(constant float* in[[buffer(0)]],\n                               texture2d_array<half, access::write> out[[texture(0)]],\n                               ushort3 gid[[thread_position_in_grid]]) {\n    const ushort C = ushort_arg_0;\n    const ushort H = ushort_arg_1;\n    const ushort W = ushort_arg_2;\n    if (gid.x >= W || gid.y >= H) {\n        return;\n    }\n    \n    const ushort n = gid.z / divRoundUp(C, 4);\n    const ushort c = gid.z - n * divRoundUp(C, 4);\n    \n    // TODO: are the `else` branches needed?\n    // TODO: trick the optimizer for case where C == 4?\n#define CHW_TO_CHWP4(idx, n, c_, h, w)                                     \\\nif ((c_) < C) {                                                          \\\ntrns[idx] = in[n * H * W * C + int(c_) * H * W + int(h) * W + int(w)]; \\\n} else {                                                                 \\\ntrns[idx] = 0.0h;                                                      \\\n}\n    \n    half4 trns;\n    CHW_TO_CHWP4(0, n, c * 4 + 0, gid.y, gid.x);\n    CHW_TO_CHWP4(1, n, c * 4 + 1, gid.y, gid.x);\n    CHW_TO_CHWP4(2, n, c * 4 + 2, gid.y, gid.x);\n    CHW_TO_CHWP4(3, n, c * 4 + 3, gid.y, gid.x);\n#undef 
CHW_TO_CHWP4\n    \n    out.write(trns, ushort2(gid.x, gid.y), gid.z);\n}\n\nkernel void copy_nchw_to_metal_nonarray(constant float* in[[buffer(0)]],\n                                        texture2d<half, access::write> out[[texture(0)]],\n                                        ushort2 gid[[thread_position_in_grid]]) {\n    const ushort C = ushort_arg_0;\n    const ushort H = ushort_arg_1;\n    const ushort W = ushort_arg_2;\n    \n    if (gid.x >= W || gid.y >= H) {\n        return;\n    }\n    \n    half4 trns;\n    // TODO: are the `else` branches needed?\n    // TODO: trick the optimizer for case where C % 4 == 0?\n    \n#define CHW_TO_CHWP4(idx, c, h, w)                        \\\nif ((c) < C) {                                          \\\ntrns[idx] = in[int(c) * H * W + int(h) * W + int(w)]; \\\n} else {                                                \\\ntrns[idx] = 0.0h;                                     \\\n}\n    \n    CHW_TO_CHWP4(0, 0, gid.y, gid.x);\n    CHW_TO_CHWP4(1, 1, gid.y, gid.x);\n    CHW_TO_CHWP4(2, 2, gid.y, gid.x);\n    CHW_TO_CHWP4(3, 3, gid.y, gid.x);\n#undef CHW_TO_CHWP4\n    \n    out.write(trns, ushort2(gid.x, gid.y));\n}\n\nkernel void copy_metal_to_nchw(texture2d_array<half, access::read> in[[texture(0)]],\n                               device float* out[[buffer(0)]],\n                               ushort3 gid[[thread_position_in_grid]]) {\n    const ushort C = ushort_arg_0;\n    const ushort H = ushort_arg_1;\n    const ushort W = ushort_arg_2;\n    \n    if (gid.x >= W || gid.y >= H) {\n        return;\n    }\n    const ushort n = gid.z / divRoundUp(C, 4);\n    const ushort c = gid.z - n * divRoundUp(C, 4);\n    \n    half4 cs = in.read(ushort2(gid.x, gid.y), gid.z);\n    \n#define CHWP4_TO_CHW(idx, n, c_, h, w)                                    \\\nif ((c_) < C) {                                                         \\\nout[n * H * W * C + int(c_) * H * W + int(h) * W + int(w)] = cs[idx]; \\\n}\n    \n    CHWP4_TO_CHW(0, 
n, c * 4 + 0, gid.y, gid.x);\n    CHWP4_TO_CHW(1, n, c * 4 + 1, gid.y, gid.x);\n    CHWP4_TO_CHW(2, n, c * 4 + 2, gid.y, gid.x);\n    CHWP4_TO_CHW(3, n, c * 4 + 3, gid.y, gid.x);\n#undef CHWP4_TO_CHW\n}\n\nkernel void copy_metal_to_nchw_nonarray(texture2d<half, access::read> in[[texture(0)]],\n                                        device float* out[[buffer(0)]],\n                                        ushort2 gid[[thread_position_in_grid]]) {\n    const ushort C = ushort_arg_0;\n    const ushort H = ushort_arg_1;\n    const ushort W = ushort_arg_2;\n    \n    if (gid.x >= W || gid.y >= H) {\n        return;\n    }\n    \n    half4 cs = in.read(ushort2(gid.x, gid.y));\n    \n#define CHWP4_TO_CHW(idx, c, h, w)                       \\\nif ((c) < C) {                                         \\\nout[int(c) * H * W + int(h) * W + int(w)] = cs[idx]; \\\n}\n    \n    CHWP4_TO_CHW(0, 0, gid.y, gid.x);\n    CHWP4_TO_CHW(1, 1, gid.y, gid.x);\n    CHWP4_TO_CHW(2, 2, gid.y, gid.x);\n    CHWP4_TO_CHW(3, 3, gid.y, gid.x);\n#undef CHWP4_TO_CHW\n}\n\nkernel void convtranspose_upscale(texture2d_array<half, access::read> in[[texture(0)]],\n                                  texture2d_array<half, access::write> out[[texture(1)]],\n                                  ushort3 gid[[thread_position_in_grid]]) {\n    // All resolved at compile time.\n    // Assume symmetric kernel/stride/pad for now.\n    const ushort kernel_ = ushort_arg_0;\n    const ushort stride = ushort_arg_1;\n    const ushort pad = ushort_arg_2;\n    \n    half4 zero(0.0h, 0.0h, 0.0h, 0.0h);\n    \n    if (gid.x >= out.get_width() || gid.y >= out.get_height()) {\n        return;\n    }\n    const ushort2 gid_ = ushort2(gid.x, gid.y);\n    if (gid.x < kernel_ - 1 - pad || gid.y < kernel_ - 1 - pad) {\n        out.write(zero, gid_, gid.z);\n        return;\n    }\n    \n    if (((gid.x - (kernel_ - 1 - pad)) % stride == 0) &&\n        ((gid.y - (kernel_ - 1 - pad)) % stride == 0)) {\n        ushort2 in_pos((gid.x - 
(kernel_ - 1 - pad)) / stride, (gid.y - (kernel_ - 1 - pad)) / stride);\n        \n        if (in_pos.x < in.get_width() && in_pos.y < in.get_height()) {\n            half4 input = in.read(in_pos, gid.z);\n            out.write(input, gid_, gid.z);\n        } else {\n            out.write(zero, gid_, gid.z);\n        }\n    } else {\n        out.write(zero, gid_, gid.z);\n    }\n}\n\nconstant bool has_in_arr = (ushort_arg_7 > 1 || ushort_arg_0 * ushort_arg_1 * ushort_arg_6 > 4);\nconstant bool has_out_arr = (ushort_arg_7 > 1 || ushort_arg_6 > 4);\nconstant bool has_in_tex = (!has_in_arr);\nconstant bool has_out_tex = (!has_out_arr);\n\nkernel void col2im(\n                   texture2d_array<half, access::read> ina[[ texture(0), function_constant(has_in_arr) ]],\n                   texture2d<half, access::read> in[[ texture(0), function_constant(has_in_tex) ]],\n                   texture2d_array<half, access::write> outa[[ texture(1), function_constant(has_out_arr) ]],\n                   texture2d<half, access::write> out[[ texture(1), function_constant(has_out_tex) ]],\n                   constant half4* bias[[buffer(0)]],\n                   ushort3 gid[[thread_position_in_grid]]) {\n    const ushort kernel_h = ushort_arg_0;\n    const ushort kernel_w = ushort_arg_1;\n    const ushort stride_h = ushort_arg_2;\n    const ushort stride_w = ushort_arg_3;\n    const ushort pad_l = ushort_arg_4;\n    const ushort pad_t = ushort_arg_5;\n    const ushort C = ushort_arg_6;\n    //  const int N = ushort_arg_7;\n    const ushort height_col = ushort_arg_8; //(outa.get_height() + pad + pad - kernel_) / stride + 1;\n    const ushort width_col = ushort_arg_9; // (outa.get_width() + pad + pad - kernel_) / stride + 1;\n    \n    const ushort n = gid.z / divRoundUp(C, 4);\n    const ushort c = gid.z - n * divRoundUp(C, 4);\n    \n    const ushort w = gid.x + pad_l;\n    const ushort h = gid.y + pad_t;\n    \n    // compute the start and end of the output\n    const ushort 
w_col_start = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;\n    const ushort w_col_end = min(ushort(w / stride_w + 1), ushort(width_col));\n    const ushort h_col_start = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;\n    const ushort h_col_end = min(ushort(h / stride_h + 1), ushort(height_col));\n    \n    float4 val = static_cast<float4>(bias[c]);\n    for (ushort h_col = h_col_start; h_col < h_col_end; ++h_col) {\n        for (ushort w_col = w_col_start; w_col < w_col_end; ++w_col) {\n            const ushort w_k = w - w_col * stride_w;\n            const ushort h_k = h - h_col * stride_h;\n            \n            // layout is essentially: [N][K][K][C][H][W]\n            // - where the divRoundUp(K * K * C, 4) channels are interleaved as usual.\n            // Thus, it's actually [N][divRoundUp(K * K * C, 4)][H][W].\u0013\n            \n            // If C % 4 is not zero, then we have to play some games via partial indexing.\n            // TODO: is it worth optimizing this loop via padding in C?\n            if (C % 4 == 0) {\n                ushort c_col = n * kernel_h * kernel_w * divRoundUp(C, 4) +\n                h_k * kernel_w * divRoundUp(C, 4) + w_k * divRoundUp(C, 4) + c;\n                if (has_in_arr) {\n                    val += static_cast<float4>(ina.read(ushort2(w_col, h_col), c_col));\n                }\n                if (has_in_tex) {\n                    val += static_cast<float4>(in.read(ushort2(w_col, h_col), c_col));\n                }\n            } else {\n                half4 components(0, 0, 0, 0);\n                for (auto i = 0; i < 4; ++i) {\n                    ushort c_col_i = n * divRoundUp(kernel_h * kernel_w * C, 4) * 4 + h_k * kernel_w * C +\n                    w_k * C + c * 4 + i;\n                    ushort c_col_i_z = c_col_i / 4;\n                    ushort c_col_i_off = c_col_i - c_col_i_z * 4;\n                    if (has_in_arr) {\n                        components[i] = ina.read(ushort2(w_col, 
h_col), c_col_i_z)[c_col_i_off];\n                    }\n                    if (has_in_tex) {\n                        components[i] = in.read(ushort2(w_col, h_col))[c_col_i_off];\n                    }\n                }\n                val += static_cast<float4>(components);\n            }\n        }\n    }\n    if (has_out_arr) {\n        outa.write(static_cast<half4>(val), ushort2(gid.x, gid.y), gid.z);\n    }\n    if (has_out_tex) {\n        out.write(static_cast<half4>(val), ushort2(gid.x, gid.y));\n    }\n}\n\nkernel void preprocess_stylizer(device uchar4* in[[buffer(0)]],\n                                constant half* mean[[buffer(1)]],\n                                constant half4* noise[[buffer(2)]],\n                                texture2d<half, access::write> out[[texture(0)]],\n                                ushort2 gid[[thread_position_in_grid]]) {\n    \n    if (gid.x >= out.get_width() || gid.y >= out.get_height()) {\n        return;\n    }\n    const ushort noise_size = ushort_arg_0;\n    \n    half4 mean_half(mean[0], mean[1], mean[2], 0.0h);\n    uint input_noise_idx = ((uint)out.get_width() * (uint)gid.y + (uint)gid.x) % (noise_size / 4);\n    const half4 input_noise = noise[input_noise_idx];\n    const uint W = out.get_width();\n#define in_at(h, w) in[(uint)(h)*W + (uint)(w)]\n    uchar4 input = in_at(gid.y, gid.x);\n#undef in_at\n    half4 input_half = static_cast<half4>(input);\n    out.write(input_half - mean_half + input_noise, gid);\n}\n\nkernel void deprocess_stylizer(texture2d<half, access::read> in[[texture(0)]],\n                               device uchar4* out[[buffer(0)]],\n                               constant half* mean[[buffer(1)]],\n                               ushort2 gid[[thread_position_in_grid]]) {\n    if (gid.x >= in.get_width() || gid.y >= in.get_height()) {\n        return;\n    }\n    \n    half4 value = in.read(gid);\n    \n    half4 mean_h(mean[0], mean[1], mean[2], 0.0h);\n    half4 min_h(0.0h, 0.0h, 
0.0h, 255.0h);\n    half4 max_h(255.0h, 255.0h, 255.0h, 255.0h);\n    half4 clamped = clamp(value + mean_h, min_h, max_h);\n    const uint W = in.get_width();\n#define out_at(h, w, v) out[(uint)(h)*W + (uint)(w)] = (v)\n    out_at(gid.y, gid.x, static_cast<uchar4>(clamped));\n#undef out_at\n}\n\nkernel void reflection_padding_nonarray(texture2d<half, access::read> in[[texture(0)]],\n                                        texture2d<half, access::write> out[[texture(1)]],\n                                        ushort2 gid[[thread_position_in_grid]]) {\n    if (gid.x >= out.get_width() || gid.y >= out.get_height()) {\n        return;\n    }\n    ushort H = in.get_height();\n    ushort PH = out.get_height();\n    \n    // Note: we assume symmetric padding on H/W here, which is verified\n    // in the calling code.\n    ushort pad_h = (PH - H) / 2;\n    ushort W = in.get_width();\n    ushort PW = out.get_width();\n    ushort pad_w = (PW - W) / 2;\n    \n    short h = short(gid.y) - short(pad_h);\n    h = max(h, short(-h));\n    h = min(h, short(2 * H - h - 2));\n    \n    short w = short(gid.x) - short(pad_w);\n    w = max(w, short(-w));\n    w = min(w, short(2 * W - w - 2));\n    \n    ushort2 inid(w, h);\n    out.write(in.read(inid), gid);\n}\n\nkernel void reflection_padding(texture2d_array<half, access::read> in[[texture(0)]],\n                               texture2d_array<half, access::write> out[[texture(1)]],\n                               ushort3 gid[[thread_position_in_grid]]) {\n    if (gid.x >= out.get_width() || gid.y >= out.get_height()) {\n        return;\n    }\n    ushort H = in.get_height();\n    ushort PH = out.get_height();\n    \n    // Note: we assume symmetric padding on H/W here, which is verified\n    // in the calling code.\n    ushort pad_h = (PH - H) / 2;\n    ushort W = in.get_width();\n    ushort PW = out.get_width();\n    ushort pad_w = (PW - W) / 2;\n    \n    short h = short(gid.y) - short(pad_h);\n    h = max(h, short(-h));\n    h = 
min(h, short(2 * H - h - 2));\n    \n    short w = short(gid.x) - short(pad_w);\n    w = max(w, short(-w));\n    w = min(w, short(2 * W - w - 2));\n    \n    ushort2 inid(w, h);\n    \n    out.write(in.read(inid, gid.z), ushort2(gid.x, gid.y), gid.z);\n}\n\nkernel void bilinear_upsample(texture2d<half, access::sample> in[[texture(0)]],\n                              texture2d<half, access::write> out[[texture(1)]],\n                              ushort2 gid[[thread_position_in_grid]]) {\n    if (gid.x >= out.get_width() || gid.y >= out.get_height()) {\n        return;\n    }\n    ushort2 src = gid / 2;\n    constexpr sampler sampler(address::clamp_to_edge, filter::linear, coord::pixel);\n    half4 value = in.sample(sampler, static_cast<float2>(src));\n    out.write(value, gid);\n}\n\nconstant bool in0_is_tex = ushort_arg_0 <= 1 && ushort_arg_1 <= 4;\nconstant bool in0_is_arr = !in0_is_tex;\n\nkernel void elementwise_mul(texture2d<half, access::read> in0[[texture(0), function_constant(in0_is_tex)]],\n                            texture2d_array<half, access::read> ina0[[texture(0), function_constant(in0_is_arr)]],\n                            texture2d<half, access::write> out[[texture(2), function_constant(in0_is_tex)]],\n                            texture2d_array<half, access::write> outa[[texture(2), function_constant(in0_is_arr)]],\n                            constant float* in1[[buffer(1)]],\n                            ushort3 gid[[thread_position_in_grid]]) {\n  if (in0_is_tex) {\n    if (gid.x >= out.get_width() || gid.y >= out.get_height()) {\n      return;\n    }\n  } else {\n    if (gid.x >= outa.get_width() || gid.y >= outa.get_height()) {\n      return;\n    }\n  }\n  ushort2 gid_ = ushort2(gid.x, gid.y);\n  if (in0_is_tex) {\n    out.write(in0.read(gid_) * in1[0], gid_);\n  } else {\n    outa.write(ina0.read(gid_, gid.z) * in1[0], gid_, gid.z);\n  }\n}\n\nkernel void elementwise_add_nonarray(texture2d<half, access::read> in0[[texture(0)]],\n           
                          texture2d<half, access::read> in1[[texture(1)]],\n                                     texture2d<half, access::write> out[[texture(2)]],\n                                     ushort2 gid[[thread_position_in_grid]]) {\n    if (gid.x >= out.get_width() || gid.y >= out.get_height()) {\n        return;\n    }\n    out.write(in0.read(gid) + in1.read(gid), gid);\n}\n\nkernel void elementwise_add(texture2d_array<half, access::read> in0[[texture(0)]],\n                            texture2d_array<half, access::read> in1[[texture(1)]],\n                            texture2d_array<half, access::write> out[[texture(2)]],\n                            ushort3 gid[[thread_position_in_grid]]) {\n    if (gid.x >= out.get_width() || gid.y >= out.get_height()) {\n        return;\n    }\n    ushort2 gid_ = ushort2(gid.x, gid.y);\n    out.write(in0.read(gid_, gid.z) + in1.read(gid_, gid.z), gid_, gid.z);\n}\n\nconstant bool has_in0_arg = (ushort_arg_0 > 0);\nconstant bool has_in1_arg = (ushort_arg_1 > 0);\nconstant bool has_in2_arg = (ushort_arg_2 > 0);\nconstant bool has_in3_arg = (ushort_arg_3 > 0);\n\nconstant bool has_in0_tex = (has_in0_arg && ushort_arg_0 <= 4 && ushort_arg_4 <= 1);\nconstant bool has_in1_tex = (has_in1_arg && ushort_arg_1 <= 4 && ushort_arg_4 <= 1);\nconstant bool has_in2_tex = (has_in2_arg && ushort_arg_2 <= 4 && ushort_arg_4 <= 1);\nconstant bool has_in3_tex = (has_in3_arg && ushort_arg_3 <= 4 && ushort_arg_4 <= 1);\n\nconstant bool has_in0_array = (has_in0_arg && !has_in0_tex);\nconstant bool has_in1_array = (has_in1_arg && !has_in1_tex);\nconstant bool has_in2_array = (has_in2_arg && !has_in2_tex);\nconstant bool has_in3_array = (has_in3_arg && !has_in3_tex);\n\ninline ushort idx_3(ushort z, ushort C0, ushort C1, ushort C2, ushort C3) {\n    if (z < C0 / 4) {\n        return 0;\n    }\n    if (z < (C0 + C1) / 4) {\n        return 1;\n    }\n    if (z < (C0 + C1 + C2) / 4) {\n        return 2;\n    }\n    return 3;\n}\n\ninline ushort 
idx_2(ushort z, ushort C0, ushort C1, ushort C2) {\n    if (z < C0 / 4) {\n        return 0;\n    }\n    if (z < (C0 + C1) / 4) {\n        return 1;\n    }\n    return 2;\n}\n\ninline ushort idx_1(ushort z, ushort C0, ushort C1) {\n    if (z < C0 / 4) {\n        return 0;\n    } else {\n        return 1;\n    }\n}\n\ninline ushort idx_0(ushort z, ushort C0) { return 0; }\n\n// in a texture_array with size C, find the offset for image N at plane c.\ninline constexpr ushort z_off(ushort n, ushort c, ushort C) { return n * divRoundUp(C, 4) + c; }\n\nkernel void concat(\n                   texture2d<half, access::read> in0[[ texture(0), function_constant(has_in0_tex) ]],\n                   texture2d<half, access::read> in1[[ texture(1), function_constant(has_in1_tex) ]],\n                   texture2d<half, access::read> in2[[ texture(2), function_constant(has_in2_tex) ]],\n                   texture2d<half, access::read> in3[[ texture(3), function_constant(has_in3_tex) ]],\n                   texture2d_array<half, access::read> ina0[[ texture(0), function_constant(has_in0_array) ]],\n                   texture2d_array<half, access::read> ina1[[ texture(1), function_constant(has_in1_array) ]],\n                   texture2d_array<half, access::read> ina2[[ texture(2), function_constant(has_in2_array) ]],\n                   texture2d_array<half, access::read> ina3[[ texture(3), function_constant(has_in3_array) ]],\n                   texture2d_array<half, access::write> out[[texture(5)]],\n                   ushort3 gid[[thread_position_in_grid]]) {\n    if (gid.x >= out.get_width() || gid.y >= out.get_height()) {\n        return;\n    }\n    \n    const ushort C0 = ushort_arg_0;\n    const ushort C1 = ushort_arg_1;\n    const ushort C2 = ushort_arg_2;\n    const ushort C3 = ushort_arg_3;\n    const ushort C = C0 + C1 + C2 + C3;\n    const ushort n = gid.z / divRoundUp(C, 4);\n    const ushort c = gid.z - n * divRoundUp(C, 4);\n    \n    ushort idx = 0;\n    if 
(has_in3_arg) {\n        idx = idx_3(c, C0, C1, C2, C3);\n    } else if (has_in2_arg) {\n        idx = idx_2(c, C0, C1, C2);\n    } else if (has_in1_arg) {\n        idx = idx_1(c, C0, C1);\n    } else if (has_in0_arg) {\n        idx = idx_0(c, C0);\n    } else {\n        // never reached.\n        idx = 0;\n    }\n    \n    ushort2 gid_ = ushort2(gid.x, gid.y);\n    half4 value;\n    switch (idx) {\n        case 0: {\n            if (has_in0_tex) {\n                value = in0.read(gid_);\n            }\n            if (has_in0_array) {\n                value = ina0.read(gid_, z_off(n, c, C0));\n            }\n            break;\n        }\n        case 1: {\n            if (has_in1_tex) {\n                value = in1.read(gid_);\n            }\n            if (has_in1_array) {\n                value = ina1.read(gid_, z_off(n, c - (C0) / 4, C1));\n            }\n            break;\n        }\n        case 2: {\n            if (has_in2_tex) {\n                value = in2.read(gid_);\n            }\n            if (has_in2_array) {\n                value = ina2.read(gid_, z_off(n, c - (C0 + C1) / 4, C2));\n            }\n            break;\n        }\n        case 3: {\n            if (has_in3_tex) {\n                value = in3.read(gid_);\n            }\n            if (has_in3_array) {\n                value = ina3.read(gid_, z_off(n, c - (C0 + C1 + C2) / 4, C3));\n            }\n            break;\n        }\n    }\n    out.write(value, gid_, gid.z);\n}\n\nusing RoIT = half;\nusing RoIT4 = half4;\nkernel void roi_warp(texture2d_array<half, access::sample> in[[texture(0)]],\n                     texture2d_array<half, access::write> out[[texture(1)]],\n                     constant half4* rois[[buffer(0)]],\n                     ushort3 gid[[thread_position_in_grid]]) {\n    constexpr sampler s2(coord::pixel, address::clamp_to_edge, filter::linear);\n    \n    const half spatial_scale = half(ushort_arg_0) / 10000;\n    const ushort sampling_ratio = ushort_arg_1;\n  
  const ushort C = ushort_arg_2;\n    const ushort pw = gid.x;\n    const ushort ph = gid.y;\n    const ushort n = gid.z / divRoundUp(C, 4);\n    const ushort c = gid.z % divRoundUp(C, 4);\n    \n    const RoIT4 roi_scaled = rois[n] * spatial_scale;\n    const RoIT roi_start_w = roi_scaled[0];\n    const RoIT roi_start_h = roi_scaled[1];\n    const RoIT roi_end_w = roi_scaled[2];\n    const RoIT roi_end_h = roi_scaled[3];\n    \n    // Force malformed ROIs to be 1x1\n    const RoIT roi_width = max(roi_end_w - roi_start_w, (RoIT)1.);\n    const RoIT roi_height = max(roi_end_h - roi_start_h, (RoIT)1.);\n    \n    const RoIT bin_size_h = static_cast<RoIT>(roi_height) / static_cast<RoIT>(out.get_height());\n    const RoIT bin_size_w = static_cast<RoIT>(roi_width) / static_cast<RoIT>(out.get_width());\n    const ushort roi_bin_grid_h = sampling_ratio;\n    const ushort roi_bin_grid_w = sampling_ratio;\n    const ushort iy_upper = sampling_ratio;\n    const ushort ix_upper = sampling_ratio;\n    \n    const RoIT count = iy_upper * ix_upper;\n    \n    RoIT4 output_val = 0.0;\n    for (int iy = 0; iy < iy_upper; iy++) {\n        for (int ix = 0; ix < ix_upper; ix++) {\n            const RoIT y =\n            roi_start_h + ph * bin_size_h + iy * bin_size_h / static_cast<RoIT>(roi_bin_grid_h);\n            const RoIT x =\n            roi_start_w + pw * bin_size_w + ix * bin_size_w / static_cast<RoIT>(roi_bin_grid_w);\n            output_val += in.sample(s2, float2(x + 0.5, y + 0.5), c);\n        }\n    }\n    output_val /= count;\n    out.write(static_cast<half4>(output_val), ushort2(gid.x, gid.y), gid.z);\n}\n\nkernel void resize_nearest(texture2d_array<half, access::sample> in[[texture(0)]],\n                           texture2d_array<half, access::write> out[[texture(1)]],\n                           ushort3 gid[[thread_position_in_grid]]) {\n    const ushort oH = ushort_arg_0;\n    const ushort oW = ushort_arg_1;\n    if (gid.x >= oW || gid.y >= oH) {\n        return;\n 
   }\n    const float height_scale = float(ushort_arg_2) / 10000;\n    const float width_scale = float(ushort_arg_3) / 10000;\n    constexpr sampler s(coord::pixel, address::clamp_to_edge, filter::nearest);\n    const int in_y = (int)(gid.y / height_scale);\n    const int in_x = (int)(gid.x / width_scale);\n    out.write(in.sample(s, float2(in_x, in_y), gid.z), ushort2(gid.x, gid.y), gid.z);\n}\n\nkernel void resize_nearest_nonarray(texture2d<half, access::sample> in[[texture(0)]],\n                                    texture2d<half, access::write> out[[texture(1)]],\n                                    ushort2 gid[[thread_position_in_grid]]) {\n    const ushort oH = ushort_arg_0;\n    const ushort oW = ushort_arg_1;\n    if (gid.x >= oW || gid.y >= oH) {\n        return;\n    }\n    const float height_scale = float(ushort_arg_2) / 10000;\n    const float width_scale = float(ushort_arg_3) / 10000;\n    constexpr sampler s(coord::pixel, address::clamp_to_edge, filter::nearest);\n    const int in_y = (int)(gid.y / height_scale);\n    const int in_x = (int)(gid.x / width_scale);\n    out.write(in.sample(s, float2(in_x, in_y)), ushort2(gid.x, gid.y));\n}\n\nkernel void nms(device uint* mask[[buffer(0)]],\n                constant float* proposals[[buffer(1)]],\n                constant int* indices[[buffer(2)]],\n                ushort2 tgid[[threadgroup_position_in_grid]],\n                ushort2 tid[[thread_position_in_threadgroup]]) {\n    const ushort num_proposals = ushort_arg_0;\n    const ushort threads_per_group = ushort_arg_1;\n    float nms_thresh = float(ushort_arg_2) / 10000.0;\n    const ushort global_offset = ushort_arg_3;\n    const ushort row_start = tgid.y;\n    const ushort col_start = tgid.x;\n    const ushort trd_id = tid.x;\n    \n    const short row_size = min(short(32), short(num_proposals - row_start * threads_per_group));\n    const short col_size = min(short(32), short(num_proposals - col_start * threads_per_group));\n    \n    // mask the 
bit if the IoU between two proposals exceeds the threshold\n    if (trd_id < row_size) {\n        const ushort cur_idx = global_offset + row_start * threads_per_group + trd_id;\n        const ushort offset = indices[cur_idx] * 4;\n        const float4 cur_proposal = float4(\n                                           proposals[offset], proposals[offset + 1], proposals[offset + 2], proposals[offset + 3]);\n        uint cur_mask = 0;\n        ushort group_start = 0; // start index within group\n        if (row_start == col_start) {\n            // if in the same group, start from the next\n            group_start = trd_id + 1;\n        }\n        for (ushort i = group_start; i < col_size; i++) {\n            float4 a = cur_proposal;\n            ushort idx = indices[global_offset + col_start * threads_per_group + i] * 4;\n            float4 b = float4(proposals[idx], proposals[idx + 1], proposals[idx + 2], proposals[idx + 3]);\n            float left = max(a[0], b[0]);\n            float right = min(a[2], b[2]);\n            float top = max(a[1], b[1]);\n            float bottom = min(a[3], b[3]);\n            float width = max(right - left + 1.0, 0.0);\n            float height = max(bottom - top + 1.0, 0.0);\n            float interS = width * height;\n            float Sa = (a[2] - a[0] + 1.0) * (a[3] - a[1] + 1.0);\n            float Sb = (b[2] - b[0] + 1.0) * (b[3] - b[1] + 1.0);\n            float iou = interS / (Sa + Sb - interS);\n            if (iou - nms_thresh > 0) {\n                cur_mask |= 1U << i;\n            }\n        }\n        ushort col_blocks = (num_proposals + threads_per_group - 1) / threads_per_group;\n        mask[cur_idx * col_blocks + col_start] = cur_mask;\n    }\n}\n\n\n)V0G0N\";\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/ios/mpscnn/mpscnn_test.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#include \"caffe2/core/net.h\"\n#pragma once\n\nnamespace caffe2 {\n\nvoid testMPSCNN();\nvoid compareModels(const NetDef& initNet, NetDef predictNet);\nvoid verifyRewrite(const NetDef& initNet, const NetDef& net, std::vector<int> inputDims);\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/libopencl-stub/include/CL/cl.h",
    "content": "/*******************************************************************************\n * Copyright (c) 2008 - 2012 The Khronos Group Inc.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and/or associated documentation files (the\n * \"Materials\"), to deal in the Materials without restriction, including\n * without limitation the rights to use, copy, modify, merge, publish,\n * distribute, sublicense, and/or sell copies of the Materials, and to\n * permit persons to whom the Materials are furnished to do so, subject to\n * the following conditions:\n *\n * The above copyright notice and this permission notice shall be included\n * in all copies or substantial portions of the Materials.\n *\n * THE MATERIALS ARE PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.\n ******************************************************************************/\n\n#ifndef __OPENCL_CL_H\n#define __OPENCL_CL_H\n\n#ifdef __APPLE__\n#include <OpenCL/cl_platform.h>\n#else\n#include <CL/cl_platform.h>\n#endif\t\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/******************************************************************************/\n\ntypedef struct _cl_platform_id *    cl_platform_id;\ntypedef struct _cl_device_id *      cl_device_id;\ntypedef struct _cl_context *        cl_context;\ntypedef struct _cl_command_queue *  cl_command_queue;\ntypedef struct _cl_mem *            cl_mem;\ntypedef struct _cl_program *        cl_program;\ntypedef struct _cl_kernel *         cl_kernel;\ntypedef struct 
_cl_event *          cl_event;\ntypedef struct _cl_sampler *        cl_sampler;\n\ntypedef cl_uint             cl_bool;                     /* WARNING!  Unlike cl_ types in cl_platform.h, cl_bool is not guaranteed to be the same size as the bool in kernels. */\ntypedef cl_ulong            cl_bitfield;\ntypedef cl_bitfield         cl_device_type;\ntypedef cl_uint             cl_platform_info;\ntypedef cl_uint             cl_device_info;\ntypedef cl_bitfield         cl_device_fp_config;\ntypedef cl_uint             cl_device_mem_cache_type;\ntypedef cl_uint             cl_device_local_mem_type;\ntypedef cl_bitfield         cl_device_exec_capabilities;\ntypedef cl_bitfield         cl_command_queue_properties;\ntypedef intptr_t            cl_device_partition_property;\ntypedef cl_bitfield         cl_device_affinity_domain;\n\ntypedef intptr_t            cl_context_properties;\ntypedef cl_uint             cl_context_info;\ntypedef cl_uint             cl_command_queue_info;\ntypedef cl_uint             cl_channel_order;\ntypedef cl_uint             cl_channel_type;\ntypedef cl_bitfield         cl_mem_flags;\ntypedef cl_uint             cl_mem_object_type;\ntypedef cl_uint             cl_mem_info;\ntypedef cl_bitfield         cl_mem_migration_flags;\ntypedef cl_uint             cl_image_info;\ntypedef cl_uint             cl_buffer_create_type;\ntypedef cl_uint             cl_addressing_mode;\ntypedef cl_uint             cl_filter_mode;\ntypedef cl_uint             cl_sampler_info;\ntypedef cl_bitfield         cl_map_flags;\ntypedef cl_uint             cl_program_info;\ntypedef cl_uint             cl_program_build_info;\ntypedef cl_uint             cl_program_binary_type;\ntypedef cl_int              cl_build_status;\ntypedef cl_uint             cl_kernel_info;\ntypedef cl_uint             cl_kernel_arg_info;\ntypedef cl_uint             cl_kernel_arg_address_qualifier;\ntypedef cl_uint             cl_kernel_arg_access_qualifier;\ntypedef cl_bitfield         
cl_kernel_arg_type_qualifier;\ntypedef cl_uint             cl_kernel_work_group_info;\ntypedef cl_uint             cl_event_info;\ntypedef cl_uint             cl_command_type;\ntypedef cl_uint             cl_profiling_info;\n\n\ntypedef struct _cl_image_format {\n    cl_channel_order        image_channel_order;\n    cl_channel_type         image_channel_data_type;\n} cl_image_format;\n\ntypedef struct _cl_image_desc {\n    cl_mem_object_type      image_type;\n    size_t                  image_width;\n    size_t                  image_height;\n    size_t                  image_depth;\n    size_t                  image_array_size;\n    size_t                  image_row_pitch;\n    size_t                  image_slice_pitch;\n    cl_uint                 num_mip_levels;\n    cl_uint                 num_samples;\n    cl_mem                  buffer;\n} cl_image_desc;\n\ntypedef struct _cl_buffer_region {\n    size_t                  origin;\n    size_t                  size;\n} cl_buffer_region;\n\n\n/******************************************************************************/\n\n/* Error Codes */\n#define CL_SUCCESS                                  0\n#define CL_DEVICE_NOT_FOUND                         -1\n#define CL_DEVICE_NOT_AVAILABLE                     -2\n#define CL_COMPILER_NOT_AVAILABLE                   -3\n#define CL_MEM_OBJECT_ALLOCATION_FAILURE            -4\n#define CL_OUT_OF_RESOURCES                         -5\n#define CL_OUT_OF_HOST_MEMORY                       -6\n#define CL_PROFILING_INFO_NOT_AVAILABLE             -7\n#define CL_MEM_COPY_OVERLAP                         -8\n#define CL_IMAGE_FORMAT_MISMATCH                    -9\n#define CL_IMAGE_FORMAT_NOT_SUPPORTED               -10\n#define CL_BUILD_PROGRAM_FAILURE                    -11\n#define CL_MAP_FAILURE                              -12\n#define CL_MISALIGNED_SUB_BUFFER_OFFSET             -13\n#define CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST -14\n#define CL_COMPILE_PROGRAM_FAILURE         
         -15\n#define CL_LINKER_NOT_AVAILABLE                     -16\n#define CL_LINK_PROGRAM_FAILURE                     -17\n#define CL_DEVICE_PARTITION_FAILED                  -18\n#define CL_KERNEL_ARG_INFO_NOT_AVAILABLE            -19\n\n#define CL_INVALID_VALUE                            -30\n#define CL_INVALID_DEVICE_TYPE                      -31\n#define CL_INVALID_PLATFORM                         -32\n#define CL_INVALID_DEVICE                           -33\n#define CL_INVALID_CONTEXT                          -34\n#define CL_INVALID_QUEUE_PROPERTIES                 -35\n#define CL_INVALID_COMMAND_QUEUE                    -36\n#define CL_INVALID_HOST_PTR                         -37\n#define CL_INVALID_MEM_OBJECT                       -38\n#define CL_INVALID_IMAGE_FORMAT_DESCRIPTOR          -39\n#define CL_INVALID_IMAGE_SIZE                       -40\n#define CL_INVALID_SAMPLER                          -41\n#define CL_INVALID_BINARY                           -42\n#define CL_INVALID_BUILD_OPTIONS                    -43\n#define CL_INVALID_PROGRAM                          -44\n#define CL_INVALID_PROGRAM_EXECUTABLE               -45\n#define CL_INVALID_KERNEL_NAME                      -46\n#define CL_INVALID_KERNEL_DEFINITION                -47\n#define CL_INVALID_KERNEL                           -48\n#define CL_INVALID_ARG_INDEX                        -49\n#define CL_INVALID_ARG_VALUE                        -50\n#define CL_INVALID_ARG_SIZE                         -51\n#define CL_INVALID_KERNEL_ARGS                      -52\n#define CL_INVALID_WORK_DIMENSION                   -53\n#define CL_INVALID_WORK_GROUP_SIZE                  -54\n#define CL_INVALID_WORK_ITEM_SIZE                   -55\n#define CL_INVALID_GLOBAL_OFFSET                    -56\n#define CL_INVALID_EVENT_WAIT_LIST                  -57\n#define CL_INVALID_EVENT                            -58\n#define CL_INVALID_OPERATION                        -59\n#define CL_INVALID_GL_OBJECT                  
      -60\n#define CL_INVALID_BUFFER_SIZE                      -61\n#define CL_INVALID_MIP_LEVEL                        -62\n#define CL_INVALID_GLOBAL_WORK_SIZE                 -63\n#define CL_INVALID_PROPERTY                         -64\n#define CL_INVALID_IMAGE_DESCRIPTOR                 -65\n#define CL_INVALID_COMPILER_OPTIONS                 -66\n#define CL_INVALID_LINKER_OPTIONS                   -67\n#define CL_INVALID_DEVICE_PARTITION_COUNT           -68\n\n/* OpenCL Version */\n#define CL_VERSION_1_0                              1\n#define CL_VERSION_1_1                              1\n#define CL_VERSION_1_2                              1\n\n/* cl_bool */\n#define CL_FALSE                                    0\n#define CL_TRUE                                     1\n#define CL_BLOCKING                                 CL_TRUE\n#define CL_NON_BLOCKING                             CL_FALSE\n\n/* cl_platform_info */\n#define CL_PLATFORM_PROFILE                         0x0900\n#define CL_PLATFORM_VERSION                         0x0901\n#define CL_PLATFORM_NAME                            0x0902\n#define CL_PLATFORM_VENDOR                          0x0903\n#define CL_PLATFORM_EXTENSIONS                      0x0904\n\n/* cl_device_type - bitfield */\n#define CL_DEVICE_TYPE_DEFAULT                      (1 << 0)\n#define CL_DEVICE_TYPE_CPU                          (1 << 1)\n#define CL_DEVICE_TYPE_GPU                          (1 << 2)\n#define CL_DEVICE_TYPE_ACCELERATOR                  (1 << 3)\n#define CL_DEVICE_TYPE_CUSTOM                       (1 << 4)\n#define CL_DEVICE_TYPE_ALL                          0xFFFFFFFF\n\n/* cl_device_info */\n#define CL_DEVICE_TYPE                              0x1000\n#define CL_DEVICE_VENDOR_ID                         0x1001\n#define CL_DEVICE_MAX_COMPUTE_UNITS                 0x1002\n#define CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS          0x1003\n#define CL_DEVICE_MAX_WORK_GROUP_SIZE               0x1004\n#define 
CL_DEVICE_MAX_WORK_ITEM_SIZES               0x1005\n#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR       0x1006\n#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT      0x1007\n#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT        0x1008\n#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG       0x1009\n#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT      0x100A\n#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE     0x100B\n#define CL_DEVICE_MAX_CLOCK_FREQUENCY               0x100C\n#define CL_DEVICE_ADDRESS_BITS                      0x100D\n#define CL_DEVICE_MAX_READ_IMAGE_ARGS               0x100E\n#define CL_DEVICE_MAX_WRITE_IMAGE_ARGS              0x100F\n#define CL_DEVICE_MAX_MEM_ALLOC_SIZE                0x1010\n#define CL_DEVICE_IMAGE2D_MAX_WIDTH                 0x1011\n#define CL_DEVICE_IMAGE2D_MAX_HEIGHT                0x1012\n#define CL_DEVICE_IMAGE3D_MAX_WIDTH                 0x1013\n#define CL_DEVICE_IMAGE3D_MAX_HEIGHT                0x1014\n#define CL_DEVICE_IMAGE3D_MAX_DEPTH                 0x1015\n#define CL_DEVICE_IMAGE_SUPPORT                     0x1016\n#define CL_DEVICE_MAX_PARAMETER_SIZE                0x1017\n#define CL_DEVICE_MAX_SAMPLERS                      0x1018\n#define CL_DEVICE_MEM_BASE_ADDR_ALIGN               0x1019\n#define CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE          0x101A\n#define CL_DEVICE_SINGLE_FP_CONFIG                  0x101B\n#define CL_DEVICE_GLOBAL_MEM_CACHE_TYPE             0x101C\n#define CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE         0x101D\n#define CL_DEVICE_GLOBAL_MEM_CACHE_SIZE             0x101E\n#define CL_DEVICE_GLOBAL_MEM_SIZE                   0x101F\n#define CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE          0x1020\n#define CL_DEVICE_MAX_CONSTANT_ARGS                 0x1021\n#define CL_DEVICE_LOCAL_MEM_TYPE                    0x1022\n#define CL_DEVICE_LOCAL_MEM_SIZE                    0x1023\n#define CL_DEVICE_ERROR_CORRECTION_SUPPORT          0x1024\n#define CL_DEVICE_PROFILING_TIMER_RESOLUTION        0x1025\n#define 
CL_DEVICE_ENDIAN_LITTLE                     0x1026\n#define CL_DEVICE_AVAILABLE                         0x1027\n#define CL_DEVICE_COMPILER_AVAILABLE                0x1028\n#define CL_DEVICE_EXECUTION_CAPABILITIES            0x1029\n#define CL_DEVICE_QUEUE_PROPERTIES                  0x102A\n#define CL_DEVICE_NAME                              0x102B\n#define CL_DEVICE_VENDOR                            0x102C\n#define CL_DRIVER_VERSION                           0x102D\n#define CL_DEVICE_PROFILE                           0x102E\n#define CL_DEVICE_VERSION                           0x102F\n#define CL_DEVICE_EXTENSIONS                        0x1030\n#define CL_DEVICE_PLATFORM                          0x1031\n#define CL_DEVICE_DOUBLE_FP_CONFIG                  0x1032\n/* 0x1033 reserved for CL_DEVICE_HALF_FP_CONFIG */\n#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF       0x1034\n#define CL_DEVICE_HOST_UNIFIED_MEMORY               0x1035\n#define CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR          0x1036\n#define CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT         0x1037\n#define CL_DEVICE_NATIVE_VECTOR_WIDTH_INT           0x1038\n#define CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG          0x1039\n#define CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT         0x103A\n#define CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE        0x103B\n#define CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF          0x103C\n#define CL_DEVICE_OPENCL_C_VERSION                  0x103D\n#define CL_DEVICE_LINKER_AVAILABLE                  0x103E\n#define CL_DEVICE_BUILT_IN_KERNELS                  0x103F\n#define CL_DEVICE_IMAGE_MAX_BUFFER_SIZE             0x1040\n#define CL_DEVICE_IMAGE_MAX_ARRAY_SIZE              0x1041\n#define CL_DEVICE_PARENT_DEVICE                     0x1042\n#define CL_DEVICE_PARTITION_MAX_SUB_DEVICES         0x1043\n#define CL_DEVICE_PARTITION_PROPERTIES              0x1044\n#define CL_DEVICE_PARTITION_AFFINITY_DOMAIN         0x1045\n#define CL_DEVICE_PARTITION_TYPE                    0x1046\n#define CL_DEVICE_REFERENCE_COUNT   
                0x1047\n#define CL_DEVICE_PREFERRED_INTEROP_USER_SYNC       0x1048\n#define CL_DEVICE_PRINTF_BUFFER_SIZE                0x1049\n#define CL_DEVICE_IMAGE_PITCH_ALIGNMENT             0x104A\n#define CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT      0x104B\n\n/* cl_device_fp_config - bitfield */\n#define CL_FP_DENORM                                (1 << 0)\n#define CL_FP_INF_NAN                               (1 << 1)\n#define CL_FP_ROUND_TO_NEAREST                      (1 << 2)\n#define CL_FP_ROUND_TO_ZERO                         (1 << 3)\n#define CL_FP_ROUND_TO_INF                          (1 << 4)\n#define CL_FP_FMA                                   (1 << 5)\n#define CL_FP_SOFT_FLOAT                            (1 << 6)\n#define CL_FP_CORRECTLY_ROUNDED_DIVIDE_SQRT         (1 << 7)\n\n/* cl_device_mem_cache_type */\n#define CL_NONE                                     0x0\n#define CL_READ_ONLY_CACHE                          0x1\n#define CL_READ_WRITE_CACHE                         0x2\n\n/* cl_device_local_mem_type */\n#define CL_LOCAL                                    0x1\n#define CL_GLOBAL                                   0x2\n\n/* cl_device_exec_capabilities - bitfield */\n#define CL_EXEC_KERNEL                              (1 << 0)\n#define CL_EXEC_NATIVE_KERNEL                       (1 << 1)\n\n/* cl_command_queue_properties - bitfield */\n#define CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE      (1 << 0)\n#define CL_QUEUE_PROFILING_ENABLE                   (1 << 1)\n\n/* cl_context_info  */\n#define CL_CONTEXT_REFERENCE_COUNT                  0x1080\n#define CL_CONTEXT_DEVICES                          0x1081\n#define CL_CONTEXT_PROPERTIES                       0x1082\n#define CL_CONTEXT_NUM_DEVICES                      0x1083\n\n/* cl_context_properties */\n#define CL_CONTEXT_PLATFORM                         0x1084\n#define CL_CONTEXT_INTEROP_USER_SYNC                0x1085\n\n/* cl_device_partition_property */\n#define CL_DEVICE_PARTITION_EQUALLY             
    0x1086\n#define CL_DEVICE_PARTITION_BY_COUNTS               0x1087\n#define CL_DEVICE_PARTITION_BY_COUNTS_LIST_END      0x0\n#define CL_DEVICE_PARTITION_BY_AFFINITY_DOMAIN      0x1088\n\n/* cl_device_affinity_domain */\n#define CL_DEVICE_AFFINITY_DOMAIN_NUMA                     (1 << 0)\n#define CL_DEVICE_AFFINITY_DOMAIN_L4_CACHE                 (1 << 1)\n#define CL_DEVICE_AFFINITY_DOMAIN_L3_CACHE                 (1 << 2)\n#define CL_DEVICE_AFFINITY_DOMAIN_L2_CACHE                 (1 << 3)\n#define CL_DEVICE_AFFINITY_DOMAIN_L1_CACHE                 (1 << 4)\n#define CL_DEVICE_AFFINITY_DOMAIN_NEXT_PARTITIONABLE       (1 << 5)\n\n/* cl_command_queue_info */\n#define CL_QUEUE_CONTEXT                            0x1090\n#define CL_QUEUE_DEVICE                             0x1091\n#define CL_QUEUE_REFERENCE_COUNT                    0x1092\n#define CL_QUEUE_PROPERTIES                         0x1093\n\n/* cl_mem_flags - bitfield */\n#define CL_MEM_READ_WRITE                           (1 << 0)\n#define CL_MEM_WRITE_ONLY                           (1 << 1)\n#define CL_MEM_READ_ONLY                            (1 << 2)\n#define CL_MEM_USE_HOST_PTR                         (1 << 3)\n#define CL_MEM_ALLOC_HOST_PTR                       (1 << 4)\n#define CL_MEM_COPY_HOST_PTR                        (1 << 5)\n// reserved                                         (1 << 6)\n#define CL_MEM_HOST_WRITE_ONLY                      (1 << 7)\n#define CL_MEM_HOST_READ_ONLY                       (1 << 8)\n#define CL_MEM_HOST_NO_ACCESS                       (1 << 9)\n\n/* cl_mem_migration_flags - bitfield */\n#define CL_MIGRATE_MEM_OBJECT_HOST                  (1 << 0)\n#define CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED     (1 << 1)\n\n/* cl_channel_order */\n#define CL_R                                        0x10B0\n#define CL_A                                        0x10B1\n#define CL_RG                                       0x10B2\n#define CL_RA                                       
0x10B3\n#define CL_RGB                                      0x10B4\n#define CL_RGBA                                     0x10B5\n#define CL_BGRA                                     0x10B6\n#define CL_ARGB                                     0x10B7\n#define CL_INTENSITY                                0x10B8\n#define CL_LUMINANCE                                0x10B9\n#define CL_Rx                                       0x10BA\n#define CL_RGx                                      0x10BB\n#define CL_RGBx                                     0x10BC\n#define CL_DEPTH                                    0x10BD\n#define CL_DEPTH_STENCIL                            0x10BE\n\n/* cl_channel_type */\n#define CL_SNORM_INT8                               0x10D0\n#define CL_SNORM_INT16                              0x10D1\n#define CL_UNORM_INT8                               0x10D2\n#define CL_UNORM_INT16                              0x10D3\n#define CL_UNORM_SHORT_565                          0x10D4\n#define CL_UNORM_SHORT_555                          0x10D5\n#define CL_UNORM_INT_101010                         0x10D6\n#define CL_SIGNED_INT8                              0x10D7\n#define CL_SIGNED_INT16                             0x10D8\n#define CL_SIGNED_INT32                             0x10D9\n#define CL_UNSIGNED_INT8                            0x10DA\n#define CL_UNSIGNED_INT16                           0x10DB\n#define CL_UNSIGNED_INT32                           0x10DC\n#define CL_HALF_FLOAT                               0x10DD\n#define CL_FLOAT                                    0x10DE\n#define CL_UNORM_INT24                              0x10DF\n\n/* cl_mem_object_type */\n#define CL_MEM_OBJECT_BUFFER                        0x10F0\n#define CL_MEM_OBJECT_IMAGE2D                       0x10F1\n#define CL_MEM_OBJECT_IMAGE3D                       0x10F2\n#define CL_MEM_OBJECT_IMAGE2D_ARRAY                 0x10F3\n#define CL_MEM_OBJECT_IMAGE1D                       0x10F4\n#define 
CL_MEM_OBJECT_IMAGE1D_ARRAY                 0x10F5\n#define CL_MEM_OBJECT_IMAGE1D_BUFFER                0x10F6\n\n/* cl_mem_info */\n#define CL_MEM_TYPE                                 0x1100\n#define CL_MEM_FLAGS                                0x1101\n#define CL_MEM_SIZE                                 0x1102\n#define CL_MEM_HOST_PTR                             0x1103\n#define CL_MEM_MAP_COUNT                            0x1104\n#define CL_MEM_REFERENCE_COUNT                      0x1105\n#define CL_MEM_CONTEXT                              0x1106\n#define CL_MEM_ASSOCIATED_MEMOBJECT                 0x1107\n#define CL_MEM_OFFSET                               0x1108\n\n/* cl_image_info */\n#define CL_IMAGE_FORMAT                             0x1110\n#define CL_IMAGE_ELEMENT_SIZE                       0x1111\n#define CL_IMAGE_ROW_PITCH                          0x1112\n#define CL_IMAGE_SLICE_PITCH                        0x1113\n#define CL_IMAGE_WIDTH                              0x1114\n#define CL_IMAGE_HEIGHT                             0x1115\n#define CL_IMAGE_DEPTH                              0x1116\n#define CL_IMAGE_ARRAY_SIZE                         0x1117\n#define CL_IMAGE_BUFFER                             0x1118\n#define CL_IMAGE_NUM_MIP_LEVELS                     0x1119\n#define CL_IMAGE_NUM_SAMPLES                        0x111A\n\n/* cl_addressing_mode */\n#define CL_ADDRESS_NONE                             0x1130\n#define CL_ADDRESS_CLAMP_TO_EDGE                    0x1131\n#define CL_ADDRESS_CLAMP                            0x1132\n#define CL_ADDRESS_REPEAT                           0x1133\n#define CL_ADDRESS_MIRRORED_REPEAT                  0x1134\n\n/* cl_filter_mode */\n#define CL_FILTER_NEAREST                           0x1140\n#define CL_FILTER_LINEAR                            0x1141\n\n/* cl_sampler_info */\n#define CL_SAMPLER_REFERENCE_COUNT                  0x1150\n#define CL_SAMPLER_CONTEXT                          0x1151\n#define 
CL_SAMPLER_NORMALIZED_COORDS                0x1152\n#define CL_SAMPLER_ADDRESSING_MODE                  0x1153\n#define CL_SAMPLER_FILTER_MODE                      0x1154\n\n/* cl_map_flags - bitfield */\n#define CL_MAP_READ                                 (1 << 0)\n#define CL_MAP_WRITE                                (1 << 1)\n#define CL_MAP_WRITE_INVALIDATE_REGION              (1 << 2)\n\n/* cl_program_info */\n#define CL_PROGRAM_REFERENCE_COUNT                  0x1160\n#define CL_PROGRAM_CONTEXT                          0x1161\n#define CL_PROGRAM_NUM_DEVICES                      0x1162\n#define CL_PROGRAM_DEVICES                          0x1163\n#define CL_PROGRAM_SOURCE                           0x1164\n#define CL_PROGRAM_BINARY_SIZES                     0x1165\n#define CL_PROGRAM_BINARIES                         0x1166\n#define CL_PROGRAM_NUM_KERNELS                      0x1167\n#define CL_PROGRAM_KERNEL_NAMES                     0x1168\n\n/* cl_program_build_info */\n#define CL_PROGRAM_BUILD_STATUS                     0x1181\n#define CL_PROGRAM_BUILD_OPTIONS                    0x1182\n#define CL_PROGRAM_BUILD_LOG                        0x1183\n#define CL_PROGRAM_BINARY_TYPE                      0x1184\n\n/* cl_program_binary_type */\n#define CL_PROGRAM_BINARY_TYPE_NONE                 0x0\n#define CL_PROGRAM_BINARY_TYPE_COMPILED_OBJECT      0x1\n#define CL_PROGRAM_BINARY_TYPE_LIBRARY              0x2\n#define CL_PROGRAM_BINARY_TYPE_EXECUTABLE           0x4\n\n/* cl_build_status */\n#define CL_BUILD_SUCCESS                            0\n#define CL_BUILD_NONE                               -1\n#define CL_BUILD_ERROR                              -2\n#define CL_BUILD_IN_PROGRESS                        -3\n\n/* cl_kernel_info */\n#define CL_KERNEL_FUNCTION_NAME                     0x1190\n#define CL_KERNEL_NUM_ARGS                          0x1191\n#define CL_KERNEL_REFERENCE_COUNT                   0x1192\n#define CL_KERNEL_CONTEXT                           
0x1193\n#define CL_KERNEL_PROGRAM                           0x1194\n#define CL_KERNEL_ATTRIBUTES                        0x1195\n\n/* cl_kernel_arg_info */\n#define CL_KERNEL_ARG_ADDRESS_QUALIFIER             0x1196\n#define CL_KERNEL_ARG_ACCESS_QUALIFIER              0x1197\n#define CL_KERNEL_ARG_TYPE_NAME                     0x1198\n#define CL_KERNEL_ARG_TYPE_QUALIFIER                0x1199\n#define CL_KERNEL_ARG_NAME                          0x119A\n\n/* cl_kernel_arg_address_qualifier */\n#define CL_KERNEL_ARG_ADDRESS_GLOBAL                0x119B\n#define CL_KERNEL_ARG_ADDRESS_LOCAL                 0x119C\n#define CL_KERNEL_ARG_ADDRESS_CONSTANT              0x119D\n#define CL_KERNEL_ARG_ADDRESS_PRIVATE               0x119E\n\n/* cl_kernel_arg_access_qualifier */\n#define CL_KERNEL_ARG_ACCESS_READ_ONLY              0x11A0\n#define CL_KERNEL_ARG_ACCESS_WRITE_ONLY             0x11A1\n#define CL_KERNEL_ARG_ACCESS_READ_WRITE             0x11A2\n#define CL_KERNEL_ARG_ACCESS_NONE                   0x11A3\n\n/* cl_kernel_arg_type_qualifer */\n#define CL_KERNEL_ARG_TYPE_NONE                     0\n#define CL_KERNEL_ARG_TYPE_CONST                    (1 << 0)\n#define CL_KERNEL_ARG_TYPE_RESTRICT                 (1 << 1)\n#define CL_KERNEL_ARG_TYPE_VOLATILE                 (1 << 2)\n\n/* cl_kernel_work_group_info */\n#define CL_KERNEL_WORK_GROUP_SIZE                   0x11B0\n#define CL_KERNEL_COMPILE_WORK_GROUP_SIZE           0x11B1\n#define CL_KERNEL_LOCAL_MEM_SIZE                    0x11B2\n#define CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE 0x11B3\n#define CL_KERNEL_PRIVATE_MEM_SIZE                  0x11B4\n#define CL_KERNEL_GLOBAL_WORK_SIZE                  0x11B5\n\n/* cl_event_info  */\n#define CL_EVENT_COMMAND_QUEUE                      0x11D0\n#define CL_EVENT_COMMAND_TYPE                       0x11D1\n#define CL_EVENT_REFERENCE_COUNT                    0x11D2\n#define CL_EVENT_COMMAND_EXECUTION_STATUS           0x11D3\n#define CL_EVENT_CONTEXT                     
       0x11D4\n\n/* cl_command_type */\n#define CL_COMMAND_NDRANGE_KERNEL                   0x11F0\n#define CL_COMMAND_TASK                             0x11F1\n#define CL_COMMAND_NATIVE_KERNEL                    0x11F2\n#define CL_COMMAND_READ_BUFFER                      0x11F3\n#define CL_COMMAND_WRITE_BUFFER                     0x11F4\n#define CL_COMMAND_COPY_BUFFER                      0x11F5\n#define CL_COMMAND_READ_IMAGE                       0x11F6\n#define CL_COMMAND_WRITE_IMAGE                      0x11F7\n#define CL_COMMAND_COPY_IMAGE                       0x11F8\n#define CL_COMMAND_COPY_IMAGE_TO_BUFFER             0x11F9\n#define CL_COMMAND_COPY_BUFFER_TO_IMAGE             0x11FA\n#define CL_COMMAND_MAP_BUFFER                       0x11FB\n#define CL_COMMAND_MAP_IMAGE                        0x11FC\n#define CL_COMMAND_UNMAP_MEM_OBJECT                 0x11FD\n#define CL_COMMAND_MARKER                           0x11FE\n#define CL_COMMAND_ACQUIRE_GL_OBJECTS               0x11FF\n#define CL_COMMAND_RELEASE_GL_OBJECTS               0x1200\n#define CL_COMMAND_READ_BUFFER_RECT                 0x1201\n#define CL_COMMAND_WRITE_BUFFER_RECT                0x1202\n#define CL_COMMAND_COPY_BUFFER_RECT                 0x1203\n#define CL_COMMAND_USER                             0x1204\n#define CL_COMMAND_BARRIER                          0x1205\n#define CL_COMMAND_MIGRATE_MEM_OBJECTS              0x1206\n#define CL_COMMAND_FILL_BUFFER                      0x1207\n#define CL_COMMAND_FILL_IMAGE                       0x1208\n\n/* command execution status */\n#define CL_COMPLETE                                 0x0\n#define CL_RUNNING                                  0x1\n#define CL_SUBMITTED                                0x2\n#define CL_QUEUED                                   0x3\n\n/* cl_buffer_create_type  */\n#define CL_BUFFER_CREATE_TYPE_REGION                0x1220\n\n/* cl_profiling_info  */\n#define CL_PROFILING_COMMAND_QUEUED                 0x1280\n#define 
CL_PROFILING_COMMAND_SUBMIT                 0x1281\n#define CL_PROFILING_COMMAND_START                  0x1282\n#define CL_PROFILING_COMMAND_END                    0x1283\n\n/********************************************************************************************************/\n\n/* Platform API */\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetPlatformIDs(cl_uint          /* num_entries */,\n                 cl_platform_id * /* platforms */,\n                 cl_uint *        /* num_platforms */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetPlatformInfo(cl_platform_id   /* platform */,\n                  cl_platform_info /* param_name */,\n                  size_t           /* param_value_size */,\n                  void *           /* param_value */,\n                  size_t *         /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n\n/* Device APIs */\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetDeviceIDs(cl_platform_id   /* platform */,\n               cl_device_type   /* device_type */,\n               cl_uint          /* num_entries */,\n               cl_device_id *   /* devices */,\n               cl_uint *        /* num_devices */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetDeviceInfo(cl_device_id    /* device */,\n                cl_device_info  /* param_name */,\n                size_t          /* param_value_size */,\n                void *          /* param_value */,\n                size_t *        /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclCreateSubDevices(cl_device_id                         /* in_device */,\n                   const cl_device_partition_property * /* properties */,\n                   cl_uint                              /* num_devices */,\n                   cl_device_id *                       /* out_devices */,\n                   cl_uint *                            /* num_devices_ret */) 
CL_API_SUFFIX__VERSION_1_2;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclRetainDevice(cl_device_id /* device */) CL_API_SUFFIX__VERSION_1_2;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclReleaseDevice(cl_device_id /* device */) CL_API_SUFFIX__VERSION_1_2;\n\n/* Context APIs  */\nextern CL_API_ENTRY cl_context CL_API_CALL\nclCreateContext(const cl_context_properties * /* properties */,\n                cl_uint                 /* num_devices */,\n                const cl_device_id *    /* devices */,\n                void (CL_CALLBACK * /* pfn_notify */)(const char *, const void *, size_t, void *),\n                void *                  /* user_data */,\n                cl_int *                /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_context CL_API_CALL\nclCreateContextFromType(const cl_context_properties * /* properties */,\n                        cl_device_type          /* device_type */,\n                        void (CL_CALLBACK *     /* pfn_notify*/ )(const char *, const void *, size_t, void *),\n                        void *                  /* user_data */,\n                        cl_int *                /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclRetainContext(cl_context /* context */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclReleaseContext(cl_context /* context */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetContextInfo(cl_context         /* context */,\n                 cl_context_info    /* param_name */,\n                 size_t             /* param_value_size */,\n                 void *             /* param_value */,\n                 size_t *           /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n\n/* Command Queue APIs */\nextern CL_API_ENTRY cl_command_queue CL_API_CALL\nclCreateCommandQueue(cl_context                     /* context */,\n                     cl_device_id                   /* 
device */,\n                     cl_command_queue_properties    /* properties */,\n                     cl_int *                       /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclRetainCommandQueue(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclReleaseCommandQueue(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetCommandQueueInfo(cl_command_queue      /* command_queue */,\n                      cl_command_queue_info /* param_name */,\n                      size_t                /* param_value_size */,\n                      void *                /* param_value */,\n                      size_t *              /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n\n/* Memory Object APIs */\nextern CL_API_ENTRY cl_mem CL_API_CALL\nclCreateBuffer(cl_context   /* context */,\n               cl_mem_flags /* flags */,\n               size_t       /* size */,\n               void *       /* host_ptr */,\n               cl_int *     /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_mem CL_API_CALL\nclCreateSubBuffer(cl_mem                   /* buffer */,\n                  cl_mem_flags             /* flags */,\n                  cl_buffer_create_type    /* buffer_create_type */,\n                  const void *             /* buffer_create_info */,\n                  cl_int *                 /* errcode_ret */) CL_API_SUFFIX__VERSION_1_1;\n\nextern CL_API_ENTRY cl_mem CL_API_CALL\nclCreateImage(cl_context              /* context */,\n              cl_mem_flags            /* flags */,\n              const cl_image_format * /* image_format */,\n              const cl_image_desc *   /* image_desc */,\n              void *                  /* host_ptr */,\n              cl_int *                /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2;\n\nextern CL_API_ENTRY cl_int 
CL_API_CALL\nclRetainMemObject(cl_mem /* memobj */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclReleaseMemObject(cl_mem /* memobj */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetSupportedImageFormats(cl_context           /* context */,\n                           cl_mem_flags         /* flags */,\n                           cl_mem_object_type   /* image_type */,\n                           cl_uint              /* num_entries */,\n                           cl_image_format *    /* image_formats */,\n                           cl_uint *            /* num_image_formats */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetMemObjectInfo(cl_mem           /* memobj */,\n                   cl_mem_info      /* param_name */,\n                   size_t           /* param_value_size */,\n                   void *           /* param_value */,\n                   size_t *         /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetImageInfo(cl_mem           /* image */,\n               cl_image_info    /* param_name */,\n               size_t           /* param_value_size */,\n               void *           /* param_value */,\n               size_t *         /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclSetMemObjectDestructorCallback(  cl_mem /* memobj */,\n                                    void (CL_CALLBACK * /*pfn_notify*/)( cl_mem /* memobj */, void* /*user_data*/),\n                                    void * /*user_data */ )             CL_API_SUFFIX__VERSION_1_1;\n\n/* Sampler APIs */\nextern CL_API_ENTRY cl_sampler CL_API_CALL\nclCreateSampler(cl_context          /* context */,\n                cl_bool             /* normalized_coords */,\n                cl_addressing_mode  /* addressing_mode */,\n                cl_filter_mode      /* filter_mode */,\n                cl_int * 
           /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclRetainSampler(cl_sampler /* sampler */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclReleaseSampler(cl_sampler /* sampler */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetSamplerInfo(cl_sampler         /* sampler */,\n                 cl_sampler_info    /* param_name */,\n                 size_t             /* param_value_size */,\n                 void *             /* param_value */,\n                 size_t *           /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n\n/* Program Object APIs  */\nextern CL_API_ENTRY cl_program CL_API_CALL\nclCreateProgramWithSource(cl_context        /* context */,\n                          cl_uint           /* count */,\n                          const char **     /* strings */,\n                          const size_t *    /* lengths */,\n                          cl_int *          /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_program CL_API_CALL\nclCreateProgramWithBinary(cl_context                     /* context */,\n                          cl_uint                        /* num_devices */,\n                          const cl_device_id *           /* device_list */,\n                          const size_t *                 /* lengths */,\n                          const unsigned char **         /* binaries */,\n                          cl_int *                       /* binary_status */,\n                          cl_int *                       /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_program CL_API_CALL\nclCreateProgramWithBuiltInKernels(cl_context            /* context */,\n                                  cl_uint               /* num_devices */,\n                                  const cl_device_id *  /* device_list */,\n                                  const char *          /* kernel_names 
*/,\n                                  cl_int *              /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclRetainProgram(cl_program /* program */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclReleaseProgram(cl_program /* program */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclBuildProgram(cl_program           /* program */,\n               cl_uint              /* num_devices */,\n               const cl_device_id * /* device_list */,\n               const char *         /* options */,\n               void (CL_CALLBACK *  /* pfn_notify */)(cl_program /* program */, void * /* user_data */),\n               void *               /* user_data */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclCompileProgram(cl_program           /* program */,\n                 cl_uint              /* num_devices */,\n                 const cl_device_id * /* device_list */,\n                 const char *         /* options */,\n                 cl_uint              /* num_input_headers */,\n                 const cl_program *   /* input_headers */,\n                 const char **        /* header_include_names */,\n                 void (CL_CALLBACK *  /* pfn_notify */)(cl_program /* program */, void * /* user_data */),\n                 void *               /* user_data */) CL_API_SUFFIX__VERSION_1_2;\n\nextern CL_API_ENTRY cl_program CL_API_CALL\nclLinkProgram(cl_context           /* context */,\n              cl_uint              /* num_devices */,\n              const cl_device_id * /* device_list */,\n              const char *         /* options */,\n              cl_uint              /* num_input_programs */,\n              const cl_program *   /* input_programs */,\n              void (CL_CALLBACK *  /* pfn_notify */)(cl_program /* program */, void * /* user_data */),\n              void *               /* user_data */,\n              cl_int *           
  /* errcode_ret */ ) CL_API_SUFFIX__VERSION_1_2;\n\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclUnloadPlatformCompiler(cl_platform_id /* platform */) CL_API_SUFFIX__VERSION_1_2;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetProgramInfo(cl_program         /* program */,\n                 cl_program_info    /* param_name */,\n                 size_t             /* param_value_size */,\n                 void *             /* param_value */,\n                 size_t *           /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetProgramBuildInfo(cl_program            /* program */,\n                      cl_device_id          /* device */,\n                      cl_program_build_info /* param_name */,\n                      size_t                /* param_value_size */,\n                      void *                /* param_value */,\n                      size_t *              /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n\n/* Kernel Object APIs */\nextern CL_API_ENTRY cl_kernel CL_API_CALL\nclCreateKernel(cl_program      /* program */,\n               const char *    /* kernel_name */,\n               cl_int *        /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclCreateKernelsInProgram(cl_program     /* program */,\n                         cl_uint        /* num_kernels */,\n                         cl_kernel *    /* kernels */,\n                         cl_uint *      /* num_kernels_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclRetainKernel(cl_kernel    /* kernel */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclReleaseKernel(cl_kernel   /* kernel */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclSetKernelArg(cl_kernel    /* kernel */,\n               cl_uint      /* arg_index */,\n               size_t       /* arg_size */,\n               const void * /* arg_value */) 
CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetKernelInfo(cl_kernel       /* kernel */,\n                cl_kernel_info  /* param_name */,\n                size_t          /* param_value_size */,\n                void *          /* param_value */,\n                size_t *        /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetKernelArgInfo(cl_kernel       /* kernel */,\n                   cl_uint         /* arg_indx */,\n                   cl_kernel_arg_info  /* param_name */,\n                   size_t          /* param_value_size */,\n                   void *          /* param_value */,\n                   size_t *        /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_2;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetKernelWorkGroupInfo(cl_kernel                  /* kernel */,\n                         cl_device_id               /* device */,\n                         cl_kernel_work_group_info  /* param_name */,\n                         size_t                     /* param_value_size */,\n                         void *                     /* param_value */,\n                         size_t *                   /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n\n/* Event Object APIs */\nextern CL_API_ENTRY cl_int CL_API_CALL\nclWaitForEvents(cl_uint             /* num_events */,\n                const cl_event *    /* event_list */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetEventInfo(cl_event         /* event */,\n               cl_event_info    /* param_name */,\n               size_t           /* param_value_size */,\n               void *           /* param_value */,\n               size_t *         /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_event CL_API_CALL\nclCreateUserEvent(cl_context    /* context */,\n                  cl_int *      /* errcode_ret */) 
CL_API_SUFFIX__VERSION_1_1;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclRetainEvent(cl_event /* event */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclReleaseEvent(cl_event /* event */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclSetUserEventStatus(cl_event   /* event */,\n                     cl_int     /* execution_status */) CL_API_SUFFIX__VERSION_1_1;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclSetEventCallback( cl_event    /* event */,\n                    cl_int      /* command_exec_callback_type */,\n                    void (CL_CALLBACK * /* pfn_notify */)(cl_event, cl_int, void *),\n                    void *      /* user_data */) CL_API_SUFFIX__VERSION_1_1;\n\n/* Profiling APIs */\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetEventProfilingInfo(cl_event            /* event */,\n                        cl_profiling_info   /* param_name */,\n                        size_t              /* param_value_size */,\n                        void *              /* param_value */,\n                        size_t *            /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n\n/* Flush and Finish APIs */\nextern CL_API_ENTRY cl_int CL_API_CALL\nclFlush(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclFinish(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0;\n\n/* Enqueued Commands APIs */\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueReadBuffer(cl_command_queue    /* command_queue */,\n                    cl_mem              /* buffer */,\n                    cl_bool             /* blocking_read */,\n                    size_t              /* offset */,\n                    size_t              /* size */,\n                    void *              /* ptr */,\n                    cl_uint             /* num_events_in_wait_list */,\n                    const cl_event *    /* event_wait_list */,\n                    cl_event *    
      /* event */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueReadBufferRect(cl_command_queue    /* command_queue */,\n                        cl_mem              /* buffer */,\n                        cl_bool             /* blocking_read */,\n                        const size_t *      /* buffer_offset */,\n                        const size_t *      /* host_offset */,\n                        const size_t *      /* region */,\n                        size_t              /* buffer_row_pitch */,\n                        size_t              /* buffer_slice_pitch */,\n                        size_t              /* host_row_pitch */,\n                        size_t              /* host_slice_pitch */,\n                        void *              /* ptr */,\n                        cl_uint             /* num_events_in_wait_list */,\n                        const cl_event *    /* event_wait_list */,\n                        cl_event *          /* event */) CL_API_SUFFIX__VERSION_1_1;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueWriteBuffer(cl_command_queue   /* command_queue */,\n                     cl_mem             /* buffer */,\n                     cl_bool            /* blocking_write */,\n                     size_t             /* offset */,\n                     size_t             /* size */,\n                     const void *       /* ptr */,\n                     cl_uint            /* num_events_in_wait_list */,\n                     const cl_event *   /* event_wait_list */,\n                     cl_event *         /* event */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueWriteBufferRect(cl_command_queue    /* command_queue */,\n                         cl_mem              /* buffer */,\n                         cl_bool             /* blocking_write */,\n                         const size_t *      /* buffer_offset */,\n                         const size_t *      /* host_offset 
*/,\n                         const size_t *      /* region */,\n                         size_t              /* buffer_row_pitch */,\n                         size_t              /* buffer_slice_pitch */,\n                         size_t              /* host_row_pitch */,\n                         size_t              /* host_slice_pitch */,\n                         const void *        /* ptr */,\n                         cl_uint             /* num_events_in_wait_list */,\n                         const cl_event *    /* event_wait_list */,\n                         cl_event *          /* event */) CL_API_SUFFIX__VERSION_1_1;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueFillBuffer(cl_command_queue   /* command_queue */,\n                    cl_mem             /* buffer */,\n                    const void *       /* pattern */,\n                    size_t             /* pattern_size */,\n                    size_t             /* offset */,\n                    size_t             /* size */,\n                    cl_uint            /* num_events_in_wait_list */,\n                    const cl_event *   /* event_wait_list */,\n                    cl_event *         /* event */) CL_API_SUFFIX__VERSION_1_2;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueCopyBuffer(cl_command_queue    /* command_queue */,\n                    cl_mem              /* src_buffer */,\n                    cl_mem              /* dst_buffer */,\n                    size_t              /* src_offset */,\n                    size_t              /* dst_offset */,\n                    size_t              /* size */,\n                    cl_uint             /* num_events_in_wait_list */,\n                    const cl_event *    /* event_wait_list */,\n                    cl_event *          /* event */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueCopyBufferRect(cl_command_queue    /* command_queue */,\n                        cl_mem              /* 
src_buffer */,\n                        cl_mem              /* dst_buffer */,\n                        const size_t *      /* src_origin */,\n                        const size_t *      /* dst_origin */,\n                        const size_t *      /* region */,\n                        size_t              /* src_row_pitch */,\n                        size_t              /* src_slice_pitch */,\n                        size_t              /* dst_row_pitch */,\n                        size_t              /* dst_slice_pitch */,\n                        cl_uint             /* num_events_in_wait_list */,\n                        const cl_event *    /* event_wait_list */,\n                        cl_event *          /* event */) CL_API_SUFFIX__VERSION_1_1;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueReadImage(cl_command_queue     /* command_queue */,\n                   cl_mem               /* image */,\n                   cl_bool              /* blocking_read */,\n                   const size_t *       /* origin[3] */,\n                   const size_t *       /* region[3] */,\n                   size_t               /* row_pitch */,\n                   size_t               /* slice_pitch */,\n                   void *               /* ptr */,\n                   cl_uint              /* num_events_in_wait_list */,\n                   const cl_event *     /* event_wait_list */,\n                   cl_event *           /* event */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueWriteImage(cl_command_queue    /* command_queue */,\n                    cl_mem              /* image */,\n                    cl_bool             /* blocking_write */,\n                    const size_t *      /* origin[3] */,\n                    const size_t *      /* region[3] */,\n                    size_t              /* input_row_pitch */,\n                    size_t              /* input_slice_pitch */,\n                    const void *        /* ptr 
*/,\n                    cl_uint             /* num_events_in_wait_list */,\n                    const cl_event *    /* event_wait_list */,\n                    cl_event *          /* event */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueFillImage(cl_command_queue   /* command_queue */,\n                   cl_mem             /* image */,\n                   const void *       /* fill_color */,\n                   const size_t *     /* origin[3] */,\n                   const size_t *     /* region[3] */,\n                   cl_uint            /* num_events_in_wait_list */,\n                   const cl_event *   /* event_wait_list */,\n                   cl_event *         /* event */) CL_API_SUFFIX__VERSION_1_2;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueCopyImage(cl_command_queue     /* command_queue */,\n                   cl_mem               /* src_image */,\n                   cl_mem               /* dst_image */,\n                   const size_t *       /* src_origin[3] */,\n                   const size_t *       /* dst_origin[3] */,\n                   const size_t *       /* region[3] */,\n                   cl_uint              /* num_events_in_wait_list */,\n                   const cl_event *     /* event_wait_list */,\n                   cl_event *           /* event */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueCopyImageToBuffer(cl_command_queue /* command_queue */,\n                           cl_mem           /* src_image */,\n                           cl_mem           /* dst_buffer */,\n                           const size_t *   /* src_origin[3] */,\n                           const size_t *   /* region[3] */,\n                           size_t           /* dst_offset */,\n                           cl_uint          /* num_events_in_wait_list */,\n                           const cl_event * /* event_wait_list */,\n                           cl_event *       /* event 
*/) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueCopyBufferToImage(cl_command_queue /* command_queue */,\n                           cl_mem           /* src_buffer */,\n                           cl_mem           /* dst_image */,\n                           size_t           /* src_offset */,\n                           const size_t *   /* dst_origin[3] */,\n                           const size_t *   /* region[3] */,\n                           cl_uint          /* num_events_in_wait_list */,\n                           const cl_event * /* event_wait_list */,\n                           cl_event *       /* event */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY void * CL_API_CALL\nclEnqueueMapBuffer(cl_command_queue /* command_queue */,\n                   cl_mem           /* buffer */,\n                   cl_bool          /* blocking_map */,\n                   cl_map_flags     /* map_flags */,\n                   size_t           /* offset */,\n                   size_t           /* size */,\n                   cl_uint          /* num_events_in_wait_list */,\n                   const cl_event * /* event_wait_list */,\n                   cl_event *       /* event */,\n                   cl_int *         /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY void * CL_API_CALL\nclEnqueueMapImage(cl_command_queue  /* command_queue */,\n                  cl_mem            /* image */,\n                  cl_bool           /* blocking_map */,\n                  cl_map_flags      /* map_flags */,\n                  const size_t *    /* origin[3] */,\n                  const size_t *    /* region[3] */,\n                  size_t *          /* image_row_pitch */,\n                  size_t *          /* image_slice_pitch */,\n                  cl_uint           /* num_events_in_wait_list */,\n                  const cl_event *  /* event_wait_list */,\n                  cl_event *        /* event */,\n                  
cl_int *          /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueUnmapMemObject(cl_command_queue /* command_queue */,\n                        cl_mem           /* memobj */,\n                        void *           /* mapped_ptr */,\n                        cl_uint          /* num_events_in_wait_list */,\n                        const cl_event *  /* event_wait_list */,\n                        cl_event *        /* event */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueMigrateMemObjects(cl_command_queue       /* command_queue */,\n                           cl_uint                /* num_mem_objects */,\n                           const cl_mem *         /* mem_objects */,\n                           cl_mem_migration_flags /* flags */,\n                           cl_uint                /* num_events_in_wait_list */,\n                           const cl_event *       /* event_wait_list */,\n                           cl_event *             /* event */) CL_API_SUFFIX__VERSION_1_2;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueNDRangeKernel(cl_command_queue /* command_queue */,\n                       cl_kernel        /* kernel */,\n                       cl_uint          /* work_dim */,\n                       const size_t *   /* global_work_offset */,\n                       const size_t *   /* global_work_size */,\n                       const size_t *   /* local_work_size */,\n                       cl_uint          /* num_events_in_wait_list */,\n                       const cl_event * /* event_wait_list */,\n                       cl_event *       /* event */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueTask(cl_command_queue  /* command_queue */,\n              cl_kernel         /* kernel */,\n              cl_uint           /* num_events_in_wait_list */,\n              const cl_event *  /* event_wait_list */,\n              cl_event *   
     /* event */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueNativeKernel(cl_command_queue  /* command_queue */,\n\t\t\t\t\t  void (CL_CALLBACK * /*user_func*/)(void *),\n                      void *            /* args */,\n                      size_t            /* cb_args */,\n                      cl_uint           /* num_mem_objects */,\n                      const cl_mem *    /* mem_list */,\n                      const void **     /* args_mem_loc */,\n                      cl_uint           /* num_events_in_wait_list */,\n                      const cl_event *  /* event_wait_list */,\n                      cl_event *        /* event */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueMarkerWithWaitList(cl_command_queue /* command_queue */,\n                            cl_uint           /* num_events_in_wait_list */,\n                            const cl_event *  /* event_wait_list */,\n                            cl_event *        /* event */) CL_API_SUFFIX__VERSION_1_2;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueBarrierWithWaitList(cl_command_queue /* command_queue */,\n                             cl_uint           /* num_events_in_wait_list */,\n                             const cl_event *  /* event_wait_list */,\n                             cl_event *        /* event */) CL_API_SUFFIX__VERSION_1_2;\n\n\n/* Extension function access\n *\n * Returns the extension function address for the given function name,\n * or NULL if a valid function can not be found.  
The client must\n * check to make sure the address is not NULL, before using or\n * calling the returned function address.\n */\nextern CL_API_ENTRY void * CL_API_CALL\nclGetExtensionFunctionAddressForPlatform(cl_platform_id /* platform */,\n                                         const char *   /* func_name */) CL_API_SUFFIX__VERSION_1_2;\n\n\n// Deprecated OpenCL 1.1 APIs\nextern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL\nclCreateImage2D(cl_context              /* context */,\n                cl_mem_flags            /* flags */,\n                const cl_image_format * /* image_format */,\n                size_t                  /* image_width */,\n                size_t                  /* image_height */,\n                size_t                  /* image_row_pitch */,\n                void *                  /* host_ptr */,\n                cl_int *                /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;\n\nextern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL\nclCreateImage3D(cl_context              /* context */,\n                cl_mem_flags            /* flags */,\n                const cl_image_format * /* image_format */,\n                size_t                  /* image_width */,\n                size_t                  /* image_height */,\n                size_t                  /* image_depth */,\n                size_t                  /* image_row_pitch */,\n                size_t                  /* image_slice_pitch */,\n                void *                  /* host_ptr */,\n                cl_int *                /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;\n\nextern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL\nclEnqueueMarker(cl_command_queue    /* command_queue */,\n                cl_event *          /* event */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;\n\nextern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int 
CL_API_CALL\nclEnqueueWaitForEvents(cl_command_queue /* command_queue */,\n                        cl_uint          /* num_events */,\n                        const cl_event * /* event_list */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;\n\nextern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL\nclEnqueueBarrier(cl_command_queue /* command_queue */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;\n\nextern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL\nclUnloadCompiler(void) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;\n\nextern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED void * CL_API_CALL\nclGetExtensionFunctionAddress(const char * /* func_name */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif  /* __OPENCL_CL_H */\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/libopencl-stub/include/CL/cl_ext.h",
    "content": "/*******************************************************************************\n * Copyright (c) 2008 - 2012 The Khronos Group Inc.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and/or associated documentation files (the\n * \"Materials\"), to deal in the Materials without restriction, including\n * without limitation the rights to use, copy, modify, merge, publish,\n * distribute, sublicense, and/or sell copies of the Materials, and to\n * permit persons to whom the Materials are furnished to do so, subject to\n * the following conditions:\n *\n * The above copyright notice and this permission notice shall be included\n * in all copies or substantial portions of the Materials.\n *\n * THE MATERIALS ARE PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.\n ******************************************************************************/\n\n/* $Revision: 11928 $ on $Date: 2010-07-13 09:04:56 -0700 (Tue, 13 Jul 2010) $ */\n\n/* cl_ext.h contains OpenCL extensions which don't have external */\n/* (OpenGL, D3D) dependencies.                                   
*/\n\n#ifndef __CL_EXT_H\n#define __CL_EXT_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#ifdef __APPLE__\n\t#include <OpenCL/cl.h>\n    #include <AvailabilityMacros.h>\n#else\n\t#include <CL/cl.h>\n#endif\n\n/* cl_khr_fp16 extension - no extension #define since it has no functions  */\n#define CL_DEVICE_HALF_FP_CONFIG                    0x1033\n\n/* Memory object destruction\n *\n * Apple extension for use to manage externally allocated buffers used with cl_mem objects with CL_MEM_USE_HOST_PTR\n *\n * Registers a user callback function that will be called when the memory object is deleted and its resources \n * freed. Each call to clSetMemObjectCallbackFn registers the specified user callback function on a callback \n * stack associated with memobj. The registered user callback functions are called in the reverse order in \n * which they were registered. The user callback functions are called and then the memory object is deleted \n * and its resources freed. This provides a mechanism for the application (and libraries) using memobj to be \n * notified when the memory referenced by host_ptr, specified when the memory object is created and used as \n * the storage bits for the memory object, can be reused or freed.\n *\n * The application may not call CL api's with the cl_mem object passed to the pfn_notify.\n *\n * Please check for the \"cl_APPLE_SetMemObjectDestructor\" extension using clGetDeviceInfo(CL_DEVICE_EXTENSIONS)\n * before using.\n */\n#define cl_APPLE_SetMemObjectDestructor 1\ncl_int\tCL_API_ENTRY clSetMemObjectDestructorAPPLE(  cl_mem /* memobj */, \n                                        void (* /*pfn_notify*/)( cl_mem /* memobj */, void* /*user_data*/), \n                                        void * /*user_data */ )             CL_EXT_SUFFIX__VERSION_1_0;  \n\n\n/* Context Logging Functions\n *\n * The next three convenience functions are intended to be used as the pfn_notify parameter to clCreateContext().\n * Please check for the 
\"cl_APPLE_ContextLoggingFunctions\" extension using clGetDeviceInfo(CL_DEVICE_EXTENSIONS)\n * before using.\n *\n * clLogMessagesToSystemLog fowards on all log messages to the Apple System Logger \n */\n#define cl_APPLE_ContextLoggingFunctions 1\nextern void CL_API_ENTRY clLogMessagesToSystemLogAPPLE(  const char * /* errstr */, \n                                            const void * /* private_info */, \n                                            size_t       /* cb */, \n                                            void *       /* user_data */ )  CL_EXT_SUFFIX__VERSION_1_0;\n\n/* clLogMessagesToStdout sends all log messages to the file descriptor stdout */\nextern void CL_API_ENTRY clLogMessagesToStdoutAPPLE(   const char * /* errstr */, \n                                          const void * /* private_info */, \n                                          size_t       /* cb */, \n                                          void *       /* user_data */ )    CL_EXT_SUFFIX__VERSION_1_0;\n\n/* clLogMessagesToStderr sends all log messages to the file descriptor stderr */\nextern void CL_API_ENTRY clLogMessagesToStderrAPPLE(   const char * /* errstr */, \n                                          const void * /* private_info */, \n                                          size_t       /* cb */, \n                                          void *       /* user_data */ )    CL_EXT_SUFFIX__VERSION_1_0;\n\n\n/************************ \n* cl_khr_icd extension *                                                  \n************************/\n#define cl_khr_icd 1\n\n/* cl_platform_info                                                        */\n#define CL_PLATFORM_ICD_SUFFIX_KHR                  0x0920\n\n/* Additional Error Codes                                                  */\n#define CL_PLATFORM_NOT_FOUND_KHR                   -1001\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclIcdGetPlatformIDsKHR(cl_uint          /* num_entries */,\n                       cl_platform_id 
* /* platforms */,\n                       cl_uint *        /* num_platforms */);\n\ntypedef CL_API_ENTRY cl_int (CL_API_CALL *clIcdGetPlatformIDsKHR_fn)(\n    cl_uint          /* num_entries */,\n    cl_platform_id * /* platforms */,\n    cl_uint *        /* num_platforms */);\n\n\n/* Extension: cl_khr_image2D_buffer\n *\n * This extension allows a 2D image to be created from a cl_mem buffer without a copy.\n * The type associated with a 2D image created from a buffer in an OpenCL program is image2d_t.\n * Both the sampler and sampler-less read_image built-in functions are supported for 2D images\n * and 2D images created from a buffer.  Similarly, the write_image built-ins are also supported\n * for 2D images created from a buffer.\n *\n * When the 2D image from buffer is created, the client must specify the width,\n * height, image format (i.e. channel order and channel data type) and optionally the row pitch\n *\n * The pitch specified must be a multiple of CL_DEVICE_IMAGE_PITCH_ALIGNMENT pixels.\n * The base address of the buffer must be aligned to CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT pixels.\n */\n    \n/*************************************\n * cl_khr_initalize_memory extension *\n *************************************/\n    \n#define CL_CONTEXT_MEMORY_INITIALIZE_KHR            0x200E\n    \n    \n/**************************************\n * cl_khr_terminate_context extension *\n **************************************/\n    \n#define CL_DEVICE_TERMINATE_CAPABILITY_KHR          0x200F\n#define CL_CONTEXT_TERMINATE_KHR                    0x2010\n\n#define cl_khr_terminate_context 1\nextern CL_API_ENTRY cl_int CL_API_CALL clTerminateContextKHR(cl_context /* context */) CL_EXT_SUFFIX__VERSION_1_2;\n\ntypedef CL_API_ENTRY cl_int (CL_API_CALL *clTerminateContextKHR_fn)(cl_context /* context */) CL_EXT_SUFFIX__VERSION_1_2;\n    \n    \n/*\n * Extension: cl_khr_spir\n *\n * This extension adds support to create an OpenCL program object from a \n * Standard Portable 
Intermediate Representation (SPIR) instance\n */\n\n/******************************************\n* cl_nv_device_attribute_query extension *\n******************************************/\n/* cl_nv_device_attribute_query extension - no extension #define since it has no functions */\n#define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV       0x4000\n#define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV       0x4001\n#define CL_DEVICE_REGISTERS_PER_BLOCK_NV            0x4002\n#define CL_DEVICE_WARP_SIZE_NV                      0x4003\n#define CL_DEVICE_GPU_OVERLAP_NV                    0x4004\n#define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV            0x4005\n#define CL_DEVICE_INTEGRATED_MEMORY_NV              0x4006\n\n\n/*********************************\n* cl_amd_device_attribute_query *\n*********************************/\n#define CL_DEVICE_PROFILING_TIMER_OFFSET_AMD        0x4036\n\n#ifdef CL_VERSION_1_1\n   /***********************************\n    * cl_ext_device_fission extension *\n    ***********************************/\n    #define cl_ext_device_fission   1\n    \n    extern CL_API_ENTRY cl_int CL_API_CALL\n    clReleaseDeviceEXT( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1; \n    \n    typedef CL_API_ENTRY cl_int \n    (CL_API_CALL *clReleaseDeviceEXT_fn)( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;\n\n    extern CL_API_ENTRY cl_int CL_API_CALL\n    clRetainDeviceEXT( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1; \n    \n    typedef CL_API_ENTRY cl_int \n    (CL_API_CALL *clRetainDeviceEXT_fn)( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;\n\n    typedef cl_ulong  cl_device_partition_property_ext;\n    extern CL_API_ENTRY cl_int CL_API_CALL\n    clCreateSubDevicesEXT(  cl_device_id /*in_device*/,\n                            const cl_device_partition_property_ext * /* properties */,\n                            cl_uint /*num_entries*/,\n                            cl_device_id * /*out_devices*/,\n                            cl_uint * 
/*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;\n\n    typedef CL_API_ENTRY cl_int \n    ( CL_API_CALL * clCreateSubDevicesEXT_fn)(  cl_device_id /*in_device*/,\n                                                const cl_device_partition_property_ext * /* properties */,\n                                                cl_uint /*num_entries*/,\n                                                cl_device_id * /*out_devices*/,\n                                                cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;\n\n    /* cl_device_partition_property_ext */\n    #define CL_DEVICE_PARTITION_EQUALLY_EXT             0x4050\n    #define CL_DEVICE_PARTITION_BY_COUNTS_EXT           0x4051\n    #define CL_DEVICE_PARTITION_BY_NAMES_EXT            0x4052\n    #define CL_DEVICE_PARTITION_BY_AFFINITY_DOMAIN_EXT  0x4053\n    \n    /* clDeviceGetInfo selectors */\n    #define CL_DEVICE_PARENT_DEVICE_EXT                 0x4054\n    #define CL_DEVICE_PARTITION_TYPES_EXT               0x4055\n    #define CL_DEVICE_AFFINITY_DOMAINS_EXT              0x4056\n    #define CL_DEVICE_REFERENCE_COUNT_EXT               0x4057\n    #define CL_DEVICE_PARTITION_STYLE_EXT               0x4058\n    \n    /* error codes */\n    #define CL_DEVICE_PARTITION_FAILED_EXT              -1057\n    #define CL_INVALID_PARTITION_COUNT_EXT              -1058\n    #define CL_INVALID_PARTITION_NAME_EXT               -1059\n    \n    /* CL_AFFINITY_DOMAINs */\n    #define CL_AFFINITY_DOMAIN_L1_CACHE_EXT             0x1\n    #define CL_AFFINITY_DOMAIN_L2_CACHE_EXT             0x2\n    #define CL_AFFINITY_DOMAIN_L3_CACHE_EXT             0x3\n    #define CL_AFFINITY_DOMAIN_L4_CACHE_EXT             0x4\n    #define CL_AFFINITY_DOMAIN_NUMA_EXT                 0x10\n    #define CL_AFFINITY_DOMAIN_NEXT_FISSIONABLE_EXT     0x100\n    \n    /* cl_device_partition_property_ext list terminators */\n    #define CL_PROPERTIES_LIST_END_EXT                  ((cl_device_partition_property_ext) 0)\n    #define 
CL_PARTITION_BY_COUNTS_LIST_END_EXT         ((cl_device_partition_property_ext) 0)\n    #define CL_PARTITION_BY_NAMES_LIST_END_EXT          ((cl_device_partition_property_ext) 0 - 1)\n\n\n\n#endif /* CL_VERSION_1_1 */\n\n#ifdef __cplusplus\n}\n#endif\n\n\n#endif /* __CL_EXT_H */\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/libopencl-stub/include/CL/cl_gl.h",
    "content": "/**********************************************************************************\n * Copyright (c) 2008 - 2012 The Khronos Group Inc.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and/or associated documentation files (the\n * \"Materials\"), to deal in the Materials without restriction, including\n * without limitation the rights to use, copy, modify, merge, publish,\n * distribute, sublicense, and/or sell copies of the Materials, and to\n * permit persons to whom the Materials are furnished to do so, subject to\n * the following conditions:\n *\n * The above copyright notice and this permission notice shall be included\n * in all copies or substantial portions of the Materials.\n *\n * THE MATERIALS ARE PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.\n **********************************************************************************/\n\n#ifndef __OPENCL_CL_GL_H\n#define __OPENCL_CL_GL_H\n\n#ifdef __APPLE__\n#include <OpenCL/cl.h>\n#else\n#include <CL/cl.h>\n#endif\t\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\ntypedef cl_uint     cl_gl_object_type;\ntypedef cl_uint     cl_gl_texture_info;\ntypedef cl_uint     cl_gl_platform_info;\ntypedef struct __GLsync *cl_GLsync;\n\n/* cl_gl_object_type = 0x2000 - 0x200F enum values are currently taken           */\n#define CL_GL_OBJECT_BUFFER                     0x2000\n#define CL_GL_OBJECT_TEXTURE2D                  0x2001\n#define CL_GL_OBJECT_TEXTURE3D                  0x2002\n#define CL_GL_OBJECT_RENDERBUFFER               
0x2003\n#define CL_GL_OBJECT_TEXTURE2D_ARRAY            0x200E\n#define CL_GL_OBJECT_TEXTURE1D                  0x200F\n#define CL_GL_OBJECT_TEXTURE1D_ARRAY            0x2010\n#define CL_GL_OBJECT_TEXTURE_BUFFER             0x2011\n\n/* cl_gl_texture_info           */\n#define CL_GL_TEXTURE_TARGET                    0x2004\n#define CL_GL_MIPMAP_LEVEL                      0x2005\n#define CL_GL_NUM_SAMPLES                       0x2012\n\n\nextern CL_API_ENTRY cl_mem CL_API_CALL\nclCreateFromGLBuffer(cl_context     /* context */,\n                     cl_mem_flags   /* flags */,\n                     cl_GLuint      /* bufobj */,\n                     int *          /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_mem CL_API_CALL\nclCreateFromGLTexture(cl_context      /* context */,\n                      cl_mem_flags    /* flags */,\n                      cl_GLenum       /* target */,\n                      cl_GLint        /* miplevel */,\n                      cl_GLuint       /* texture */,\n                      cl_int *        /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2;\n    \nextern CL_API_ENTRY cl_mem CL_API_CALL\nclCreateFromGLRenderbuffer(cl_context   /* context */,\n                           cl_mem_flags /* flags */,\n                           cl_GLuint    /* renderbuffer */,\n                           cl_int *     /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetGLObjectInfo(cl_mem                /* memobj */,\n                  cl_gl_object_type *   /* gl_object_type */,\n                  cl_GLuint *           /* gl_object_name */) CL_API_SUFFIX__VERSION_1_0;\n                  \nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetGLTextureInfo(cl_mem               /* memobj */,\n                   cl_gl_texture_info   /* param_name */,\n                   size_t               /* param_value_size */,\n                   void *               /* param_value */,\n                   size_t *   
          /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueAcquireGLObjects(cl_command_queue      /* command_queue */,\n                          cl_uint               /* num_objects */,\n                          const cl_mem *        /* mem_objects */,\n                          cl_uint               /* num_events_in_wait_list */,\n                          const cl_event *      /* event_wait_list */,\n                          cl_event *            /* event */) CL_API_SUFFIX__VERSION_1_0;\n\nextern CL_API_ENTRY cl_int CL_API_CALL\nclEnqueueReleaseGLObjects(cl_command_queue      /* command_queue */,\n                          cl_uint               /* num_objects */,\n                          const cl_mem *        /* mem_objects */,\n                          cl_uint               /* num_events_in_wait_list */,\n                          const cl_event *      /* event_wait_list */,\n                          cl_event *            /* event */) CL_API_SUFFIX__VERSION_1_0;\n\n\n// Deprecated OpenCL 1.1 APIs\nextern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL\nclCreateFromGLTexture2D(cl_context      /* context */,\n                        cl_mem_flags    /* flags */,\n                        cl_GLenum       /* target */,\n                        cl_GLint        /* miplevel */,\n                        cl_GLuint       /* texture */,\n                        cl_int *        /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;\n    \nextern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL\nclCreateFromGLTexture3D(cl_context      /* context */,\n                        cl_mem_flags    /* flags */,\n                        cl_GLenum       /* target */,\n                        cl_GLint        /* miplevel */,\n                        cl_GLuint       /* texture */,\n                        cl_int *        /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;\n  
  \n/* cl_khr_gl_sharing extension  */\n    \n#define cl_khr_gl_sharing 1\n    \ntypedef cl_uint     cl_gl_context_info;\n    \n/* Additional Error Codes  */\n#define CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR  -1000\n    \n/* cl_gl_context_info  */\n#define CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR    0x2006\n#define CL_DEVICES_FOR_GL_CONTEXT_KHR           0x2007\n    \n/* Additional cl_context_properties  */\n#define CL_GL_CONTEXT_KHR                       0x2008\n#define CL_EGL_DISPLAY_KHR                      0x2009\n#define CL_GLX_DISPLAY_KHR                      0x200A\n#define CL_WGL_HDC_KHR                          0x200B\n#define CL_CGL_SHAREGROUP_KHR                   0x200C\n    \nextern CL_API_ENTRY cl_int CL_API_CALL\nclGetGLContextInfoKHR(const cl_context_properties * /* properties */,\n                      cl_gl_context_info            /* param_name */,\n                      size_t                        /* param_value_size */,\n                      void *                        /* param_value */,\n                      size_t *                      /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;\n    \ntypedef CL_API_ENTRY cl_int (CL_API_CALL *clGetGLContextInfoKHR_fn)(\n    const cl_context_properties * properties,\n    cl_gl_context_info            param_name,\n    size_t                        param_value_size,\n    void *                        param_value,\n    size_t *                      param_value_size_ret);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif  /* __OPENCL_CL_GL_H */\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/libopencl-stub/include/CL/cl_gl_ext.h",
    "content": "/**********************************************************************************\n * Copyright (c) 2008-2012 The Khronos Group Inc.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and/or associated documentation files (the\n * \"Materials\"), to deal in the Materials without restriction, including\n * without limitation the rights to use, copy, modify, merge, publish,\n * distribute, sublicense, and/or sell copies of the Materials, and to\n * permit persons to whom the Materials are furnished to do so, subject to\n * the following conditions:\n *\n * The above copyright notice and this permission notice shall be included\n * in all copies or substantial portions of the Materials.\n *\n * THE MATERIALS ARE PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.\n **********************************************************************************/\n\n/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */\n\n/* cl_gl_ext.h contains vendor (non-KHR) OpenCL extensions which have           */\n/* OpenGL dependencies.                                                         */\n\n#ifndef __OPENCL_CL_GL_EXT_H\n#define __OPENCL_CL_GL_EXT_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#ifdef __APPLE__\n    #include <OpenCL/cl_gl.h>\n#else\n    #include <CL/cl_gl.h>\n#endif\n\n/*\n * For each extension, follow this template\n *  cl_VEN_extname extension  */\n/* #define cl_VEN_extname 1\n * ... define new types, if any\n * ... define new tokens, if any\n * ... 
define new APIs, if any\n *\n *  If you need GLtypes here, mirror them with a cl_GLtype, rather than including a GL header\n *  This allows us to avoid having to decide whether to include GL headers or GLES here.\n */\n\n/* \n *  cl_khr_gl_event  extension\n *  See section 9.9 in the OpenCL 1.1 spec for more information\n */\n#define CL_COMMAND_GL_FENCE_SYNC_OBJECT_KHR     0x200D\n\nextern CL_API_ENTRY cl_event CL_API_CALL\nclCreateEventFromGLsyncKHR(cl_context           /* context */,\n                           cl_GLsync            /* cl_GLsync */,\n                           cl_int *             /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1;\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\t/* __OPENCL_CL_GL_EXT_H  */\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/libopencl-stub/include/CL/cl_platform.h",
    "content": "/**********************************************************************************\n * Copyright (c) 2008-2012 The Khronos Group Inc.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and/or associated documentation files (the\n * \"Materials\"), to deal in the Materials without restriction, including\n * without limitation the rights to use, copy, modify, merge, publish,\n * distribute, sublicense, and/or sell copies of the Materials, and to\n * permit persons to whom the Materials are furnished to do so, subject to\n * the following conditions:\n *\n * The above copyright notice and this permission notice shall be included\n * in all copies or substantial portions of the Materials.\n *\n * THE MATERIALS ARE PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.\n **********************************************************************************/\n\n/* $Revision: 11803 $ on $Date: 2010-06-25 10:02:12 -0700 (Fri, 25 Jun 2010) $ */\n\n#ifndef __CL_PLATFORM_H\n#define __CL_PLATFORM_H\n\n#ifdef __APPLE__\n    /* Contains #defines for AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER below */\n    #include <AvailabilityMacros.h>\n#endif\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#if defined(_WIN32)\n    #define CL_API_ENTRY\n    #define CL_API_CALL     __stdcall\n    #define CL_CALLBACK     __stdcall\n#else\n    #define CL_API_ENTRY\n    #define CL_API_CALL\n    #define CL_CALLBACK\n#endif\n\n#ifdef __APPLE__\n    #define CL_EXTENSION_WEAK_LINK       __attribute__((weak_import))\n    
#define CL_API_SUFFIX__VERSION_1_0                  AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER\n    #define CL_EXT_SUFFIX__VERSION_1_0                  CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER\n    #define CL_API_SUFFIX__VERSION_1_1                  AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER\n    #define GCL_API_SUFFIX__VERSION_1_1                 AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER\n    #define CL_EXT_SUFFIX__VERSION_1_1                  CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER\n    #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED       CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER_BUT_DEPRECATED_IN_MAC_OS_X_VERSION_10_7\n    \n    #ifdef AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER\n        #define CL_API_SUFFIX__VERSION_1_2              AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER\n        #define GCL_API_SUFFIX__VERSION_1_2             AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER\n        #define CL_EXT_SUFFIX__VERSION_1_2              CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER\n        #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED\n        #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED   CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER_BUT_DEPRECATED_IN_MAC_OS_X_VERSION_10_8\n    #else\n        #warning  This path should never happen outside of internal operating system development.  
AvailabilityMacros do not function correctly here!\n        #define CL_API_SUFFIX__VERSION_1_2              AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER\n        #define GCL_API_SUFFIX__VERSION_1_2             AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER\n        #define CL_EXT_SUFFIX__VERSION_1_2              CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER\n        #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED   CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER\n    #endif\n#else\n    #define CL_EXTENSION_WEAK_LINK  \n    #define CL_API_SUFFIX__VERSION_1_0\n    #define CL_EXT_SUFFIX__VERSION_1_0\n    #define CL_API_SUFFIX__VERSION_1_1\n    #define CL_EXT_SUFFIX__VERSION_1_1\n    #define CL_API_SUFFIX__VERSION_1_2\n    #define CL_EXT_SUFFIX__VERSION_1_2\n    \n    #ifdef __GNUC__\n        #ifdef CL_USE_DEPRECATED_OPENCL_1_0_APIS\n            #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED\n            #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED    \n        #else\n            #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED __attribute__((deprecated))\n            #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED    \n        #endif\n    \n        #ifdef CL_USE_DEPRECATED_OPENCL_1_1_APIS\n            #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED    \n            #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED    \n        #else\n            #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED __attribute__((deprecated))\n            #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED    \n        #endif\n    #elif _WIN32\n        #ifdef CL_USE_DEPRECATED_OPENCL_1_0_APIS\n            #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED    \n            #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED    \n        #else\n            #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED \n            #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED __declspec(deprecated)     \n        #endif\n    \n        #ifdef CL_USE_DEPRECATED_OPENCL_1_1_APIS\n            #define 
CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED\n            #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED    \n        #else\n            #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED \n            #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED __declspec(deprecated)     \n        #endif\n    #else\n        #define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED\n        #define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED\n    \n        #define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED\n        #define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED\n    #endif\n#endif\n\n#if (defined (_WIN32) && defined(_MSC_VER))\n\n/* scalar types  */\ntypedef signed   __int8         cl_char;\ntypedef unsigned __int8         cl_uchar;\ntypedef signed   __int16        cl_short;\ntypedef unsigned __int16        cl_ushort;\ntypedef signed   __int32        cl_int;\ntypedef unsigned __int32        cl_uint;\ntypedef signed   __int64        cl_long;\ntypedef unsigned __int64        cl_ulong;\n\ntypedef unsigned __int16        cl_half;\ntypedef float                   cl_float;\ntypedef double                  cl_double;\n\n/* Macro names and corresponding values defined by OpenCL */\n#define CL_CHAR_BIT         8\n#define CL_SCHAR_MAX        127\n#define CL_SCHAR_MIN        (-127-1)\n#define CL_CHAR_MAX         CL_SCHAR_MAX\n#define CL_CHAR_MIN         CL_SCHAR_MIN\n#define CL_UCHAR_MAX        255\n#define CL_SHRT_MAX         32767\n#define CL_SHRT_MIN         (-32767-1)\n#define CL_USHRT_MAX        65535\n#define CL_INT_MAX          2147483647\n#define CL_INT_MIN          (-2147483647-1)\n#define CL_UINT_MAX         0xffffffffU\n#define CL_LONG_MAX         ((cl_long) 0x7FFFFFFFFFFFFFFFLL)\n#define CL_LONG_MIN         ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL)\n#define CL_ULONG_MAX        ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL)\n\n#define CL_FLT_DIG          6\n#define CL_FLT_MANT_DIG     24\n#define CL_FLT_MAX_10_EXP   +38\n#define CL_FLT_MAX_EXP      +128\n#define CL_FLT_MIN_10_EXP   -37\n#define CL_FLT_MIN_EXP      -125\n#define 
CL_FLT_RADIX        2\n#define CL_FLT_MAX          340282346638528859811704183484516925440.0f\n#define CL_FLT_MIN          1.175494350822287507969e-38f\n#define CL_FLT_EPSILON      0x1.0p-23f\n\n#define CL_DBL_DIG          15\n#define CL_DBL_MANT_DIG     53\n#define CL_DBL_MAX_10_EXP   +308\n#define CL_DBL_MAX_EXP      +1024\n#define CL_DBL_MIN_10_EXP   -307\n#define CL_DBL_MIN_EXP      -1021\n#define CL_DBL_RADIX        2\n#define CL_DBL_MAX          179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.0\n#define CL_DBL_MIN          2.225073858507201383090e-308\n#define CL_DBL_EPSILON      2.220446049250313080847e-16\n\n#define  CL_M_E             2.718281828459045090796\n#define  CL_M_LOG2E         1.442695040888963387005\n#define  CL_M_LOG10E        0.434294481903251816668\n#define  CL_M_LN2           0.693147180559945286227\n#define  CL_M_LN10          2.302585092994045901094\n#define  CL_M_PI            3.141592653589793115998\n#define  CL_M_PI_2          1.570796326794896557999\n#define  CL_M_PI_4          0.785398163397448278999\n#define  CL_M_1_PI          0.318309886183790691216\n#define  CL_M_2_PI          0.636619772367581382433\n#define  CL_M_2_SQRTPI      1.128379167095512558561\n#define  CL_M_SQRT2         1.414213562373095145475\n#define  CL_M_SQRT1_2       0.707106781186547572737\n\n#define  CL_M_E_F           2.71828174591064f\n#define  CL_M_LOG2E_F       1.44269502162933f\n#define  CL_M_LOG10E_F      0.43429449200630f\n#define  CL_M_LN2_F         0.69314718246460f\n#define  CL_M_LN10_F        2.30258512496948f\n#define  CL_M_PI_F          3.14159274101257f\n#define  CL_M_PI_2_F        1.57079637050629f\n#define  CL_M_PI_4_F        0.78539818525314f\n#define  CL_M_1_PI_F        
0.31830987334251f\n#define  CL_M_2_PI_F        0.63661974668503f\n#define  CL_M_2_SQRTPI_F    1.12837922573090f\n#define  CL_M_SQRT2_F       1.41421353816986f\n#define  CL_M_SQRT1_2_F     0.70710676908493f\n\n#define CL_NAN              (CL_INFINITY - CL_INFINITY)\n#define CL_HUGE_VALF        ((cl_float) 1e50)\n#define CL_HUGE_VAL         ((cl_double) 1e500)\n#define CL_MAXFLOAT         CL_FLT_MAX\n#define CL_INFINITY         CL_HUGE_VALF\n\n#else\n\n#include <stdint.h>\n\n/* scalar types  */\ntypedef int8_t          cl_char;\ntypedef uint8_t         cl_uchar;\ntypedef int16_t         cl_short    __attribute__((aligned(2)));\ntypedef uint16_t        cl_ushort   __attribute__((aligned(2)));\ntypedef int32_t         cl_int      __attribute__((aligned(4)));\ntypedef uint32_t        cl_uint     __attribute__((aligned(4)));\ntypedef int64_t         cl_long     __attribute__((aligned(8)));\ntypedef uint64_t        cl_ulong    __attribute__((aligned(8)));\n\ntypedef uint16_t        cl_half     __attribute__((aligned(2)));\ntypedef float           cl_float    __attribute__((aligned(4)));\ntypedef double          cl_double   __attribute__((aligned(8)));\n\n/* Macro names and corresponding values defined by OpenCL */\n#define CL_CHAR_BIT         8\n#define CL_SCHAR_MAX        127\n#define CL_SCHAR_MIN        (-127-1)\n#define CL_CHAR_MAX         CL_SCHAR_MAX\n#define CL_CHAR_MIN         CL_SCHAR_MIN\n#define CL_UCHAR_MAX        255\n#define CL_SHRT_MAX         32767\n#define CL_SHRT_MIN         (-32767-1)\n#define CL_USHRT_MAX        65535\n#define CL_INT_MAX          2147483647\n#define CL_INT_MIN          (-2147483647-1)\n#define CL_UINT_MAX         0xffffffffU\n#define CL_LONG_MAX         ((cl_long) 0x7FFFFFFFFFFFFFFFLL)\n#define CL_LONG_MIN         ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL)\n#define CL_ULONG_MAX        ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL)\n\n#define CL_FLT_DIG          6\n#define CL_FLT_MANT_DIG     24\n#define CL_FLT_MAX_10_EXP   +38\n#define CL_FLT_MAX_EXP  
    +128\n#define CL_FLT_MIN_10_EXP   -37\n#define CL_FLT_MIN_EXP      -125\n#define CL_FLT_RADIX        2\n#define CL_FLT_MAX          0x1.fffffep127f\n#define CL_FLT_MIN          0x1.0p-126f\n#define CL_FLT_EPSILON      0x1.0p-23f\n\n#define CL_DBL_DIG          15\n#define CL_DBL_MANT_DIG     53\n#define CL_DBL_MAX_10_EXP   +308\n#define CL_DBL_MAX_EXP      +1024\n#define CL_DBL_MIN_10_EXP   -307\n#define CL_DBL_MIN_EXP      -1021\n#define CL_DBL_RADIX        2\n#define CL_DBL_MAX          0x1.fffffffffffffp1023\n#define CL_DBL_MIN          0x1.0p-1022\n#define CL_DBL_EPSILON      0x1.0p-52\n\n#define  CL_M_E             2.718281828459045090796\n#define  CL_M_LOG2E         1.442695040888963387005\n#define  CL_M_LOG10E        0.434294481903251816668\n#define  CL_M_LN2           0.693147180559945286227\n#define  CL_M_LN10          2.302585092994045901094\n#define  CL_M_PI            3.141592653589793115998\n#define  CL_M_PI_2          1.570796326794896557999\n#define  CL_M_PI_4          0.785398163397448278999\n#define  CL_M_1_PI          0.318309886183790691216\n#define  CL_M_2_PI          0.636619772367581382433\n#define  CL_M_2_SQRTPI      1.128379167095512558561\n#define  CL_M_SQRT2         1.414213562373095145475\n#define  CL_M_SQRT1_2       0.707106781186547572737\n\n#define  CL_M_E_F           2.71828174591064f\n#define  CL_M_LOG2E_F       1.44269502162933f\n#define  CL_M_LOG10E_F      0.43429449200630f\n#define  CL_M_LN2_F         0.69314718246460f\n#define  CL_M_LN10_F        2.30258512496948f\n#define  CL_M_PI_F          3.14159274101257f\n#define  CL_M_PI_2_F        1.57079637050629f\n#define  CL_M_PI_4_F        0.78539818525314f\n#define  CL_M_1_PI_F        0.31830987334251f\n#define  CL_M_2_PI_F        0.63661974668503f\n#define  CL_M_2_SQRTPI_F    1.12837922573090f\n#define  CL_M_SQRT2_F       1.41421353816986f\n#define  CL_M_SQRT1_2_F     0.70710676908493f\n\n#if defined( __GNUC__ )\n   #define CL_HUGE_VALF     __builtin_huge_valf()\n   #define 
CL_HUGE_VAL      __builtin_huge_val()\n   #define CL_NAN           __builtin_nanf( \"\" )\n#else\n   #define CL_HUGE_VALF     ((cl_float) 1e50)\n   #define CL_HUGE_VAL      ((cl_double) 1e500)\n   float nanf( const char * );\n   #define CL_NAN           nanf( \"\" )  \n#endif\n#define CL_MAXFLOAT         CL_FLT_MAX\n#define CL_INFINITY         CL_HUGE_VALF\n\n#endif\n\n#include <stddef.h>\n\n/* Mirror types to GL types. Mirror types allow us to avoid deciding which headers to load based on whether we are using GL or GLES here. */\ntypedef unsigned int cl_GLuint;\ntypedef int          cl_GLint;\ntypedef unsigned int cl_GLenum;\n\n/*\n * Vector types \n *\n *  Note:   OpenCL requires that all types be naturally aligned. \n *          This means that vector types must be naturally aligned.\n *          For example, a vector of four floats must be aligned to\n *          a 16 byte boundary (calculated as 4 * the natural 4-byte \n *          alignment of the float).  The alignment qualifiers here\n *          will only function properly if your compiler supports them\n *          and if you don't actively work to defeat them.  For example,\n *          in order for a cl_float4 to be 16 byte aligned in a struct,\n *          the start of the struct must itself be 16-byte aligned. \n *\n *          Maintaining proper alignment is the user's responsibility.\n */\n\n/* Define basic vector types */\n#if defined( __VEC__ )\n   #include <altivec.h>   /* may be omitted depending on compiler. AltiVec spec provides no way to detect whether the header is required. 
*/\n   typedef vector unsigned char     __cl_uchar16;\n   typedef vector signed char       __cl_char16;\n   typedef vector unsigned short    __cl_ushort8;\n   typedef vector signed short      __cl_short8;\n   typedef vector unsigned int      __cl_uint4;\n   typedef vector signed int        __cl_int4;\n   typedef vector float             __cl_float4;\n   #define  __CL_UCHAR16__  1\n   #define  __CL_CHAR16__   1\n   #define  __CL_USHORT8__  1\n   #define  __CL_SHORT8__   1\n   #define  __CL_UINT4__    1\n   #define  __CL_INT4__     1\n   #define  __CL_FLOAT4__   1\n#endif\n\n#if defined( __SSE__ )\n    #if defined( __MINGW64__ )\n        #include <intrin.h>\n    #else\n        #include <xmmintrin.h>\n    #endif\n    #if defined( __GNUC__ )\n        typedef float __cl_float4   __attribute__((vector_size(16)));\n    #else\n        typedef __m128 __cl_float4;\n    #endif\n    #define __CL_FLOAT4__   1\n#endif\n\n#if defined( __SSE2__ )\n    #if defined( __MINGW64__ )\n        #include <intrin.h>\n    #else\n        #include <emmintrin.h>\n    #endif\n    #if defined( __GNUC__ )\n        typedef cl_uchar    __cl_uchar16    __attribute__((vector_size(16)));\n        typedef cl_char     __cl_char16     __attribute__((vector_size(16)));\n        typedef cl_ushort   __cl_ushort8    __attribute__((vector_size(16)));\n        typedef cl_short    __cl_short8     __attribute__((vector_size(16)));\n        typedef cl_uint     __cl_uint4      __attribute__((vector_size(16)));\n        typedef cl_int      __cl_int4       __attribute__((vector_size(16)));\n        typedef cl_ulong    __cl_ulong2     __attribute__((vector_size(16)));\n        typedef cl_long     __cl_long2      __attribute__((vector_size(16)));\n        typedef cl_double   __cl_double2    __attribute__((vector_size(16)));\n    #else\n        typedef __m128i __cl_uchar16;\n        typedef __m128i __cl_char16;\n        typedef __m128i __cl_ushort8;\n        typedef __m128i __cl_short8;\n        typedef __m128i 
__cl_uint4;\n        typedef __m128i __cl_int4;\n        typedef __m128i __cl_ulong2;\n        typedef __m128i __cl_long2;\n        typedef __m128d __cl_double2;\n    #endif\n    #define __CL_UCHAR16__  1\n    #define __CL_CHAR16__   1\n    #define __CL_USHORT8__  1\n    #define __CL_SHORT8__   1\n    #define __CL_INT4__     1\n    #define __CL_UINT4__    1\n    #define __CL_ULONG2__   1\n    #define __CL_LONG2__    1\n    #define __CL_DOUBLE2__  1\n#endif\n\n#if defined( __MMX__ )\n    #include <mmintrin.h>\n    #if defined( __GNUC__ )\n        typedef cl_uchar    __cl_uchar8     __attribute__((vector_size(8)));\n        typedef cl_char     __cl_char8      __attribute__((vector_size(8)));\n        typedef cl_ushort   __cl_ushort4    __attribute__((vector_size(8)));\n        typedef cl_short    __cl_short4     __attribute__((vector_size(8)));\n        typedef cl_uint     __cl_uint2      __attribute__((vector_size(8)));\n        typedef cl_int      __cl_int2       __attribute__((vector_size(8)));\n        typedef cl_ulong    __cl_ulong1     __attribute__((vector_size(8)));\n        typedef cl_long     __cl_long1      __attribute__((vector_size(8)));\n        typedef cl_float    __cl_float2     __attribute__((vector_size(8)));\n    #else\n        typedef __m64       __cl_uchar8;\n        typedef __m64       __cl_char8;\n        typedef __m64       __cl_ushort4;\n        typedef __m64       __cl_short4;\n        typedef __m64       __cl_uint2;\n        typedef __m64       __cl_int2;\n        typedef __m64       __cl_ulong1;\n        typedef __m64       __cl_long1;\n        typedef __m64       __cl_float2;\n    #endif\n    #define __CL_UCHAR8__   1\n    #define __CL_CHAR8__    1\n    #define __CL_USHORT4__  1\n    #define __CL_SHORT4__   1\n    #define __CL_INT2__     1\n    #define __CL_UINT2__    1\n    #define __CL_ULONG1__   1\n    #define __CL_LONG1__    1\n    #define __CL_FLOAT2__   1\n#endif\n\n#if defined( __AVX__ )\n    #if defined( __MINGW64__ )\n        
#include <intrin.h>\n    #else\n        #include <immintrin.h> \n    #endif\n    #if defined( __GNUC__ )\n        typedef cl_float    __cl_float8     __attribute__((vector_size(32)));\n        typedef cl_double   __cl_double4    __attribute__((vector_size(32)));\n    #else\n        typedef __m256      __cl_float8;\n        typedef __m256d     __cl_double4;\n    #endif\n    #define __CL_FLOAT8__   1\n    #define __CL_DOUBLE4__  1\n#endif\n\n/* Define alignment keys */\n#if defined( __GNUC__ )\n    #define CL_ALIGNED(_x)          __attribute__ ((aligned(_x)))\n#elif defined( _WIN32) && (_MSC_VER)\n    /* Alignment keys neutered on windows because MSVC can't swallow function arguments with alignment requirements     */\n    /* http://msdn.microsoft.com/en-us/library/373ak2y1%28VS.71%29.aspx                                                 */\n    /* #include <crtdefs.h>                                                                                             */\n    /* #define CL_ALIGNED(_x)          _CRT_ALIGN(_x)                                                                   */\n    #define CL_ALIGNED(_x)\n#else\n   #warning  Need to implement some method to align data here\n   #define  CL_ALIGNED(_x)\n#endif\n\n/* Indicate whether .xyzw, .s0123 and .hi.lo are supported */\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n    /* .xyzw and .s0123...{f|F} are supported */\n    #define CL_HAS_NAMED_VECTOR_FIELDS 1\n    /* .hi and .lo are supported */\n    #define CL_HAS_HI_LO_VECTOR_FIELDS 1\n#endif\n\n/* Define cl_vector types */\n\n/* ---- cl_charn ---- */\ntypedef union\n{\n    cl_char  CL_ALIGNED(2) s[2];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_char  x, y; };\n   __extension__ struct{ cl_char  s0, s1; };\n   __extension__ struct{ cl_char  lo, hi; };\n#endif\n#if defined( __CL_CHAR2__) \n    __cl_char2     v2;\n#endif\n}cl_char2;\n\ntypedef union\n{\n    cl_char  CL_ALIGNED(4) s[4];\n#if defined( __GNUC__) && ! 
defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_char  x, y, z, w; };\n   __extension__ struct{ cl_char  s0, s1, s2, s3; };\n   __extension__ struct{ cl_char2 lo, hi; };\n#endif\n#if defined( __CL_CHAR2__) \n    __cl_char2     v2[2];\n#endif\n#if defined( __CL_CHAR4__) \n    __cl_char4     v4;\n#endif\n}cl_char4;\n\n/* cl_char3 is identical in size, alignment and behavior to cl_char4. See section 6.1.5. */\ntypedef  cl_char4  cl_char3;\n\ntypedef union\n{\n    cl_char   CL_ALIGNED(8) s[8];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_char  x, y, z, w; };\n   __extension__ struct{ cl_char  s0, s1, s2, s3, s4, s5, s6, s7; };\n   __extension__ struct{ cl_char4 lo, hi; };\n#endif\n#if defined( __CL_CHAR2__) \n    __cl_char2     v2[4];\n#endif\n#if defined( __CL_CHAR4__) \n    __cl_char4     v4[2];\n#endif\n#if defined( __CL_CHAR8__ )\n    __cl_char8     v8;\n#endif\n}cl_char8;\n\ntypedef union\n{\n    cl_char  CL_ALIGNED(16) s[16];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_char  x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };\n   __extension__ struct{ cl_char  s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };\n   __extension__ struct{ cl_char8 lo, hi; };\n#endif\n#if defined( __CL_CHAR2__) \n    __cl_char2     v2[8];\n#endif\n#if defined( __CL_CHAR4__) \n    __cl_char4     v4[4];\n#endif\n#if defined( __CL_CHAR8__ )\n    __cl_char8     v8[2];\n#endif\n#if defined( __CL_CHAR16__ )\n    __cl_char16    v16;\n#endif\n}cl_char16;\n\n\n/* ---- cl_ucharn ---- */\ntypedef union\n{\n    cl_uchar  CL_ALIGNED(2) s[2];\n#if defined( __GNUC__) && ! 
defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_uchar  x, y; };\n   __extension__ struct{ cl_uchar  s0, s1; };\n   __extension__ struct{ cl_uchar  lo, hi; };\n#endif\n#if defined( __cl_uchar2__) \n    __cl_uchar2     v2;\n#endif\n}cl_uchar2;\n\ntypedef union\n{\n    cl_uchar  CL_ALIGNED(4) s[4];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_uchar  x, y, z, w; };\n   __extension__ struct{ cl_uchar  s0, s1, s2, s3; };\n   __extension__ struct{ cl_uchar2 lo, hi; };\n#endif\n#if defined( __CL_UCHAR2__) \n    __cl_uchar2     v2[2];\n#endif\n#if defined( __CL_UCHAR4__) \n    __cl_uchar4     v4;\n#endif\n}cl_uchar4;\n\n/* cl_uchar3 is identical in size, alignment and behavior to cl_uchar4. See section 6.1.5. */\ntypedef  cl_uchar4  cl_uchar3;\n\ntypedef union\n{\n    cl_uchar   CL_ALIGNED(8) s[8];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_uchar  x, y, z, w; };\n   __extension__ struct{ cl_uchar  s0, s1, s2, s3, s4, s5, s6, s7; };\n   __extension__ struct{ cl_uchar4 lo, hi; };\n#endif\n#if defined( __CL_UCHAR2__) \n    __cl_uchar2     v2[4];\n#endif\n#if defined( __CL_UCHAR4__) \n    __cl_uchar4     v4[2];\n#endif\n#if defined( __CL_UCHAR8__ )\n    __cl_uchar8     v8;\n#endif\n}cl_uchar8;\n\ntypedef union\n{\n    cl_uchar  CL_ALIGNED(16) s[16];\n#if defined( __GNUC__) && ! 
defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_uchar  x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };\n   __extension__ struct{ cl_uchar  s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };\n   __extension__ struct{ cl_uchar8 lo, hi; };\n#endif\n#if defined( __CL_UCHAR2__) \n    __cl_uchar2     v2[8];\n#endif\n#if defined( __CL_UCHAR4__) \n    __cl_uchar4     v4[4];\n#endif\n#if defined( __CL_UCHAR8__ )\n    __cl_uchar8     v8[2];\n#endif\n#if defined( __CL_UCHAR16__ )\n    __cl_uchar16    v16;\n#endif\n}cl_uchar16;\n\n\n/* ---- cl_shortn ---- */\ntypedef union\n{\n    cl_short  CL_ALIGNED(4) s[2];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_short  x, y; };\n   __extension__ struct{ cl_short  s0, s1; };\n   __extension__ struct{ cl_short  lo, hi; };\n#endif\n#if defined( __CL_SHORT2__) \n    __cl_short2     v2;\n#endif\n}cl_short2;\n\ntypedef union\n{\n    cl_short  CL_ALIGNED(8) s[4];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_short  x, y, z, w; };\n   __extension__ struct{ cl_short  s0, s1, s2, s3; };\n   __extension__ struct{ cl_short2 lo, hi; };\n#endif\n#if defined( __CL_SHORT2__) \n    __cl_short2     v2[2];\n#endif\n#if defined( __CL_SHORT4__) \n    __cl_short4     v4;\n#endif\n}cl_short4;\n\n/* cl_short3 is identical in size, alignment and behavior to cl_short4. See section 6.1.5. */\ntypedef  cl_short4  cl_short3;\n\ntypedef union\n{\n    cl_short   CL_ALIGNED(16) s[8];\n#if defined( __GNUC__) && ! 
defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_short  x, y, z, w; };\n   __extension__ struct{ cl_short  s0, s1, s2, s3, s4, s5, s6, s7; };\n   __extension__ struct{ cl_short4 lo, hi; };\n#endif\n#if defined( __CL_SHORT2__) \n    __cl_short2     v2[4];\n#endif\n#if defined( __CL_SHORT4__) \n    __cl_short4     v4[2];\n#endif\n#if defined( __CL_SHORT8__ )\n    __cl_short8     v8;\n#endif\n}cl_short8;\n\ntypedef union\n{\n    cl_short  CL_ALIGNED(32) s[16];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_short  x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };\n   __extension__ struct{ cl_short  s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };\n   __extension__ struct{ cl_short8 lo, hi; };\n#endif\n#if defined( __CL_SHORT2__) \n    __cl_short2     v2[8];\n#endif\n#if defined( __CL_SHORT4__) \n    __cl_short4     v4[4];\n#endif\n#if defined( __CL_SHORT8__ )\n    __cl_short8     v8[2];\n#endif\n#if defined( __CL_SHORT16__ )\n    __cl_short16    v16;\n#endif\n}cl_short16;\n\n\n/* ---- cl_ushortn ---- */\ntypedef union\n{\n    cl_ushort  CL_ALIGNED(4) s[2];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_ushort  x, y; };\n   __extension__ struct{ cl_ushort  s0, s1; };\n   __extension__ struct{ cl_ushort  lo, hi; };\n#endif\n#if defined( __CL_USHORT2__) \n    __cl_ushort2     v2;\n#endif\n}cl_ushort2;\n\ntypedef union\n{\n    cl_ushort  CL_ALIGNED(8) s[4];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_ushort  x, y, z, w; };\n   __extension__ struct{ cl_ushort  s0, s1, s2, s3; };\n   __extension__ struct{ cl_ushort2 lo, hi; };\n#endif\n#if defined( __CL_USHORT2__) \n    __cl_ushort2     v2[2];\n#endif\n#if defined( __CL_USHORT4__) \n    __cl_ushort4     v4;\n#endif\n}cl_ushort4;\n\n/* cl_ushort3 is identical in size, alignment and behavior to cl_ushort4. See section 6.1.5. 
*/\ntypedef  cl_ushort4  cl_ushort3;\n\ntypedef union\n{\n    cl_ushort   CL_ALIGNED(16) s[8];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_ushort  x, y, z, w; };\n   __extension__ struct{ cl_ushort  s0, s1, s2, s3, s4, s5, s6, s7; };\n   __extension__ struct{ cl_ushort4 lo, hi; };\n#endif\n#if defined( __CL_USHORT2__) \n    __cl_ushort2     v2[4];\n#endif\n#if defined( __CL_USHORT4__) \n    __cl_ushort4     v4[2];\n#endif\n#if defined( __CL_USHORT8__ )\n    __cl_ushort8     v8;\n#endif\n}cl_ushort8;\n\ntypedef union\n{\n    cl_ushort  CL_ALIGNED(32) s[16];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_ushort  x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };\n   __extension__ struct{ cl_ushort  s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };\n   __extension__ struct{ cl_ushort8 lo, hi; };\n#endif\n#if defined( __CL_USHORT2__) \n    __cl_ushort2     v2[8];\n#endif\n#if defined( __CL_USHORT4__) \n    __cl_ushort4     v4[4];\n#endif\n#if defined( __CL_USHORT8__ )\n    __cl_ushort8     v8[2];\n#endif\n#if defined( __CL_USHORT16__ )\n    __cl_ushort16    v16;\n#endif\n}cl_ushort16;\n\n/* ---- cl_intn ---- */\ntypedef union\n{\n    cl_int  CL_ALIGNED(8) s[2];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_int  x, y; };\n   __extension__ struct{ cl_int  s0, s1; };\n   __extension__ struct{ cl_int  lo, hi; };\n#endif\n#if defined( __CL_INT2__) \n    __cl_int2     v2;\n#endif\n}cl_int2;\n\ntypedef union\n{\n    cl_int  CL_ALIGNED(16) s[4];\n#if defined( __GNUC__) && ! 
defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_int  x, y, z, w; };\n   __extension__ struct{ cl_int  s0, s1, s2, s3; };\n   __extension__ struct{ cl_int2 lo, hi; };\n#endif\n#if defined( __CL_INT2__) \n    __cl_int2     v2[2];\n#endif\n#if defined( __CL_INT4__) \n    __cl_int4     v4;\n#endif\n}cl_int4;\n\n/* cl_int3 is identical in size, alignment and behavior to cl_int4. See section 6.1.5. */\ntypedef  cl_int4  cl_int3;\n\ntypedef union\n{\n    cl_int   CL_ALIGNED(32) s[8];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_int  x, y, z, w; };\n   __extension__ struct{ cl_int  s0, s1, s2, s3, s4, s5, s6, s7; };\n   __extension__ struct{ cl_int4 lo, hi; };\n#endif\n#if defined( __CL_INT2__) \n    __cl_int2     v2[4];\n#endif\n#if defined( __CL_INT4__) \n    __cl_int4     v4[2];\n#endif\n#if defined( __CL_INT8__ )\n    __cl_int8     v8;\n#endif\n}cl_int8;\n\ntypedef union\n{\n    cl_int  CL_ALIGNED(64) s[16];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_int  x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };\n   __extension__ struct{ cl_int  s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };\n   __extension__ struct{ cl_int8 lo, hi; };\n#endif\n#if defined( __CL_INT2__) \n    __cl_int2     v2[8];\n#endif\n#if defined( __CL_INT4__) \n    __cl_int4     v4[4];\n#endif\n#if defined( __CL_INT8__ )\n    __cl_int8     v8[2];\n#endif\n#if defined( __CL_INT16__ )\n    __cl_int16    v16;\n#endif\n}cl_int16;\n\n\n/* ---- cl_uintn ---- */\ntypedef union\n{\n    cl_uint  CL_ALIGNED(8) s[2];\n#if defined( __GNUC__) && ! 
defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_uint  x, y; };\n   __extension__ struct{ cl_uint  s0, s1; };\n   __extension__ struct{ cl_uint  lo, hi; };\n#endif\n#if defined( __CL_UINT2__) \n    __cl_uint2     v2;\n#endif\n}cl_uint2;\n\ntypedef union\n{\n    cl_uint  CL_ALIGNED(16) s[4];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_uint  x, y, z, w; };\n   __extension__ struct{ cl_uint  s0, s1, s2, s3; };\n   __extension__ struct{ cl_uint2 lo, hi; };\n#endif\n#if defined( __CL_UINT2__) \n    __cl_uint2     v2[2];\n#endif\n#if defined( __CL_UINT4__) \n    __cl_uint4     v4;\n#endif\n}cl_uint4;\n\n/* cl_uint3 is identical in size, alignment and behavior to cl_uint4. See section 6.1.5. */\ntypedef  cl_uint4  cl_uint3;\n\ntypedef union\n{\n    cl_uint   CL_ALIGNED(32) s[8];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_uint  x, y, z, w; };\n   __extension__ struct{ cl_uint  s0, s1, s2, s3, s4, s5, s6, s7; };\n   __extension__ struct{ cl_uint4 lo, hi; };\n#endif\n#if defined( __CL_UINT2__) \n    __cl_uint2     v2[4];\n#endif\n#if defined( __CL_UINT4__) \n    __cl_uint4     v4[2];\n#endif\n#if defined( __CL_UINT8__ )\n    __cl_uint8     v8;\n#endif\n}cl_uint8;\n\ntypedef union\n{\n    cl_uint  CL_ALIGNED(64) s[16];\n#if defined( __GNUC__) && ! 
defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_uint  x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };\n   __extension__ struct{ cl_uint  s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };\n   __extension__ struct{ cl_uint8 lo, hi; };\n#endif\n#if defined( __CL_UINT2__) \n    __cl_uint2     v2[8];\n#endif\n#if defined( __CL_UINT4__) \n    __cl_uint4     v4[4];\n#endif\n#if defined( __CL_UINT8__ )\n    __cl_uint8     v8[2];\n#endif\n#if defined( __CL_UINT16__ )\n    __cl_uint16    v16;\n#endif\n}cl_uint16;\n\n/* ---- cl_longn ---- */\ntypedef union\n{\n    cl_long  CL_ALIGNED(16) s[2];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_long  x, y; };\n   __extension__ struct{ cl_long  s0, s1; };\n   __extension__ struct{ cl_long  lo, hi; };\n#endif\n#if defined( __CL_LONG2__) \n    __cl_long2     v2;\n#endif\n}cl_long2;\n\ntypedef union\n{\n    cl_long  CL_ALIGNED(32) s[4];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_long  x, y, z, w; };\n   __extension__ struct{ cl_long  s0, s1, s2, s3; };\n   __extension__ struct{ cl_long2 lo, hi; };\n#endif\n#if defined( __CL_LONG2__) \n    __cl_long2     v2[2];\n#endif\n#if defined( __CL_LONG4__) \n    __cl_long4     v4;\n#endif\n}cl_long4;\n\n/* cl_long3 is identical in size, alignment and behavior to cl_long4. See section 6.1.5. */\ntypedef  cl_long4  cl_long3;\n\ntypedef union\n{\n    cl_long   CL_ALIGNED(64) s[8];\n#if defined( __GNUC__) && ! 
defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_long  x, y, z, w; };\n   __extension__ struct{ cl_long  s0, s1, s2, s3, s4, s5, s6, s7; };\n   __extension__ struct{ cl_long4 lo, hi; };\n#endif\n#if defined( __CL_LONG2__) \n    __cl_long2     v2[4];\n#endif\n#if defined( __CL_LONG4__) \n    __cl_long4     v4[2];\n#endif\n#if defined( __CL_LONG8__ )\n    __cl_long8     v8;\n#endif\n}cl_long8;\n\ntypedef union\n{\n    cl_long  CL_ALIGNED(128) s[16];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_long  x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };\n   __extension__ struct{ cl_long  s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };\n   __extension__ struct{ cl_long8 lo, hi; };\n#endif\n#if defined( __CL_LONG2__) \n    __cl_long2     v2[8];\n#endif\n#if defined( __CL_LONG4__) \n    __cl_long4     v4[4];\n#endif\n#if defined( __CL_LONG8__ )\n    __cl_long8     v8[2];\n#endif\n#if defined( __CL_LONG16__ )\n    __cl_long16    v16;\n#endif\n}cl_long16;\n\n\n/* ---- cl_ulongn ---- */\ntypedef union\n{\n    cl_ulong  CL_ALIGNED(16) s[2];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_ulong  x, y; };\n   __extension__ struct{ cl_ulong  s0, s1; };\n   __extension__ struct{ cl_ulong  lo, hi; };\n#endif\n#if defined( __CL_ULONG2__) \n    __cl_ulong2     v2;\n#endif\n}cl_ulong2;\n\ntypedef union\n{\n    cl_ulong  CL_ALIGNED(32) s[4];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_ulong  x, y, z, w; };\n   __extension__ struct{ cl_ulong  s0, s1, s2, s3; };\n   __extension__ struct{ cl_ulong2 lo, hi; };\n#endif\n#if defined( __CL_ULONG2__) \n    __cl_ulong2     v2[2];\n#endif\n#if defined( __CL_ULONG4__) \n    __cl_ulong4     v4;\n#endif\n}cl_ulong4;\n\n/* cl_ulong3 is identical in size, alignment and behavior to cl_ulong4. See section 6.1.5. 
*/\ntypedef  cl_ulong4  cl_ulong3;\n\ntypedef union\n{\n    cl_ulong   CL_ALIGNED(64) s[8];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_ulong  x, y, z, w; };\n   __extension__ struct{ cl_ulong  s0, s1, s2, s3, s4, s5, s6, s7; };\n   __extension__ struct{ cl_ulong4 lo, hi; };\n#endif\n#if defined( __CL_ULONG2__) \n    __cl_ulong2     v2[4];\n#endif\n#if defined( __CL_ULONG4__) \n    __cl_ulong4     v4[2];\n#endif\n#if defined( __CL_ULONG8__ )\n    __cl_ulong8     v8;\n#endif\n}cl_ulong8;\n\ntypedef union\n{\n    cl_ulong  CL_ALIGNED(128) s[16];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_ulong  x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };\n   __extension__ struct{ cl_ulong  s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };\n   __extension__ struct{ cl_ulong8 lo, hi; };\n#endif\n#if defined( __CL_ULONG2__) \n    __cl_ulong2     v2[8];\n#endif\n#if defined( __CL_ULONG4__) \n    __cl_ulong4     v4[4];\n#endif\n#if defined( __CL_ULONG8__ )\n    __cl_ulong8     v8[2];\n#endif\n#if defined( __CL_ULONG16__ )\n    __cl_ulong16    v16;\n#endif\n}cl_ulong16;\n\n\n/* --- cl_floatn ---- */\n\ntypedef union\n{\n    cl_float  CL_ALIGNED(8) s[2];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_float  x, y; };\n   __extension__ struct{ cl_float  s0, s1; };\n   __extension__ struct{ cl_float  lo, hi; };\n#endif\n#if defined( __CL_FLOAT2__) \n    __cl_float2     v2;\n#endif\n}cl_float2;\n\ntypedef union\n{\n    cl_float  CL_ALIGNED(16) s[4];\n#if defined( __GNUC__) && ! 
defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_float   x, y, z, w; };\n   __extension__ struct{ cl_float   s0, s1, s2, s3; };\n   __extension__ struct{ cl_float2  lo, hi; };\n#endif\n#if defined( __CL_FLOAT2__) \n    __cl_float2     v2[2];\n#endif\n#if defined( __CL_FLOAT4__) \n    __cl_float4     v4;\n#endif\n}cl_float4;\n\n/* cl_float3 is identical in size, alignment and behavior to cl_float4. See section 6.1.5. */\ntypedef  cl_float4  cl_float3;\n\ntypedef union\n{\n    cl_float   CL_ALIGNED(32) s[8];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_float   x, y, z, w; };\n   __extension__ struct{ cl_float   s0, s1, s2, s3, s4, s5, s6, s7; };\n   __extension__ struct{ cl_float4  lo, hi; };\n#endif\n#if defined( __CL_FLOAT2__) \n    __cl_float2     v2[4];\n#endif\n#if defined( __CL_FLOAT4__) \n    __cl_float4     v4[2];\n#endif\n#if defined( __CL_FLOAT8__ )\n    __cl_float8     v8;\n#endif\n}cl_float8;\n\ntypedef union\n{\n    cl_float  CL_ALIGNED(64) s[16];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_float  x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };\n   __extension__ struct{ cl_float  s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };\n   __extension__ struct{ cl_float8 lo, hi; };\n#endif\n#if defined( __CL_FLOAT2__) \n    __cl_float2     v2[8];\n#endif\n#if defined( __CL_FLOAT4__) \n    __cl_float4     v4[4];\n#endif\n#if defined( __CL_FLOAT8__ )\n    __cl_float8     v8[2];\n#endif\n#if defined( __CL_FLOAT16__ )\n    __cl_float16    v16;\n#endif\n}cl_float16;\n\n/* --- cl_doublen ---- */\n\ntypedef union\n{\n    cl_double  CL_ALIGNED(16) s[2];\n#if defined( __GNUC__) && ! 
defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_double  x, y; };\n   __extension__ struct{ cl_double s0, s1; };\n   __extension__ struct{ cl_double lo, hi; };\n#endif\n#if defined( __CL_DOUBLE2__) \n    __cl_double2     v2;\n#endif\n}cl_double2;\n\ntypedef union\n{\n    cl_double  CL_ALIGNED(32) s[4];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_double  x, y, z, w; };\n   __extension__ struct{ cl_double  s0, s1, s2, s3; };\n   __extension__ struct{ cl_double2 lo, hi; };\n#endif\n#if defined( __CL_DOUBLE2__) \n    __cl_double2     v2[2];\n#endif\n#if defined( __CL_DOUBLE4__) \n    __cl_double4     v4;\n#endif\n}cl_double4;\n\n/* cl_double3 is identical in size, alignment and behavior to cl_double4. See section 6.1.5. */\ntypedef  cl_double4  cl_double3;\n\ntypedef union\n{\n    cl_double   CL_ALIGNED(64) s[8];\n#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_double  x, y, z, w; };\n   __extension__ struct{ cl_double  s0, s1, s2, s3, s4, s5, s6, s7; };\n   __extension__ struct{ cl_double4 lo, hi; };\n#endif\n#if defined( __CL_DOUBLE2__) \n    __cl_double2     v2[4];\n#endif\n#if defined( __CL_DOUBLE4__) \n    __cl_double4     v4[2];\n#endif\n#if defined( __CL_DOUBLE8__ )\n    __cl_double8     v8;\n#endif\n}cl_double8;\n\ntypedef union\n{\n    cl_double  CL_ALIGNED(128) s[16];\n#if defined( __GNUC__) && ! 
defined( __STRICT_ANSI__ )\n   __extension__ struct{ cl_double  x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };\n   __extension__ struct{ cl_double  s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };\n   __extension__ struct{ cl_double8 lo, hi; };\n#endif\n#if defined( __CL_DOUBLE2__) \n    __cl_double2     v2[8];\n#endif\n#if defined( __CL_DOUBLE4__) \n    __cl_double4     v4[4];\n#endif\n#if defined( __CL_DOUBLE8__ )\n    __cl_double8     v8[2];\n#endif\n#if defined( __CL_DOUBLE16__ )\n    __cl_double16    v16;\n#endif\n}cl_double16;\n\n/* Macro to facilitate debugging \n * Usage:\n *   Place CL_PROGRAM_STRING_DEBUG_INFO on the line before the first line of your source. \n *   The first line ends with:   CL_PROGRAM_STRING_DEBUG_INFO \\\"\n *   Each line thereafter of OpenCL C source must end with: \\n\\\n *   The last line ends in \";\n *\n *   Example:\n *\n *   const char *my_program = CL_PROGRAM_STRING_DEBUG_INFO \"\\\n *   kernel void foo( int a, float * b )             \\n\\\n *   {                                               \\n\\\n *      // my comment                                \\n\\\n *      *b[ get_global_id(0)] = a;                   \\n\\\n *   }                                               \\n\\\n *   \";\n *\n * This should correctly set up the line, (column) and file information for your source \n * string so you can do source level debugging.\n */\n#define  __CL_STRINGIFY( _x )               # _x\n#define  _CL_STRINGIFY( _x )                __CL_STRINGIFY( _x )\n#define  CL_PROGRAM_STRING_DEBUG_INFO       \"#line \"  _CL_STRINGIFY(__LINE__) \" \\\"\" __FILE__ \"\\\" \\n\\n\" \n  \n#ifdef __cplusplus\n}\n#endif\n\n#endif  /* __CL_PLATFORM_H  */\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/libopencl-stub/include/CL/opencl.h",
    "content": "/*******************************************************************************\n * Copyright (c) 2008-2012 The Khronos Group Inc.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and/or associated documentation files (the\n * \"Materials\"), to deal in the Materials without restriction, including\n * without limitation the rights to use, copy, modify, merge, publish,\n * distribute, sublicense, and/or sell copies of the Materials, and to\n * permit persons to whom the Materials are furnished to do so, subject to\n * the following conditions:\n *\n * The above copyright notice and this permission notice shall be included\n * in all copies or substantial portions of the Materials.\n *\n * THE MATERIALS ARE PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.\n ******************************************************************************/\n\n/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */\n\n#ifndef __OPENCL_H\n#define __OPENCL_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#ifdef __APPLE__\n\n#include <OpenCL/cl.h>\n#include <OpenCL/cl_gl.h>\n#include <OpenCL/cl_gl_ext.h>\n#include <OpenCL/cl_ext.h>\n\n#else\n\n#include <CL/cl.h>\n#include <CL/cl_gl.h>\n#include <CL/cl_gl_ext.h>\n#include <CL/cl_ext.h>\n\n#endif\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif  /* __OPENCL_H   */\n\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/libopencl-stub/include/libopencl.h",
    "content": "#ifndef LIBOPENCL_STUB_H\n#define LIBOPENCL_STUB_H\n\n#include <stdlib.h>\n#include <sys/stat.h>\n#include <dlfcn.h>\n#include <CL/cl.h>\n#include <CL/cl_gl.h>\n\n\ntypedef void (*f_pfn_notify)(const char *, const void *, size_t, void *);\n\ntypedef cl_int (*f_clGetPlatformIDs) (cl_uint, cl_platform_id *, cl_uint *);\n\ntypedef cl_int (*f_clGetPlatformInfo) (cl_platform_id, cl_platform_info, size_t, void *, size_t *);\n\ntypedef cl_int (*f_clGetDeviceIDs) (cl_platform_id, cl_device_type, cl_uint, cl_device_id *, cl_uint *);\n\ntypedef cl_int (*f_clGetDeviceInfo) (cl_device_id, cl_device_info, size_t, void *, size_t *);\n\ntypedef cl_int (*f_clCreateSubDevices) (cl_device_id, const cl_device_partition_property *,\n\t\t\t\t\tcl_uint, cl_device_id *, cl_uint *);\n\ntypedef cl_int (*f_clRetainDevice) (cl_device_id);\n\ntypedef cl_int (*f_clReleaseDevice) (cl_device_id);\n\ntypedef cl_context (*f_clCreateContext) (const cl_context_properties *, cl_uint, const cl_device_id *,\n                \t\t\tf_pfn_notify, void *, cl_int *);\n\ntypedef cl_context (*f_clCreateContextFromType) (const cl_context_properties *, cl_device_type,\n                        \t\tf_pfn_notify, void *, cl_int *);\n\ntypedef cl_int (*f_clRetainContext) (cl_context);\n\ntypedef cl_int (*f_clReleaseContext) (cl_context);\n\ntypedef cl_int (*f_clGetContextInfo) (cl_context, cl_context_info, size_t, void *, size_t *);\n\ntypedef cl_command_queue (*f_clCreateCommandQueue) (cl_context, cl_device_id, cl_command_queue_properties, cl_int *);\n\ntypedef cl_int (*f_clRetainCommandQueue) (cl_command_queue);\n\ntypedef cl_int (*f_clReleaseCommandQueue) (cl_command_queue);\n\ntypedef cl_int (*f_clGetCommandQueueInfo) (cl_command_queue, cl_command_queue_info, size_t, void *, size_t *);\n\ntypedef cl_mem (*f_clCreateBuffer) (cl_context, cl_mem_flags, size_t, void *, cl_int *);\n\ntypedef cl_mem (*f_clCreateSubBuffer) (cl_mem, cl_mem_flags, cl_buffer_create_type, const void *, cl_int 
*);\n\ntypedef cl_mem (*f_clCreateImage) (cl_context, cl_mem_flags, const cl_image_format *, const cl_image_desc *, void *, cl_int *);\n\ntypedef cl_int (*f_clRetainMemObject) (cl_mem);\n\ntypedef cl_int (*f_clReleaseMemObject) (cl_mem);\n\ntypedef cl_int (*f_clGetMemObjectInfo) (cl_mem, cl_mem_info, size_t, void *, size_t *);\n\ntypedef cl_int (*f_clGetImageInfo) (cl_mem, cl_image_info, size_t, void *, size_t *);\n\ntypedef cl_int (*f_clSetMemObjectDestructorCallback) (cl_mem, void (*pfn_notify)( cl_mem memobj, void* user_data), void *);\n\ntypedef cl_int (*f_clGetSupportedImageFormats) (cl_context, cl_mem_flags, cl_mem_object_type, cl_uint, cl_image_format *, cl_uint *);\n\ntypedef cl_sampler (*f_clCreateSampler) (cl_context, cl_bool, cl_addressing_mode, cl_filter_mode, cl_int *);\n\ntypedef cl_int (*f_clRetainSampler) (cl_sampler);\n\ntypedef cl_int (*f_clReleaseSampler) (cl_sampler);\n\ntypedef cl_int (*f_clGetSamplerInfo) (cl_sampler, cl_sampler_info, size_t, void *, size_t *);\n\ntypedef cl_program (*f_clCreateProgramWithSource) (cl_context, cl_uint, const char **, const size_t *, cl_int *);\n\ntypedef cl_program (*f_clCreateProgramWithBinary) (cl_context, cl_uint, const cl_device_id *,\n        const size_t *, const unsigned char **, cl_int *, cl_int *);\n\ntypedef cl_program (*f_clCreateProgramWithBuiltInKernels) (cl_context, cl_uint, const cl_device_id *, const char *, cl_int *);\n\ntypedef cl_int (*f_clRetainProgram) (cl_program);\n\ntypedef cl_int (*f_clReleaseProgram) (cl_program);\n\ntypedef cl_int (*f_clBuildProgram) (cl_program, cl_uint, const cl_device_id *, const char *, \n        void (*pfn_notify)(cl_program program, void * user_data), void *);\n\ntypedef cl_int (*f_clCompileProgram) (cl_program, cl_uint, const cl_device_id *, const char *, cl_uint, const cl_program *,\n        const char **, void (*pfn_notify)(cl_program program, void * user_data), void *);\n\ntypedef cl_program (*f_clLinkProgram) (cl_context, cl_uint, const cl_device_id *, 
const char *, cl_uint, const cl_program *,\n                    void (*pfn_notify)(cl_program program, void * user_data), void *, cl_int *);\n\ntypedef cl_int (*f_clUnloadPlatformCompiler)(cl_platform_id);\n\ntypedef cl_int (*f_clGetProgramInfo) (cl_program, cl_program_info, size_t, void *, size_t *);\n\ntypedef cl_int (*f_clGetProgramBuildInfo) (cl_program, cl_device_id, cl_program_build_info, size_t, void *, size_t *);\n\ntypedef cl_kernel (*f_clCreateKernel) (cl_program, const char *, cl_int *);\n\ntypedef cl_int (*f_clCreateKernelsInProgram) (cl_program, cl_uint, cl_kernel *, cl_uint *);\n\ntypedef cl_int (*f_clRetainKernel) (cl_kernel);\n\ntypedef cl_int (*f_clReleaseKernel) (cl_kernel);\n\ntypedef cl_int (*f_clSetKernelArg) (cl_kernel, cl_uint, size_t,const void *);\n\ntypedef cl_int (*f_clGetKernelInfo) (cl_kernel, cl_kernel_info, size_t, void *, size_t *);\n\ntypedef cl_int (*f_clGetKernelArgInfo) (cl_kernel, cl_uint, cl_kernel_arg_info, size_t, void *, size_t *);\n\ntypedef cl_int (*f_clGetKernelWorkGroupInfo) (cl_kernel, cl_device_id, cl_kernel_work_group_info, size_t, void *, size_t *);\n\ntypedef cl_int (*f_clWaitForEvents) (cl_uint, const cl_event *);\n\ntypedef cl_int (*f_clGetEventInfo) (cl_event, cl_event_info, size_t, void *, size_t *);\n\ntypedef cl_event (*f_clCreateUserEvent) (cl_context, cl_int *);\n\ntypedef cl_int (*f_clRetainEvent) (cl_event);\n\ntypedef cl_int (*f_clReleaseEvent) (cl_event);\n\ntypedef cl_int (*f_clSetUserEventStatus) (cl_event, cl_int);\n\ntypedef cl_int (*f_clSetEventCallback) (cl_event, cl_int, void (*pfn_notify)(cl_event, cl_int, void *), void *);\n\ntypedef cl_int (*f_clGetEventProfilingInfo) (cl_event, cl_profiling_info, size_t, void *, size_t *);\n\ntypedef cl_int (*f_clFlush) (cl_command_queue);\n\ntypedef cl_int (*f_clFinish) (cl_command_queue);\n\ntypedef cl_int (*f_clEnqueueReadBuffer) (cl_command_queue, cl_mem, cl_bool, size_t, size_t, void *, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int 
(*f_clEnqueueReadBufferRect) (cl_command_queue, cl_mem, cl_bool, const size_t *, const size_t *, const size_t *,\n                            size_t, size_t, size_t, size_t, void *, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueWriteBuffer) (cl_command_queue, cl_mem, cl_bool, size_t, size_t, const void *, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueWriteBufferRect) (cl_command_queue, cl_mem, cl_bool, const size_t *, const size_t *, const size_t *,\n                            size_t, size_t, size_t, size_t, const void *, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueFillBuffer) (cl_command_queue, cl_mem, const void *, size_t, size_t, size_t, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueCopyBuffer) (cl_command_queue, cl_mem, cl_mem, size_t, size_t, size_t, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueCopyBufferRect) (cl_command_queue, cl_mem, cl_mem, const size_t *, const size_t *, const size_t *,\n                            size_t, size_t, size_t, size_t, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueReadImage) (cl_command_queue, cl_mem, cl_bool, const size_t *, const size_t *,\n\t\t\t\t\t\t\tsize_t, size_t, void *, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueWriteImage) (cl_command_queue, cl_mem, cl_bool, const size_t *, const size_t *,\n\t\t\t\t\t\t\tsize_t, size_t, const void *, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueFillImage) (cl_command_queue, cl_mem, const void *, const size_t *, const size_t *, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueCopyImage) (cl_command_queue, cl_mem, cl_mem, const size_t *, const size_t *, const size_t *,\n          cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueCopyImageToBuffer) (cl_command_queue, cl_mem, cl_mem, const size_t *, const size_t *, size_t, cl_uint, const 
cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueCopyBufferToImage) (cl_command_queue, cl_mem, cl_mem, size_t, const size_t *, const size_t *, cl_uint, const cl_event *, cl_event *);\n\ntypedef void * (*f_clEnqueueMapBuffer) (cl_command_queue, cl_mem, cl_bool, cl_map_flags, size_t,\n\t\t\t\t\t\tsize_t, cl_uint, const cl_event *, cl_event *, cl_int *);\n\ntypedef void * (*f_clEnqueueMapImage) (cl_command_queue, cl_mem, cl_bool, cl_map_flags, const size_t *, const size_t *,\n                  size_t *, size_t *, cl_uint, const cl_event *, cl_event *, cl_int *);\n\ntypedef cl_int (*f_clEnqueueUnmapMemObject) (cl_command_queue, cl_mem, void *, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueMigrateMemObjects)(cl_command_queue, cl_uint, const cl_mem *, cl_mem_migration_flags,\n\t\t\t\t\t\tcl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueNDRangeKernel)(cl_command_queue, cl_kernel, cl_uint, const size_t *, const size_t *,\n                       const size_t *, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueTask)(cl_command_queue, cl_kernel, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueNativeKernel)(cl_command_queue, void (*user_func)(void *),  void *, size_t,\n                      cl_uint, const cl_mem *, const void **, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueMarkerWithWaitList)(cl_command_queue, cl_uint, const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueBarrierWithWaitList)(cl_command_queue, cl_uint, const cl_event *, cl_event *);\n\ntypedef void * (*f_clGetExtensionFunctionAddressForPlatform)(cl_platform_id, const char *);\n\ntypedef cl_mem (*f_clCreateImage2D)(cl_context, cl_mem_flags,const cl_image_format *, size_t, size_t,\n                \t\t\t\tsize_t, void *, cl_int *);\n\ntypedef cl_mem (*f_clCreateImage3D)(cl_context, cl_mem_flags, const cl_image_format *, size_t,\n                \t\tsize_t, size_t, size_t, size_t, 
void *, cl_int *);\n\ntypedef cl_int (*f_clEnqueueMarker)(cl_command_queue, cl_event *);\n\ntypedef cl_int(*f_clEnqueueWaitForEvents)(cl_command_queue, cl_uint, const cl_event *);\n\ntypedef cl_int (*f_clEnqueueBarrier)(cl_command_queue);\n\ntypedef cl_int (*f_clUnloadCompiler)(void);\n\ntypedef void * (*f_clGetExtensionFunctionAddress)(const char *);\n\ntypedef cl_mem (*f_clCreateFromGLBuffer) (cl_context, cl_mem_flags, cl_GLuint, int *);\n\ntypedef cl_mem (*f_clCreateFromGLTexture) (cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int *);\n\ntypedef cl_mem (*f_clCreateFromGLRenderbuffer) (cl_context, cl_mem_flags, cl_GLuint, cl_int *);\n\ntypedef cl_int (*f_clGetGLObjectInfo) (cl_mem memobj, cl_gl_object_type *, cl_GLuint *);\n\ntypedef cl_int (*f_clGetGLTextureInfo) (cl_mem, cl_gl_texture_info, size_t, void *, size_t *);\n\ntypedef cl_int (*f_clEnqueueAcquireGLObjects) (cl_command_queue, cl_uint, const cl_mem *, cl_uint,\n                                        const cl_event *, cl_event *);\n\ntypedef cl_int (*f_clEnqueueReleaseGLObjects) (cl_command_queue, cl_uint, const cl_mem *, cl_uint,\n                                        const cl_event *, cl_event *);\n\ntypedef cl_mem (*f_clCreateFromGLTexture2D) (cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int *);\n\ntypedef cl_mem (*f_clCreateFromGLTexture3D) (cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int *);\n\n//typedef cl_uint     cl_gl_context_info;\ntypedef cl_int (*f_clGetGLContextInfoKHR) (const cl_context_properties *, cl_gl_context_info, size_t,\n                                        void *, size_t *);\n\n// Additional api to reset currently opened opencl shared-object\n// Subsequent calls will use newly set environment variables\nvoid stubOpenclReset();\n\n#endif    // LIBOPENCL_STUB_H\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/android/AndroidGLContext.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"../core/GLContext.h\"\n#include \"../core/GLTexture.h\"\n#include <unordered_map>\n\nenum GL_Renderer { Adreno, Mali /*, PowerVR */ };\n\nclass AndroidGLContext : public GLContext {\n private:\n  EGLContext _eglcontext;\n\n  EGLContext create_opengl_thread_context();\n  bool opengl_thread_context_exists();\n  bool release_opengl_thread_context();\n\n public:\n  AndroidGLContext();\n  ~AndroidGLContext();\n  void set_context();\n  void reset_context();\n  void flush_context();\n  void init_gles3();\n  GL_Renderer get_platform();\n};\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/android/arm_neon_support.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include <arm_neon.h>\ntypedef __fp16 float16_t;\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/android/gl3stub.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#ifndef __gl3_h_\n#define __gl3_h_\n\n/*\n * stub gl3.h for dynamic loading, based on:\n * gl3.h last updated on $Date: 2013-02-12 14:37:24 -0800 (Tue, 12 Feb 2013) $\n *\n * Changes:\n * - Added #include <GLES2/gl2.h>\n * - Removed duplicate OpenGL ES 2.0 declarations\n * - Converted OpenGL ES 3.0 function prototypes to function pointer\n *   declarations\n * - Added gl3stubInit() declaration\n */\n\n#include <GLES2/gl2.h>\n#include <android/api-level.h>\n\n// clang-format off\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/* Call this function before calling any OpenGL ES 3.0 functions. It will\n * return GL_TRUE if the OpenGL ES 3.0 was successfully initialized, GL_FALSE\n * otherwise. */\nGLboolean gl3stubInit();\n\n/*-------------------------------------------------------------------------\n * Data type definitions\n *-----------------------------------------------------------------------*/\n\n/* OpenGL ES 3.0 */\n\ntypedef unsigned short   GLhalf;\n#if __ANDROID_API__ <= 19\ntypedef khronos_int64_t  GLint64;\ntypedef khronos_uint64_t GLuint64;\ntypedef struct __GLsync *GLsync;\n#endif\n\n/*-------------------------------------------------------------------------\n * Token definitions\n *-----------------------------------------------------------------------*/\n\n/* OpenGL ES core versions */\n#define GL_ES_VERSION_3_0                                1\n\n/* OpenGL ES 3.0 */\n\n#define GL_READ_BUFFER                                   0x0C02\n#define GL_UNPACK_ROW_LENGTH                             0x0CF2\n#define GL_UNPACK_SKIP_ROWS                              0x0CF3\n#define GL_UNPACK_SKIP_PIXELS                            0x0CF4\n#define GL_PACK_ROW_LENGTH                               0x0D02\n#define GL_PACK_SKIP_ROWS                                0x0D03\n#define GL_PACK_SKIP_PIXELS                              0x0D04\n#define GL_COLOR                                         
0x1800\n#define GL_DEPTH                                         0x1801\n#define GL_STENCIL                                       0x1802\n#define GL_RED                                           0x1903\n#define GL_RGB8                                          0x8051\n#define GL_RGBA8                                         0x8058\n#define GL_RGB10_A2                                      0x8059\n#define GL_TEXTURE_BINDING_3D                            0x806A\n#define GL_UNPACK_SKIP_IMAGES                            0x806D\n#define GL_UNPACK_IMAGE_HEIGHT                           0x806E\n#define GL_TEXTURE_3D                                    0x806F\n#define GL_TEXTURE_WRAP_R                                0x8072\n#define GL_MAX_3D_TEXTURE_SIZE                           0x8073\n#define GL_UNSIGNED_INT_2_10_10_10_REV                   0x8368\n#define GL_MAX_ELEMENTS_VERTICES                         0x80E8\n#define GL_MAX_ELEMENTS_INDICES                          0x80E9\n#define GL_TEXTURE_MIN_LOD                               0x813A\n#define GL_TEXTURE_MAX_LOD                               0x813B\n#define GL_TEXTURE_BASE_LEVEL                            0x813C\n#define GL_TEXTURE_MAX_LEVEL                             0x813D\n#define GL_MIN                                           0x8007\n#define GL_MAX                                           0x8008\n#define GL_DEPTH_COMPONENT24                             0x81A6\n#define GL_MAX_TEXTURE_LOD_BIAS                          0x84FD\n#define GL_TEXTURE_COMPARE_MODE                          0x884C\n#define GL_TEXTURE_COMPARE_FUNC                          0x884D\n#define GL_CURRENT_QUERY                                 0x8865\n#define GL_QUERY_RESULT                                  0x8866\n#define GL_QUERY_RESULT_AVAILABLE                        0x8867\n#define GL_BUFFER_MAPPED                                 0x88BC\n#define GL_BUFFER_MAP_POINTER                            0x88BD\n#define GL_STREAM_READ                    
               0x88E1\n#define GL_STREAM_COPY                                   0x88E2\n#define GL_STATIC_READ                                   0x88E5\n#define GL_STATIC_COPY                                   0x88E6\n#define GL_DYNAMIC_READ                                  0x88E9\n#define GL_DYNAMIC_COPY                                  0x88EA\n#define GL_MAX_DRAW_BUFFERS                              0x8824\n#define GL_DRAW_BUFFER0                                  0x8825\n#define GL_DRAW_BUFFER1                                  0x8826\n#define GL_DRAW_BUFFER2                                  0x8827\n#define GL_DRAW_BUFFER3                                  0x8828\n#define GL_DRAW_BUFFER4                                  0x8829\n#define GL_DRAW_BUFFER5                                  0x882A\n#define GL_DRAW_BUFFER6                                  0x882B\n#define GL_DRAW_BUFFER7                                  0x882C\n#define GL_DRAW_BUFFER8                                  0x882D\n#define GL_DRAW_BUFFER9                                  0x882E\n#define GL_DRAW_BUFFER10                                 0x882F\n#define GL_DRAW_BUFFER11                                 0x8830\n#define GL_DRAW_BUFFER12                                 0x8831\n#define GL_DRAW_BUFFER13                                 0x8832\n#define GL_DRAW_BUFFER14                                 0x8833\n#define GL_DRAW_BUFFER15                                 0x8834\n#define GL_MAX_FRAGMENT_UNIFORM_COMPONENTS               0x8B49\n#define GL_MAX_VERTEX_UNIFORM_COMPONENTS                 0x8B4A\n#define GL_SAMPLER_3D                                    0x8B5F\n#define GL_SAMPLER_2D_SHADOW                             0x8B62\n#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT               0x8B8B\n#define GL_PIXEL_PACK_BUFFER                             0x88EB\n#define GL_PIXEL_UNPACK_BUFFER                           0x88EC\n#define GL_PIXEL_PACK_BUFFER_BINDING                     0x88ED\n#define 
GL_PIXEL_UNPACK_BUFFER_BINDING                   0x88EF\n#define GL_FLOAT_MAT2x3                                  0x8B65\n#define GL_FLOAT_MAT2x4                                  0x8B66\n#define GL_FLOAT_MAT3x2                                  0x8B67\n#define GL_FLOAT_MAT3x4                                  0x8B68\n#define GL_FLOAT_MAT4x2                                  0x8B69\n#define GL_FLOAT_MAT4x3                                  0x8B6A\n#define GL_SRGB                                          0x8C40\n#define GL_SRGB8                                         0x8C41\n#define GL_SRGB8_ALPHA8                                  0x8C43\n#define GL_COMPARE_REF_TO_TEXTURE                        0x884E\n#define GL_MAJOR_VERSION                                 0x821B\n#define GL_MINOR_VERSION                                 0x821C\n#define GL_NUM_EXTENSIONS                                0x821D\n#define GL_RGBA32F                                       0x8814\n#define GL_RGB32F                                        0x8815\n#define GL_RGBA16F                                       0x881A\n#define GL_RGB16F                                        0x881B\n#define GL_VERTEX_ATTRIB_ARRAY_INTEGER                   0x88FD\n#define GL_MAX_ARRAY_TEXTURE_LAYERS                      0x88FF\n#define GL_MIN_PROGRAM_TEXEL_OFFSET                      0x8904\n#define GL_MAX_PROGRAM_TEXEL_OFFSET                      0x8905\n#define GL_MAX_VARYING_COMPONENTS                        0x8B4B\n#define GL_TEXTURE_2D_ARRAY                              0x8C1A\n#define GL_TEXTURE_BINDING_2D_ARRAY                      0x8C1D\n#define GL_R11F_G11F_B10F                                0x8C3A\n#define GL_UNSIGNED_INT_10F_11F_11F_REV                  0x8C3B\n#define GL_RGB9_E5                                       0x8C3D\n#define GL_UNSIGNED_INT_5_9_9_9_REV                      0x8C3E\n#define GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH         0x8C76\n#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE                
0x8C7F\n#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS    0x8C80\n#define GL_TRANSFORM_FEEDBACK_VARYINGS                   0x8C83\n#define GL_TRANSFORM_FEEDBACK_BUFFER_START               0x8C84\n#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE                0x8C85\n#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN         0x8C88\n#define GL_RASTERIZER_DISCARD                            0x8C89\n#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS 0x8C8A\n#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS       0x8C8B\n#define GL_INTERLEAVED_ATTRIBS                           0x8C8C\n#define GL_SEPARATE_ATTRIBS                              0x8C8D\n#define GL_TRANSFORM_FEEDBACK_BUFFER                     0x8C8E\n#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING             0x8C8F\n#define GL_RGBA32UI                                      0x8D70\n#define GL_RGB32UI                                       0x8D71\n#define GL_RGBA16UI                                      0x8D76\n#define GL_RGB16UI                                       0x8D77\n#define GL_RGBA8UI                                       0x8D7C\n#define GL_RGB8UI                                        0x8D7D\n#define GL_RGBA32I                                       0x8D82\n#define GL_RGB32I                                        0x8D83\n#define GL_RGBA16I                                       0x8D88\n#define GL_RGB16I                                        0x8D89\n#define GL_RGBA8I                                        0x8D8E\n#define GL_RGB8I                                         0x8D8F\n#define GL_RED_INTEGER                                   0x8D94\n#define GL_RGB_INTEGER                                   0x8D98\n#define GL_RGBA_INTEGER                                  0x8D99\n#define GL_SAMPLER_2D_ARRAY                              0x8DC1\n#define GL_SAMPLER_2D_ARRAY_SHADOW                       0x8DC4\n#define GL_SAMPLER_CUBE_SHADOW                           0x8DC5\n#define GL_UNSIGNED_INT_VEC2              
               0x8DC6\n#define GL_UNSIGNED_INT_VEC3                             0x8DC7\n#define GL_UNSIGNED_INT_VEC4                             0x8DC8\n#define GL_INT_SAMPLER_2D                                0x8DCA\n#define GL_INT_SAMPLER_3D                                0x8DCB\n#define GL_INT_SAMPLER_CUBE                              0x8DCC\n#define GL_INT_SAMPLER_2D_ARRAY                          0x8DCF\n#define GL_UNSIGNED_INT_SAMPLER_2D                       0x8DD2\n#define GL_UNSIGNED_INT_SAMPLER_3D                       0x8DD3\n#define GL_UNSIGNED_INT_SAMPLER_CUBE                     0x8DD4\n#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY                 0x8DD7\n#define GL_BUFFER_ACCESS_FLAGS                           0x911F\n#define GL_BUFFER_MAP_LENGTH                             0x9120\n#define GL_BUFFER_MAP_OFFSET                             0x9121\n#define GL_DEPTH_COMPONENT32F                            0x8CAC\n#define GL_DEPTH32F_STENCIL8                             0x8CAD\n#define GL_FLOAT_32_UNSIGNED_INT_24_8_REV                0x8DAD\n#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING         0x8210\n#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE         0x8211\n#define GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE               0x8212\n#define GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE             0x8213\n#define GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE              0x8214\n#define GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE             0x8215\n#define GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE             0x8216\n#define GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE           0x8217\n#define GL_FRAMEBUFFER_DEFAULT                           0x8218\n#define GL_FRAMEBUFFER_UNDEFINED                         0x8219\n#define GL_DEPTH_STENCIL_ATTACHMENT                      0x821A\n#define GL_DEPTH_STENCIL                                 0x84F9\n#define GL_UNSIGNED_INT_24_8                             0x84FA\n#define GL_DEPTH24_STENCIL8                              0x88F0\n#define 
GL_UNSIGNED_NORMALIZED                           0x8C17\n#define GL_DRAW_FRAMEBUFFER_BINDING                      GL_FRAMEBUFFER_BINDING\n#define GL_READ_FRAMEBUFFER                              0x8CA8\n#define GL_DRAW_FRAMEBUFFER                              0x8CA9\n#define GL_READ_FRAMEBUFFER_BINDING                      0x8CAA\n#define GL_RENDERBUFFER_SAMPLES                          0x8CAB\n#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER          0x8CD4\n#define GL_MAX_COLOR_ATTACHMENTS                         0x8CDF\n#define GL_COLOR_ATTACHMENT1                             0x8CE1\n#define GL_COLOR_ATTACHMENT2                             0x8CE2\n#define GL_COLOR_ATTACHMENT3                             0x8CE3\n#define GL_COLOR_ATTACHMENT4                             0x8CE4\n#define GL_COLOR_ATTACHMENT5                             0x8CE5\n#define GL_COLOR_ATTACHMENT6                             0x8CE6\n#define GL_COLOR_ATTACHMENT7                             0x8CE7\n#define GL_COLOR_ATTACHMENT8                             0x8CE8\n#define GL_COLOR_ATTACHMENT9                             0x8CE9\n#define GL_COLOR_ATTACHMENT10                            0x8CEA\n#define GL_COLOR_ATTACHMENT11                            0x8CEB\n#define GL_COLOR_ATTACHMENT12                            0x8CEC\n#define GL_COLOR_ATTACHMENT13                            0x8CED\n#define GL_COLOR_ATTACHMENT14                            0x8CEE\n#define GL_COLOR_ATTACHMENT15                            0x8CEF\n#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE            0x8D56\n#define GL_MAX_SAMPLES                                   0x8D57\n#define GL_HALF_FLOAT                                    0x140B\n#define GL_MAP_READ_BIT                                  0x0001\n#define GL_MAP_WRITE_BIT                                 0x0002\n#define GL_MAP_INVALIDATE_RANGE_BIT                      0x0004\n#define GL_MAP_INVALIDATE_BUFFER_BIT                     0x0008\n#define GL_MAP_FLUSH_EXPLICIT_BIT         
               0x0010\n#define GL_MAP_UNSYNCHRONIZED_BIT                        0x0020\n#define GL_RG                                            0x8227\n#define GL_RG_INTEGER                                    0x8228\n#define GL_R8                                            0x8229\n#define GL_RG8                                           0x822B\n#define GL_R16F                                          0x822D\n#define GL_R32F                                          0x822E\n#define GL_RG16F                                         0x822F\n#define GL_RG32F                                         0x8230\n#define GL_R8I                                           0x8231\n#define GL_R8UI                                          0x8232\n#define GL_R16I                                          0x8233\n#define GL_R16UI                                         0x8234\n#define GL_R32I                                          0x8235\n#define GL_R32UI                                         0x8236\n#define GL_RG8I                                          0x8237\n#define GL_RG8UI                                         0x8238\n#define GL_RG16I                                         0x8239\n#define GL_RG16UI                                        0x823A\n#define GL_RG32I                                         0x823B\n#define GL_RG32UI                                        0x823C\n#define GL_VERTEX_ARRAY_BINDING                          0x85B5\n#define GL_R8_SNORM                                      0x8F94\n#define GL_RG8_SNORM                                     0x8F95\n#define GL_RGB8_SNORM                                    0x8F96\n#define GL_RGBA8_SNORM                                   0x8F97\n#define GL_SIGNED_NORMALIZED                             0x8F9C\n#define GL_PRIMITIVE_RESTART_FIXED_INDEX                 0x8D69\n#define GL_COPY_READ_BUFFER                              0x8F36\n#define GL_COPY_WRITE_BUFFER                             0x8F37\n#define 
GL_COPY_READ_BUFFER_BINDING                      GL_COPY_READ_BUFFER\n#define GL_COPY_WRITE_BUFFER_BINDING                     GL_COPY_WRITE_BUFFER\n#define GL_UNIFORM_BUFFER                                0x8A11\n#define GL_UNIFORM_BUFFER_BINDING                        0x8A28\n#define GL_UNIFORM_BUFFER_START                          0x8A29\n#define GL_UNIFORM_BUFFER_SIZE                           0x8A2A\n#define GL_MAX_VERTEX_UNIFORM_BLOCKS                     0x8A2B\n#define GL_MAX_FRAGMENT_UNIFORM_BLOCKS                   0x8A2D\n#define GL_MAX_COMBINED_UNIFORM_BLOCKS                   0x8A2E\n#define GL_MAX_UNIFORM_BUFFER_BINDINGS                   0x8A2F\n#define GL_MAX_UNIFORM_BLOCK_SIZE                        0x8A30\n#define GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS        0x8A31\n#define GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS      0x8A33\n#define GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT               0x8A34\n#define GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH          0x8A35\n#define GL_ACTIVE_UNIFORM_BLOCKS                         0x8A36\n#define GL_UNIFORM_TYPE                                  0x8A37\n#define GL_UNIFORM_SIZE                                  0x8A38\n#define GL_UNIFORM_NAME_LENGTH                           0x8A39\n#define GL_UNIFORM_BLOCK_INDEX                           0x8A3A\n#define GL_UNIFORM_OFFSET                                0x8A3B\n#define GL_UNIFORM_ARRAY_STRIDE                          0x8A3C\n#define GL_UNIFORM_MATRIX_STRIDE                         0x8A3D\n#define GL_UNIFORM_IS_ROW_MAJOR                          0x8A3E\n#define GL_UNIFORM_BLOCK_BINDING                         0x8A3F\n#define GL_UNIFORM_BLOCK_DATA_SIZE                       0x8A40\n#define GL_UNIFORM_BLOCK_NAME_LENGTH                     0x8A41\n#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS                 0x8A42\n#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES          0x8A43\n#define GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER     0x8A44\n#define 
GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER   0x8A46\n#define GL_INVALID_INDEX                                 0xFFFFFFFFu\n#define GL_MAX_VERTEX_OUTPUT_COMPONENTS                  0x9122\n#define GL_MAX_FRAGMENT_INPUT_COMPONENTS                 0x9125\n#define GL_MAX_SERVER_WAIT_TIMEOUT                       0x9111\n#define GL_OBJECT_TYPE                                   0x9112\n#define GL_SYNC_CONDITION                                0x9113\n#define GL_SYNC_STATUS                                   0x9114\n#define GL_SYNC_FLAGS                                    0x9115\n#define GL_SYNC_FENCE                                    0x9116\n#define GL_SYNC_GPU_COMMANDS_COMPLETE                    0x9117\n#define GL_UNSIGNALED                                    0x9118\n#define GL_SIGNALED                                      0x9119\n#define GL_ALREADY_SIGNALED                              0x911A\n#define GL_TIMEOUT_EXPIRED                               0x911B\n#define GL_CONDITION_SATISFIED                           0x911C\n#define GL_WAIT_FAILED                                   0x911D\n#define GL_SYNC_FLUSH_COMMANDS_BIT                       0x00000001\n#define GL_TIMEOUT_IGNORED                               0xFFFFFFFFFFFFFFFFull\n#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR                   0x88FE\n#define GL_ANY_SAMPLES_PASSED                            0x8C2F\n#define GL_ANY_SAMPLES_PASSED_CONSERVATIVE               0x8D6A\n#define GL_SAMPLER_BINDING                               0x8919\n#define GL_RGB10_A2UI                                    0x906F\n#define GL_TEXTURE_SWIZZLE_R                             0x8E42\n#define GL_TEXTURE_SWIZZLE_G                             0x8E43\n#define GL_TEXTURE_SWIZZLE_B                             0x8E44\n#define GL_TEXTURE_SWIZZLE_A                             0x8E45\n#define GL_GREEN                                         0x1904\n#define GL_BLUE                                          0x1905\n#define GL_INT_2_10_10_10_REV     
                       0x8D9F\n#define GL_TRANSFORM_FEEDBACK                            0x8E22\n#define GL_TRANSFORM_FEEDBACK_PAUSED                     0x8E23\n#define GL_TRANSFORM_FEEDBACK_ACTIVE                     0x8E24\n#define GL_TRANSFORM_FEEDBACK_BINDING                    0x8E25\n#define GL_PROGRAM_BINARY_RETRIEVABLE_HINT               0x8257\n#define GL_PROGRAM_BINARY_LENGTH                         0x8741\n#define GL_NUM_PROGRAM_BINARY_FORMATS                    0x87FE\n#define GL_PROGRAM_BINARY_FORMATS                        0x87FF\n#define GL_COMPRESSED_R11_EAC                            0x9270\n#define GL_COMPRESSED_SIGNED_R11_EAC                     0x9271\n#define GL_COMPRESSED_RG11_EAC                           0x9272\n#define GL_COMPRESSED_SIGNED_RG11_EAC                    0x9273\n#define GL_COMPRESSED_RGB8_ETC2                          0x9274\n#define GL_COMPRESSED_SRGB8_ETC2                         0x9275\n#define GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2      0x9276\n#define GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2     0x9277\n#define GL_COMPRESSED_RGBA8_ETC2_EAC                     0x9278\n#define GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC              0x9279\n#define GL_TEXTURE_IMMUTABLE_FORMAT                      0x912F\n#define GL_MAX_ELEMENT_INDEX                             0x8D6B\n#define GL_NUM_SAMPLE_COUNTS                             0x9380\n#define GL_TEXTURE_IMMUTABLE_LEVELS                      0x82DF\n\n/*-------------------------------------------------------------------------\n * Entrypoint definitions\n *-----------------------------------------------------------------------*/\n\n/* OpenGL ES 3.0 */\n\nextern GL_APICALL void           (* GL_APIENTRY glReadBuffer) (GLenum mode);\nextern GL_APICALL void           (* GL_APIENTRY glDrawRangeElements) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const GLvoid* indices);\nextern GL_APICALL void           (* GL_APIENTRY glTexImage3D) (GLenum target, GLint level, 
GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid* pixels);\nextern GL_APICALL void           (* GL_APIENTRY glTexSubImage3D) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid* pixels);\nextern GL_APICALL void           (* GL_APIENTRY glCopyTexSubImage3D) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);\nextern GL_APICALL void           (* GL_APIENTRY glCompressedTexImage3D) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid* data);\nextern GL_APICALL void           (* GL_APIENTRY glCompressedTexSubImage3D) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid* data);\nextern GL_APICALL void           (* GL_APIENTRY glGenQueries) (GLsizei n, GLuint* ids);\nextern GL_APICALL void           (* GL_APIENTRY glDeleteQueries) (GLsizei n, const GLuint* ids);\nextern GL_APICALL GLboolean      (* GL_APIENTRY glIsQuery) (GLuint id);\nextern GL_APICALL void           (* GL_APIENTRY glBeginQuery) (GLenum target, GLuint id);\nextern GL_APICALL void           (* GL_APIENTRY glEndQuery) (GLenum target);\nextern GL_APICALL void           (* GL_APIENTRY glGetQueryiv) (GLenum target, GLenum pname, GLint* params);\nextern GL_APICALL void           (* GL_APIENTRY glGetQueryObjectuiv) (GLuint id, GLenum pname, GLuint* params);\nextern GL_APICALL GLboolean      (* GL_APIENTRY glUnmapBuffer) (GLenum target);\nextern GL_APICALL void           (* GL_APIENTRY glGetBufferPointerv) (GLenum target, GLenum pname, GLvoid** params);\nextern GL_APICALL void           (* GL_APIENTRY glDrawBuffers) (GLsizei n, const GLenum* bufs);\nextern 
GL_APICALL void           (* GL_APIENTRY glUniformMatrix2x3fv) (GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);\nextern GL_APICALL void           (* GL_APIENTRY glUniformMatrix3x2fv) (GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);\nextern GL_APICALL void           (* GL_APIENTRY glUniformMatrix2x4fv) (GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);\nextern GL_APICALL void           (* GL_APIENTRY glUniformMatrix4x2fv) (GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);\nextern GL_APICALL void           (* GL_APIENTRY glUniformMatrix3x4fv) (GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);\nextern GL_APICALL void           (* GL_APIENTRY glUniformMatrix4x3fv) (GLint location, GLsizei count, GLboolean transpose, const GLfloat* value);\nextern GL_APICALL void           (* GL_APIENTRY glBlitFramebuffer) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);\nextern GL_APICALL void           (* GL_APIENTRY glRenderbufferStorageMultisample) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);\nextern GL_APICALL void           (* GL_APIENTRY glFramebufferTextureLayer) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer);\nextern GL_APICALL GLvoid*        (* GL_APIENTRY glMapBufferRange) (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access);\nextern GL_APICALL void           (* GL_APIENTRY glFlushMappedBufferRange) (GLenum target, GLintptr offset, GLsizeiptr length);\nextern GL_APICALL void           (* GL_APIENTRY glBindVertexArray) (GLuint array);\nextern GL_APICALL void           (* GL_APIENTRY glDeleteVertexArrays) (GLsizei n, const GLuint* arrays);\nextern GL_APICALL void           (* GL_APIENTRY glGenVertexArrays) (GLsizei n, GLuint* arrays);\nextern GL_APICALL GLboolean      (* 
GL_APIENTRY glIsVertexArray) (GLuint array);\nextern GL_APICALL void           (* GL_APIENTRY glGetIntegeri_v) (GLenum target, GLuint index, GLint* data);\nextern GL_APICALL void           (* GL_APIENTRY glBeginTransformFeedback) (GLenum primitiveMode);\nextern GL_APICALL void           (* GL_APIENTRY glEndTransformFeedback) (void);\nextern GL_APICALL void           (* GL_APIENTRY glBindBufferRange) (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size);\nextern GL_APICALL void           (* GL_APIENTRY glBindBufferBase) (GLenum target, GLuint index, GLuint buffer);\nextern GL_APICALL void           (* GL_APIENTRY glTransformFeedbackVaryings) (GLuint program, GLsizei count, const GLchar* const* varyings, GLenum bufferMode);\nextern GL_APICALL void           (* GL_APIENTRY glGetTransformFeedbackVarying) (GLuint program, GLuint index, GLsizei bufSize, GLsizei* length, GLsizei* size, GLenum* type, GLchar* name);\nextern GL_APICALL void           (* GL_APIENTRY glVertexAttribIPointer) (GLuint index, GLint size, GLenum type, GLsizei stride, const GLvoid* pointer);\nextern GL_APICALL void           (* GL_APIENTRY glGetVertexAttribIiv) (GLuint index, GLenum pname, GLint* params);\nextern GL_APICALL void           (* GL_APIENTRY glGetVertexAttribIuiv) (GLuint index, GLenum pname, GLuint* params);\nextern GL_APICALL void           (* GL_APIENTRY glVertexAttribI4i) (GLuint index, GLint x, GLint y, GLint z, GLint w);\nextern GL_APICALL void           (* GL_APIENTRY glVertexAttribI4ui) (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);\nextern GL_APICALL void           (* GL_APIENTRY glVertexAttribI4iv) (GLuint index, const GLint* v);\nextern GL_APICALL void           (* GL_APIENTRY glVertexAttribI4uiv) (GLuint index, const GLuint* v);\nextern GL_APICALL void           (* GL_APIENTRY glGetUniformuiv) (GLuint program, GLint location, GLuint* params);\nextern GL_APICALL GLint          (* GL_APIENTRY glGetFragDataLocation) (GLuint program, const GLchar 
*name);\nextern GL_APICALL void           (* GL_APIENTRY glUniform1ui) (GLint location, GLuint v0);\nextern GL_APICALL void           (* GL_APIENTRY glUniform2ui) (GLint location, GLuint v0, GLuint v1);\nextern GL_APICALL void           (* GL_APIENTRY glUniform3ui) (GLint location, GLuint v0, GLuint v1, GLuint v2);\nextern GL_APICALL void           (* GL_APIENTRY glUniform4ui) (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);\nextern GL_APICALL void           (* GL_APIENTRY glUniform1uiv) (GLint location, GLsizei count, const GLuint* value);\nextern GL_APICALL void           (* GL_APIENTRY glUniform2uiv) (GLint location, GLsizei count, const GLuint* value);\nextern GL_APICALL void           (* GL_APIENTRY glUniform3uiv) (GLint location, GLsizei count, const GLuint* value);\nextern GL_APICALL void           (* GL_APIENTRY glUniform4uiv) (GLint location, GLsizei count, const GLuint* value);\nextern GL_APICALL void           (* GL_APIENTRY glClearBufferiv) (GLenum buffer, GLint drawbuffer, const GLint* value);\nextern GL_APICALL void           (* GL_APIENTRY glClearBufferuiv) (GLenum buffer, GLint drawbuffer, const GLuint* value);\nextern GL_APICALL void           (* GL_APIENTRY glClearBufferfv) (GLenum buffer, GLint drawbuffer, const GLfloat* value);\nextern GL_APICALL void           (* GL_APIENTRY glClearBufferfi) (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil);\nextern GL_APICALL const GLubyte* (* GL_APIENTRY glGetStringi) (GLenum name, GLuint index);\nextern GL_APICALL void           (* GL_APIENTRY glCopyBufferSubData) (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);\nextern GL_APICALL void           (* GL_APIENTRY glGetUniformIndices) (GLuint program, GLsizei uniformCount, const GLchar* const* uniformNames, GLuint* uniformIndices);\nextern GL_APICALL void           (* GL_APIENTRY glGetActiveUniformsiv) (GLuint program, GLsizei uniformCount, const GLuint* uniformIndices, GLenum pname, 
GLint* params);\nextern GL_APICALL GLuint         (* GL_APIENTRY glGetUniformBlockIndex) (GLuint program, const GLchar* uniformBlockName);\nextern GL_APICALL void           (* GL_APIENTRY glGetActiveUniformBlockiv) (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint* params);\nextern GL_APICALL void           (* GL_APIENTRY glGetActiveUniformBlockName) (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei* length, GLchar* uniformBlockName);\nextern GL_APICALL void           (* GL_APIENTRY glUniformBlockBinding) (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding);\nextern GL_APICALL void           (* GL_APIENTRY glDrawArraysInstanced) (GLenum mode, GLint first, GLsizei count, GLsizei instanceCount);\nextern GL_APICALL void           (* GL_APIENTRY glDrawElementsInstanced) (GLenum mode, GLsizei count, GLenum type, const GLvoid* indices, GLsizei instanceCount);\nextern GL_APICALL GLsync         (* GL_APIENTRY glFenceSync) (GLenum condition, GLbitfield flags);\nextern GL_APICALL GLboolean      (* GL_APIENTRY glIsSync) (GLsync sync);\nextern GL_APICALL void           (* GL_APIENTRY glDeleteSync) (GLsync sync);\nextern GL_APICALL GLenum         (* GL_APIENTRY glClientWaitSync) (GLsync sync, GLbitfield flags, GLuint64 timeout);\nextern GL_APICALL void           (* GL_APIENTRY glWaitSync) (GLsync sync, GLbitfield flags, GLuint64 timeout);\nextern GL_APICALL void           (* GL_APIENTRY glGetInteger64v) (GLenum pname, GLint64* params);\nextern GL_APICALL void           (* GL_APIENTRY glGetSynciv) (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei* length, GLint* values);\nextern GL_APICALL void           (* GL_APIENTRY glGetInteger64i_v) (GLenum target, GLuint index, GLint64* data);\nextern GL_APICALL void           (* GL_APIENTRY glGetBufferParameteri64v) (GLenum target, GLenum pname, GLint64* params);\nextern GL_APICALL void           (* GL_APIENTRY glGenSamplers) (GLsizei count, GLuint* samplers);\nextern GL_APICALL void     
      (* GL_APIENTRY glDeleteSamplers) (GLsizei count, const GLuint* samplers);\nextern GL_APICALL GLboolean      (* GL_APIENTRY glIsSampler) (GLuint sampler);\nextern GL_APICALL void           (* GL_APIENTRY glBindSampler) (GLuint unit, GLuint sampler);\nextern GL_APICALL void           (* GL_APIENTRY glSamplerParameteri) (GLuint sampler, GLenum pname, GLint param);\nextern GL_APICALL void           (* GL_APIENTRY glSamplerParameteriv) (GLuint sampler, GLenum pname, const GLint* param);\nextern GL_APICALL void           (* GL_APIENTRY glSamplerParameterf) (GLuint sampler, GLenum pname, GLfloat param);\nextern GL_APICALL void           (* GL_APIENTRY glSamplerParameterfv) (GLuint sampler, GLenum pname, const GLfloat* param);\nextern GL_APICALL void           (* GL_APIENTRY glGetSamplerParameteriv) (GLuint sampler, GLenum pname, GLint* params);\nextern GL_APICALL void           (* GL_APIENTRY glGetSamplerParameterfv) (GLuint sampler, GLenum pname, GLfloat* params);\nextern GL_APICALL void           (* GL_APIENTRY glVertexAttribDivisor) (GLuint index, GLuint divisor);\nextern GL_APICALL void           (* GL_APIENTRY glBindTransformFeedback) (GLenum target, GLuint id);\nextern GL_APICALL void           (* GL_APIENTRY glDeleteTransformFeedbacks) (GLsizei n, const GLuint* ids);\nextern GL_APICALL void           (* GL_APIENTRY glGenTransformFeedbacks) (GLsizei n, GLuint* ids);\nextern GL_APICALL GLboolean      (* GL_APIENTRY glIsTransformFeedback) (GLuint id);\nextern GL_APICALL void           (* GL_APIENTRY glPauseTransformFeedback) (void);\nextern GL_APICALL void           (* GL_APIENTRY glResumeTransformFeedback) (void);\nextern GL_APICALL void           (* GL_APIENTRY glGetProgramBinary) (GLuint program, GLsizei bufSize, GLsizei* length, GLenum* binaryFormat, GLvoid* binary);\nextern GL_APICALL void           (* GL_APIENTRY glProgramBinary) (GLuint program, GLenum binaryFormat, const GLvoid* binary, GLsizei length);\nextern GL_APICALL void           (* GL_APIENTRY 
glProgramParameteri) (GLuint program, GLenum pname, GLint value);\nextern GL_APICALL void           (* GL_APIENTRY glInvalidateFramebuffer) (GLenum target, GLsizei numAttachments, const GLenum* attachments);\nextern GL_APICALL void           (* GL_APIENTRY glInvalidateSubFramebuffer) (GLenum target, GLsizei numAttachments, const GLenum* attachments, GLint x, GLint y, GLsizei width, GLsizei height);\nextern GL_APICALL void           (* GL_APIENTRY glTexStorage2D) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);\nextern GL_APICALL void           (* GL_APIENTRY glTexStorage3D) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);\nextern GL_APICALL void           (* GL_APIENTRY glGetInternalformativ) (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint* params);\n\n#ifndef GL_EXT_texture_border_clamp\n#define GL_EXT_texture_border_clamp 1\n#define GL_TEXTURE_BORDER_COLOR_EXT       0x1004\n#define GL_CLAMP_TO_BORDER_EXT            0x812D\nextern GL_APICALL void           (* GL_APIENTRY  glTexParameterIivEXT) (GLenum target, GLenum pname, const GLint *params);\nextern GL_APICALL void           (* GL_APIENTRY  glTexParameterIuivEXT) (GLenum target, GLenum pname, const GLuint *params);\nextern GL_APICALL void           (* GL_APIENTRY  glGetTexParameterIivEXT) (GLenum target, GLenum pname, GLint *params);\nextern GL_APICALL void           (* GL_APIENTRY  glGetTexParameterIuivEXT) (GLenum target, GLenum pname, GLuint *params);\nextern GL_APICALL void           (* GL_APIENTRY  glSamplerParameterIivEXT) (GLuint sampler, GLenum pname, const GLint *param);\nextern GL_APICALL void           (* GL_APIENTRY  glSamplerParameterIuivEXT) (GLuint sampler, GLenum pname, const GLuint *param);\nextern GL_APICALL void           (* GL_APIENTRY  glGetSamplerParameterIivEXT) (GLuint sampler, GLenum pname, GLint *params);\nextern GL_APICALL void           (* GL_APIENTRY  
glGetSamplerParameterIuivEXT) (GLuint sampler, GLenum pname, GLuint *params);\n#endif /* GL_EXT_texture_border_clamp */\n\n#ifdef __cplusplus\n}\n#endif\n// clang-format on\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/core/DataTransfer.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"arm_neon_support.h\"\n\nvoid interleaveSlice(void* output,\n                     const float* input,\n                     size_t width,\n                     size_t height,\n                     size_t row_stride,\n                     uint16_t input_channels);\nvoid deInterleaveSlice(float* output,\n                       const void* input,\n                       size_t width,\n                       size_t height,\n                       size_t input_stride,\n                       uint32_t output_channels);\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/core/GL.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n#include \"caffe2/core/common.h\"\n\n#if CAFFE2_IOS\n#include <OpenGLES/ES3/gl.h>\n#include <OpenGLES/ES3/glext.h>\n#elif CAFFE2_ANDROID\n#include <EGL/egl.h>\n#include <GLES2/gl2.h>\n#include \"caffe2/mobile/contrib/opengl/android/gl3stub.h\"\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/core/GLContext.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n#include \"GLTexture.h\"\n#include \"caffe2/core/common.h\"\n#include <functional>\n\nclass GLContext {\n private:\n  static std::unique_ptr<GLContext> _glcontext;\n  std::function<const GLTexture*(const int width, const int height)> foreignTextureAllocator =\n      nullptr;\n\n protected:\n  bool half_float_supported = true;\n\n public:\n  virtual void set_context() = 0;\n  virtual void reset_context() = 0;\n  virtual void flush_context() = 0;\n  virtual ~GLContext(){};\n\n  static void initGLContext();\n  static GLContext* getGLContext();\n  static void deleteGLContext();\n\n  static bool GL_EXT_texture_border_clamp_defined();\n\n  inline bool halfFloatTextureSupported() { return half_float_supported; }\n\n  void setTextureAllocator(\n      std::function<const GLTexture*(const int width, const int height)> textureAllocator) {\n    foreignTextureAllocator = textureAllocator;\n  }\n\n  std::function<const GLTexture*(const int width, const int height)> getTextureAllocator() {\n    return foreignTextureAllocator;\n  }\n};\n\nbool supportOpenGLES3(bool* hfs = nullptr);\n\nbool isSupportedDevice();\n\n#if CAFFE2_IOS\nint iPhoneVersion();\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/core/GLFilter.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"GLContext.h\"\n#include \"GLTexture.h\"\n#include \"arm_neon_support.h\"\n\n#include <functional>\n#include <string>\n#include <vector>\n\n#define BINDING(variableName) (variableName = new binding{#variableName})\n#define ATTRIBUTE(variableName, value) (variableName = new binding{#variableName, value})\n\nclass GLFilter {\n protected:\n  const std::string kernel_name;\n  GLuint program = 0;\n  GLuint frameBuffer = 0;\n  static constexpr int kMaxUniformBlocks = 12;\n  struct uniformBlockInfo {\n    GLuint buffer;\n    GLint size;\n    bool initialized;\n  };\n  std::vector<std::vector<uniformBlockInfo>> uniformBlock;\n  bool frame_buffer_initialized = false;\n\n  // glGetError() can be expensive, we should turn error checking off when we're done with debugging\n\n  static constexpr bool check_opengl_errors = false;\n\n public:\n  typedef std::vector<std::pair<std::string, std::string>> replacements_t;\n\n  struct binding {\n    const std::string name;\n    GLint location;\n  };\n\n  struct texture_attachment {\n    const GLTexture* texture;\n    const binding* uniform;\n  };\n\n  GLFilter(const std::string kernel_name,\n           const std::string vertex_shader,\n           const std::string fragment_shader,\n           const std::vector<binding*> uniforms,\n           const std::vector<binding*> uniform_blocks = {},\n           const std::vector<binding*> attributes = {},\n           const replacements_t& replacements = {});\n\n  // TODO: The set and reset context need to be commented out for unit testing\n  ~GLFilter() {\n    releaseBuffers();\n    deleteProgram();\n    deleteBindings();\n  }\n\n  void throwRuntimeError(std::function<void(std::stringstream& errmsg)> error_formatter) const {\n    std::stringstream errmsg;\n    errmsg << kernel_name << \": \";\n    error_formatter(errmsg);\n    throw std::runtime_error(errmsg.str());\n  }\n\n  void 
checkGLError(std::function<void(std::stringstream& errmsg)> error_formatter) const {\n    if (check_opengl_errors) {\n      GLenum glError = glGetError();\n      if (glError != GL_NO_ERROR) {\n        throwRuntimeError([&](std::stringstream& errmsg) {\n          error_formatter(errmsg);\n          errmsg << \", \" << glError;\n        });\n      }\n    }\n  }\n\n  template <typename T>\n  void attach_uniform_buffer(const binding* block,\n                             GLuint bindingPoint,\n                             std::function<void(T*, size_t)> loader,\n                             int batch = -1);\n\n  void run(const std::vector<texture_attachment>& input,\n           const std::vector<const GLTexture*>& output,\n           std::function<void(void)> uniforms_initializer,\n           int width,\n           int height);\n\n  void releaseBuffers();\n  void deleteProgram();\n  void deleteBindings();\n\n  static const char* vertex_shader;\n\n private:\n  const std::vector<binding*> uniforms_;\n  const std::vector<binding*> uniform_blocks_;\n  const std::vector<binding*> attributes_;\n\n  std::string process_replacements(std::string source, const replacements_t& replacements) const;\n\n  bool createProgram(const GLchar* vertSource, const GLchar* fragSource, GLuint* program) const;\n\n  GLint compileShader(GLenum target, GLsizei count, const GLchar** sources, GLuint* shader) const;\n  GLint linkProgram(GLuint program) const;\n  GLint validateProgram(GLuint program) const;\n};\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/core/GLImage.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"GLTexture.h\"\n#include \"caffe2/core/logging.h\"\n\n#include <functional>\n#include <vector>\n\ntemplate <typename T>\nclass GLImage {\n public:\n  const int width;\n  const int height;\n  const int channels;\n  const int data_size;\n\n  const int tile_x;\n  const int tile_y;\n  const int texture_width;\n  const int texture_height;\n  const int slices;\n\n  const std::vector<const GLTexture*> textures;\n\n  constexpr static int slice_channels = 4;\n\n  static constexpr int channels_to_slices(int channels, int tile_x, int tile_y) {\n    return ((channels + slice_channels - 1) / slice_channels + tile_x * tile_y - 1) /\n           (tile_x * tile_y);\n  }\n\n  static const std::vector<const GLTexture*> allocate_textures(\n      int slices, std::function<const GLTexture*(int slice)> texture_loader) {\n    std::vector<const GLTexture*> textures;\n    for (int i = 0; i < slices; i++) {\n      textures.push_back(texture_loader(i));\n    }\n    return textures;\n  }\n\n  GLImage(int _width,\n          int _height,\n          int _channels,\n          int _tile_x,\n          int _tile_y,\n          std::function<const GLTexture*(int slice)> texture_loader)\n      : width(_width),\n        height(_height),\n        channels(_channels),\n        data_size(sizeof(T)),\n        tile_x(_tile_x),\n        tile_y(_tile_y),\n        texture_width(_width * _tile_x),\n        texture_height(_height * _tile_y),\n        slices(channels_to_slices(_channels, _tile_x, _tile_y)),\n        textures(allocate_textures(slices, texture_loader)) {\n    CAFFE_ENFORCE_EQ(slices * tile_x * tile_y, (channels + 3) / 4);\n  }\n\n  GLImage(int _width,\n          int _height,\n          int _channels,\n          int _tile_x,\n          int _tile_y,\n          bool _destroy,\n          std::function<const GLTexture*(int slice)> texture_loader)\n      : width(_width),\n        height(_height),\n        
channels(_channels),\n        data_size(sizeof(T)),\n        tile_x(_tile_x),\n        tile_y(_tile_y),\n        texture_width(_width * _tile_x),\n        texture_height(_height * _tile_y),\n        slices(channels_to_slices(_channels, _tile_x, _tile_y)),\n        textures(allocate_textures(slices, texture_loader)) {\n    CAFFE_ENFORCE_EQ(slices * tile_x * tile_y, (channels + 3) / 4);\n  }\n\n  GLImage()\n      : width(0),\n        height(0),\n        channels(0),\n        data_size(sizeof(T)),\n        tile_x(0),\n        tile_y(0),\n        texture_width(0),\n        texture_height(0),\n        slices(0){};\n\n  virtual ~GLImage() {\n    gl_log(GL_VERBOSE, \"deleting GLImage\\n\");\n    for (auto&& texture : textures) {\n      delete texture;\n    }\n  }\n};\n\ntemplate <typename T>\nclass GLImageVector {\n private:\n  std::vector<GLImage<T>*> images_;\n  int num_images_ = 0;\n  int width_ = 0;\n  int height_ = 0;\n  int channels_ = 0;\n  int tile_x_ = 0;\n  int tile_y_ = 0;\n\n public:\n  GLImage<T>* operator[](int index) const {\n    CAFFE_ENFORCE_LT(index, num_images_, \"Out of bounds when accessing GLImageVector\");\n    return images_[index];\n  }\n\n  void push_back(GLImage<T>* image) {\n    CAFFE_ENFORCE_EQ(image->channels, channels_);\n    CAFFE_ENFORCE_EQ(image->width, width_);\n    CAFFE_ENFORCE_EQ(image->height, height_);\n    CAFFE_ENFORCE_EQ(image->tile_x, tile_x_);\n    CAFFE_ENFORCE_EQ(image->tile_y, tile_y_);\n    images_.push_back(image);\n    CAFFE_ENFORCE_LE(images_.size(), num_images_);\n  }\n\n  int size() const { return images_.size(); }\n  int channels() const { return channels_; }\n  int width() const { return width_; }\n  int height() const { return height_; }\n  int tile_x() const { return tile_x_; }\n  int tile_y() const { return tile_y_; }\n  int slices() const { return size() > 0 ? 
images_[0]->slices : 0; }\n\n  GLImageVector(int num_images, int width, int height, int channels, int tile_x = 1, int tile_y = 1)\n      : num_images_(num_images),\n        width_(width),\n        height_(height),\n        channels_(channels),\n        tile_x_(tile_x),\n        tile_y_(tile_y) {}\n\n  GLImageVector() {}\n\n  ~GLImageVector() {\n    for (int i = 0; i < images_.size(); i++) {\n      delete images_[i];\n    }\n  }\n};\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/core/GLImageAllocator.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"GLImage.h\"\n#include \"GLPlainTexture.h\"\n\ntemplate <class T>\nclass GLImageAllocator {\n public:\n  static const GLTexture::Type& type;\n\n  GLImageAllocator() { gl_log(GL_VERBOSE, \"%s\\n\", __PRETTY_FUNCTION__); }\n\n  virtual ~GLImageAllocator() { gl_log(GL_VERBOSE, \"%s\\n\", __PRETTY_FUNCTION__); }\n\n  virtual GLImageVector<T>* newImage(\n      int num_images, int width, int height, int channels, int tile_x, int tile_y, bool is_output);\n\n  virtual GLImageVector<T>* newImage(\n      int num_images,\n      int width,\n      int height,\n      int channels,\n      int tile_x,\n      int tile_y,\n      std::function<const GLTexture*(const int width, const int height)> textureAllocator);\n\n  virtual GLImageVector<T>* ShareTexture(const GLuint textureID,\n                                         int num_images,\n                                         int width,\n                                         int height,\n                                         int channels,\n                                         int tile_x = 1,\n                                         int tile_y = 1);\n\n  static GLImageAllocator<T>* newGLImageAllocator();\n};\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/core/GLLogging.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include <stdarg.h>\n#include <stdio.h>\n\nenum { GL_ERR = -1, GL_LOG = 0, GL_VERBOSE = 1 };\n\nstatic constexpr int GL_LOG_LEVEL = GL_LOG;\n\nstatic inline int gl_log(int level, const char* format, ...) {\n  int r = 0;\n  if (level <= GL_LOG_LEVEL) {\n    va_list args;\n    va_start(args, format);\n    r = vfprintf(stderr, format, args);\n    va_end(args);\n  }\n  return r;\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/core/GLPBO.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"GLTexture.h\"\n#include <functional>\n\nclass GLPBO {\n  GLuint pboId = 0;\n  GLuint pboSize = 0;\n  GLuint pboFrameBuffer = 0;\n\n  ~GLPBO();\n\n  static GLPBO* pboContext;\n\n public:\n  void mapTextureData(GLuint _textureId,\n                      GLsizei _width,\n                      GLsizei _height,\n                      GLsizei _stride,\n                      GLsizei _channels,\n                      const GLTexture::Type& type,\n                      std::function<void(const void* buffer,\n                                         size_t width,\n                                         size_t height,\n                                         size_t stride,\n                                         size_t channels,\n                                         const GLTexture::Type& type)> process);\n\n  static GLPBO* getContext();\n};\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/core/GLPlainTexture.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"GLContext.h\"\n#include \"GLTexture.h\"\n\nclass GLPlainTexture : public GLTexture {\n private:\n  bool isOwner = true;\n\n public:\n  GLPlainTexture(const Type& type,\n                 const void* input,\n                 GLsizei width,\n                 GLsizei height,\n                 bool use_padding = false,\n                 GLint filter = GL_NEAREST,\n                 GLint wrap = GL_CLAMP_TO_EDGE);\n\n  GLPlainTexture(const Type& type,\n                 const GLuint textureID,\n                 GLsizei width,\n                 GLsizei height,\n                 bool use_padding = false,\n                 GLint filter = GL_NEAREST,\n                 GLint wrap = GL_CLAMP_TO_EDGE);\n\n  ~GLPlainTexture() {\n    if (glIsTexture(_textureId)) {\n      if (isOwner) {\n        gl_log(GL_VERBOSE, \"~GLPlainTexture() - deleting texture %d\\n\", _textureId);\n        glDeleteTextures(1, &_textureId);\n      }\n    } else {\n      gl_log(GL_ERR, \"not deleting texture %d\\n\", _textureId);\n    }\n  }\n\n  GLuint name() const { return _textureId; };\n\n  GLenum target() const { return GL_TEXTURE_2D; };\n\n  bool flipped() const { return false; };\n};\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/core/GLPredictor.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"GLImage.h\"\n#include \"caffe2/core/net.h\"\n#include \"caffe2/core/predictor.h\"\n\nnamespace caffe2 {\nclass GLPredictor : public Predictor {\n public:\n  GLPredictor(const NetDef& init_net,\n              const NetDef& run_net,\n              bool use_texture_input = false,\n              Workspace* parent = nullptr);\n\n  template <class T>\n  bool run(std::vector<GLImageVector<T>*>& inputs, std::vector<const GLImageVector<T>*>* outputs);\n\n  ~GLPredictor();\n};\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/core/GLTexture.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n#include \"GL.h\"\n#include \"GLLogging.h\"\n\nclass GLTexture {\n public:\n  struct Type {\n    const GLenum internalFormat;\n    const GLenum format;\n    const GLenum type;\n\n    int dataSize() const {\n      switch (type) {\n      case GL_UNSIGNED_INT:\n        return 4;\n      case GL_HALF_FLOAT:\n        return 2;\n      case GL_UNSIGNED_BYTE:\n        return 1;\n      default:\n        throw std::runtime_error(\"Unknown Texture Type\");\n      }\n    }\n\n    int channels() const {\n      switch (format) {\n      case GL_R8:\n        return 1;\n      case GL_RG8:\n        return 2;\n      // case GL_BGRA:\n      case GL_RG_INTEGER:\n      case GL_RGBA:\n        return 4;\n      default:\n        throw std::runtime_error(\"Unknown Texture Format\");\n      }\n    }\n  };\n\n  static const Type FP16;\n  static const Type FP16_COMPAT;\n  static const Type UI8;\n\n protected:\n  const Type& _type;\n\n  const GLsizei _width;\n  const GLsizei _height;\n  const GLsizei _stride;\n  const GLsizei _channels;\n  const bool _use_padding;\n\n  GLint _filter;\n  GLint _wrap;\n  GLuint _textureId;\n\n public:\n  GLTexture(const Type& type,\n            int width,\n            int height,\n            int stride,\n            bool use_padding,\n            GLint filter,\n            GLint wrap)\n      : _type(type),\n        _width(width),\n        _height(height),\n        _stride(stride),\n        _channels(type.channels()),\n        _use_padding(use_padding),\n        _filter(filter),\n        _wrap(wrap) {}\n\n  GLTexture(const Type& type, int width, int height, bool use_padding, GLint filter, GLint wrap)\n      : GLTexture(type,\n                  width,\n                  height,\n                  use_padding ? 
(width + 7) / 8 * 8 : width,\n                  use_padding,\n                  filter,\n                  wrap) {}\n\n  virtual ~GLTexture() {}\n  virtual GLuint name() const = 0;\n  virtual GLenum target() const = 0;\n  virtual bool flipped() const = 0;\n\n  virtual void map_read(std::function<void(const void* buffer,\n                                           size_t width,\n                                           size_t height,\n                                           size_t stride,\n                                           size_t channels,\n                                           const Type& type)> process) const;\n\n  virtual void map_load(std::function<void(void* buffer,\n                                           size_t width,\n                                           size_t height,\n                                           size_t stride,\n                                           size_t channels,\n                                           const Type& type)> process) const;\n\n  void loadData(const void* pixels) const;\n};\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/core/ImageAllocator.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"GLImageAllocator.h\"\n\nnamespace caffe2 {\n\ntemplate <class T>\nclass ImageAllocator {\n  GLImageAllocator<T>* glImageAllocator;\n\n public:\n  ImageAllocator() : glImageAllocator(GLImageAllocator<T>::newGLImageAllocator()) {}\n\n  virtual ~ImageAllocator() { delete glImageAllocator; }\n\n  GLImageVector<T>* newImage(\n      int num_images, int width, int height, int channels, bool is_output = false) {\n    const int tile_x = 1, tile_y = 1;\n    return glImageAllocator->newImage(\n        num_images, width, height, channels, tile_x, tile_y, is_output);\n  }\n\n  GLImageVector<T>* newImage(int num_images,\n                             int width,\n                             int height,\n                             int channels,\n                             int tile_x,\n                             int tile_y,\n                             bool is_output = false) {\n    return glImageAllocator->newImage(\n        num_images, width, height, channels, tile_x, tile_y, is_output);\n  }\n\n  GLImageVector<T>* newImage(\n      int num_images,\n      int width,\n      int height,\n      int channels,\n      int tile_x,\n      int tile_y,\n      std::function<const GLTexture*(const int width, const int height)> textureAllocator) {\n    return glImageAllocator->newImage(\n        num_images, width, height, channels, tile_x, tile_y, textureAllocator);\n  }\n};\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/core/arm_neon_support.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"caffe2/core/common.h\"\n\n#ifdef __ARM_NEON__\n#if CAFFE2_IOS\n#include \"arm_neon.h\"\n#elif CAFFE2_ANDROID\n#include \"caffe2/mobile/contrib/opengl/android/arm_neon_support.h\"\n#endif\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/core/rewrite_net.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n#include \"GLPredictor.h\"\n#include \"caffe2/core/predictor.h\"\n\nnamespace caffe2 {\nbool tryConvertToOpenGL(const NetDef& initNet,\n                        const NetDef& predictNet,\n                        NetDef* glPredictNet,\n                        bool useTextureInput = false,\n                        bool useTiling = false,\n                        bool runFusion = true);\n\n// Exposed for testing\nNetDef rewritePredictNetForOpenGL(const NetDef& predictNet,\n                                  bool useTextureInput = false,\n                                  bool useTiling = false,\n                                  bool runFusion = true);\nvoid dumpDefForOpenGL(const NetDef& net);\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/ios/IOSGLContext.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"../core/GLContext.h\"\n#include \"../core/GLTexture.h\"\n\n#import <CoreVideo/CoreVideo.h>\n\nclass IOSGLContext : public GLContext {\n  void* oglContext;\n  void* oldContext;\n  CVOpenGLESTextureCacheRef textureCache;\n\n public:\n  IOSGLContext();\n  ~IOSGLContext();\n\n  const GLTexture* createNewTexture(CVPixelBufferRef pixelBuffer, const GLTexture::Type& type);\n  void set_context();\n  void reset_context();\n  void flush_context();\n};\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/ios/IOSGLImageAllocator.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"../core/GLImageAllocator.h\"\n\n#import <CoreVideo/CoreVideo.h>\n\ntemplate <class T>\nclass IOSGLImageAllocator : public GLImageAllocator<T> {\n  static const GLTexture::Type& type;\n\n  std::vector<CVPixelBufferRef> pixelbuffers;\n\n public:\n  static const FourCharCode pixelFormat;\n\n  IOSGLImageAllocator() : GLImageAllocator<T>() { gl_log(GL_VERBOSE, \"%s\\n\", __PRETTY_FUNCTION__); }\n\n  ~IOSGLImageAllocator() {\n    gl_log(GL_VERBOSE, \"%s\\n\", __PRETTY_FUNCTION__);\n\n    for (auto&& pixelbuffer : pixelbuffers) {\n      CFRelease(pixelbuffer);\n    }\n  }\n\n  GLImageVector<T>* newImage(int num_images,\n                             int width,\n                             int height,\n                             int channels,\n                             int tile_x,\n                             int tile_y,\n                             bool useCVPixelBuffer);\n};\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/ios/IOSGLTexture.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"../core/GLContext.h\"\n#include \"../core/GLTexture.h\"\n\n#import <CoreVideo/CoreVideo.h>\n\nclass IOSGLTexture : public GLTexture {\n  CVOpenGLESTextureRef textureRef;\n\n  IOSGLTexture(const Type& type,\n               CVOpenGLESTextureCacheRef textureCache,\n               CVPixelBufferRef sourceImage,\n               GLint _filter = GL_NEAREST,\n               GLint _wrap = GL_CLAMP_TO_EDGE);\n\n  friend class IOSGLContext;\n\n public:\n  const CVPixelBufferRef sourceImage;\n\n  ~IOSGLTexture() { CFRelease(textureRef); }\n\n  void map_buffer(std::function<void(void* buffer,\n                                     size_t width,\n                                     size_t height,\n                                     size_t stride,\n                                     size_t channels,\n                                     const Type& type)> process) const;\n\n  virtual void map_read(std::function<void(const void* buffer,\n                                           size_t width,\n                                           size_t height,\n                                           size_t stride,\n                                           size_t channels,\n                                           const Type& type)> process) const;\n\n  virtual void map_load(std::function<void(void* buffer,\n                                           size_t width,\n                                           size_t height,\n                                           size_t stride,\n                                           size_t channels,\n                                           const Type& type)> process) const;\n\n  GLuint name() const { return CVOpenGLESTextureGetName(textureRef); }\n  GLenum target() const { return CVOpenGLESTextureGetTarget(textureRef); };\n  bool flipped() const { return CVOpenGLESTextureIsFlipped(textureRef); };\n\n  static CVPixelBufferRef 
createCVPixelBuffer(OSType pixelType, int32_t width, int32_t height);\n};\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/operators/gl_tiling_utils.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n// pragma once\n#include <cmath>\n\nnamespace {\nstruct point {\n  int x;\n  int y;\n};\n\nstruct tile_descriptor {\n  point tile_dims;\n  point tile_size;\n  int tiles;\n};\n} // namespace\n\nnamespace caffe2 {\ninline static void squareFactors(int N, int& r1, int& r2) {\n  int f = sqrt(N);\n\n  if (f * f == N) {\n    r1 = r2 = f;\n  } else {\n    while (N % f != 0) {\n      f--;\n    }\n    r1 = N / f;\n    r2 = f;\n  }\n}\n\ninline static void computeOutputTiles(int output_channels, int& output_tile_x, int& output_tile_y) {\n  squareFactors((output_channels + 3) / 4, output_tile_x, output_tile_y);\n}\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/test/TestGLConvolution.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\nvoid TestGLConvolution();\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/opengl/test/opengl_test.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#include \"caffe2/proto/caffe2.pb.h\"\n\nnamespace caffe2 {\nvoid testOpenGL();\nvoid compareModelsForOpenGL(std::string name,\n                            const NetDef& initNet,\n                            NetDef predictNet,\n                            int width,\n                            int height,\n                            int channel,\n                            std::string input_type,\n                            std::string input_order);\n\nvoid compareBatchedToTiledModels(std::string name,\n                                 const NetDef& initNet,\n                                 NetDef predictNet,\n                                 int width,\n                                 int height,\n                                 int channel,\n                                 std::string input_type,\n                                 std::string input_order);\n\nint runModelBenchmarks(caffe2::NetDef& init_net,\n                       caffe2::NetDef& predict_net,\n                       int warm_up_runs,\n                       int main_runs,\n                       int channel,\n                       int height,\n                       int width,\n                       std::string input_type,\n                       std::string input_order,\n                       std::string engine,\n                       bool run_individual = false,\n                       bool use_texture_input = false,\n                       bool use_tiling = false,\n                       bool run_fusion = true);\n\ntypedef enum {\n  AveragePool,\n  MaxPool,\n  Conv,\n  ConvTranspose,\n  ConvPRelu,\n  ConvTransposePRelu,\n  ConvRelu,\n  ConvTransposeRelu\n} PoolOp;\n\nvoid testOpenGLConv(int N,\n                    int C,\n                    int H,\n                    int W,\n                    int K, // output_channels\n                    int kernel_h,\n                    int kernel_w,\n                 
   int pad,\n                    int stride,\n                    PoolOp poolOp,\n                    float error,\n                    bool random_input = true,\n                    int input_batch_size = 1,\n                    int output_batch_size = 1,\n                    int input_tile_x = 1,\n                    int input_tile_y = 1,\n                    bool tiling = false);\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/snpe/snpe_ffi.h",
    "content": "#ifndef CAFFE2_SNPE_FFI_H_\n#define CAFFE2_SNPE_FFI_H_\n\n#include <stdint.h>\n#include <string>\n\nnamespace caffe2 {\n\nstd::string& gSNPELocation();\n\nconst char* const snpe_ffi_so = \"libsnpe_ffi.so\";\n\n}\n\nextern \"C\" {\n\nbool snpe_has_gpu();\n\nvoid* snpe_create(const uint8_t* container, size_t size, const char* input_name);\n\nvoid snpe_destroy(void* ctx);\n\nvoid snpe_get_input_dims(void* ctx, size_t const** dims, size_t* size);\n\nvoid snpe_run(void* ctx,\n              const float* inputData,\n              size_t inputSize,\n              size_t const** outputDims,\n              size_t* outputSize);\n\nvoid snpe_copy_output_to(void* ctx, float* outputData);\n\n}\n\n#endif  // CAFFE2_SNPE_FFI_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/ulp2/ulp.h",
    "content": "#pragma once\n\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/tensor.h\"\n\nnamespace caffe2 {\n\nconstexpr size_t k2b1bXBits = 2;\n\nstruct ConvArgs {\n  int stride_w{1};\n  int stride_h{1};\n  int pad_l{0};\n  int pad_t{0};\n  int pad_b{0};\n  int pad_r{0};\n};\n\nusing ParallelFor = std::function<void(size_t, std::function<void(size_t)>)>;\n\nstruct QConvState {\n  std::vector<std::unique_ptr<TensorCPU>> XQs;\n  std::vector<std::unique_ptr<TensorCPU>> YQs;\n  std::unique_ptr<TensorCPU> WQ;\n  // architecture-dependent whether packing is used.\n  std::unique_ptr<TensorCPU> WQPacked;\n  std::unique_ptr<TensorCPU> WQN;\n  std::unique_ptr<TensorCPU> WQL1Norm;\n  // Useful for e.g. incomplete tiles\n  std::unique_ptr<TensorCPU> scratch;\n  std::unique_ptr<TensorCPU> scratchColBuffer;\n\n  std::unique_ptr<TensorCPU> bias;\n\n  ParallelFor parallelFor{nullptr};\n};\n\nvoid uniformQuantize2b1b(const TensorCPU& X,\n                         const std::vector<std::unique_ptr<TensorCPU>>& XQ,\n                         float offset,\n                         float inter_center_distance);\n\nvoid qpad_zero(const ConvArgs& args, const TensorCPU& X, TensorCPU* Y);\n\ninline size_t divRoundUp(size_t x, size_t d) { return (x + d - 1) / d; }\n\nvoid signQuantize(const TensorCPU& X, TensorCPU* XQ);\nvoid filterNormalization11(const TensorCPU& WQ, TensorCPU* WQN);\nvoid filterNormalizationL1(const TensorCPU& W, TensorCPU* WL1);\nstd::unique_ptr<QConvState> create2b1bConvState(Workspace* ws,\n                                                const TensorCPU& W,\n                                                const TensorCPU* b);\nvoid run2b1bConvGeneric(QConvState* state, const ConvArgs& args, const TensorCPU& X, TensorCPU* Y);\nvoid qconv(\n    const ConvArgs& args, const TensorCPU& X, const TensorCPU& W, const TensorCPU* b, TensorCPU* Y);\nvoid qim2col(const ConvArgs& args, const TensorCPU& XQ, const TensorCPU& WQ, TensorCPU* XQcol);\n\nvoid 
run2b1bUnification(QConvState* state,\n                        size_t N,\n                        size_t C,\n                        const float* WQNVdata,\n                        const float* YQs0Vdata,\n                        const float* YQs1Vdata,\n                        size_t YQstride,\n                        float* Ydata,\n                        size_t Ystride,\n                        const float* bias);\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mobile/contrib/ulp2/ulp_neon.h",
    "content": "#pragma once\n\n#include \"ulp.h\"\n\nnamespace caffe2 {\n\nconstexpr size_t kGEMMTileSize = 64;\nconstexpr size_t kGEMMTileDepthBytes = 16;\n\nbool run2b1bConvNeon(QConvState* state, const ConvArgs& args, const TensorCPU& X, TensorCPU* Y);\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mpi/mpi_common.h",
    "content": "#ifndef CAFFE2_MPI_MPI_COMMON_H_\n#define CAFFE2_MPI_MPI_COMMON_H_\n\n#include <mpi.h>\n#include <mutex>\n\n#include \"caffe2/core/logging.h\"\n\nnamespace caffe2 {\n\ninline void CheckInitializedMPI() {\n  int flag;\n  MPI_Initialized(&flag);\n  CAFFE_ENFORCE(flag, \"MPI does not seem to have been initialized.\");\n}\n\ntemplate <typename T> class MPIDataTypeWrapper;\n\n#define MPI_DATATYPE_WRAPPER(c_type, mpi_type)                                 \\\n  template<> class MPIDataTypeWrapper<c_type> {                                \\\n   public:                                                                     \\\n    inline static MPI_Datatype type() { return mpi_type; }                     \\\n  };\n\nMPI_DATATYPE_WRAPPER(char, MPI_CHAR)\nMPI_DATATYPE_WRAPPER(float, MPI_FLOAT)\nMPI_DATATYPE_WRAPPER(double, MPI_DOUBLE)\n// Note(Yangqing): as necessary, add more specializations.\n#undef MPI_DATATYPE_WRAPPER\n\n// For all Caffe MPI calls, we will wrap it inside an MPI mutex lock guard.\nstd::mutex& MPIMutex();\n\n#define MPI_CHECK(condition)                                 \\\n  do {                                                       \\\n    std::lock_guard<std::mutex> guard(::caffe2::MPIMutex()); \\\n    int error = (condition);                                 \\\n    CAFFE_ENFORCE(                                           \\\n        error == MPI_SUCCESS,                                \\\n        \"Caffe2 MPI Error at: \",                             \\\n        __FILE__,                                            \\\n        \":\",                                                 \\\n        __LINE__,                                            \\\n        \": \",                                                \\\n        error);                                              \\\n  } while (0)\n\n/**\n * @brief Gets the global MPI communicator used by Caffe2. 
In default, this\n * is MPI_COMM_WORLD unless you call SetGlobalMPIComm().\n */\nMPI_Comm GlobalMPIComm();\n\n/**\n * @brief Sets the global MPI communicator. Caffe2 takes over the ownership\n * of the passed in communicator.\n */\nvoid SetGlobalMPIComm(MPI_Comm new_comm);\n\n/**\n * @brief A helper function to return the size of the given communicator.\n */\nint MPICommSize(MPI_Comm comm);\n\n/**\n * @brief A helper function to return the rank of the given communicator.\n */\nint MPICommRank(MPI_Comm comm);\n\n/**\n * @brief A simple wrapper over an MPI common world.\n */\nclass MPICommonWorldWrapper {\n public:\n  /**\n   * @brief Creates a common world wrapper.\n   *\n   * The new common world is created by taking the existing communicator\n   * passed in as src_comm, and splitting it using the color and the rank\n   * specified. In default, we will split from Caffe2's global communicator,\n   * and use color 0 as well as rank implicitly given by src_comm. As a result,\n   * the default constructor basically creates a comm identical to the source\n   * comm world.\n   */\n  explicit MPICommonWorldWrapper(\n      MPI_Comm src_comm = MPI_COMM_NULL,\n      int color = 0,\n      int rank = -1) {\n    if (src_comm == MPI_COMM_NULL) {\n      src_comm = GlobalMPIComm();\n    }\n    if (rank == -1) {\n      MPI_CHECK(MPI_Comm_rank(src_comm, &rank));\n    }\n    MPI_CHECK(MPI_Comm_split(src_comm, color, rank, &comm_));\n    MPI_CHECK(MPI_Comm_size(comm_, &size_));\n    MPI_CHECK(MPI_Comm_rank(comm_, &rank_));\n  }\n\n  ~MPICommonWorldWrapper() {\n    int ret;\n    MPI_CHECK(MPI_Finalized(&ret));\n    if (!ret) {\n      MPI_Comm_free(&comm_);\n    }\n  }\n\n  /**\n   * @brief Returns the common world held by the wrapper.\n   */\n  inline MPI_Comm comm() const {\n    return comm_;\n  }\n  /**\n   * @brief Returns the size of the world.\n   */\n  inline int size() const {\n    return size_;\n  }\n  /**\n   * @brief Returns the rank of this process in the world.\n   */\n  
inline int rank() const {\n    return rank_;\n  }\n\n private:\n  MPI_Comm comm_;\n  int size_;\n  int rank_;\n};\n\n/**\n * A function used to perform peer setup so one does not need to use\n * mpirun / mpiexec to run the binary. Note that if you use mpirun or mpiexec\n * to set up the common world, do not use this function - MPI_Init would have\n * already set that up.\n *\n * This also assumes that you have a common path (like NFS) that multiple\n * instances can read from.\n *\n * Inputs:\n *   replicas (int): the number of replicas that mpi will run with.\n *   role (string): the role of this process, \"server\" or \"client\".\n *   job_path (string): a file name that the server will write its port into\n *       and the clients will read the server's port from.\n */\nvoid MPISetupPeers(\n    const int replicas,\n    const string& role,\n    const string& job_path);\n}  // namespace caffe2\n\n#endif  // CAFFE2_MPI_MPI_COMMON_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/mpi/mpi_ops.h",
    "content": "#ifndef CAFFE2_MPI_MPI_OPS_H_\n#define CAFFE2_MPI_MPI_OPS_H_\n\n#include <mpi.h>\n\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/mpi/mpi_common.h\"\n\nnamespace caffe2 {\n\n// TODO(jiayq): if needed, write up the use of color and key with MPI split.\n// Currently, the operator simply creates a communicator that has the\n// same topology as the Caffe2 global communicator.\ntemplate <class Context>\nclass MPICreateCommonWorldOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  MPICreateCommonWorldOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    OperatorBase::Outputs()[0]->Reset(new MPICommonWorldWrapper());\n    return true;\n  }\n};\n\ntemplate <class Context>\nclass MPIBroadcastOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  MPIBroadcastOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        root_(OperatorBase::template GetSingleArgument<int>(\"root\", 0)) {}\n  ~MPIBroadcastOp() {}\n\n  bool RunOnDevice() override {\n    MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(0).comm();\n    CAFFE_ENFORCE(\n        OperatorBase::OutputIsType<Tensor<Context>>(0),\n        \"Output is of wrong type.\");\n    auto* output = Output(0);\n    // Make sure that output is already allocated.\n    CAFFE_ENFORCE(\n        output->size() > 0,\n        \"Broadcast op uses in-place operation so the output \"\n        \"should be already allocated.\");\n    MPI_CHECK(MPI_Bcast(\n        output->raw_mutable_data(),\n        output->nbytes(),\n        MPIDataTypeWrapper<char>::type(),\n        root_,\n        comm));\n    return true;\n  }\n\n protected:\n  int root_;\n};\n\n// MPIReduceOp does Reduce using MPI. 
Currently, only SUM is supported.\ntemplate <typename T, class Context>\nclass MPIReduceOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  MPIReduceOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        root_(OperatorBase::template GetSingleArgument<int>(\"root\", 0)) {}\n  ~MPIReduceOp() {}\n\n  bool RunOnDevice() override {\n    MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(0).comm();\n    auto& input = Input(1);\n    auto* output = Output(0);\n    output->ResizeLike(input);\n    MPI_CHECK(MPI_Reduce(\n        const_cast<T*>(input.template data<T>()),\n        output->template mutable_data<T>(),\n        input.size(),\n        MPIDataTypeWrapper<T>::type(),\n        MPI_SUM,\n        root_,\n        comm));\n    return true;\n  }\n\n protected:\n  int root_;\n};\n\n// MPIAllgatherOp does MPIAllgather using MPI.\ntemplate <typename T, class Context>\nclass MPIAllgatherOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(MPIAllgatherOp);\n\n  bool RunOnDevice() override {\n    MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(0).comm();\n    auto& input = Input(1);\n    auto* output = Output(0);\n    vector<TIndex> output_dims = input.dims();\n    output_dims[0] *= OperatorBase::Input<MPICommonWorldWrapper>(0).size();\n    output->Resize(output_dims);\n    MPI_CHECK(MPI_Allgather(\n        const_cast<T*>(input.template data<T>()),\n        input.size(),\n        MPIDataTypeWrapper<T>::type(),\n        output->template mutable_data<T>(),\n        input.size(),\n        MPIDataTypeWrapper<T>::type(),\n        comm));\n    return true;\n  }\n};\n\n// MPIAllreduceOp does MPIAllreduce using MPI. 
Currently, only SUM is supported.\ntemplate <typename T, class Context>\nclass MPIAllreduceOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(MPIAllreduceOp);\n\n  bool RunOnDevice() override {\n    MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(0).comm();\n    auto& input = Input(1);\n    auto* output = Output(0);\n    output->ResizeLike(input);\n    void* source;\n    if (output->template mutable_data<T>() == input.template data<T>()) {\n      // We are doing in-place call. Special case handling.\n      source = MPI_IN_PLACE;\n    } else {\n      // Normal allreduce takes the source from the input.\n      source = const_cast<T*>(input.template data<T>());\n    }\n    MPI_CHECK(MPI_Allreduce(\n        source,\n        output->template mutable_data<T>(),\n        input.size(),\n        MPIDataTypeWrapper<T>::type(),\n        MPI_SUM,\n        comm));\n    return true;\n  }\n};\n\ntemplate <class Context>\nclass MPISendTensorOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  MPISendTensorOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        OP_SINGLE_ARG(int, \"dst\", dst_, MPI_ANY_SOURCE),\n        OP_SINGLE_ARG(int, \"tag\", tag_, MPI_ANY_TAG),\n        OP_SINGLE_ARG(bool, \"raw_buffer\", raw_buffer_, false) {\n    CAFFE_ENFORCE(raw_buffer_, \"non-raw-buffer transfer not supported yet.\");\n    CAFFE_ENFORCE(\n        dst_ != MPI_ANY_SOURCE || def.input_size() == 4,\n        \"You should explicitly specify the to rank either via \"\n        \"argument or via input blobs.\");\n    CAFFE_ENFORCE(\n        tag_ != MPI_ANY_TAG || def.input_size() == 4,\n        \"You should explicitly specify the tag either via \"\n        \"argument or via input blobs.\");\n  }\n\n  bool RunOnDevice() override {\n    MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(COMM).comm();\n    auto& input = Input(INPUT);\n    if (InputSize() == 4) 
{\n      dst_ = OperatorBase::Input<TensorCPU>(DST).template data<int>()[0];\n      tag_ = OperatorBase::Input<TensorCPU>(TAG).template data<int>()[0];\n    }\n    if (raw_buffer_) {\n      // We need to do a const cast to cope with the fact that, before OpenMPI\n      // 1.7, MPI_Send expects a non-const pointer although it uses it in a\n      // const way.\n      MPI_CHECK(MPI_Send(\n          const_cast<void*>(input.raw_data()),\n          input.nbytes(),\n          MPI_CHAR,\n          dst_,\n          tag_,\n          comm));\n    } else {\n      CAFFE_NOT_IMPLEMENTED;\n    }\n    return true;\n  }\n\n protected:\n  int dst_;\n  int tag_;\n  bool raw_buffer_;\n\n  INPUT_TAGS(COMM, INPUT, DST, TAG);\n};\n\ntemplate <class Context>\nclass MPIReceiveTensorOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  MPIReceiveTensorOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        OP_SINGLE_ARG(int, \"src\", src_, MPI_ANY_SOURCE),\n        OP_SINGLE_ARG(int, \"tag\", tag_, MPI_ANY_TAG),\n        OP_SINGLE_ARG(bool, \"raw_buffer\", raw_buffer_, false) {\n    CAFFE_ENFORCE(raw_buffer_, \"non-raw-buffer transfer not supported yet.\");\n  }\n\n  bool RunOnDevice() override {\n    MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(COMM).comm();\n    if (InputSize() == 4) {\n      src_ = OperatorBase::Input<TensorCPU>(SRC_IN).template data<int>()[0];\n      tag_ = OperatorBase::Input<TensorCPU>(TAG_IN).template data<int>()[0];\n    }\n    MPI_Status status;\n    if (raw_buffer_) {\n      auto* output = Output(OUTPUT);\n      MPI_CHECK(MPI_Recv(\n          output->raw_mutable_data(),\n          output->nbytes(),\n          MPI_CHAR,\n          src_,\n          tag_,\n          comm,\n          &status));\n    } else {\n      CAFFE_NOT_IMPLEMENTED;\n    }\n    auto* src_out = OperatorBase::Output<TensorCPU>(SRC_OUT);\n    src_out->Resize();\n    src_out->template mutable_data<int>()[0] = 
status.MPI_SOURCE;\n    auto* tag_out = OperatorBase::Output<TensorCPU>(TAG_OUT);\n    tag_out->Resize();\n    tag_out->template mutable_data<int>()[0] = status.MPI_TAG;\n    return true;\n  }\n\n protected:\n  int src_;\n  int tag_;\n  bool raw_buffer_;\n  INPUT_TAGS(COMM, INPUT, SRC_IN, TAG_IN);\n  OUTPUT_TAGS(OUTPUT, SRC_OUT, TAG_OUT);\n};\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_MPI_MPI_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/accumulate_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_ACCUMULATE_OP_H_\n#define CAFFE2_OPERATORS_ACCUMULATE_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass AccumulateOp final : public Operator<Context> {\n public:\n  AccumulateOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        gamma_(static_cast<T>(\n            OperatorBase::template GetSingleArgument<float>(\"gamma\", 1.0))) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    auto* output = Output(0);\n    if (output->dims() != input.dims()) {\n      LOG(INFO) << \"Reshaping and initializing output.\";\n      output->ResizeLike(input);\n      math::Set<T, Context>(\n          output->size(), 0, output->template mutable_data<T>(), &context_);\n    }\n    math::Axpby<T, Context>(\n        input.size(),\n        static_cast<T>(1),\n        input.template data<T>(),\n        gamma_,\n        output->template mutable_data<T>(),\n        &context_);\n    return true;\n  }\n\n protected:\n  T gamma_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_ACCUMULATE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/accuracy_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_ACCURACY_OP_H_\n#define CAFFE2_OPERATORS_ACCURACY_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass AccuracyOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  AccuracyOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        top_k_(OperatorBase::GetSingleArgument<int>(\"top_k\", 1)) {}\n        \n  bool RunOnDevice() override;\n\n protected:\n  int top_k_; \n  INPUT_TAGS(PREDICTION, LABEL);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_ACCURACY_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/apmeter_op.h",
    "content": "#ifndef CAFFE2_MAP_OP_H_\n#define CAFFE2_MAP_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass APMeterOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  APMeterOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        buffer_size_(\n            OperatorBase::GetSingleArgument<int32_t>(\"buffer_size\", 1000)),\n        buffer_used_(0) {}\n\n  bool RunOnDevice() override;\n\n protected:\n  using BufferDataType = std::pair<float, int>;\n  // Buffer the predictions for each class\n  std::vector<std::vector<BufferDataType>> buffers_;\n  // Capacity of the buffer\n  int buffer_size_;\n  // Used buffer\n  int buffer_used_;\n\n  INPUT_TAGS(PREDICTION, LABEL);\n\n protected:\n  // Buffer predictions for N sample and D classes\n  void\n  BufferPredictions(const float* Xdata, const int* labelData, int N, int D);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_MAP_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/batch_box_cox_op.h",
    "content": "#ifndef CAFFE_OPERATORS_BATCH_BOX_COX_OPS_H_\n#define CAFFE_OPERATORS_BATCH_BOX_COX_OPS_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass BatchBoxCoxOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  BatchBoxCoxOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<float, double>>::call(this, Input(DATA));\n  }\n\n  template <typename T>\n  bool DoRunWithType();\n\n protected:\n  INPUT_TAGS(DATA, LAMBDA1, LAMBDA2);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE_OPERATORS_BATCH_BOX_COX_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/batch_gather_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_BATCH_GATHER_OPS_H_\n#define CAFFE2_OPERATORS_BATCH_GATHER_OPS_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass BatchGatherOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(BatchGatherOp)\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, OperatorBase::Input<TensorCPU>(INDICES));\n  }\n\n  template <typename TInd>\n  bool DoRunWithType() {\n    auto& data = Input(DATA);\n    auto& indices = Input(INDICES);\n    auto* output = Output(0);\n\n    CAFFE_ENFORCE_GE(data.ndim(), 2, \"DATA should be at least 2-D\");\n\n    vector<TIndex> shape;\n    shape.push_back(data.dim(0));\n    shape.insert(shape.end(), indices.dims().begin(), indices.dims().end());\n    shape.insert(shape.end(), data.dims().begin() + 2, data.dims().end());\n    output->Resize(shape);\n\n    auto block_size = data.size_from_dim(2);\n    auto block_bytesize = block_size * data.meta().itemsize();\n    auto N = indices.size();\n    auto data_batch_bytesize = data.size_from_dim(1) * data.meta().itemsize();\n    auto gathered_batch_bytesize =\n        N * data.size_from_dim(2) * data.meta().itemsize();\n    const TInd* idxs = indices.template data<TInd>();\n    auto src_base = static_cast<const char*>(data.raw_data());\n    auto out = static_cast<char*>(output->raw_mutable_data(data.meta()));\n\n    for (auto batch = 0; batch < data.dim(0); ++batch) {\n      for (auto i = 0; i < N; ++i) {\n        auto idx = idxs[i];\n        CAFFE_ENFORCE(\n            0 <= idx && idx < data.dim(1),\n            \"INDICES element is out of DATA bounds, id=\",\n            idx,\n            \" data_dim=\",\n            data.dim(1));\n        auto src =\n            src_base + idx * block_bytesize + batch * data_batch_bytesize;\n        auto dst 
= out + i * block_bytesize + batch * gathered_batch_bytesize;\n        context_.template CopyItems<Context, Context>(\n            data.meta(), block_size, src, dst);\n      }\n    }\n    return true;\n  }\n\n  INPUT_TAGS(DATA, INDICES);\n};\n\ntemplate <class Context>\nclass BatchGatherGradientOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(BatchGatherGradientOp);\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, OperatorBase::Input<TensorCPU>(INDICES));\n  }\n\n  template <typename TInd>\n  bool DoRunWithType() {\n    return DispatchHelper<\n        TensorTypes2<float, GenericTensorImplementation>,\n        TInd>::call(this, Input(DATA));\n  }\n\n  template <typename TInd, typename TData>\n  bool DoRunWithType2() {\n    auto& data = Input(DATA);\n    auto& indices = Input(INDICES);\n    auto& grad = Input(GRAD);\n    auto* output = Output(0);\n\n    CAFFE_ENFORCE_GE(data.ndim(), 2, \"DATA should be at least 2-D\");\n    CAFFE_ENFORCE_EQ(\n        data.dim(0), grad.dim(0), \"batch sizes should be the same\");\n\n    output->ResizeLike(data);\n    TData* out_data = output->template mutable_data<TData>();\n    memset(out_data, 0, output->nbytes());\n\n    const TData* grad_data = grad.template data<TData>();\n\n    auto block_size = data.size_from_dim(2);\n    auto N = indices.size();\n    auto data_batch_size = data.size_from_dim(1);\n    auto gathered_batch_size = N * data.size_from_dim(2);\n    const TInd* idxs = indices.template data<TInd>();\n\n    for (auto batch = 0; batch < grad.dim(0); ++batch) {\n      for (auto i = 0; i < N; ++i) {\n        auto idx = idxs[i];\n        CAFFE_ENFORCE(\n            0 <= idx && idx < data.dim(1),\n            \"INDICES element is out of DATA bounds, id=\",\n            idx,\n            \" data_dim=\",\n            data.dim(1));\n        math::Add(\n            block_size,\n            out_data + idx * 
block_size + batch * data_batch_size,\n            grad_data + i * block_size + batch * gathered_batch_size,\n            out_data + idx * block_size + batch * data_batch_size,\n            &context_);\n      }\n    }\n    return true;\n  }\n\n  template <typename TInd>\n  bool DoRunWithOtherType2() {\n    CAFFE_THROW(\n        \"BatchGatherGradient is not implemented on tensor of type \",\n        Input(DATA).meta().name(),\n        \"Consider adding it a type in the list DispatchHelper or implementing \"\n        \"a generic version (which won't work for duplicated indices though)\");\n  }\n\n  INPUT_TAGS(DATA, INDICES, GRAD);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_BATCH_GATHER_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/batch_matmul_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_MATMUL_OP_H_\n#define CAFFE2_OPERATORS_MATMUL_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context, class Engine = DefaultEngine>\nclass BatchMatMulOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  BatchMatMulOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        trans_a_(OperatorBase::GetSingleArgument<int>(\"trans_a\", 0)),\n        trans_b_(OperatorBase::GetSingleArgument<int>(\"trans_b\", 0)),\n        use_scratch_(OperatorBase::GetSingleArgument<int>(\"use_scratch\", 0)) {\n    if (use_scratch_)\n      scratch_ = std::make_shared<Tensor<Context> >();\n  }\n  ~BatchMatMulOp() {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<float>>::call(this, Input(0));\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    const auto& A = Input(0);\n    const auto& B = Input(1);\n    auto* Y = Output(0);\n\n    CAFFE_ENFORCE_EQ(A.ndim(), 3);\n    CAFFE_ENFORCE_EQ(B.ndim(), 3);\n    CAFFE_ENFORCE_EQ(A.dim32(0), B.dim32(0));\n\n    int a_dim0, a_dim1, b_dim0, b_dim1;\n\n    if (trans_a_) {\n      a_dim0 = A.dim32(2);\n      a_dim1 = A.dim32(1);\n    } else {\n      a_dim0 = A.dim32(1);\n      a_dim1 = A.dim32(2);\n    }\n\n    if (trans_b_) {\n      b_dim0 = B.dim32(2);\n      b_dim1 = B.dim32(1);\n    } else {\n      b_dim0 = B.dim32(1);\n      b_dim1 = B.dim32(2);\n    }\n\n    // Error checking\n    CAFFE_ENFORCE(\n        a_dim1 == b_dim0,\n        \"Dimension mismatch: \",\n        trans_a_ ? \"trans(A): \" : \"A: \",\n        a_dim0,\n        \" \",\n        a_dim1,\n        trans_b_ ? 
\", trans(B): \" : \", B: \",\n        b_dim0,\n        \" \",\n        b_dim1);\n\n    Y->Resize(A.dim(0), a_dim0, b_dim1);\n\n    if (!A.dim(0)) {\n      Y->template mutable_data<T>(); // create output tensor\n      return true;\n    }\n\n    // Y = A * B\n    math::GemmBatched<T, Context, Engine>(\n        trans_a_ ? CblasTrans : CblasNoTrans,\n        trans_b_ ? CblasTrans : CblasNoTrans,\n        A.size(),\n        A.dim32(0),\n        B.size(),\n        B.dim32(0),\n        a_dim0, // M\n        b_dim1, // N\n        a_dim1, // K\n        1,\n        A.template data<T>(),\n        B.template data<T>(),\n        0,\n        Y->template mutable_data<T>(),\n        &context_,\n        use_scratch_ ? scratch_.get() : nullptr);\n    return true;\n  }\n\n protected:\n  bool trans_a_;\n  bool trans_b_;\n\n  bool use_scratch_;\n  std::shared_ptr<Tensor<Context> > scratch_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_MATMUL_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/boolean_mask_ops.h",
    "content": "#ifndef BOOLEAN_MASK_OPS_H\n#define BOOLEAN_MASK_OPS_H\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/utils/conversions.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass BooleanMaskOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  BooleanMaskOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override;\n};\n\ntemplate <class Context>\nclass SequenceMaskOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  explicit SequenceMaskOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        axis_(OperatorBase::GetSingleArgument<int>(\"axis\", 1)),\n        radius_(OperatorBase::GetSingleArgument<int>(\"radius\", 10)),\n        grad_(OperatorBase::GetSingleArgument<bool>(\"grad\", false)),\n        fill_val_(OperatorBase::GetSingleArgument<float>(\n            \"fill_val\",\n            -1.0f * std::numeric_limits<float>::infinity())) {\n    // Mode argument is required\n    mode_ = GetArgument(operator_def, \"mode\").s();\n  }\n\n  bool RunOnDevice() override;\n\n  template <typename T>\n  bool DoRunWithType();\n\n private:\n  int axis_;\n  int radius_;\n  std::string mode_;\n  bool grad_;\n  float fill_val_;\n};\n\n} // caffe2\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/boolean_unmask_ops.h",
    "content": "#ifndef BOOLEAN_UNMASK_OPS_H\n#define BOOLEAN_UNMASK_OPS_H\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass BooleanUnmaskOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(BooleanUnmaskOp)\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n};\n\n} // namespace caffe2\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/cast_op.h",
    "content": "#pragma once\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/cast.h\"\n#include \"caffe2/utils/conversions.h\"\n#include \"caffe2/utils/math.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/types.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass CastOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  CastOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {\n    const ArgumentHelper helper(operator_def);\n    TensorProto_DataType to = cast::GetCastDataType(helper, \"to\");\n    TensorProto_DataType from = cast::GetCastDataType(helper, \"from_type\");\n\n    SetBody(to);\n  }\n\n  bool RunOnDevice() override {\n    return (this->*body_)();\n  }\n\n  // Allow for Context-specific implementations\n  void SetBody(TensorProto_DataType to);\n\n  template <typename DstType>\n  bool DoRunWithDstType();\n\n  template <typename DstType, typename SrcType>\n  bool DoRunWithType() {\n    auto& input = Input(0);\n    auto* output = Output(0);\n    output->ResizeLike(input);\n    const auto* data = input.template data<SrcType>();\n    auto* out = output->template mutable_data<DstType>();\n    auto N = input.size();\n    for (TIndex i = 0; i < N; ++i) {\n      out[i] = static_cast<DstType>(data[i]);\n    }\n    return true;\n  }\n\n private:\n  bool (CastOp::*body_)();\n};\n\n}  // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/channel_shuffle_op.h",
    "content": "#pragma once\n#include \"caffe2/operators/conv_pool_op_base.h\"\n\nnamespace caffe2 {\n\ntemplate <typename Context>\nclass ChannelShuffleOp final : public ConvPoolOpBase<Context> {\n public:\n  USE_OPERATOR_FUNCTIONS(Context);\n  ChannelShuffleOp(const OperatorDef& operator_def, Workspace* ws)\n      : ConvPoolOpBase<Context>(operator_def, ws) {\n    OPERATOR_NEEDS_FEATURE(\n        this->order_ == StorageOrder::NCHW,\n        \"ChannelShuffleOp only supports NCHW order\");\n  }\n\n  bool RunOnDeviceWithOrderNCHW() override {\n    const auto& X = Input(0);\n    auto* Y = Output(0);\n    Y->ResizeLike(X);\n    const auto C = X.dim32(1);\n    CAFFE_ENFORCE(C % this->group_ == 0, \"\");\n    const auto K = C / this->group_;\n    const auto S = X.dim32(2) * X.dim32(3);\n    const auto G = this->group_;\n    for (auto n = 0; n < X.dim32(0); ++n) {\n      for (auto g = 0; g < G; ++g) {\n        // Scatter the group g block (of size KxS) to output channels\n        // g + 0 * G, g + 1 * G, g + 2 * G, g + G * (K - 1) etc.\n        math::CopyMatrix<Context>(\n            X.itemsize(),\n            K,\n            S,\n            X.template data<float>() + g * K * S + n * C * S,\n            S,\n            Y->template mutable_data<float>() + g * S + n * C * S,\n            G * S,\n            &context_);\n      }\n    }\n    return true;\n  }\n};\n\ntemplate <typename Context>\nclass ChannelShuffleGradientOp final : public ConvPoolOpBase<Context> {\n public:\n  USE_OPERATOR_FUNCTIONS(Context);\n  ChannelShuffleGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : ConvPoolOpBase<Context>(operator_def, ws) {\n    OPERATOR_NEEDS_FEATURE(\n        this->order_ == StorageOrder::NCHW,\n        \"ChannelShuffleOp only supports NCHW order\");\n  }\n\n  bool RunOnDeviceWithOrderNCHW() override {\n    const auto& dY = Input(0);\n    auto* dX = Output(0);\n    dX->ResizeLike(dY);\n    const auto C = dY.dim32(1);\n    CAFFE_ENFORCE(C % this->group_ == 0, 
\"\");\n    const auto K = C / this->group_;\n    const auto S = dY.dim32(2) * dY.dim32(3);\n    const auto G = this->group_;\n    for (auto n = 0; n < dY.dim32(0); ++n) {\n      for (auto g = 0; g < G; ++g) {\n        // Gather the group g block (of size KxS) from output channels\n        // g + 0 * G, g + 1 * G, g + 2 * G, g + G * (K - 1) etc.\n        math::CopyMatrix<Context>(\n            dY.itemsize(),\n            K,\n            S,\n            dY.template data<float>() + g * S + n * C * S,\n            G * S,\n            dX->template mutable_data<float>() + g * K * S + n * C * S,\n            S,\n            &context_);\n      }\n    }\n    return true;\n  }\n};\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/clip_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_CLIP_OP_H_\n#define CAFFE2_OPERATORS_CLIP_OP_H_\n\n#include <limits>\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass ClipOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  ClipOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        min_(std::numeric_limits<T>::lowest()),\n        max_(std::numeric_limits<T>::max()) {\n    if (HasArgument(\"min\")) {\n      min_ = static_cast<T>(OperatorBase::GetSingleArgument<float>(\"min\", 0));\n    }\n    if (HasArgument(\"max\")) {\n      max_ = static_cast<T>(OperatorBase::GetSingleArgument<float>(\"max\", 0));\n    }\n  }\n\n  bool RunOnDevice() override;\n\n protected:\n  T min_;\n  T max_;\n};\n\ntemplate <typename T, class Context>\nclass ClipGradientOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  ClipGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        min_(std::numeric_limits<T>::lowest()),\n        max_(std::numeric_limits<T>::max()) {\n    if (HasArgument(\"min\")) {\n      min_ = static_cast<T>(OperatorBase::GetSingleArgument<float>(\"min\", 0));\n    }\n    if (HasArgument(\"max\")) {\n      max_ = static_cast<T>(OperatorBase::GetSingleArgument<float>(\"max\", 0));\n    }\n  }\n\n  bool RunOnDevice() override;\n\n protected:\n  T min_;\n  T max_;\n  // Input: Y, dY; Output: dX\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_CLIP_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/concat_split_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_CONCAT_SPLIT_OP_H_\n#define CAFFE2_OPERATORS_CONCAT_SPLIT_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/types.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nnamespace {\ninline int GetDimFromOrderString(const string& str) {\n  auto order = StringToStorageOrder(str);\n  switch (order) {\n    case StorageOrder::NHWC:\n      return 3;\n    case StorageOrder::NCHW:\n      return 1;\n    default:\n      CAFFE_THROW(\"Unsupported storage order: \", str);\n      return -1;\n  }\n}\n} // namespace\n\ntemplate <class Context>\nclass SplitOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SplitOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        split_(OperatorBase::GetRepeatedArgument<int>(\"split\")) {\n    CAFFE_ENFORCE(\n      !(OperatorBase::HasArgument(\"axis\") && OperatorBase::HasArgument(\"order\")),\n        \"You shouldn't specify both the dim to split, and the order \"\n        \"in the case of 4-D images.\");\n    if (OperatorBase::HasArgument(\"axis\")) {\n      axis_ = OperatorBase::GetSingleArgument<int>(\"axis\", -1);\n      // only exists for computing the gradient of a Concat with 'add_axis'\n      add_axis_ = OperatorBase::GetSingleArgument<int>(\"add_axis\", 0);\n    } else {\n      axis_ = GetDimFromOrderString(\n          OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"));\n      add_axis_ = 0;\n    }\n    CAFFE_ENFORCE_GE(axis_, 0);\n  }\n\n  bool RunOnDevice() override;\n\n protected:\n  int axis_;\n  int add_axis_;\n  vector<int> split_;\n  // Input: X, optionally split\n  // The split tensor is stored in CPU.\n};\n\ntemplate <class Context>\nclass ConcatOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  ConcatOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) 
{\n    CAFFE_ENFORCE(\n      !(OperatorBase::HasArgument(\"axis\") && OperatorBase::HasArgument(\"order\")),\n        \"You shouldn't specify both the dim to concat, and the order \"\n        \"in the case of 4-D images.\");\n    if (OperatorBase::HasArgument(\"axis\")) {\n      axis_ = OperatorBase::GetSingleArgument<int>(\"axis\", -1);\n      add_axis_ = OperatorBase::GetSingleArgument<int>(\"add_axis\", 0);\n    } else {\n      axis_ = GetDimFromOrderString(\n          OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"));\n      add_axis_ = 0;\n    }\n    CAFFE_ENFORCE_GE(axis_, 0);\n  }\n\n  bool RunOnDevice() override;\n\n protected:\n  int axis_;\n  int add_axis_;\n  // Input: a number of tensors. Output: Y, split\n  // The split are stored in CPU.\n};\n\n// Implementations\ntemplate <class Context>\nbool SplitOp<Context>::RunOnDevice() {\n  auto& input = Input(0);\n  CAFFE_ENFORCE_LT(axis_, input.ndim(), \"Axis not in input ndim range.\");\n  const int input_channels = input.dim32(axis_);\n  const int* axis_data;\n  vector<int> equal_split;\n  if (InputSize() == 2) {\n    // We obtain split from the input tensor.\n    CAFFE_ENFORCE_EQ(\n        split_.size(),\n        0,\n        \"If you set split with an input blob, do not pass in \"\n        \"split in the argument.\");\n    auto& split_tensor = OperatorBase::Input<TensorCPU>(1);\n    CAFFE_ENFORCE_EQ(split_tensor.size(), OutputSize());\n    axis_data = split_tensor.template data<int>();\n  } else if (split_.size() == 0) {\n    CAFFE_ENFORCE_EQ(\n        input_channels % OutputSize(),\n        0,\n        \"If you did not specify split explicitly, the number of \"\n        \"input channels should be divisible by the output size.\");\n    equal_split.resize(OutputSize(), input_channels / OutputSize());\n    axis_data = equal_split.data();\n  } else {\n    // We obtain split from the parameters.\n    CAFFE_ENFORCE_EQ(\n        split_.size(),\n        OutputSize(),\n        \"The number of splits 
specified should be equal to the \"\n        \"number of outputs.\");\n    axis_data = split_.data();\n  }\n\n  CAFFE_ENFORCE_EQ(\n      add_axis_ ? OutputSize()\n                : std::accumulate(axis_data, axis_data + OutputSize(), 0),\n      input_channels,\n      \"Sum of split dimensions do not match: should be \",\n      input_channels);\n  vector<TIndex> output_dims(input.dims());\n  int before = 1, after = 1;\n  for (int i = 0; i < axis_; ++i) {\n    before *= input.dim32(i);\n  }\n  for (int i = axis_ + 1; i < input.ndim(); ++i) {\n    after *= input.dim32(i);\n  }\n  if (add_axis_) {\n    output_dims.erase(output_dims.begin() + axis_);\n  }\n  size_t input_offset = 0;\n  for (int i = 0; i < OutputSize(); ++i) {\n    auto* output = Output(i);\n    auto axis_dim = add_axis_ ? 1 : axis_data[i];\n    if (!add_axis_) {\n      output_dims[axis_] = axis_data[i];\n    }\n    output->Resize(output_dims);\n    math::CopyMatrix<Context>(\n        input.itemsize(),\n        before,\n        axis_dim * after,\n        static_cast<const char*>(input.raw_data()) + input_offset,\n        input.dim32(axis_) * after,\n        output->raw_mutable_data(input.meta()),\n        axis_dim * after,\n        &context_);\n    input_offset += axis_dim * after * input.itemsize();\n  }\n  return true;\n}\n\ntemplate <class Context>\nbool ConcatOp<Context>::RunOnDevice() {\n  auto* output = Output(0);\n  TensorCPU* split = OperatorBase::Output<TensorCPU>(1);\n  split->Resize(vector<TIndex>(1, InputSize()));\n  int* axis_data = split->template mutable_data<int>();\n  auto& input_zero = Input(0);\n  CAFFE_ENFORCE_LT(\n      axis_,\n      input_zero.ndim() + (add_axis_ ? 
1 : 0),\n      \"Axis not in input ndim range.\");\n  for (int i = 1; i < InputSize(); ++i) {\n    CAFFE_ENFORCE(\n        Input(i).meta() == input_zero.meta(),\n        \"All inputs must have the same type, expected: \",\n        input_zero.meta().name(),\n        \" but got: \",\n        Input(i).meta().name(),\n        \" for input: \",\n        i);\n  }\n\n  int before = 1, after = 1;\n  vector<TIndex> output_dims(input_zero.dims());\n  for (int i = 0; i < input_zero.ndim(); ++i) {\n    if (i == axis_ && !add_axis_) {\n      continue;\n    }\n    int dim = input_zero.dim32(i);\n    if (i < axis_) {\n      before *= dim;\n    } else { // i > axis_ || i == axis_ && add_axis_\n      after *= dim;\n    }\n    // check the input dims are compatible.\n    for (int j = 1; j < InputSize(); ++j) {\n      int dim_j = Input(j).dim32(i);\n      CAFFE_ENFORCE(\n          dim == dim_j,\n          \"Expect dimension = \",\n          dim,\n          \" got \",\n          dim_j,\n          \" at axis = \",\n          i,\n          \" for input: \",\n          j,\n          \". The input tensors can only have different dimensions \"\n          \"when arg 'add_axis' = 0 and along the axis = \",\n          axis_,\n          \" <\",\n          Input(0).dims(),\n          \"> vs <\",\n          Input(j).dims(),\n          \">.\");\n    }\n  }\n\n  int output_channels = 0;\n  for (int i = 0; i < InputSize(); ++i) {\n    axis_data[i] = add_axis_ ? 1 : Input(i).dim32(axis_);\n    output_channels += axis_data[i];\n  }\n  if (add_axis_) {\n    output_dims.insert(output_dims.begin() + axis_, output_channels);\n  } else {\n    output_dims[axis_] = output_channels;\n  }\n  output->Resize(output_dims);\n  size_t output_offset = 0;\n  for (int i = 0; i < InputSize(); ++i) {\n    auto& input = Input(i);\n    auto axis_dim = add_axis_ ? 
1 : input.dim32(axis_);\n    math::CopyMatrix<Context>(\n        input.itemsize(),\n        before,\n        axis_dim * after,\n        input.raw_data(),\n        axis_dim * after,\n        static_cast<char*>(output->raw_mutable_data(input_zero.meta())) +\n            output_offset,\n        output_channels * after,\n        &context_);\n    output_offset += axis_dim * after * input.itemsize();\n  }\n  return true;\n}\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_CONCAT_SPLIT_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/conditional_op.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#ifndef CONDITIONAL_OP_H\n#define CONDITIONAL_OP_H\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/tensor.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass ConditionalOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  ConditionalOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override;\n};\n\n} // caffe2\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/conv_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_CONV_OP_H_\n#define CAFFE2_OPERATORS_CONV_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/conv_op_shared.h\"\n#include \"caffe2/operators/conv_pool_op_base.h\"\n\nCAFFE2_DECLARE_bool(caffe2_force_shared_col_buffer);\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass ConvOp final : public ConvPoolOpBase<Context> {\n public:\n  USE_CONV_POOL_BASE_FUNCTIONS(Context);\n  ConvOp(const OperatorDef& operator_def, Workspace* ws)\n      : ConvPoolOpBase<Context>(operator_def, ws) {\n    // Since this is the default convolution implementation, we will\n    // use CAFFE_ENFORCE instead of OPERATOR_NEEDS_FEATURE.\n    CAFFE_ENFORCE(\n        group_ == 1 || order_ == StorageOrder::NCHW,\n        \"Group convolution only supports NCHW order right now.\");\n\n    // Create shared buffer mutex in the constructor\n    // to avoid race-condition in DAGNet.\n    if (FLAGS_caffe2_force_shared_col_buffer || shared_buffer_) {\n      createSharedBuffer<Context>(ws_);\n    }\n  }\n  ~ConvOp() {}\n\n  bool RunOnDeviceWithOrderNCHW() override;\n  bool RunOnDeviceWithOrderNHWC() override;\n\n private:\n  Tensor<Context> col_buffer_;\n  Tensor<Context> bias_multiplier_;\n  Tensor<Context> img_shape_device_;\n  Tensor<Context> col_buffer_shape_device_;\n  // Input: X, W, b\n  // Output: Y\n  INPUT_TAGS(INPUT, FILTER, BIAS);\n};\n\ntemplate <typename T, class Context>\nclass ConvGradientOp final : public ConvPoolOpBase<Context> {\n public:\n  USE_CONV_POOL_BASE_FUNCTIONS(Context);\n  ConvGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : ConvPoolOpBase<Context>(operator_def, ws),\n        no_bias_(OperatorBase::GetSingleArgument<int>(\"no_bias\", 0)) {\n    CAFFE_ENFORCE(\n        !(no_bias_ && OutputSize() == 3),\n        \"If bias is not present, you should not have 3 grad output.\");\n    CAFFE_ENFORCE(\n        group_ == 1 || order_ == 
StorageOrder::NCHW,\n        \"Group convolution only supports NCHW order right now.\");\n  }\n  ~ConvGradientOp() {}\n\n  bool RunOnDeviceWithOrderNCHW() override;\n  bool RunOnDeviceWithOrderNHWC() override;\n\n private:\n  Tensor<Context> col_buffer_;\n  Tensor<Context> bias_multiplier_;\n  Tensor<Context> img_shape_device_;\n  Tensor<Context> col_buffer_shape_device_;\n  bool no_bias_;\n  // input: X, W, dY\n  // output: dW, db, and optionally dX\n  INPUT_TAGS(INPUT, FILTER, OUTPUT_GRAD);\n  OUTPUT_TAGS(FILTER_GRAD, BIAS_OR_INPUT_GRAD, INPUT_GRAD);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_CONV_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/conv_op_cache_cudnn.h",
    "content": "#ifndef CAFFE2_OPERATORS_CONV_OP_CACHE_H_\n#define CAFFE2_OPERATORS_CONV_OP_CACHE_H_\n\n#include <functional>\n#include <unordered_map>\n#include <vector>\n\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/tensor.h\"\n\nnamespace caffe2 {\ntemplate <typename T>\nclass AlgorithmsCache {\n public:\n  T getAlgorithm(\n      const std::vector<TIndex>& bottom,\n      const std::vector<TIndex>& desc,\n      std::function<T()> generatingFunc);\n\n private:\n  std::unordered_map<int64_t, T> hash_;\n};\n\ntemplate <typename T>\nT AlgorithmsCache<T>::getAlgorithm(\n    const std::vector<TIndex>& vec1,\n    const std::vector<TIndex>& vec2,\n    std::function<T()> generatingFunc) {\n  int64_t seed = 0;\n  std::hash<TIndex> hashFn;\n  for (const auto num : vec1) {\n    // Copied from boost::hash_combine.\n    // Adding 1 to differentiate between first and second vector.\n    seed ^= hashFn(num) + 0x9e3779b9 + (seed << 6) + (seed >> 2) + 1;\n  }\n\n  for (const auto num : vec2) {\n    // Copied from boost::hash_combine.\n    seed ^= hashFn(num) + 0x9e3779b9 + (seed << 6) + (seed >> 2);\n  }\n\n  if (seed == 0) {\n    return generatingFunc();\n  }\n\n  if (hash_.find(seed) == hash_.end()) {\n    T value = generatingFunc();\n    hash_[seed] = value;\n  }\n\n  return hash_[seed];\n}\n}\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/conv_op_impl.h",
    "content": "// conv_op_impl.h is the templated implementation of the conv_op.h file.\n#ifndef CAFFE2_OPERATORS_CONV_OP_IMPL_H_\n#define CAFFE2_OPERATORS_CONV_OP_IMPL_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/flags.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/conv_op.h\"\n#include \"caffe2/operators/conv_pool_op_base.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nbool ConvOp<T, Context>::RunOnDeviceWithOrderNCHW() {\n  const Tensor<Context>& X = Input(INPUT);\n  auto& filter = Input(FILTER);\n  Tensor<Context>* Y = Output(0);\n  const int N = X.dim32(0), C = X.dim32(1);\n  CAFFE_ENFORCE_EQ(X.ndim(), filter.ndim());\n  const int M = filter.dim32(0);\n  CAFFE_ENFORCE(\n      C == filter.dim32(1) * group_,\n      \"Convolution op: input channels does not match: # of input channels \",\n      C,\n      \" is not equal to kernel channels * group:\",\n      filter.dim32(1),\n      \"*\",\n      group_);\n  CAFFE_ENFORCE(\n      M % group_ == 0,\n      \"The number of output channels is not divisible by group.\");\n\n  int kernel_dims_size = 1;\n  for (int i = 0; i < kernel_.size(); ++i) {\n    CAFFE_ENFORCE(filter.dim32(i + 2) == kernel_[i]);\n    kernel_dims_size *= kernel_[i];\n  }\n\n  ConvPoolOpBase<Context>::SetOutputSize(X, Y, filter.dim32(0));\n\n  const vector<int> input_dims = GetDims(X);\n  const vector<int> output_dims = GetDims(*Y);\n  const int input_image_size = this->GetDimsSize(X);\n  const int output_image_size = this->GetDimsSize(*Y);\n\n  vector<int> img_shape;\n  img_shape.assign(X.dims().begin() + 1, X.dims().end());\n\n  vector<int> buffer_shape;\n  buffer_shape.push_back(C / group_ * kernel_dims_size);\n  buffer_shape.insert(\n      buffer_shape.end(), output_dims.begin(), output_dims.end());\n\n  if (kernel_.size() != 2 && img_shape_device_.size() != img_shape.size()) {\n    
img_shape_device_.Resize(img_shape.size());\n    context_.template Copy<int, CPUContext, Context>(\n        img_shape.size(),\n        img_shape.data(),\n        img_shape_device_.template mutable_data<int>());\n  }\n\n  if (kernel_.size() != 2 &&\n      col_buffer_shape_device_.size() != buffer_shape.size()) {\n    col_buffer_shape_device_.Resize(buffer_shape.size());\n    context_.template Copy<int, CPUContext, Context>(\n        buffer_shape.size(),\n        buffer_shape.data(),\n        col_buffer_shape_device_.template mutable_data<int>());\n  }\n\n  const int col_buffer_size =\n      (C / group_) * kernel_dims_size * output_image_size;\n\n  // The dimension of each kernel\n  const int kernel_dim = C / group_ * kernel_dims_size;\n  // The offset corresponding to a single input image, and a single output\n  // image.\n  const int input_offset = C / group_ * input_image_size;\n  const int output_offset = Y->size() / Y->dim32(0) / group_;\n  const int filter_offset = filter.size() / group_;\n\n  // The col buffer is stored in CHW order as well - kernel_dim, and the height\n  // and width.\n  const T* Xdata = X.template data<T>();\n  if (InputSize() == 3) {\n    auto& bias = Input(BIAS);\n    CAFFE_ENFORCE(bias.ndim() == 1);\n    CAFFE_ENFORCE(bias.dim32(0) == M);\n    if (bias_multiplier_.size() != output_image_size) {\n      // If the helper bias multiplier is not image size, reshape and fill it\n      // with\n      // one.\n      bias_multiplier_.Resize(vector<TIndex>(1, output_image_size));\n      math::Set<T, Context>(\n          output_image_size,\n          static_cast<T>(1),\n          bias_multiplier_.template mutable_data<T>(),\n          &context_);\n    }\n  }\n  T* Ydata = Y->template mutable_data<T>();\n\n  auto f = [&](Tensor<Context>* col_buffer) {\n    col_buffer->Resize(buffer_shape);\n    T* col_buffer_data = col_buffer->template mutable_data<T>();\n    // Im2col, followed by gemm.\n    for (int image_id = 0; image_id < N; ++image_id) {\n      
for (int group_id = 0; group_id < group_; ++group_id) {\n        if (kernel_.size() == 2) {\n          math::Im2col<T, Context, StorageOrder::NCHW>(\n              Xdata + group_id * input_offset,\n              C / group_,\n              input_dims[0],\n              input_dims[1],\n              kernel_h(),\n              kernel_w(),\n              dilation_h(),\n              dilation_w(),\n              pad_t(),\n              pad_l(),\n              pad_b(),\n              pad_r(),\n              stride_h(),\n              stride_w(),\n              col_buffer_data,\n              &context_);\n        } else {\n          math::Im2colNd<T, Context, StorageOrder::NCHW>(\n              Xdata + group_id * input_offset,\n              img_shape_device_.template data<int>(),\n              col_buffer_shape_device_.template data<int>(),\n              C * input_image_size,\n              col_buffer_size,\n              kernel_device_.template data<int>(),\n              stride_device_.template data<int>(),\n              dilation_device_.template data<int>(),\n              pads_device_.template data<int>(),\n              kernel_.size(),\n              col_buffer_data,\n              &context_);\n        }\n        // Weight term\n        math::Gemm<T, Context>(\n            CblasNoTrans,\n            CblasNoTrans,\n            M / group_,\n            output_image_size,\n            kernel_dim,\n            1,\n            filter.template data<T>() + group_id * filter_offset,\n            col_buffer_data,\n            0,\n            Ydata + group_id * output_offset,\n            &context_);\n      }\n      if (InputSize() == 3) {\n        // Bias term can be carried out outside the group definition\n        // to be efficient.\n        auto* bias_data = Input(BIAS).template data<T>();\n        math::Gemm<T, Context>(\n            CblasNoTrans,\n            CblasNoTrans,\n            M,\n            output_image_size,\n            1,\n            1,\n            
bias_data,\n            bias_multiplier_.template data<T>(),\n            1,\n            Ydata,\n            &context_);\n      }\n      Xdata += input_offset * group_;\n      Ydata += output_offset * group_;\n    }\n  };\n\n  if (FLAGS_caffe2_force_shared_col_buffer || shared_buffer_) {\n    runWithSharedBuffer<Context>(ws_, f);\n  } else {\n    f(&col_buffer_);\n  }\n  return true;\n}\n\n// The implementations.\ntemplate <typename T, class Context>\nbool ConvOp<T, Context>::RunOnDeviceWithOrderNHWC() {\n  const Tensor<Context>& X = Input(INPUT);\n  auto& filter = Input(FILTER);\n  Tensor<Context>* Y = Output(0);\n  const int N = X.dim32(0), H = X.dim32(1), W = X.dim32(2), C = X.dim32(3);\n\n  CAFFE_ENFORCE_EQ(\n      kernel_.size(),\n      2,\n      \"Only 2d convolution is supported for NHWC storage type\");\n\n  CAFFE_ENFORCE(X.ndim(), filter.ndim());\n  const int M = filter.dim32(0);\n  CAFFE_ENFORCE(filter.dim32(1) == kernel_h());\n  CAFFE_ENFORCE(filter.dim32(2) == kernel_w());\n  CAFFE_ENFORCE(filter.dim32(3) == C);\n\n  ConvPoolOpBase<Context>::SetOutputSize(X, Y, filter.dim32(0));\n  // The dimension of each kernel\n  const int kernel_dim = kernel_h() * kernel_w() * C;\n  // The offset corresponding to a single input image, and a single output\n  // image.\n  const int input_offset = H * W * C;\n  const int output_offset = Y->size() / Y->dim32(0);\n  // The output image size is the spatial size of the output.\n  const int output_image_size = Y->dim32(1) * Y->dim32(2);\n  // The col buffer is stored in HWC order as well - kernel_dim, and the height\n  // and width.\n  const T* Xdata = X.template data<T>();\n  T* Ydata = Y->template mutable_data<T>();\n  // Specialized path for 1 by 1 convolution with stride 1, pad 0 - we\n  // can skip im2col.\n  if (kernel_dim == C && Y->dim32(1) == X.dim32(1) &&\n      Y->dim32(2) == X.dim32(2) && stride_h() == 1 && stride_w() == 1 &&\n      pad_t() == 0 && pad_b() == 0 && pad_l() == 0 && pad_r() == 0) {\n    
math::Gemm<T, Context>(\n        CblasNoTrans,\n        CblasTrans,\n        N * H * W,\n        M,\n        C,\n        1,\n        Xdata,\n        filter.template data<T>(),\n        0,\n        Ydata,\n        &context_);\n    if (InputSize() == 3) {\n      auto& bias = Input(BIAS);\n      CAFFE_ENFORCE(1 == bias.ndim());\n      CAFFE_ENFORCE(bias.dim32(0) == M);\n      if (bias_multiplier_.size() != N * H * W) {\n        // If the helper bias multiplier is not M, reshape and fill it with one.\n        bias_multiplier_.Resize(vector<TIndex>(1, N * H * W));\n        math::Set<T, Context>(\n            N * H * W,\n            static_cast<T>(1),\n            bias_multiplier_.template mutable_data<T>(),\n            &context_);\n      }\n      math::Gemm<T, Context>(\n          CblasNoTrans,\n          CblasNoTrans,\n          N * H * W,\n          M,\n          1,\n          1,\n          bias_multiplier_.template data<T>(),\n          bias.template data<T>(),\n          1,\n          Ydata,\n          &context_);\n    }\n  } else {\n    if (InputSize() == 3) {\n      auto& bias = Input(BIAS);\n      CAFFE_ENFORCE(1 == bias.ndim());\n      CAFFE_ENFORCE(bias.dim32(0) == M);\n      if (bias_multiplier_.size() != output_image_size) {\n        // If the helper bias multiplier is not M, reshape and fill it with one.\n        bias_multiplier_.Resize(vector<TIndex>(1, output_image_size));\n        math::Set<T, Context>(\n            output_image_size,\n            static_cast<T>(1),\n            bias_multiplier_.template mutable_data<T>(),\n            &context_);\n      }\n    }\n    auto f = [&](Tensor<Context>* col_buffer) {\n      col_buffer->Resize(\n          vector<TIndex>{Y->dim32(1), Y->dim32(2), kernel_h(), kernel_w(), C});\n      T* col_buffer_data = col_buffer->template mutable_data<T>();\n      // Im2col, followed by gemm.\n      for (int image_id = 0; image_id < N; ++image_id) {\n        math::Im2col<T, Context, StorageOrder::NHWC>(\n            Xdata,\n    
        C,\n            H,\n            W,\n            kernel_h(),\n            kernel_w(),\n            dilation_h(),\n            dilation_w(),\n            pad_t(),\n            pad_l(),\n            pad_b(),\n            pad_r(),\n            stride_h(),\n            stride_w(),\n            col_buffer_data,\n            &context_);\n        // Weight term\n        math::Gemm<T, Context>(\n            CblasNoTrans,\n            CblasTrans,\n            output_image_size,\n            M,\n            kernel_dim,\n            1,\n            col_buffer_data,\n            filter.template data<T>(),\n            0,\n            Ydata,\n            &context_);\n        if (InputSize() == 3) {\n          // Bias term\n          math::Gemm<T, Context>(\n              CblasNoTrans,\n              CblasNoTrans,\n              output_image_size,\n              M,\n              1,\n              1,\n              bias_multiplier_.template data<T>(),\n              Input(BIAS).template data<T>(),\n              1,\n              Ydata,\n              &context_);\n        }\n        Xdata += input_offset;\n        Ydata += output_offset;\n      }\n    };\n    if (FLAGS_caffe2_force_shared_col_buffer || shared_buffer_) {\n      runWithSharedBuffer<Context>(ws_, f);\n    } else {\n      f(&col_buffer_);\n    }\n  }\n  return true;\n}\n\ntemplate <typename T, class Context>\nbool ConvGradientOp<T, Context>::RunOnDeviceWithOrderNCHW() {\n  auto& X = Input(INPUT);\n  auto& filter = Input(FILTER);\n  auto& dY = Input(OUTPUT_GRAD);\n  auto* dfilter = Output(FILTER_GRAD);\n  const int N = X.dim32(0), C = X.dim32(1);\n\n  const vector<int> input_dims = this->GetDims(X);\n  const int input_image_size = this->GetDimsSize(X);\n\n  const vector<int> output_dims = this->GetDims(dY);\n  // The output image size is the spatial size of the output.\n  const int output_image_size = this->GetDimsSize(dY);\n\n  ConvPoolOpBase<Context>::ComputePads(input_dims);\n  CAFFE_ENFORCE_EQ(X.ndim(), 
filter.ndim());\n  const int M = filter.dim32(0);\n  CAFFE_ENFORCE(filter.dim32(1) * group_ == C);\n\n  int kernel_dims_size = 1;\n  for (int i = 0; i < kernel_.size(); ++i) {\n    CAFFE_ENFORCE(filter.dim32(i + 2) == kernel_[i]);\n    kernel_dims_size *= kernel_[i];\n  }\n\n  CAFFE_ENFORCE(M % group_ == 0);\n  dfilter->ResizeLike(filter);\n  // The dimension of each kernel\n  const int kernel_dim = C / group_ * kernel_dims_size;\n  // The offset corresponding to a single input image, and a single output\n  // image.\n  const int input_offset = C / group_ * input_image_size;\n  const int output_offset = dY.size() / dY.dim32(0) / group_;\n  const int filter_offset = filter.size() / group_;\n  // The col buffer is stored in CHW order as well - kernel_dim, and the height\n  // and width.\n\n  vector<int> img_shape;\n  img_shape.assign(X.dims().begin() + 1, X.dims().end());\n  vector<int> col_buffer_shape;\n  col_buffer_shape.push_back(C / group_ * kernel_dims_size);\n  col_buffer_shape.insert(\n      col_buffer_shape.end(), output_dims.begin(), output_dims.end());\n  col_buffer_.Resize(col_buffer_shape);\n\n  if (kernel_.size() != 2 && img_shape_device_.size() != img_shape.size()) {\n    img_shape_device_.Resize(img_shape.size());\n    context_.template Copy<int, CPUContext, Context>(\n        img_shape.size(),\n        img_shape.data(),\n        img_shape_device_.template mutable_data<int>());\n  }\n\n  const int col_buffer_size =\n      (C / group_) * kernel_dims_size * output_image_size;\n\n  if (kernel_.size() != 2 &&\n      col_buffer_shape_device_.size() != col_buffer_shape.size()) {\n    col_buffer_shape_device_.Resize(col_buffer_shape.size());\n    context_.template Copy<int, CPUContext, Context>(\n        col_buffer_shape.size(),\n        col_buffer_shape.data(),\n        col_buffer_shape_device_.template mutable_data<int>());\n  }\n\n  const T* Xdata = X.template data<T>();\n  const T* filter_data = filter.template data<T>();\n  const T* dYdata = dY.template 
data<T>();\n  T* col_buffer_data = col_buffer_.template mutable_data<T>();\n  T* dfilter_data = dfilter->template mutable_data<T>();\n\n  // Pre-setting the gradients to zero.\n  math::Set<T, Context>(dfilter->size(), 0, dfilter_data, &context_);\n\n  T* dbias_data = nullptr;\n  if (!no_bias_) {\n    auto* dbias = Output(BIAS_OR_INPUT_GRAD);\n    dbias->Resize(M);\n    if (bias_multiplier_.size() != output_image_size) {\n      // If the helper bias multiplier is not M, reshape and fill it with one.\n      bias_multiplier_.Resize(vector<TIndex>(1, output_image_size));\n      math::Set<T, Context>(\n          output_image_size,\n          static_cast<T>(1),\n          bias_multiplier_.template mutable_data<T>(),\n          &context_);\n    }\n    dbias_data = dbias->template mutable_data<T>();\n    math::Set<T, Context>(dbias->size(), 0, dbias_data, &context_);\n  }\n\n  for (int image_id = 0; image_id < N; ++image_id) {\n    for (int group_id = 0; group_id < group_; ++group_id) {\n      // When we compute the gradient with respect to the filters, we need to do\n      // im2col to allow gemm-type computation.\n      if (kernel_.size() == 2) {\n        math::Im2col<T, Context, StorageOrder::NCHW>(\n            Xdata + group_id * input_offset,\n            C / group_,\n            input_dims[0],\n            input_dims[1],\n            kernel_h(),\n            kernel_w(),\n            dilation_h(),\n            dilation_w(),\n            pad_t(),\n            pad_l(),\n            pad_b(),\n            pad_r(),\n            stride_h(),\n            stride_w(),\n            col_buffer_data,\n            &context_);\n      } else {\n        math::Im2colNd<T, Context, StorageOrder::NCHW>(\n            Xdata + group_id * input_offset,\n            img_shape_device_.template data<int>(),\n            col_buffer_shape_device_.template data<int>(),\n            C * input_image_size,\n            col_buffer_size,\n            kernel_device_.template data<int>(),\n            
stride_device_.template data<int>(),\n            dilation_device_.template data<int>(),\n            pads_device_.template data<int>(),\n            kernel_.size(),\n            col_buffer_data,\n            &context_);\n      }\n      // Gradient with respect to filter.\n      math::Gemm<T, Context>(\n          CblasNoTrans,\n          CblasTrans,\n          M / group_,\n          kernel_dim,\n          output_image_size,\n          1,\n          dYdata + group_id * output_offset,\n          col_buffer_data,\n          1,\n          dfilter_data + group_id * filter_offset,\n          &context_);\n    }\n    if (!no_bias_) {\n      // Gradient with respect to bias can be computed independent from group.\n      math::Gemv<T, Context>(\n          CblasNoTrans,\n          M,\n          output_image_size,\n          1,\n          dYdata,\n          bias_multiplier_.template data<T>(),\n          1,\n          dbias_data,\n          &context_);\n    }\n    Xdata += input_offset * group_;\n    dYdata += output_offset * group_;\n  }\n  if (OutputSize() == 3 || (no_bias_ && (OutputSize() == 2))) {\n    // Compute the gradient w.r.t. the input.\n    auto* dX = Output(no_bias_ ? 
BIAS_OR_INPUT_GRAD : INPUT_GRAD);\n    dX->ResizeLike(X);\n    T* dXdata = dX->template mutable_data<T>();\n    dYdata = dY.template data<T>();\n    for (int image_id = 0; image_id < N; ++image_id) {\n      for (int group_id = 0; group_id < group_; ++group_id) {\n        // Compute gradient into col_buffer.\n        math::Gemm<T, Context>(\n            CblasTrans,\n            CblasNoTrans,\n            kernel_dim,\n            output_image_size,\n            M / group_,\n            1,\n            filter_data + group_id * filter_offset,\n            dYdata,\n            0,\n            col_buffer_data,\n            &context_);\n        if (kernel_.size() == 2) {\n          math::Col2im<T, Context, StorageOrder::NCHW>(\n              col_buffer_data,\n              C / group_,\n              input_dims[0],\n              input_dims[1],\n              kernel_h(),\n              kernel_w(),\n              dilation_h(),\n              dilation_w(),\n              pad_t(),\n              pad_l(),\n              pad_b(),\n              pad_r(),\n              stride_h(),\n              stride_w(),\n              dXdata,\n              &context_);\n        } else {\n          math::Col2imNd<T, Context, StorageOrder::NCHW>(\n              col_buffer_data,\n              img_shape_device_.template data<int>(),\n              col_buffer_shape_device_.template data<int>(),\n              C * input_image_size,\n              col_buffer_size,\n              kernel_device_.template data<int>(),\n              stride_device_.template data<int>(),\n              dilation_device_.template data<int>(),\n              pads_device_.template data<int>(),\n              kernel_.size(),\n              dXdata,\n              &context_);\n        }\n        dXdata += input_offset;\n        dYdata += output_offset;\n      }\n    }\n  }\n  return true;\n}\n\ntemplate <typename T, class Context>\nbool ConvGradientOp<T, Context>::RunOnDeviceWithOrderNHWC() {\n  auto& X = Input(INPUT);\n  
auto& filter = Input(FILTER);\n  auto& dY = Input(OUTPUT_GRAD);\n  auto* dfilter = Output(FILTER_GRAD);\n\n  const int N = X.dim32(0), H = X.dim32(1), W = X.dim32(2), C = X.dim32(3);\n  ConvPoolOpBase<Context>::ComputePads({H, W});\n  CAFFE_ENFORCE(4 == filter.ndim());\n  const int M = filter.dim32(0);\n  CAFFE_ENFORCE(filter.dim32(1) == kernel_h());\n  CAFFE_ENFORCE(filter.dim32(2) == kernel_w());\n  CAFFE_ENFORCE(filter.dim32(3) == C);\n  dfilter->ResizeLike(filter);\n\n  // The dimension of each kernel\n  const int kernel_dim = kernel_h() * kernel_w() * C;\n  // The offset corresponding to a single input image, and a single output\n  // image.\n  const int input_offset = H * W * C;\n  const int output_offset = dY.size() / dY.dim32(0);\n  // The output image size is the spatial size of the output.\n  const int output_image_size = dY.dim32(1) * dY.dim32(2);\n  // The col buffer is stored in CHW order as well - kernel_dim, and the height\n  // and width.\n  col_buffer_.Resize(output_image_size, kernel_dim);\n\n  const T* Xdata = X.template data<T>();\n  const T* const filter_data = filter.template data<T>();\n  const T* const dYdata = dY.template data<T>();\n  T* col_buffer_data = col_buffer_.template mutable_data<T>();\n  T* dfilter_data = dfilter->template mutable_data<T>();\n\n  // Pre-setting the gradients to zero.\n  math::Set<T, Context>(dfilter->size(), 0, dfilter_data, &context_);\n\n  T* dbias_data = nullptr;\n  if (!no_bias_) {\n    auto* dbias = Output(BIAS_OR_INPUT_GRAD);\n    dbias->Resize(M);\n    dbias_data = dbias->template mutable_data<T>();\n    math::Set<T, Context>(dbias->size(), 0, dbias_data, &context_);\n    if (bias_multiplier_.size() != output_image_size) {\n      // If the helper bias multiplier is not M, reshape and fill it with one.\n      bias_multiplier_.Resize(vector<TIndex>(1, output_image_size));\n      math::Set<T, Context>(\n          output_image_size,\n          static_cast<T>(1),\n          bias_multiplier_.template 
mutable_data<T>(),\n          &context_);\n    }\n  }\n\n  for (int image_id = 0; image_id < N; ++image_id) {\n    // When we compute the gradient with respect to the filters, we need to do\n    // im2col to allow gemm-type computation.\n    math::Im2col<T, Context, StorageOrder::NHWC>(\n        Xdata,\n        C,\n        H,\n        W,\n        kernel_h(),\n        kernel_w(),\n        dilation_h(),\n        dilation_w(),\n        pad_t(),\n        pad_l(),\n        pad_b(),\n        pad_r(),\n        stride_h(),\n        stride_w(),\n        col_buffer_data,\n        &context_);\n    // Gradient with respect to filter.\n    math::Gemm<T, Context>(\n        CblasTrans,\n        CblasNoTrans,\n        M,\n        kernel_dim,\n        output_image_size,\n        1,\n        dYdata + output_offset * image_id,\n        col_buffer_data,\n        1,\n        dfilter_data,\n        &context_);\n    if (!no_bias_) {\n      // Gradient with respect to bias\n      math::Gemv<T, Context>(\n          CblasTrans,\n          output_image_size,\n          M,\n          1,\n          dYdata + output_offset * image_id,\n          bias_multiplier_.template data<T>(),\n          1,\n          dbias_data,\n          &context_);\n    }\n    Xdata += input_offset;\n  }\n\n  if (OutputSize() == 3 || (no_bias_ && (OutputSize() == 2))) {\n    // Compute the gradient w.r.t. the input.\n    auto* dX = Output(no_bias_ ? 
BIAS_OR_INPUT_GRAD : INPUT_GRAD);\n    dX->ResizeLike(X);\n    T* dXdata = dX->template mutable_data<T>();\n    for (int image_id = 0; image_id < N; ++image_id) {\n      // Compute gradient into col_buffer.\n      math::Gemm<T, Context>(\n          CblasNoTrans,\n          CblasNoTrans,\n          output_image_size,\n          kernel_dim,\n          M,\n          1,\n          dYdata + output_offset * image_id,\n          filter_data,\n          0,\n          col_buffer_data,\n          &context_);\n      math::Col2im<T, Context, StorageOrder::NHWC>(\n          col_buffer_data,\n          C,\n          H,\n          W,\n          kernel_h(),\n          kernel_w(),\n          dilation_h(),\n          dilation_w(),\n          pad_t(),\n          pad_l(),\n          pad_b(),\n          pad_r(),\n          stride_h(),\n          stride_w(),\n          dXdata,\n          &context_);\n      dXdata += input_offset;\n    }\n  }\n  return true;\n}\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_CONV_OP_IMPL_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/conv_op_shared.h",
    "content": "#ifndef CAFFE2_OPERATORS_CONV_OP_SHARED_H_\n#define CAFFE2_OPERATORS_CONV_OP_SHARED_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/core/workspace.h\"\n\nnamespace caffe2 {\n\n/**\n * Creates a mutex and shared buffer in the workspace.\n * Not thread-safe, must be called from the constructor.\n */\ntemplate <typename Context>\nvoid createSharedBuffer(Workspace* ws);\n\n/**\n * Thread-safe, can be invoked from RunOnDevice() to serialize\n * access to shared buffer.\n */\ntemplate <typename Context>\nvoid runWithSharedBuffer(\n    Workspace* ws,\n    std::function<void(Tensor<Context>* buffer)> f);\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_CONV_OP_SHARED_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/conv_pool_op_base.h",
    "content": "#ifndef CAFFE2_OPERATORS_CONV_POOL_OP_BASE_H_\n#define CAFFE2_OPERATORS_CONV_POOL_OP_BASE_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/proto/caffe2_legacy.pb.h\"\n#include \"caffe2/utils/math.h\"\n\n// This macro is here just to allow us to experiment with padding values that\n// determines, when we have an odd number of pads, which side gets the one\n// additional pad value, the head side, or the tail side. Setting it to false\n// will enable the TensorFlow behavior, and setting it to true will enable\n// a behavior more consistent with Caffe and CuDNN.\n// This only affects the case when you set legacy pad to VALID or SAME. The\n// behavior inherits from the early designs of Google's CNN implementation,\n// where padding values are implicitly calculated instead of explicitly\n// specified. This is still the case with TensorFlow. Many frameworks have\n// followed a slightly different approach of explicitly giving padding values,\n// in which case the value of this constant value does not matter.\nconst bool CAFFE2_PAD_HEAD_MORE = false;\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass ConvPoolOpBase : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  ConvPoolOpBase(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        legacy_pad_(\n            static_cast<LegacyPadding>(OperatorBase::GetSingleArgument<int>(\n                \"legacy_pad\",\n                LegacyPadding::NOTSET))),\n        global_pooling_(\n            OperatorBase::GetSingleArgument<int>(\"global_pooling\", 0)),\n        kernel_(OperatorBase::GetRepeatedArgument<int>(\"kernels\")),\n        dilation_(OperatorBase::GetRepeatedArgument<int>(\"dilations\")),\n        stride_(OperatorBase::GetRepeatedArgument<int>(\"strides\")),\n        pads_(OperatorBase::GetRepeatedArgument<int>(\"pads\")),\n        
group_(OperatorBase::GetSingleArgument<int>(\"group\", 1)),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))),\n        shared_buffer_(\n            OperatorBase::GetSingleArgument<int>(\"shared_buffer\", 0)),\n        ws_(ws) {\n    // For the padding, they should either be the legacy padding strategy\n    // (VALID or SAME), or an explicit, non-negative value.\n    if (legacy_pad_ == LegacyPadding::VALID ||\n        legacy_pad_ == LegacyPadding::SAME) {\n      CAFFE_ENFORCE(\n          !OperatorBase::HasArgument(\"pads\"),\n          \"If you use legacy padding VALID or SAME, you should not specify \"\n          \"any specific padding values.\");\n    }\n\n    // Get old arguments values.\n    if (OperatorBase::HasArgument(\"kernel\")) {\n      kernel_.resize(2, OperatorBase::GetSingleArgument<int>(\"kernel\", 0));\n    } else if (\n        OperatorBase::HasArgument(\"kernel_h\") &&\n        OperatorBase::HasArgument(\"kernel_w\")) {\n      kernel_.push_back(OperatorBase::GetSingleArgument<int>(\"kernel_h\", 0));\n      kernel_.push_back(OperatorBase::GetSingleArgument<int>(\"kernel_w\", 0));\n    }\n\n    if (OperatorBase::HasArgument(\"stride\")) {\n      stride_.resize(2, OperatorBase::GetSingleArgument<int>(\"stride\", 0));\n    } else if (\n        OperatorBase::HasArgument(\"stride_h\") &&\n        OperatorBase::HasArgument(\"stride_w\")) {\n      stride_.push_back(OperatorBase::GetSingleArgument<int>(\"stride_h\", 0));\n      stride_.push_back(OperatorBase::GetSingleArgument<int>(\"stride_w\", 0));\n    }\n\n    if (OperatorBase::HasArgument(\"dilation\")) {\n      dilation_.resize(2, OperatorBase::GetSingleArgument<int>(\"dilation\", 0));\n    } else if (\n        OperatorBase::HasArgument(\"dilation_h\") &&\n        OperatorBase::HasArgument(\"dilation_w\")) {\n      dilation_.push_back(\n          OperatorBase::GetSingleArgument<int>(\"dilation_h\", 0));\n      dilation_.push_back(\n        
  OperatorBase::GetSingleArgument<int>(\"dilation_w\", 0));\n    }\n\n    if (OperatorBase::HasArgument(\"pad\")) {\n      CAFFE_ENFORCE(\n          legacy_pad_ != LegacyPadding::VALID &&\n              legacy_pad_ != LegacyPadding::SAME,\n          \"If you use legacy padding VALID or SAME, you should not specify \"\n          \"any specific padding values.\");\n      pads_.resize(4, OperatorBase::GetSingleArgument<int>(\"pad\", 0));\n    } else if (\n        OperatorBase::HasArgument(\"pad_t\") &&\n        OperatorBase::HasArgument(\"pad_l\") &&\n        OperatorBase::HasArgument(\"pad_b\") &&\n        OperatorBase::HasArgument(\"pad_r\")) {\n      CAFFE_ENFORCE(\n          legacy_pad_ != LegacyPadding::VALID &&\n              legacy_pad_ != LegacyPadding::SAME,\n          \"If you use legacy padding VALID or SAME, you should not specify \"\n          \"any specific padding values.\");\n      pads_.push_back(OperatorBase::GetSingleArgument<int>(\"pad_t\", 0));\n      pads_.push_back(OperatorBase::GetSingleArgument<int>(\"pad_l\", 0));\n      pads_.push_back(OperatorBase::GetSingleArgument<int>(\"pad_b\", 0));\n      pads_.push_back(OperatorBase::GetSingleArgument<int>(\"pad_r\", 0));\n    }\n\n    // Fill default values.\n    if (kernel_.size() == 0) {\n      kernel_.assign({0, 0});\n    }\n\n    if (stride_.size() == 0) {\n      stride_.resize(kernel_.size(), 1);\n    }\n\n    if (pads_.size() == 0) {\n      pads_.resize(kernel_.size() * 2, 0);\n    }\n\n    if (dilation_.size() == 0) {\n      dilation_.resize(kernel_.size(), 1);\n    }\n\n    CAFFE_ENFORCE_EQ(stride_.size(), kernel_.size());\n    CAFFE_ENFORCE_EQ(dilation_.size(), kernel_.size());\n\n    if (legacy_pad_ != LegacyPadding::VALID &&\n        legacy_pad_ != LegacyPadding::SAME) {\n      CAFFE_ENFORCE_EQ(pads_.size(), 2 * kernel_.size());\n    }\n\n    if (global_pooling_) {\n      for (int dim = 0; dim < kernel_.size(); ++dim) {\n        CAFFE_ENFORCE(\n            pads_[2 * dim] == 0 && pads_[2 * 
dim + 1] == 0 &&\n                dilation_[dim] == 1 && stride_[dim] == 1,\n            \"If global_pooling is set dilation and stride shouldn't be set.\");\n      }\n    }\n\n    AllocateAndCopy(kernel_, kernel_device_);\n    AllocateAndCopy(stride_, stride_device_);\n    AllocateAndCopy(dilation_, dilation_device_);\n    AllocateAndCopy(pads_, pads_device_);\n\n    // Check kernel only if we are doing conv or pooling. The reason is that a\n    // few other ops, like PadImage, are also using this base class. We really\n    // need to clean this up.\n    if (operator_def.name().find(\"Conv\") == 0 ||\n        operator_def.name().find(\"Pool\") != std::string::npos) {\n      for (int dim = 0; dim < kernel_.size(); ++dim) {\n        CAFFE_ENFORCE_GE(pads_[dim], 0);\n        CAFFE_ENFORCE_GE(pads_[kernel_.size() + dim], 0);\n        CAFFE_ENFORCE(\n            kernel_[dim],\n            \"If you are doing convolution or pooling, you will need to set \"\n            \"explicitly the kernel size.\");\n      }\n    }\n\n    for (int dim = 0; dim < kernel_.size(); ++dim) {\n      CAFFE_ENFORCE_GE(kernel_[dim], 0);\n      CAFFE_ENFORCE_GE(dilation_[dim], 0);\n      CAFFE_ENFORCE_GE(stride_[dim], 0);\n    }\n\n    if (group_ != 1) {\n      for (int dim = 0; dim < kernel_.size(); ++dim) {\n        CAFFE_ENFORCE_EQ(\n            dilation_[dim],\n            1,\n            \"When group is used, dilation should not be set at the same time.\");\n      }\n    }\n  }\n\n  // Returns the input image dimensions for the current storage order type.\n  vector<int> GetDims(const Tensor<Context>& input) {\n    vector<int> dims;\n    switch (order_) {\n      case StorageOrder::NCHW:\n        dims.assign(input.dims().begin() + 2, input.dims().end());\n        break;\n      case StorageOrder::NHWC:\n        dims.assign(input.dims().begin() + 1, input.dims().end() - 1);\n        break;\n      default:\n        CAFFE_THROW(\"Unknown storage order : \", order_);\n    }\n    return dims;\n  
}\n\n  // Returns the size of the input image for the current storage type.\n  int GetDimsSize(const Tensor<Context>& input) {\n    int size = 0;\n    switch (order_) {\n      case StorageOrder::NCHW:\n        size = std::accumulate(\n            input.dims().begin() + 2,\n            input.dims().end(),\n            1,\n            std::multiplies<int>());\n        break;\n      case StorageOrder::NHWC:\n        size = std::accumulate(\n            input.dims().begin() + 1,\n            input.dims().end() - 1,\n            1,\n            std::multiplies<int>());\n        break;\n      default:\n        CAFFE_THROW(\"Unknown storage order : \", order_);\n    }\n    return size;\n  }\n\n  // Sets the output size. The output channel is manually provided since\n  // it may not be identical to the input channels.\n  // This function can be used in the forward functions to obtain the output\n  // sizes.\n  // Note(jiayq): the templatization of this function is mainly to help\n  // implementations that do not use first-class Tensor objects, such as the\n  // MKL operator. 
One can still call this function with dummy\n  // Tensor<CPUContext> objects in order to obtain the sizes.\n  template <typename AlternativeContext>\n  void SetOutputSize(\n      const Tensor<AlternativeContext>& input,\n      Tensor<AlternativeContext>* output,\n      int output_channel) {\n    CAFFE_ENFORCE(input.size() > 0);\n    vector<int> output_dims;\n    int N = input.dim32(0);\n    bool channel_first;\n    InferOutputSize(\n        input.dims(),\n        output_channel,\n        order_,\n        global_pooling_,\n        legacy_pad_,\n        N,\n        kernel_,\n        output_dims,\n        dilation_,\n        stride_,\n        pads_,\n        channel_first);\n\n    if (channel_first) {\n      output_dims.insert(output_dims.begin(), {N, output_channel});\n    } else {\n      output_dims.insert(output_dims.begin(), N);\n      output_dims.push_back(output_channel);\n    }\n    output->Resize(output_dims);\n  }\n\n  // Helper function that is also called from OperatorSchema. Modified\n  // kernel parameters and output output_dims and channel_first.\n  static inline void InferOutputSize(\n      vector<TIndex> input_dims,\n      int /*output_channel*/,\n      StorageOrder order,\n      bool global_pooling,\n      LegacyPadding legacy_pad,\n      int /*N*/,\n      vector<int>& kernel,\n      vector<int>& output_dims,\n      vector<int> dilation,\n      vector<int> stride,\n      vector<int> pads,\n      bool& channel_first) {\n    channel_first = false; // initialized to suppress compiler warning.\n    vector<TIndex> dims;\n    switch (order) {\n      case StorageOrder::NHWC:\n        channel_first = false;\n        dims.assign(input_dims.begin() + 1, input_dims.end() - 1);\n        break;\n      case StorageOrder::NCHW:\n        // Old Caffe order.\n        channel_first = true;\n        dims.assign(input_dims.begin() + 2, input_dims.end());\n        break;\n      default:\n        CAFFE_THROW(\"Unknown Storage order: \", order);\n    }\n\n    if 
(global_pooling) {\n      kernel.assign(dims.begin(), dims.end());\n      output_dims.assign(dims.size(), 1);\n    } else {\n      for (int dim = 0; dim < dims.size(); ++dim) {\n        int dim_size = 0;\n        ComputeSizeAndPad(\n            dims[dim],\n            stride[dim],\n            kernel[dim],\n            dilation[dim],\n            legacy_pad,\n            &pads[dim],\n            &pads[dims.size() + dim],\n            &dim_size);\n        output_dims.push_back(dim_size);\n      }\n    }\n\n  }\n\n  // ComputePads could be used in backward functions to figure out the padding\n  // values for the given input.\n  void ComputePads(const vector<int>& dims) {\n    if (global_pooling_) {\n      kernel_ = dims;\n    } else if (legacy_pad_ != LegacyPadding::NOTSET) {\n      int output_unused;\n      for (int dim = 0; dim < dims.size(); ++dim) {\n        ComputeSizeAndPad(\n            dims[dim],\n            stride_[dim],\n            kernel_[dim],\n            dilation_[dim],\n            legacy_pad_,\n            &pads_[dim],\n            &pads_[dims.size() + dim],\n            &output_unused);\n      }\n    }\n  }\n\n  bool RunOnDevice() override {\n    if (!global_pooling_) {\n      for (int dim = 0; dim < kernel_.size(); ++dim) {\n        CAFFE_ENFORCE_GT(kernel_[dim], 0);\n      }\n    }\n    switch (order_) {\n      case StorageOrder::NHWC:\n        // VLOG(2) << \"Running NHWC\";\n        return RunOnDeviceWithOrderNHWC();\n      case StorageOrder::NCHW:\n        // VLOG(2) << \"Running NCHW\";\n        return RunOnDeviceWithOrderNCHW();\n      default:\n        CAFFE_THROW(\"Unknown Storage order: \", order_);\n    }\n  }\n\n  // The actual function that does the computation, if the different\n  // storage order leads to different implementations.\n  virtual bool RunOnDeviceWithOrderNHWC() {\n    CAFFE_NOT_IMPLEMENTED;\n  }\n  virtual bool RunOnDeviceWithOrderNCHW() {\n    CAFFE_NOT_IMPLEMENTED;\n  }\n\n  static struct OpSchema::Cost 
CostInferenceForConv(\n      const OperatorDef& def,\n      const vector<TensorShape>& inputs) {\n    struct OpSchema::Cost c;\n    const TensorShape X = inputs[0];\n    const TensorShape W = inputs[1];\n\n    ArgumentHelper helper(def);\n    const auto order =\n        StringToStorageOrder(helper.GetSingleArgument<string>(\"order\", \"NCHW\"));\n\n    unsigned long long X_h;\n    unsigned long long X_w;\n    unsigned long long kernel_h;\n    unsigned long long kernel_w;\n    unsigned long long in_channels;\n    unsigned long long out_channels;\n    if (order == StorageOrder::NHWC) {\n      X_h = X.dims(1);\n      X_w = X.dims(2);\n      kernel_h = W.dims(1);\n      kernel_w = W.dims(2);\n      in_channels = W.dims(3);\n      out_channels = W.dims(0);\n    } else {\n      X_h = X.dims(2);\n      X_w = X.dims(3);\n      kernel_h = W.dims(2);\n      kernel_w = W.dims(3);\n      in_channels = W.dims(1);\n      out_channels = W.dims(0);\n    }\n    c.flops = (X_h - kernel_h + 1) * (X_w - kernel_w + 1) * kernel_w *\n        kernel_h * in_channels * out_channels * 2;\n    return c;\n  }\n\n  static vector<TensorShape> TensorInferenceForSchema(\n      const OperatorDef& def,\n      const vector<TensorShape>& in,\n      int output_channel) {\n    ArgumentHelper helper(def);\n    CAFFE_ENFORCE_GT(in.size(), 0);\n    CAFFE_ENFORCE_GT(in[0].dims_size(), 0);\n    int N = in[0].dims(0);\n    bool channel_first;\n\n    vector<int> pads = helper.GetRepeatedArgument<int>(\"pads\");\n    vector<int> kernel = helper.GetRepeatedArgument<int>(\"kernels\");\n    vector<int> strides = helper.GetRepeatedArgument<int>(\"strides\");\n    vector<int> dilations = helper.GetRepeatedArgument<int>(\"dilation\");\n\n    if (helper.HasArgument(\"pad\")) {\n      pads.resize(4, helper.GetSingleArgument<int>(\"pad\", 0));\n    } else if (\n        helper.HasArgument(\"pad_t\") && helper.HasArgument(\"pad_l\") &&\n        helper.HasArgument(\"pad_b\") && helper.HasArgument(\"pad_r\")) {\n      
pads.push_back(helper.GetSingleArgument<int>(\"pad_t\", 0));\n      pads.push_back(helper.GetSingleArgument<int>(\"pad_l\", 0));\n      pads.push_back(helper.GetSingleArgument<int>(\"pad_b\", 0));\n      pads.push_back(helper.GetSingleArgument<int>(\"pad_r\", 0));\n    }\n\n    if (helper.HasArgument(\"kernel\")) {\n      kernel.resize(2, helper.GetSingleArgument<int>(\"kernel\", 1));\n    } else if (\n        helper.HasArgument(\"kernel_h\") && helper.HasArgument(\"kernel_w\")) {\n      kernel.push_back(helper.GetSingleArgument<int>(\"kernel_h\", 1));\n      kernel.push_back(helper.GetSingleArgument<int>(\"kernel_w\", 1));\n    }\n\n    if (helper.HasArgument(\"stride\")) {\n      strides.resize(2, helper.GetSingleArgument<int>(\"stride\", 1));\n    } else if (\n        helper.HasArgument(\"stride_h\") && helper.HasArgument(\"stride_w\")) {\n      strides.push_back(helper.GetSingleArgument<int>(\"stride_h\", 1));\n      strides.push_back(helper.GetSingleArgument<int>(\"stride_w\", 1));\n    }\n\n    if (helper.HasArgument(\"dilation\")) {\n      dilations.resize(2, helper.GetSingleArgument<int>(\"dilation\", 1));\n    } else if (\n        helper.HasArgument(\"dilation_h\") && helper.HasArgument(\"dilation_w\")) {\n      dilations.push_back(helper.GetSingleArgument<int>(\"dilation_h\", 1));\n      dilations.push_back(helper.GetSingleArgument<int>(\"dilation_w\", 1));\n    }\n\n    auto check_and_set_default_value = [](\n        vector<int>& vec, int size, int value) {\n      if (vec.size() == 0) {\n        vec.resize(size, value);\n      }\n    };\n\n    check_and_set_default_value(pads, 4, 0);\n    check_and_set_default_value(kernel, 2, 1);\n    check_and_set_default_value(strides, 2, 1);\n    check_and_set_default_value(dilations, 2, 1);\n\n    vector<int> output_dims;\n    ConvPoolOpBase<CPUContext>::InferOutputSize(\n        GetDimsVector(in[0]),\n        output_channel,\n        StringToStorageOrder(helper.GetSingleArgument<string>(\"order\", \"NCHW\")),\n        
helper.GetSingleArgument<int>(\"global_pooling\", 0),\n        static_cast<LegacyPadding>(\n            helper.GetSingleArgument<int>(\"legacy_pad\", LegacyPadding::NOTSET)),\n        N,\n        kernel,\n        output_dims,\n        dilations,\n        strides,\n        pads,\n        channel_first);\n    vector<TensorShape> out(1);\n    if (channel_first) {\n      output_dims.insert(output_dims.begin(), {N, output_channel});\n    } else {\n      output_dims.push_back(output_channel);\n      output_dims.insert(output_dims.begin(), N);\n   }\n\n   out[0] = CreateTensorShape(output_dims, TensorProto::FLOAT);\n   return out;\n }\n\n static vector<TensorShape> TensorInferenceForConv(\n   const OperatorDef& def,\n   const vector<TensorShape>& in) {\n   return TensorInferenceForSchema(def, in, in[1].dims(0));\n }\n\n static vector<TensorShape> TensorInferenceForPool(\n     const OperatorDef& def,\n     const vector<TensorShape>& in) {\n   ArgumentHelper helper(def);\n   auto order =\n       StringToStorageOrder(helper.GetSingleArgument<string>(\"order\", \"NCHW\"));\n   int num_channels =\n       (order == StorageOrder::NCHW ? 
in[0].dims(1) : in[0].dims(3));\n   return TensorInferenceForSchema(def, in, num_channels);\n }\n\n virtual ~ConvPoolOpBase() {}\n\nprotected:\n LegacyPadding legacy_pad_;\n bool global_pooling_;\n vector<int> kernel_;\n vector<int> dilation_;\n vector<int> stride_;\n vector<int> pads_;\n\n // We need the above parameters to be available for the devices.\n Tensor<Context> kernel_device_;\n Tensor<Context> dilation_device_;\n Tensor<Context> stride_device_;\n Tensor<Context> pads_device_;\n\n int group_;\n StorageOrder order_;\n bool shared_buffer_;\n Workspace* ws_;\n\n static inline void ComputeSizeAndPad(\n     const int in_size,\n     const int stride,\n     const int kernel,\n     const int dilation,\n     LegacyPadding legacy_pad,\n     int* pad_head,\n     int* pad_tail,\n     int* out_size) {\n   const int dkernel = dilation * (kernel - 1) + 1;\n   switch (legacy_pad) {\n     case LegacyPadding::NOTSET:\n       // We will just use the direct padding head and tail values, but we\n       // will verify that they are non-negative.\n       CAFFE_ENFORCE_GE(in_size + *pad_head + *pad_tail, dkernel);\n       *out_size = static_cast<int>(\n           static_cast<float>(in_size + *pad_head + *pad_tail - dkernel) /\n               stride +\n           1);\n       break;\n     case LegacyPadding::VALID:\n       *pad_head = 0;\n       *pad_tail = 0;\n       *out_size = (in_size - dkernel) / stride + 1;\n       break;\n     case LegacyPadding::SAME: {\n       CAFFE_ENFORCE(\n           1 == dilation, \"Dilation not supported for legacy padding.\");\n       int legacy_target_size = (in_size + stride - 1) / stride;\n       int pad_needed = (legacy_target_size - 1) * stride + kernel - in_size;\n       if (CAFFE2_PAD_HEAD_MORE) {\n         *pad_head = (pad_needed + 1) / 2;\n       } else {\n         *pad_head = pad_needed / 2;\n       }\n       *pad_tail = pad_needed - *pad_head;\n       *out_size = (in_size + pad_needed - dkernel) / stride + 1;\n       break;\n     }\n     
case LegacyPadding::CAFFE_LEGACY_POOLING:\n       // This is in order to adapt Caffe's pooling padding case. In this case,\n       // we will only use pad_head and will compute pad_tail to match the\n       // old caffe pooling strategy. Also see caffe2_legacy.proto for more\n       // details.\n       CAFFE_ENFORCE_GE(*pad_head, 0);\n       // Here, notice that caffe casts UP while caffe2 casts DOWN for the\n       // output size computation.\n       *out_size = std::ceil(\n           static_cast<float>(in_size + *pad_head * 2 - kernel) / stride + 1);\n       // If we have padding, caffe also ensures that the last pooling starts\n       // strictly inside the image (instead of at the padding); otherwise clip\n       // the last.\n       if (*pad_head > 0 && (*out_size - 1) * stride >= in_size + *pad_head) {\n         --*out_size;\n       }\n       // Now, compare the output size with the standard Caffe2 output size.\n       // The\n       // caffe2 standard output size should always be no larger than the\n       // output\n       // size of caffe.\n       int standard_out_size = static_cast<int>(\n           static_cast<float>(in_size + *pad_head * 2 - kernel) / stride + 1);\n       CAFFE_ENFORCE_GE(\n           *out_size,\n           standard_out_size,\n           \"This should never happen. If this happens, double check the logic \"\n           \"above.\");\n       if (*out_size > standard_out_size) {\n         LOG(WARNING)\n             << \"You are hitting a case where Caffe's legacy padding calculation \"\n                \"is hit. This leads to inefficient and sometimes incorrect \"\n                \"results. 
We are keeping this behavior for backward compatibility\"\n                \", but you are strongly recommended to move away from it.\";\n       }\n       *pad_tail = *pad_head + stride * (*out_size - standard_out_size);\n       break;\n   }\n  }\n\n  // Accessors for 2D conv params.\n\n  inline int pad_t() const {\n    return pads_[0];\n  }\n\n  inline int pad_l() const {\n    return pads_[1];\n  }\n\n  inline int pad_b() const {\n    return pads_[2];\n  }\n\n  inline int pad_r() const {\n    return pads_[3];\n  }\n\n  inline int kernel_h() const {\n    return kernel_[0];\n  }\n\n  inline int kernel_w() const {\n    return kernel_[1];\n  }\n\n  inline int stride_h() const {\n    return stride_[0];\n  }\n\n  inline int stride_w() const {\n    return stride_[1];\n  }\n\n  inline int dilation_h() const {\n    return dilation_[0];\n  }\n\n  inline int dilation_w() const {\n    return dilation_[1];\n  }\n\n private:\n inline void AllocateAndCopy(const vector<int>& vec, Tensor<Context>& tensor) {\n      tensor.Resize(vec.size());\n      context_.template Copy<int, CPUContext, Context>(\n          vec.size(), vec.data(), tensor.template mutable_data<int>());\n }\n\n#define USE_CONV_POOL_BASE_FUNCTIONS(Context)      \\\n  USE_OPERATOR_FUNCTIONS(Context);                 \\\n  using ConvPoolOpBase<Context>::pads_;            \\\n  using ConvPoolOpBase<Context>::pads_device_;     \\\n  using ConvPoolOpBase<Context>::pad_t;            \\\n  using ConvPoolOpBase<Context>::pad_l;            \\\n  using ConvPoolOpBase<Context>::pad_b;            \\\n  using ConvPoolOpBase<Context>::pad_r;            \\\n  using ConvPoolOpBase<Context>::legacy_pad_;      \\\n  using ConvPoolOpBase<Context>::global_pooling_;  \\\n  using ConvPoolOpBase<Context>::kernel_;          \\\n  using ConvPoolOpBase<Context>::kernel_device_;   \\\n  using ConvPoolOpBase<Context>::kernel_h;         \\\n  using ConvPoolOpBase<Context>::kernel_w;         \\\n  using ConvPoolOpBase<Context>::dilation_;        
\\\n  using ConvPoolOpBase<Context>::dilation_device_; \\\n  using ConvPoolOpBase<Context>::dilation_h;       \\\n  using ConvPoolOpBase<Context>::dilation_w;       \\\n  using ConvPoolOpBase<Context>::stride_;          \\\n  using ConvPoolOpBase<Context>::stride_device_;   \\\n  using ConvPoolOpBase<Context>::stride_h;         \\\n  using ConvPoolOpBase<Context>::stride_w;         \\\n  using ConvPoolOpBase<Context>::group_;           \\\n  using ConvPoolOpBase<Context>::order_;           \\\n  using ConvPoolOpBase<Context>::shared_buffer_;   \\\n  using ConvPoolOpBase<Context>::GetDims;          \\\n  using ConvPoolOpBase<Context>::GetDimsSize;      \\\n  using ConvPoolOpBase<Context>::ws_\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_CONV_POOL_OP_BASE_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/conv_transpose_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_CONV_TRANSPOSE_OP_H_\n#define CAFFE2_OPERATORS_CONV_TRANSPOSE_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/conv_transpose_unpool_op_base.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass ConvTransposeOp final : public ConvTransposeUnpoolBase<Context> {\n public:\n  USE_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS(Context);\n  ConvTransposeOp(const OperatorDef& operator_def, Workspace* ws)\n      : ConvTransposeUnpoolBase<Context>(operator_def, ws) {}\n\n  bool RunOnDeviceWithOrderNCHW() override;\n  bool RunOnDeviceWithOrderNHWC() override;\n\n private:\n  Tensor<Context> col_buffer_;\n  Tensor<Context> bias_multiplier_;\n  // Input: X, W, b\n  // Output: Y\n  INPUT_TAGS(INPUT, FILTER, BIAS);\n};\n\ntemplate <typename T, class Context>\nclass ConvTransposeGradientOp final : public ConvTransposeUnpoolBase<Context> {\n public:\n  USE_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS(Context);\n  ConvTransposeGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : ConvTransposeUnpoolBase<Context>(operator_def, ws),\n        no_bias_(OperatorBase::GetSingleArgument<bool>(\"no_bias\", false)) {\n    CAFFE_ENFORCE(\n        !(no_bias_ && OutputSize() == 3),\n        \"If bias is not present, you should not have 3 grad output.\");\n  }\n\n  bool RunOnDeviceWithOrderNCHW() override;\n  bool RunOnDeviceWithOrderNHWC() override;\n\n private:\n  Tensor<Context> col_buffer_;\n  Tensor<Context> bias_multiplier_;\n  const bool no_bias_;\n  // input: X, W, dY\n  // output: dW, optionally db and dX\n  INPUT_TAGS(INPUT, FILTER, OUTPUT_GRAD);\n  OUTPUT_TAGS(FILTER_GRAD, BIAS_OR_INPUT_GRAD, INPUT_GRAD);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_CONV_TRANSPOSE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/conv_transpose_op_impl.h",
    "content": "// conv_transpose_op_impl.h is the templated implementation of the\n// conv_transpose_op.h file.\n#ifndef CAFFE2_OPERATORS_CONV_TRANSPOSE_OP_IMPL_H_\n#define CAFFE2_OPERATORS_CONV_TRANSPOSE_OP_IMPL_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/conv_op_shared.h\"\n#include \"caffe2/operators/conv_transpose_op.h\"\n#include \"caffe2/operators/conv_transpose_unpool_op_base.h\"\n#include \"caffe2/utils/math.h\"\n\nCAFFE2_DECLARE_bool(caffe2_force_shared_col_buffer);\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nbool ConvTransposeOp<T, Context>::RunOnDeviceWithOrderNCHW() {\n  const Tensor<Context>& X = Input(INPUT);\n  auto& filter = Input(FILTER);\n  Tensor<Context>* Y = Output(0);\n  const int N = X.dim32(0), M = X.dim32(1), H = X.dim32(2), W = X.dim32(3);\n  CAFFE_ENFORCE(filter.ndim() == 4, \"filter must be 4D tensor\");\n  CAFFE_ENFORCE(\n      filter.dim32(0) == M,\n      \"filter number must be equal to input channel number\");\n  const int C = filter.dim32(1);\n  CAFFE_ENFORCE(\n      filter.dim32(2) == this->kernel_h(),\n      \"filter height must be equal to kernel height\");\n  CAFFE_ENFORCE(\n      filter.dim32(3) == this->kernel_w(),\n      \"filter width must be equal to kernel width\");\n  ConvTransposeUnpoolBase<Context>::SetOutputSize(X, Y, C);\n\n  const int kernel_dim = C * this->kernel_h() * this->kernel_w();\n  const int input_image_size = H * W;\n  const int output_image_size = Y->dim32(2) * Y->dim32(3);\n\n#ifndef __ARM_NEON__\n  if (InputSize() == 3) {\n    auto& bias = Input(BIAS);\n    CAFFE_ENFORCE(bias.ndim() == 1, \"bias must be 1D tensor\");\n    CAFFE_ENFORCE(\n        bias.dim32(0) == C,\n        \"bias dimension must be equal to output channel number\");\n    if (bias_multiplier_.size() != output_image_size) {\n      bias_multiplier_.Resize(vector<TIndex>(1, output_image_size));\n      T* bm_data = 
bias_multiplier_.template mutable_data<T>();\n      math::Set<T, Context>(\n          output_image_size,\n          static_cast<T>(1),\n          bm_data,\n          &context_);\n    }\n  }\n#endif // !__ARM_NEON__\n\n  const T* Xdata = X.template data<T>();\n  const T* filter_data = filter.template data<T>();\n  T* Ydata = Y->template mutable_data<T>();\n\n  auto f = [&](Tensor<Context>* col_buffer) {\n    col_buffer->Resize(\n        vector<TIndex>{C, this->kernel_h(), this->kernel_w(), H, W});\n    T* col_buffer_data = col_buffer->template mutable_data<T>();\n    for (auto image_id = 0; image_id < N; ++image_id) {\n      // Weight term\n      math::Gemm<T, Context>(\n          CblasTrans,\n          CblasNoTrans,\n          kernel_dim,\n          input_image_size,\n          M,\n          1,\n          filter_data,\n          Xdata,\n          0,\n          col_buffer_data,\n          &context_);\n\n      // Col2im\n      math::Col2im<T, Context, StorageOrder::NCHW>(\n          col_buffer_data,\n          C,\n          Y->dim32(2),\n          Y->dim32(3),\n          this->kernel_h(),\n          this->kernel_w(),\n          1,\n          1,\n          this->pad_t(),\n          this->pad_l(),\n          this->pad_b(),\n          this->pad_r(),\n          this->stride_h(),\n          this->stride_w(),\n          Ydata,\n          &context_);\n\n      // Bias term\n      if (InputSize() == 3) {\n        const T* bias_data = Input(BIAS).template data<T>();\n#ifndef __ARM_NEON__\n        const T* bm_data = bias_multiplier_.template data<T>();\n        math::Gemm<T, Context>(\n            CblasNoTrans,\n            CblasNoTrans,\n            C,\n            output_image_size,\n            1,\n            1,\n            bias_data,\n            bm_data,\n            1,\n            Ydata,\n            &context_);\n#else\n        math::BiasCHW<T, Context>(\n            bias_data,\n            C,\n            output_image_size,\n            Ydata,\n            
&context_);\n#endif // !__ARM_NEON__\n      }\n\n      Xdata += M * H * W;\n      Ydata += Y->size() / Y->dim32(0);\n    }\n  };\n  if (FLAGS_caffe2_force_shared_col_buffer || shared_buffer_) {\n    runWithSharedBuffer<Context>(ws_, f);\n  } else {\n    f(&col_buffer_);\n  }\n  return true;\n}\n\ntemplate <typename T, class Context>\nbool ConvTransposeOp<T, Context>::RunOnDeviceWithOrderNHWC() {\n  const Tensor<Context>& X = Input(INPUT);\n  auto& filter = Input(FILTER);\n  Tensor<Context>* Y = Output(0);\n  const auto N = X.dim32(0), H = X.dim32(1), W = X.dim32(2), M = X.dim32(3);\n  CAFFE_ENFORCE(filter.ndim() == 4, \"filter must be 4D tensor\");\n  CAFFE_ENFORCE(\n      filter.dim32(0) == M,\n      \"filter number must be equal to input channel number\");\n  CAFFE_ENFORCE(\n      filter.dim32(1) == this->kernel_h(),\n      \"filter height must be equal to kernel height\");\n  CAFFE_ENFORCE(\n      filter.dim32(2) == this->kernel_w(),\n      \"filter width must be equal to kernel width\");\n  const int C = filter.dim32(3);\n  ConvTransposeUnpoolBase<Context>::SetOutputSize(X, Y, C);\n\n  const auto kernel_dim = C * this->kernel_h() * this->kernel_w();\n  const auto input_image_size = H * W;\n  const auto output_image_size = Y->dim32(1) * Y->dim32(2);\n\n  if (InputSize() == 3) {\n    auto& bias = Input(BIAS);\n    CAFFE_ENFORCE(bias.ndim() == 1, \"bias must be 1D tensor\");\n    CAFFE_ENFORCE(\n        bias.dim32(0) == C,\n        \"bias dimension must be equal to output channel number\");\n    if (bias_multiplier_.size() != output_image_size) {\n      bias_multiplier_.Resize(vector<TIndex>(1, output_image_size));\n      T* bm_data = bias_multiplier_.template mutable_data<T>();\n      math::Set<T, Context>(\n          output_image_size,\n          static_cast<T>(1),\n          bm_data,\n          &context_);\n    }\n  }\n  const T* Xdata = X.template data<T>();\n  const T* filter_data = filter.template data<T>();\n  T* Ydata = Y->template mutable_data<T>();\n\n  
auto f = [&](Tensor<Context>* /*col_buffer*/) {\n    col_buffer_.Resize(\n        vector<TIndex>{H, W, this->kernel_h(), this->kernel_w(), C});\n    T* col_buffer_data = col_buffer_.template mutable_data<T>();\n    for (auto image_id = 0; image_id < N; ++image_id) {\n      // Weight term\n      math::Gemm<T, Context>(\n          CblasNoTrans,\n          CblasNoTrans,\n          input_image_size,\n          kernel_dim,\n          M,\n          1,\n          Xdata,\n          filter_data,\n          0,\n          col_buffer_data,\n          &context_);\n      // Col2im\n      math::Col2im<T, Context, StorageOrder::NHWC>(\n          col_buffer_data,\n          C,\n          Y->dim32(1),\n          Y->dim32(2),\n          this->kernel_h(),\n          this->kernel_w(),\n          1,\n          1,\n          this->pad_t(),\n          this->pad_l(),\n          this->pad_b(),\n          this->pad_r(),\n          this->stride_h(),\n          this->stride_w(),\n          Ydata,\n          &context_);\n      // Bias term\n      if (InputSize() == 3) {\n        const T* bm_data = bias_multiplier_.template data<T>();\n        const T* bias_data = Input(BIAS).template data<T>();\n        math::Gemm<T, Context>(\n            CblasNoTrans,\n            CblasNoTrans,\n            output_image_size,\n            C,\n            1,\n            1,\n            bm_data,\n            bias_data,\n            1,\n            Ydata,\n            &context_);\n      }\n      Xdata += M * H * W;\n      Ydata += Y->size() / Y->dim32(0);\n    }\n  };\n  if (FLAGS_caffe2_force_shared_col_buffer || shared_buffer_) {\n    runWithSharedBuffer<Context>(ws_, f);\n  } else {\n    f(&col_buffer_);\n  }\n  return true;\n}\n\ntemplate <typename T, class Context>\nbool ConvTransposeGradientOp<T, Context>::RunOnDeviceWithOrderNCHW() {\n  auto& X = Input(INPUT);\n  auto& filter = Input(FILTER);\n  auto& dY = Input(OUTPUT_GRAD);\n  auto* dfilter = Output(FILTER_GRAD);\n  const int N = X.dim32(0), M = 
X.dim32(1), H = X.dim32(2), W = X.dim32(3);\n  // We only handle LegacyPadding::NOTSET case and ignore cases of\n  // LegacyPadding::VALID and LegacyPadding::SAME\n  // Thus, we don't need to manually compute padding values\n  // We simply use the values from the user\n  CAFFE_ENFORCE(filter.ndim() == 4);\n  const int C = filter.dim32(1);\n  CAFFE_ENFORCE(\n      filter.dim32(2) == this->kernel_h(),\n      \"filter height must be equal to kernel height\");\n  CAFFE_ENFORCE(\n      filter.dim32(3) == this->kernel_w(),\n      \"filter width must be equal to kernel width\");\n  dfilter->ResizeLike(filter);\n\n  const int kernel_dim = C * this->kernel_h() * this->kernel_w();\n  const int output_image_size = dY.dim32(2) * dY.dim32(3);\n  // The col buffer is stored in CHW order as well\n  col_buffer_.Resize(\n      vector<TIndex>{C, this->kernel_h(), this->kernel_w(), H, W});\n  if (!no_bias_) {\n    auto* dbias = Output(BIAS_OR_INPUT_GRAD);\n    dbias->Resize(C);\n    if (bias_multiplier_.size() != output_image_size) {\n      bias_multiplier_.Resize(1, output_image_size);\n      T* bm_data = bias_multiplier_.template mutable_data<T>();\n      math::Set<T, Context>(\n          output_image_size,\n          static_cast<T>(1),\n          bm_data,\n          &context_);\n    }\n  }\n  T* col_buffer_data = col_buffer_.template mutable_data<T>();\n  const T* Xdata = X.template data<T>();\n  const T* filter_data = filter.template data<T>();\n  const T* dYdata = dY.template data<T>();\n  T* dfilter_data = dfilter->template mutable_data<T>();\n  // Pre-setting the gradients to zero\n  math::Set<T, Context>(dfilter->size(), 0, dfilter_data, &context_);\n  if (!no_bias_) {\n    auto* dbias = Output(BIAS_OR_INPUT_GRAD);\n    T* dbias_data = dbias->template mutable_data<T>();\n    math::Set<T, Context>(dbias->size(), 0, dbias_data, &context_);\n  }\n  for (auto image_id = 0; image_id < N; ++image_id) {\n    // gradient w.r.t. filters. 
Im2col followed by Gemm\n    // Im2col.\n    math::Im2col<T, Context, StorageOrder::NCHW>(\n        dYdata,\n        C,\n        dY.dim32(2),\n        dY.dim32(3),\n        this->kernel_h(),\n        this->kernel_w(),\n        1,\n        1,\n        this->pad_t(),\n        this->pad_l(),\n        this->pad_b(),\n        this->pad_r(),\n        this->stride_h(),\n        this->stride_w(),\n        col_buffer_data,\n        &context_);\n    // Gemm\n    math::Gemm<T, Context>(\n        CblasNoTrans,\n        CblasTrans,\n        M,\n        kernel_dim,\n        H * W,\n        1,\n        Xdata,\n        col_buffer_data,\n        1,\n        dfilter_data,\n        &context_);\n    // gradient w.r.t. bias\n    if (!no_bias_) {\n      const T* bm_data = bias_multiplier_.template data<T>();\n      T* input_grad_data = Output(BIAS_OR_INPUT_GRAD)->template mutable_data<T>();\n      math::Gemm<T, Context>(\n          CblasNoTrans,\n          CblasNoTrans,\n          C,\n          1,\n          output_image_size,\n          1,\n          dYdata,\n          bm_data,\n          1,\n          input_grad_data,\n          &context_);\n    }\n    dYdata += dY.size() / dY.dim32(0);\n    Xdata += X.size() / X.dim32(0);\n  }\n  if (OutputSize() == 3 || (no_bias_ && (OutputSize() == 2))) {\n    // Compute gradients w.r.t. the input\n    // Since we have changed dYdata in the above loop, we will need to reset.\n    dYdata = dY.template data<T>();\n    auto* dX = Output(no_bias_ ? 
BIAS_OR_INPUT_GRAD : INPUT_GRAD);\n    dX->ResizeLike(X);\n    T* dXdata = dX->template mutable_data<T>();\n    for (auto image_id = 0; image_id < N; ++image_id) {\n      // Im2col.\n      // TODO(zyan3): Probably duplicate work as in gradient computation\n      // w.r.t filters\n      math::Im2col<T, Context, StorageOrder::NCHW>(\n          dYdata,\n          C,\n          dY.dim32(2),\n          dY.dim32(3),\n          this->kernel_h(),\n          this->kernel_w(),\n          1,\n          1,\n          this->pad_t(),\n          this->pad_l(),\n          this->pad_b(),\n          this->pad_r(),\n          this->stride_h(),\n          this->stride_w(),\n          col_buffer_data,\n          &context_);\n      // Gemm\n      math::Gemm<T, Context>(\n          CblasNoTrans,\n          CblasNoTrans,\n          M,\n          H * W,\n          kernel_dim,\n          1,\n          filter_data,\n          col_buffer_data,\n          0,\n          dXdata,\n          &context_);\n      dYdata += dY.size() / dY.dim32(0);\n      dXdata += X.size() / X.dim32(0);\n    }\n  }\n  return true;\n}\n\ntemplate <typename T, class Context>\nbool ConvTransposeGradientOp<T, Context>::RunOnDeviceWithOrderNHWC() {\n  auto& X = Input(INPUT);\n  auto& filter = Input(FILTER);\n  auto& dY = Input(OUTPUT_GRAD);\n  auto* dfilter = Output(FILTER_GRAD);\n  const int N = X.dim32(0), H = X.dim32(1), W = X.dim32(2), M = X.dim32(3);\n  // We only handle LegacyPadding::NOTSET case and ignore cases of\n  // LegacyPadding::VALID and LegacyPadding::SAME\n  // Thus, we don't need to manually compute padding values\n  // We simply use the values from the user\n  CAFFE_ENFORCE(filter.ndim() == 4, \"filter must be 4D tensor\");\n  CAFFE_ENFORCE(\n      filter.dim32(1) == this->kernel_h(),\n      \"filter height must be equal to kernel height\");\n  CAFFE_ENFORCE(\n      filter.dim32(2) == this->kernel_w(),\n      \"filter width must be equal to kernel width\");\n  const int C = filter.dim32(3);\n  
dfilter->ResizeLike(filter);\n\n  const int kernel_dim = C * this->kernel_h() * this->kernel_w();\n  const int output_image_size = dY.dim32(1) * dY.dim32(2);\n  // The col buffer is stored in HWC order as well\n  col_buffer_.Resize(\n      vector<TIndex>{H, W, this->kernel_h(), this->kernel_w(), C});\n  if (!no_bias_) {\n    auto* dbias = Output(BIAS_OR_INPUT_GRAD);\n    dbias->Resize(C);\n    if (bias_multiplier_.size() != output_image_size) {\n      bias_multiplier_.Resize(1, output_image_size);\n      T* bm_data = bias_multiplier_.template mutable_data<T>();\n      math::Set<T, Context>(\n          output_image_size,\n          static_cast<T>(1),\n          bm_data,\n          &context_);\n    }\n  }\n  T* col_buffer_data = col_buffer_.template mutable_data<T>();\n  const T* Xdata = X.template data<T>();\n  const T* filter_data = filter.template data<T>();\n  const T* dYdata = dY.template data<T>();\n  T* dfilter_data = dfilter->template mutable_data<T>();\n  // Pre-setting the gradients to zero\n  math::Set<T, Context>(dfilter->size(), 0, dfilter_data, &context_);\n  if (!no_bias_) {\n    auto* dbias = Output(BIAS_OR_INPUT_GRAD);\n    T* dbias_data = dbias->template mutable_data<T>();\n    math::Set<T, Context>(dbias->size(), 0, dbias_data, &context_);\n  }\n  for (auto image_id = 0; image_id < N; ++image_id) {\n    // gradient w.r.t. filters. 
Im2col followed by Gemm\n    // Im2col.\n    math::Im2col<T, Context, StorageOrder::NHWC>(\n        dYdata,\n        C,\n        dY.dim32(1),\n        dY.dim32(2),\n        this->kernel_h(),\n        this->kernel_w(),\n        1,\n        1,\n        this->pad_t(),\n        this->pad_l(),\n        this->pad_b(),\n        this->pad_r(),\n        this->stride_h(),\n        this->stride_w(),\n        col_buffer_data,\n        &context_);\n    // Gemm\n    math::Gemm<T, Context>(\n        CblasTrans,\n        CblasNoTrans,\n        M,\n        kernel_dim,\n        H * W,\n        1,\n        Xdata,\n        col_buffer_data,\n        1,\n        dfilter_data,\n        &context_);\n    // gradients w.r.t. bias\n    if (!no_bias_) {\n      const T* bm_data = bias_multiplier_.template data<T>();\n      T* input_grad_data = Output(BIAS_OR_INPUT_GRAD)->template mutable_data<T>();\n      math::Gemm<T, Context>(\n          CblasTrans,\n          CblasNoTrans,\n          C,\n          1,\n          output_image_size,\n          1,\n          dYdata,\n          bm_data,\n          1,\n          input_grad_data,\n          &context_);\n    }\n    dYdata += dY.size() / dY.dim32(0);\n    Xdata += X.size() / X.dim32(0);\n  }\n  if (OutputSize() == 3 || (no_bias_ && (OutputSize() == 2))) {\n    // Compute gradients w.r.t. the input\n    // Since we have changed dYdata in the above loop, we will need to reset.\n    dYdata = dY.template data<T>();\n    auto* dX = Output(no_bias_ ? 
BIAS_OR_INPUT_GRAD : INPUT_GRAD);\n    dX->ResizeLike(X);\n    T* dXdata = dX->template mutable_data<T>();\n    for (auto image_id = 0; image_id < N; ++image_id) {\n      // Im2col.\n      // TODO(zyan3): Probably duplicate work as in gradient computation\n      // w.r.t filters\n      math::Im2col<T, Context, StorageOrder::NHWC>(\n          dYdata,\n          C,\n          dY.dim32(1),\n          dY.dim32(2),\n          this->kernel_h(),\n          this->kernel_w(),\n          1,\n          1,\n          this->pad_t(),\n          this->pad_l(),\n          this->pad_b(),\n          this->pad_r(),\n          this->stride_h(),\n          this->stride_w(),\n          col_buffer_data,\n          &context_);\n      // Gemm\n      math::Gemm<T, Context>(\n          CblasNoTrans,\n          CblasTrans,\n          H * W,\n          M,\n          kernel_dim,\n          1,\n          col_buffer_data,\n          filter_data,\n          0,\n          dXdata,\n          &context_);\n      dYdata += dY.size() / dY.dim32(0);\n      dXdata += X.size() / X.dim32(0);\n    }\n  }\n  return true;\n}\n\n} // namespace caffe2\n#endif // CAFFE2_OPERATORS_CONV_TRANSPOSE_OP_IMPL_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/conv_transpose_op_mobile.h",
    "content": "#ifndef CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_H_\n#define CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_H_\n\n#include \"caffe2/core/common.h\"\n\n#ifndef CAFFE2_MOBILE\n#error \"mobile build state not defined\"\n#endif\n\n#if CAFFE2_MOBILE\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/conv_transpose_unpool_op_base.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass ConvTransposeMobileOp final : public ConvTransposeUnpoolBase<Context> {\n public:\n  USE_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS(Context);\n  ConvTransposeMobileOp(const OperatorDef& operator_def, Workspace* ws)\n      : ConvTransposeUnpoolBase<Context>(operator_def, ws) {\n    OPERATOR_NEEDS_FEATURE(order_ == StorageOrder::NCHW, \"Only NCHW order is supported right now.\");\n    OPERATOR_NEEDS_FEATURE(\n        this->pad_l() == 0, \"operator does not handle row width padding\");\n    OPERATOR_NEEDS_FEATURE(\n        this->pad_r() == 0, \"operator does not handle row width padding\");\n    OPERATOR_NEEDS_FEATURE(this->stride_w() <= 4, \"stride width must be <= 4\");\n  }\n\n  bool RunOnDeviceWithOrderNCHW() override;\n  bool RunOnDeviceWithOrderNHWC() override;\n\n private:\n  // We store a numThreasds per-worker  tiles of Y, and numThreads per-worker threadBuffer for the\n  // gemm output, laid out in that order.\n  TensorCPU threadBuffer_;\n\n  // Input: X, W, b\n  // Output: Y\n  INPUT_TAGS(INPUT, FILTER, BIAS);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_MOBILE\n\n#endif // CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/conv_transpose_op_mobile_impl.h",
    "content": "// conv_transpose_op_impl.h is the templated implementation of the\n// conv_transpose_op.h file.\n#ifndef CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_IMPL_H_\n#define CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_IMPL_H_\n\n#include \"caffe2/core/common.h\"\n\n#ifndef CAFFE2_MOBILE\n#error \"mobile build state not defined\"\n#endif\n\n#if CAFFE2_MOBILE\n\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/operators/conv_op_shared.h\"\n#include \"caffe2/operators/conv_transpose_op_mobile.h\"\n#include \"caffe2/utils/cpu_neon.h\"\n#include \"caffe2/utils/fixed_divisor.h\"\n#include \"caffe2/utils/math.h\"\n\nCAFFE2_DECLARE_bool(caffe2_force_shared_col_buffer);\n\nnamespace caffe2 {\n\ntemplate <typename T, typename Context>\nvoid runTileContiguous(\n    int tileId,\n    int N,\n    int M,\n    int H,\n    int W,\n    int outputH,\n    int outputW,\n    int C,\n    int kernelH,\n    int kernelW,\n    int strideH,\n    int strideW,\n    int padT,\n    const T* filterData,\n    const T* Xdata,\n    T* colBufferData,\n    T* Ydata,\n    Context* context) {\n  // The tile size is exactly the length of a single row\n  int tileSize = W;\n\n  auto kernelDataSize = C * kernelH * kernelW;\n  auto currentTileStart = tileSize * tileId;\n\n  // gemm tile\n  math::GemmEx<T, Context>(\n      CblasTrans,\n      CblasNoTrans,\n      kernelDataSize,\n      tileSize,\n      M,\n      1,\n      filterData,\n      kernelDataSize,\n      Xdata + currentTileStart,\n      H * W,\n      0,\n      colBufferData,\n      tileSize,\n      context);\n\n  // col2im tile\n  // We assume that there is no padding in the columns (padL and padR\n  // == 0).\n  // FIXME: it is actually possible for us to handle padding, figure\n  // out how to adjust the bounds\n\n  // We write into Y in a de-interleaved fashion; in other words,\n  // every column (mod strideW) == 0 together in one block,\n  // every column (mod strideW) == 1 in another,\n  // ... 
and so on.\n  int colBlockSize = (W + kernelW / strideW);\n  int numColBlocks = strideW;\n\n  for (int c = 0; c < kernelDataSize; ++c) {\n    int w_offset = c % kernelW;\n    int h_offset = (c / kernelW) % kernelH;\n    int c_im = c / kernelH / kernelW;\n\n    // Each row is a separate tile that we handle. First determine the\n    // row into which we are writing the output.\n    // We can properly handle padding for the rows.\n    int rowY = tileId * strideH - padT + h_offset;\n\n    // If this row is out of bounds, then skip it\n    if (!math::is_a_ge_zero_and_a_lt_b(rowY, outputH)) {\n      continue;\n    }\n\n    // FIXME: we don't actually handle a dynamic padL > 0\n    constexpr int kPadL = 0;\n    int colOffsetStart = -kPadL + w_offset;\n    int colBlockY = colOffsetStart % strideW;\n\n    // However, within a block we may not start writing at offset\n    // 0. The offset at which we begin writing is determined by\n    // colOffsetStart\n    int colWithinBlockOffsetY = colOffsetStart / strideW;\n\n    // So, this is where we begin reading/writing in Y\n    int colY = colBlockY * colBlockSize + colWithinBlockOffsetY;\n\n    // This is the complete offset into Y from the start\n    // Each row has strideW blocks of size colBlockSize\n    int offsetY = rowY * colBlockSize * numColBlocks + colY;\n\n    T* colBufferPointer = colBufferData + c * tileSize;\n    T* yPointer =\n        Ydata + c_im * outputH * (colBlockSize * numColBlocks) + offsetY;\n\n    int b = 0;\n#ifdef __ARM_NEON__\n    // We vectorize the loop within the row\n    {\n      constexpr int kUnroll = (sizeof(float32x4_t) / sizeof(float)) * 4;\n      int limit = (tileSize / kUnroll) * kUnroll;\n\n      for (; b < limit; b += kUnroll) {\n        float32x4_t cb0 = vld1q_f32(colBufferPointer + 0);\n        float32x4_t cb1 = vld1q_f32(colBufferPointer + 4);\n        float32x4_t cb2 = vld1q_f32(colBufferPointer + 8);\n        float32x4_t cb3 = vld1q_f32(colBufferPointer + 12);\n\n        float32x4_t y0 
= vld1q_f32(yPointer + 0);\n        float32x4_t y1 = vld1q_f32(yPointer + 4);\n        float32x4_t y2 = vld1q_f32(yPointer + 8);\n        float32x4_t y3 = vld1q_f32(yPointer + 12);\n\n        y0 = vaddq_f32(y0, cb0);\n        y1 = vaddq_f32(y1, cb1);\n        y2 = vaddq_f32(y2, cb2);\n        y3 = vaddq_f32(y3, cb3);\n\n        vst1q_f32(yPointer + 0, y0);\n        vst1q_f32(yPointer + 4, y1);\n        vst1q_f32(yPointer + 8, y2);\n        vst1q_f32(yPointer + 12, y3);\n\n        colBufferPointer += kUnroll;\n        yPointer += kUnroll;\n      }\n    }\n\n    {\n      constexpr int kUnroll = (sizeof(float32x4_t) / sizeof(float));\n      int limit = (tileSize / kUnroll) * kUnroll;\n\n      for (; b < limit; b += kUnroll) {\n        float32x4_t cb0 = vld1q_f32(colBufferPointer);\n        float32x4_t y0 = vld1q_f32(yPointer);\n\n        y0 = vaddq_f32(y0, cb0);\n\n        vst1q_f32(yPointer, y0);\n\n        colBufferPointer += kUnroll;\n        yPointer += kUnroll;\n      }\n    }\n#endif\n\n    // Handle un-vectorizable epilogue\n    for (; b < tileSize; ++b) {\n      *yPointer += *colBufferPointer;\n      ++yPointer;\n      ++colBufferPointer;\n    }\n  }\n}\n\ntemplate <typename T, int N>\nstruct StoreInterleaved {};\n\ntemplate <>\nstruct StoreInterleaved<float, 1> {\n#ifdef __ARM_NEON__\n  inline static void store(float* p, float32x4_t v[1]) {\n    vst1q_f32(p, v[0]);\n  }\n#endif\n\n  inline static void store(float* p, float v[1]) {\n    p[0] = v[0];\n  }\n};\n\ntemplate <>\nstruct StoreInterleaved<float, 2> {\n#ifdef __ARM_NEON__\n  inline static void store(float* p, float32x4_t v[2]) {\n    float32x4x2_t x = {{v[0], v[1]}};\n    vst2q_f32(p, x);\n  }\n#endif\n\n  inline static void store(float* p, float v[2]) {\n    p[0] = v[0];\n    p[1] = v[1];\n  }\n};\n\ntemplate <>\nstruct StoreInterleaved<float, 3> {\n#ifdef __ARM_NEON__\n  inline static void store(float* p, float32x4_t v[3]) {\n    float32x4x3_t x = {{v[0], v[1], v[2]}};\n    vst3q_f32(p, x);\n  
}\n#endif\n\n  inline static void store(float* p, float v[3]) {\n    p[0] = v[0];\n    p[1] = v[1];\n    p[2] = v[2];\n  }\n};\n\ntemplate <>\nstruct StoreInterleaved<float, 4> {\n#ifdef __ARM_NEON__\n  inline static void store(float* p, float32x4_t v[4]) {\n    float32x4x4_t x = {{v[0], v[1], v[2], v[3]}};\n    vst4q_f32(p, x);\n  }\n#endif\n\n  inline static void store(float* p, float v[4]) {\n    p[0] = v[0];\n    p[1] = v[1];\n    p[2] = v[2];\n    p[3] = v[3];\n  }\n};\n\ntemplate <int kStrideW>\nvoid reinterleaveRows(\n    const float* src,\n    const float* bias,\n    int c,\n    int h,\n    float* dst,\n    int outputC,\n    int outputH,\n    int outputW,\n    int inputW,\n    int kernelW,\n    int strideW,\n    int adjH) {\n  // Each row in src is of the form:\n  // [w mod strideW == 0 elements]...[w mod strideW == strideW - 1\n  // elements]\n  // We need to re-interleave the values and write them in the output\n  int colBlockSize = inputW + kernelW / kStrideW;\n  int noAdjOutputW = (inputW - 1) * kStrideW + kernelW;\n\n  int point = c * outputH + h;\n  src += point * colBlockSize * kStrideW;\n  dst += point * outputW;\n\n  float b = bias ? 
bias[c] : 0;\n#ifdef __ARM_NEON__\n  float32x4_t biasV = vdupq_n_f32(b);\n#endif\n\n  int w = 0;\n#ifdef __ARM_NEON__\n  constexpr int kUnroll = (sizeof(float32x4_t) / sizeof(float)) * 2;\n  int limit = ((inputW - 1) / kUnroll) * kUnroll;\n\n  for (; w < limit; w += kUnroll) {\n    // We need to interleave in terms of kStrideW units\n    float32x4_t v0[kStrideW];\n    float32x4_t v1[kStrideW];\n\n    for (int i = 0; i < kStrideW; ++i) {\n      v0[i] = vld1q_f32(src + i * colBlockSize);\n      v1[i] = vld1q_f32(src + i * colBlockSize + 4);\n    }\n\n    // add per-channel bias\n    for (int i = 0; i < kStrideW; ++i) {\n      v0[i] = vaddq_f32(v0[i], biasV);\n      v1[i] = vaddq_f32(v1[i], biasV);\n    }\n\n    // Write interleaved into the output\n    StoreInterleaved<float, kStrideW>::store(dst + 0 * kStrideW, v0);\n    StoreInterleaved<float, kStrideW>::store(dst + 4 * kStrideW, v1);\n\n    src += kUnroll;\n    dst += kUnroll * kStrideW;\n  }\n#endif\n\n  // Handle non-vectorizable remainder\n  for (; w < inputW - 1; ++w) {\n    float v[kStrideW];\n\n    for (int i = 0; i < kStrideW; ++i) {\n      v[i] = src[i * colBlockSize];\n    }\n\n    // add per-channel bias\n    for (int i = 0; i < kStrideW; ++i) {\n      v[i] += b;\n    }\n\n    // Write interleaved into the output\n    StoreInterleaved<float, kStrideW>::store(dst, v);\n\n    src += 1;\n    dst += kStrideW;\n  }\n\n  // We have handled 0 .. 
(inputW - 1) * stride inclusive so far.\n  // Handle the remainder\n  int outputPoint = (inputW - 1) * kStrideW;\n  int block = 0;\n\n  // Output width may include adjustment into which we don't\n  // write; ignore it\n  while (outputPoint < noAdjOutputW) {\n    float v = src[block * colBlockSize];\n    dst[0] = v + b;\n    ++outputPoint;\n    dst += 1;\n\n    ++block;\n    if (block >= kStrideW) {\n      block = 0;\n      src += 1;\n    }\n  }\n\n  // Remainder of the buffer comprised of just the `adj` must have\n  // bias added\n  for (; outputPoint < outputW; ++outputPoint) {\n    dst[0] = b;\n    dst += 1;\n  }\n}\n\ntemplate <int N, typename T, typename Context>\nvoid reinterleaveMultithreaded(\n    const T* y0,\n    const T* bias_data,\n    T* y,\n    int outputC,\n    int outputH,\n    int outputW,\n    int inputW,\n    int kernelW,\n    int strideW,\n    int adjH,\n    ThreadPool* pool) {\n  // # channels times height\n  size_t totalTiles = (size_t)outputC * outputH;\n  FixedDivisor<int> divOutputH(outputH);\n\n#define REINTERLEAVE(N)  \\\n  do {                   \\\n    reinterleaveRows<N>( \\\n        y0,              \\\n        bias_data,       \\\n        c,               \\\n        h,               \\\n        y,               \\\n        outputC,         \\\n        outputH,         \\\n        outputW,         \\\n        inputW,          \\\n        kernelW,         \\\n        strideW,         \\\n        adjH);           \\\n  } while (false)\n\n  std::function<void(int, size_t)> fnReinterleave = [&](int threadId,\n                                                        size_t tileId) {\n    int h;\n    int c;\n    divOutputH.divMod((int)tileId, c, h);\n\n    REINTERLEAVE(N);\n  };\n\n#undef REINTERLEAVE\n\n  pool->run(fnReinterleave, totalTiles);\n}\n\n#ifdef __ARM_NEON__\ntemplate <int N>\nstruct SumMultiple {\n  static void sumInto(float* acc, float** toSum, size_t size);\n};\n\ntemplate <>\nstruct SumMultiple<1> {\n  static void 
sumInto(float* acc, float** toSum, size_t size) {\n    constexpr int kUnroll = (sizeof(float32x4_t) / sizeof(float));\n    int limit = (size / kUnroll) * kUnroll;\n\n    auto toSum0 = toSum[0];\n\n    size_t i = 0;\n    for (; i < limit; i += kUnroll) {\n      float32x4_t v0 = vld1q_f32_aligned(acc + i);\n      float32x4_t v1 = vld1q_f32_aligned(toSum0 + i);\n\n      v0 = vaddq_f32(v0, v1);\n\n      vst1q_f32_aligned(acc + i, v0);\n    }\n\n    for (; i < size; ++i) {\n      float v0 = acc[i];\n      float v1 = toSum0[i];\n\n      v0 += v1;\n\n      acc[i] = v0;\n    }\n  }\n};\n\ntemplate <>\nstruct SumMultiple<2> {\n  static void sumInto(float* acc, float** toSum, size_t size) {\n    constexpr int kUnroll = (sizeof(float32x4_t) / sizeof(float));\n    int limit = (size / kUnroll) * kUnroll;\n\n    auto toSum0 = toSum[0];\n    auto toSum1 = toSum[1];\n\n    size_t i = 0;\n    for (; i < limit; i += kUnroll) {\n      float32x4_t v0 = vld1q_f32_aligned(acc + i);\n      float32x4_t v1 = vld1q_f32_aligned(toSum0 + i);\n      float32x4_t v2 = vld1q_f32_aligned(toSum1 + i);\n\n      v0 = vaddq_f32(v0, v1);\n      v0 = vaddq_f32(v0, v2);\n\n      vst1q_f32_aligned(acc + i, v0);\n    }\n\n    for (; i < size; ++i) {\n      float v0 = acc[i];\n      float v1 = toSum0[i];\n      float v2 = toSum1[i];\n\n      v0 += v1;\n      v0 += v2;\n\n      acc[i] = v0;\n    }\n  }\n};\n\ntemplate <>\nstruct SumMultiple<3> {\n  static void sumInto(float* acc, float** toSum, size_t size) {\n    constexpr int kUnroll = (sizeof(float32x4_t) / sizeof(float));\n    int limit = (size / kUnroll) * kUnroll;\n\n    auto toSum0 = toSum[0];\n    auto toSum1 = toSum[1];\n    auto toSum2 = toSum[2];\n\n    size_t i = 0;\n    for (; i < limit; i += kUnroll) {\n      float32x4_t v0 = vld1q_f32_aligned(acc + i);\n      float32x4_t v1 = vld1q_f32_aligned(toSum0 + i);\n      float32x4_t v2 = vld1q_f32_aligned(toSum1 + i);\n      float32x4_t v3 = vld1q_f32_aligned(toSum2 + i);\n\n      v0 = vaddq_f32(v0, 
v1);\n      v2 = vaddq_f32(v2, v3);\n      v0 = vaddq_f32(v0, v2);\n\n      vst1q_f32_aligned(acc + i, v0);\n    }\n\n    for (; i < size; ++i) {\n      float v0 = acc[i];\n      float v1 = toSum0[i];\n      float v2 = toSum1[i];\n      float v3 = toSum2[i];\n\n      v0 += v1;\n      v2 += v3;\n      v0 += v2;\n\n      acc[i] = v0;\n    }\n  }\n};\n#endif\n\n// Performs acc[i] += sum_j toSum_j[i] pointwise\nvoid sumInto(float* acc, std::vector<float*>& toSum, size_t size) {\n#ifdef __ARM_NEON__\n  if (toSum.size() == 1) {\n    SumMultiple<1>::sumInto(acc, toSum.data(), size);\n    return;\n  } else if (toSum.size() == 2) {\n    SumMultiple<2>::sumInto(acc, toSum.data(), size);\n    return;\n  } else if (toSum.size() == 3) {\n    SumMultiple<3>::sumInto(acc, toSum.data(), size);\n    return;\n  }\n#endif\n\n  // Otherwise, use fallback implementation\n  EigenVectorArrayMap<float> accT(acc, size);\n\n  for (auto p : toSum) {\n    accT += ConstEigenVectorArrayMap<float>(p, size);\n  }\n}\n\ntemplate <typename T, class Context>\nbool ConvTransposeMobileOp<T, Context>::RunOnDeviceWithOrderNCHW() {\n  const Tensor<Context>& X = Input(INPUT);\n  auto& filter = Input(FILTER);\n  Tensor<Context>* Y = Output(0);\n  const int N = X.dim32(0), M = X.dim32(1), H = X.dim32(2), W = X.dim32(3);\n  CAFFE_ENFORCE(filter.ndim() == 4, \"filter must be 4D tensor\");\n  CAFFE_ENFORCE(\n      filter.dim32(0) == M,\n      \"filter number must be equal to input channel number\");\n  const int C = filter.dim32(1);\n  CAFFE_ENFORCE(\n      filter.dim32(2) == this->kernel_h(),\n      \"filter height must be equal to kernel height\");\n  CAFFE_ENFORCE(\n      filter.dim32(3) == this->kernel_w(),\n      \"filter width must be equal to kernel width\");\n  if (InputSize() == 3) {\n    auto& bias = Input(BIAS);\n    CAFFE_ENFORCE(bias.ndim() == 1, \"bias must be 1D tensor\");\n    CAFFE_ENFORCE(\n        bias.dim32(0) == C,\n        \"bias dimension must be equal to output channel number\");\n  
}\n\n  ConvTransposeUnpoolBase<Context>::SetOutputSize(X, Y, C);\n\n  const int outputH = Y->dim32(2);\n  const int outputW = Y->dim32(3);\n  const int outputPlaneSize = outputH * outputW;\n  const int outputBatchElementSize = Y->dim32(1) * outputPlaneSize;\n\n  auto Xdata = X.template data<T>();\n  auto Ydata = Y->template mutable_data<T>();\n\n  auto pool = ws_->GetThreadPool();\n  auto numThreads = pool->getNumThreads();\n\n  // Initialize per-thread buffers for output\n  // The main thread will write directly into the output Y, we just\n  // need buffers for the worker threads\n  size_t colBlockSize = W + this->kernel_w() / this->stride_w();\n  size_t threadYBufferSize = C * outputH * colBlockSize * this->stride_w();\n  // Require 16 byte alignment, so 4-element alignment as these are floats.\n  size_t threadYBufferSizeAligned =\n      ((C * outputH * colBlockSize * this->stride_w() + 3) / 4) * 4;\n  size_t threadColBufferSize = C * this->kernel_h() * this->kernel_w() * W;\n\n  // Work around GCC 4.9 bug when this is declared inside the inner lambda.\n  auto runLocalTile = [&](TensorCPU* threadBuffer,\n                          int threadId,\n                          size_t tileId) {\n    auto localYData = threadBuffer->template mutable_data<T>() +\n        threadId * threadYBufferSizeAligned;\n\n    auto localColBufferData = threadBuffer->template mutable_data<T>() +\n        numThreads * threadYBufferSizeAligned + threadId * threadColBufferSize;\n\n    runTileContiguous<T, Context>(\n        tileId,\n        N,\n        M,\n        H,\n        W,\n        outputH,\n        outputW,\n        C,\n        this->kernel_h(),\n        this->kernel_w(),\n        this->stride_h(),\n        this->stride_w(),\n        this->pad_t(),\n        filter.template data<T>(),\n        Xdata,\n        localColBufferData,\n        localYData,\n        &context_);\n  };\n\n  auto f = [&](Tensor<Context>* threadBuffer) {\n    threadBuffer->Resize(\n        numThreads * 
threadYBufferSizeAligned +\n        numThreads * threadColBufferSize);\n    // Group together thread buffers for accumulation\n    std::vector<T*> toSum(numThreads - 1);\n    for (int i = 1; i < numThreads; ++i) {\n      toSum[i - 1] = threadBuffer->template mutable_data<T>() +\n          i * threadYBufferSizeAligned;\n    }\n\n    for (auto image_id = 0; image_id < N; ++image_id) {\n      // Each time through, we have to reset all per-thread output\n      // buffers, since the output buffer is only per-batch element\n      // The column buffers are overwritten by the matrix multiplication\n      // each time, so we need not clear them out each round\n      math::Set<T, Context>(\n          numThreads * threadYBufferSizeAligned,\n          0,\n          threadBuffer->template mutable_data<T>(),\n          &context_);\n\n      // Run tiled gemm and col2im in our threadpool; all of these tiles\n      // are guaranteed to be full tiles\n      // Each tile handles a single row of the input\n      pool->run(\n          [&](int threadId, int tileId) {\n            runLocalTile(threadBuffer, threadId, tileId);\n          },\n          H);\n\n      // We need to accumulate the per-thread results into the output\n      // Y; the first worker thread (main thread) already produced its\n      // results in Y\n      sumInto(\n          threadBuffer->template mutable_data<T>(), toSum, threadYBufferSize);\n\n// y0 now contains the final output, but it is in deinterleaved\n// form. We have to re-interleave it to produce the final form in Y\n// This operation also handles adding the per-channel bias.\n#define REINTERLEAVE(N)                                              \\\n  do {                                                               \\\n    reinterleaveMultithreaded<N, T, Context>(                        \\\n        threadBuffer->template mutable_data<T>(),                    \\\n        InputSize() == 3 ? 
Input(BIAS).template data<T>() : nullptr, \\\n        Ydata,                                                       \\\n        Y->dim32(1),                                                 \\\n        Y->dim32(2),                                                 \\\n        Y->dim32(3),                                                 \\\n        W,                                                           \\\n        this->kernel_w(),                                            \\\n        this->stride_w(),                                            \\\n        this->adj_h(),                                               \\\n        pool);                                                       \\\n  } while (false)\n\n      if (this->stride_w() == 1) {\n        REINTERLEAVE(1);\n      } else if (this->stride_w() == 2) {\n        REINTERLEAVE(2);\n      } else if (this->stride_w() == 3) {\n        REINTERLEAVE(3);\n      } else if (this->stride_w() == 4) {\n        REINTERLEAVE(4);\n      }\n\n#undef REINTERLEAVE\n\n      Xdata += M * H * W;\n      Ydata += Y->size() / Y->dim32(0);\n    }\n  };\n  if (FLAGS_caffe2_force_shared_col_buffer || shared_buffer_) {\n    runWithSharedBuffer<Context>(ws_, f);\n  } else {\n    f(&threadBuffer_);\n  }\n\n  return true;\n}\n\ntemplate <typename T, class Context>\nbool ConvTransposeMobileOp<T, Context>::RunOnDeviceWithOrderNHWC() {\n  CAFFE_THROW(\"Not implemented.\");\n}\n\n} // namespace caffe2\n\n#endif // CAFFE2_MOBILE\n\n#endif // CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_IMPL_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/conv_transpose_unpool_op_base.h",
    "content": "#ifndef CAFFE2_OPERATORS_CONV_TRANSPOSE_UNPOOL_OP_BASE_H_\n#define CAFFE2_OPERATORS_CONV_TRANSPOSE_UNPOOL_OP_BASE_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/conv_op_shared.h\"\n#include \"caffe2/operators/conv_pool_op_base.h\"\n#include \"caffe2/proto/caffe2_legacy.pb.h\"\n#include \"caffe2/utils/math.h\"\n\nCAFFE2_DECLARE_bool(caffe2_force_shared_col_buffer);\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass ConvTransposeUnpoolBase : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  ConvTransposeUnpoolBase(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        legacy_pad_(\n            static_cast<LegacyPadding>(OperatorBase::GetSingleArgument<int>(\n                \"legacy_pad\",\n                LegacyPadding::NOTSET))),\n        kernel_(OperatorBase::GetRepeatedArgument<int>(\"kernels\")),\n        stride_(OperatorBase::GetRepeatedArgument<int>(\"strides\")),\n        pads_(OperatorBase::GetRepeatedArgument<int>(\"pads\")),\n        adj_(OperatorBase::GetRepeatedArgument<int>(\"adjs\")),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))),\n        shared_buffer_(\n            OperatorBase::GetSingleArgument<int>(\"shared_buffer\", 0)),\n        ws_(ws) {\n    // For the padding, they should either be the legacy padding strategy\n    // (VALID or SAME), or an explicit, non-negative value.\n    if (legacy_pad_ == LegacyPadding::VALID ||\n        legacy_pad_ == LegacyPadding::SAME) {\n      CAFFE_ENFORCE(\n          !OperatorBase::HasArgument(\"pads\"),\n          \"If you use legacy padding VALID or SAME, you should not specify \"\n          \"any specific padding values.\");\n    }\n    // Get old arguments values.\n    if (OperatorBase::HasArgument(\"kernel\")) {\n      kernel_.resize(2, 
OperatorBase::GetSingleArgument<int>(\"kernel\", 0));\n    } else if (\n        OperatorBase::HasArgument(\"kernel_h\") &&\n        OperatorBase::HasArgument(\"kernel_w\")) {\n      kernel_.push_back(OperatorBase::GetSingleArgument<int>(\"kernel_h\", 0));\n      kernel_.push_back(OperatorBase::GetSingleArgument<int>(\"kernel_w\", 0));\n    }\n\n    if (OperatorBase::HasArgument(\"stride\")) {\n      stride_.resize(2, OperatorBase::GetSingleArgument<int>(\"stride\", 0));\n    } else if (\n        OperatorBase::HasArgument(\"stride_h\") &&\n        OperatorBase::HasArgument(\"stride_w\")) {\n      stride_.push_back(OperatorBase::GetSingleArgument<int>(\"stride_h\", 0));\n      stride_.push_back(OperatorBase::GetSingleArgument<int>(\"stride_w\", 0));\n    }\n\n    if (OperatorBase::HasArgument(\"adj\")) {\n      adj_.resize(2, OperatorBase::GetSingleArgument<int>(\"adj\", 0));\n    } else if (\n        OperatorBase::HasArgument(\"adj_h\") &&\n        OperatorBase::HasArgument(\"adj_w\")) {\n      adj_.push_back(OperatorBase::GetSingleArgument<int>(\"adj_h\", 0));\n      adj_.push_back(OperatorBase::GetSingleArgument<int>(\"adj_w\", 0));\n    }\n\n    if (OperatorBase::HasArgument(\"pad\")) {\n      CAFFE_ENFORCE(\n          legacy_pad_ != LegacyPadding::VALID &&\n              legacy_pad_ != LegacyPadding::SAME,\n          \"If you use legacy padding VALID or SAME, you should not specify \"\n          \"any specific padding values.\");\n      pads_.resize(4, OperatorBase::GetSingleArgument<int>(\"pad\", 0));\n    } else if (\n        OperatorBase::HasArgument(\"pad_t\") &&\n        OperatorBase::HasArgument(\"pad_l\") &&\n        OperatorBase::HasArgument(\"pad_b\") &&\n        OperatorBase::HasArgument(\"pad_r\")) {\n      CAFFE_ENFORCE(\n          legacy_pad_ != LegacyPadding::VALID &&\n              legacy_pad_ != LegacyPadding::SAME,\n          \"If you use legacy padding VALID or SAME, you should not specify \"\n          \"any specific padding values.\");\n      
pads_.push_back(OperatorBase::GetSingleArgument<int>(\"pad_t\", 0));\n      pads_.push_back(OperatorBase::GetSingleArgument<int>(\"pad_l\", 0));\n      pads_.push_back(OperatorBase::GetSingleArgument<int>(\"pad_b\", 0));\n      pads_.push_back(OperatorBase::GetSingleArgument<int>(\"pad_r\", 0));\n    }\n\n    // Fill default values.\n    if (kernel_.size() == 0) {\n      kernel_.assign({0, 0});\n    }\n\n    if (stride_.size() == 0) {\n      stride_.resize(kernel_.size(), 1);\n    }\n\n    if (pads_.size() == 0) {\n      pads_.resize(kernel_.size() * 2, 0);\n    }\n\n    if (adj_.size() == 0) {\n      adj_.resize(kernel_.size(), 0);\n    }\n\n    CAFFE_ENFORCE_EQ(stride_.size(), kernel_.size());\n    CAFFE_ENFORCE_EQ(adj_.size(), kernel_.size());\n\n    if (legacy_pad_ != LegacyPadding::VALID &&\n        legacy_pad_ != LegacyPadding::SAME) {\n      CAFFE_ENFORCE_EQ(pads_.size(), 2 * kernel_.size());\n    }\n\n    for (int dim = 0; dim < kernel_.size(); ++dim) {\n      CAFFE_ENFORCE_GT(kernel_[dim], 0);\n      CAFFE_ENFORCE_GT(stride_[dim], 0);\n      CAFFE_ENFORCE_GE(adj_[dim], 0);\n      CAFFE_ENFORCE_LE(adj_[dim], stride_[dim]);\n    }\n\n    // Create shared buffer mutex in the constructor\n    // to avoid race-condition in DAGNet.\n    if (FLAGS_caffe2_force_shared_col_buffer || shared_buffer_) {\n      createSharedBuffer<Context>(ws_);\n    }\n  }\n  // Sets the output size. 
The output channel is manually specified.\n  void SetOutputSize(\n      const Tensor<Context>& input,\n      Tensor<Context>* output,\n      int output_channel) {\n    CAFFE_ENFORCE(4 == input.ndim());\n    CAFFE_ENFORCE(input.size() > 0);\n    int N = input.dim32(0);\n    bool channel_first = false; // initialized to suppress compiler warning.\n    int H = 0, W = 0; // initialized to suppress compiler warning.\n    int M = 0;\n    switch (order_) {\n      case StorageOrder::NHWC:\n        channel_first = false;\n        H = input.dim32(1);\n        W = input.dim32(2);\n        M = input.dim32(3);\n        break;\n      case StorageOrder::NCHW:\n        channel_first = true;\n        M = input.dim32(1);\n        H = input.dim32(2);\n        W = input.dim32(3);\n        break;\n      default:\n        LOG(FATAL) << \"Unknown Storage order: \" << order_;\n    }\n    int output_height = 0, output_width = 0;\n    ComputeSizeAndPad(\n        H,\n        stride_[0],\n        kernel_[0],\n        adj_[0],\n        &pads_[0],\n        &pads_[2],\n        &output_height);\n    ComputeSizeAndPad(\n        W,\n        stride_[1],\n        kernel_[1],\n        adj_[1],\n        &pads_[1],\n        &pads_[3],\n        &output_width);\n    if (channel_first) {\n      output->Resize(N, output_channel, output_height, output_width);\n    } else {\n      output->Resize(N, output_height, output_width, output_channel);\n    }\n    VLOG(2) << \"In: N \" << N << \" M \" << M << \" H \" << H << \" W \" << W;\n    VLOG(2) << \"Out: output_channel \" << output_channel << \" H \"\n            << output_height << \" W \" << output_width;\n  }\n\n  bool RunOnDevice() override {\n    switch (order_) {\n      case StorageOrder::NHWC:\n        return RunOnDeviceWithOrderNHWC();\n      case StorageOrder::NCHW:\n        return RunOnDeviceWithOrderNCHW();\n      default:\n        LOG(FATAL) << \"Unknown storage order: \" << order_;\n    }\n    // To suppress old compiler warnings\n    return 
true;\n  }\n\n  virtual bool RunOnDeviceWithOrderNCHW() {\n    CAFFE_THROW(\"Not implemented\");\n  }\n\n  virtual bool RunOnDeviceWithOrderNHWC() {\n    CAFFE_THROW(\"Not implemented\");\n  }\n\n  virtual ~ConvTransposeUnpoolBase() {}\n\n private:\n  LegacyPadding legacy_pad_;\n  int pad_;\n\n protected:\n  vector<int> kernel_;\n  vector<int> stride_;\n  vector<int> pads_;\n  vector<int> adj_;\n  StorageOrder order_;\n  bool shared_buffer_;\n  Workspace* ws_;\n\n  // Accessors for 2D conv params.\n\n  inline int pad_t() const {\n    return pads_[0];\n  }\n\n  inline int pad_l() const {\n    return pads_[1];\n  }\n\n  inline int pad_b() const {\n    return pads_[2];\n  }\n\n  inline int pad_r() const {\n    return pads_[3];\n  }\n\n  inline int kernel_h() const {\n    return kernel_[0];\n  }\n\n  inline int kernel_w() const {\n    return kernel_[1];\n  }\n\n  inline int stride_h() const {\n    return stride_[0];\n  }\n\n  inline int stride_w() const {\n    return stride_[1];\n  }\n\n  inline int adj_h() const {\n    return adj_[0];\n  }\n\n  inline int adj_w() const {\n    return adj_[1];\n  }\n\n  inline void ComputeSizeAndPad(\n      const int in_size,\n      const int stride,\n      const int kernel,\n      const int adj,\n      int* pad_head,\n      int* pad_tail,\n      int* out_size) {\n    switch (legacy_pad_) {\n      case LegacyPadding::NOTSET:\n        CAFFE_ENFORCE(*pad_head >= 0);\n        CAFFE_ENFORCE(*pad_tail >= 0);\n        *out_size =\n            (in_size - 1) * stride + kernel + adj - *pad_head - *pad_tail;\n        break;\n      // We handle cases of LegacyPadding::VALID and LegacyPadding::SAME\n      // the same way\n      case LegacyPadding::VALID:\n      case LegacyPadding::SAME:\n        *pad_head = 0;\n        *pad_tail = 0;\n        *out_size = (in_size - 1) * stride + kernel + adj;\n        break;\n      case LegacyPadding::CAFFE_LEGACY_POOLING:\n        LOG(FATAL) << \"CAFFE_LEGACY_POOLING is no longer supported.\";\n        break;\n    
}\n  }\n};\n\n#define USE_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS(Context) \\\n  USE_OPERATOR_FUNCTIONS(Context);                        \\\n  using ConvTransposeUnpoolBase<Context>::kernel_;        \\\n  using ConvTransposeUnpoolBase<Context>::stride_;        \\\n  using ConvTransposeUnpoolBase<Context>::pads_;          \\\n  using ConvTransposeUnpoolBase<Context>::adj_;           \\\n  using ConvTransposeUnpoolBase<Context>::order_;         \\\n  using ConvTransposeUnpoolBase<Context>::shared_buffer_; \\\n  using ConvTransposeUnpoolBase<Context>::ws_\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_CONV_TRANSPOSE_UNPOOL_OP_BASE_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/cosine_embedding_criterion_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_COSINE_EMBEDDING_CRITERION_OP_H_\n#define CAFFE2_OPERATORS_COSINE_EMBEDDING_CRITERION_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass CosineEmbeddingCriterionOp final : public Operator<Context> {\n public:\n  CosineEmbeddingCriterionOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        OP_SINGLE_ARG(float, \"margin\", margin_, 0.0) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  float margin_;\n};\n\ntemplate <class Context>\nclass CosineEmbeddingCriterionGradientOp final : public Operator<Context> {\n public:\n  CosineEmbeddingCriterionGradientOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        OP_SINGLE_ARG(float, \"margin\", margin_, 0.0) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  float margin_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_COSINE_EMBEDDING_CRITERION_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/counter_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_COUNTER_OPS_H\n#define CAFFE2_OPERATORS_COUNTER_OPS_H\n\n#include <atomic>\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\ntemplate <typename T>\nclass Counter {\n public:\n  explicit Counter(T count) : count_(count) {}\n  bool countDown() {\n    if (count_-- > 0) {\n      return false;\n    }\n    return true;\n  }\n\n  T countUp() {\n    return count_++;\n  }\n\n  T retrieve() const {\n    return count_.load();\n  }\n\n  T checkIfDone() const {\n    return (count_.load() <= 0);\n  }\n\n  T reset(T init_count) {\n    return count_.exchange(init_count);\n  }\n\n private:\n  std::atomic<T> count_;\n};\n\n// TODO(jiayq): deprecate these ops & consolidate them with IterOp/AtomicIterOp\n\ntemplate <typename T, class Context>\nclass CreateCounterOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  CreateCounterOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        init_count_(OperatorBase::GetSingleArgument<T>(\"init_count\", 0)) {\n    CAFFE_ENFORCE_LE(0, init_count_, \"negative init_count is not permitted.\");\n  }\n\n  bool RunOnDevice() override {\n    *OperatorBase::Output<std::unique_ptr<Counter<T>>>(0) =\n        std::unique_ptr<Counter<T>>(new Counter<T>(init_count_));\n    return true;\n  }\n\n private:\n  T init_count_ = 0;\n};\n\ntemplate <typename T, class Context>\nclass ResetCounterOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  ResetCounterOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        init_count_(OperatorBase::GetSingleArgument<T>(\"init_count\", 0)) {\n    CAFFE_ENFORCE_LE(0, init_count_, \"negative init_count is not permitted.\");\n  }\n\n  bool RunOnDevice() override {\n    auto& counterPtr = OperatorBase::Input<std::unique_ptr<Counter<T>>>(0);\n    
auto previous = counterPtr->reset(init_count_);\n    if (OutputSize() == 1) {\n      auto* output = OperatorBase::Output<TensorCPU>(0);\n      output->Resize();\n      *output->template mutable_data<T>() = previous;\n    }\n    return true;\n  }\n\n private:\n  T init_count_;\n};\n\n// Will always use TensorCPU regardless the Context\ntemplate <typename T, class Context>\nclass CountDownOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  CountDownOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    auto& counterPtr = OperatorBase::Input<std::unique_ptr<Counter<T>>>(0);\n    auto* output = OperatorBase::Output<TensorCPU>(0);\n    output->Resize(std::vector<int>{});\n    *output->template mutable_data<bool>() = counterPtr->countDown();\n    return true;\n  }\n};\n\n// Will always use TensorCPU regardless the Context\ntemplate <typename T, class Context>\nclass CheckCounterDoneOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  CheckCounterDoneOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    auto& counterPtr = OperatorBase::Input<std::unique_ptr<Counter<T>>>(0);\n    auto* output = OperatorBase::Output<TensorCPU>(0);\n    output->Resize(std::vector<int>{});\n    *output->template mutable_data<bool>() = counterPtr->checkIfDone();\n    return true;\n  }\n};\n\n// Will always use TensorCPU regardless the Context\ntemplate <typename T, class Context>\nclass CountUpOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  CountUpOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    auto& counterPtr = OperatorBase::Input<std::unique_ptr<Counter<T>>>(0);\n    auto* output = OperatorBase::Output<TensorCPU>(0);\n    
output->Resize(std::vector<int>{});\n    *output->template mutable_data<T>() = counterPtr->countUp();\n    return true;\n  }\n};\n\n// Will always use TensorCPU regardless the Context\ntemplate <typename T, class Context>\nclass RetrieveCountOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  RetrieveCountOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    auto& counterPtr = OperatorBase::Input<std::unique_ptr<Counter<T>>>(0);\n    auto* output = OperatorBase::Output<TensorCPU>(0);\n    output->Resize(std::vector<int>{});\n    *output->template mutable_data<T>() = counterPtr->retrieve();\n    return true;\n  }\n};\n\n} // namespace caffe2\n#endif // CAFFE2_OPERATORS_COUNTER_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/create_scope_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_CREATE_SCOPE_OP_H_\n#define CAFFE2_OPERATORS_CREATE_SCOPE_OP_H_\n\n#include <string>\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nCAFFE2_DECLARE_bool(caffe2_workspace_stack_debug);\n\nnamespace caffe2 {\nnamespace detail {\n\n/*\n * Keeps track of forward and backward gradient workspaces in stack,\n * reuses previously created workspaces, non-thread safe\n */\nclass WorkspaceStack {\n public:\n  explicit WorkspaceStack() : parent_ws_(nullptr), top_(-1) {}\n\n  std::shared_ptr<Workspace> pushForwardWorkspace(\n      Workspace* parent_ws,\n      const std::unordered_map<std::string, std::string>& blob_bindings) {\n    checkStack();\n    if (FLAGS_caffe2_workspace_stack_debug) {\n      if (parent_ws_) {\n        CAFFE_ENFORCE_EQ(parent_ws_, parent_ws, \"Parent workspace mismatch\");\n      } else {\n        parent_ws_ = parent_ws;\n      }\n      if (!blob_bindings_.empty()) {\n        checkBindingsMatch(blob_bindings_, blob_bindings);\n      } else {\n        blob_bindings_ = blob_bindings;\n      }\n    }\n\n    if (top_ == workspaces_.size() - 1) {\n      workspaces_.push_back(\n          std::make_shared<Workspace>(parent_ws, blob_bindings));\n    }\n    return workspaces_[++top_];\n  }\n\n  std::shared_ptr<Workspace> popGradientWorkspace(\n      Workspace* parent_ws,\n      const std::unordered_map<std::string, std::string>& grad_blob_bindings) {\n    checkStack();\n    if (FLAGS_caffe2_workspace_stack_debug) {\n      if (parent_ws_) {\n        CAFFE_ENFORCE_EQ(parent_ws_, parent_ws, \"Parent workspace mismatch\");\n      } else {\n        parent_ws_ = parent_ws;\n      }\n      if (!grad_blob_bindings_.empty()) {\n        checkBindingsMatch(grad_blob_bindings_, grad_blob_bindings);\n      } else {\n        grad_blob_bindings_ = 
grad_blob_bindings;\n      }\n    }\n\n    if (top_ < 0) {\n      return nullptr;\n    }\n    auto& grad_workspace = workspaces_[top_];\n    grad_workspace->AddBlobMapping(parent_ws, grad_blob_bindings);\n    --top_;\n    return grad_workspace;\n  }\n\n  void clear() {\n    checkStack();\n    top_ = -1;\n  }\n\n private:\n  void checkStack() const {\n    CAFFE_ENFORCE_GT(\n        (int)workspaces_.size(), top_, \"Corrupted workspaces stack\");\n  }\n\n  void checkBindingsMatch(\n      const std::unordered_map<std::string, std::string>& bindings,\n      const std::unordered_map<std::string, std::string>& test_bindings) const {\n    CAFFE_ENFORCE_EQ(\n        bindings.size(), test_bindings.size(), \"Blob bindings mismatch\");\n    for (const auto& blob_binding : bindings) {\n      CAFFE_ENFORCE(\n          test_bindings.count(blob_binding.first), \"Blob bindings mismatch\");\n      CAFFE_ENFORCE_EQ(\n          test_bindings.at(blob_binding.first),\n          blob_binding.second,\n          \"Blob bindings mismatch\");\n    }\n  }\n\n  std::unordered_map<std::string, std::string> blob_bindings_;\n  std::unordered_map<std::string, std::string> grad_blob_bindings_;\n  Workspace* parent_ws_;\n  int top_;\n  std::vector<std::shared_ptr<Workspace>> workspaces_;\n};\n}\n\ntemplate <class Context>\nclass CreateScopeOp final : public Operator<Context> {\n public:\n  CreateScopeOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_CREATE_SCOPE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/cross_entropy_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_CROSS_ENTROPY_OP_H_\n#define CAFFE2_OPERATORS_CROSS_ENTROPY_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass LabelCrossEntropyOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(LabelCrossEntropyOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n protected:\n  static constexpr T kLOG_THRESHOLD() {\n    return static_cast<T>(1e-20);\n  }\n  // Input: X, label\n  // Output: Y\n};\n\ntemplate <typename T, class Context>\nclass LabelCrossEntropyGradientOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(LabelCrossEntropyGradientOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n protected:\n  // Input: X, label, dY\n  // Ouptut: dX. There is no gradient with respect to the label.\n  static constexpr T kLOG_THRESHOLD() {\n    return static_cast<T>(1e-20);\n  }\n};\n\n// Hacky: turns a vector of probabilities into a 2-column matrix with\n// complimentary probabilities for binary classification\ntemplate <typename T, class Context>\nclass MakeTwoClassOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(MakeTwoClassOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n protected:\n  // Input: X\n  // Output: Y = vstack(1-X, X)\n};\n\ntemplate <typename T, class Context>\nclass MakeTwoClassGradientOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(MakeTwoClassGradientOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n protected:\n  // Input: dY\n  // Ouptut: dX\n};\n\ntemplate <typename T, class Context>\nclass SigmoidCrossEntropyWithLogitsOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(SigmoidCrossEntropyWithLogitsOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool 
RunOnDevice() override;\n};\n\ntemplate <typename T, class Context>\nclass SigmoidCrossEntropyWithLogitsGradientOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(SigmoidCrossEntropyWithLogitsGradientOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n};\n\ntemplate <typename T, class Context>\nclass CrossEntropyOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(CrossEntropyOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n protected:\n  // Input: X, label\n  // Output: Y\n  static constexpr T kLOG_THRESHOLD() {\n    return static_cast<T>(1e-20);\n  }\n};\n\ntemplate <typename T, class Context>\nclass CrossEntropyGradientOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(CrossEntropyGradientOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n protected:\n  // Input: X, label, dY\n  // Ouptut: dX. There is no gradient with respect to the label.\n  static constexpr T kLOG_THRESHOLD() {\n    return static_cast<T>(1e-20);\n  }\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_CROSS_ENTROPY_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/dataset_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_DATASET_OPS_H_\n#define CAFFE2_OPERATORS_DATASET_OPS_H_\n\n#include <memory>\n#include <mutex>\n#include <string>\n#include <vector>\n#include \"caffe2/core/blob.h\"\n#include \"caffe2/core/tensor.h\"\n\nnamespace caffe2 {\nnamespace dataset_ops {\n\n// used for lengths tensors in the dataset\nusing TLength = int32_t;\n// used for all internal dataset operations (offsets, sizes to read, etc.)\nusing TOffset = int64_t;\n\n/**\n * Provides functionality to iterate across a list of tensors where some\n * of those tensors represent lengths in a hierarchical structure.\n */\nclass TreeIterator {\n public:\n  struct FieldDesc {\n    int id;\n    int lengthFieldId = -1;\n    std::string name;\n  };\n\n  explicit TreeIterator(const std::vector<std::string>& fields);\n\n  void advance(\n      const std::vector<const TLength*>& lengths,\n      std::vector<TOffset>& offsets,\n      std::vector<TOffset>& sizes,\n      std::vector<TOffset>& limits,\n      TOffset num);\n\n  // Corresponds to the number of fields that have \"length\" as its last name\n  int numLengthFields() const {\n    return lengthFieldIds_.size();\n  }\n\n  // Corresponds to the number of length fields + 1 (for the top-level domain)\n  int numOffsetFields() const {\n    return numLengthFields() + 1;\n  }\n\n  // Get lengthField description for the given field\n  const FieldDesc* lengthFieldFor(const FieldDesc& desc) {\n    return (desc.lengthFieldId == -1)\n        ? 
nullptr\n        : &fields_.at(lengthFieldIds_.at(desc.lengthFieldId));\n  }\n\n  // Get lengthField description for the given lengthFieldId, where\n  // 0 <= lengthFieldId < numLengthFields()\n  const FieldDesc& lengthField(int lengthFieldId) {\n    return fields_.at(lengthFieldIds_.at(lengthFieldId));\n  }\n\n  // Returns the index into the 'offset' vector for the given field.\n  int offsetFieldIdFor(const FieldDesc& fieldDesc) {\n    return fieldDesc.lengthFieldId + 1;\n  }\n\n  // Returns the field description for all fields.\n  const std::vector<FieldDesc>& fields() {\n    return fields_;\n  }\n\n  const std::vector<int>& lengthFieldIds() const {\n    return lengthFieldIds_;\n  }\n\n private:\n  // Description of each field\n  std::vector<FieldDesc> fields_;\n  // Index into fields_ above for the fields that are lengths.\n  std::vector<int> lengthFieldIds_;\n};\n\nclass TreeCursor {\n public:\n  explicit TreeCursor(const TreeIterator& iterator) : it(iterator) {}\n  std::vector<TOffset> offsets;\n  std::mutex mutex_;\n  TreeIterator it;\n};\n\n/**\n * Simple wrapper class allowing an easy traversal of the tensors representing\n * the hirerarchical structure.\n */\nclass TreeWalker {\n public:\n  TreeWalker(const vector<const Blob*>& inputs, TreeCursor& cursor);\n\n  // Returns the number of records in a dataset\n  inline TOffset size() const {\n    return limits_.at(0);\n  }\n\n  void advance();\n\n private:\n  inline const TensorCPU& input(int32_t idx) const {\n    return inputs_[idx]->Get<TensorCPU>();\n  }\n\n  // TODO: Change to fieldDesc\n  inline const TreeIterator::FieldDesc& field(int idx) const {\n    return cursor_.it.fields().at(idx);\n  }\n\n  inline int lengthIdx(int fieldId) const {\n    return field(fieldId).lengthFieldId + 1;\n  }\n\n  inline TOffset offset(int fieldId) const {\n    return prevOffsets_[lengthIdx(fieldId)];\n  }\n\n  std::vector<TIndex> fieldDim(int fieldId) const;\n\n  void* fieldPtr(int fieldId) const;\n\n public:\n  // Simple 
Proxy class to expose nicer API for field access\n  class Field {\n   public:\n    Field(TreeWalker& walker, int fieldId)\n        : walker_(walker), fieldId_(fieldId) {}\n\n    inline std::vector<TIndex> dim() const {\n      return walker_.fieldDim(fieldId_);\n    }\n\n    inline TIndex size() const {\n      TIndex size = 1;\n      for (const auto d : dim()) {\n        size *= d;\n      }\n      return size;\n    }\n\n    inline const TypeMeta& meta() const {\n      return walker_.input(fieldId_).meta();\n    }\n\n    inline void* ptr() const {\n      return walker_.fieldPtr(fieldId_);\n    }\n\n    int fieldId() const {\n      return fieldId_;\n    }\n\n    inline TOffset offset() const {\n      return walker_.offset(fieldId_);\n    }\n\n   private:\n    const TreeWalker& walker_;\n    const int fieldId_;\n  };\n\n  // Notice that a reference is returned. If advance() is called the fields will\n  // be updated to represent the new state.\n  inline const std::vector<Field>& fields() const {\n    return fields_;\n  }\n\n private:\n  void gatherLengthData();\n\n  void gatherSizeLimits();\n\n  const vector<const Blob*>& inputs_;\n  TreeCursor& cursor_;\n  std::vector<Field> fields_;\n\n  std::vector<const TLength*> lengths_;\n  std::vector<TOffset> limits_;\n  std::vector<TOffset> sizes_;\n  std::vector<TOffset> offsets_;\n  std::vector<TOffset> prevOffsets_;\n};\n\nusing SharedTensorVectorPtr = std::shared_ptr<std::vector<TensorCPU>>;\n\ntemplate <class Context>\nusing TensorVectorPtr = std::unique_ptr<std::vector<Tensor<Context>>>;\n\n} // dataset_ops\n} // caffe2\n\n#endif // CAFFE2_OPERATORS_DATASET_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/distance_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_DISTANCE_OP_H_\n#define CAFFE2_OPERATORS_DISTANCE_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass SquaredL2DistanceOp : public Operator<Context> {\n public:\n  SquaredL2DistanceOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  // Input: X, Y; Output: Distance\n};\n\ntemplate <typename T, class Context>\nclass SquaredL2DistanceGradientOp final : public Operator<Context> {\n public:\n  SquaredL2DistanceGradientOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    auto& X = Input(0);\n    auto& Y = Input(1);\n    auto& dDistance = Input(2);\n    auto* dX = Output(0);\n    auto* dY = Output(1);\n    int N = X.ndim() > 0 ? X.dim32(0) : 1;\n    int D = N > 0 ? 
X.size() / N : 0;\n    CAFFE_ENFORCE(X.ndim() == Y.ndim());\n    for (int i = 0; i < X.ndim(); ++i) {\n      CAFFE_ENFORCE(X.dim32(i) == Y.dim32(i));\n    }\n    CAFFE_ENFORCE(dDistance.ndim() == 1);\n    CAFFE_ENFORCE(dDistance.dim32(0) == N);\n    dX->ResizeLike(X);\n    dY->ResizeLike(Y);\n    math::Sub<T, Context>(\n        X.size(),\n        X.template data<T>(),\n        Y.template data<T>(),\n        dX->template mutable_data<T>(),\n        &context_);\n    for (int i = 0; i < N; ++i) {\n      math::Scale<T, Context>(\n          D,\n          dDistance.template data<T>() + i,\n          dX->template data<T>() + i * D,\n          dX->template mutable_data<T>() + i * D,\n          &context_);\n    }\n    // The gradient of the other side is basically the negative.\n    math::Scale<T, Context>(\n        X.size(),\n        -1,\n        dX->template data<T>(),\n        dY->template mutable_data<T>(),\n        &context_);\n    return true;\n  }\n\n protected:\n  // Input: X, Y, dDistance; Output: dX, dY\n};\n\ntemplate <typename T, class Context>\nclass L1DistanceOp : public Operator<Context> {\n public:\n  L1DistanceOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  // Input: X, Y; Output: Distance\n};\n\ntemplate <typename T, class Context>\nclass L1DistanceGradientOp : public Operator<Context> {\n public:\n  L1DistanceGradientOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  // Input: X, Y, dDistance; Output: dX, dY\n};\n\ntemplate <typename T, class Context>\nclass DotProductOp : public Operator<Context> {\n public:\n  DotProductOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  INPUT_TAGS(X_IN, Y_IN);\n  
OUTPUT_TAGS(DOT_OUT);\n};\n\ntemplate <typename T, class Context>\nclass DotProductGradientOp final : public Operator<Context> {\n public:\n  DotProductGradientOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  INPUT_TAGS(X_IN, Y_IN, DER_DOT_IN);\n  OUTPUT_TAGS(DER_X_OUT, DER_Y_OUT);\n};\n\ntemplate <typename T, class Context>\nclass DotProductWithPaddingOp : public Operator<Context> {\n public:\n  DotProductWithPaddingOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        pad_value_(OperatorBase::GetSingleArgument<float>(\"pad_value\", 0.0)),\n        replicate_(OperatorBase::GetSingleArgument<bool>(\"replicate\", false)) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  float pad_value_;\n  bool replicate_;\n  INPUT_TAGS(X_IN, Y_IN);\n  OUTPUT_TAGS(DOT_OUT);\n};\n\ntemplate <typename T, class Context>\nclass CosineSimilarityOp : public Operator<Context> {\n public:\n  CosineSimilarityOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  INPUT_TAGS(X_IN, Y_IN);\n  OUTPUT_TAGS(COS_OUT);\n\n private:\n  Tensor<Context> aux_;\n};\n\ntemplate <typename T, class Context>\nclass CosineSimilarityGradientOp final : public Operator<Context> {\n public:\n  CosineSimilarityGradientOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  INPUT_TAGS(X_IN, Y_IN, DER_COS_IN);\n  OUTPUT_TAGS(DER_X_OUT, DER_Y_OUT);\n\n private:\n  Tensor<Context> aux_;\n};\n\ntemplate <typename T, class Context>\nclass DotProductWithPaddingGradientOp final : public Operator<Context> {\n public:\n  DotProductWithPaddingGradientOp(const OperatorDef& def, Workspace* ws)\n      : 
Operator<Context>(def, ws),\n        pad_value_(OperatorBase::GetSingleArgument<float>(\"pad_value\", 0.0)),\n        replicate_(OperatorBase::GetSingleArgument<bool>(\"replicate\", false)) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    auto& X = Input(X_IN);\n    auto& Y = Input(Y_IN);\n    auto& dDot = Input(DER_DOT_IN);\n    auto* dX = Output(DER_X_OUT);\n    auto* dY = Output(DER_Y_OUT);\n    int N, D, DX, DY, restD;\n    if (X.size() > 0) {\n      N = X.ndim() > 0 ? X.dim32(0) : 1;\n      DX = X.size() / N;\n      DY = Y.size() / N;\n    } else {\n      N = 0;\n      DX = 0;\n      DY = 0;\n    }\n    CAFFE_ENFORCE(!replicate_ || DX % DY == 0 || DY % DX == 0);\n    D = std::min(DX, DY);\n    restD = std::max(DX, DY) - D;\n    CAFFE_ENFORCE_EQ(X.ndim(), Y.ndim());\n    CAFFE_ENFORCE_EQ(X.dim32(0), Y.dim32(0));\n    CAFFE_ENFORCE_EQ(dDot.ndim(), 1);\n    CAFFE_ENFORCE_EQ(dDot.dim32(0), N);\n    dX->ResizeLike(X);\n    dY->ResizeLike(Y);\n\n    const auto* X_data = X.template data<T>();\n    const auto* Y_data = Y.template data<T>();\n    const auto* dDot_data = dDot.template data<T>();\n    auto* dX_data = dX->template mutable_data<T>();\n    auto* dY_data = dY->template mutable_data<T>();\n    for (int i = 0; i < N; ++i) { // TODO: multithreading\n      auto offsetX = i * DX;\n      auto offsetY = i * DY;\n      if (replicate_) {\n        // L_ for longer vector and S_ for shorter vector\n        const T *L_data, *S_data;\n        T *dL_data, *dS_data;\n        int DL, DS;\n        if (DX > DY) {\n          L_data = X_data + offsetX;\n          S_data = Y_data + offsetY;\n          dL_data = dX_data + offsetX;\n          dS_data = dY_data + offsetY;\n          DL = DX;\n          DS = DY;\n        } else {\n          L_data = Y_data + offsetY;\n          S_data = X_data + offsetX;\n          dL_data = dY_data + offsetY;\n          dS_data = dX_data + offsetX;\n          DL = DY;\n          DS = DX;\n        }\n\n        // TODO: 
get rid of temp memory use\n        std::vector<T> tmp_data(DS);\n        math::Set<T, Context>(DS, 0.0, dS_data, &context_);\n        for (int j = 0; j < DL / DS; j++) {\n          math::Scale<T, Context>(\n              DS, dDot_data[i], S_data, dL_data + j * DS, &context_);\n          math::Scale<T, Context>(\n              DS, dDot_data[i], L_data + j * DS, tmp_data.data(), &context_);\n          math::Axpy<T, Context>(DS, 1.0, tmp_data.data(), dS_data, &context_);\n        }\n      } else {\n        math::Scale<T, Context>(\n            D, dDot_data[i], X_data + offsetX, dY_data + offsetY, &context_);\n        math::Scale<T, Context>(\n            D, dDot_data[i], Y_data + offsetY, dX_data + offsetX, &context_);\n      }\n\n      if (!replicate_ && DX != DY) {\n        T* rest_data;\n        if (DX > DY) {\n          rest_data = dX_data + offsetX + D;\n        } else {\n          rest_data = dY_data + offsetY + D;\n        }\n        auto pad_gradient = dDot_data[i] * pad_value_;\n        math::Set<T, Context>(restD, pad_gradient, rest_data, &context_);\n      }\n    }\n\n    return true;\n  }\n\n protected:\n  float pad_value_;\n  bool replicate_;\n  INPUT_TAGS(X_IN, Y_IN, DER_DOT_IN);\n  OUTPUT_TAGS(DER_X_OUT, DER_Y_OUT);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_DISTANCE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/do_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_DO_OP_H_\n#define CAFFE2_OPERATORS_DO_OP_H_\n\n#include <string>\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass DoOp final : public Operator<Context> {\n public:\n  DoOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws), parent_ws_(ws) {\n    CAFFE_ENFORCE(\n        this->template HasSingleArgumentOfType<NetDef>(\"net\"),\n        \"net must be specified in Do operator\");\n    net_def_ = this->template GetSingleArgument<NetDef>(\"net\", NetDef());\n    is_gradient_op_ = operator_def.is_gradient_op();\n\n    const auto& inner_blobs =\n        this->template GetRepeatedArgument<std::string>(\"inner_blobs\");\n    const auto& outer_blobs_idx =\n        this->template GetRepeatedArgument<int>(\"outer_blobs_idx\");\n    CAFFE_ENFORCE_EQ(\n        inner_blobs.size(),\n        outer_blobs_idx.size(),\n        \"Invalid blob bindings: different inner/outer blobs lengths\");\n\n    const auto& outer_blob_names = checkAndGetOuterNames(operator_def);\n    std::unordered_set<std::string> used_outer_names;\n    for (size_t blob_idx = 0; blob_idx < inner_blobs.size(); ++blob_idx) {\n      CAFFE_ENFORCE(\n          !blob_bindings_.count(inner_blobs[blob_idx]),\n          \"Invalid blob bindings: redefinition of inner blob \" +\n              inner_blobs[blob_idx]);\n      CAFFE_ENFORCE(\n          outer_blobs_idx[blob_idx] >= 0 &&\n              outer_blobs_idx[blob_idx] < outer_blob_names.size(),\n          \"Invalid blob bindings: outer blob index (\" +\n              caffe2::to_string(outer_blobs_idx[blob_idx]) + \", inner name: \" +\n              inner_blobs[blob_idx] + \") is out of bounds [0, \" +\n              caffe2::to_string(outer_blob_names.size() - 1) + 
\"]\");\n      const auto& outer_name = outer_blob_names[outer_blobs_idx[blob_idx]];\n      CAFFE_ENFORCE(\n          !used_outer_names.count(outer_name),\n          \"Reusage of outer name: \" + outer_name);\n      used_outer_names.insert(outer_name);\n      blob_bindings_[inner_blobs[blob_idx]] = outer_name;\n    }\n    std::unordered_set<std::string> all_outer_names(\n        outer_blob_names.begin(), outer_blob_names.end());\n    CAFFE_ENFORCE_EQ(\n        used_outer_names.size(),\n        all_outer_names.size(),\n        \"Not all outer names are used in blob bindings\");\n  }\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n private:\n  // returns vector of input blob names followed by output blob names in\n  // operator definition order; ensures that input (output) names are unique,\n  // checks number of input (output) blobs\n  std::vector<std::string> checkAndGetOuterNames(\n      const OperatorDef& operator_def) const {\n    auto input_names = getInputBlobNames(operator_def);\n    CAFFE_ENFORCE(!input_names.empty(), \"Expected at least one input blob\");\n    std::string input_ws_blob = input_names.back(); // copy\n    // removing blob that holds pointer op workspace\n    input_names.pop_back();\n\n    std::unordered_set<std::string> all_input_names(\n        input_names.begin(), input_names.end());\n    CAFFE_ENFORCE_EQ(\n        input_names.size(), all_input_names.size(), \"Duplicate input blobs\");\n\n    auto output_names = getOutputBlobNames(operator_def);\n    CAFFE_ENFORCE(!output_names.empty(), \"Expected at least one output blob\");\n    const auto& output_ws_blob = output_names.back();\n    CAFFE_ENFORCE_EQ(\n        input_ws_blob,\n        output_ws_blob,\n        \"Expected same input/output workspace blob\");\n    // remove blob that holds pointer to op workspace\n    output_names.pop_back();\n\n    std::unordered_set<std::string> all_output_names(\n        output_names.begin(), output_names.end());\n    
CAFFE_ENFORCE_EQ(\n        output_names.size(), all_output_names.size(), \"Duplicate output blobs\");\n\n    std::vector<std::string> outer_blob_names;\n    outer_blob_names.reserve(input_names.size() + output_names.size());\n    outer_blob_names.insert(\n        outer_blob_names.end(), input_names.begin(), input_names.end());\n    outer_blob_names.insert(\n        outer_blob_names.end(), output_names.begin(), output_names.end());\n    return outer_blob_names;\n  }\n\n  std::vector<std::string> getInputBlobNames(\n      const OperatorDef& operator_def) const {\n    std::vector<std::string> names;\n    names.reserve(operator_def.input_size());\n    for (auto idx = 0; idx < operator_def.input_size(); ++idx) {\n      names.push_back(operator_def.input(idx));\n    }\n    return names;\n  }\n\n  std::vector<std::string> getOutputBlobNames(\n      const OperatorDef& operator_def) const {\n    std::vector<std::string> names;\n    names.reserve(operator_def.output_size());\n    for (auto idx = 0; idx < operator_def.output_size(); ++idx) {\n      names.push_back(operator_def.output(idx));\n    }\n    return names;\n  }\n\n  std::unordered_map<std::string, std::string> blob_bindings_;\n  bool is_gradient_op_;\n  NetDef net_def_;\n  Workspace* parent_ws_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_DO_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/dropout_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_DROPOUT_OP_H_\n#define CAFFE2_OPERATORS_DROPOUT_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass DropoutOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  DropoutOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        ratio_(OperatorBase::GetSingleArgument<float>(\"ratio\", 0.5)),\n        is_test_(\n            OperatorBase::GetSingleArgument<int>(OpSchema::Arg_IsTest, 0)) {\n    CAFFE_ENFORCE_GE(ratio_, 0);\n    CAFFE_ENFORCE_LT(ratio_, 1);\n  }\n\n  bool RunOnDevice() override;\n\n protected:\n  float ratio_;\n  bool is_test_;\n  // Input: X; Output: Y, mask.\n};\n\ntemplate <typename T, class Context>\nclass DropoutGradientOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  DropoutGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        ratio_(OperatorBase::GetSingleArgument<float>(\"ratio\", 0.5)),\n        is_test_(\n            OperatorBase::GetSingleArgument<int>(OpSchema::Arg_IsTest, 0)) {\n    CAFFE_ENFORCE_GE(ratio_, 0);\n    CAFFE_ENFORCE_LT(ratio_, 1);\n  }\n\n  bool RunOnDevice() override;\n\n protected:\n  float ratio_;\n  bool is_test_;\n  // Input: dY, mask; Output: dX\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_DROPOUT_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/elementwise_linear_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_ELEMENTWISE_LINEAR_OP_H_\n#define CAFFE2_OPERATORS_ELEMENTWISE_LINEAR_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\ntemplate <typename T, class Context, class Engine = DefaultEngine>\nclass ElementwiseLinearOp final : public Operator<Context> {\n public:\n  ElementwiseLinearOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        axis_(OperatorBase::GetSingleArgument<int>(\"axis\", 1)) {}\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n protected:\n  int axis_;\n};\n\ntemplate <typename T, class Context, class Engine = DefaultEngine>\nclass ElementwiseLinearGradientOp final : public Operator<Context> {\n public:\n  ElementwiseLinearGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        axis_(OperatorBase::GetSingleArgument<int>(\"axis\", 1)) {}\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n protected:\n  int axis_;\n};\n\n} // namespace caffe2\n\n#endif  // CAFFE2_OPERATORS_ELEMENTWISE_LINEAR_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/elementwise_logical_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_ELEMENTWISE_LOGICAL_OPS_H_\n#define CAFFE2_OPERATORS_ELEMENTWISE_LOGICAL_OPS_H_\n\n#include \"caffe2/core/common_omp.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/elementwise_op.h\"\n\n#include <unordered_set>\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass WhereOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_FUNCTIONS(Context);\n  USE_DISPATCH_HELPER;\n\n  WhereOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        OP_SINGLE_ARG(bool, \"broadcast_on_rows\", enable_broadcast_, 0) {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<\n        TensorTypes<float, double, int, long, std::string, bool>>::\n        call(this, Input(1));\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    auto& select = Input(0);\n    auto& left = Input(1);\n    auto& right = Input(2);\n    auto* output = Output(0);\n    if (enable_broadcast_) {\n      CAFFE_ENFORCE_EQ(select.ndim(), 1);\n      CAFFE_ENFORCE_EQ(select.dim(0), right.dim(0));\n      CAFFE_ENFORCE_EQ(left.dims(), right.dims());\n    } else {\n      CAFFE_ENFORCE_EQ(select.dims(), left.dims());\n      CAFFE_ENFORCE_EQ(select.dims(), right.dims());\n    }\n    output->ResizeLike(left);\n\n    const bool* select_data = select.template data<bool>();\n    const T* left_data = left.template data<T>();\n    const T* right_data = right.template data<T>();\n    T* output_data = output->template mutable_data<T>();\n\n    if (enable_broadcast_) {\n      size_t block_size = left.size_from_dim(1);\n      for (int i = 0; i < select.size(); i++) {\n        size_t offset = i * block_size;\n        if (select_data[i]) {\n          context_.template CopyItems<Context, Context>(\n              output->meta(),\n              block_size,\n              left_data + offset,\n              output_data + offset);\n    
    } else {\n          context_.template CopyItems<Context, Context>(\n              output->meta(),\n              block_size,\n              right_data + offset,\n              output_data + offset);\n        }\n      }\n    } else {\n      for (int i = 0; i < select.size(); ++i) {\n        output_data[i] = select_data[i] ? left_data[i] : right_data[i];\n      }\n    }\n    return true;\n  }\n\n private:\n  bool enable_broadcast_;\n};\n\nclass IsMemberOfValueHolder {\n  std::unordered_set<int32_t> int32_values_;\n  std::unordered_set<int64_t> int64_values_;\n  std::unordered_set<bool> bool_values_;\n  std::unordered_set<std::string> string_values_;\n  bool has_values_ = false;\n\n public:\n  template <typename T>\n  std::unordered_set<T>& get();\n\n  template <typename T>\n  void set(const std::vector<T>& args) {\n    has_values_ = true;\n    auto& values = get<T>();\n    values.insert(args.begin(), args.end());\n  }\n\n  bool has_values() {\n    return has_values_;\n  }\n};\n\ntemplate <class Context>\nclass IsMemberOfOp final : public Operator<Context> {\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_DISPATCH_HELPER;\n\n  static constexpr const char* VALUE_TAG = \"value\";\n\n public:\n  using TestableTypes = TensorTypes<int32_t, int64_t, bool, std::string>;\n\n  IsMemberOfOp(const OperatorDef& op, Workspace* ws)\n      : Operator<Context>(op, ws) {\n    auto dtype =\n        static_cast<TensorProto_DataType>(OperatorBase::GetSingleArgument<int>(\n            \"dtype\", TensorProto_DataType_UNDEFINED));\n    switch (dtype) {\n      case TensorProto_DataType_INT32:\n        values_.set(OperatorBase::GetRepeatedArgument<int32_t>(VALUE_TAG));\n        break;\n      case TensorProto_DataType_INT64:\n        values_.set(OperatorBase::GetRepeatedArgument<int64_t>(VALUE_TAG));\n        break;\n      case TensorProto_DataType_BOOL:\n        values_.set(OperatorBase::GetRepeatedArgument<bool>(VALUE_TAG));\n        break;\n      case TensorProto_DataType_STRING:\n        
values_.set(OperatorBase::GetRepeatedArgument<std::string>(VALUE_TAG));\n        break;\n      case TensorProto_DataType_UNDEFINED:\n        // If dtype is not provided, values_ will be filled the first time that\n        // DoRunWithType is called.\n        break;\n      default:\n        CAFFE_THROW(\"Unexpected 'dtype' argument value: \", dtype);\n    }\n  }\n  virtual ~IsMemberOfOp() noexcept {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<\n        TensorTypes<int32_t, int64_t, bool, std::string>>::call(this, Input(0));\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    auto& input = Input(0);\n    auto* output = Output(0);\n    output->ResizeLike(input);\n\n    if (!values_.has_values()) {\n      values_.set(OperatorBase::GetRepeatedArgument<T>(VALUE_TAG));\n    }\n    const auto& values = values_.get<T>();\n\n    const T* input_data = input.template data<T>();\n    bool* output_data = output->template mutable_data<bool>();\n    for (int i = 0; i < input.size(); ++i) {\n      output_data[i] = values.find(input_data[i]) != values.end();\n    }\n    return true;\n  }\n\n protected:\n  IsMemberOfValueHolder values_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_ELEMENTWISE_LOGICAL_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/elementwise_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_ELEMENTWISE_OP_H_\n#define CAFFE2_OPERATORS_ELEMENTWISE_OP_H_\n\n#include \"caffe2/core/common_omp.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nusing NumericTypes = TensorTypes<int32_t, int64_t, float, double>;\nusing IntTypes = TensorTypes<int32_t, int64_t>;\nusing BoolTypes = TensorTypes<bool>;\n\nstruct SameTypeAsInput {\n  template <typename T>\n  using type = T;\n};\n\ntemplate <typename R>\nstruct FixedType {\n  template <typename T>\n  using type = R;\n};\n\ntemplate <\n    typename InputTypes,\n    class Context,\n    class Functor,\n    class TypeMap = SameTypeAsInput>\nclass UnaryElementwiseWithArgsOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  UnaryElementwiseWithArgsOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws), functor_(*this) {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<InputTypes>::call(this, Input(0));\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    auto& input = Input(0);\n    auto* output = Output(0);\n    output->ResizeLike(input);\n    using R = typename TypeMap::template type<T>;\n    functor_(\n        input.size(),\n        input.template data<T>(),\n        output->template mutable_data<R>(),\n        &context_);\n    return true;\n  }\n\n private:\n  Functor functor_;\n};\n\n/**\n * WithDefaultConstructor is a functor that can be used as the functor of an\n * UnaryElementwiseWithArgsOp. 
It simply forwards the operator() call into\n * another functor that doesn't accept arguments in its constructor.\n */\ntemplate <typename Functor>\nstruct WithDefaultConstructor {\n  explicit WithDefaultConstructor(OperatorBase& /*op*/) {}\n\n  template <typename In, typename Out, typename Context>\n  void operator()(int n, const In* in, Out* out, Context* c) {\n    Functor()(n, in, out, c);\n  }\n};\n\n/**\n * UnaryElementwiseOp is a wrapper around UnaryElementwiseWithArgsOp, with the\n * difference that it takes a functor with default constructor, e.g. that does\n * not need to take into consideration any arguments during operator creation.\n */\ntemplate <\n    typename InputTypes,\n    class Context,\n    class Functor,\n    class OutputType = SameTypeAsInput>\nusing UnaryElementwiseOp = UnaryElementwiseWithArgsOp<\n    InputTypes,\n    Context,\n    WithDefaultConstructor<Functor>,\n    OutputType>;\n\n/**\n * Performs a binary operation (e.g. +, - or /) with optional broadcast support.\n *\n * Functor specifies actual operation to be performed.\n *\n * If AllowBroadcast=false tensors has to be of exactly the same shape.\n *\n * If AllowBroadcast=true it support limited broadcasting of the right-hand-side\n * argument to match the shape of left-hand-side argument. Only suffix matching\n * is supported for now, 1-dim expansion doesn't work yet. 
More precisely\n * tensors A and B can be operated on iff\n *   `shape(A)[-len(shape(B)):] == * shape(B)`\n */\ntemplate <\n    typename InputTypes,\n    class Context,\n    class Functor,\n    class TypeMap = SameTypeAsInput>\nclass BinaryElementwiseOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  BinaryElementwiseOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        OP_SINGLE_ARG(bool, \"broadcast\", enable_broadcast_, 0),\n        OP_SINGLE_ARG(int, \"axis\", axis_, -1),\n        OP_SINGLE_ARG(string, \"axis_str\", axis_str_, \"\"),\n        OP_SINGLE_ARG(string, \"order\", order_, \"NCHW\"),\n        functor_() {\n    // Figure out the correct axis to use.\n    if (enable_broadcast_) {\n      if (axis_ != -1) {\n        // Get axis from an explicit axis argument.\n        CAFFE_ENFORCE_EQ(\n            axis_str_.size(),\n            0,\n            \"Args axis and axis_str cannot be used simultaneously.\");\n      } else if (axis_str_.size()) {\n        // Get the axis index semantically.\n        CAFFE_ENFORCE_EQ(\n            axis_str_.size(), 1, \"Unsupported axis string\", axis_str_);\n        size_t semantic_axis_ = order_.find(axis_str_);\n        CAFFE_ENFORCE_NE(\n            semantic_axis_,\n            string::npos,\n            \"Unrecognizable axis string \",\n            axis_str_,\n            \" from order string \",\n            order_);\n        axis_ = semantic_axis_;\n      }\n    } else {\n      CAFFE_ENFORCE(\n          axis_ == -1 && axis_str_.size() == 0,\n          \"Do not specify axis or axis_str if broadcast is not enabled.\");\n    }\n  }\n\n  bool RunOnDevice() override {\n    return DispatchHelper<InputTypes>::call(this, Input(0));\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    const auto& A = Input(0);\n    const auto& B = Input(1);\n    auto* C = Output(0);\n    CAFFE_ENFORCE(\n        &B != C || !enable_broadcast_,\n        
\"In-place is allowed only with the first tensor when broadcasting\");\n    C->ResizeLike(A);\n    const T* Adata = A.template data<T>();\n    const T* Bdata = B.template data<T>();\n    auto* Cdata =\n        C->template mutable_data<typename TypeMap::template type<T>>();\n    if (!enable_broadcast_) {\n      CAFFE_ENFORCE_EQ(\n          A.dims(),\n          B.dims(),\n          \"Dimension mismatch - did you forget to set broadcast=1?\");\n      functor_.template Run<false>(A.size(), Adata, Bdata, Cdata, &context_);\n    } else if (B.size() == 1) {\n      functor_.template Run<true>(A.size(), Adata, Bdata, Cdata, &context_);\n    } else {\n      CAFFE_ENFORCE_GT(\n          A.ndim(),\n          B.ndim(),\n          \"If you are doing broadcasting, input1 should have \"\n          \"a smaller number of dimensions.\");\n      const int axis = (axis_ == -1 ? A.ndim() - B.ndim() : axis_);\n      CAFFE_ENFORCE(\n          axis >= 0 && axis < A.ndim(),\n          \"Broadcast axis should be in the range of the number \"\n          \"of dimensions of the first input.\");\n      size_t pre = 1, n = 1, post = 1;\n      for (int i = 0; i < axis; ++i) {\n        pre *= A.dim(i);\n      }\n      for (int i = 0; i < B.ndim(); ++i) {\n        CAFFE_ENFORCE_EQ(\n            A.dim(i + axis), B.dim(i), \"Broadcast dimension mismatch.\");\n        n *= B.dim(i);\n      }\n      for (int i = axis + B.ndim(); i < A.ndim(); ++i) {\n        post *= A.dim(i);\n      }\n      if (post == 1) {\n        functor_.RunWithBroadcast(Adata, Bdata, Cdata, pre, n, &context_);\n      } else {\n        functor_.RunWithBroadcast2(\n            Adata, Bdata, Cdata, pre, n, post, &context_);\n      }\n    }\n    return true;\n  }\n\n private:\n  bool enable_broadcast_;\n  int axis_;\n  string axis_str_;\n  string order_;\n  Functor functor_;\n};\n\ntemplate <typename Functor>\nstruct WithoutBroadcast {\n  template <bool b_is_scalar, typename T, typename R, typename Context>\n  inline void Run(size_t 
n, const T* a, const T* b, R* out, Context* c) {\n    if (b_is_scalar) {\n      CAFFE_THROW(\"Broadcast not supported.\");\n    } else {\n      Functor().Run(n, a, b, out, c);\n    }\n  }\n  template <typename T, typename R, typename Context>\n  inline void RunWithBroadcast(\n      const T* /*a*/,\n      const T* /*b*/,\n      R* /*out*/,\n      size_t /*pre*/,\n      size_t /*n*/,\n      Context*) {\n    CAFFE_NOT_IMPLEMENTED;\n  }\n  template <typename T, typename R, typename Context>\n  inline void RunWithBroadcast2(\n      const T* /*a*/,\n      const T* /*b*/,\n      R* /*out*/,\n      size_t /*pre*/,\n      size_t /*n*/,\n      size_t /*post*/,\n      Context*) {\n    CAFFE_NOT_IMPLEMENTED;\n  }\n};\n\n// Gradient operator for elementwise division.\ntemplate <class Context>\nclass DivGradientOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(DivGradientOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n};\n\nnamespace SRLHelper {\n\ntemplate <typename T>\nvoid sum2one(const T* a, T* y, size_t n);\n\ntemplate <typename T>\nvoid RunWithBroadcastFront(const T* a, T* y, size_t pre, size_t n, CPUContext*);\n\ntemplate <typename T>\nvoid RunWithBroadcastBack(const T* a, T* y, size_t post, size_t n, CPUContext*);\n\ntemplate <typename T>\nvoid RunWithBroadcast2(\n    const T* a,\n    T* y,\n    size_t pre,\n    size_t n,\n    size_t post,\n    CPUContext*);\n\n} // namespace SRLHelper\n\n// Sum reduction operator that is used for computing the gradient in cases\n// where the forward op is in broadcast mode.\ntemplate <class Context>\nclass SumReduceLikeOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SumReduceLikeOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        OP_SINGLE_ARG(int, \"axis\", axis_, -1),\n        OP_SINGLE_ARG(string, \"axis_str\", axis_str_, \"\"),\n        OP_SINGLE_ARG(string, \"order\", order_, \"NCHW\") {\n    
if (axis_ != -1) {\n      // Get axis from an explicit axis argument.\n      CAFFE_ENFORCE_EQ(\n          axis_str_.size(),\n          0,\n          \"Args axis and axis_str cannot be used simultaneously.\");\n    } else if (axis_str_.size()) {\n      // Get the axis index semantically.\n      CAFFE_ENFORCE_EQ(\n          axis_str_.size(), 1, \"Unsupported axis string\", axis_str_);\n      size_t semantic_axis = order_.find(axis_str_);\n      CAFFE_ENFORCE_NE(\n          semantic_axis,\n          string::npos,\n          \"Unrecognizable axis string \",\n          axis_str_,\n          \" from order string \",\n          order_);\n      axis_ = semantic_axis;\n    }\n  }\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<float, double>>::call(this, Input(0));\n  }\n\n  template <typename T>\n  bool DoRunWithType();\n\n private:\n  int axis_;\n  string axis_str_;\n  string order_;\n  Tensor<Context> ones_;\n  Tensor<Context> sum_buffer_;\n};\n\ntemplate <class Context>\nbool DivGradientOp<Context>::RunOnDevice() {\n  auto& Y = Input(0);\n  auto& Z = Input(1);\n  auto& dZ = Input(2);\n  auto* dX = Output(0);\n  auto* dY = Output(1);\n  CAFFE_ENFORCE_GT(Y.size(), 0);\n  CAFFE_ENFORCE_GT(Z.size(), 0);\n  dX->ResizeLike(Y);\n  dY->ResizeLike(Y);\n\n  const float* Ydata = Y.template data<float>();\n  const float* Zdata = Z.template data<float>();\n  const float* dZdata = dZ.template data<float>();\n  float* dXdata = dX->template mutable_data<float>();\n  float* dYdata = dY->template mutable_data<float>();\n\n  ElementWiseDivide(context_, Y.size(), dXdata, dYdata, dZdata, Ydata, Zdata);\n  return true;\n}\n\n// For arithmetic operators, Eigen provides a good way to vectorize even\n// when broadcasting.\n#define EIGEN_FUNCTOR(name, eigen_op, input_type, output_type)               \\\n  struct Eigen##name##Functor {                                              \\\n    template <int b_is_scalar, typename T, typename R>                       \\\n    
inline void Run(size_t n, const T* a, const T* b, R* out, CPUContext*) { \\\n      if (b_is_scalar) {                                                     \\\n        EigenVectorArrayMap<R>(out, n) =                                     \\\n            eigen_op((ConstEigenVectorArrayMap<T>(a, n)), (b[0]));           \\\n      } else {                                                               \\\n        EigenVectorArrayMap<R>(out, n) = eigen_op(                           \\\n            (ConstEigenVectorArrayMap<T>(a, n)),                             \\\n            (ConstEigenVectorArrayMap<T>(b, n)));                            \\\n      }                                                                      \\\n    }                                                                        \\\n    template <typename T, typename R>                                        \\\n    void RunWithBroadcast(                                                   \\\n        const T* a,                                                          \\\n        const T* b,                                                          \\\n        R* out,                                                              \\\n        size_t pre,                                                          \\\n        size_t n,                                                            \\\n        CPUContext*) {                                                       \\\n      EigenArrayMap<R>(out, n, pre) = eigen_op(                              \\\n          (ConstEigenArrayMap<T>(a, n, pre).colwise()),                      \\\n          (ConstEigenVectorArrayMap<T>(b, n)));                              \\\n    }                                                                        \\\n    template <typename T, typename R>                                        \\\n    void RunWithBroadcast2(                                                  \\\n        const T* a,                                         
                 \\\n        const T* b,                                                          \\\n        R* out,                                                              \\\n        size_t pre,                                                          \\\n        size_t n,                                                            \\\n        size_t post,                                                         \\\n        CPUContext*) {                                                       \\\n      for (int i = 0; i < pre; ++i) {                                        \\\n        EigenArrayMap<R>(out + i * n * post, post, n) = eigen_op(            \\\n            (ConstEigenArrayMap<T>(a + i * n * post, post, n).rowwise()),    \\\n            (Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>>(b, n)));   \\\n      }                                                                      \\\n    }                                                                        \\\n  };                                                                         \\\n  REGISTER_CPU_OPERATOR(                                                     \\\n      name,                                                                  \\\n      BinaryElementwiseOp<                                                   \\\n          input_type,                                                        \\\n          CPUContext,                                                        \\\n          Eigen##name##Functor,                                              \\\n          output_type>)\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_ELEMENTWISE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/elementwise_op_test.h",
    "content": "#ifndef CAFFE2_OPERATORS_ELEMENTWISE_OP_TEST_H_\n#define CAFFE2_OPERATORS_ELEMENTWISE_OP_TEST_H_\n\n#include <iostream>\n#include <string>\n#include <vector>\n\n#include \"caffe2/operators/elementwise_op.h\"\n#include <gtest/gtest.h>\n\ntemplate <typename Context, typename T>\nvoid CopyVector(const int N, const T* x, T* y);\n\ntemplate <typename Context, typename I_Type, typename O_Type>\nvoid FillTensor(\n    caffe2::Workspace* ws,\n    const std::string& name,\n    const std::vector<caffe2::TIndex>& shape,\n    const std::vector<I_Type>& values) {\n  auto* blob = ws->CreateBlob(name);\n  auto* tensor = blob->GetMutable<caffe2::Tensor<Context>>();\n  tensor->Resize(shape);\n  auto* mutable_data = tensor->template mutable_data<O_Type>();\n  const O_Type* data = reinterpret_cast<const O_Type*>(values.data());\n  CopyVector<Context, O_Type>(values.size(), data, mutable_data);\n}\n\ntemplate <typename Context>\ncaffe2::OperatorDef CreateOperatorDef() {\n  caffe2::OperatorDef def;\n  return def;\n}\n\ntemplate <typename Context>\ncaffe2::OperatorDef DefineOperator(const std::string& op_type) {\n  caffe2::OperatorDef def = CreateOperatorDef<Context>();\n  def.set_name(\"test\");\n  def.set_type(op_type);\n  def.add_input(\"X\");\n  def.add_input(\"Y\");\n  def.add_output(\"Z\");\n  return def;\n}\n\ntemplate <typename Context>\nvoid elementwiseAnd() {\n  const int N = 4;\n  const int M = 2;\n  caffe2::Workspace ws;\n  auto def = DefineOperator<Context>(\"And\");\n  { // equal size\n    FillTensor<Context, uint8_t, bool>(\n        &ws, \"X\", {N}, {true, false, true, false});\n    FillTensor<Context, uint8_t, bool>(\n        &ws, \"Y\", {N}, {true, true, false, false});\n    std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));\n    EXPECT_NE(nullptr, op.get());\n    EXPECT_TRUE(op->Run());\n    auto* blob = ws.GetBlob(\"Z\");\n    EXPECT_NE(nullptr, blob);\n    caffe2::TensorCPU Z(blob->Get<caffe2::Tensor<Context>>());\n    
EXPECT_EQ(Z.size(), N);\n    std::vector<bool> result{true, false, false, false};\n    for (size_t i = 0; i < Z.size(); ++i) {\n      EXPECT_EQ(Z.template data<bool>()[i], result[i]);\n    }\n  }\n  { // broadcast\n    auto* arg = def.add_arg();\n    arg->set_name(\"broadcast\");\n    arg->set_i(1);\n    FillTensor<Context, uint8_t, bool>(\n        &ws, \"X\", {M, N}, {true, false, true, false, true, false, true, false});\n    FillTensor<Context, uint8_t, bool>(\n        &ws, \"Y\", {N}, {true, true, false, false});\n    std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));\n    EXPECT_NE(nullptr, op.get());\n    EXPECT_TRUE(op->Run());\n    auto* blob = ws.GetBlob(\"Z\");\n    EXPECT_NE(nullptr, blob);\n    caffe2::TensorCPU Z(blob->Get<caffe2::Tensor<Context>>());\n    EXPECT_EQ(Z.size(), M * N);\n    std::vector<bool> result{\n        true, false, false, false, true, false, false, false};\n    for (size_t i = 0; i < Z.size(); ++i) {\n      EXPECT_EQ(Z.template data<bool>()[i], result[i]);\n    }\n  }\n}\n\ntemplate <typename Context>\nvoid elementwiseOr() {\n  const int N = 4;\n  const int M = 2;\n  caffe2::Workspace ws;\n  auto def = DefineOperator<Context>(\"Or\");\n  { // equal size\n    FillTensor<Context, uint8_t, bool>(\n        &ws, \"X\", {N}, {true, false, true, false});\n    FillTensor<Context, uint8_t, bool>(\n        &ws, \"Y\", {N}, {true, true, false, false});\n    std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));\n    EXPECT_NE(nullptr, op.get());\n    EXPECT_TRUE(op->Run());\n    auto* blob = ws.GetBlob(\"Z\");\n    EXPECT_NE(nullptr, blob);\n    caffe2::TensorCPU Z(blob->Get<caffe2::Tensor<Context>>());\n    EXPECT_EQ(Z.size(), N);\n    std::vector<bool> result{true, true, true, false};\n    for (size_t i = 0; i < Z.size(); ++i) {\n      EXPECT_EQ(Z.template data<bool>()[i], result[i]);\n    }\n  }\n  { // broadcast\n    auto* arg = def.add_arg();\n    arg->set_name(\"broadcast\");\n    
arg->set_i(1);\n    FillTensor<Context, uint8_t, bool>(\n        &ws, \"X\", {M, N}, {true, false, true, false, true, false, true, false});\n    FillTensor<Context, uint8_t, bool>(\n        &ws, \"Y\", {N}, {true, true, false, false});\n    std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));\n    EXPECT_NE(nullptr, op.get());\n    EXPECT_TRUE(op->Run());\n    auto* blob = ws.GetBlob(\"Z\");\n    EXPECT_NE(nullptr, blob);\n    caffe2::TensorCPU Z(blob->Get<caffe2::Tensor<Context>>());\n    EXPECT_EQ(Z.size(), M * N);\n    std::vector<bool> result{true, true, true, false, true, true, true, false};\n    for (size_t i = 0; i < Z.size(); ++i) {\n      EXPECT_EQ(Z.template data<bool>()[i], result[i]);\n    }\n  }\n}\n\ntemplate <typename Context>\nvoid elementwiseXor() {\n  const int N = 4;\n  const int M = 2;\n  caffe2::Workspace ws;\n  auto def = DefineOperator<Context>(\"Xor\");\n  { // equal size\n    FillTensor<Context, uint8_t, bool>(\n        &ws, \"X\", {N}, {true, false, true, false});\n    FillTensor<Context, uint8_t, bool>(\n        &ws, \"Y\", {N}, {true, true, false, false});\n    std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));\n    EXPECT_NE(nullptr, op.get());\n    EXPECT_TRUE(op->Run());\n    auto* blob = ws.GetBlob(\"Z\");\n    EXPECT_NE(nullptr, blob);\n    caffe2::TensorCPU Z(blob->Get<caffe2::Tensor<Context>>());\n    EXPECT_EQ(Z.size(), N);\n    std::vector<bool> result{false, true, true, false};\n    for (size_t i = 0; i < Z.size(); ++i) {\n      EXPECT_EQ(Z.template data<bool>()[i], result[i]);\n    }\n  }\n  { // broadcast\n    auto* arg = def.add_arg();\n    arg->set_name(\"broadcast\");\n    arg->set_i(1);\n    FillTensor<Context, uint8_t, bool>(\n        &ws, \"X\", {M, N}, {true, false, true, false, true, false, true, false});\n    FillTensor<Context, uint8_t, bool>(\n        &ws, \"Y\", {N}, {true, true, false, false});\n    std::unique_ptr<caffe2::OperatorBase> 
op(caffe2::CreateOperator(def, &ws));\n    EXPECT_NE(nullptr, op.get());\n    EXPECT_TRUE(op->Run());\n    auto* blob = ws.GetBlob(\"Z\");\n    EXPECT_NE(nullptr, blob);\n    caffe2::TensorCPU Z(blob->Get<caffe2::Tensor<Context>>());\n    EXPECT_EQ(Z.size(), M * N);\n    std::vector<bool> result{\n        false, true, true, false, false, true, true, false};\n    for (size_t i = 0; i < Z.size(); ++i) {\n      EXPECT_EQ(Z.template data<bool>()[i], result[i]);\n    }\n  }\n}\n\ntemplate <typename Context>\nvoid elementwiseNot() {\n  const int N = 2;\n  caffe2::Workspace ws;\n  caffe2::OperatorDef def = CreateOperatorDef<Context>();\n  def.set_name(\"test\");\n  def.set_type(\"Not\");\n  def.add_input(\"X\");\n  def.add_output(\"Y\");\n  FillTensor<Context, uint8_t, bool>(&ws, \"X\", {N}, {true, false});\n  std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));\n  EXPECT_NE(nullptr, op.get());\n  EXPECT_TRUE(op->Run());\n  auto* blob = ws.GetBlob(\"Y\");\n  EXPECT_NE(nullptr, blob);\n  caffe2::TensorCPU Y(blob->Get<caffe2::Tensor<Context>>());\n  EXPECT_EQ(Y.size(), N);\n  std::vector<bool> result{false, true};\n  for (size_t i = 0; i < Y.size(); ++i) {\n    EXPECT_EQ(Y.template data<bool>()[i], result[i]);\n  }\n}\n\ntemplate <typename Context>\nvoid elementwiseEQ() {\n  const int N = 4;\n  const int M = 2;\n  caffe2::Workspace ws;\n  auto def = DefineOperator<Context>(\"EQ\");\n  { // equal size\n    FillTensor<Context, int32_t, int32_t>(&ws, \"X\", {N}, {1, 100, 5, -10});\n    FillTensor<Context, int32_t, int32_t>(&ws, \"Y\", {N}, {0, 100, 4, -10});\n    std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));\n    EXPECT_NE(nullptr, op.get());\n    EXPECT_TRUE(op->Run());\n    auto* blob = ws.GetBlob(\"Z\");\n    EXPECT_NE(nullptr, blob);\n    caffe2::TensorCPU Z(blob->Get<caffe2::Tensor<Context>>());\n    EXPECT_EQ(Z.size(), N);\n    std::vector<bool> result{false, true, false, true};\n    for (size_t i = 0; i < Z.size(); 
++i) {\n      EXPECT_EQ(Z.template data<bool>()[i], result[i]);\n    }\n  }\n  { // broadcast\n    auto* arg = def.add_arg();\n    arg->set_name(\"broadcast\");\n    arg->set_i(1);\n    FillTensor<Context, int32_t, int32_t>(\n        &ws, \"X\", {M, N}, {1, 100, 5, -10, 3, 6, -1000, 33});\n    FillTensor<Context, int32_t, int32_t>(&ws, \"Y\", {N}, {1, 6, -1000, -10});\n    std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));\n    EXPECT_NE(nullptr, op.get());\n    EXPECT_TRUE(op->Run());\n    auto* blob = ws.GetBlob(\"Z\");\n    EXPECT_NE(nullptr, blob);\n    caffe2::TensorCPU Z(blob->Get<caffe2::Tensor<Context>>());\n    EXPECT_EQ(Z.size(), M * N);\n    std::vector<bool> result{\n        true, false, false, true, false, true, true, false};\n    for (size_t i = 0; i < Z.size(); ++i) {\n      EXPECT_EQ(Z.template data<bool>()[i], result[i]);\n    }\n  }\n}\n\n#endif // CAFFE2_OPERATORS_ELEMENTWISE_OP_TEST_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/elu_op.h",
    "content": "#pragma once\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass EluOp final : public Operator<Context> {\n public:\n  EluOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        alpha_(OperatorBase::GetSingleArgument<float>(\"alpha\", 1.0)) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  T alpha_;\n};\n\ntemplate <typename T, class Context>\nclass EluGradientOp final : public Operator<Context> {\n public:\n  EluGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        alpha_(OperatorBase::GetSingleArgument<float>(\"alpha\", 1.0)) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  T alpha_;\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/feed_blob_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_FEED_BLOB_OP_H_\n#define CAFFE2_OPERATORS_FEED_BLOB_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass FeedBlobOp : public Operator<Context> {\n public:\n  FeedBlobOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws) {\n    CAFFE_ENFORCE(\n        OperatorBase::HasSingleArgumentOfType<string>(\"value\"),\n        \"value argument must exist and be passed as a string\");\n    value_ = OperatorBase::GetSingleArgument<string>(\"value\", \"\");\n  }\n\n  bool RunOnDevice() override {\n    *OperatorBase::Output<std::string>(0) = value_;\n    return true;\n  }\n\n private:\n  std::string value_;\n};\n\n} // namespace caffe2\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/filler_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_FILLER_OP_H_\n#define CAFFE2_OPERATORS_FILLER_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\n// FillerOp takes in either zero or one input.\n//\n// If the number of input is 1, the shape will be identical to that of the input\n// at run time with optional additional dimensions appended at the end as\n// specified by \"extra_shape\" argument. In that case the \"shape\" parameter\n// should not be set.\n//\n// If the number of inputs is 0, the full shape must be provided via \"shape\"\n// argument\ntemplate <class Context>\nclass FillerOp : public Operator<Context> {\n public:\n  FillerOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        shape_(ToVectorTIndex(OperatorBase::GetRepeatedArgument<int>(\"shape\"))),\n        extra_shape_(ToVectorTIndex(\n            OperatorBase::GetRepeatedArgument<int>(\"extra_shape\"))),\n        input_as_shape_(\n            OperatorBase::GetSingleArgument<bool>(\"input_as_shape\", false)) {\n    if (InputSize()) {\n      if (shape_.size() != 0) {\n        CAFFE_THROW(\n            \"Cannot set the shape argument and pass in an input at \"\n            \"the same time\");\n      }\n    } else {\n      if (!extra_shape_.empty()) {\n        CAFFE_THROW(\"Cannot set extra_shape when there is no input\");\n      }\n      if (input_as_shape_) {\n        CAFFE_THROW(\"An input must be given if input_as_shape is true\");\n      }\n      if (shape_.size() == 0 &&\n          OperatorBase::HasSingleArgumentOfType<int>(\"shape\")) {\n        CAFFE_THROW(\"Fill 'shape' argument was a scalar, list expected\");\n      }\n    }\n  }\n\n  virtual ~FillerOp() {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    auto* output = Operator<Context>::Output(0);\n    if (InputSize()) {\n      auto shape = 
vector<TIndex>{};\n      if (input_as_shape_) {\n        // Shape input must be in CPU context\n        auto& input = OperatorBase::Input<Tensor<CPUContext>>(0);\n        CAFFE_ENFORCE_EQ(\n            input.ndim(),\n            1,\n            \"When input_as_shape is true, the input must be a 1D tensor of \"\n            \"data type TIndex\");\n        auto* shape_data = input.template data<TIndex>();\n        shape.insert(shape.end(), shape_data, shape_data + input.dim32(0));\n      } else {\n        auto& input = Input(0);\n        shape.insert(shape.end(), input.dims().begin(), input.dims().end());\n      }\n      shape.insert(shape.end(), extra_shape_.begin(), extra_shape_.end());\n      output->Resize(shape);\n    } else {\n      output->Resize(shape_);\n    }\n    return Fill(output);\n  }\n\n  virtual bool Fill(Tensor<Context>* output) = 0;\n\n protected:\n  vector<TIndex> shape_;\n  vector<TIndex> extra_shape_;\n  bool input_as_shape_;\n};\n\ntemplate <typename T, class Context>\nclass UniformFillOp final : public FillerOp<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  UniformFillOp(const OperatorDef& operator_def, Workspace* ws)\n      : FillerOp<Context>(operator_def, ws),\n        min_(OperatorBase::template GetSingleArgument<T>(\"min\", 0)),\n        max_(OperatorBase::template GetSingleArgument<T>(\"max\", 1)) {\n    if (InputSize() == 3) {\n      CAFFE_ENFORCE(\n          !OperatorBase::HasSingleArgumentOfType<T>(\"min\"),\n          \"Cannot set both min arg and min input blob\");\n      CAFFE_ENFORCE(\n          !OperatorBase::HasSingleArgumentOfType<T>(\"max\"),\n          \"Cannot set both max arg and max input blob\");\n    } else {\n      CAFFE_ENFORCE_LT(\n          min_, max_, \"Max value should be bigger than min value.\");\n    }\n  }\n\n  bool Fill(Tensor<Context>* output) override {\n    T min = min_;\n    T max = max_;\n    if (InputSize() == 3) {\n      CAFFE_ENFORCE_EQ(1, Input(1).size(), \"min blob must be scalar\");\n    
  CAFFE_ENFORCE_EQ(1, Input(2).size(), \"max blob must be scalar\");\n      min = *Input(1).template data<T>();\n      max = *Input(2).template data<T>();\n      if (min > max) {\n        auto shape = output->dims();\n        shape[0] = 0;\n        output->Resize(shape);\n        output->template mutable_data<T>();\n        return true;\n      }\n    }\n    math::RandUniform<T, Context>(\n        output->size(),\n        min,\n        max,\n        output->template mutable_data<T>(),\n        &context_);\n    return true;\n  }\n\n private:\n  T min_;\n  T max_;\n};\n\ntemplate <class Context>\nclass UniqueUniformFillOp final : public FillerOp<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  UniqueUniformFillOp(const OperatorDef& operator_def, Workspace* ws)\n      : FillerOp<Context>(operator_def, ws) {\n    TensorProto_DataType dtype =\n        static_cast<TensorProto_DataType>(OperatorBase::GetSingleArgument<int>(\n            \"dtype\", TensorProto_DataType_INT32));\n\n    switch (dtype) {\n      case TensorProto_DataType_INT32:\n        CheckRange<int>();\n        body_ = &UniqueUniformFillOp::FillWithType<int>;\n        break;\n      case TensorProto_DataType_INT64:\n        CheckRange<int64_t>();\n        body_ = &UniqueUniformFillOp::FillWithType<int64_t>;\n        break;\n      case TensorProto_DataType_UNDEFINED:\n        CAFFE_THROW(\n            \"UniqueUniformFill op cannot have undefined 'dtype' argument\");\n      // break;\n      default:\n        CAFFE_THROW(\"Unexpected 'dtype' argument value: \", dtype);\n    }\n  }\n\n  bool Fill(Tensor<Context>* output) override {\n    return (this->*body_)(output);\n  }\n\n private:\n  template <typename T>\n  void CheckRange() {\n    CAFFE_ENFORCE(OperatorBase::HasSingleArgumentOfType<T>(\"min\"));\n    CAFFE_ENFORCE(OperatorBase::HasSingleArgumentOfType<T>(\"max\"));\n    CAFFE_ENFORCE_LT(\n        OperatorBase::GetSingleArgument<T>(\"min\", 0),\n        OperatorBase::GetSingleArgument<T>(\"max\", 
0),\n        \"Max value should be bigger than min value.\");\n  }\n\n  template <typename T>\n  bool FillWithType(Tensor<Context>* output) {\n    T min = OperatorBase::GetSingleArgument<T>(\"min\", 0);\n    T max = OperatorBase::GetSingleArgument<T>(\"max\", 0);\n\n    const T* avoid_data = nullptr;\n    size_t avoid_size = 0;\n    if (InputSize() >= 2) {\n      auto& avoid = Input(1);\n      avoid_data = avoid.template data<T>();\n      avoid_size = avoid.size();\n    }\n    math::RandUniformUnique<T, Context>(\n        output->size(),\n        min,\n        max,\n        output->template mutable_data<T>(),\n        avoid_size,\n        avoid_data,\n        &context_);\n    return true;\n  }\n\n  bool (UniqueUniformFillOp::*body_)(Tensor<Context>* output);\n};\n\ntemplate <class Context>\nclass ConstantFillOp final : public FillerOp<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  ConstantFillOp(const OperatorDef& operator_def, Workspace* ws)\n      : FillerOp<Context>(operator_def, ws) {\n    TensorProto_DataType dtype =\n        static_cast<TensorProto_DataType>(OperatorBase::GetSingleArgument<int>(\n            \"dtype\", TensorProto_DataType_FLOAT));\n\n    if (!OperatorBase::HasArgument(\"dtype\") &&\n        OperatorBase::HasArgument(\"value\")) {\n      // If 'dtype' is not provided, infer type based on the type of 'value'\n      // Currently, single argument contains either float, int64 or bytes\n      if (OperatorBase::HasSingleArgumentOfType<float>(\"value\")) {\n        dtype = TensorProto_DataType_FLOAT;\n      } else if (OperatorBase::HasSingleArgumentOfType<int64_t>(\"value\")) {\n        dtype = TensorProto_DataType_INT64;\n      } else {\n        CAFFE_THROW(\"Argument 'value' is of unexpected type\");\n      }\n      VLOG(1) << \"Argument 'dtype' is not provided. 
Assume the data type is \"\n              << \"the same as that of argument 'value': \" << dtype;\n    }\n\n    switch (dtype) {\n      case TensorProto_DataType_FLOAT:\n        body_ = &ConstantFillOp::FillWithType<float>;\n        break;\n      case TensorProto_DataType_DOUBLE:\n        body_ = &ConstantFillOp::FillWithType<double>;\n        break;\n      case TensorProto_DataType_BOOL:\n        body_ = &ConstantFillOp::FillWithType<bool>;\n        break;\n      case TensorProto_DataType_INT8:\n        body_ = &ConstantFillOp::FillWithType<int8_t>;\n        break;\n      case TensorProto_DataType_INT16:\n        body_ = &ConstantFillOp::FillWithType<int16_t>;\n        break;\n      case TensorProto_DataType_INT32:\n        body_ = &ConstantFillOp::FillWithType<int>;\n        break;\n      case TensorProto_DataType_INT64:\n        body_ = &ConstantFillOp::FillWithType<int64_t>;\n        break;\n      case TensorProto_DataType_UINT8:\n        body_ = &ConstantFillOp::FillWithType<uint8_t>;\n        break;\n      case TensorProto_DataType_UINT16:\n        body_ = &ConstantFillOp::FillWithType<uint16_t>;\n        break;\n      case TensorProto_DataType_STRING:\n        body_ = &ConstantFillOp::FillWithString;\n        break;\n      case TensorProto_DataType_UNDEFINED:\n        CAFFE_THROW(\"ConstantFill op cannot have undefined 'dtype' argument\");\n      // break;\n      default:\n        CAFFE_THROW(\"Unexpected 'dtype' argument value: \", dtype);\n    }\n  }\n\n  bool Fill(Tensor<Context>* output) override {\n    return (this->*body_)(output);\n  }\n\n  template <typename T>\n  bool FillWithType(Tensor<Context>* output) {\n    T value = OperatorBase::GetSingleArgument<T>(\"value\", 0);\n    auto* data = output->template mutable_data<T>();\n    if (output->size()) {\n      math::Set<T, Context>(output->size(), value, data, &context_);\n    }\n    return true;\n  }\n\n  bool FillWithString(Tensor<Context>* output) {\n    auto value = 
OperatorBase::GetSingleArgument<std::string>(\"value\", \"\");\n    auto* data = output->template mutable_data<std::string>();\n    for (int i = 0; i < output->size(); ++i) {\n      data[i] = value;\n    }\n    return true;\n  }\n\n private:\n  bool (ConstantFillOp::*body_)(Tensor<Context>* output);\n};\n\ntemplate <class Context>\nclass DiagonalFillOp final : public FillerOp<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  DiagonalFillOp(const OperatorDef& operator_def, Workspace* ws)\n      : FillerOp<Context>(operator_def, ws) {\n    TensorProto_DataType dtype =\n        static_cast<TensorProto_DataType>(OperatorBase::GetSingleArgument<int>(\n            \"dtype\", TensorProto_DataType_FLOAT));\n\n    if (!OperatorBase::HasArgument(\"dtype\") &&\n        OperatorBase::HasArgument(\"value\")) {\n      // If 'dtype' is not provided, infer type based on the type of 'value'\n      // Currently, single argument contains either float, int64 or bytes\n      if (OperatorBase::HasSingleArgumentOfType<float>(\"value\")) {\n        dtype = TensorProto_DataType_FLOAT;\n      } else if (OperatorBase::HasSingleArgumentOfType<int64_t>(\"value\")) {\n        dtype = TensorProto_DataType_INT64;\n      } else {\n        CAFFE_THROW(\"Argument 'value' is of unexpected type\");\n      }\n      VLOG(1) << \"Argument 'dtype' is not provided. 
Assume the data type is \"\n              << \"the same as that of argument 'value': \" << dtype;\n    }\n\n    switch (dtype) {\n      case TensorProto_DataType_FLOAT:\n        body_ = &DiagonalFillOp::FillWithType<float>;\n        break;\n      case TensorProto_DataType_DOUBLE:\n        body_ = &DiagonalFillOp::FillWithType<double>;\n        break;\n      case TensorProto_DataType_BOOL:\n        body_ = &DiagonalFillOp::FillWithType<bool>;\n        break;\n      case TensorProto_DataType_INT8:\n        body_ = &DiagonalFillOp::FillWithType<int8_t>;\n        break;\n      case TensorProto_DataType_INT16:\n        body_ = &DiagonalFillOp::FillWithType<int16_t>;\n        break;\n      case TensorProto_DataType_INT32:\n        body_ = &DiagonalFillOp::FillWithType<int>;\n        break;\n      case TensorProto_DataType_INT64:\n        body_ = &DiagonalFillOp::FillWithType<int64_t>;\n        break;\n      case TensorProto_DataType_UINT8:\n        body_ = &DiagonalFillOp::FillWithType<uint8_t>;\n        break;\n      case TensorProto_DataType_UINT16:\n        body_ = &DiagonalFillOp::FillWithType<uint16_t>;\n        break;\n      case TensorProto_DataType_UNDEFINED:\n        CAFFE_THROW(\"Cannot have undefined 'dtype' argument\");\n      default:\n        CAFFE_THROW(\"Unexpected 'dtype' argument value: \", dtype);\n    }\n  }\n\n  bool Fill(Tensor<Context>* output) override {\n    return (this->*body_)(output);\n  }\n\n  template <typename T>\n  bool FillWithType(Tensor<Context>* output);\n\n private:\n  void VerifyOutputShape(Tensor<Context>* output) {\n    CAFFE_ENFORCE(output->ndim() >= 2, \"Input shape must be >= 2D\");\n  }\n\n  TIndex GetStepSize(Tensor<Context>* output) {\n    TIndex step;\n    if (output->ndim() == 2) {\n      step = output->dim(1) + 1;\n    } else {\n      TIndex prev_i = output->dim(0);\n      for (auto i : output->dims()) {\n        if (i != prev_i) {\n          CAFFE_THROW(\"All dimensions of input must be of equal length\");\n        }\n   
   }\n      vector<TIndex> cumprod(output->ndim());\n      auto dims = output->dims();\n      std::partial_sum(\n          dims.begin(),\n          dims.end() - 1,\n          cumprod.begin(),\n          std::multiplies<TIndex>());\n      step = 1 +\n          std::accumulate(\n                 cumprod.begin(), cumprod.end(), static_cast<TIndex>(0));\n      VLOG(0) << step;\n    }\n    return step;\n  }\n\n  bool (DiagonalFillOp::*body_)(Tensor<Context>* output);\n};\n\ntemplate <typename T, class Context>\nclass GaussianFillOp final : public FillerOp<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  GaussianFillOp(const OperatorDef& operator_def, Workspace* ws)\n      : FillerOp<Context>(operator_def, ws),\n        mean_(OperatorBase::template GetSingleArgument<float>(\"mean\", 0)),\n        std_(OperatorBase::template GetSingleArgument<float>(\"std\", 1)) {\n    DCHECK_GT(std_, 0) << \"Standard deviation should be nonnegative.\";\n  }\n\n  bool Fill(Tensor<Context>* output) override {\n    math::RandGaussian<T, Context>(\n        output->size(),\n        mean_,\n        std_,\n        output->template mutable_data<T>(),\n        &context_);\n    return true;\n  }\n\n private:\n  T mean_;\n  T std_;\n};\n\ntemplate <typename T, class Context>\nclass XavierFillOp final : public FillerOp<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  XavierFillOp(const OperatorDef& operator_def, Workspace* ws)\n      : FillerOp<Context>(operator_def, ws) {}\n\n  bool Fill(Tensor<Context>* output) override {\n    const int fan_in = output->size() / output->dim32(0);\n    T scale = std::sqrt(T(3) / fan_in);\n    math::RandUniform<T, Context>(\n        output->size(),\n        -scale,\n        scale,\n        output->template mutable_data<T>(),\n        &context_);\n    return true;\n  }\n};\n\ntemplate <typename T, class Context>\nclass MSRAFillOp final : public FillerOp<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  MSRAFillOp(const OperatorDef& 
operator_def, Workspace* ws)\n      : FillerOp<Context>(operator_def, ws) {}\n\n  bool Fill(Tensor<Context>* output) override {\n    const int fan_out = output->size() / output->dim32(1);\n    T scale = std::sqrt(T(2) / fan_out);\n    math::RandGaussian<T, Context>(\n        output->size(),\n        0.0,\n        scale,\n        output->template mutable_data<T>(),\n        &context_);\n    return true;\n  }\n};\n\n// This is mostly used just as a debugging purpose stuff: it fills a tensor\n// sequentially with values 0, 1, 2..., which can then be used to check e.g.\n// reshape operations by allowing one to read the indices more easily.\ntemplate <typename T, class Context>\nclass RangeFillOp final : public FillerOp<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  RangeFillOp(const OperatorDef& operator_def, Workspace* ws)\n      : FillerOp<Context>(operator_def, ws) {}\n\n  bool Fill(Tensor<Context>* output) override;\n};\n\ntemplate <class Context>\nclass LengthsRangeFillOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(LengthsRangeFillOp);\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    auto* output = Output(0);\n    auto* input_data = input.template data<int32_t>();\n\n    CAFFE_ENFORCE_EQ(input.ndim(), 1, \"Input must be a vector.\");\n\n    auto len_sum = std::accumulate(input_data, input_data + input.size(), 0);\n\n    output->Resize(len_sum);\n    auto* output_data = output->template mutable_data<int32_t>();\n\n    int32_t offset = 0;\n    for (int i = 0; i < input.size(); ++i) {\n      auto len = input_data[i];\n      auto start = output_data + offset;\n      std::iota(\n          start,\n          start + len,\n          0); // make the third argument the arg of this operator\n      offset += len;\n    }\n    return true;\n  }\n};\n\ninline std::vector<TensorShape> FillerTensorInference(\n    const OperatorDef& def,\n    const vector<TensorShape>& in) {\n  vector<TensorShape> 
out(1);\n  ArgumentHelper helper(def);\n  out[0].set_data_type(static_cast<TensorProto_DataType>(\n      helper.GetSingleArgument<int>(\"dtype\", TensorProto_DataType_FLOAT)));\n\n  if (in.size()) {\n    // TODO\n    bool input_as_shape =\n        helper.GetSingleArgument<bool>(\"input_as_shape\", false);\n    if (input_as_shape) {\n      out[0].set_unknown_shape(true);\n      return out;\n    }\n    for (int d : in[0].dims()) {\n      out[0].add_dims(d);\n    }\n  } else {\n    auto shape = helper.GetRepeatedArgument<int>(\"shape\");\n    for (int d : shape) {\n      out[0].add_dims(d);\n    }\n  }\n  return out;\n}\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_FILLER_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/find_duplicate_elements_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_FIND_DUPLICATE_ELEMENTS_OP_H\n#define CAFFE2_OPERATORS_FIND_DUPLICATE_ELEMENTS_OP_H\n\n#include <unordered_map>\n#include <vector>\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/tensor.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass FindDuplicateElementsOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(FindDuplicateElementsOp);\n  USE_DISPATCH_HELPER;\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<float, double, int, long, std::string>>::\n        call(this, Input(0));\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    const auto& data = Input(0);\n    CAFFE_ENFORCE(data.ndim() == 1, \"data should be 1-D.\");\n\n    const auto* data_ptr = data.template data<T>();\n    std::unordered_map<T, int64_t> dict;\n    std::vector<int64_t> dupIndices;\n    // i is the index of unique elements, j is the index of all elements\n    for (int64_t i = 0, j = 0; j < data.dims()[0]; ++i, ++j) {\n      bool retVal = dict.insert({data_ptr[j], i}).second;\n      if (!retVal) {\n        --i;\n        dupIndices.push_back(j);\n      }\n    }\n\n    const auto dupSize = dupIndices.size();\n    auto* output = Output(0);\n    output->Resize(dupSize);\n    auto* out_ptr = output->template mutable_data<int64_t>();\n    for (int64_t i = 0; i < dupSize; ++i) {\n      out_ptr[i] = dupIndices[i];\n    }\n\n    return true;\n  }\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_FIND_DUPLICATE_ELEMENTS_OP_H\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/find_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_FIND_OP_H_\n#define CAFFE2_OPERATORS_FIND_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\n#include <unordered_map>\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass FindOp final : public Operator<Context> {\n public:\n  FindOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        missing_value_(\n            OperatorBase::GetSingleArgument<int>(\"missing_value\", -1)) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_DISPATCH_HELPER;\n\n  bool RunOnDevice() {\n    return DispatchHelper<TensorTypes<int, long>>::call(this, Input(0));\n  }\n\n protected:\n  template <typename T>\n  bool DoRunWithType() {\n    auto& idx = Input(0);\n    auto& needles = Input(1);\n    auto* res_indices = Output(0);\n    res_indices->ResizeLike(needles);\n\n    const T* idx_data = idx.template data<T>();\n    const T* needles_data = needles.template data<T>();\n    T* res_data = res_indices->template mutable_data<T>();\n    auto idx_size = idx.size();\n\n    // Use an arbitrary cut-off for when to use brute-force\n    // search. For larger needle sizes we first put the\n    // index into a map\n    if (needles.size() < 16) {\n      // Brute force O(nm)\n      for (int i = 0; i < needles.size(); i++) {\n        T x = needles_data[i];\n        T res = static_cast<T>(missing_value_);\n        for (int j = idx_size - 1; j >= 0; j--) {\n          if (idx_data[j] == x) {\n            res = j;\n            break;\n          }\n        }\n        res_data[i] = res;\n      }\n    } else {\n      // O(n + m)\n      std::unordered_map<T, int> idx_map;\n      for (int j = 0; j < idx_size; j++) {\n        idx_map[idx_data[j]] = j;\n      }\n      for (int i = 0; i < needles.size(); i++) {\n        T x = needles_data[i];\n        auto it = idx_map.find(x);\n        res_data[i] = (it == idx_map.end() ? 
missing_value_ : it->second);\n      }\n    }\n\n    return true;\n  }\n\n protected:\n  int missing_value_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_FIND_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/free_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_FREE_OP_H_\n#define CAFFE2_OPERATORS_FREE_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\n// FreeOp frees the content of the output blob. We allow it to take in input\n// blobs purely for the reason that it can \"wait\" on the input blobs to be\n// produced by some of the earlier operators before a free is called.\ntemplate <class Context>\nclass FreeOp : public Operator<Context> {\n public:\n  FreeOp(const OperatorDef& def, Workspace* ws) : Operator<Context>(def, ws) {}\n\n  bool RunOnDevice() override {\n    for (Blob* output : OperatorBase::Outputs()) {\n      output->Reset();\n    }\n    return true;\n  }\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_FREE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/fully_connected_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_\n#define CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/conversions.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\n// This is Caffe's InnerProductOp, with a name that fits its purpose better.\ntemplate <class Context, class Engine = DefaultEngine>\nclass FullyConnectedOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  FullyConnectedOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        axis_(OperatorBase::GetSingleArgument<int32_t>(\"axis\", 1)),\n        axis_w_(OperatorBase::GetSingleArgument<int32_t>(\"axis_w\", 1)) {}\n  ~FullyConnectedOp() {}\n\n  template <\n      typename T_X,\n      typename T_W,\n      typename T_B,\n      typename T_Y,\n      typename MATH>\n  bool DoRunWithType() {\n    const auto& X = Input(0);\n    const auto& W = Input(1);\n    const auto& b = Input(2);\n    auto* Y = Output(0);\n    CAFFE_ENFORCE(b.ndim() == 1, b.ndim());\n    // batch size\n    const auto canonical_axis = X.canonical_axis_index(axis_);\n    const auto M = X.size_to_dim(canonical_axis);\n    const auto K = X.size_from_dim(canonical_axis);\n    const auto canonical_axis_w = W.canonical_axis_index(axis_w_);\n    const int N = W.size_to_dim(canonical_axis_w);\n\n    auto dimErrorString = [&]() {\n      return MakeString(\n          \"Dimension mismatch: \",\n          \"X: \",\n          X.dims(),\n          \", W: \",\n          W.dims(),\n          \", b: \",\n          b.dims(),\n          \", axis: \",\n          axis_,\n          \", M: \",\n          M,\n          \", N: \",\n          N,\n          \", K: \",\n          K);\n    };\n\n    // Error checking\n    CAFFE_ENFORCE(M == X.size() / K, dimErrorString());\n    CAFFE_ENFORCE(K == W.size() / N, dimErrorString());\n    CAFFE_ENFORCE(N == b.dim32(0), 
dimErrorString());\n    CAFFE_ENFORCE(N == b.size(), dimErrorString());\n\n    Y_shape_cache_ = X.dims();\n    // This is an invariant of canonical_axis, so we can DCHECK.\n    DCHECK_LE(canonical_axis + 1, Y_shape_cache_.size());\n    Y_shape_cache_.resize(canonical_axis + 1);\n    Y_shape_cache_[canonical_axis] = N;\n    Y->Resize(Y_shape_cache_);\n    CAFFE_ENFORCE(M * N == Y->size(), dimErrorString());\n\n    if (X.size() == 0) {\n      // skip the rest of the computation if X is empty\n      Y->template mutable_data<T_Y>();\n      return true;\n    }\n\n    // W * x\n    math::Gemm<T_X, Context, Engine>(\n        CblasNoTrans,\n        CblasTrans,\n        M,\n        N,\n        K,\n        1,\n        X.template data<T_X>(),\n        W.template data<T_W>(),\n        0,\n        Y->template mutable_data<T_Y>(),\n        &context_);\n    // Add bias term\n    if (bias_multiplier_.size() != M) {\n      // If the helper bias multiplier is not M, reshape and fill it with one.\n      bias_multiplier_.Resize(M);\n      math::Set<T_B, Context>(\n          M,\n          convert::To<float, T_B>(1),\n          bias_multiplier_.template mutable_data<T_B>(),\n          &context_);\n    }\n    math::Gemm<T_B, Context, Engine>(\n        CblasNoTrans,\n        CblasNoTrans,\n        M,\n        N,\n        1,\n        1,\n        bias_multiplier_.template data<T_B>(),\n        b.template data<T_B>(),\n        1,\n        Y->template mutable_data<T_Y>(),\n        &context_);\n    return true;\n  }\n\n  bool RunOnDevice() override {\n    return DoRunWithType<\n        float, // X\n        float, // W\n        float, // B\n        float, // Y\n        float>(); // Math\n  }\n\n protected:\n  size_t axis_{1};\n  size_t axis_w_{1};\n  // A local vector to cache the output shape so we don't need to recreate\n  // a vector object every time we run Run().\n  vector<TIndex> Y_shape_cache_;\n  Tensor<Context> bias_multiplier_;\n};\n\ntemplate <class Context, class Engine = 
DefaultEngine>\nclass FullyConnectedGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  FullyConnectedGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        axis_(OperatorBase::GetSingleArgument<int32_t>(\"axis\", 1)),\n        axis_w_(OperatorBase::GetSingleArgument<int32_t>(\"axis_w\", 1)) {}\n  ~FullyConnectedGradientOp() {}\n\n  template <\n      typename T_X,\n      typename T_W,\n      typename T_DY,\n      typename T_B,\n      typename T_DX,\n      typename T_DW,\n      typename T_DB,\n      typename MATH>\n  bool DoRunWithType() {\n    const auto& X = Input(0);\n    const auto& W = Input(1);\n    const auto& dY = Input(2);\n    // batch size\n    const auto canonical_axis = X.canonical_axis_index(axis_);\n    const int M = X.size_to_dim(canonical_axis);\n    const int K = X.size_from_dim(canonical_axis);\n    const auto canonical_axis_w = W.canonical_axis_index(axis_w_);\n    const int N = W.size_to_dim(canonical_axis_w);\n    CAFFE_ENFORCE(M * K == X.size());\n    CAFFE_ENFORCE(K * N == W.size());\n\n    auto* dW = Output(0);\n    auto* db = Output(1);\n    dW->ResizeLike(W);\n    db->Resize(N);\n\n    if (X.size() == 0) {\n      // generate a zero blob for db and dW when X is empty\n      math::Set<T_DB, Context>(\n          db->size(),\n          convert::To<float, T_DB>(0),\n          db->template mutable_data<T_DB>(),\n          &context_);\n      math::Set<T_DW, Context>(\n          dW->size(),\n          convert::To<float, T_DW>(0),\n          dW->template mutable_data<T_DW>(),\n          &context_);\n\n      if (OutputSize() == 3) {\n        auto* dX = Output(2);\n        dX->ResizeLike(X);\n        dX->template mutable_data<T_DX>();\n      }\n\n      return true;\n    }\n\n    // Compute dW\n    math::Gemm<T_DY, Context, Engine>(\n        CblasTrans,\n        CblasNoTrans,\n        N,\n        K,\n        M,\n        convert::To<float, MATH>(1),\n        
dY.template data<T_DY>(),\n        X.template data<T_X>(),\n        convert::To<float, MATH>(0),\n        dW->template mutable_data<T_DW>(),\n        &context_);\n    if (bias_multiplier_.size() != M) {\n      // If the helper bias multiplier is not M, reshape and fill it\n      // with one.\n      bias_multiplier_.Resize(M);\n      math::Set<T_B, Context>(\n          M,\n          convert::To<float, T_B>(1),\n          bias_multiplier_.template mutable_data<T_B>(),\n          &context_);\n    }\n    // Compute dB\n    math::Gemv<T_DY, Context>(\n        CblasTrans,\n        M,\n        N,\n        convert::To<float, MATH>(1),\n        dY.template data<T_DY>(),\n        bias_multiplier_.template data<T_B>(),\n        convert::To<float, MATH>(0),\n        db->template mutable_data<T_DB>(),\n        &context_);\n\n    // Compute dX\n    if (OutputSize() == 3) {\n      auto* dX = Output(2);\n      dX->ResizeLike(X);\n      math::Gemm<T_DX, Context, Engine>(\n          CblasNoTrans,\n          CblasNoTrans,\n          M,\n          K,\n          N,\n          convert::To<float, MATH>(1),\n          dY.template data<T_DY>(),\n          W.template data<T_W>(),\n          convert::To<float, MATH>(0),\n          dX->template mutable_data<T_DX>(),\n          &context_);\n    }\n    return true;\n  }\n\n  bool RunOnDevice() override {\n    return DoRunWithType<\n        float, //  X\n        float, //  W\n        float, // dY\n        float, //  B\n        float, // dX\n        float, // dW\n        float, // dB\n        float>(); // Math\n  }\n\n protected:\n  size_t axis_{1};\n  size_t axis_w_{1};\n  Tensor<Context> bias_multiplier_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/given_tensor_fill_op.h",
    "content": "#pragma once\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/filler_op.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass GivenTensorFillOp final : public FillerOp<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  GivenTensorFillOp(const OperatorDef& operator_def, Workspace* ws)\n      : FillerOp<Context>(operator_def, ws) {\n    auto source_values =\n        OperatorBase::template GetRepeatedArgument<T>(\"values\");\n    values_.Resize(source_values.size());\n    T* values_data = values_.template mutable_data<T>();\n    for (int i = 0; i < source_values.size(); i++) {\n      values_data[i] = static_cast<T>(source_values[i]);\n    }\n  }\n\n  bool Fill(Tensor<Context>* output) override {\n    DCHECK_EQ(output->size(), values_.size())\n        << \"output size: \" << output->size()\n        << \" given size: \" << values_.size();\n    auto* data = output->template mutable_data<T>();\n    const T* values_data = values_.template data<T>();\n    if (output->size()) {\n      context_.template Copy<T, CPUContext, Context>(\n          output->size(), values_data, data);\n    }\n    return true;\n  }\n\n private:\n  TensorCPU values_;\n};\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/gru_unit_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_GRU_UNIT_OP_H_\n#define CAFFE2_OPERATORS_GRU_UNIT_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\nnamespace detail {\n\ntemplate <typename T>\ninline T sigmoid(T x) {\n  return 1.0f / (1.0f + exp(-x));\n}\n\ntemplate <typename T>\ninline T host_tanh(T x) {\n  return 2.0f * sigmoid(2.0f * x) - 1.0f;\n}\n\ntemplate <typename T, typename Context>\nvoid GRUUnit(\n    int N,\n    int D,\n    int t,\n    const T* H_prev,\n    const T* X,\n    const int32_t* seqLengths,\n    bool drop_states,\n    T* H,\n    Context* /*context*/) {\n  for (int n = 0; n < N; ++n) {\n    const bool valid = t < seqLengths[n];\n\n    for (int d = 0; d < D; ++d) {\n      if (!valid) {\n        if (drop_states) {\n          H[d] = 0;\n        } else {\n          H[d] = H_prev[d];\n        }\n      } else {\n        const T update = X[1 * D + d];\n        const T output = X[2 * D + d];\n        T sigmoid_update = sigmoid(update);\n        H[d] = H_prev[d] * sigmoid_update +\n            host_tanh(output) * (1.0f - sigmoid_update);\n      }\n    }\n\n    H_prev += D;\n    X += 3 * D;\n    H += D;\n  }\n}\n\ntemplate <typename T, typename Context>\nvoid GRUUnitGradient(\n    int N,\n    int D,\n    int t,\n    const T* H_prev,\n    const T* X,\n    const int32_t* seqLengths,\n    const T* H,\n    const T* H_diff,\n    bool drop_states,\n    T* H_prev_diff,\n    T* X_diff,\n    Context* /*context*/) {\n  for (int n = 0; n < N; ++n) {\n    const bool valid = t < seqLengths[n];\n\n    for (int d = 0; d < D; ++d) {\n      T* h_prev_diff = H_prev_diff + d;\n      T* reset_diff = X_diff + 0 * D + d;\n      T* update_diff = X_diff + 1 * D + d;\n      T* output_diff = X_diff + 2 * D + d;\n\n      if (!valid) {\n        if (drop_states) {\n          *h_prev_diff = 0;\n        } else {\n          *h_prev_diff = H_diff[d];\n        }\n        *reset_diff = 0;\n        
*update_diff = 0;\n        *output_diff = 0;\n      } else {\n        // Calculate Gate Outputs\n        const T u = sigmoid(X[1 * D + d]);\n        const T o = host_tanh(X[2 * D + d]);\n\n        *h_prev_diff = H_diff[d] * u;\n        *reset_diff = 0; // 0 contribution to gradient from this operation\n        *update_diff = (H_diff[d] * H_prev[d] - H_diff[d] * o) * u * (1.0f - u);\n        *output_diff = H_diff[d] * (1.0f - u) * (1.0f - o * o);\n      }\n    }\n\n    H_prev += D;\n    X += 3 * D;\n    H += D;\n    H_diff += D;\n    X_diff += 3 * D;\n    H_prev_diff += D;\n  }\n}\n\n} // namespace detail\n\ntemplate <typename T, typename Context>\nclass GRUUnitOp : public Operator<Context> {\n public:\n  GRUUnitOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        drop_states_(OperatorBase::template GetSingleArgument<bool>(\n            \"drop_states\",\n            false)) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    // Extract N\n    const auto N = Input(HIDDEN_T_M_1).dim(1);\n\n    // Gates: 1xNxG\n    const auto G = Input(GATES).dim(2);\n    const auto D = Input(HIDDEN_T_M_1).dim(2);\n\n    CAFFE_ENFORCE_EQ(3 * D, G);\n    const auto* H_prev = Input(HIDDEN_T_M_1).template data<T>();\n    const auto* X = Input(GATES).template data<T>();\n    CAFFE_ENFORCE_EQ(Input(SEQ_LENGTHS).size(), N);\n    const auto* seqLengths = Input(SEQ_LENGTHS).template data<int32_t>();\n    const auto t = OperatorBase::Input<Tensor<CPUContext>>(TIMESTEP)\n                       .template data<int32_t>()[0];\n    Output(HIDDEN_T)->ResizeLike(Input(HIDDEN_T_M_1));\n    auto* H = Output(HIDDEN_T)->template mutable_data<T>();\n\n    detail::GRUUnit<T, Context>(\n        N, D, t, H_prev, X, seqLengths, drop_states_, H, &context_);\n    return true;\n  }\n\n protected:\n  INPUT_TAGS(HIDDEN_T_M_1, GATES, SEQ_LENGTHS, TIMESTEP);\n  OUTPUT_TAGS(HIDDEN_T);\n\n private:\n  bool drop_states_;\n};\n\ntemplate 
<typename T, typename Context>\nclass GRUUnitGradientOp : public Operator<Context> {\n public:\n  GRUUnitGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        drop_states_(OperatorBase::template GetSingleArgument<bool>(\n            \"drop_states\",\n            false)) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    // Extract N\n    const auto N = Input(HIDDEN_T_M_1).dim(1);\n\n    // Gates: 1xNxG\n    const auto G = Input(GATES).dim(2);\n    const auto D = Input(HIDDEN_T_M_1).dim(2);\n\n    CAFFE_ENFORCE_EQ(3 * D, G);\n    const auto* H_prev = Input(HIDDEN_T_M_1).template data<T>();\n    const auto* X = Input(GATES).template data<T>();\n    const auto t = OperatorBase::Input<Tensor<CPUContext>>(TIMESTEP)\n                       .template data<int32_t>()[0];\n    const auto* H = Input(HIDDEN_T).template data<T>();\n    const auto* H_diff = Input(HIDDEN_T_GRAD).template data<T>();\n    const auto* seqLengths = Input(SEQ_LENGTHS).template data<int32_t>();\n    Output(HIDDEN_T_M_1_GRAD)->ResizeLike(Input(HIDDEN_T_M_1));\n    auto* H_prev_diff = Output(HIDDEN_T_M_1_GRAD)->template mutable_data<T>();\n    Output(GATES_GRAD)->ResizeLike(Input(GATES));\n    auto* X_diff = Output(GATES_GRAD)->template mutable_data<T>();\n\n    detail::GRUUnitGradient<T, Context>(\n        N,\n        D,\n        t,\n        H_prev,\n        X,\n        seqLengths,\n        H,\n        H_diff,\n        drop_states_,\n        H_prev_diff,\n        X_diff,\n        &context_);\n    return true;\n  }\n\n protected:\n  INPUT_TAGS(\n      HIDDEN_T_M_1,\n      GATES,\n      SEQ_LENGTHS,\n      TIMESTEP,\n      HIDDEN_T,\n      HIDDEN_T_GRAD, );\n  OUTPUT_TAGS(HIDDEN_T_M_1_GRAD, GATES_GRAD);\n\n private:\n  bool drop_states_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_GRU_UNIT_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/h_softmax_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_H_SOFTMAX_OP_H_\n#define CAFFE2_OPERATORS_H_SOFTMAX_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/proto/hsm.pb.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, typename Context>\nclass HSoftmaxOpBase : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  HSoftmaxOpBase(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {\n    HierarchyProto hierarchy;\n    CAFFE_ENFORCE(hierarchy.ParseFromString(\n        OperatorBase::GetSingleArgument<string>(\"hierarchy\", \"\")));\n    for (const auto& path : hierarchy.paths()) {\n      hierarchy_all_map_.emplace(path.word_id(), path);\n    }\n  }\n\n protected:\n  std::unordered_map<int, PathProto> hierarchy_all_map_;\n  Tensor<Context> scale_;\n  Tensor<Context> sum_multiplier_;\n  Tensor<Context> bias_multiplier_;\n  static constexpr T kLOG_THRESHOLD() {\n    return 1e-20;\n  }\n  static std::unordered_map<int, PathProto> getHierarchyForLabels(\n      int M,\n      const int* labels,\n      const std::unordered_map<int, PathProto>& hierarchy_all_map) {\n    std::unordered_map<int, PathProto> hierarchy_map;\n    std::set<int> label_set = std::set<int>(labels, labels + M);\n    for (const auto& label : label_set) {\n      auto search = hierarchy_all_map.find(label);\n      CAFFE_ENFORCE(search != hierarchy_all_map.end(), \"incorrect label.\");\n      hierarchy_map.emplace(search->first, search->second);\n    }\n    return hierarchy_map;\n  }\n  int getIntermediateOutputSize(\n      const int* labels,\n      int M,\n      std::unordered_map<int, PathProto>& hierarchy) const {\n    int size = 0;\n    for (int label = 0; label < M; ++label) {\n      int word_id = labels[label];\n      const auto& path = hierarchy[word_id];\n      size += std::accumulate(\n          path.path_nodes().begin(),\n          
path.path_nodes().end(),\n          0,\n          // Output of FC + Output of Softmax\n          [](int sz, PathNodeProto node) {\n            return sz + 2 * node.length();\n          });\n    }\n    return size;\n  }\n};\n\ntemplate <typename T, class Context>\nclass HSoftmaxOp : public HSoftmaxOpBase<T, Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  using HSoftmaxOpBase<T, Context>::HSoftmaxOpBase;\n\n  bool RunOnDevice() override;\n\n protected:\n  float RunForwardSingle(\n      const float* X,\n      const float* W,\n      const float* b,\n      int target,\n      float* output,\n      const float* bias_multiplier,\n      int w_length,\n      int K,\n      int& output_offset);\n};\n\ntemplate <typename T, class Context>\nclass HSoftmaxGradientOp final : public HSoftmaxOpBase<T, Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  using HSoftmaxOpBase<T, Context>::HSoftmaxOpBase;\n  bool RunOnDevice() override;\n\n private:\n  void RunBackwardSingle(\n      const float* X,\n      const float* dY,\n      const float* W,\n      int target,\n      const float* int_output,\n      float* dX,\n      float* dW,\n      float* db,\n      float* dOutput,\n      int dim_in,\n      int w_length,\n      int& output_offset);\n};\n\ntemplate <typename T, class Context>\nclass HSoftmaxSearchOp final : public HSoftmaxOp<T, Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  HSoftmaxSearchOp(const OperatorDef& operator_def, Workspace* ws)\n      : HSoftmaxOp<T, Context>(operator_def, ws),\n        top_n_(OperatorBase::GetSingleArgument<int>(\"topN\", 5)),\n        beam_(OperatorBase::GetSingleArgument<float>(\"beam\", 0.01)) {\n    CAFFE_ENFORCE(tree_.ParseFromString(\n        OperatorBase::GetSingleArgument<string>(\"tree\", \"\")));\n  }\n  bool RunOnDevice() override;\n\n private:\n  int top_n_;\n  float beam_;\n  TreeProto tree_;\n  bool pruning(\n      const float* X,\n      int sample,\n      int K,\n      const float* W,\n      const float* b,\n   
   const NodeProto& src_node,\n      NodeProto& dst_node,\n      float parent_score,\n      float beam);\n  bool extractNodes(\n      const NodeProto& node,\n      std::vector<std::pair<string, float>>& info);\n};\n\ntemplate <typename T, class Context>\nclass HuffmanTreeHierarchyOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  HuffmanTreeHierarchyOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        num_classes_(OperatorBase::GetSingleArgument<int>(\"num_classes\", -1)) {}\n  bool RunOnDevice() override;\n\n private:\n  // Internal huffman tree data.\n  struct Node {\n    Node(T l, int count)\n        : label(l), count(count), left_ch_index(-1), right_ch_index(-1) {}\n    T label;\n    int count;\n    int left_ch_index;\n    int right_ch_index;\n  };\n\n  struct NodeComparator {\n    bool operator()(const Node& node_a, const Node& node_b) {\n      return node_a.count > node_b.count;\n    }\n  };\n\n  int num_classes_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_H_SOFTMAX_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/half_float_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_HALF_FLOAT_OPS_H_\n#define CAFFE2_OPERATORS_HALF_FLOAT_OPS_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass FloatToHalfOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(FloatToHalfOp);\n\n  bool RunOnDevice() override;\n};\n\ntemplate <class Context>\nclass HalfToFloatOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(HalfToFloatOp);\n\n  bool RunOnDevice() override;\n};\n\nclass Float16ConstantFillOp : public Operator<CPUContext> {\n public:\n  Float16ConstantFillOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<CPUContext>(operator_def, ws),\n        shape_(\n            ToVectorTIndex(OperatorBase::GetRepeatedArgument<int>(\"shape\"))) {}\n\n  USE_OPERATOR_FUNCTIONS(CPUContext);\n  virtual ~Float16ConstantFillOp() {}\n\n  bool RunOnDevice() override;\n\n private:\n  vector<TIndex> shape_;\n};\n\ninline std::vector<TensorShape> Float16FillerTensorInference(\n    const OperatorDef& def,\n    const vector<TensorShape>& in) {\n  vector<TensorShape> out(1);\n  ArgumentHelper helper(def);\n  out[0].set_data_type(static_cast<TensorProto_DataType>(\n      helper.GetSingleArgument<int>(\"dtype\", TensorProto_DataType_FLOAT)));\n  auto shape = helper.GetRepeatedArgument<int>(\"shape\");\n  for (int d : shape) {\n    out[0].add_dims(d);\n  }\n  return out;\n}\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_HALF_FLOAT_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/if_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_IF_OP_H_\n#define CAFFE2_OPERATORS_IF_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass IfOp final : public Operator<Context> {\n public:\n  IfOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {\n    CAFFE_ENFORCE(\n        this->template HasSingleArgumentOfType<NetDef>(\"then_net\"),\n        \"then_net must be specified in If operator\");\n    auto then_net_def =\n        this->template GetSingleArgument<NetDef>(\"then_net\", NetDef());\n    then_net_ = CreateNet(then_net_def, ws);\n    CAFFE_ENFORCE(then_net_, \"Failed to initialize then subnet\");\n\n    if (this->template HasSingleArgumentOfType<NetDef>(\"else_net\")) {\n      auto else_net_def =\n          this->template GetSingleArgument<NetDef>(\"else_net\", NetDef());\n      else_net_ = CreateNet(else_net_def, ws);\n      CAFFE_ENFORCE(else_net_, \"Failed to initialize else subnet\");\n    }\n  }\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n private:\n  std::unique_ptr<NetBase> then_net_;\n  std::unique_ptr<NetBase> else_net_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_IF_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/im2col_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_IM2COL_OP_H_\n#define CAFFE2_OPERATORS_IM2COL_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass Im2ColOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  Im2ColOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        pad_(OperatorBase::GetSingleArgument<int>(\"pad\", 0)),\n        kernel_h_(OperatorBase::GetSingleArgument<int>(\n            \"kernel_h\",\n            OperatorBase::GetSingleArgument<int>(\"kernel\", 0))),\n        kernel_w_(OperatorBase::GetSingleArgument<int>(\n            \"kernel_w\",\n            OperatorBase::GetSingleArgument<int>(\"kernel\", 0))),\n        dilation_h_(OperatorBase::GetSingleArgument<int>(\n            \"dilation_h\",\n            OperatorBase::GetSingleArgument<int>(\"dilation\", 1))),\n        dilation_w_(OperatorBase::GetSingleArgument<int>(\n            \"dilation_w\",\n            OperatorBase::GetSingleArgument<int>(\"dilation\", 1))),\n        stride_h_(OperatorBase::GetSingleArgument<int>(\n            \"stride_h\",\n            OperatorBase::GetSingleArgument<int>(\"stride\", 1))),\n        stride_w_(OperatorBase::GetSingleArgument<int>(\n            \"stride_w\",\n            OperatorBase::GetSingleArgument<int>(\"stride\", 1))),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))) {\n    CAFFE_ENFORCE(kernel_h_ > 0);\n    CAFFE_ENFORCE(kernel_w_ > 0);\n    CAFFE_ENFORCE(dilation_h_ > 0);\n    CAFFE_ENFORCE(dilation_w_ > 0);\n    CAFFE_ENFORCE(stride_h_ > 0);\n    CAFFE_ENFORCE(stride_w_ > 0);\n    CAFFE_ENFORCE(pad_ >= 0);\n  }\n\n  bool RunOnDevice() override {\n    auto& X = Input(0);\n    auto* Y = Output(0);\n    CAFFE_ENFORCE(4 == X.ndim());\n\n    int N = 0, C = 
0, H = 0, W = 0;\n    switch (order_) {\n      case StorageOrder::NCHW:\n        N = X.dim32(0);\n        C = X.dim32(1);\n        H = X.dim32(2);\n        W = X.dim32(3);\n        break;\n      case StorageOrder::NHWC:\n        N = X.dim32(0);\n        H = X.dim32(1);\n        W = X.dim32(2);\n        C = X.dim32(3);\n        break;\n      default:\n        CAFFE_THROW(\"Unknown storage order: \", order_);\n    }\n\n    const int dkernel_h = dilation_h_ * (kernel_h_ - 1) + 1;\n    const int dkernel_w = dilation_w_ * (kernel_w_ - 1) + 1;\n    CAFFE_ENFORCE(H >= dkernel_h);\n    CAFFE_ENFORCE(W >= dkernel_w);\n    const int out_h = (H + 2 * pad_ - dkernel_h) / stride_h_ + 1;\n    const int out_w = (W + 2 * pad_ - dkernel_w) / stride_w_ + 1;\n\n    switch (order_) {\n      case StorageOrder::NCHW: {\n        Y->Resize(\n            std::vector<TIndex>{N, C * kernel_h_ * kernel_w_, out_h, out_w});\n\n        const size_t dx = X.size() / N;\n        const size_t dy = Y->size() / N;\n        for (int n = 0; n < N; ++n) {\n          const auto* xdata = X.template data<T>() + (n * dx);\n          auto* ydata = Y->template mutable_data<T>() + (n * dy);\n          math::Im2col<T, Context, StorageOrder::NCHW>(\n              xdata,\n              C,\n              H,\n              W,\n              kernel_h_,\n              kernel_w_,\n              dilation_h_,\n              dilation_w_,\n              pad_,\n              pad_,\n              pad_,\n              pad_,\n              stride_h_,\n              stride_w_,\n              ydata,\n              &context_);\n        }\n      }; break;\n      case StorageOrder::NHWC: {\n        Y->Resize(\n            std::vector<TIndex>{N, out_h, out_w, kernel_h_ * kernel_w_ * C});\n\n        const size_t dx = X.size() / N;\n        const size_t dy = Y->size() / N;\n        for (int n = 0; n < N; ++n) {\n          const auto* xdata = X.template data<T>() + (n * dx);\n          auto* ydata = Y->template mutable_data<T>() + (n * 
dy);\n          math::Im2col<T, Context, StorageOrder::NHWC>(\n              xdata,\n              C,\n              H,\n              W,\n              kernel_h_,\n              kernel_w_,\n              dilation_h_,\n              dilation_w_,\n              pad_,\n              pad_,\n              pad_,\n              pad_,\n              stride_h_,\n              stride_w_,\n              ydata,\n              &context_);\n        }\n      }; break;\n      default:\n        CAFFE_THROW(\"Unknown storage order: \", order_);\n    }\n\n    return true;\n  }\n\n private:\n  int pad_;\n  int kernel_h_;\n  int kernel_w_;\n  int dilation_h_;\n  int dilation_w_;\n  int stride_h_;\n  int stride_w_;\n  StorageOrder order_;\n};\n\ntemplate <typename T, class Context>\nclass Col2ImOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  Col2ImOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        pad_(OperatorBase::GetSingleArgument<int>(\"pad\", 0)),\n        kernel_h_(OperatorBase::GetSingleArgument<int>(\n            \"kernel_h\",\n            OperatorBase::GetSingleArgument<int>(\"kernel\", 0))),\n        kernel_w_(OperatorBase::GetSingleArgument<int>(\n            \"kernel_w\",\n            OperatorBase::GetSingleArgument<int>(\"kernel\", 0))),\n        dilation_h_(OperatorBase::GetSingleArgument<int>(\n            \"dilation_h\",\n            OperatorBase::GetSingleArgument<int>(\"dilation\", 1))),\n        dilation_w_(OperatorBase::GetSingleArgument<int>(\n            \"dilation_w\",\n            OperatorBase::GetSingleArgument<int>(\"dilation\", 1))),\n        stride_h_(OperatorBase::GetSingleArgument<int>(\n            \"stride_h\",\n            OperatorBase::GetSingleArgument<int>(\"stride\", 1))),\n        stride_w_(OperatorBase::GetSingleArgument<int>(\n            \"stride_w\",\n            OperatorBase::GetSingleArgument<int>(\"stride\", 1))),\n        
order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))) {\n    CAFFE_ENFORCE(kernel_h_ > 0);\n    CAFFE_ENFORCE(kernel_w_ > 0);\n    CAFFE_ENFORCE(dilation_h_ > 0);\n    CAFFE_ENFORCE(dilation_w_ > 0);\n    CAFFE_ENFORCE(stride_h_ > 0);\n    CAFFE_ENFORCE(stride_w_ > 0);\n    CAFFE_ENFORCE(pad_ >= 0);\n  }\n\n  bool RunOnDevice() override {\n    auto& X = Input(0);\n    auto& Z = Input(1);\n    auto* Y = Output(0);\n    Y->ResizeLike(Z);\n    CAFFE_ENFORCE(4 == Y->ndim());\n\n    int N = 0, C = 0, H = 0, W = 0;\n    switch (order_) {\n      case StorageOrder::NCHW:\n        N = Y->dim32(0);\n        C = Y->dim32(1);\n        H = Y->dim32(2);\n        W = Y->dim32(3);\n        break;\n      case StorageOrder::NHWC:\n        N = Y->dim32(0);\n        H = Y->dim32(1);\n        W = Y->dim32(2);\n        C = Y->dim32(3);\n        break;\n      default:\n        CAFFE_THROW(\"Unknown storage order: \", order_);\n    }\n\n    const int dkernel_h = dilation_h_ * (kernel_h_ - 1) + 1;\n    const int dkernel_w = dilation_w_ * (kernel_w_ - 1) + 1;\n    CAFFE_ENFORCE(H >= dkernel_h);\n    CAFFE_ENFORCE(W >= dkernel_w);\n    const int out_h = (H + 2 * pad_ - dkernel_h) / stride_h_ + 1;\n    const int out_w = (W + 2 * pad_ - dkernel_w) / stride_w_ + 1;\n    CAFFE_ENFORCE(X.size() == N * kernel_h_ * kernel_w_ * C * out_h * out_w);\n\n    const size_t dx = X.size() / N;\n    const size_t dy = Y->size() / N;\n\n    // could template-specialize this, but it's test code...\n    switch (order_) {\n      case StorageOrder::NCHW: {\n        for (int n = 0; n < N; ++n) {\n          const auto* xdata = X.template data<T>() + (n * dx);\n          auto* ydata = Y->template mutable_data<T>() + (n * dy);\n          math::Col2im<T, Context, StorageOrder::NCHW>(\n              xdata,\n              C,\n              H,\n              W,\n              kernel_h_,\n              kernel_w_,\n              dilation_h_,\n              dilation_w_,\n    
          pad_,\n              pad_,\n              pad_,\n              pad_,\n              stride_h_,\n              stride_w_,\n              ydata,\n              &context_);\n        }\n      }; break;\n      case StorageOrder::NHWC: {\n        for (int n = 0; n < N; ++n) {\n          const auto* xdata = X.template data<T>() + (n * dx);\n          auto* ydata = Y->template mutable_data<T>() + (n * dy);\n          math::Col2im<T, Context, StorageOrder::NHWC>(\n              xdata,\n              C,\n              H,\n              W,\n              kernel_h_,\n              kernel_w_,\n              dilation_h_,\n              dilation_w_,\n              pad_,\n              pad_,\n              pad_,\n              pad_,\n              stride_h_,\n              stride_w_,\n              ydata,\n              &context_);\n        }\n      }; break;\n      default:\n        CAFFE_THROW(\"Unknown storage order: \", order_);\n    }\n\n    return true;\n  }\n\n private:\n  int pad_;\n  int kernel_h_;\n  int kernel_w_;\n  int dilation_h_;\n  int dilation_w_;\n  int stride_h_;\n  int stride_w_;\n  StorageOrder order_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_IM2COL_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/index_hash_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_INDEX_HASH_OPS_H_\n#define CAFFE2_OPERATORS_INDEX_HASH_OPS_H_\n\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass IndexHashOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  IndexHashOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        seed_(OperatorBase::GetSingleArgument<int64_t>(\"seed\", 0)),\n        modulo_(OperatorBase::GetSingleArgument<int64_t>(\"modulo\", 0)) {\n    CAFFE_ENFORCE_GT(modulo_, 0, \"MODULO should be > 0\");\n  }\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, Input(INDICES));\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    auto& indices = Input(INDICES);\n    auto* hashed_indices = Output(HASHED_INDICES);\n    hashed_indices->ResizeLike(indices);\n\n    CAFFE_ENFORCE_GE(\n        static_cast<int64_t>(std::numeric_limits<T>::max()),\n        modulo_,\n        \"MODULO shouldn't be larger than the numeric limit of the indices\");\n\n    auto N = indices.size();\n    auto* indices_data = indices.template data<T>();\n    auto* hashed_indices_data = hashed_indices->template mutable_data<T>();\n\n    for (auto i = 0; i < N; i++) {\n      hashed_indices_data[i] = hash(indices_data[i]);\n    }\n\n    return true;\n  }\n\n protected:\n  template <typename T>\n  T hash(T id) {\n    int8_t* bytes = (int8_t*)&id;\n    T hashed = seed_ * 0xDEADBEEF;\n    for (int i = 0; i < sizeof(T) / sizeof(int8_t); i++) {\n      hashed = hashed * 65537 + bytes[i];\n    }\n    hashed = (modulo_ + hashed % modulo_) % modulo_;\n    return hashed;\n  }\n\n private:\n  INPUT_TAGS(INDICES);\n  OUTPUT_TAGS(HASHED_INDICES);\n\n  int64_t seed_;\n  int64_t modulo_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_INDEX_HASH_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/instance_norm_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_INSTANCE_NORM_OP_H_\n#define CAFFE2_OPERATORS_INSTANCE_NORM_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass InstanceNormOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  InstanceNormOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        epsilon_(OperatorBase::GetSingleArgument<T>(\"epsilon\", 1e-5)),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))) {\n    CAFFE_ENFORCE(epsilon_ >= 0, \"Must pass a nonnegative epsilon.\");\n  }\n  ~InstanceNormOp() {}\n\n  bool RunOnDevice() {\n    switch (order_) {\n      case StorageOrder::NHWC:\n        return RunOnDeviceWithOrderNHWC();\n      case StorageOrder::NCHW:\n        return RunOnDeviceWithOrderNCHW();\n      default:\n        CAFFE_THROW(\"Unknown storage order: \", order_);\n    }\n  }\n\n  bool RunOnDeviceWithOrderNHWC();\n  bool RunOnDeviceWithOrderNCHW();\n\n protected:\n  // parameters\n  T epsilon_;\n  StorageOrder order_;\n\n  // temp results that get passed to the gradient, but are otherwise stored here\n  Tensor<Context> mean_;\n  Tensor<Context> inv_stdev_;\n\n  INPUT_TAGS(INPUT, SCALE, BIAS);\n  OUTPUT_TAGS(OUTPUT, MEAN, INV_STDEV);\n};\n\ntemplate <typename T, class Context>\nclass InstanceNormGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  InstanceNormGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        epsilon_(OperatorBase::GetSingleArgument<T>(\"epsilon\", 1e-5)),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))) {\n    CAFFE_ENFORCE(epsilon_ >= 0, \"Must pass a nonnegative epsilon.\");\n  }\n  ~InstanceNormGradientOp() 
{}\n\n  bool RunOnDevice() {\n    switch (order_) {\n      case StorageOrder::NHWC:\n        return RunOnDeviceWithOrderNHWC();\n      case StorageOrder::NCHW:\n        return RunOnDeviceWithOrderNCHW();\n      default:\n        CAFFE_THROW(\"Unknown storage order: \", order_);\n    }\n  }\n\n  bool RunOnDeviceWithOrderNHWC();\n  bool RunOnDeviceWithOrderNCHW();\n\n protected:\n  // parameters\n  T epsilon_;\n  StorageOrder order_;\n\n  // temp results that could get passed through to this gradient, but if not,\n  // are stored here\n  Tensor<Context> mean_;\n  Tensor<Context> inv_stdev_;\n\n  INPUT_TAGS(INPUT, SCALE, BIAS, OUTPUT_GRAD, MEAN, INV_STDEV);\n  OUTPUT_TAGS(INPUT_GRAD, SCALE_GRAD, BIAS_GRAD);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_INSTANCE_NORM_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/layer_norm_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_LAYER_NORM_OP_H\n#define CAFFE2_OPERATORS_LAYER_NORM_OP_H\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass LayerNormOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  LayerNormOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        axis_(OperatorBase::GetSingleArgument<int>(\"axis\", 1)),\n        epsilon_(OperatorBase::GetSingleArgument<float>(\"epsilon\", 1e-5f)) {}\n  ~LayerNormOp() {}\n\n  template <typename T>\n  bool DoRunWithType();\n\n  bool RunOnDevice() override {\n    return DoRunWithType<float>();\n  }\n\n protected:\n  int axis_;\n  float epsilon_;\n\n  Tensor<Context> scratch_;\n  Tensor<Context> seg_indices_;\n};\n\ntemplate <class Context>\nclass LayerNormGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  LayerNormGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        axis_(OperatorBase::GetSingleArgument<int>(\"axis\", 1)),\n        epsilon_(OperatorBase::GetSingleArgument<float>(\"epsilon\", 0.001)) {}\n  ~LayerNormGradientOp() {}\n\n  template <typename T>\n  bool DoRunWithType();\n\n  bool RunOnDevice() override {\n    return DoRunWithType<float>();\n  }\n\n protected:\n  int axis_;\n  float epsilon_;\n\n  Tensor<Context> scratch_;\n  Tensor<Context> gscratch_;\n  Tensor<Context> seg_indices_;\n  Tensor<Context> dstdev_;\n  Tensor<Context> dmean_;\n};\n\n} // namespace caffe2\n\n#endif /* CAFFE2_OPERATORS_LAYER_NORM_OP_H */\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/leaky_relu_op.h",
    "content": "#pragma once\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass LeakyReluOp : public Operator<Context> {\n public:\n  LeakyReluOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws), alpha_(0) {\n    if (HasArgument(\"alpha\")) {\n      alpha_ =\n          static_cast<T>(OperatorBase::GetSingleArgument<float>(\"alpha\", 0));\n    }\n  }\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  T alpha_;\n};\n\ntemplate <typename T, class Context>\nclass LeakyReluGradientOp final : public Operator<Context> {\n public:\n  LeakyReluGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws), alpha_(0) {\n    if (HasArgument(\"alpha\")) {\n      alpha_ =\n          static_cast<T>(OperatorBase::GetSingleArgument<float>(\"alpha\", 0));\n    }\n  }\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  T alpha_;\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/lengths_reducer_ops.h",
    "content": "#pragma once\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/perfkernels/embedding_lookup.h\"\n\nnamespace caffe2 {\n\n// A templated class that implements SparseLengths[Sum,WeightedSum,Mean].\ntemplate <\n    typename T, // output type\n    class InputTypes, // supported input types, such as TensorTypes<float>\n    bool USE_WEIGHT = 0, // Whether it is SparseLengthsWeightedSum\n    bool USE_MEAN = 0 // Whether this is SparseLengthsMean\n    >\nclass CPUSparseLengthsReductionOp : public Operator<CPUContext> {\n public:\n  USE_OPERATOR_FUNCTIONS(CPUContext);\n  CPUSparseLengthsReductionOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<CPUContext>(operator_def, ws) {\n    static_assert(\n        !(USE_WEIGHT & USE_MEAN), \"Cannot both specify weight and mean.\");\n  }\n\n  ~CPUSparseLengthsReductionOp() {}\n\n  // Currently, we support float and float16 inputs for input data type, and\n  // int32_t and int64_t for the index type.\n\n  bool RunOnDevice() override {\n    return DispatchHelper<InputTypes>::call(this, Input(DATA));\n  }\n\n  template <typename InputType>\n  bool DoRunWithType() {\n    return DispatchHelper<TensorTypes2<int32_t, int64_t>, InputType>::call(\n        this, Input(INDICES));\n  }\n\n  template <typename InputType, typename IndexType>\n  bool DoRunWithType2() {\n    auto& dataInput = Input(DATA);\n    auto& indicesInput = Input(INDICES);\n    auto& lengthsInput = Input(LENGTHS);\n\n    CAFFE_ENFORCE_EQ(1, indicesInput.ndim(), \"INDICES must be a vector\");\n    CAFFE_ENFORCE_EQ(1, lengthsInput.ndim(), \"LENGTHS must be a vector\");\n    const TIndex N = dataInput.dim(0);\n    const int D = dataInput.size_from_dim(1);\n    const TIndex M = lengthsInput.dim(0);\n    const TIndex indices_size = indicesInput.size();\n\n    auto* output = Output(0);\n    auto shape = dataInput.dims();\n    shape[0] = M;\n    output->Resize(shape);\n    T* out_data = output->template 
mutable_data<T>();\n\n    const InputType* in_data = dataInput.template data<InputType>();\n    const IndexType* indices = indicesInput.template data<IndexType>();\n    const int* lengths = lengthsInput.template data<int>();\n    const T* in_weight = nullptr;\n\n    if (USE_WEIGHT) { // static if\n      auto& weightInput = Input(WEIGHT);\n      CAFFE_ENFORCE_EQ(1, weightInput.ndim(), \"WEIGHT must be a vector\");\n      CAFFE_ENFORCE_EQ(\n          weightInput.size(),\n          indices_size,\n          \"Weight should have the same length as indices.\");\n      in_weight = weightInput.template data<T>();\n    }\n\n    // delegate work to perfkernel that branches based on architecture\n    EmbeddingLookup(\n        D,\n        M,\n        indices_size,\n        N,\n        in_data,\n        indices,\n        lengths,\n        in_weight,\n        nullptr, // scale_bias field is only used in SparseLengths8BitsRowwiseOp\n        USE_MEAN,\n        out_data);\n    return true;\n  }\n\n private:\n  enum {\n    DATA = 0, // Data input.\n    WEIGHT = 1, // Weight input used in SparseLengthsWeightedSum\n    INDICES = 1 + USE_WEIGHT, // 1 in SparseLengths[Sum,Mean] and\n                              // 2 in SparseLengthsWeightedSum\n    LENGTHS = 2 + USE_WEIGHT, // 2 in SparseLengths[Sum, Mean],\n                              // 3 in SparseLengthsWeightedSum\n  };\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/lengths_reducer_rowwise_8bit_ops.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#ifndef CAFFE2_OPERATORS_LENGTHS_REDUCER_ROWWISE_8bits_OP_H_\n#define CAFFE2_OPERATORS_LENGTHS_REDUCER_ROWWISE_8bits_OP_H_\n// SparseLengthsSum8bits\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/reducer_functors.h\"\n#include \"caffe2/perfkernels/embedding_lookup.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nnamespace {\nconst float kEqualityThreshold = 1e-10;\n}\n\ntemplate <\n    class Context,\n    bool USE_WEIGHTS = 0,\n    bool USE_MEAN = 0,\n    class OutDataT = float>\nclass SparseLengths8BitsRowwiseOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(SparseLengths8BitsRowwiseOp);\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, Input(INDICES));\n  }\n\n  template <typename IndexType>\n  bool DoRunWithType() {\n    auto& dataInput = Input(DATA);\n    auto& lengthsInput = Input(LENGTHS);\n    auto* output = Output(0);\n    auto* scale_bias = Input(SCALE_BIAS).template data<float>();\n    CAFFE_ENFORCE_EQ(1, lengthsInput.ndim(), \"LENGTHS must be a vector\");\n    const TIndex outputSize = lengthsInput.dim(0);\n\n    auto& indicesInput = Input(INDICES);\n    CAFFE_ENFORCE_EQ(\n        2, Input(SCALE_BIAS).ndim(), \"scale_bias has to be matrix\");\n    CAFFE_ENFORCE_EQ(\n        dataInput.dim(0),\n        Input(SCALE_BIAS).dim(0),\n        \"scale_bias must have the same first dim as data\");\n    CAFFE_ENFORCE_EQ(\n        2,\n        Input(SCALE_BIAS).dim(1),\n        \"the second dim of scale_bias has to be equal to 2\");\n    CAFFE_ENFORCE_EQ(1, indicesInput.ndim(), \"INDICES must be a vector\");\n    const IndexType* indices = indicesInput.template data<IndexType>();\n    TIndex dataToReduceSize = indicesInput.dim(0);\n\n    const int* lengths = 
lengthsInput.template data<int>();\n    vector<TIndex> shape = dataInput.dims();\n    shape[0] = outputSize;\n    output->Resize(shape);\n    const float* w = nullptr;\n    if (USE_WEIGHTS) {\n      w = Input(WEIGHTS).template data<float>();\n    }\n    TIndex in_block_size = dataInput.size_from_dim(1);\n    OutDataT* out = output->template mutable_data<OutDataT>();\n    const uint8_t* input_data = dataInput.template data<uint8_t>();\n\n    // delegate work to perfkernel that branches based on architecture\n    const TIndex indices_size = indicesInput.size();\n    const TIndex N = dataInput.dim(0);\n    EmbeddingLookup(\n        in_block_size,\n        outputSize,\n        indices_size,\n        N, // embeding table length\n        input_data,\n        indices,\n        lengths,\n        w,\n        scale_bias,\n        USE_MEAN,\n        out);\n\n    return true;\n  }\n\n  enum {\n    DATA = 0,\n    WEIGHTS = 1,\n    INDICES = 1 + USE_WEIGHTS,\n    LENGTHS = 2 + USE_WEIGHTS,\n    SCALE_BIAS = 3 + USE_WEIGHTS\n  };\n};\n\ntemplate <class Context>\nclass FloatToRowwiseQuantized8BitsOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(FloatToRowwiseQuantized8BitsOp);\n  bool RunOnDevice() override {\n    auto& input = Input(DATA_FLOAT);\n    auto* output = Output(DATA_UINT8);\n    auto* scale_bias = Output(SCALE_BIAS);\n    auto* input_data = input.template data<float>();\n    output->ResizeLike(input);\n    vector<TIndex> scale_bias_dims = {input.dim(0), 2};\n    scale_bias->Resize(scale_bias_dims);\n    auto* output_data = output->template mutable_data<uint8_t>();\n    float* scale_bias_data = scale_bias->template mutable_data<float>();\n    size_t n_blocks = input.dim(0);\n    size_t block_size = input.size_from_dim(1);\n    for (size_t i = 0; i < n_blocks; ++i) {\n      ConstEigenVectorArrayMap<float> input_row(\n          input_data + i * block_size, block_size);\n      EigenVectorArrayMap<uint8_t> output_row(\n    
      output_data + i * block_size, block_size);\n      auto min_element = input_row.minCoeff();\n      auto max_element = input_row.maxCoeff();\n      if (max_element - min_element < kEqualityThreshold) {\n        scale_bias_data[2 * i] = 1.0f;\n        scale_bias_data[2 * i + 1] = min_element;\n        memset(output_data + i * block_size, 0, block_size);\n      } else {\n        scale_bias_data[2 * i] = (max_element - min_element) / 255.0f;\n        scale_bias_data[2 * i + 1] = min_element;\n        const float inv_scale = 1.0f / scale_bias_data[2 * i];\n        output_row = ((input_row - scale_bias_data[2 * i + 1]) * inv_scale)\n                         .round()\n                         .template cast<uint8_t>();\n      }\n    }\n    return true;\n  }\n\n private:\n  INPUT_TAGS(DATA_FLOAT);\n  OUTPUT_TAGS(DATA_UINT8, SCALE_BIAS);\n};\n\ntemplate <class Context>\nclass Rowwise8BitQuantizedToFloatOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(Rowwise8BitQuantizedToFloatOp);\n  bool RunOnDevice() override {\n    auto& input = Input(DATA_UINT8);\n    auto& scale_bias = Input(SCALE_BIAS);\n    auto* output = Output(DATA_FLOAT);\n    CAFFE_ENFORCE_EQ(2, scale_bias.ndim(), \"scale_bias has to be matrix\");\n    CAFFE_ENFORCE_EQ(\n        input.dim(0),\n        scale_bias.dim(0),\n        \"scale_bias must have the same first dim as data\");\n    CAFFE_ENFORCE_EQ(\n        2,\n        scale_bias.dim(1),\n        \"the second dim of scale_bias has to be equal to 2\");\n    output->ResizeLike(input);\n    auto* input_data = input.template data<uint8_t>();\n    auto* scale_bias_data = scale_bias.template data<float>();\n\n    auto* output_data = output->template mutable_data<float>();\n    size_t block_size = input.size_from_dim(1);\n    size_t n_blocks = input.dim(0);\n\n    for (size_t i = 0; i < n_blocks; ++i) {\n      ConstEigenVectorArrayMap<uint8_t> input_row(\n          input_data + i * block_size, 
block_size);\n      EigenVectorArrayMap<float> output_row(\n          output_data + i * block_size, block_size);\n      output_row = input_row.template cast<float>() * scale_bias_data[2 * i] +\n          scale_bias_data[2 * i + 1];\n    }\n    return true;\n  }\n\n private:\n  INPUT_TAGS(DATA_UINT8, SCALE_BIAS);\n  OUTPUT_TAGS(DATA_FLOAT);\n};\n}\n#endif // CAFFE2_OPERATORS_LENGTHS_REDUCER_ROWWISE_8bits_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/lengths_tile_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_LENGTHS_TILE_OP_H_\n#define CAFFE2_OPERATORS_LENGTHS_TILE_OP_H_\n\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass LengthsTileOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(LengthsTileOp);\n\n  bool RunOnDevice() override {\n    auto& data = Input(DATA);\n    auto& lengths = Input(LENGTHS);\n    auto* output = Output(0);\n\n    CAFFE_ENFORCE_EQ(lengths.ndim(), 1, \"LENGTHS must be 1-D\");\n    CAFFE_ENFORCE_GE(data.ndim(), 1, \"DATA should be at least 1-D\");\n    CAFFE_ENFORCE_EQ(lengths.size(), data.dim(0));\n\n    auto* lengths_data = lengths.template data<int32_t>();\n    int32_t total_length = 0;\n    math::Sum<int32_t, Context>(\n        lengths.size(), lengths_data, &total_length, &context_);\n\n    auto shape = data.dims();\n    shape[0] = total_length;\n    output->Resize(shape);\n\n    auto block_size = data.size_from_dim(1);\n    auto block_bytesize = data.size_from_dim(1) * data.meta().itemsize();\n    auto N = lengths.size();\n\n    auto src = static_cast<const char*>(data.raw_data());\n    auto out = static_cast<char*>(output->raw_mutable_data(data.meta()));\n\n    for (TIndex i = 0; i < N; ++i) {\n      auto length = lengths_data[i];\n      CAFFE_ENFORCE_GE(length, 0);\n      for (int32_t j = 0; j < length; ++j) {\n        context_.template CopyItems<Context, Context>(\n            data.meta(), block_size, src, out);\n        out += block_bytesize;\n      }\n      src += block_bytesize;\n    }\n    return true;\n  }\n\n  INPUT_TAGS(DATA, LENGTHS);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_LENGTHS_TILE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/lengths_top_k_op.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#ifndef CAFFE2_OPERATORS_LENGTHS_TOP_K_OP_H_\n#define CAFFE2_OPERATORS_LENGTHS_TOP_K_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/conv_pool_op_base.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\ntemplate <typename T, class Context>\nclass LengthsTopKOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  LengthsTopKOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws), OP_SINGLE_ARG(int, \"k\", k_, -1) {\n    CAFFE_ENFORCE_GE(k_, 1, \"k argument must be >= 1\");\n  }\n\n  bool RunOnDevice() override;\n\n protected:\n  int k_;\n  INPUT_TAGS(X_IN, Y_IN);\n  OUTPUT_TAGS(TOPK_VALUES_OUT, TOPK_INDICES_OUT);\n};\n\ntemplate <typename T, class Context>\nclass LengthsTopKGradientOp : public Operator<Context> {\n public:\n  LengthsTopKGradientOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws), OP_SINGLE_ARG(int, \"k\", k_, -1) {\n    CAFFE_ENFORCE_GE(k_, 1, \"k argument must be >= 1\");\n  }\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  int k_;\n  INPUT_TAGS(LENGTH_IN, INDICES_IN, DER_TOPK_IN);\n  OUTPUT_TAGS(DER_X_OUT);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_LENGTHS_TOP_K_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/load_save_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_LOAD_SAVE_OP_H_\n#define CAFFE2_OPERATORS_LOAD_SAVE_OP_H_\n\n#include <cstdio>\n#include <map>\n#include <unordered_set>\n\n#include \"caffe2/core/blob_serialization.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/db.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n#include \"caffe2/utils/proto_utils.h\"\n\nnamespace caffe2 {\n\nnamespace {\nstruct BlobState {\n  int64_t total_size;\n  int64_t current_size;\n  bool is_tensor;\n  std::set<int32_t> seen_chunks_ids;\n\n  explicit BlobState(\n      int64_t total_size = 0,\n      int64_t current_size = 0,\n      bool is_tensor = false)\n      : total_size(total_size),\n        current_size(current_size),\n        is_tensor(is_tensor) {}\n};\n} // namespace\n\nusing db::Cursor;\nusing db::DB;\nusing db::Transaction;\n\ntemplate <class Context>\nclass DBExistsOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  DBExistsOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        ws_(ws),\n        absolute_path_(\n            OperatorBase::GetSingleArgument<int>(\"absolute_path\", false)),\n        db_name_(OperatorBase::GetSingleArgument<string>(\"db_name\", \"\")),\n        db_type_(OperatorBase::GetSingleArgument<string>(\"db_type\", \"\")) {}\n\n  bool RunOnDevice() override {\n    string full_db_name =\n        absolute_path_ ? 
db_name_ : (ws_->RootFolder() + \"/\" + db_name_);\n    auto* output = Output(0);\n    output->Resize();\n    bool* exists = output->template mutable_data<bool>();\n\n    *exists = caffe2::db::DBExists(db_type_, full_db_name);\n    return true;\n  }\n\n private:\n  Workspace* ws_;\n  bool absolute_path_;\n  std::string db_name_;\n  std::string db_type_;\n};\n\ntemplate <class Context>\nclass LoadOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  LoadOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        ws_(ws),\n        absolute_path_(\n            OperatorBase::GetSingleArgument<int>(\"absolute_path\", false)),\n        add_prefix_(OperatorBase::GetSingleArgument<string>(\"add_prefix\", \"\")),\n        strip_prefix_(\n            OperatorBase::GetSingleArgument<string>(\"strip_prefix\", \"\")),\n        db_name_(OperatorBase::GetSingleArgument<string>(\"db\", \"\")),\n        db_type_(OperatorBase::GetSingleArgument<string>(\"db_type\", \"\")),\n        keep_device_(OperatorBase::GetSingleArgument<int>(\"keep_device\", 0)),\n        load_all_(OperatorBase::GetSingleArgument<int>(\"load_all\", 0)),\n        allow_incomplete_(\n            OperatorBase::GetSingleArgument<bool>(\"allow_incomplete\", false)),\n        blob_names_(OperatorBase::GetRepeatedArgument<string>(\n            \"source_blob_names\")) {\n    if (InputSize() == 0) {\n      CAFFE_ENFORCE_GT(db_name_.size(), 0, \"Must specify a db name.\");\n      CAFFE_ENFORCE_GT(db_type_.size(), 0, \"Must specify a db type.\");\n    }\n    CAFFE_ENFORCE(blob_names_.empty() || blob_names_.size() == OutputSize(),\n      \"Number of output blobs and source_blob_names mismatch.\");\n    CAFFE_ENFORCE(blob_names_.empty() || strip_prefix_.empty(),\n        \"strip_prefix and source_blob_names are mutually exclusive.\");\n    CAFFE_ENFORCE(blob_names_.empty() || !load_all_,\n        \"cannot load_all_ while using 
source_blob_names.\");\n    if (!load_all_) {\n      // blob_names_ will be filled with ''source blob names'' in file/db\n      // if argument source_blob_names is not given, then blob_names_ is\n      // inferred from operator output\n      if(blob_names_.empty()) {\n        for (const string& name : operator_def.output()) {\n          blob_names_.push_back(name);\n        }\n      }\n      int idx = 0;\n      std::set<std::string> name_set;\n      for (const string& name : blob_names_) {\n        CAFFE_ENFORCE(name_set.insert(name).second,\n            \"Duplicated source blob name: \", name);\n        output_indices_[name] = idx++;\n      }\n    }\n  }\n\n  void SetCurrentDevice(BlobProto* proto);\n\n  bool RunOnDevice() override {\n    if (InputSize() == 1) {\n      const db::DBReader& reader = OperatorBase::Input<db::DBReader>(0);\n      extract(reader.cursor());\n    } else {\n      string full_db_name =\n          absolute_path_ ? db_name_ : (ws_->RootFolder() + \"/\" + db_name_);\n      std::unique_ptr<DB> in_db(\n          caffe2::db::CreateDB(db_type_, full_db_name, caffe2::db::READ));\n      CAFFE_ENFORCE(in_db.get(), \"Cannot open db: \", db_name_);\n      std::unique_ptr<Cursor> cursor(in_db->NewCursor());\n      extract(cursor.get());\n    }\n\n    return true;\n  }\n\n private:\n  void extract(Cursor* cursor) {\n    if (load_all_) {\n      extractAll(cursor);\n    } else {\n      extractFrom(cursor, OperatorBase::Outputs());\n    }\n  }\n\n  void extractAll(Cursor* cursor) {\n    CAFFE_ENFORCE(cursor, \"cursor is not valid\");\n    std::unordered_map<string, BlobState> blob_states;\n    int loaded_blobs = 0;\n    for (; cursor->Valid(); cursor->Next()) {\n      const auto key = buildBlobNameFromDbKey(cursor->key());\n      BlobProto proto;\n      CAFFE_ENFORCE(\n          proto.ParseFromString(cursor->value()), \"Couldn't parse Proto\");\n      if (!keep_device_) {\n        // If we are not keeping the device as the one specified in the\n        // 
proto, we will set the current device.\n        SetCurrentDevice(&proto);\n      }\n\n      Blob* blob = ws_->CreateBlob(key);\n      ProcessBlob(blob, proto, &blob_states, key, &loaded_blobs);\n    }\n\n    VLOG(1) << \"Loaded \" << loaded_blobs << \" from db\";\n    validateBlobStates(blob_states);\n  }\n\n  void extractFrom(Cursor* cursor, const vector<Blob*>& outputs) {\n    CAFFE_ENFORCE(cursor);\n    std::unordered_map<string, BlobState> blob_states;\n    int loaded_blobs = 0;\n    for (; cursor->Valid(); cursor->Next()) {\n      const auto key = buildBlobNameFromDbKey(cursor->key());\n      if (!output_indices_.count(key)) {\n        VLOG(1) << \"Key \" << key << \" not used. Skipping.\";\n      } else {\n        VLOG(2) << \"Deserializing blob \" << key;\n        BlobProto proto;\n        CAFFE_ENFORCE(proto.ParseFromString(cursor->value()));\n        if (!keep_device_) {\n          // If we are not keeping the device as the one specified in the\n          // proto, we will set the current device.\n          SetCurrentDevice(&proto);\n        }\n        auto blobIndex = output_indices_[key];\n        Blob* blob = outputs.at(blobIndex);\n        ProcessBlob(blob, proto, &blob_states, key, &loaded_blobs);\n\n        if (loaded_blobs == OutputSize()) {\n          VLOG(1) << \"Read all required blobs\";\n          break;\n        }\n      }\n    }\n\n    validateBlobStates(blob_states);\n    VLOG(1) << \"Fully loaded \" << blob_states.size() << \" blobs\";\n\n    if (loaded_blobs != OutputSize()) {\n      if (allow_incomplete_ && loaded_blobs < OutputSize()) {\n        VLOG(1) << \"Loaded \" << loaded_blobs << \" blobs out of \" << OutputSize()\n                << \" blobs from db.\";\n        return;\n      }\n      for (const string& output_name : this->debug_def().output()) {\n        if (blob_states.count(output_name) == 0) {\n          LOG(ERROR) << \"Failed to load blob: \" << output_name;\n        }\n      }\n      CAFFE_THROW(\n          \"Expected to 
load \",\n          OutputSize(),\n          \" blobs, got \",\n          loaded_blobs,\n          \" only.\\n\");\n    }\n  }\n\n  string buildBlobNameFromDbKey(const string& dbKey) {\n    string key = dbKey.substr(0, dbKey.find(kChunkIdSeparator));\n    if (!strip_prefix_.empty()) {\n      auto match_pos = key.find(strip_prefix_);\n      if (match_pos != string::npos) {\n        key = key.substr(match_pos + strip_prefix_.size());\n      }\n    }\n    key = add_prefix_ + key;\n    return key;\n  }\n\n private:\n  // We are tracking sizes of already read tensor parts while reading data\n  // chunks. This way we can make sure that all chunks were loaded in the end.\n  void ProcessBlob(\n      Blob* blob,\n      const BlobProto& proto,\n      std::unordered_map<string, BlobState>* blob_states_ptr,\n      const string& key,\n      int* loaded_blobs) {\n    auto& blob_states = *blob_states_ptr;\n    if (blob_states.count(key) == 0) {\n      // We reset the blob so that any existing content is destroyed. 
This\n      // is to guarantee correct device placement: if we are deserializing\n      // into a TensorCUDA, without explicit Reset we might be loading data\n      // into an existing TensorCUDA that has pre-allocated memory on a\n      // different GPU.\n      blob->Reset();\n    }\n    blob->Deserialize(proto);\n    if (proto.has_content_num_chunks()) {\n      if (!blob_states.count(key)) {\n        blob_states[key] = BlobState(proto.content_num_chunks());\n      }\n      CAFFE_ENFORCE(\n          blob_states[key]\n              .seen_chunks_ids.insert(proto.content_chunk_id())\n              .second,\n          \"Chunk with the same id has occured twice for: \",\n          key);\n      CAFFE_ENFORCE(\n          proto.content_chunk_id() >= 0 &&\n              proto.content_chunk_id() < blob_states[key].total_size,\n          \"Chunk id has to be not less than 0 and \"\n          \"less than content_num_chunks for key: \",\n          key);\n      blob_states[key].current_size++;\n      CAFFE_ENFORCE(\n          !blob_states[key].is_tensor,\n          \"Proto with content_chunks can not store tensor: \",\n          key);\n      CAFFE_ENFORCE(\n          blob_states[key].current_size <= blob_states[key].total_size,\n          \"Found an extra part for an already filled blob: \",\n          key);\n      if (blob_states[key].current_size == blob_states[key].total_size) {\n        (*loaded_blobs)++;\n      }\n      return;\n    }\n    if (!proto.has_tensor()) {\n      // If blob is divided into chunks the field content_chunks has to be set,\n      // otherwise only tensors can be seen multiple times as chunks.\n      CAFFE_ENFORCE(blob_states.count(key) == 0, \"Blob duplicated: \", key);\n      blob_states[key] = BlobState();\n      (*loaded_blobs)++;\n      return;\n    }\n    CAFFE_ENFORCE(proto.has_tensor());\n    if (blob_states.count(key)) {\n      CAFFE_ENFORCE(blob_states[key].is_tensor, \"Must be tensor \", key);\n      CAFFE_ENFORCE(\n          
blob_states[key].current_size < blob_states[key].total_size,\n          \"Found an extra part for an already filled tensor: \",\n          key);\n      CAFFE_ENFORCE(\n          proto.tensor().has_segment(),\n          \"Partial tensor must have a segment: \",\n          key);\n      blob_states[key].current_size +=\n          proto.tensor().segment().end() - proto.tensor().segment().begin();\n      CAFFE_ENFORCE(\n          blob_states[key].current_size <= blob_states[key].total_size,\n          \"Tensor parts are bigger than target size for tensor: \",\n          key);\n    } else {\n      const auto& dims = proto.tensor().dims();\n      int64_t total_size = 1;\n      for (const auto& dim : dims) {\n        total_size *= dim;\n      }\n      auto current_size = total_size;\n      if (proto.tensor().has_segment()) {\n        current_size =\n            proto.tensor().segment().end() - proto.tensor().segment().begin();\n      }\n      blob_states[key] =\n          BlobState(total_size, current_size, true /* is_tensor */);\n    }\n\n    if (blob_states[key].current_size == blob_states[key].total_size) {\n      (*loaded_blobs)++;\n    }\n  }\n\n  void validateBlobStates(\n      const std::unordered_map<string, BlobState>& blob_states) {\n    for (const auto& iter : blob_states) {\n      const BlobState& blob_state = iter.second;\n      CAFFE_ENFORCE(\n          blob_state.current_size == blob_state.total_size,\n          \"Data size mismatch for blob \",\n          iter.first,\n          \". 
Expected: \",\n          blob_state.total_size,\n          \" Read: \",\n          blob_state.current_size);\n    }\n  }\n\n  Workspace* ws_;\n  bool absolute_path_;\n  string add_prefix_;\n  string strip_prefix_;\n  string db_name_;\n  string db_type_;\n  bool keep_device_;\n  bool load_all_;\n  bool allow_incomplete_;\n  std::map<string, int> output_indices_;\n  std::vector<std::string> blob_names_;\n};\n\ntemplate <class Context>\nclass SaveOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SaveOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        ws_(ws),\n        absolute_path_(\n            OperatorBase::GetSingleArgument<int>(\"absolute_path\", false)),\n        strip_prefix_(\n            OperatorBase::GetSingleArgument<string>(\"strip_prefix\", \"\")),\n        db_name_(OperatorBase::GetSingleArgument<string>(\"db\", \"\")),\n        db_type_(OperatorBase::GetSingleArgument<string>(\"db_type\", \"\")),\n        blob_names_(\n            OperatorBase::GetRepeatedArgument<string>(\"blob_name_overrides\")) {\n    CAFFE_ENFORCE_GT(db_name_.size(), 0, \"Must specify a db name.\");\n    CAFFE_ENFORCE_GT(db_type_.size(), 0, \"Must specify a db type.\");\n    CAFFE_ENFORCE(\n        blob_names_.empty() ||\n            blob_names_.size() == OperatorBase::Inputs().size(),\n        \"Number of blobs and blob_name_overrides mismatch.\");\n    CAFFE_ENFORCE(\n        blob_names_.empty() || strip_prefix_.empty(),\n        \"strip_prefix and blob_name_overrides are mutually exclusive.\");\n\n    if (blob_names_.empty()) {\n      std::set<std::string> input_names;\n      blob_names_.resize(OperatorBase::Inputs().size());\n      for (int i = 0; i < blob_names_.size(); ++i) {\n        std::string name;\n        if (strip_prefix_.empty()) {\n          name = operator_def.input(i);\n        } else {\n          auto match_pos = operator_def.input(i).find(strip_prefix_);\n          if 
(match_pos == string::npos) {\n            name = operator_def.input(i);\n          } else {\n            name = operator_def.input(i).substr(\n                match_pos + strip_prefix_.size(), string::npos);\n          }\n        }\n        CAFFE_ENFORCE(\n            input_names.insert(name).second, \"Duplicated input: \", name);\n        blob_names_[i] = name;\n      }\n    }\n  }\n\n  bool RunOnDevice() override {\n    string full_db_name =\n        absolute_path_ ? db_name_ : (ws_->RootFolder() + \"/\" + db_name_);\n    std::unique_ptr<DB> out_db(\n        caffe2::db::CreateDB(db_type_, full_db_name, caffe2::db::NEW));\n    CAFFE_ENFORCE(out_db.get(), \"Cannot open db for writing: \", full_db_name);\n\n    BlobSerializerBase::SerializationAcceptor acceptor = [&](\n        const std::string& blobName, const std::string& data) {\n      // transaction should take care of locking\n      VLOG(2) << \"Sending \" << blobName << \" blob's data of size \"\n              << data.size() << \" to db\";\n      auto transaction = out_db->NewTransaction();\n      transaction->Put(blobName, data);\n      transaction->Commit();\n    };\n\n    const vector<const Blob*>& inputs = OperatorBase::Inputs();\n    for (int i = 0; i < inputs.size(); ++i) {\n      inputs[i]->Serialize(blob_names_[i], acceptor);\n    }\n    out_db->Close();\n    return true;\n  }\n\n private:\n  Workspace* ws_;\n  bool absolute_path_;\n  string strip_prefix_;\n  string db_name_;\n  string db_type_;\n  std::vector<std::string> blob_names_;\n};\n\ntemplate <typename... Ts>\nstring FormatString(const string& pattern, Ts... 
values) {\n  // Note(Yangqing): We believe that 1024 is enough, but who are we to assert\n  // that?\n  // As a result, if things go wrong, we'll just throw the towel and quit loud.\n  // Yeah, I know that there is snprintf, but it is not present in *some*\n  // platforms unfortunately.\n  char buffer[1024];\n  int written = sprintf(buffer, pattern.c_str(), values...);\n  if (written < 0 || written + 1 > 1024) {\n    LOG(FATAL) << \"FormatString fails: total bytes written \" << written;\n  }\n  return string(buffer);\n  /*\n   * The following is the snprintf version that is safe; enable it one day?\n  unsigned int required =\n      std::snprintf(nullptr, 0, pattern.c_str(), values...) + 1;\n  char bytes[required];\n  std::snprintf(bytes, required, pattern.c_str(), values...);\n  return string(bytes);\n  */\n}\n\n// CheckpointOp is a wrapper over a SaveFloatTensorOp that basically allows\n// flexible naming over iterations.\n// The file pattern in db_name should be a format string that can be passed into\n// sprintf with an int argument specifying the current iteration. An example:\n//     \"/path/to/my/checkpoint/checkpoint_at_%d.pb\"\ntemplate <class Context>\nclass CheckpointOp final : public Operator<Context> {\n public:\n  CheckpointOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        db_pattern_(OperatorBase::GetSingleArgument<string>(\"db\", \"\")),\n        every_(OperatorBase::GetSingleArgument<int>(\"every\", 1)),\n        ws_(ws),\n        save_op_def_(operator_def) {\n    CAFFE_ENFORCE_GT(\n        db_pattern_.size(), 0, \"Must specify a checkpoint file pattern.\");\n    CAFFE_ENFORCE_GT(every_, 0, \"Checkpoint interval should be positive.\");\n    if (every_ == 1) {\n      // Just issue a warning, but it's totally legal so we don't do anything.\n      LOG(WARNING) << \"It seems that we are checkpointting every iteration. 
\"\n                   << \"Is that intended?\";\n    }\n    save_op_def_.set_type(\"Save\");\n  }\n\n  bool RunOnDevice() override {\n    int64_t iter =\n        OperatorBase::Input<TensorCPU>(0).template data<int64_t>()[0];\n    if (iter % every_ == 0) {\n      GetMutableArgument(\"db\", true, &save_op_def_)\n          ->set_s(FormatString(db_pattern_, iter));\n      SaveOp<Context> sub_op(save_op_def_, ws_);\n      return sub_op.Run();\n    } else {\n      return true;\n    }\n  }\n\n private:\n  string db_pattern_;\n  int every_;\n  Workspace* ws_;\n  OperatorDef save_op_def_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_LOAD_SAVE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/local_response_normalization_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_LOCAL_RESPONSE_NORMALIZATION_OP_H_\n#define CAFFE2_OPERATORS_LOCAL_RESPONSE_NORMALIZATION_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass LRNOpBase : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  LRNOpBase(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        size_(OperatorBase::GetSingleArgument<int>(\"size\", 0)),\n        alpha_(OperatorBase::GetSingleArgument<float>(\"alpha\", 0)),\n        beta_(OperatorBase::GetSingleArgument<float>(\"beta\", 0)),\n        bias_(OperatorBase::GetSingleArgument<float>(\"bias\", 1)),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))),\n        pre_pad_((size_ - 1) / 2) {\n    DCHECK_GT(size_, 0);\n    DCHECK_EQ(size_ % 2, 1);\n    DCHECK_GT(alpha_, 0);\n    DCHECK_GT(beta_, 0);\n  }\n\n  bool RunOnDevice() override {\n    switch (order_) {\n      case StorageOrder::NHWC:\n        return RunOnDeviceWithOrderNHWC();\n      case StorageOrder::NCHW:\n        return RunOnDeviceWithOrderNCHW();\n      default:\n        LOG(FATAL) << \"Unknown storage order: \" << order_;\n    }\n    // To suppress old compiler warnings\n    return true;\n  }\n\n  virtual bool RunOnDeviceWithOrderNCHW() = 0;\n  virtual bool RunOnDeviceWithOrderNHWC() = 0;\n\n protected:\n  const int size_;\n  const float alpha_;\n  const float beta_;\n  const float bias_;\n  const StorageOrder order_;\n  const int pre_pad_;\n  // Input: X; Output: Y, scale.\n};\n\ntemplate <typename T, class Context>\nclass LRNOp final : public LRNOpBase<T, Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  LRNOp(const OperatorDef& operator_def, Workspace* ws)\n      : LRNOpBase<T, Context>(operator_def, ws) {}\n\n  bool 
RunOnDeviceWithOrderNCHW() override;\n  bool RunOnDeviceWithOrderNHWC() override;\n\n protected:\n  // Input: X; Output: Y, scale.\n  OUTPUT_TAGS(OUTPUT, SCALE);\n  Tensor<Context>* scale_ = nullptr;\n  Tensor<Context> local_scale_tensor_;\n};\n\ntemplate <typename T, class Context>\nclass LRNGradientOp final : public LRNOpBase<T, Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  LRNGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : LRNOpBase<T, Context>(operator_def, ws) {}\n\n  bool RunOnDeviceWithOrderNCHW() override;\n  bool RunOnDeviceWithOrderNHWC() override;\n\n protected:\n  // Input: X, Y, scale, dY; Output: dX\n  INPUT_TAGS(INPUT, OUTPUT, SCALE, OUTPUT_GRAD);\n  Tensor<Context>* scale_ = nullptr;\n  Tensor<Context> local_scale_tensor_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_LOCAL_RESPONSE_NORMALIZATION_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/loss_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_LOSS_OP_H_\n#define CAFFE2_OPERATORS_LOSS_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/reduction_ops.h\"\n#include \"caffe2/operators/utility_ops.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\n// AveragedLoss takes in the input and produces the output loss value as\n// the average of the input.\ntemplate <typename T, class Context>\nclass AveragedLoss final : public SumElementsOp<T, Context> {\n public:\n  AveragedLoss(const OperatorDef& operator_def, Workspace* ws)\n      : SumElementsOp<T, Context>(operator_def, ws, true) {}\n  ~AveragedLoss() {}\n};\n\ntemplate <typename T, class Context>\nclass AveragedLossGradient final : public SumElementsGradientOp<T, Context> {\n public:\n  AveragedLossGradient(const OperatorDef& operator_def, Workspace* ws)\n      : SumElementsGradientOp<T, Context>(operator_def, ws, true) {}\n  ~AveragedLossGradient() {}\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_LOSS_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/lpnorm_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_LPNORM_OP_H_\n#define CAFFE2_OPERATORS_LPNORM_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass LpNormOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  LpNormOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        p_(OperatorBase::GetSingleArgument<int>(\"p\", 2)) {\n    CAFFE_ENFORCE(p_ == 1 || p_ == 2, \"p should be either 1 or 2.\");\n  }\n\n  bool RunOnDevice() override;\n\n protected:\n  int p_;\n  INPUT_TAGS(X_IN);\n  OUTPUT_TAGS(OUT);\n  // Input: X; Output: Norm\n};\n\ntemplate <typename T, class Context>\nclass LpNormGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  LpNormGradientOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        p_(OperatorBase::GetSingleArgument<int>(\"p\", 2)) {\n    CAFFE_ENFORCE(p_ == 1 || p_ == 2, \"p should be either 1 or 2.\");\n  }\n\n  bool RunOnDevice() override;\n\n protected:\n  int p_;\n  INPUT_TAGS(X_IN, DER_NORM_IN);\n  OUTPUT_TAGS(DER_X_OUT);\n  // Input: X, dNorm; Output: dX\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_LPNORM_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/lstm_unit_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_LSTM_UNIT_OP_H_\n#define CAFFE2_OPERATORS_LSTM_UNIT_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/conversions.h\"\n\nnamespace caffe2 {\nnamespace detail {\ntemplate <typename T>\ninline T sigmoid(T x) {\n  return 1. / (1. + exp(-x));\n}\n\ntemplate <typename T>\ninline T host_tanh(T x) {\n  return 2. * sigmoid(2. * x) - 1.;\n}\n\ntemplate <typename T, typename Context>\nvoid LSTMUnit(\n    int N,\n    int D,\n    int t,\n    const T* H_prev,\n    const T* C_prev,\n    const T* X,\n    const int32_t* seqLengths,\n    bool drop_states,\n    T* C,\n    T* H,\n    const float forget_bias,\n    Context* /*context*/) {\n  for (int n = 0; n < N; ++n) {\n    const bool valid = t < seqLengths[n];\n\n    for (int d = 0; d < D; ++d) {\n      if (!valid) {\n        if (drop_states) {\n          H[d] = 0;\n          C[d] = 0;\n        } else {\n          H[d] = H_prev[d];\n          C[d] = C_prev[d];\n        }\n      } else {\n        const T i = sigmoid(X[d]);\n        const T f = sigmoid(X[1 * D + d] + convert::To<float, T>(forget_bias));\n        const T o = sigmoid(X[2 * D + d]);\n        const T g = host_tanh(X[3 * D + d]);\n        const T c_prev = C_prev[d];\n        const T c = f * c_prev + i * g;\n        C[d] = c;\n        const T host_tanh_c = host_tanh(c);\n        H[d] = o * host_tanh_c;\n      }\n    }\n    H_prev += D;\n    C_prev += D;\n    X += 4 * D;\n    C += D;\n    H += D;\n  }\n}\n\ntemplate <typename T, typename Context>\nvoid LSTMUnitGradient(\n    int N,\n    int D,\n    int t,\n    const T* C_prev,\n    const T* X,\n    const int32_t* seqLengths,\n    const T* C,\n    const T* H,\n    const T* C_diff,\n    const T* H_diff,\n    bool drop_states,\n    T* H_prev_diff,\n    T* C_prev_diff,\n    T* X_diff,\n    const float forget_bias,\n    Context* /*context*/) {\n  for (int n = 0; n < N; ++n) {\n    const bool valid = t < seqLengths[n];\n\n    for (int 
d = 0; d < D; ++d) {\n      T* c_prev_diff = C_prev_diff + d;\n      T* h_prev_diff = H_prev_diff + d;\n      T* i_diff = X_diff + d;\n      T* f_diff = X_diff + 1 * D + d;\n      T* o_diff = X_diff + 2 * D + d;\n      T* g_diff = X_diff + 3 * D + d;\n\n      if (!valid) {\n        if (drop_states) {\n          *h_prev_diff = 0;\n          *c_prev_diff = 0;\n        } else {\n          *h_prev_diff = H_diff[d];\n          *c_prev_diff = C_diff[d];\n        }\n        *i_diff = 0;\n        *f_diff = 0;\n        *o_diff = 0;\n        *g_diff = 0;\n      } else {\n        const T i = sigmoid(X[d]);\n        const T f = sigmoid(X[1 * D + d] + convert::To<float, T>(forget_bias));\n        const T o = sigmoid(X[2 * D + d]);\n        const T g = host_tanh(X[3 * D + d]);\n        const T c_prev = C_prev[d];\n        const T c = C[d];\n        const T host_tanh_c = host_tanh(c);\n        const T c_term_diff = C_diff[d] + H_diff[d] * o * (1 - host_tanh_c * host_tanh_c);\n        *c_prev_diff = c_term_diff * f;\n        *h_prev_diff = 0; // not used in 'valid' case\n        *i_diff = c_term_diff * g * i * (1 - i);\n        *f_diff = c_term_diff * c_prev * f * (1 - f);\n        *o_diff = H_diff[d] * host_tanh_c * o * (1 - o);\n        *g_diff = c_term_diff * i * (1 - g * g);\n      }\n    }\n    C_prev += D;\n    X += 4 * D;\n    C += D;\n    H += D;\n    C_diff += D;\n    H_diff += D;\n    X_diff += 4 * D;\n    H_prev_diff += D;\n    C_prev_diff += D;\n  }\n}\n} // namespace detail\n\ntemplate <typename Context>\nclass LSTMUnitOp : public Operator<Context> {\n public:\n  LSTMUnitOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        forget_bias_(\n            static_cast<float>(OperatorBase::template GetSingleArgument<float>(\n                \"forget_bias\",\n                0.0))),\n        drop_states_(OperatorBase::template GetSingleArgument<bool>(\n            \"drop_states\",\n            false)) {}\n  
USE_OPERATOR_CONTEXT_FUNCTIONS;\n  using Operator<Context>::Operator;\n\n  template <typename T>\n  bool DoRunWithType() {\n    // Extract N\n    const auto N = Input(CELL_T_M_1).dim(1);\n\n    // Gates: 1xNxG\n    const auto G = Input(GATES).dim(2);\n    const auto D = Input(CELL_T_M_1).dim(2);\n\n    CAFFE_ENFORCE_EQ(4 * D, G);\n    const auto* H_prev = Input(HIDDEN_T_M_1).template data<T>();\n    const auto* C_prev = Input(CELL_T_M_1).template data<T>();\n    const auto* X = Input(GATES).template data<T>();\n    CAFFE_ENFORCE_EQ(Input(SEQ_LENGTHS).size(), N);\n    const auto* seqLengths = Input(SEQ_LENGTHS).template data<int32_t>();\n    const auto t = OperatorBase::Input<Tensor<CPUContext>>(TIMESTEP)\n                       .template data<int32_t>()[0];\n    Output(CELL_T)->ResizeLike(Input(CELL_T_M_1));\n    auto* C = Output(CELL_T)->template mutable_data<T>();\n    Output(HIDDEN_T)->ResizeLike(Input(CELL_T_M_1));\n    auto* H = Output(HIDDEN_T)->template mutable_data<T>();\n    detail::LSTMUnit<T, Context>(\n        N,\n        D,\n        t,\n        H_prev,\n        C_prev,\n        X,\n        seqLengths,\n        drop_states_,\n        C,\n        H,\n        forget_bias_,\n        &context_);\n    return true;\n  }\n\n  bool RunOnDevice() override {\n    return DoRunWithType<float>();\n  }\n\n protected:\n  INPUT_TAGS(HIDDEN_T_M_1, CELL_T_M_1, GATES, SEQ_LENGTHS, TIMESTEP);\n  OUTPUT_TAGS(HIDDEN_T, CELL_T);\n\n  float forget_bias_;\n\n private:\n  bool drop_states_;\n};\n\ntemplate <typename Context>\nclass LSTMUnitGradientOp : public Operator<Context> {\n public:\n  LSTMUnitGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        forget_bias_(\n            static_cast<float>(OperatorBase::template GetSingleArgument<float>(\n                \"forget_bias\",\n                0.0))),\n        drop_states_(OperatorBase::template GetSingleArgument<bool>(\n            \"drop_states\",\n            
false)) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  template <typename T>\n  bool DoRunWithType() {\n    // Extract N\n    const auto N = Input(CELL_T_M_1).dim(1);\n\n    // Gates: 1xNxG\n    const auto G = Input(GATES).dim(2);\n    const auto D = Input(CELL_T_M_1).dim(2);\n\n    CAFFE_ENFORCE_EQ(4 * D, G);\n    const auto* C_prev = Input(CELL_T_M_1).template data<T>();\n    const auto* X = Input(GATES).template data<T>();\n    const auto t = OperatorBase::Input<Tensor<CPUContext>>(TIMESTEP)\n                       .template data<int32_t>()[0];\n    const auto* C = Input(CELL_T).template data<T>();\n    const auto* H = Input(HIDDEN_T).template data<T>();\n    const auto* C_diff = Input(CELL_T_GRAD).template data<T>();\n    const auto* H_diff = Input(HIDDEN_T_GRAD).template data<T>();\n    const auto* seqLengths = Input(SEQ_LENGTHS).template data<int32_t>();\n    Output(HIDDEN_T_M_1_GRAD)->ResizeLike(Input(HIDDEN_T_M_1));\n    auto* H_prev_diff = Output(HIDDEN_T_M_1_GRAD)->template mutable_data<T>();\n    Output(CELL_T_M_1_GRAD)->ResizeLike(Input(CELL_T_M_1));\n    auto* C_prev_diff = Output(CELL_T_M_1_GRAD)->template mutable_data<T>();\n    Output(GATES_GRAD)->ResizeLike(Input(GATES));\n    auto* X_diff = Output(GATES_GRAD)->template mutable_data<T>();\n\n    detail::LSTMUnitGradient<T, Context>(\n        N,\n        D,\n        t,\n        C_prev,\n        X,\n        seqLengths,\n        C,\n        H,\n        C_diff,\n        H_diff,\n        drop_states_,\n        H_prev_diff,\n        C_prev_diff,\n        X_diff,\n        forget_bias_,\n        &context_);\n    return true;\n  }\n\n  bool RunOnDevice() override {\n    return DoRunWithType<float>();\n  }\n\n protected:\n  INPUT_TAGS(\n      HIDDEN_T_M_1,\n      CELL_T_M_1,\n      GATES,\n      SEQ_LENGTHS,\n      TIMESTEP,\n      HIDDEN_T,\n      CELL_T,\n      HIDDEN_T_GRAD,\n      CELL_T_GRAD, );\n  OUTPUT_TAGS(HIDDEN_T_M_1_GRAD, CELL_T_M_1_GRAD, GATES_GRAD);\n\n  float forget_bias_;\n\n private:\n  bool 
drop_states_;\n};\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_LSTM_UNIT_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/map_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_MAP_OPS_H_\n#define CAFFE2_OPERATORS_MAP_OPS_H_\n\n#include <algorithm>\n#include <iterator>\n#include <string>\n#include <typeinfo>\n#include <unordered_map>\n#include <utility>\n#include <vector>\n\n#include \"caffe2/core/blob_serialization.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T>\nstruct TypeNameTraits {\n  static constexpr const char* name = \"unknown\";\n};\n\ntemplate <>\nstruct TypeNameTraits<int64_t> {\n  static constexpr const char* name = \"int64_t\";\n};\n\ntemplate <>\nstruct TypeNameTraits<int32_t> {\n  static constexpr const char* name = \"int32_t\";\n};\n\ntemplate <typename KEY_T, typename VALUE_T>\nstruct MapTypeTraits {\n  using MapType = std::unordered_map<KEY_T, VALUE_T>;\n  static string MapTypeName() {\n    return string(\"(std::unordered_map<\") + TypeNameTraits<KEY_T>::name + \", \" +\n        TypeNameTraits<VALUE_T>::name + \">)\";\n  }\n};\n\nusing MapType64To64 = MapTypeTraits<int64_t, int64_t>::MapType;\nusing MapType64To32 = MapTypeTraits<int64_t, int32_t>::MapType;\nusing MapType32To32 = MapTypeTraits<int32_t, int32_t>::MapType;\nusing MapType32To64 = MapTypeTraits<int32_t, int64_t>::MapType;\n\ntemplate <class Context>\nclass CreateMapOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  CreateMapOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n  ~CreateMapOp() {}\n\n  bool RunOnDevice() override {\n    TensorProto::DataType key_dtype =\n        static_cast<TensorProto::DataType>(OperatorBase::GetSingleArgument<int>(\n            \"key_dtype\", TensorProto_DataType_INT32));\n\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, DataTypeToTypeMeta(key_dtype));\n  }\n\n  template <typename KEY_T>\n  bool DoRunWithType() {\n    TensorProto::DataType value_dtype =\n        
static_cast<TensorProto::DataType>(OperatorBase::GetSingleArgument<int>(\n            \"value_dtype\", TensorProto_DataType_INT32));\n\n    return DispatchHelper<\n        TensorTypes2<int32_t, int64_t, GenericTensorImplementation>,\n        KEY_T>::call(this, DataTypeToTypeMeta(value_dtype));\n  }\n\n  template <typename KEY_T, typename VALUE_T>\n  bool DoRunWithType2() {\n    // clear to make sure the map is empty\n    OperatorBase::Output<typename MapTypeTraits<KEY_T, VALUE_T>::MapType>(MAP)\n        ->clear();\n    return true;\n  }\n\n  template <typename KEY_T>\n  bool DoRunWithOtherType2() {\n    TensorProto::DataType value_dtype =\n        static_cast<TensorProto::DataType>(OperatorBase::GetSingleArgument<int>(\n            \"value_dtype\", TensorProto_DataType_INT32));\n\n    CAFFE_THROW(\n        \"CreateMap is not implemented on value tensor of type \",\n        DataTypeToTypeMeta(value_dtype).name(),\n        \"Consider adding it a type in the list DispatchHelper\");\n  }\n\n  OUTPUT_TAGS(MAP);\n};\n\ntemplate <class Context>\nclass KeyValueToMapOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  KeyValueToMapOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n  ~KeyValueToMapOp() {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, Input(KEYS));\n  }\n\n  template <typename KEY_T>\n  bool DoRunWithType() {\n    return DispatchHelper<\n        TensorTypes2<int32_t, int64_t, GenericTensorImplementation>,\n        KEY_T>::call(this, Input(VALUES));\n  }\n\n  template <typename KEY_T, typename VALUE_T>\n  bool DoRunWithType2() {\n    using MapType = typename MapTypeTraits<KEY_T, VALUE_T>::MapType;\n    const auto& key_input = Input(KEYS);\n    const auto& value_input = Input(VALUES);\n\n    CAFFE_ENFORCE_EQ(key_input.size(), value_input.size());\n\n    auto* key_data = key_input.template data<KEY_T>();\n    
auto* value_data = value_input.template data<VALUE_T>();\n\n    auto* map_data = OperatorBase::Output<MapType>(MAP);\n\n    for (int i = 0; i < key_input.size(); ++i) {\n      map_data->emplace(key_data[i], value_data[i]);\n    }\n\n    return true;\n  }\n\n  template <typename KEY_T>\n  bool DoRunWithOtherType2() {\n    CAFFE_THROW(\n        \"KeyValueToMap is not implemented on value tensor of type \",\n        Input(VALUES).meta().name(),\n        \"Consider adding it a type in the list DispatchHelper\");\n  }\n\n  INPUT_TAGS(KEYS, VALUES);\n  OUTPUT_TAGS(MAP);\n};\n\ntemplate <class Context>\nclass MapToKeyValueOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  MapToKeyValueOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n  ~MapToKeyValueOp() {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<\n        MapType64To64,\n        MapType64To32,\n        MapType32To32,\n        MapType32To64>>::call(this, OperatorBase::InputBlob(MAP));\n  }\n\n  template <typename MAP_T>\n  bool DoRunWithType() {\n    using key_type = typename MAP_T::key_type;\n    using mapped_type = typename MAP_T::mapped_type;\n    auto& map_data = OperatorBase::Input<MAP_T>(MAP);\n    auto* key_output = Output(KEYS);\n    auto* value_output = Output(VALUES);\n    key_output->Resize(map_data.size());\n    value_output->Resize(map_data.size());\n    auto* key_data = key_output->template mutable_data<key_type>();\n    auto* value_data = value_output->template mutable_data<mapped_type>();\n\n    for (const auto& it : map_data) {\n      *key_data = it.first;\n      *value_data = it.second;\n      key_data++;\n      value_data++;\n    }\n\n    return true;\n  }\n\n  INPUT_TAGS(MAP);\n  OUTPUT_TAGS(KEYS, VALUES);\n};\n\ntemplate <typename KEY_T, typename VALUE_T>\nclass MapSerializer : public BlobSerializerBase {\n public:\n  using MapType = typename MapTypeTraits<KEY_T, 
VALUE_T>::MapType;\n\n  void Serialize(\n      const Blob& blob,\n      const string& name,\n      BlobSerializerBase::SerializationAcceptor acceptor) override {\n    CAFFE_ENFORCE(blob.IsType<MapType>());\n    const MapType& map_data = blob.template Get<MapType>();\n    TIndex sz = map_data.size();\n    Tensor<CPUContext> key_tensor;\n    key_tensor.Resize(sz);\n    Tensor<CPUContext> value_tensor;\n    value_tensor.Resize(sz);\n    auto* key_data = key_tensor.mutable_data<KEY_T>();\n    auto* value_data = value_tensor.mutable_data<VALUE_T>();\n    for (const auto& it : map_data) {\n      *key_data = it.first;\n      *value_data = it.second;\n      key_data++;\n      value_data++;\n    }\n\n    TensorProtos tensor_protos;\n    TensorSerializer<CPUContext> ser;\n    ser.Serialize(\n        key_tensor, name, tensor_protos.add_protos(), 0, key_tensor.size());\n    ser.Serialize(\n        value_tensor, name, tensor_protos.add_protos(), 0, value_tensor.size());\n\n    BlobProto blob_proto;\n    blob_proto.set_name(name);\n    blob_proto.set_type(MapTypeTraits<KEY_T, VALUE_T>::MapTypeName());\n    blob_proto.set_content(tensor_protos.SerializeAsString());\n    acceptor(name, blob_proto.SerializeAsString());\n  }\n};\n\ntemplate <typename KEY_T, typename VALUE_T>\nclass MapDeserializer : public BlobDeserializerBase {\n public:\n  using MapType = typename MapTypeTraits<KEY_T, VALUE_T>::MapType;\n\n  void Deserialize(const BlobProto& proto, Blob* blob) override {\n    TensorProtos tensor_protos;\n    CAFFE_ENFORCE(\n        tensor_protos.ParseFromString(proto.content()),\n        \"Fail to parse TensorProtos\");\n    TensorDeserializer<CPUContext> deser;\n    Tensor<CPUContext> key_tensor, value_tensor;\n    deser.Deserialize(tensor_protos.protos(0), &key_tensor);\n    deser.Deserialize(tensor_protos.protos(1), &value_tensor);\n    auto* key_data = key_tensor.data<KEY_T>();\n    auto* value_data = value_tensor.data<VALUE_T>();\n\n    auto* map_ptr = blob->template 
GetMutable<MapType>();\n    for (int i = 0; i < key_tensor.size(); ++i) {\n      map_ptr->emplace(key_data[i], value_data[i]);\n    }\n  }\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_MAP_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/margin_ranking_criterion_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_MARGIN_RANKING_CRITERION_OP_H_\n#define CAFFE2_OPERATORS_MARGIN_RANKING_CRITERION_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass MarginRankingCriterionOp final : public Operator<Context> {\n public:\n  MarginRankingCriterionOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        OP_SINGLE_ARG(float, \"margin\", margin_, 1.0) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  float margin_;\n};\n\ntemplate <class Context>\nclass MarginRankingCriterionGradientOp final : public Operator<Context> {\n public:\n  MarginRankingCriterionGradientOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        OP_SINGLE_ARG(float, \"margin\", margin_, 1.0) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  float margin_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_MARGIN_RANKING_CRITERION_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/math_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_MATH_OP_H_\n#define CAFFE2_OPERATORS_MATH_OP_H_\n\n#include \"caffe2/core/common_omp.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/operators/elementwise_op.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nstruct PowFunctor {\n  explicit PowFunctor(OperatorBase& op) {\n    exponent_ = op.GetSingleArgument<float>(\"exponent\", 0);\n  }\n\n  template <typename T, class Context>\n  inline void\n  operator()(const int n, const T* x, T* y, Context* device_context) {\n    math::Powx<float, Context>(n, x, exponent_, y, device_context);\n  }\n\n  float exponent_;\n};\n}\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/matmul_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_MATMUL_OP_H_\n#define CAFFE2_OPERATORS_MATMUL_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context, class Engine = DefaultEngine>\nclass MatMulOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  MatMulOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        trans_a_(OperatorBase::GetSingleArgument<int>(\"trans_a\", 0)),\n        trans_b_(OperatorBase::GetSingleArgument<int>(\"trans_b\", 0)) {}\n  ~MatMulOp() {}\n\n  bool RunOnDevice() override {\n    const auto& A = Input(0);\n    const auto& B = Input(1);\n    auto* Y = Output(0);\n\n    CAFFE_ENFORCE(A.ndim() == 2, A.ndim());\n    CAFFE_ENFORCE(B.ndim() == 2, B.ndim());\n\n    int a_dim0, a_dim1, b_dim0, b_dim1;\n\n    if (trans_a_) {\n      a_dim0 = A.dim32(1);\n      a_dim1 = A.dim32(0);\n    } else {\n      a_dim0 = A.dim32(0);\n      a_dim1 = A.dim32(1);\n    }\n\n    if (trans_b_) {\n      b_dim0 = B.dim32(1);\n      b_dim1 = B.dim32(0);\n    } else {\n      b_dim0 = B.dim32(0);\n      b_dim1 = B.dim32(1);\n    }\n\n    auto dimErrorString = [&]() {\n      return MakeString(\n          \"Dimension mismatch: \",\n          trans_a_ ? \"trans(A): \" : \"A: \",\n          a_dim0,\n          \" \",\n          a_dim1,\n          trans_b_ ? \", trans(B): \" : \", B: \",\n          b_dim0,\n          \" \",\n          b_dim1);\n    };\n    // Error checking\n    CAFFE_ENFORCE(a_dim1 == b_dim0, dimErrorString());\n\n    Y_shape_cache_[0] = a_dim0;\n    Y_shape_cache_[1] = b_dim1;\n    Y->Resize(Y_shape_cache_);\n    CAFFE_ENFORCE(a_dim0 * b_dim1 == Y->size(), dimErrorString());\n\n    // Y = A * B\n    math::Gemm<T, Context, Engine>(\n        trans_a_ ? CblasTrans : CblasNoTrans,\n        trans_b_ ? 
CblasTrans : CblasNoTrans,\n        a_dim0,\n        b_dim1,\n        a_dim1,\n        1,\n        A.template data<T>(),\n        B.template data<T>(),\n        0,\n        Y->template mutable_data<T>(),\n        &context_);\n    return true;\n  }\n\n protected:\n  // A local vector to cache the output shape so we don't need to recreate\n  // a vector object every time we run Run().\n  vector<TIndex> Y_shape_cache_{0, 0};\n  bool trans_a_;\n  bool trans_b_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_MATMUL_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/max_pool_with_index.h",
    "content": "#ifndef CAFFE2_OPERATORS_MAX_POOL_WITH_INDEX_H_\n#define CAFFE2_OPERATORS_MAX_POOL_WITH_INDEX_H_\n\n#include <cfloat>\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/conv_pool_op_base.h\"\n#include \"caffe2/operators/pool_op.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nclass MaxPoolWithIndexOp final : public ConvPoolOpBase<CUDAContext> {\n public:\n  USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);\n  MaxPoolWithIndexOp(const OperatorDef& operator_def, Workspace* ws)\n      : ConvPoolOpBase<CUDAContext>(operator_def, ws) {}\n  ~MaxPoolWithIndexOp() {}\n\n  template <typename T>\n  bool DoRunWithType();\n\n  bool RunOnDevice() override;\n\n  // Input: X\n  // Output: Y, mask\n};\n\nclass MaxPoolWithIndexGradientOp final : public ConvPoolOpBase<CUDAContext> {\n public:\n  USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);\n  MaxPoolWithIndexGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : ConvPoolOpBase<CUDAContext>(operator_def, ws) {}\n  ~MaxPoolWithIndexGradientOp() {}\n\n  template <typename T>\n  bool DoRunWithType();\n\n  bool RunOnDevice() override;\n\n  // Input: X, dY, mask\n  // Output: dX\n};\n\n}; // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_MAX_POOL_WITH_INDEX_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/merge_id_lists_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_MERGE_ID_LISTS_OP_H_\n#define CAFFE2_OPERATORS_MERGE_ID_LISTS_OP_H_\n\n#include <set>\n#include <vector>\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass MergeIdListsOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(MergeIdListsOp);\n\n  template <typename T>\n  bool DoRunWithType() {\n    auto& first_lengths = Input(0);\n    CAFFE_ENFORCE_EQ(first_lengths.ndim(), 1, \"LENGTHS should be 1-D\");\n    const auto batch_size = first_lengths.size();\n\n    auto* out_lengths = Output(0);\n    out_lengths->ResizeLike(first_lengths);\n\n    auto* out_lengths_data = out_lengths->template mutable_data<int32_t>();\n\n    /**\n     * Loop to figure out how much space to reserve for output\n     * and perform checks.\n     */\n    auto M = 0;\n    for (size_t i = 0; i < InputSize(); i += 2) {\n      auto& lengths = Input(i);\n      CAFFE_ENFORCE_EQ(lengths.ndim(), 1, \"LENGTHS should be 1-D\");\n      CAFFE_ENFORCE_EQ(lengths.size(), batch_size, \"LENGTHS should be equal\");\n      auto& values = Input(i + 1);\n      CAFFE_ENFORCE_EQ(values.ndim(), 1, \"VALUES should be 1-D\");\n      M += values.size();\n    }\n\n    auto* out_values = Output(1);\n    out_values->Resize(M);\n\n    T* out_values_data = out_values->template mutable_data<T>();\n    auto pos = 0;\n\n    // TODO(badri): Use unordered_set if performance is an issue\n    std::set<T> deduped;\n    std::vector<int> offsets(InputSize(), 0);\n    for (auto sample = 0; sample < batch_size; sample++) {\n      for (size_t i = 0; i < InputSize(); i += 2) {\n        auto& lengths = Input(i);\n        const auto* lengths_data = lengths.template data<int32_t>();\n\n        auto& values = Input(i + 1);\n        const T* values_data = values.template data<T>();\n        const auto length = lengths_data[sample];\n\n        for (auto j = offsets[i]; j < 
offsets[i] + length; j++) {\n          deduped.insert(values_data[j]);\n        }\n        offsets[i] += length;\n      }\n      for (auto val : deduped) {\n        out_values_data[pos++] = val;\n      }\n      out_lengths_data[sample] = deduped.size();\n      deduped.clear();\n    }\n    out_values->Resize(pos);\n    return true;\n  }\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(1));\n  }\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_MERGE_ID_LISTS_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/multi_class_accuracy_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_MULTI_CLASS_ACCURACY_OP_H_\n#define CAFFE2_OPERATORS_MULTI_CLASS_ACCURACY_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass MultiClassAccuracyOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(MultiClassAccuracyOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n protected:\n  INPUT_TAGS(PREDICTION, LABEL);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_MULTI_CLASS_ACCURACY_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/no_default_engine_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_NO_DEFAULT_ENGINE_OP_H_\n#define CAFFE2_OPERATORS_NO_DEFAULT_ENGINE_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\n/**\n * A helper class to denote that an op does not have a default engine.\n *\n * NoDefaultEngineOp is a helper class that one can use to denote that a\n * specific operator is not intended to be called without an explicit engine\n * given. This is the case for e.g. the communication operators where one has\n * to specify a backend (like MPI or ZEROMQ).\n */\ntemplate <class Context>\nclass NoDefaultEngineOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(NoDefaultEngineOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    CAFFE_THROW(\n        \"The operator \",\n        this->debug_def().type(),\n        \" does not have a default engine implementation. Please \"\n        \"specify an engine explicitly for this operator.\");\n  }\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_NO_DEFAULT_ENGINE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/normalize_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_NORMALIZE_OP_H_\n#define CAFFE2_OPERATORS_NORMALIZE_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass NormalizeOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  NormalizeOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws) {}\n\n  bool RunOnDevice() override {\n    const auto& x = Input(0);\n    auto* y = Output(0);\n    const auto* xData = x.template data<T>();\n    y->ResizeLike(x);\n    auto* yData = y->template mutable_data<T>();\n\n    const auto canonical_axis = x.canonical_axis_index(\n        OperatorBase::GetSingleArgument<int>(\"axis\", -1));\n    const int m = x.dim32(canonical_axis);\n    const int n = x.size() / m;\n    const int sf = x.size_from_dim(canonical_axis + 1);\n    DoNormalize(xData, yData, m, n, sf);\n    return true;\n  }\n\n private:\n  void\n  DoNormalize(const T* xData, T* yData, const int m, const int n, const int sf);\n};\n\ntemplate <typename T, class Context>\nclass NormalizeGradientOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  NormalizeGradientOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws) {}\n\n  bool RunOnDevice() override {\n    const auto& x = Input(0);\n    const auto& gOut = Input(GRAD_OUT);\n    auto* gIn = Output(GRAD_IN);\n    gIn->ResizeLike(gOut);\n\n    const auto* xData = x.template data<T>();\n    const auto* gOutData = gOut.template data<T>();\n    auto* gInData = gIn->template mutable_data<T>();\n\n    const auto canonical_axis = x.canonical_axis_index(\n        OperatorBase::GetSingleArgument<int>(\"axis\", -1));\n    const int m = x.dim32(canonical_axis);\n    const int n = x.size() / m;\n    const int sf = x.size_from_dim(canonical_axis + 1);\n    DoNormalize(xData, gOutData, gInData, m, n, sf);\n    return 
true;\n  }\n\n private:\n  void DoNormalize(\n      const T* xData,\n      const T* gOutData,\n      T* gInData,\n      const int m,\n      const int n,\n      const int sf);\n\n  INPUT_TAGS(INPUT, GRAD_OUT);\n  OUTPUT_TAGS(GRAD_IN);\n};\n\ntemplate <typename T, class Context>\nclass NormalizeL1Op final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(NormalizeL1Op)\n\n  bool RunOnDevice() override {\n    const auto& x = Input(0);\n    auto* y = Output(0);\n    const auto* xData = x.template data<T>();\n    y->ResizeLike(x);\n    auto* yData = y->template mutable_data<T>();\n\n    const auto canonical_axis = x.canonical_axis_index(\n        OperatorBase::GetSingleArgument<int>(\"axis\", -1));\n    const int m = x.dim32(canonical_axis);\n    const int n = x.size() / m;\n    const int sf = x.size_from_dim(canonical_axis + 1);\n    DoNormalize(xData, yData, m, n, sf);\n    return true;\n  }\n\n private:\n  void\n  DoNormalize(const T* xData, T* yData, const int m, const int n, const int sf);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_NORMALIZE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/one_hot_ops.h",
    "content": "#ifndef CAFFE_OPERATORS_ONE_HOT_OPS_H_\n#define CAFFE_OPERATORS_ONE_HOT_OPS_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass OneHotOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  OneHotOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    auto& indices = Input(0);\n    CAFFE_ENFORCE_EQ(\n        indices.ndim(),\n        1,\n        \"indices input must be 1D tensor of data type TIndex\");\n\n    // Index size input must be in CPU context\n    auto& index_size_tensor = OperatorBase::Input<Tensor<CPUContext>>(1);\n    CAFFE_ENFORCE_EQ(\n        index_size_tensor.size(),\n        1,\n        \"index_size_tensor input must be scalar of data type TIndex\");\n\n    auto batch_size = indices.size();\n    auto index_size = *index_size_tensor.template data<TIndex>();\n    auto one_hots = Output(0);\n    one_hots->Resize(batch_size, index_size);\n    auto output_size = one_hots->size();\n    if (output_size == 0) {\n      return true;\n    }\n\n    DoOneHotOp(batch_size, index_size, indices, one_hots);\n    return true;\n  }\n\n protected:\n  void DoOneHotOp(\n      TIndex batch_size,\n      TIndex index_size,\n      const Tensor<Context>& indices,\n      Tensor<Context>* output);\n};\n\ntemplate <class Context>\nclass BatchOneHotOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  BatchOneHotOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(X));\n  }\n\n  template <typename T>\n  bool DoRunWithType();\n\n protected:\n  INPUT_TAGS(X, LENS, VALS);\n  OUTPUT_TAGS(ONE_HOT);\n};\n\n} // namespace 
caffe2\n\n#endif // CAFFE_OPERATORS_ONE_HOT_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/operator_fallback_gpu.h",
    "content": "#ifndef CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_\n#define CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nnamespace caffe2 {\n\n/**\n * @brief A templated class to allow one to wrap a CPU operator as a CUDA\n * operator.\n *\n * This class can be used when one does not have the CUDA implementation ready\n * yet for an operator. Essentially, what this op does is to automatically\n * deal with data copy for you. Plausibly, this causes a lot of overhead and\n * is not optimal, so you should use this operator mostly for quick prototyping\n * purpose.\n *\n * All the input and output of the original operator should be TensorCPU.\n *\n * Example usage: if you have a class MyMagicOp that is CPU based, and you use\n * the registration code\n *     REGISTER_CPU_OPERATOR(MyMagic, MyMagicOp);\n * to register the CPU side, you can create its corresponding GPU operator\n * (with performance hits of course) via\n *     REGISTER_CUDA_OPERATOR(MyMagic,\n *                            GPUFallbackOp<MyMagicOp>);\n *\n * Advanced usage: if you want to have some specific outputs never copied, you\n * can use the SkipOutputCopy template argument to do that. 
For example, if\n * MyMagic produces two outputs and the first output is always going to live on\n * the CPU, you can do\n *     REGISTER_CUDA_OPERATOR(MyMagic,\n *                            GPUFallbackOp<MyMagicOp, SkipIndices<0>>);\n */\ntemplate <class CPUOp, typename SkipOutputCopy = SkipIndices<>>\nclass GPUFallbackOp final : public Operator<CUDAContext> {\n public:\n  USE_OPERATOR_FUNCTIONS(CUDAContext);\n  GPUFallbackOp(const OperatorDef& def, Workspace* ws)\n      : Operator<CUDAContext>(def, ws) {\n    CAFFE_ENFORCE_EQ(def.device_option().device_type(), CUDA);\n    OperatorDef base_def_(def);\n    // base_def_ runs on CPU, so we will set its device option to CPU.\n    base_def_.clear_device_option();\n    base_def_.mutable_device_option()->set_device_type(CPU);\n    // Set up the symbols for the local workspace.\n    for (const string& name : def.input()) {\n      local_input_blobs_.push_back(local_ws_.CreateBlob(name));\n      CHECK_NOTNULL(local_input_blobs_.back());\n    }\n    base_op_.reset(new CPUOp(base_def_, &local_ws_));\n    for (const string& name : def.output()) {\n      local_output_blobs_.push_back(local_ws_.GetBlob(name));\n      CHECK_NOTNULL(local_output_blobs_.back());\n    }\n  }\n\n  bool RunOnDevice() override {\n    bool need_sync = false;\n    for (int i = 0; i < InputSize(); ++i) {\n      if (OperatorBase::InputIsType<TensorCUDA>(i)) {\n        local_input_blobs_[i]->template GetMutable<TensorCPU>()->CopyFrom(\n            Input(i), &context_);\n        need_sync = true;\n      } else {\n        VLOG(1) << \"Input \" << i << \" is not TensorCUDA. 
Skipping copy.\";\n        // Note(jiayq): This removes a const but conceptually\n        // local_input_blobs will only be used as const blob input for the\n        // base op so we are still fine.\n        local_input_blobs_[i]->ShareExternal(\n            const_cast<void*>(OperatorBase::Inputs()[i]->GetRaw()),\n            OperatorBase::Inputs()[i]->meta());\n      }\n    }\n\n    // Sync to make sure copies are done.\n    if (need_sync) {\n      context_.FinishDeviceComputation();\n    }\n\n    if (!base_op_->Run()) {\n      LOG(ERROR) << \"Base op run failed in GPUFallbackOp. Def: \"\n                 << ProtoDebugString(this->debug_def());\n      return false;\n    }\n    for (int i = 0; i < OutputSize(); ++i) {\n      if (SkipOutputCopy::Contains(i)) {\n        VLOG(1) << \"Copy output: index \" << i << \" skipped.\";\n        continue;\n      }\n      CAFFE_ENFORCE(\n          local_output_blobs_[i]->template IsType<TensorCPU>(),\n          \"GPU fallback op currently does not support non-TensorCPU \"\n          \"output type who needs copying.\");\n      Output(i)->CopyFrom(\n          local_output_blobs_[i]->template Get<TensorCPU>(), &context_);\n    }\n    return true;\n  }\n\n protected:\n  Workspace local_ws_;\n  vector<Blob*> local_input_blobs_;\n  vector<Blob*> local_output_blobs_;\n  std::unique_ptr<CPUOp> base_op_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/order_switch_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_ORDER_SWITCH_OPS_H_\n#define CAFFE2_OPERATORS_ORDER_SWITCH_OPS_H_\n\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\n// Note(Yangqing): I think it is possible to do a more general swapaxes operator\n// but I am a little afraid of going down that general path. Only implementing\n// the two actually needed ones here.\n\ntemplate <typename T, class Context>\nclass NHWC2NCHWOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(NHWC2NCHWOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n protected:\n};\n\ntemplate <typename T, class Context>\nclass NCHW2NHWCOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(NCHW2NHWCOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n protected:\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_ORDER_SWITCH_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/pack_rnn_sequence_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_PACK_RNN_SEQUENCE_OP_H_\n#define CAFFE2_OPERATORS_PACK_RNN_SEQUENCE_OP_H_\n\n#include <algorithm>\n#include <vector>\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context, bool Forward>\nclass PackRNNSequenceOpBase : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  PackRNNSequenceOpBase(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t, float, double>>::call(\n        this, Input(0));\n  }\n\n  template <typename ValT>\n  bool DoRunWithType() {\n    // The value is copied from the sequence to the pack\n    // if Forward is true, and vice versa\n    int dim_offset = Forward ? 1 : 2;\n    auto& values = Input(0);\n    CAFFE_ENFORCE_GT(values.ndim(), dim_offset);\n\n    // block_size is the size for each individual feature\n    TIndex block_size = values.size_from_dim(dim_offset);\n    auto values_vec = values.template data<ValT>();\n\n    auto& lengths = Input(LENGTHS);\n    CAFFE_ENFORCE_EQ(lengths.ndim(), 1);\n    const auto cols = lengths.size();\n    const int32_t* lengths_vec = lengths.template data<int32_t>();\n    // the total number of rows is defined as the max number from lengths\n    // if when the lengths is empty, we set rows = 0 to support zero lengths\n    const auto rows =\n        cols ? 
*std::max_element(lengths_vec, lengths_vec + cols) : 0;\n    CAFFE_ENFORCE_GE(rows, 0);\n    int length_sum = 0;\n    if (cols > 0) {\n      math::Sum<int, Context>(cols, lengths_vec, &length_sum, &context_);\n    }\n\n    vector<TIndex> shape;\n    // the output shape is rows * cols for the pack,\n    // or length_sum for the sequence\n    if (Forward) {\n      shape.push_back(rows);\n      shape.push_back(cols);\n    } else {\n      shape.push_back(length_sum);\n    }\n    // insert the dim for the feature\n    shape.insert(\n        shape.end(), values.dims().begin() + dim_offset, values.dims().end());\n\n    auto* output = Output(OUTPUTVALUE);\n    output->Resize(shape);\n\n    auto output_data = output->template mutable_data<ValT>();\n    // initialize output_data with zero, as it is the default value for padding\n    // when certain length is smaller than rows\n    math::Set<ValT, Context>(output->size(), 0, output_data, &context_);\n\n    int32_t offset = 0;\n    for (int c = 0; c < cols; c++) {\n      for (int r = 0; r < lengths_vec[c]; r++) {\n        auto input_offset = Forward ? (offset + r) : (r * cols + c);\n        auto output_offset = Forward ? (r * cols + c) : (offset + r);\n        context_.template CopyItems<Context, Context>(\n            values.meta(),\n            block_size,\n            values_vec + input_offset * block_size,\n            output_data + output_offset * block_size);\n      }\n      offset += lengths_vec[c];\n    }\n    return true;\n  }\n\n private:\n  INPUT_TAGS(INPUTVALUE, LENGTHS);\n  OUTPUT_TAGS(OUTPUTVALUE);\n};\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_PACK_RNN_SEQUENCE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/pack_segments.h",
    "content": "#ifndef CAFFE2_OPERATORS_PACK_SEGMENTS_H_\n#define CAFFE2_OPERATORS_PACK_SEGMENTS_H_\n\n#include <atomic>\n#include <limits>\n#include <mutex>\n#include <unordered_map>\n#include <vector>\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass PackSegmentsOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  // USE_SIMPLE_CTOR_DTOR(PackSegmentsOp)\n  USE_DISPATCH_HELPER;\n\n  PackSegmentsOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        pad_minf_(OperatorBase::GetSingleArgument<bool>(\"pad_minf\", false)) {\n    if (pad_minf_) {\n      padding_ = -1.0 * std::numeric_limits<float>::infinity();\n    } else {\n      padding_ = 0;\n    }\n  }\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int, long>>::call(this, Input(LENGTHS));\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    const auto& data = Input(DATA);\n    const auto& lengths = Input(LENGTHS);\n    auto* output = Output(0);\n\n    CAFFE_ENFORCE(data.ndim() >= 1, \"DATA should be at least 1-D\");\n    CAFFE_ENFORCE(lengths.ndim() == 1, \"LENGTH should be 1-D\");\n\n    // Find the length of the longest sequence.\n    const T* l = lengths.template data<T>();\n    T max_length = 0;\n    for (T i = 0; i < lengths.dim(0); ++i) {\n      max_length = std::max(max_length, l[i]);\n    }\n\n    auto shape = data.dims(); // Shape of output is batch_size x max_len x ...\n    shape[0] = max_length;\n    shape.insert(shape.begin(), lengths.size());\n    output->Resize(shape);\n    // create output tensor\n    auto* out = static_cast<char*>(output->raw_mutable_data(data.meta()));\n\n    if (!data.dim(0)) {\n      // Return empty output (with the proper shape)\n      return true;\n    }\n\n    // Do padding\n    if (output->template IsType<float>()) {\n      math::Set<float, 
Context>(\n          output->size(),\n          padding_,\n          output->template mutable_data<float>(),\n          &context_);\n    }\n\n    int block_size = data.size() / data.dim(0);\n    int block_bytesize = data.nbytes() / data.dim(0);\n    const auto* d = static_cast<const char*>(data.raw_data());\n    int start = 0;\n    for (int i = 0; i < lengths.dim(0); ++i) {\n      context_.template CopyItems<Context, Context>(\n          data.meta(),\n          l[i] * block_size,\n          d + block_bytesize * start,\n          out + block_bytesize * max_length * i);\n      start += l[i];\n    }\n\n    return true;\n  }\n\n  INPUT_TAGS(LENGTHS, DATA);\n\n private:\n  bool pad_minf_;\n  float padding_;\n};\n\ntemplate <class Context>\nclass UnpackSegmentsOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(UnpackSegmentsOp)\n  USE_DISPATCH_HELPER;\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int, long>>::call(this, Input(LENGTHS));\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    const auto& data = Input(DATA);\n    const auto& lengths = Input(LENGTHS);\n    auto* output = Output(0);\n\n    CAFFE_ENFORCE(data.ndim() >= 2, \"DATA should be at least 2-D\");\n    CAFFE_ENFORCE(lengths.ndim() == 1, \"LENGTH should be 1-D\");\n\n    const T* l = lengths.template data<T>();\n\n    T max_length = 0;\n    for (T i = 0; i < lengths.dim(0); ++i) {\n      max_length = std::max(max_length, l[i]);\n    }\n    T total_l = std::accumulate(l, l + lengths.dim(0), 0);\n\n    auto shape = data.dims();\n    CAFFE_ENFORCE(\n        shape[0] == lengths.dim(0), \"LENGTH should match DATA in dimension 0\");\n    shape.erase(shape.begin());\n    shape[0] = total_l;\n    output->Resize(shape);\n    // create output tensor\n    auto* out = static_cast<char*>(output->raw_mutable_data(data.meta()));\n    if (!(data.dim(0) * data.dim(1))) {\n      return true;\n    }\n    int block_size = 
data.size() / (data.dim(0) * data.dim(1));\n    int block_bytesize = data.nbytes() / (data.dim(0) * data.dim(1));\n    const auto* d = static_cast<const char*>(data.raw_data());\n    int start = 0;\n    for (int i = 0; i < lengths.dim(0); ++i) {\n      context_.template CopyItems<Context, Context>(\n          data.meta(),\n          l[i] * block_size,\n          d + block_bytesize * data.dim(1) * i,\n          out + block_bytesize * start);\n      start += l[i];\n    }\n    return true;\n  }\n\n  INPUT_TAGS(LENGTHS, DATA);\n};\n\n} // namespace caffe2\n#endif // CAFFE2_OPERATORS_PACK_SEGMENTS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/pad_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_PAD_OP_H_\n#define CAFFE2_OPERATORS_PAD_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/conv_pool_op_base.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\n// Padding mode similar to numpy.\nenum class PadMode {\n  CONSTANT = 0, // pad constant values, with string \"constant\"\n  REFLECT = 1, // pads with reflect values, with string \"reflect\"\n  EDGE = 2, // pads with the edge values, with string \"edge\"\n};\n\nPadMode StringToPadMode(const string&);\n\ntemplate <typename T, class Context>\nclass PadImageOp final : public ConvPoolOpBase<Context> {\n public:\n  USE_CONV_POOL_BASE_FUNCTIONS(Context);\n  PadImageOp(const OperatorDef& operator_def, Workspace* ws)\n      : ConvPoolOpBase<Context>(operator_def, ws),\n        mode_(StringToPadMode(\n            OperatorBase::GetSingleArgument<string>(\"mode\", \"constant\"))),\n        value_(static_cast<T>(\n            OperatorBase::GetSingleArgument<float>(\"value\", 0.0))) {\n    CAFFE_ENFORCE(\n        legacy_pad_ == LegacyPadding::NOTSET,\n        \"Padding layer only supports explicit pad values.\");\n    CAFFE_ENFORCE(\n        dilation_h() == 1 && dilation_w() == 1,\n        \"Pooling op does not support dilation right now.\");\n    CAFFE_ENFORCE(\n        stride_h() == 1 && stride_w() == 1,\n        \"Pooling op does not support stride right now.\");\n    // Pad op does not use kernel sizes, so we set it to 1 for computing the\n    // output size.\n    kernel_.assign(pads_.size() / 2, 1);\n  }\n  ~PadImageOp() {}\n\n  bool RunOnDeviceWithOrderNCHW() override;\n  bool RunOnDeviceWithOrderNHWC() override;\n\n  static std::vector<TensorShape> PadTensorInference(\n      const OperatorDef& def,\n      const vector<TensorShape>& in);\n\n private:\n  PadMode mode_;\n  T value_;\n\n  // Input: X\n  // Output: Y\n};\n\ntemplate <typename T, class Context>\nclass 
PadImageGradientOp final : public ConvPoolOpBase<Context> {\n public:\n  USE_CONV_POOL_BASE_FUNCTIONS(Context);\n  PadImageGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : ConvPoolOpBase<Context>(operator_def, ws),\n        mode_(StringToPadMode(\n            OperatorBase::GetSingleArgument<string>(\"mode\", \"constant\"))) {\n    CAFFE_ENFORCE(\n        legacy_pad_ == LegacyPadding::NOTSET,\n        \"Padding layer only supports explicit pad values.\");\n    CAFFE_ENFORCE(\n        dilation_h() == 1 && dilation_w() == 1,\n        \"Pooling op does not support dilation right now.\");\n    // Pad op does not use kernel sizes, so we set it to 1 for computing the\n    // output size.\n    kernel_.assign(pads_.size() / 2, 1);\n  }\n  ~PadImageGradientOp() {}\n\n  bool RunOnDeviceWithOrderNCHW() override;\n  bool RunOnDeviceWithOrderNHWC() override;\n\n private:\n  PadMode mode_;\n  // Input: dY\n  // Output: dX\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_PAD_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/partition_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_PARTITION_OPS_H_\n#define CAFFE2_OPERATORS_PARTITION_OPS_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename Index>\nstatic inline int moduloPartition(Index key, int numPartitions) {\n  int shard = key % numPartitions;\n  // equivalent to `if (shard < 0) shard += partitions;`\n  shard += numPartitions & (shard >> (sizeof(int) * 8 - 1));\n  return shard;\n}\n\nclass GatherByKeyOp : public Operator<CPUContext> {\n public:\n  USE_DISPATCH_HELPER;\n  USE_OPERATOR_FUNCTIONS(CPUContext);\n  GatherByKeyOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<CPUContext>(operator_def, ws) {}\n\n private:\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(0));\n  }\n\n private:\n  template <typename Index>\n  bool DoRunWithType() {\n    const auto numPartitions = InputSize() - 1;\n    CAFFE_ENFORCE_GE(numPartitions, 1);\n    const auto& keysTensor = Input(0);\n    const auto* keysData = keysTensor.template data<Index>();\n    const auto& keysShape = Input(0).dims();\n    CAFFE_ENFORCE_EQ(\n        keysShape.size(), 1, \"Only 1D keys tensor supported currently.\");\n\n    // 1. 
Shape and type consistency checks\n    const auto& in0Shape = Input(1).dims();\n    CAFFE_ENFORCE_GE(in0Shape.size(), 1);\n\n    vector<TIndex> outShape(keysShape);\n    outShape.insert(outShape.end(), in0Shape.begin() + 1, in0Shape.end());\n\n    CAFFE_ENFORCE_GE(outShape.size(), 1);\n    auto totalSize = in0Shape[0];\n    auto meta = Input(1).meta();\n    for (int i = 2; i < InputSize(); ++i) {\n      const auto& input = Input(i);\n      CAFFE_ENFORCE(meta == input.meta());\n      CAFFE_ENFORCE_GE(input.ndim(), 1);\n      CAFFE_ENFORCE(std::equal(\n          outShape.begin() + keysShape.size(),\n          outShape.end(),\n          input.dims().begin() + 1));\n      totalSize += input.dim(0);\n    }\n    CAFFE_ENFORCE_EQ(keysTensor.size(), totalSize);\n\n    auto* outTensor = Output(0);\n    outTensor->Resize(outShape);\n    auto* outData = static_cast<char*>(outTensor->raw_mutable_data(meta));\n    const auto blockSize = outTensor->size_from_dim(1);\n\n    inputDatas_.resize(numPartitions);\n    for (int i = 0; i < numPartitions; ++i) {\n      inputDatas_[i] = static_cast<const char*>(Input(i + 1).raw_data());\n    }\n    inStartOffsets_.assign(numPartitions, 0);\n    Index outStartOffset = 0;\n    int currentShard = -1;\n\n    // 2. copy from inputs into output based on shard for each input key\n    const auto numEntries = keysTensor.size();\n    for (int64_t i = 0; i <= numEntries; ++i) {\n      auto newShard =\n          i < numEntries ? 
moduloPartition(keysData[i], numPartitions) : -1;\n      if (newShard != currentShard) {\n        if (currentShard != -1) {\n          auto inStartOffset = inStartOffsets_[currentShard];\n          auto numItems = i - outStartOffset;\n          context_.template CopyItems<CPUContext, CPUContext>(\n              meta,\n              numItems * blockSize,\n              inputDatas_[currentShard] +\n                  inStartOffset * blockSize * meta.itemsize(),\n              outData + outStartOffset * blockSize * meta.itemsize());\n          inStartOffsets_[currentShard] += numItems;\n        }\n        currentShard = newShard;\n        outStartOffset = i;\n      }\n    }\n\n    return true;\n  }\n\n  std::vector<const char*> inputDatas_;\n  std::vector<int64_t> inStartOffsets_;\n};\n\nclass PartitionOpBase : public Operator<CPUContext> {\n public:\n  USE_OPERATOR_FUNCTIONS(CPUContext);\n\n  PartitionOpBase(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<CPUContext>(operator_def, ws),\n        OP_SINGLE_ARG(int, \"pack_first_input\", pack_first_input_, 0) {}\n\n protected:\n  template <typename Index>\n  void ApplyPartition(bool skipFirstArgument) {\n    CAFFE_ENFORCE_EQ(\n        OutputSize() % InputSize(),\n        0,\n        \"Output number must be a multiple of input number\");\n    int partitions = OutputSize() / InputSize();\n    int inputSize = InputSize();\n    int mainInputIndex = skipFirstArgument;\n    CAFFE_ENFORCE_GT(partitions, 0, \"Invalid number of partitions\");\n\n    auto& main_input = Input(mainInputIndex);\n    TIndex size = main_input.size();\n    const Index* data = main_input.template data<Index>();\n    counts_.assign(partitions, 0);\n    for (TIndex p = 0; p < size; p++) {\n      int shard = moduloPartition(data[p], partitions);\n      ++counts_[shard];\n    }\n\n    raw_datas_.resize(inputSize);\n    block_sizes_.resize(inputSize);\n    metas_.resize(inputSize);\n    out_datas_.resize(OutputSize());\n    for (int i = 
mainInputIndex; i < inputSize; ++i) {\n      auto& input = Input(i);\n      if (i > mainInputIndex) {\n        CAFFE_ENFORCE_GE(\n            input.ndim(),\n            main_input.ndim(),\n            \"Prefix of extra input's shape must match main input's shape, \",\n            \"input: \",\n            i);\n        for (int j = 0; j < main_input.ndim(); ++j) {\n          CAFFE_ENFORCE_GE(\n              input.dim(j),\n              main_input.dim(j),\n              \"Prefix of extra input's shape must match main input's shape, \",\n              \"input: \",\n              i,\n              \", dim \",\n              j);\n        }\n      }\n      raw_datas_[i] = input.raw_data();\n      block_sizes_[i] = input.size_from_dim(main_input.ndim());\n      metas_[i] = input.meta();\n      // shape = partition_size + suffix of input dims\n      vector<TIndex> shape(\n          input.dims().begin() + main_input.ndim() - 1, input.dims().end());\n      for (int j = 0; j < partitions; ++j) {\n        int out_idx = i + j * inputSize;\n        auto output = Output(out_idx);\n        shape[0] = counts_[j];\n        output->Resize(shape);\n        out_datas_[out_idx] = output->raw_mutable_data(input.meta());\n      }\n    }\n\n    counts_.assign(partitions, 0);\n    for (TIndex p = 0; p < size; p++) {\n      int shard = moduloPartition(data[p], partitions);\n      TIndex idx = counts_[shard]++;\n\n      // special case first input\n      static_cast<Index*>(out_datas_[shard * inputSize + mainInputIndex])[idx] =\n          pack_first_input_ ? 
((data[p] - shard) / partitions) : data[p];\n\n      int baseIndex = shard * inputSize;\n      for (int i = mainInputIndex + 1; i < inputSize; ++i) {\n        auto bs = block_sizes_[i];\n        auto meta = metas_[i];\n        // special case for small bs?\n        context_.template CopyItems<CPUContext, CPUContext>(\n            meta,\n            bs,\n            static_cast<const char*>(raw_datas_[i]) + p * bs * meta.itemsize(),\n            static_cast<char*>(out_datas_[baseIndex + i]) +\n                idx * bs * meta.itemsize());\n      }\n    }\n  }\n\n  bool pack_first_input_;\n\n  // use member fields to reuse memory\n  vector<TIndex> counts_;\n  vector<TIndex> block_sizes_;\n  vector<TypeMeta> metas_;\n  vector<const void*> raw_datas_;\n  vector<void*> out_datas_;\n};\n\nclass PartitionOp : public PartitionOpBase {\n public:\n  USE_DISPATCH_HELPER;\n\n  PartitionOp(const OperatorDef& operator_def, Workspace* ws)\n      : PartitionOpBase(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(0));\n  }\n\n private:\n  template <typename Index>\n  bool DoRunWithType() {\n    ApplyPartition<Index>(false /* skipFirstArgument */);\n    return true;\n  }\n\n  DISABLE_COPY_AND_ASSIGN(PartitionOp);\n};\n\nclass LengthsPartitionOp : public PartitionOpBase {\n public:\n  USE_DISPATCH_HELPER;\n\n  LengthsPartitionOp(const OperatorDef& operator_def, Workspace* ws)\n      : PartitionOpBase(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(1));\n  }\n\n private:\n  template <typename Index>\n  bool DoRunWithType() {\n    CAFFE_ENFORCE(\n        OutputSize() % InputSize() == 0,\n        \"Output number must be a multiple of input number\");\n    int partitions = OutputSize() / InputSize();\n    CAFFE_ENFORCE_GT(partitions, 0, \"Invalid number of partitions\");\n    CAFFE_ENFORCE_EQ(\n        Input(1).ndim(),\n     
   1,\n        \"Only 1-D tensors supported as a partitioning tensor for sharding\");\n\n    // Apply sharding to all parameters except lengths\n    ApplyPartition<Index>(true /* skipFirstArgument */);\n\n    // Compute lengths after sharding\n    auto& main_input = Input(1);\n    TIndex size = main_input.size();\n    const Index* data = main_input.template data<Index>();\n\n    auto& length_input = Input(0);\n    TIndex elements = length_input.size();\n    const int32_t* lengths_data = length_input.template data<int32_t>();\n    out_length_.resize(partitions);\n    for (int i = 0; i < partitions; ++i) {\n      auto& output = *Output(i * InputSize());\n      output.Resize(elements);\n      out_length_[i] = output.template mutable_data<int32_t>();\n    }\n\n    int total_length = 0;\n    for (int i = 0; i < elements; ++i) {\n      total_length += lengths_data[i];\n    }\n    CAFFE_ENFORCE(\n        total_length == size,\n        \"Total length is not matching to the number of elements\");\n\n    int index = 0;\n    for (int i = 0; i < elements; ++i) {\n      for (int j = 0; j < partitions; ++j) {\n        out_length_[j][i] = 0;\n      }\n      for (int j = 0; j < lengths_data[i]; ++j, ++index) {\n        int shard = moduloPartition(data[index], partitions);\n        ++out_length_[shard][i];\n      }\n    }\n    return true;\n  }\n\n  DISABLE_COPY_AND_ASSIGN(LengthsPartitionOp);\n\n  vector<int32_t*> out_length_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_PARTITION_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/perplexity_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_PERPLEXITY_OP_H_\n#define CAFFE2_OPERATORS_PERPLEXITY_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass PerplexityOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(PerplexityOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_PERPLEXITY_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/piecewise_linear_transform_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_PIECEWISE_LINEAR_TRANSFORM_OP_H_\n#define CAFFE2_OPERATORS_PIECEWISE_LINEAR_TRANSFORM_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass PiecewiseLinearTransformOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  PiecewiseLinearTransformOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {\n    binary_ = OperatorBase::GetSingleArgument<bool>(\"binary\", false);\n\n    // Retrieve transform params (i.e., the linear functions).\n    bounds_from_arg_ = OperatorBase::GetRepeatedArgument<T>(\"bounds\");\n    slopes_from_arg_ = OperatorBase::GetRepeatedArgument<T>(\"slopes\");\n    intercepts_from_arg_ = OperatorBase::GetRepeatedArgument<T>(\"intercepts\");\n    transform_param_from_arg_ = CheckTransParamFromArg();\n  }\n\n  bool RunOnDevice() override {\n    return binary_ ? TransformBinary() : TransformGeneral();\n  }\n\n private:\n  // num_func_per_group is the number of pieces of linear functions of\n  // each group.\n  // num_group: The number of groups of linear functions. Each group is for\n  // transforming one column of predictions.\n  void InferNumFunctionsPerGroup(\n      const TIndex num_bounds,\n      const TIndex num_slopes,\n      const TIndex num_intercepts,\n      TIndex* num_func_per_group,\n      TIndex* num_group) {\n    CAFFE_ENFORCE_EQ(num_slopes, num_intercepts);\n\n    // This is based on the facts:\n    // 1. in each group, the num of bounds minus the num of slopes is 1;\n    // 2. 
each group has the same number of pieces.\n    *num_group = num_bounds - num_slopes;\n    CAFFE_ENFORCE_GT(*num_group, 0);\n    if (binary_) {\n      CAFFE_ENFORCE_EQ(*num_group, 1);\n    }\n    *num_func_per_group = num_slopes / *num_group;\n    CAFFE_ENFORCE_GT(*num_func_per_group, 0);\n    CAFFE_ENFORCE_EQ(num_slopes % *num_group, 0);\n  }\n\n  bool CheckBoundsSorted(\n      const T* bounds,\n      const TIndex num_bounds_per_group,\n      const TIndex num_group) {\n    const T* start = bounds;\n    for (TIndex i = 0; i < num_group; i++) {\n      if (!std::is_sorted(start, start + num_bounds_per_group)) {\n        return false;\n      }\n      start += num_bounds_per_group;\n    }\n    return true;\n  }\n\n  // Returns true if the transform params from arg are valid.\n  // Otherwise, we will assume the transform params will pass from Input blobs.\n  bool CheckTransParamFromArg() {\n    int good_param = 0;\n    good_param += bounds_from_arg_.size() > 0;\n    good_param += slopes_from_arg_.size() > 0;\n    good_param += intercepts_from_arg_.size() > 0;\n    CAFFE_ENFORCE(\n        good_param == 0 || good_param == 3,\n        \"bounds, slopes, intercepts must be all set or all not set\");\n    if (good_param == 3) {\n      TIndex num_func_per_group;\n      TIndex num_group;\n      InferNumFunctionsPerGroup(\n          bounds_from_arg_.size(),\n          slopes_from_arg_.size(),\n          intercepts_from_arg_.size(),\n          &num_func_per_group,\n          &num_group);\n      CAFFE_ENFORCE(\n          CheckBoundsSorted(\n              bounds_from_arg_.data(), num_func_per_group + 1, num_group),\n          \"bounds must be sorted for each group\");\n    }\n\n    return good_param == 3;\n  }\n\n  void setUpTensors(TIndex& num_func_per_group, TIndex& num_group, TIndex M);\n\n  void GetTransParamData(\n      const T** bounds,\n      const T** slopes,\n      const T** intercepts,\n      TIndex* num_func_per_group,\n      TIndex* num_group) {\n    TIndex num_bounds;\n 
   TIndex num_slopes;\n    TIndex num_intercepts;\n\n    if (transform_param_from_arg_) {\n      CAFFE_ENFORCE_EQ(InputSize(), 1);\n      *bounds = bounds_from_arg_.data();\n      *slopes = slopes_from_arg_.data();\n      *intercepts = intercepts_from_arg_.data();\n      num_bounds = bounds_from_arg_.size();\n      num_slopes = slopes_from_arg_.size();\n      num_intercepts = intercepts_from_arg_.size();\n    } else {\n      CAFFE_ENFORCE_EQ(InputSize(), 4);\n      auto& bounds_input = Input(BOUNDS);\n      auto& slopes_input = Input(SLOPES);\n      auto& intercepts_input = Input(INTERCEPTS);\n      *bounds = bounds_input.template data<T>();\n      *slopes = slopes_input.template data<T>();\n      *intercepts = intercepts_input.template data<T>();\n      num_bounds = bounds_input.size();\n      num_slopes = slopes_input.size();\n      num_intercepts = intercepts_input.size();\n    }\n    InferNumFunctionsPerGroup(\n        num_bounds, num_slopes, num_intercepts, num_func_per_group, num_group);\n  }\n\n  bool TransformGeneral() {\n    auto& X = Input(0);\n    auto* Y = Output(0);\n    CAFFE_ENFORCE_EQ(X.ndim(), 2);\n    TIndex N = X.dim32(0);\n    TIndex M = X.dim32(1);\n    Y->ResizeLike(X);\n    const auto* Xdata = X.template data<T>();\n    T* Ydata = Y->template mutable_data<T>();\n\n    const T* bounds;\n    const T* slopes;\n    const T* intercepts;\n    TIndex num_func_per_group;\n    TIndex num_group;\n    GetTransParamData(\n        &bounds, &slopes, &intercepts, &num_func_per_group, &num_group);\n    CAFFE_ENFORCE_EQ(num_group, M);\n\n    for (TIndex j = 0; j < M; ++j) {\n      const T* bounds_group = bounds + j * (num_func_per_group + 1);\n      const T* slopes_group = slopes + j * num_func_per_group;\n      const T* intercepts_group = intercepts + j * num_func_per_group;\n      for (TIndex i = 0; i < N; ++i) {\n        Ydata[i * M + j] = PiecewiseLinearTransform(\n            Xdata[i * M + j],\n            bounds_group,\n            slopes_group,\n       
     intercepts_group,\n            num_func_per_group);\n      }\n    }\n    return true;\n  }\n\n  bool TransformBinary() {\n    auto& X = Input(PREDICTIONS);\n    auto* Y = Output(0);\n    CAFFE_ENFORCE(X.ndim() == 1 || X.ndim() == 2);\n    TIndex N = X.dim32(0);\n    TIndex M = X.ndim() == 2 ? X.dim32(1) : 1;\n    CAFFE_ENFORCE(\n        M == 1 || M == 2,\n        \"If binary is set to true, the input must be Nx2 or Nx1 tensor\");\n    Y->ResizeLike(X);\n    const auto* Xdata = X.template data<T>();\n    T* Ydata = Y->template mutable_data<T>();\n\n    const T* bounds;\n    const T* slopes;\n    const T* intercepts;\n    TIndex num_func_per_group;\n    TIndex num_group;\n    GetTransParamData(\n        &bounds, &slopes, &intercepts, &num_func_per_group, &num_group);\n    CAFFE_ENFORCE_EQ(num_group, 1);\n\n    if (M == 1) {\n      for (TIndex i = 0; i < N; ++i) {\n        Ydata[i] = PiecewiseLinearTransform(\n            Xdata[i], bounds, slopes, intercepts, num_func_per_group);\n      }\n    } else {\n      for (TIndex i = 0; i < N; ++i) {\n        Ydata[i * M + 1] = PiecewiseLinearTransform(\n            Xdata[i * M + 1], bounds, slopes, intercepts, num_func_per_group);\n        Ydata[i * M] = 1.0f - Ydata[i * M + 1];\n      }\n    }\n\n    return true;\n  }\n\n  T PiecewiseLinearTransform(\n      const T x,\n      const T* bounds,\n      const T* slopes,\n      const T* intercepts,\n      const TIndex num_func_per_group) {\n    T y = 0;\n    // deal with samples out of bounds\n    // make it the same as the upper/lower bound value\n    if (x <= bounds[0]) {\n      y = slopes[0] * bounds[0] + intercepts[0];\n    } else if (x >= bounds[num_func_per_group]) {\n      y = slopes[num_func_per_group - 1] * bounds[num_func_per_group] +\n          intercepts[num_func_per_group - 1];\n    } else {\n      auto low_bound =\n          std::lower_bound(bounds, bounds + num_func_per_group + 1, x);\n      int bounds_idx = low_bound - bounds - 1;\n      // compute the 
piecewise linear transformation as Y\n      y = slopes[bounds_idx] * x + intercepts[bounds_idx];\n    }\n    return y;\n  }\n\n private:\n  bool binary_;\n  vector<T> bounds_from_arg_;\n  vector<T> slopes_from_arg_;\n  vector<T> intercepts_from_arg_;\n\n  Tensor<Context> bounds_device_;\n  Tensor<Context> intercepts_device_;\n  Tensor<Context> slopes_device_;\n  bool gpu_copied_ = false;\n\n  // If true, the piecewise linear functions are passed through args,\n  // otherwise, they are passed through Input blobs.\n  bool transform_param_from_arg_;\n\n  INPUT_TAGS(PREDICTIONS, BOUNDS, SLOPES, INTERCEPTS);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_PIECEWISE_LINEAR_TRANSFORM_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/pool_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_POOL_OP_H_\n#define CAFFE2_OPERATORS_POOL_OP_H_\n\n#include \"caffe2/core/common_omp.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/conv_pool_op_base.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context, typename PoolType>\nclass PoolOp final : public ConvPoolOpBase<Context> {\n public:\n  USE_CONV_POOL_BASE_FUNCTIONS(Context);\n  PoolOp(const OperatorDef& operator_def, Workspace* ws)\n      : ConvPoolOpBase<Context>(operator_def, ws) {\n    for (int i = 0; i < kernel_.size(); ++i) {\n      CAFFE_ENFORCE(\n          dilation_[i] == 1, \"Pooling op does not support dilation right now.\");\n    }\n    if (!global_pooling_) {\n      for (int i = 0; i < kernel_.size(); ++i) {\n        CAFFE_ENFORCE(\n            pads_[i] < kernel_[i] && pads_[i + kernel_.size()] < kernel_[i],\n            \"Pad should be smaller than kernel.\");\n      }\n    }\n  }\n  ~PoolOp() {}\n\n  bool RunOnDeviceWithOrderNCHW() override;\n  bool RunOnDeviceWithOrderNHWC() override;\n\n  // Input: X\n  // Output: Y\n};\n\ntemplate <typename T, class Context, class PoolType>\nclass PoolGradientOp final : public ConvPoolOpBase<Context> {\n public:\n  USE_CONV_POOL_BASE_FUNCTIONS(Context);\n  PoolGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : ConvPoolOpBase<Context>(operator_def, ws) {}\n  ~PoolGradientOp() {}\n\n  bool RunOnDeviceWithOrderNCHW() override;\n  bool RunOnDeviceWithOrderNHWC() override;\n\n  // Input: X, Y, dY\n  // Output: dX\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_POOL_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/prefetch_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_PREFETCH_OP_H_\n#define CAFFE2_OPERATORS_PREFETCH_OP_H_\n\n#include <condition_variable>\n#include <mutex>\n#include <thread> // NOLINT\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\n// PrefetchOperator is an operator that prefetches the next batch. It should\n// almost always be used to read things from disk, so I am setting the input to\n// zero blobs.\n//\n// For any operator that is derived from PrefetchOperator, it should\n// explicitly call the Finalize() function in its destructor, so that the\n// prefetching thread is properly destructed.\n\n// Note: We inherit from OperatorBase since we control the\n// synchronization properties of this operator ourselves (we inform\n// the waiting producer after we synchronize). This is a special-case\n// - you should generally inherit from Operator<Context> directly.\ntemplate <class Context>\nclass PrefetchOperator : public OperatorBase {\n public:\n  PrefetchOperator(const OperatorDef& operator_def, Workspace* ws)\n      : OperatorBase(operator_def, ws),\n        context_(operator_def.device_option()),\n        prefetched_(false),\n        prefetch_success_(true),\n        finalize_(false) {\n    context_.SwitchToDevice(0);\n  }\n\n  virtual ~PrefetchOperator() noexcept {\n    CHECK(finalize_ || !prefetch_thread_.get()) <<\n        \"YOU MADE A PROGRAMING ERROR: derived class of PrefetchOperator \"\n        \"should call Finalize() in its destructor so the prefetching \"\n        \"thread is joined. 
\";\n  }\n\n  void Finalize() {\n    if (prefetch_thread_.get()) {\n      {\n        std::unique_lock<std::mutex> lock(prefetch_access_mutex_);\n        while (!prefetched_)\n          consumer_.wait(lock);\n        finalize_ = true;\n        prefetched_ = false;\n      }\n      producer_.notify_one();\n      prefetch_thread_->join();\n      prefetch_thread_.reset();\n    } else {\n      // If we never initialized the prefetch thread, just set\n      // finalize anyway.\n      finalize_ = true;\n    }\n  }\n\n  bool Run(int /* unused */ /*stream_id*/) override {\n    // Note(jiayq): We only start the prefetch_thread at the Run() function\n    // instead of in the constructor, because the prefetch_thread needs to start\n    // after all derived classes' constructors finish.\n    if (!prefetch_thread_) {\n      prefetch_thread_.reset(\n          new std::thread([this] { this->PrefetchWorker(); }));\n    }\n    context_.SwitchToDevice(0);\n    std::unique_lock<std::mutex> lock(prefetch_access_mutex_);\n    while (!prefetched_)\n      consumer_.wait(lock);\n    if (!prefetch_success_) {\n      LOG(ERROR) << \"Prefetching failed.\";\n      return false;\n    }\n    if (!CopyPrefetched()) {\n      LOG(ERROR) << \"Error when copying prefetched data.\";\n      return false;\n    }\n    prefetched_ = false;\n    context_.FinishDeviceComputation();\n    producer_.notify_one();\n    return true;\n  }\n\n  void PrefetchWorker() {\n    context_.SwitchToDevice();\n    std::unique_lock<std::mutex> lock(prefetch_access_mutex_);\n    while (prefetched_)\n      producer_.wait(lock);\n    while (!finalize_) {\n      // We will need to run a FinishDeviceComputation() call because the\n      // prefetcher thread and the main thread are potentially using different\n      // streams (like on GPU).\n      try {\n        prefetch_success_ = Prefetch();\n        context_.FinishDeviceComputation();\n      } catch (const std::exception& e) {\n        // TODO: propagate exception_ptr to the 
caller side\n        LOG(ERROR) << \"Prefetching error \" << e.what();\n        prefetch_success_ = false;\n      }\n      prefetched_ = true;\n      consumer_.notify_one();\n      while (prefetched_)\n        producer_.wait(lock);\n    }\n  }\n\n  // You will need to implement this instead of the Run function.\n  virtual bool Prefetch() = 0;\n  virtual bool CopyPrefetched() = 0;\n\n protected:\n  Context context_;\n  std::mutex prefetch_access_mutex_;\n  std::condition_variable producer_, consumer_;\n  // prefetched_ is used to tell the operator that it is done.\n  std::atomic<bool> prefetched_;\n  // prefetch_success_ is used to see if prefetching failed or not.\n  std::atomic<bool> prefetch_success_;\n  // finalize_ is used to tell the prefetcher to quit.\n  std::atomic<bool> finalize_;\n  unique_ptr<std::thread> prefetch_thread_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_PREFETCH_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/prelu_op.h",
    "content": "#pragma once\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass PReluOp final : public Operator<Context> {\n public:\n  PReluOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))) {}\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  StorageOrder order_;\n};\n\ntemplate <typename T, class Context>\nclass PReluGradientOp final : public Operator<Context> {\n public:\n  PReluGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  StorageOrder order_;\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/prepend_dim_op.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#ifndef CAFFE2_OPERATORS_PREPEND_DIM_OP_H_\n#define CAFFE2_OPERATORS_PREPEND_DIM_OP_H_\n\n#include \"caffe2/core/common_omp.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass PrependDimOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  PrependDimOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        dim_size_(OperatorBase::GetSingleArgument<int64_t>(\"dim_size\", 0)) {\n    CAFFE_ENFORCE_GT(\n        dim_size_, 0, \"Argument dim_size must be greater than zero.\");\n  }\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    auto* output = Output(0);\n\n    CAFFE_ENFORCE(input.ndim() > 0, \"Input must be at least 1D.\");\n    CAFFE_ENFORCE(\n        input.dim(0) % dim_size_ == 0,\n        \"First dimension must be multiple of prepend_dim.\");\n\n    vector<int64_t> actual_new_shape(input.ndim() + 1);\n    actual_new_shape[0] = dim_size_;\n    actual_new_shape[1] = input.dim(0) / dim_size_;\n    for (int i = 1; i < input.dims().size(); ++i) {\n      actual_new_shape[i + 1] = input.dim(i);\n    }\n    output->Resize(actual_new_shape);\n\n    if (output != &input) {\n      // If we are not doing in-place computation, a copy is needed.\n      context_.template CopyBytes<Context, Context>(\n          input.nbytes(),\n          input.raw_data(),\n          output->raw_mutable_data(input.meta()));\n    }\n    return true;\n  }\n\n private:\n  int64_t dim_size_;\n};\n\ntemplate <class Context>\nclass MergeDimOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  MergeDimOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    auto* output = Output(0);\n\n    
CAFFE_ENFORCE(input.ndim() > 1, \"Input must be at least 2D.\");\n\n    vector<int64_t> actual_new_shape(input.ndim() - 1);\n    actual_new_shape[0] = input.dim(0) * input.dim(1);\n    for (int i = 1; i < input.dims().size() - 1; ++i) {\n      actual_new_shape[i] = input.dim(i + 1);\n    }\n    output->Resize(actual_new_shape);\n\n    if (output != &input) {\n      // If we are not doing in-place computation, a copy is needed.\n      context_.template CopyBytes<Context, Context>(\n          input.nbytes(),\n          input.raw_data(),\n          output->raw_mutable_data(input.meta()));\n    }\n    return true;\n  }\n\n private:\n  int64_t dim_size_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_PREPEND_DIM_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/rank_loss_op.h",
    "content": "#pragma once\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\n// support multiple batches of sessions\ntemplate <typename T, class Context>\nclass PairWiseLossOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(PairWiseLossOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n private:\n  INPUT_TAGS(XVALUE, LABEL, LENGTHS);\n  OUTPUT_TAGS(YVALUE);\n};\n\ntemplate <typename T, class Context>\nclass PairWiseLossGradientOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(PairWiseLossGradientOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n private:\n  INPUT_TAGS(XVALUE, LABEL, DYVALUE, LENGTHS);\n  OUTPUT_TAGS(DXVALUE);\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/recurrent_network_blob_fetcher_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_RECURRENT_BLOB_FETCHER_OP_H_\n#define CAFFE2_OPERATORS_RECURRENT_BLOB_FETCHER_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/operators/recurrent_network_op.h\"\n#include \"google/protobuf/text_format.h\"\n\n#include <string>\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass RecurrentNetworkBlobFetcherOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  RecurrentNetworkBlobFetcherOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {\n    prefix_ = OperatorBase::GetSingleArgument<std::string>(\"prefix\", \"rnn\");\n    ws_ = ws;\n  }\n\n  bool RunOnDevice() override {\n    const detail::ScratchWorkspaces& scratch =\n        OperatorBase::Input<detail::ScratchWorkspaces>(0);\n    const std::vector<std::shared_ptr<Workspace>>& stepWorkspaces =\n        scratch.stepWorkspaces;\n\n    std::vector<std::string> blob_names_vector = {};\n\n    for (TIndex i = 0; i < stepWorkspaces.size(); i++) {\n      Workspace* currentStepWorkspace = stepWorkspaces[i].get();\n      std::vector<std::string> blob_names = currentStepWorkspace->LocalBlobs();\n\n      for (auto& blob_name : blob_names) {\n        const Blob* currentBlob = currentStepWorkspace->GetBlob(blob_name);\n        const auto& currentTensor = currentBlob->Get<Tensor<Context>>();\n\n        std::string newBlobName =\n            prefix_ + std::string(\"_\") + blob_name + caffe2::to_string(i);\n        blob_names_vector.push_back(newBlobName);\n\n        ws_->CreateBlob(newBlobName)\n            ->template GetMutable<TensorCPU>()\n            ->ResizeLike(currentTensor);\n\n        auto* newTensor =\n            ws_->GetBlob(newBlobName)->template GetMutable<Tensor<Context>>();\n        newTensor->template CopyFrom<Context>(currentTensor);\n      }\n    }\n\n    auto* output = 
Output(0);\n    output->Resize(blob_names_vector.size());\n    std::copy(\n        blob_names_vector.begin(),\n        blob_names_vector.end(),\n        output->template mutable_data<std::string>());\n\n    return true;\n  }\n\n private:\n  std::string prefix_;\n  Workspace* ws_;\n};\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_RECURRENT_BLOB_FETCHER_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/recurrent_network_executor.h",
    "content": "#ifndef CAFFE2_OPERATORS_RECURRENT_NETWORK_EXECUTOR_H_\n#define CAFFE2_OPERATORS_RECURRENT_NETWORK_EXECUTOR_H_\n\n#include <map>\n#include <vector>\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/timer.h\"\n#include \"caffe2/operators/recurrent_network_executor_incl.h\"\n\nnamespace caffe2 {\n\nclass RecurrentNetworkExecutorBase {\n protected:\n  explicit RecurrentNetworkExecutorBase(\n      const NetDef& step_net_def,\n      std::map<string, string>& recurrent_input_map,\n      std::string timestep_blob)\n      : step_net_def_(step_net_def),\n        recurrent_input_map_(recurrent_input_map),\n        timestep_blob_(timestep_blob) {\n    for (int i = 0; i < step_net_def_.op_size(); i++) {\n      op_deps_.push_back(op_deps(i));\n    }\n  }\n\n public:\n  virtual ~RecurrentNetworkExecutorBase() {}\n\n  virtual bool Run(int T) = 0;\n\n  virtual bool RunBackwards(int T) = 0;\n\n  /**\n   * Callers must call this before running an execution that contains\n   * timestep t. On first call, this will initialize the data structures\n   * for the given timestep. For subsequent calls, this has no cost.\n   */\n  void EnsureTimestepInitialized(int t, Workspace* ws) {\n    if (timestep_ops_template_.size() == 0) {\n      CalculateInternalDependencies();\n    }\n    if (timestep_ops_.size() <= t ||\n        (timestep_ops_.size() > t && timestep_ops_[t].size() == 0)) {\n      for (int j = timestep_ops_.size(); j < t + 1; j++) {\n        timestep_ops_.push_back(std::vector<RNNNetOperator>());\n      }\n\n      // Create a specific timestep blob for this timestep. 
This is to\n      // avoid conflicting timestep blobs when reusing workspaces, as with\n      // the forward-only mode.\n      std::string this_timestep_blob =\n        timestep_blob_ + \"_rnnexec_t\" + caffe2::to_string(t);\n      ws->CreateBlob(this_timestep_blob)->template GetMutable<TensorCPU>()->Resize(1);\n      auto b = ws->GetBlob(this_timestep_blob);\n      CAFFE_ENFORCE(b);\n      b->template GetMutable<TensorCPU>()\n          ->template mutable_data<int32_t>()[0] = t;\n\n      // Copy the operators from template\n      for (auto& template_rnn_op : timestep_ops_template_) {\n        auto& rnn_op = template_rnn_op;\n        OperatorDef op_copy = step_net_def_.op(rnn_op.order);\n\n        // Rename timestep references to use the timestep specific timestep blob\n        for (int i = 0; i < op_copy.input_size(); i++) {\n          if (op_copy.input(i) == timestep_blob_) {\n            op_copy.set_input(i, this_timestep_blob);\n          }\n        }\n        CAFFE_ENFORCE(!HasOutput(op_copy, timestep_blob_),\n          \"Timestep cannot be output of an op: \", timestep_blob_,\n          \" op=\" + ProtoDebugString(op_copy));\n\n        rnn_op.op = CreateOperator(op_copy, ws);\n        timestep_ops_[t].emplace_back(rnn_op);\n      }\n    }\n  }\n\n  void SetMaxParallelTimesteps(int p) {\n    max_parallel_timesteps_ = p;\n  }\n\n private:\n  // Utility method to check if any of the op inputs or control inputs\n  // contain given blob 'input'\n  bool has_input(std::string x, int opidx) {\n    for (auto& inp : step_net_def_.op(opidx).input()) {\n      if (inp == x) {\n        return true;\n      }\n    }\n    for (auto& inp : step_net_def_.op(opidx).control_input()) {\n      if (inp == x) {\n        return true;\n      }\n    }\n    return false;\n  }\n\n  // Return all outbound dependencies of an op. 
Special case for\n  // rnn dependencies, that are set in recurent_network_op.\n  std::vector<string> op_deps(int i) {\n    std::vector<string> outs;\n    auto& opdef = step_net_def_.op(i);\n    for (string o : opdef.output()) {\n      outs.push_back(o);\n    };\n    for (auto& arg : opdef.arg()) {\n      if (arg.name().find(\"rnn_dependency\") == 0) {\n        outs.push_back(arg.s());\n      }\n    }\n    return outs;\n  }\n\n  /**\n   * Calculate dependencies of this op, for the ops following it in this\n   * timestep and also for the next timestep. Removes redundant dependencies.\n   */\n  void infer_dependencies(\n      int start_i,\n      std::set<string> outputs,\n      std::vector<RNNNetOperator>& rnn_ops,\n      std::set<int>* dep_ops) {\n    std::set<string> frontier = outputs;\n    std::set<int> already_accounted_deps;\n    int num_ops = step_net_def_.op_size();\n    for (int j = 0; j < num_ops - 1 && !outputs.empty(); j++) {\n      int i = (start_i + j) % num_ops;\n      if (rnn_ops[i].link_op && this->ignoreLinkDependencies()) {\n        continue;\n      }\n      for (auto& outp : frontier) {\n        if (has_input(outp, i)) {\n          if (outputs.find(outp) != outputs.end()) {\n            if (already_accounted_deps.find(i) ==\n                already_accounted_deps.end()) {\n              dep_ops->insert(i);\n            }\n\n            // Now we can take the deps of this ops and not\n            // add them anymore\n            for (int odep : rnn_ops[i].dependencies) {\n              already_accounted_deps.insert(odep);\n            }\n          }\n          for (string& dep_out : op_deps_[i]) {\n            auto oit = outputs.find(dep_out);\n            if (oit != outputs.end()) {\n              // This op produces output of the orignal op, so the dependency\n              // passed through that op\n              outputs.erase(oit);\n            }\n            frontier.insert(dep_out);\n          }\n        }\n      }\n    }\n  }\n\n  /**\n   * 
Add dependencies to ops in the next timestep that would write an op\n   * that this op has as an input or output. This is special for RNNs,\n   * since we can have ops running in different timesteps concurrently.\n   * Also, we need to check ops that output a blob that is input of\n   * of the op in question.\n   */\n  void add_race_conflict_dependencies(\n      int opidx,\n      std::vector<RNNNetOperator>& rnn_ops,\n      std::set<int>* dep_ops) {\n\n    for (int i = 0; i < rnn_ops.size(); i++) {\n      if (i == opidx) {\n        continue;\n      }\n      if (rnn_ops[i].link_op && this->ignoreLinkDependencies()) {\n        continue;\n      }\n      for (auto& dep_blob : op_deps_[i]) {\n        for (auto& inp : step_net_def_.op(opidx).input()) {\n          if (inp == dep_blob) {\n            dep_ops->insert(i);\n            break;\n          }\n        }\n        if (i < opidx) {\n          for (auto& outp : step_net_def_.op(opidx).output()) {\n            if (outp == dep_blob) {\n              dep_ops->insert(i);\n              break;\n            }\n          }\n        }\n      }\n    }\n  }\n\n\n\n  void CalculateInternalDependencies() {\n    /**\n     * Calculate the dependencies between ops inside timestep and across\n     * timestep. 
These are store in timestep_ops_ vector that is copied\n     * for each timestep.\n     */\n    for (int i = 0; i < step_net_def_.op_size(); i++) {\n      timestep_ops_template_.push_back(RNNNetOperator(step_net_def_.op(i), i));\n    }\n\n    // Then see which outputs appear as inputs, and those are\n    // the internal blobs.\n    for (auto& rnn_op : timestep_ops_template_) {\n      set<string> dep_outputs;\n      for (auto& outp : op_deps_[rnn_op.order]) {\n        dep_outputs.insert(outp);\n      }\n\n      // Add recurrent dependencies as 'outputs' for this op\n      for (auto& outp : dep_outputs) {\n        auto rit = recurrent_input_map_.find(outp);\n        if (rit != recurrent_input_map_.end()) {\n          dep_outputs.insert(rit->second);\n        } else {\n          dep_outputs.insert(outp);\n        }\n      }\n\n      // Compute dependencies of this op.\n      if (!rnn_op.link_op || !this->ignoreLinkDependencies()) {\n        std::set<int> dependent_ops;\n        infer_dependencies(\n            rnn_op.order + 1,\n            dep_outputs,\n            timestep_ops_template_,\n            &dependent_ops);\n\n        // Race conditions arise when operator writes a blob that is\n        // being read by another.\n        if (!this->ignoreLinkDependencies()) {\n          add_race_conflict_dependencies(\n            rnn_op.order, timestep_ops_template_, &dependent_ops);\n        }\n\n        for (int i : dependent_ops) {\n          rnn_op.dependencies.push_back(i);\n        }\n\n        // Sort in ascending order of dependency distance. If op\n        // j > i, then distance is j - i. 
But if j < i, then distance\n        // from i to j passes the timestep boundary and is j + num ops - i.\n        std::sort(\n            rnn_op.dependencies.begin(),\n            rnn_op.dependencies.end(),\n            [&](const int& a, const int& b) {\n              if (a < rnn_op.order && b < rnn_op.order) {\n                return a < b;\n              }\n              if (a >= rnn_op.order && b >= rnn_op.order) {\n                return a < b;\n              }\n              if (a >= rnn_op.order && b < rnn_op.order) {\n                return true;\n              }\n              return false;\n            });\n      }\n    }\n\n    // Update dependency counts\n    for (auto& rnn_op : timestep_ops_template_) {\n      for (int i : rnn_op.dependencies) {\n        timestep_ops_template_[i].num_dynamic_inputs++;\n\n        if (i > rnn_op.order) {\n          timestep_ops_template_[i].frontier = false;\n        } else {\n          timestep_ops_template_[i].num_recurrent_inputs++;\n        }\n      }\n    }\n\n    // Find ops that have no recurrent inputs, and bind them\n    // to the last op of the timestep. If there is only one op\n    // in the step net, then it will depend on itself. 
Note that\n    // we do not increase the dynamic input counter.\n    for (auto& rnn_op : timestep_ops_template_) {\n      if (rnn_op.num_dynamic_inputs == 0 && rnn_op.num_recurrent_inputs == 0) {\n        if (rnn_op.link_op && this->ignoreLinkDependencies()) {\n          continue;\n        }\n        timestep_ops_template_.back().dependencies.push_back(rnn_op.order);\n      }\n    }\n\n    // compute parents\n    for (auto& rnn_op : timestep_ops_template_) {\n      for (int dep : rnn_op.dependencies) {\n        timestep_ops_template_[dep].parents.push_back(rnn_op.order);\n      }\n    }\n\n    AnalyzeOps();\n  }\n\n\nprotected:\n  /**\n   * For debug purposes\n   */\n  void PrintInfo(int t) {\n    auto& rnn_ops = timestep_ops_[t];\n\n    LOG(INFO) << \"Timestep: \" << t;\n    for (auto& rnn_op : rnn_ops) {\n      auto& op = rnn_op.op;\n      LOG(INFO) << \"Operator \" << rnn_op.order << \": \" << op->type()\n                << \" dep inputs:\" << rnn_op.num_dynamic_inputs\n                << \" rec inputs:\" << rnn_op.num_recurrent_inputs\n                << \" frontier: \" << rnn_op.frontier;\n      for (auto& inp : rnn_op.op->debug_def().input()) {\n        LOG(INFO) << \" ---- input: \" << inp;\n      }\n      for (auto& outp : rnn_op.op->debug_def().output()) {\n        LOG(INFO) << \" ---- output: \" << outp;\n      }\n      for (auto j : rnn_op.dependencies) {\n        LOG(INFO) << \" dep: \" << j << \": \" << rnn_ops[j].op->type();\n      }\n      for (auto j : rnn_op.parents) {\n        LOG(INFO) << \" parent: \" << j << \": \" << rnn_ops[j].op->type();\n      }\n    }\n\n    LOG(INFO) << \"recurrent_inputs:\" << recurrent_input_map_;\n\n    for (auto& rnn_op : rnn_ops) {\n      LOG(INFO) << \"Operator \" << rnn_op.order;\n      LOG(INFO) << ProtoDebugString(rnn_op.op->debug_def());\n    }\n  }\n\n protected:\n\n  virtual void AnalyzeOps() {}\n\n  virtual bool ignoreLinkDependencies() = 0;\n\n  std::vector<std::vector<RNNNetOperator>> timestep_ops_;\n  
std::vector<OperatorBase*> op_ptrs_;\n\n  std::vector<RNNNetOperator> timestep_ops_template_;\n\n  NetDef step_net_def_;\n  std::vector<std::vector<string>> op_deps_;\n  std::map<string, string> recurrent_input_map_;\n  std::string timestep_blob_;\n\n  int max_parallel_timesteps_ = -1;\n};\n\ntemplate <class Context>\nstd::unique_ptr<RecurrentNetworkExecutorBase> createRNNExecutor(\n    const NetDef& step_net_def,\n    std::map<string, string>& recurrent_input_map,\n    std::string timestep_blob,\n    ArgumentHelper rnn_args);\n\nclass ThreadedRecurrentNetworkExecutor : public RecurrentNetworkExecutorBase {\n public:\n  ThreadedRecurrentNetworkExecutor(\n      const NetDef& step_net_def,\n      std::map<string, string>& recurrent_input_map,\n      std::string timestep_blob)\n      : RecurrentNetworkExecutorBase(step_net_def, recurrent_input_map, timestep_blob),\n        failed_(false) {}\n\n  ~ThreadedRecurrentNetworkExecutor() {\n    job_queue_.NoMoreJobs();\n    VLOG(1) << \"Joining workers.\";\n    for (auto& worker : workers_) {\n      worker.join();\n    }\n  }\n\n  bool Run(int T) override;\n\n  bool RunBackwards(int T) override;\n\n  bool ignoreLinkDependencies() override {\n    return false;\n  }\n\n  void setNumThreads(int n) {\n    num_threads_ = n;\n  }\n\n private:\n  void _ExecRange(int from, int to);\n\n  void _Exec();\n\n  void WorkerFunction();\n\n  void RunOp(OpJob job, int thread_id);\n\n  SimpleQueue<OpJob> job_queue_;\n  std::atomic<int> countdown_;\n  std::atomic<bool> failed_;\n  std::atomic<int> finished_timesteps_;\n  int num_ops_;\n  std::mutex countdown_mtx_;\n  std::condition_variable cv_;\n  std::vector<std::thread> workers_;\n  int num_threads_ = 4;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_RECURRENT_NETWORK_EXECUTOR_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/recurrent_network_executor_gpu.h",
    "content": "#ifndef CAFFE2_OPERATORS_RECURRENT_NETWORK_GPU_EXECUTOR_H_\n#define CAFFE2_OPERATORS_RECURRENT_NETWORK_GPU_EXECUTOR_H_\n\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/operators/recurrent_network_executor.h\"\n\n\n#include <map>\n\nnamespace caffe2 {\n\nclass CUDARecurrentNetworkExecutor : public RecurrentNetworkExecutorBase {\n public:\n  CUDARecurrentNetworkExecutor(\n      const NetDef& step_net_def,\n      std::map<string, string>& recurrent_input_map,\n      std::string timestep_blob)\n  : RecurrentNetworkExecutorBase(step_net_def, recurrent_input_map, timestep_blob) {}\n\n  ~CUDARecurrentNetworkExecutor();\n\n protected:\n  bool Run(int T) override;\n\n  bool RunBackwards(int T) override;\n\n  bool ignoreLinkDependencies() override {\n    return true;\n  }\n\n  void AnalyzeOps() override {\n    /**\n      * Check if there is an op that only depends on ops from previous\n      * timestep, and that ops is not the last op. Then we can start computation\n      * in subsequent timesteps before the whole previous timestep has finished.\n      * If there is no parallelism, we can avoid overhead of event-based\n      * dependency management.\n      */\n    has_timestep_parallelism_ = false;\n    for (auto& rnn_op : timestep_ops_template_) {\n      int i = rnn_op.order;\n      if (rnn_op.parents.size() >= 1 && i < timestep_ops_template_.size() - 1) {\n        bool only_recurrent_deps = std::all_of(\n                  rnn_op.parents.begin(),\n                  rnn_op.parents.end(), [&](const int &parent) {\n                    return parent > i;\n                  }\n        );\n        if (only_recurrent_deps) {\n          VLOG(1) << \"Timestep parallel op: \" << ProtoDebugString(step_net_def_.op(i));\n          has_timestep_parallelism_ = true;\n\n          for (int dep : rnn_op.parents) {\n            if (dep == timestep_ops_template_.size() - 1) {\n              // This op depends on the last op of the previous iteration,\n              
// so it will block any parallelism\n              has_timestep_parallelism_ = false;\n              break;\n            }\n          }\n          break;\n        }\n      }\n    }\n    LOG(INFO) << \"Analyzed ops for timestep parallelism: \" << has_timestep_parallelism_;\n }\n\n public:\n\n   void setMaxStreams(int n) {\n     max_cuda_streams_ = n;\n   }\n\n private:\n  void _ExecRange(int from, int to);\n\n  std::vector<cudaEvent_t> events_;\n  bool has_timestep_parallelism_ = false;\n  int max_cuda_streams_ = 2;\n};\n}\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/recurrent_network_executor_incl.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#ifndef CAFFE2_OPERATORS_RECURRENT_NETWORK_EXECUTOR_INCL_H_\n#define CAFFE2_OPERATORS_RECURRENT_NETWORK_EXECUTOR_INCL_H_\n\n#include <vector>\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\nstruct RNNNetOperator {\n  // Operator\n  int order;\n  std::shared_ptr<OperatorBase> op = nullptr;\n  bool link_op;\n\n  // Bookkeeping\n  int num_dynamic_inputs = 0;\n  int num_recurrent_inputs = 0;\n\n  // Dependencies\n  std::atomic<int> proc_inputs;\n  std::vector<int> dependencies;\n  std::vector<int> parents;\n  bool frontier = true;\n\n  explicit RNNNetOperator(const OperatorDef& def, int order) : order(order) {\n    proc_inputs = 0;\n    link_op = def.type() == \"rnn_internal_apply_link\";\n  }\n\n  RNNNetOperator(const RNNNetOperator& x) {\n    order = x.order;\n    op = x.op;\n    link_op = x.link_op;\n    num_dynamic_inputs = x.num_dynamic_inputs;\n    num_recurrent_inputs = x.num_recurrent_inputs;\n    proc_inputs = 0;\n    dependencies = x.dependencies;\n    parents = x.parents;\n    frontier = x.frontier;\n  }\n};\n\nstruct OpJob {\n  int timestep;\n  int op_idx;\n  int T;\n  int direction;\n  int stream_id = -1; // only used by gpu version\n  OpJob() {}\n  OpJob(int _timestep, int _op_idx, int _T, int _direction)\n      : timestep(_timestep), op_idx(_op_idx), T(_T), direction(_direction) {\n    CHECK(direction == 1 || direction == -1);\n    CHECK(timestep >= 0 && timestep < _T);\n  }\n\n  inline bool backward() {\n    return direction == -1;\n  }\n  inline bool forward() {\n    return direction == 1;\n  }\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_RECURRENT_NETWORK_EXECUTOR_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/recurrent_network_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_RECURRENT_NETWORK_OP_H_\n#define CAFFE2_OPERATORS_RECURRENT_NETWORK_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/operators/recurrent_network_executor.h\"\n#include \"google/protobuf/text_format.h\"\n#include \"caffe2/utils/conversions.h\"\n\nCAFFE2_DECLARE_bool(caffe2_rnn_executor);\n\nnamespace caffe2 {\nnamespace detail {\n\nstruct Param {\n  std::string param;\n  std::string grad;\n  std::string cellGradient;\n};\n\nstruct RecurrentInput {\n  std::string state;\n  std::string input;\n};\n\nstruct RecurrentGradient {\n  std::string param;\n  std::string grad;\n  std::string externalGrad;\n  std::string lastExternalGrad;\n  int32_t offset;\n};\n\nstruct OffsetAlias {\n  std::string src;\n  std::string dst;\n  int32_t offset{0};\n};\n\nstruct Link {\n  std::string internal;\n  std::string external;\n  int32_t offset{0};\n  int32_t window{1};\n};\n\nstruct ScratchWorkspaces {\n  std::vector<std::shared_ptr<Workspace>> stepWorkspaces;\n  std::shared_ptr<Workspace> sharedBlobsWs = nullptr;\n};\n\ninline void UpdateTimestepBlob(Workspace* ws, std::string blob_name, int t) {\n  ws->CreateBlob(blob_name)->template GetMutable<TensorCPU>()->Resize(1);\n  auto timestepBlob = ws->GetBlob(blob_name);\n  CAFFE_ENFORCE(timestepBlob);\n  timestepBlob->template GetMutable<TensorCPU>()\n      ->template mutable_data<int32_t>()[0] = t;\n}\n\nstd::map<string, string> GetRecurrentMapping(\n  const std::vector<detail::Link>& links, bool backward);\n\ntemplate <typename T, typename Context>\nvoid applyOffsetAlias(\n    const OffsetAlias& oc,\n    Workspace* ws,\n    Context* /*context*/) {\n  VLOG(1) << \"Aliasing: \" << oc.src << \" to: \" << oc.dst\n          << \" at offset: \" << oc.offset;\n  auto srcBlob = ws->GetBlob(oc.src);\n  CAFFE_ENFORCE(srcBlob);\n  auto* src = srcBlob->template 
GetMutable<Tensor<Context>>();\n  auto* dst = ws->GetBlob(oc.dst)->template GetMutable<Tensor<Context>>();\n  auto timestep = src->size() / src->dim(0);\n  auto dims = src->dims();\n  const int32_t startDstTimestep =\n      oc.offset >= 0 ? oc.offset : src->dim(0) + oc.offset;\n  const int32_t numDstTimesteps = src->dim(0) - startDstTimestep;\n  CAFFE_ENFORCE(\n      numDstTimesteps >= 1, \"Invalid number of timesteps: \", numDstTimesteps);\n  dims[0] = numDstTimesteps;\n  dst->Resize(dims);\n  CAFFE_ENFORCE(timestep == dst->size() / numDstTimesteps, \"Invalid offset\");\n  dst->ShareExternalPointer(\n      src->template mutable_data<T>() + startDstTimestep * timestep,\n      dst->size());\n}\n\ntemplate <typename T, class Context>\nvoid repeatCopy(\n    size_t repeat_n,\n    size_t n,\n    const T* src,\n    T* dst,\n    Context* context) {\n  for (int i = 0; i < repeat_n; ++i) {\n    context->template Copy<T, Context, Context>(n, src, dst + i * n);\n  }\n}\n\n/**\n * Copy external input to the step net into the first item of\n * (T + 1) X batch_size X input_size tensor\n */\ntemplate <typename T, typename Context>\nvoid initializeRecurrentInput(\n    const RecurrentInput& rc,\n    int32_t seqLen,\n    int32_t batchSize,\n    Workspace* ws,\n    Context* context) {\n  auto stateBlob = ws->GetBlob(rc.state);\n  CAFFE_ENFORCE(stateBlob);\n  auto* state = stateBlob->template GetMutable<Tensor<Context>>();\n\n  auto inputBlob = ws->GetBlob(rc.input);\n  CAFFE_ENFORCE(inputBlob);\n  const auto& input = inputBlob->template Get<Tensor<Context>>();\n  CAFFE_ENFORCE_GE(input.ndim(), 1, rc.input);\n  CAFFE_ENFORCE_LE(input.ndim(), 3, rc.input);\n\n  const auto stateSize = input.dim(input.ndim() - 1);\n  // Sometimes we want to provide more than one initial step.\n  // For example, if we do a convolution op in step net\n  // and need a sufficient left padding around the input.\n  // This could be used together with links where window != 1.\n  auto initialStateLength = 1;\n  
if (input.ndim() == 3) {\n    initialStateLength = input.dim(0);\n  }\n  // States at [0, ..., (T + initialStateLength - 1)] (inclusive)\n  state->Resize(seqLen + initialStateLength, batchSize, stateSize);\n\n  if (input.ndim() >= 2) {\n    CAFFE_ENFORCE_EQ(input.dim(input.ndim() - 2), batchSize, rc.input);\n    context->template Copy<T, Context, Context>(\n        batchSize * stateSize * initialStateLength,\n        input.template data<T>(),\n        state->template mutable_data<T>());\n  } else {\n    // Usually, the initial state is the same for all inputs in the batch.\n    // So the op conveniently accepts 1-D input and copies it batchSize times.\n    repeatCopy<T, Context>(\n          batchSize,\n          stateSize,\n          input.template data<T>(),\n          state->template mutable_data<T>(),\n          context);\n  }\n}\n\nvoid PrependOps(std::vector<OperatorDef> ops, NetDef* netdef);\n\nvoid AddApplyLinkOps(\n    const vector<Link>& links,\n    std::string timestep,\n    const DeviceOption& device_option,\n    NetDef* netdef);\n\nvoid extractLinks(\n    OperatorBase* op,\n    const std::string& internalArg,\n    const std::string& externalArg,\n    const std::string& offsetArg,\n    const std::string& windowArg,\n    std::vector<detail::Link>* links);\n} // namespace detail\n\ntemplate <class Context>\nclass RecurrentNetworkOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  RecurrentNetworkOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        sharedWs_(ws),\n        enable_rnn_executor_(OperatorBase::template GetSingleArgument<bool>(\n            \"enable_rnn_executor\",\n            false)),\n        timestep_(OperatorBase::template GetSingleArgument<std::string>(\n            \"timestep\",\n            \"timestep\")) {\n    CAFFE_ENFORCE(ws);\n    const auto stepNet =\n        OperatorBase::GetSingleArgument<string>(\"step_net\", \"\");\n    CAFFE_ENFORCE(\n       
 google::protobuf::TextFormat::ParseFromString(stepNet, &stepNetDef_),\n        \"Invalid netdef\");\n\n    recurrentInputs_ = constructRecurrentInputs(operator_def, sharedWs_);\n    links_ = constructLinks();\n    aliases_ = constructAliases();\n\n    stepNetDef_.add_external_input(timestep_);\n    detail::AddApplyLinkOps(\n        links_, timestep_, operator_def.device_option(), &stepNetDef_);\n\n    if (FLAGS_caffe2_rnn_executor && enable_rnn_executor_) {\n      VLOG(1) << \"Use RecurrentNetworkExecutor\";\n      auto recurrent_map = detail::GetRecurrentMapping(links_, false /* backward */);\n      rnnExecutor_ =\n          createRNNExecutor<Context>(\n              stepNetDef_,\n              recurrent_map,\n              timestep_,\n              ArgumentHelper(operator_def));\n    } else {\n      // Fix for legacy models that pass \"rnn\" type net\n      if (stepNetDef_.type() == \"rnn\") {\n        stepNetDef_.set_type(\"async_simple\");\n      }\n      CAFFE_ENFORCE(stepNetDef_.type() != \"async_dag\");\n    }\n  }\n\n  std::vector<detail::RecurrentInput> constructRecurrentInputs(\n      const OperatorDef& operator_def,\n      Workspace* sharedWs) {\n    const auto states =\n        OperatorBase::GetRepeatedArgument<std::string>(\"recurrent_states\");\n    const auto inputs =\n        OperatorBase::GetRepeatedArgument<int>(\"initial_recurrent_state_ids\");\n    CAFFE_ENFORCE_EQ(states.size(), inputs.size(), \"states/inputs mismatch\");\n    std::vector<detail::RecurrentInput> ris;\n    for (auto i = 0; i < states.size(); ++i) {\n      // States need to be \"global\" (since they are shared between\n      // forward and backward).\n      sharedWs->CreateBlob(states[i]);\n\n      detail::RecurrentInput ri;\n      ri.state = states[i];\n      ri.input = operator_def.input(inputs[i]);\n      ris.push_back(ri);\n    }\n    return ris;\n  }\n\n  std::vector<detail::OffsetAlias> constructAliases() {\n    const auto& src =\n        
OperatorBase::GetRepeatedArgument<std::string>(\"alias_src\");\n    const auto& dst =\n        OperatorBase::GetRepeatedArgument<std::string>(\"alias_dst\");\n    const auto& offset =\n        OperatorBase::GetRepeatedArgument<int32_t>(\"alias_offset\");\n    CAFFE_ENFORCE(\n        src.size() == offset.size(), \"alias_src/alias_offset mismatch\");\n    CAFFE_ENFORCE(\n        dst.size() == offset.size(), \"alias_dst/alias_offset mismatch\");\n    std::vector<detail::OffsetAlias> aliases;\n    for (auto i = 0; i < src.size(); ++i) {\n      detail::OffsetAlias oc;\n      oc.src = src[i];\n      oc.dst = dst[i];\n      oc.offset = offset[i];\n      aliases.push_back(oc);\n    }\n    return aliases;\n  }\n\n  /**\n    * Some blobs can be marked as to be recomputed on backward pass.\n    * For those blobs, we do not want to allocate on each step workspace,\n    * but we instead store that blob in the shared workspace so all\n    * steps can use the same buffer on forward pass.\n    */\n  void initializeBlobsToRecomputeOnBackward(Workspace* sharedBlobsWs) {\n    std::vector<std::string> v;\n    const auto& blobs = OperatorBase::GetRepeatedArgument<std::string>(\n        \"recompute_blobs_on_backward\", v);\n    for (const auto& b : blobs) {\n      // Note: if the blob already was created, this is a no-op.\n      sharedBlobsWs->CreateBlob(b);\n    }\n  }\n\n  std::vector<detail::Link> constructLinks() {\n    std::vector<detail::Link> links;\n    detail::extractLinks(\n        this,\n        \"link_internal\",\n        \"link_external\",\n        \"link_offset\",\n        \"link_window\",\n        &links);\n    return links;\n  }\n\n  template<typename T>\n  bool DoRunWithType() {\n    const auto seqLen = Input(0).dim32(0);\n    const auto batchSize = Input(0).dim32(1);\n    for (const auto& ri : recurrentInputs_) {\n      detail::initializeRecurrentInput<T, Context>(\n          ri, seqLen, batchSize, sharedWs_, &context_);\n    }\n\n    // If we don't have a backward 
step net, this operator is forward_only\n    // and we can avoid creating multiple workspaces.\n\n    bool has_backward_pass =\n        OperatorBase::GetSingleArgument<string>(\"backward_step_net\", \"\") != \"\";\n\n    // With backward pass: we need to create workspace for each timestep\n    detail::ScratchWorkspaces* scratch =\n        OperatorBase::Output<detail::ScratchWorkspaces>(OutputSize() - 1);\n    std::vector<std::shared_ptr<Workspace>>& stepWorkspaces =\n        scratch->stepWorkspaces;\n    std::shared_ptr<Workspace>& sharedBlobsWs = scratch->sharedBlobsWs;\n    if (!sharedBlobsWs) {\n      sharedBlobsWs = std::make_shared<Workspace>(sharedWs_);\n    }\n\n    // Caller can decide that some of the forward activations\n    // are recomputed on backward pass. Then those activations do not\n    // have to be stored in step workspaces but can be shared.\n    initializeBlobsToRecomputeOnBackward(sharedBlobsWs.get());\n\n    if (has_backward_pass && seqLen > stepWorkspaces.size()) {\n      stepWorkspaces.resize(seqLen);\n    }\n\n    // In forward-only mode, we cycle over workspaces. This limits the amount\n    // of parallelism over timesteps that the RNNExecutor provides. So with\n    // RNN executor we use more workspaces to get better perf.\n    int num_workspaces_on_fwd_only = rnnExecutor_ ? 4 : 2;\n\n    if (!has_backward_pass && stepWorkspaces.size() < num_workspaces_on_fwd_only) {\n      // Use alternating stepWorkspaces when forward_only=True.\n      // Note that the step workspaces can be shared by other ops, thus\n      // we cannot shrink it to 2 if there are more than 2 step workspaces.\n      stepWorkspaces.resize(num_workspaces_on_fwd_only);\n    }\n\n    for (auto t = 0; t < seqLen; ++t) {\n      auto& currentStepWorkspace =\n          (has_backward_pass ? 
stepWorkspaces[t] :\n              stepWorkspaces[t % num_workspaces_on_fwd_only]);\n      if (!currentStepWorkspace) {\n        currentStepWorkspace = std::make_shared<Workspace>(sharedBlobsWs.get());\n      }\n\n      if (rnnExecutor_) {\n        if (!has_backward_pass) {\n          // Need to limit timestep parallelism because we cycle over workspaces\n          rnnExecutor_->SetMaxParallelTimesteps(num_workspaces_on_fwd_only);\n        }\n        rnnExecutor_->EnsureTimestepInitialized(t, currentStepWorkspace.get());\n      } else {\n        // Use plain Caffe2 nets\n        detail::UpdateTimestepBlob(currentStepWorkspace.get(), timestep_, t);\n        auto* stepNet = currentStepWorkspace->GetNet(stepNetDef_.name());\n        if (stepNet == nullptr) {\n          stepNet = currentStepWorkspace->CreateNet(stepNetDef_);\n        }\n        CAFFE_ENFORCE(stepNet, \"Step Net construction failure\");\n        // Since we have a SimpleNet, there are no races here.\n        stepNet->RunAsync();\n      }\n    }\n\n    if (rnnExecutor_) {\n      rnnExecutor_->Run(seqLen);\n    }\n\n    for (const auto& alias : aliases_) {\n      detail::applyOffsetAlias<T, Context>(alias, sharedWs_, &context_);\n    }\n\n    return true;\n  }\n\n  bool RunOnDevice() {\n    return DoRunWithType<float>();\n  }\n\n protected:\n  NetDef stepNetDef_;\n  Workspace* sharedWs_;\n  bool enable_rnn_executor_;\n  std::unique_ptr<RecurrentNetworkExecutorBase> rnnExecutor_;\n\n  std::vector<detail::Link> links_;\n  std::vector<detail::OffsetAlias> aliases_;\n  std::vector<detail::RecurrentInput> recurrentInputs_;\n  std::string timestep_;\n};\n\ntemplate <class Context>\nclass RecurrentNetworkGradientOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  RecurrentNetworkGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        sharedWs_(ws),\n        enable_rnn_executor_(OperatorBase::template 
GetSingleArgument<bool>(\n            \"enable_rnn_executor\",\n            false)),\n        timestep_(OperatorBase::template GetSingleArgument<std::string>(\n            \"timestep\",\n            \"timestep\")),\n        gradInputs_(OperatorBase::template GetRepeatedArgument<int32_t>(\n            \"outputs_with_grads\")) {\n    CAFFE_ENFORCE(ws);\n    const auto stepNet =\n        OperatorBase::GetSingleArgument<string>(\"backward_step_net\", \"\");\n\n    if (stepNetDef_.type() == \"rnn\") {\n      stepNetDef_.set_type(\"simple\");\n    }\n\n    CAFFE_ENFORCE(\n        google::protobuf::TextFormat::ParseFromString(stepNet, &stepNetDef_));\n\n    links_ = constructLinks();\n    params_ = constructParams(operator_def);\n    recurrentGradients_ = constructRecurrentGradients(operator_def);\n    recurrentInputIds_ = OperatorBase::template GetRepeatedArgument<int32_t>(\n        \"initial_recurrent_state_ids\");\n\n    /* Add operators to the backward step net to handle accumulation of\n       gradients over timesteps\n    */\n    stepNetDef_.add_external_input(timestep_);\n\n    AddGradientInputAccumulationOps(operator_def);\n    detail::AddApplyLinkOps(\n        links_, timestep_, operator_def.device_option(), &stepNetDef_);\n    AddParamGradientAccumulationOps(operator_def);\n\n    if (FLAGS_caffe2_rnn_executor && enable_rnn_executor_) {\n      InitializeExecutor(operator_def);\n    }\n  }\n\n  // Renaming maps (generated by memonger.py)\n  std::string remappedName(std::string blob_name) {\n    return OperatorBase::template GetSingleArgument<std::string>(\n        blob_name + \".rename\", blob_name);\n  }\n\n  detail::Link remappedLink(const detail::Link& link) {\n    detail::Link renamed_link = link;\n    renamed_link.internal = remappedName(link.internal);\n    renamed_link.external = remappedName(link.external);\n    return renamed_link;\n  }\n\n  void renameOpInputOutput(std::string from_name, std::string to_name) {\n    for (int j = 0; j < 
stepNetDef_.op_size(); j++) {\n      auto* op = stepNetDef_.mutable_op(j);\n      for (int i = 0; i < op->input_size(); i++) {\n        if (op->input(i) == from_name) {\n          op->set_input(i, to_name);\n        }\n      }\n      for (int i = 0; i < op->output_size(); i++) {\n        if (op->output(i) == from_name) {\n          op->set_output(i, to_name);\n        }\n      }\n    }\n  }\n\n  std::vector<detail::Param> constructParams(const OperatorDef& operator_def) {\n    std::vector<detail::Param> params;\n    const auto& param = OperatorBase::GetRepeatedArgument<int32_t>(\"param\");\n    const auto& param_grads =\n        OperatorBase::GetRepeatedArgument<string>(\"param_grads\");\n    CAFFE_ENFORCE(\n        param_grads.empty() || param_grads.size() == param.size(),\n        param.size(),\n        \" != \",\n        param_grads.size());\n    for (int i = 0; i < param.size(); ++i) {\n      detail::Param p;\n      // Forward inputs come after [outputs_with_grads] gradient inputs\n      p.param = operator_def.input(param[i] + gradInputs_.size());\n      // See GetRecurrentNetworkGradient to understand offseting here\n      p.grad = operator_def.output(i + numSequences_);\n\n      std::string grad_blob =\n          param_grads.empty() ? 
p.grad : remappedName(param_grads[i]);\n      p.cellGradient = grad_blob + \"_tmpstep\";\n      params.push_back(p);\n\n      renameOpInputOutput(grad_blob, p.cellGradient);\n    }\n    return params;\n  }\n\n  std::vector<detail::RecurrentGradient> constructRecurrentGradients(\n      const OperatorDef& operator_def) {\n    std::vector<detail::RecurrentGradient> rgs;\n    const auto& recurrent =\n        OperatorBase::GetRepeatedArgument<std::string>(\"recurrent_states\");\n    const auto& alias_src =\n        OperatorBase::GetRepeatedArgument<std::string>(\"alias_src\");\n    const auto& offset =\n        OperatorBase::GetRepeatedArgument<int32_t>(\"alias_offset\");\n\n    for (auto i = 0; i < recurrent.size(); ++i) {\n      detail::RecurrentGradient rg;\n      rg.param = recurrent[i];\n      rg.grad = remappedName(recurrent[i] + \"_grad\");\n\n      for (int j = 0; j < alias_src.size(); ++j) {\n        if (alias_src[j] != recurrent[i]) {\n          continue;\n        }\n        int idx = -1;\n        for (int k = 0; k < gradInputs_.size(); ++k) {\n          if (gradInputs_[k] == j) {\n            idx = k;\n          }\n        }\n        if (idx == -1) {\n          continue;\n        }\n\n        CAFFE_ENFORCE(offset[j] == 1 || offset[j] == -1);\n        if (offset[j] == 1) {\n          rg.externalGrad = operator_def.input(idx);\n        } else if (offset[j] == -1) {\n          rg.lastExternalGrad = operator_def.input(idx);\n        }\n      }\n      rg.offset = 1;\n      rgs.push_back(rg);\n    }\n    return rgs;\n  }\n\n  std::vector<detail::Link> constructLinks() {\n    std::vector<detail::Link> links;\n    detail::extractLinks(\n        this,\n        \"link_internal\",\n        \"link_external\",\n        \"link_offset\",\n        \"link_window\",\n        &links);\n    detail::extractLinks(\n        this,\n        \"backward_link_internal\",\n        \"backward_link_external\",\n        \"backward_link_offset\",\n        \"\",\n        &links);\n    for 
(int i = 0; i < links.size(); i++) {\n      links[i] = remappedLink(links[i]);\n    }\n    return links;\n  }\n\n  void InitializeExecutor(const OperatorDef& operator_def) {\n    VLOG(1) << \"Use RecurrentNetworkExecutor for backward\";\n    auto recurrent_map = detail::GetRecurrentMapping(links_, true /* backward */);\n    rnnExecutor_ = createRNNExecutor<Context>(\n      stepNetDef_, recurrent_map, timestep_, ArgumentHelper(operator_def));\n  }\n\n  void AddGradientInputAccumulationOps(const OperatorDef& operator_def) {\n    /**\n      * Add ops to the step net to accumulate input gradients.\n      */\n    std::vector<OperatorDef> ops;\n    for (const auto& rg : recurrentGradients_) {\n      if (rg.externalGrad.empty()) {\n        continue;\n      }\n      VLOG(1) << \"Accumulating into: \" << rg.grad << \" from \" << rg.externalGrad\n              << \", offset: \" << rg.offset;\n\n      OperatorDef opdef;\n      opdef.set_type(\"rnn_internal_accumulate_gradient_input\");\n      opdef.add_input(timestep_);\n      opdef.add_input(rg.externalGrad);\n      opdef.add_input(rg.grad);\n      opdef.add_output(rg.grad);\n\n      // Add also the linked blobs to outputs, to ensure correct\n      // chaining.\n      for (auto& l : links_) {\n        if (rg.grad == l.external) {\n          Argument* dep_arg = opdef.add_arg();\n          dep_arg->set_name(\"rnn_dependency.\" + l.internal);\n          dep_arg->set_s(l.internal);\n        }\n      }\n\n      opdef.mutable_device_option()->CopyFrom(operator_def.device_option());\n\n      Argument* offset_arg = opdef.add_arg();\n      offset_arg->set_name(\"offset\");\n      offset_arg->set_i(rg.offset);\n      ops.push_back(opdef);\n\n      stepNetDef_.add_external_input(rg.externalGrad);\n      stepNetDef_.add_external_input(rg.grad);\n    }\n    detail::PrependOps(ops, &stepNetDef_);\n  }\n\n  void AddParamGradientAccumulationOps(const OperatorDef& operator_def) {\n    // If a user passes in param_grads mapping, we can copy 
dirrectly\n    // form a blob where backward cell net written data to.\n    // This becomes handy in a case where gradient from the cell net\n    // is an internal blob of the backward cell. This happens, for example,\n    // when SumOp is the first op of the cell\n    for (const auto& param : params_) {\n      OperatorDef opdef;\n      opdef.set_type(\"Sum\");\n      opdef.add_input(param.grad);\n      opdef.add_input(param.cellGradient);\n      opdef.add_output(param.grad);\n      opdef.mutable_device_option()->CopyFrom(operator_def.device_option());\n      stepNetDef_.add_op()->CopyFrom(opdef);\n      stepNetDef_.add_external_input(param.grad);\n    }\n  }\n\n  void CreateSharedBlobs(\n      const std::shared_ptr<Workspace>& step0Ws,\n      Workspace* sharedBlobsWs) {\n    /**\n      * Create all output blobs created by ops of the backward step net, they\n      * can be shared.\n      */\n    for (auto& op : stepNetDef_.op()) {\n      for (const string& outp : op.output()) {\n        if (!step0Ws->HasBlob(outp)) {\n          sharedBlobsWs->CreateBlob(outp);\n        }\n      }\n    }\n  }\n\n  template<typename T>\n  bool DoRunWithType() {\n    const auto seqLen = Input(gradInputs_.size()).dim32(0);\n    VLOG(1) << \"seqLen: \" << seqLen;\n\n    const detail::ScratchWorkspaces& scratch =\n        OperatorBase::Input<detail::ScratchWorkspaces>(InputSize() - 1);\n    const std::vector<std::shared_ptr<Workspace>>& stepWorkspaces =\n        scratch.stepWorkspaces;\n    CAFFE_ENFORCE_GE(stepWorkspaces.size(), seqLen);\n    Workspace& sharedBlobsWs = *scratch.sharedBlobsWs.get();\n\n    const auto batchSize = Input(0).dim32(1);\n    for (auto& param : params_) {\n      auto pBlob = sharedWs_->GetBlob(param.param);\n      CAFFE_ENFORCE(pBlob);\n      const auto& p = pBlob->template Get<Tensor<Context>>();\n\n      auto gBlob = sharedWs_->GetBlob(param.grad);\n      CAFFE_ENFORCE(gBlob);\n      auto* g = gBlob->template GetMutable<Tensor<Context>>();\n      
g->ResizeLike(p);\n      math::Set<T, Context>(\n          g->size(),\n          convert::To<float,T>(0.0),\n          g->template mutable_data<T>(),\n          &context_);\n    }\n\n    for (auto& rg : recurrentGradients_) {\n      auto pBlob = sharedWs_->GetBlob(rg.param);\n      CAFFE_ENFORCE(pBlob);\n      const auto& p = pBlob->template Get<Tensor<Context>>();\n\n      auto gBlob = sharedWs_->CreateBlob(rg.grad);\n      CAFFE_ENFORCE(gBlob);\n      auto* g = gBlob->template GetMutable<Tensor<Context>>();\n      g->ResizeLike(p);\n      CAFFE_ENFORCE_EQ(g->ndim(), 3);\n      const auto timestep = g->size() / g->dim(0);\n      // Fill the last timestep with zeros for the gradient\n      math::Set<T, Context>(\n          timestep,\n          convert::To<float,T>(0.0),\n          g->template mutable_data<T>() + (g->dim(0) - 1) * timestep,\n          &context_);\n    }\n\n    // This code assumes that there are several inputs\n    // sequences. Actually it is not supported by the rest of the code,\n    // and numSequences_ is a constant, equal to 1.\n    for (int i = 0; i < numSequences_; ++i) {\n      // Offseting as the first gradInputs_.size() inputs of the op\n      // are from GO. Then all I(0..N).\n      const int gradientInputIndex = i + gradInputs_.size();\n      const auto& inputName = this->debug_def().input(gradientInputIndex);\n      auto gradientName = remappedName(inputName + \"_grad\");\n      VLOG(1) << \"Initializing gradient for input \" << gradientInputIndex\n              << \" (\" << inputName << \") \"\n              << \" as blob \" << gradientName\n              << \". 
Size: \" << Input(gradientInputIndex).size();\n      auto pGradientBlob = sharedWs_->GetBlob(gradientName);\n      CAFFE_ENFORCE(pGradientBlob);\n      auto* g = pGradientBlob->template GetMutable<Tensor<Context>>();\n      g->ResizeLike(Input(gradientInputIndex));\n      g->template mutable_data<T>();\n    }\n\n    auto accumulateFinalInputGradients = [&]() {\n      for (const auto& rg : recurrentGradients_) {\n        if (rg.lastExternalGrad.empty()) {\n          continue;\n        }\n        VLOG(1) << \"Accumulating into: \" << rg.grad << \" from \"\n                << rg.lastExternalGrad << \" for final time step (sep. blob)\";\n        auto gBlob = sharedWs_->GetBlob(rg.grad);\n        CAFFE_ENFORCE(gBlob);\n        auto* g = gBlob->template GetMutable<Tensor<Context>>();\n\n        auto oglastBlob = sharedWs_->GetBlob(rg.lastExternalGrad);\n        CAFFE_ENFORCE(oglastBlob);\n        const auto& oglast = oglastBlob->template Get<Tensor<Context>>();\n        CAFFE_ENFORCE_EQ(g->dim(1), oglast.dim(1));\n        CAFFE_ENFORCE_EQ(g->dim(2), oglast.dim(2));\n\n        const auto t = g->dim(0) - 1;\n        const auto timestep_size = g->size() / g->dim(0);\n        CAFFE_ENFORCE_EQ(timestep_size, oglast.size());\n        T* g_data_with_offset =\n            g->template mutable_data<T>() + t * timestep_size;\n        math::Add<T, Context>(\n            timestep_size,\n            oglast.template data<T>(),\n            g_data_with_offset,\n            g_data_with_offset,\n            &context_);\n      }\n    };\n\n    accumulateFinalInputGradients();\n\n    // Create shared blobs for blobs that can be shared between\n    // all timesteps.\n    if (stepWorkspaces.size() > 0) {\n      CreateSharedBlobs(stepWorkspaces[0], &sharedBlobsWs);\n    }\n    for (int32_t t = seqLen - 1; t >= 0; --t) {\n      if (rnnExecutor_) {\n        rnnExecutor_->EnsureTimestepInitialized(t, stepWorkspaces[t].get());\n      } else {\n        auto* stepNet = 
stepWorkspaces[t].get()->GetNet(stepNetDef_.name());\n        if (stepNet == nullptr) {\n          stepNet = stepWorkspaces[t].get()->CreateNet(stepNetDef_);\n        }\n        CAFFE_ENFORCE(stepNet);\n        stepNet->RunAsync();\n      }\n    }\n\n    if (rnnExecutor_) {\n      rnnExecutor_->RunBackwards(seqLen);\n    }\n\n    CAFFE_ENFORCE_EQ(recurrentInputIds_.size(), recurrentGradients_.size());\n    for (int i = 0; i < recurrentInputIds_.size(); ++i) {\n      // See GetRecurrentNetworkGradient to understand offseting here\n      // Outputs of the gradient are inputs of the forward pass.\n      // So we need to offset on all inputs that go before recurrent\n      // initial ones\n      auto outputIdx = i + params_.size() + numSequences_;\n      // because first gradInputs_.size() inputs are from GO\n      int inputId = recurrentInputIds_[i] + gradInputs_.size();\n      VLOG(1) << \"Resetting output \" << this->debug_def().output(outputIdx)\n              << \" like input \" << this->debug_def().input(inputId);\n      Output(outputIdx)->ResizeLike(Input(inputId));\n      T* output_data = Output(outputIdx)->template mutable_data<T>();\n      auto pBlob = sharedWs_->GetBlob(recurrentGradients_[i].grad);\n      CAFFE_ENFORCE(pBlob);\n      auto* p = pBlob->template GetMutable<Tensor<Context>>();\n\n      if (Input(inputId).ndim() >= 2) {\n        // Gradient states blob should live. And if it gets changed by the\n        // backward pass, then output should be changed as well. Thus it should\n        // be okay to share data here\n        Output(outputIdx)->template ShareExternalPointer<T>(\n            p->template mutable_data<T>());\n      } else {\n        // We need to do a bunch of Adds any way. So lets not worry about\n        // copy / share data here. 
One way to speed this up could be a kernel\n        // which sums up several tensors together instead of going 1 by 1\n        const auto recurrentStateSize = Input(inputId).dim32(0);\n\n        math::Set<T, Context>(\n            recurrentStateSize,\n            convert::To<float,T>(0.0),\n            output_data,\n            &context_);\n\n        math::AddStripedBatch<T, Context>(\n            recurrentStateSize,\n            p->template data<T>(),\n            output_data,\n            recurrentStateSize,\n            batchSize,\n            &context_);\n      }\n    }\n\n    return true;\n  }\n\n  bool RunOnDevice() {\n    return DoRunWithType<float>();\n  }\n\n protected:\n  NetDef stepNetDef_;\n  Workspace* sharedWs_;\n  bool enable_rnn_executor_;\n  std::unique_ptr<RecurrentNetworkExecutorBase> rnnExecutor_;\n  std::vector<detail::Link> links_;\n  std::vector<detail::Param> params_;\n  std::vector<detail::RecurrentGradient> recurrentGradients_;\n  std::string timestep_;\n  // For now we support only one input sequence\n  const int numSequences_{1};\n  std::vector<int32_t> recurrentInputIds_;\n  std::vector<int32_t> gradInputs_;\n};\n\ntemplate <class Context>\nclass AccumulateInputGradientOp : public Operator<Context> {\n public:\n  AccumulateInputGradientOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        offset_(OperatorBase::GetSingleArgument<int>(\"offset\", -1)) {\n    CAFFE_ENFORCE(offset_ >= 0, \"Offset not set\");\n  }\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  template<typename T>\n  bool DoRunWithType() {\n    const auto t =\n        OperatorBase::Input<Tensor<CPUContext>>(0).template data<int32_t>()[0];\n    auto& og = Input(1);\n    auto* g = Output(0);\n\n    T* g_data = g->template mutable_data<T>();\n    const auto timestep_size = g->size() / g->dim(0);\n\n    CAFFE_ENFORCE(\n        (t + offset_) * timestep_size + timestep_size <= g->size(),\n        \"Accumulation destination address over bounds\");\n    
CAFFE_ENFORCE(\n        t * timestep_size + timestep_size <= og.size(),\n        \"Accumulation source address out of bounds\");\n\n    math::Add<T, Context>(\n        timestep_size,\n        og.template data<T>() + t * timestep_size,\n        g_data + (t + offset_) * timestep_size,\n        g_data + (t + offset_) * timestep_size,\n        &context_);\n    return true;\n  }\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<float>>::call(this, Input(1));\n  }\n\n private:\n  int offset_;\n};\n\ntemplate <class Context>\nclass RNNApplyLinkOp : public Operator<Context> {\n public:\n  RNNApplyLinkOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        offset_(OperatorBase::GetSingleArgument<int>(\"offset\", -1)),\n        window_(OperatorBase::GetSingleArgument<int>(\"window\", -1)) {\n    CAFFE_ENFORCE(offset_ >= 0, \"offset not set\");\n    CAFFE_ENFORCE(window_ >= 0, \"window not set\");\n  }\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  template <typename T>\n  bool DoRunWithType() {\n    // Both internal and external appear as both input and output to enforce\n    // correct dependency computation.\n    const auto t =\n        OperatorBase::Input<Tensor<CPUContext>>(0).template data<int32_t>()[0];\n    auto& external = Input(1);\n\n    auto* internal_out = Output(0);\n    auto* external_out = Output(1);\n\n    CAFFE_ENFORCE_GT(external.size(), 0);\n    const TIndex externalTimestepSize = external.size() / external.dim(0);\n    auto* externalData = external_out->template mutable_data<T>() +\n        (t + offset_) * externalTimestepSize;\n    auto internalDims = external_out->dims();\n    internalDims[0] = window_;\n\n    internal_out->Resize(internalDims);\n    internal_out->ShareExternalPointer(\n        externalData, externalTimestepSize * window_);\n    return true;\n  }\n\n  bool RunOnDevice() override {\n    return DoRunWithType<float>();\n  }\n\n private:\n  int offset_;\n  int window_;\n};\n\n} // namespace 
caffe2\n\n#endif // CAFFE2_OPERATORS_RECURRENT_NETWORK_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/recurrent_op_cudnn.h",
    "content": "#ifndef CAFFE2_OPERATORS_RECURRENT_OP_CUDNN_H_\n#define CAFFE2_OPERATORS_RECURRENT_OP_CUDNN_H_\n\n#include \"caffe2/core/common_cudnn.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/context_gpu.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\nnamespace detail {\n\ntemplate <typename T>\nclass TensorDescriptors {\n public:\n  TensorDescriptors(\n      size_t n,\n      const std::vector<int>& dim,\n      const std::vector<int>& stride);\n  ~TensorDescriptors();\n  const cudnnTensorDescriptor_t* descs() const {\n    return descs_.data();\n  }\n\n private:\n  std::vector<cudnnTensorDescriptor_t> descs_;\n};\n\n} // namespace detail\n\ntemplate <typename T>\nclass RecurrentBaseOp : public Operator<CUDAContext> {\n public:\n  USE_OPERATOR_FUNCTIONS(CUDAContext);\n  RecurrentBaseOp(const OperatorDef& operator_def, Workspace* ws);\n  virtual ~RecurrentBaseOp();\n\n protected:\n  void initialize(\n      const Tensor<CUDAContext>& input,\n      Tensor<CUDAContext>* dropoutStates = nullptr,\n      // If passed, reshapes to the appropriate size\n      Tensor<CUDAContext>* output = nullptr,\n      Tensor<CUDAContext>* hiddenOutput = nullptr,\n      Tensor<CUDAContext>* cellOutput = nullptr);\n\n  CuDNNWrapper cudnn_wrapper_;\n  cudnnDropoutDescriptor_t dropoutDesc_;\n  cudnnRNNDescriptor_t rnnDesc_;\n  cudnnFilterDescriptor_t wDesc_;\n  cudnnTensorDescriptor_t hxDesc_;\n  cudnnTensorDescriptor_t cxDesc_;\n  cudnnTensorDescriptor_t hyDesc_;\n  cudnnTensorDescriptor_t cyDesc_;\n\n  std::unique_ptr<detail::TensorDescriptors<T>> xDesc_;\n  std::unique_ptr<detail::TensorDescriptors<T>> yDesc_;\n\n  std::vector<TIndex> cachedInputDims_;\n  size_t reserveNbytes_;\n  size_t cudnnWsNbytes_;\n\n private:\n};\n\n#define USE_RECURRENT_BASE_FUNCTIONS          \\\n  USE_OPERATOR_FUNCTIONS(CUDAContext);        \\\n  using RecurrentBaseOp<T>::cudnn_wrapper_;   \\\n  using RecurrentBaseOp<T>::dropoutDesc_;     
\\\n  using RecurrentBaseOp<T>::rnnDesc_;         \\\n  using RecurrentBaseOp<T>::wDesc_;           \\\n  using RecurrentBaseOp<T>::hxDesc_;          \\\n  using RecurrentBaseOp<T>::cxDesc_;          \\\n  using RecurrentBaseOp<T>::hyDesc_;          \\\n  using RecurrentBaseOp<T>::cyDesc_;          \\\n  using RecurrentBaseOp<T>::xDesc_;           \\\n  using RecurrentBaseOp<T>::yDesc_;           \\\n  using RecurrentBaseOp<T>::cachedInputDims_; \\\n  using RecurrentBaseOp<T>::reserveNbytes_;   \\\n  using RecurrentBaseOp<T>::cudnnWsNbytes_;   \\\n  using RecurrentBaseOp<T>::initialize;\n\ntemplate <typename T>\nclass RecurrentOp : public RecurrentBaseOp<T> {\n public:\n  USE_RECURRENT_BASE_FUNCTIONS\n  RecurrentOp(const OperatorDef& operator_def, Workspace* ws)\n      : RecurrentBaseOp<T>(operator_def, ws) {}\n\n  bool RunOnDevice() override;\n\n protected:\n  INPUT_TAGS(INPUT, HIDDEN_INPUT, CELL_INPUT, WEIGHT);\n  OUTPUT_TAGS(OUTPUT, HIDDEN_OUTPUT, CELL_OUTPUT, RNN_SCRATCH, DROPOUT_STATES);\n};\n\nenum RecurrentParamOpMode { SET_PARAM, GET_PARAM };\n\ntemplate <typename T, RecurrentParamOpMode mode>\nclass RecurrentParamAccessOp : public RecurrentBaseOp<T> {\n public:\n  USE_RECURRENT_BASE_FUNCTIONS\n  RecurrentParamAccessOp(const OperatorDef& operator_def, Workspace* ws)\n      : RecurrentBaseOp<T>(operator_def, ws) {}\n\n  bool RunOnDevice() override;\n};\n\ntemplate <typename T>\nclass RecurrentGradientOp : public RecurrentBaseOp<T> {\n public:\n  USE_RECURRENT_BASE_FUNCTIONS\n  RecurrentGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : RecurrentBaseOp<T>(operator_def, ws) {}\n\n  bool RunOnDevice() override;\n\n protected:\n  INPUT_TAGS(\n      INPUT,\n      HIDDEN_INPUT,\n      CELL_INPUT,\n      WEIGHT,\n      RNN_SCRATCH,\n      OUTPUT,\n      GRAD_OUTPUT,\n      GRAD_HIDDEN_OUTPUT,\n      GRAD_CELL_OUTPUT);\n  OUTPUT_TAGS(\n      GRAD_INPUT,\n      GRAD_HIDDEN_INPUT,\n      GRAD_CELL_INPUT,\n      GRAD_WEIGHT,\n      DROPOUT_STATES,\n    
  RNN_SCRATCH_OUT);\n};\n\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_RECURRENT_OP_CUDNN_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/reducer_functors.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#ifndef CAFFE2_OPERATORS_RECUDER_FUNCTORS_H_\n#define CAFFE2_OPERATORS_RECUDER_FUNCTORS_H_\n\n#include <array>\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/utils/math.h\"\n#include \"caffe2/utils/proto_utils.h\"\n\nnamespace caffe2 {\n\n////////////////////////////////////////////////////////////////////////////////\n// Range reducers: can leverage that input segment is continuous and provide\n// special implementation\n////////////////////////////////////////////////////////////////////////////////\n\n// Put forward and backward in the same template?\ntemplate <typename T, class Context>\nclass SumRangeReducer;\ntemplate <typename T, class Context>\nclass SumRangeReducerGradient;\n\ntemplate <typename T>\nclass SumRangeReducer<T, CPUContext> {\n public:\n  void operator()(\n      const TIndex block_size,\n      const TIndex blocks,\n      const T* in,\n      T* out,\n      CPUContext* /*context*/) {\n    // do we need to go through wrapper in math.h?\n    EigenVectorMap<T> out_vec(out, block_size);\n    out_vec = ConstEigenMatrixMap<T>(in, block_size, blocks).rowwise().sum();\n  }\n};\n\ntemplate <typename T, class Context>\nclass SumRangeReducerGradient {\n public:\n  void operator()(\n      const TIndex block_size,\n      const TIndex blocks,\n      const T* segment_grad,\n      T* data_grad,\n      const T* /*data_in*/, // unused\n      const T* /*data_out*/, // unused\n      Context* context) {\n    // do we have some op that does it smartly with minimum number of memcpy?\n    for (TIndex i = 0; i < blocks; ++i) {\n      context->template Copy<T, Context, Context>(\n          block_size, segment_grad, data_grad + block_size * i);\n    }\n  }\n};\n\nstruct SumRangeReducerDef {\n  template <typename T, class Context>\n  using Reducer = SumRangeReducer<T, Context>;\n  template <typename T, class Context>\n  using ReducerGradient = 
SumRangeReducerGradient<T, Context>;\n  static constexpr const char* name = \"Sum\";\n  static constexpr const char* doc =\n      \"Summation is done element-wise across slices of the input tensor and \"\n      \"doesn't change the shape of the individual blocks.\";\n};\n\n// Put forward and backward in the same template?\ntemplate <typename T, class Context>\nclass LogSumExpRangeReducer;\ntemplate <typename T, class Context>\nclass LogSumExpRangeReducerGradient;\n\ntemplate <typename T>\nclass LogSumExpRangeReducer<T, CPUContext> {\n public:\n  void operator()(\n      const TIndex block_size,\n      const TIndex blocks,\n      const T* in,\n      T* out,\n      CPUContext* /*context*/) {\n    for (int j = 0; j < block_size; ++j) {\n      T max_value = std::numeric_limits<T>::lowest();\n      for (int i = 0; i < blocks; ++i) {\n        max_value = std::max(max_value, in[i * block_size + j]);\n      }\n      T scaled_exp_sum = 0;\n      for (int i = 0; i < blocks; ++i) {\n        scaled_exp_sum += std::exp(in[i * block_size + j] - max_value);\n      }\n      *(out++) = std::log(scaled_exp_sum) + max_value;\n    }\n  }\n  T r{1};\n};\n\ntemplate <typename T, class Context>\nclass LogSumExpRangeReducerGradient {\n public:\n  void operator()(\n      const TIndex block_size,\n      const TIndex blocks,\n      const T* segment_grad, // GO\n      T* data_grad, // GI\n      const T* data_in, // I\n      const T* data_out, // O\n      Context* /*context*/) {\n    for (int j = 0; j < block_size; ++j) {\n      const T out_grad = *(segment_grad++);\n      const T offset = *(data_out++);\n      for (int i = 0; i < blocks; ++i) {\n        auto idx = i * block_size + j;\n        data_grad[idx] = out_grad * std::exp(data_in[idx] - offset);\n      }\n    }\n  }\n};\n\nstruct LogSumExpRangeReducerDef {\n  template <typename T, class Context>\n  using Reducer = LogSumExpRangeReducer<T, Context>;\n  template <typename T, class Context>\n  using ReducerGradient = 
LogSumExpRangeReducerGradient<T, Context>;\n  static constexpr const char* name = \"LogSumExp\";\n  static constexpr const char* doc =\n      \"LogSumExp computes the element-wise log of the sum of exponentials of \"\n      \"input slices. Operation doesn't change the shape of individual blocks.\";\n};\n\ntemplate <typename T, class Context>\nclass LogMeanExpRangeReducer;\ntemplate <typename T, class Context>\nclass LogMeanExpRangeReducerGradient;\n\ntemplate <typename T>\nclass LogMeanExpRangeReducer<T, CPUContext> {\n public:\n  void operator()(\n      const TIndex block_size,\n      const TIndex blocks,\n      const T* in,\n      T* out,\n      CPUContext* /*context*/) {\n    for (int j = 0; j < block_size; ++j) {\n      T max_value = std::numeric_limits<T>::lowest();\n      for (int i = 0; i < blocks; ++i) {\n        max_value = std::max(max_value, in[i * block_size + j]);\n      }\n      T scaled_exp_sum = 0;\n      for (int i = 0; i < blocks; ++i) {\n        scaled_exp_sum += std::exp(in[i * block_size + j] - max_value);\n      }\n      scaled_exp_sum /= blocks;\n      *(out++) = std::log(scaled_exp_sum) + max_value;\n    }\n  }\n};\n\ntemplate <typename T, class Context>\nclass LogMeanExpRangeReducerGradient {\n public:\n  void operator()(\n      const TIndex block_size,\n      const TIndex blocks,\n      const T* segment_grad, // GO\n      T* data_grad, // GI\n      const T* data_in, // I\n      const T* data_out, // O\n      Context* /*context*/) {\n    for (int j = 0; j < block_size; ++j) {\n      const T out_grad = *(segment_grad++);\n      const T offset = *(data_out++);\n      for (int i = 0; i < blocks; ++i) {\n        auto idx = i * block_size + j;\n        data_grad[idx] = out_grad * std::exp(data_in[idx] - offset) / blocks;\n      }\n    }\n  }\n};\n\nstruct LogMeanExpRangeReducerDef {\n  template <typename T, class Context>\n  using Reducer = LogMeanExpRangeReducer<T, Context>;\n  template <typename T, class Context>\n  using ReducerGradient = 
LogMeanExpRangeReducerGradient<T, Context>;\n  static constexpr const char* name = \"LogMeanExp\";\n  static constexpr const char* doc =\n      \"LogMeanExp computes the element-wise log of the mean of exponentials of \"\n      \"input slices. Operation doesn't change the shape of individual blocks.\";\n};\n\ntemplate <typename T, class Context>\nclass MeanRangeReducer;\ntemplate <typename T, class Context>\nclass MeanRangeReducerGradient;\n\ntemplate <typename T>\nclass MeanRangeReducer<T, CPUContext> {\n public:\n  void operator()(\n      const TIndex block_size,\n      const TIndex blocks,\n      const T* in,\n      T* out,\n      CPUContext* /*context*/) {\n    for (int j = 0; j < block_size; ++j) {\n      T avg_value = 0;\n      for (int i = 0; i < blocks; ++i) {\n        avg_value += in[i * block_size + j] / blocks;\n      }\n      *(out++) = avg_value;\n    }\n  }\n};\n\ntemplate <typename T, class Context>\nclass MeanRangeReducerGradient {\n public:\n  void operator()(\n      const TIndex block_size,\n      const TIndex blocks,\n      const T* segment_grad, // GO\n      T* data_grad, // GI\n      const T* /*data_in*/, // I\n      const T* /*data_out*/, // O\n      Context* /*context*/) {\n    const auto in_grad = 1.0 / blocks;\n    for (int j = 0; j < block_size; ++j) {\n      const T out_grad = *(segment_grad++);\n      for (int i = 0; i < blocks; ++i) {\n        auto idx = i * block_size + j;\n        data_grad[idx] = out_grad * in_grad;\n      }\n    }\n  }\n};\n\nstruct MeanRangeReducerDef {\n  template <typename T, class Context>\n  using Reducer = MeanRangeReducer<T, Context>;\n  template <typename T, class Context>\n  using ReducerGradient = MeanRangeReducerGradient<T, Context>;\n  static constexpr const char* name = \"Mean\";\n  static constexpr const char* doc =\n      \"Mean computation is done element-wise, so that each element of the \"\n      \"output slice corresponds to the average value of the respective \"\n      \"elements in the input 
slices. Operation doesn't change the shape of \"\n      \"individual blocks.\";\n};\n\ntemplate <typename T, class Context>\nclass MaxRangeReducer;\ntemplate <typename T, class Context>\nclass MaxRangeReducerGradient;\n\ntemplate <typename T>\nclass MaxRangeReducer<T, CPUContext> {\n public:\n  void operator()(\n      const TIndex block_size,\n      const TIndex blocks,\n      const T* in,\n      T* out,\n      CPUContext* /*context*/) {\n    for (int j = 0; j < block_size; ++j) {\n      T max_value = std::numeric_limits<T>::lowest();\n      for (int i = 0; i < blocks; ++i) {\n        max_value = std::max(max_value, in[i * block_size + j]);\n      }\n      *(out++) = max_value;\n    }\n  }\n};\n\ntemplate <typename T, class Context>\nclass MaxRangeReducerGradient {\n public:\n  void operator()(\n      const TIndex block_size,\n      const TIndex blocks,\n      const T* segment_grad, // GO\n      T* data_grad, // GI\n      const T* data_in, // I\n      const T* data_out, // O\n      Context* /*context*/) {\n    std::memset(\n        static_cast<void*>(data_grad), 0, blocks * block_size * sizeof(T));\n    for (int j = 0; j < block_size; ++j) {\n      const T out_grad = *(segment_grad++);\n      const T out = data_out[j];\n      for (int i = 0; i < blocks; ++i) {\n        auto idx = i * block_size + j;\n        if (out == data_in[idx]) {\n          data_grad[idx] = out_grad;\n        }\n      }\n    }\n  }\n};\n\nstruct MaxRangeReducerDef {\n  template <typename T, class Context>\n  using Reducer = MaxRangeReducer<T, Context>;\n  template <typename T, class Context>\n  using ReducerGradient = MaxRangeReducerGradient<T, Context>;\n  static constexpr const char* name = \"Max\";\n  static constexpr const char* doc =\n      \"Max computation is done element-wise, so that each element of the \"\n      \"output slice corresponds to the max value of the respective \"\n      \"elements in the input slices. Operation doesn't change the shape of \"\n      \"individual blocks. 
This implementation imitates torch nn.Max operator. \"\n      \"If the maximum value occurs more than once, the operator will return \"\n      \"the first occurence of value. When computing the gradient using the \"\n      \"backward propagation, the gradient input corresponding to the first \"\n      \"occurence of the maximum value will be used.\";\n};\n\n////////////////////////////////////////////////////////////////////////////////\n// Incremental reducers: consume elements one by one\n////////////////////////////////////////////////////////////////////////////////\n\n// Base implementation, everything can be overwritten\nclass BaseReducer {\n public:\n  static constexpr int kInputCount = 1;\n\n  struct Meta {\n    TIndex block_size;\n    vector<TIndex> block_shape;\n    bool first_dim;\n\n    explicit Meta(bool first = true) : first_dim(first) {}\n\n    void computeMeta(const std::vector<TIndex>& dims, int skip_dims) {\n      first_dim ? block_shape.assign(dims.begin() + skip_dims, dims.end())\n                : block_shape.assign(dims.begin(), dims.end() - skip_dims);\n      block_size = first_dim ? 
size_from_dim_(skip_dims, dims)\n                             : size_from_dim_(dims.size() - skip_dims, dims);\n    }\n\n    void\n    observeInput(int input, const Tensor<CPUContext>& value, int skip_dims) {\n      DCHECK_EQ(0, input);\n      auto& dims = value.dims();\n      computeMeta(dims, skip_dims);\n    }\n\n    void appendOutputShape(vector<TIndex>* output_shape) {\n      output_shape->insert(\n          output_shape->end(), block_shape.begin(), block_shape.end());\n    }\n\n    vector<TIndex> getOutputShape(const TensorShape& in, int skip_dims) {\n      vector<TIndex> dims(in.dims().begin(), in.dims().end());\n      computeMeta(dims, skip_dims);\n      return block_shape;\n    }\n  };\n\n  template <int FixedSize>\n  void finish(const Meta& /*meta*/, CPUContext* /*context*/) {}\n};\n\nclass BaseReducerGradient {\n public:\n  // which of the original inputs are required for gradient computation\n  static constexpr std::array<int, 0> originalInputs() {\n    return std::array<int, 0>();\n  }\n\n  static constexpr bool computeLength() {\n    return false;\n  }\n\n  static int numAuxInputsWithGrads(const OperatorDef& /*def*/) {\n    return 0;\n  }\n\n  static bool requiresDataInput(const OperatorDef& /*def*/) {\n    return false;\n  }\n\n  // True if the backward op requires the output of the forward op.\n  static bool requiresForwardOutput() {\n    return false;\n  }\n\n  struct Meta {\n    TIndex block_size;\n    vector<TIndex> block_shape;\n    bool first_dim;\n\n    Meta(\n        const Tensor<CPUContext>& out_grad,\n        int skip_dims,\n        bool first_dim = true)\n        : first_dim(first_dim) {\n      auto& dims = out_grad.dims();\n      first_dim ? block_shape.assign(dims.begin() + skip_dims, dims.end())\n                : block_shape.assign(dims.begin(), dims.end() - skip_dims);\n      block_size = first_dim\n          ? 
out_grad.size_from_dim(skip_dims)\n          : out_grad.size_from_dim(out_grad.ndim() - skip_dims);\n    }\n\n    void observeOriginalInput(\n        int /*original_input*/,\n        const Tensor<CPUContext>& /*value*/,\n        Tensor<CPUContext>* /*input_grad*/, // optional grad to populate\n        int /*skip_dims*/) {}\n\n    void appendGradShape(vector<TIndex>* output_shape) {\n      output_shape->insert(\n          output_shape->end(), block_shape.begin(), block_shape.end());\n    }\n  };\n};\n\n// Put forward and backward in the same template?\ntemplate <typename T, class Context>\nclass SumReducer;\ntemplate <typename T, class Context>\nclass SumReducerGradient;\n\ntemplate <typename T>\nclass SumReducer<T, CPUContext> : public BaseReducer {\n public:\n  using FixedDispatch = FixedValues<1>;\n\n  SumReducer(const Meta& meta, T* out, CPUContext* /*context*/)\n      : current_size_(0), out_(out) {\n    // add a wrapper in Context for it\n    if (meta.first_dim) {\n      memset(out, 0, sizeof(T) * meta.block_size);\n    }\n  }\n  template <int FixedSize>\n  void process(\n      const Meta& meta,\n      const T* in,\n      TIndex /*offset*/,\n      CPUContext* context) {\n    if (meta.first_dim) {\n      math::AxpyFixedSize<T, CPUContext, FixedSize>(\n          meta.block_size, 1, in, out_, context);\n    } else {\n      math::Sum<T, CPUContext>(\n          meta.block_size, in, out_ + current_size_++, context);\n    }\n  }\n\n private:\n  int current_size_;\n  T* out_;\n};\n\ntemplate <typename T, class Context>\nclass SumReducerGradient : public BaseReducerGradient {\n public:\n  using FixedDispatch = FixedValues<1>;\n\n  SumReducerGradient(\n      const Meta& /*meta*/,\n      const T* s_grad,\n      CPUContext* /*context*/)\n      : s_grad_(s_grad) {}\n\n  template <int FixedSize>\n  void fillGrad(\n      const Meta& meta,\n      T* data_grad,\n      TIndex offset,\n      Context* context,\n      const int length) {\n    if (FixedSize == 1) { // static if\n   
   *data_grad = *s_grad_;\n    } else if (meta.first_dim) {\n      context->template Copy<T, Context, Context>(\n          meta.block_size, s_grad_, data_grad);\n    } else {\n      math::Set<T, Context>(length, s_grad_[offset], data_grad, context);\n    }\n  }\n\n private:\n  const T* s_grad_;\n};\n\nstruct SumReducerDef {\n  template <typename T, class Context>\n  using Reducer = SumReducer<T, Context>;\n  template <typename T, class Context>\n  using ReducerGradient = SumReducerGradient<T, Context>;\n  static constexpr const char* name = \"Sum\";\n  static constexpr const char* doc =\n      \"Summation is done element-wise across slices of the input tensor and \"\n      \"doesn't change the shape of the individual blocks.\";\n  static void PopulateSchema(OpSchema& /*schema*/) {}\n};\n\n// Put forward and backward in the same template?\ntemplate <typename T, class Context>\nclass WeightedSumReducer;\ntemplate <typename T, class Context>\nclass WeightedSumReducerGradient;\n\ntemplate <typename T>\nclass WeightedSumReducer<T, CPUContext> : public BaseReducer {\n public:\n  static constexpr int kInputCount = 2;\n\n  using FixedDispatch = FixedValues<1>;\n\n  struct Meta : BaseReducer::Meta {\n    const T* scalars;\n\n    bool first_dim;\n\n    explicit Meta(bool first = true) : first_dim(first) {}\n\n    void\n    observeInput(int input, const Tensor<CPUContext>& value, int skip_dims) {\n      if (input == 1) {\n        CAFFE_ENFORCE_EQ(\n            skip_dims, value.ndim(), \"SCALARS mustn't have extra dimensions\");\n        scalars = value.data<T>();\n        return;\n      }\n      BaseReducer::Meta::observeInput(input, value, skip_dims);\n    }\n  };\n\n  WeightedSumReducer(const Meta& meta, T* out, CPUContext* /*context*/)\n      : out_(out) {\n    // do we have a wrapper for it?\n    memset(out, 0, sizeof(T) * meta.block_size);\n  }\n  template <int FixedSize>\n  void\n  process(const Meta& meta, const T* in, TIndex offset, CPUContext* context) {\n    
CAFFE_ENFORCE(\n        meta.first_dim,\n        \"WeightedSumReducer implemented only for \"\n        \"front dimensions reduction\");\n    math::AxpyFixedSize<T, CPUContext, FixedSize>(\n        meta.block_size, meta.scalars[offset], in, out_, context);\n  }\n\n private:\n  T* out_;\n};\n\ntemplate <typename T, class Context>\nclass WeightedSumReducerGradient : public BaseReducerGradient {\n public:\n  // which of the original inputs are required for gradient computation\n  static constexpr std::array<int, 1> originalInputs() {\n    return {1};\n  }\n\n  static int numAuxInputsWithGrads(const OperatorDef& def) {\n    return GetFlagArgument(def, \"grad_on_weights\");\n  }\n\n  static bool requiresDataInput(const OperatorDef& def) {\n    return numAuxInputsWithGrads(def) > 0;\n  }\n\n  using FixedDispatch = FixedValues<1>;\n\n  struct Meta : public BaseReducerGradient::Meta {\n    const T* scalars;\n    T* scalars_grad;\n\n    using BaseReducerGradient::Meta::Meta;\n\n    void observeOriginalInput(\n        int original_input,\n        const Tensor<CPUContext>& value,\n        Tensor<CPUContext>* input_grad, // optional grad to populate\n        int /*skip_dims*/) {\n      CAFFE_ENFORCE_EQ(1, original_input);\n      scalars = value.data<T>();\n      if (input_grad) {\n        input_grad->ResizeLike(value);\n        scalars_grad = input_grad->mutable_data<T>();\n      }\n    }\n  };\n\n  WeightedSumReducerGradient(\n      const Meta& /*meta*/,\n      const T* s_grad,\n      CPUContext* /*context*/)\n      : s_grad_(s_grad) {}\n\n  template <int FixedSize>\n  void fillGrad(\n      const Meta& meta,\n      T* data_grad,\n      TIndex offset,\n      Context* context,\n      const int /*length*/) {\n    math::ScaleFixedSize<T, CPUContext, FixedSize>(\n        meta.block_size, meta.scalars[offset], s_grad_, data_grad, context);\n  }\n\n  // Special version which is called with the main input too, used only if\n  // additional input grad is requested\n  template <int 
FixedSize>\n  void fillGradWithMainInput(\n      const Meta& meta,\n      const T* data,\n      T* data_grad,\n      TIndex offset,\n      Context* context,\n      const int /*length*/) {\n    math::ScaleFixedSize<T, CPUContext, FixedSize>(\n        meta.block_size, meta.scalars[offset], s_grad_, data_grad, context);\n    math::Dot(\n        meta.block_size, s_grad_, data, meta.scalars_grad + offset, context);\n  }\n\n private:\n  const T* s_grad_;\n};\n\nstruct WeightedSumReducerDef {\n  template <typename T, class Context>\n  using Reducer = WeightedSumReducer<T, Context>;\n  template <typename T, class Context>\n  using ReducerGradient = WeightedSumReducerGradient<T, Context>;\n  static constexpr const char* name = \"WeightedSum\";\n  static constexpr const char* doc =\n      \"Input slices are first scaled by SCALARS and then summed element-wise. \"\n      \"It doesn't change the shape of the individual blocks.\";\n  static void PopulateSchema(OpSchema& schema) {\n    schema.Input(0, \"DATA\", \"Input tensor for the summation\");\n    schema.Input(\n        1,\n        \"SCALARS\",\n        \"Scalar multipliers for the input slices. Must be a vector with the \"\n        \"length matching the first dimension of DATA\");\n    schema.Arg(\n        \"grad_on_weights\",\n        \"Produce also gradient for `weights`. 
For now it's only supported in \"\n        \"`Lengths`-based operators\");\n  }\n};\n\ntemplate <typename T, class Context>\nclass MeanReducer;\ntemplate <typename T, class Context>\nclass MeanReducerGradient;\n\ntemplate <typename T>\nclass MeanReducer<T, CPUContext> : public BaseReducer {\n public:\n  using FixedDispatch = FixedValues<1>;\n\n  MeanReducer(const Meta& meta, T* out, CPUContext* /*context*/)\n      : out_(out), current_size_(0) {\n    if (meta.first_dim) {\n      memset(out, 0, sizeof(T) * meta.block_size);\n    }\n  }\n\n  template <int FixedSize>\n  void process(\n      const Meta& meta,\n      const T* in,\n      TIndex /*offset*/,\n      CPUContext* context) {\n    if (meta.first_dim) {\n      math::AxpyFixedSize<T, CPUContext, FixedSize>(\n          meta.block_size, 1, in, out_, context);\n    } else {\n      math::Sum<T, CPUContext>(\n          meta.block_size, in, out_ + current_size_, context);\n    }\n    current_size_++;\n  }\n\n  template <int FixedSize>\n  void finish(const Meta& meta, CPUContext* context) {\n    if (meta.first_dim) {\n      if (current_size_ > 0) {\n        math::ScaleFixedSize<T, CPUContext, FixedSize>(\n            meta.block_size, 1.0 / current_size_, out_, out_, context);\n      }\n    } else {\n      math::ScaleFixedSize<T, CPUContext, FixedSize>(\n          current_size_, 1.0 / meta.block_size, out_, out_, context);\n    }\n  }\n\n private:\n  T* out_;\n  int current_size_;\n};\n\ntemplate <typename T, class Context>\nclass MeanReducerGradient : public BaseReducerGradient {\n public:\n  static constexpr bool computeLength() {\n    return true;\n  }\n\n  using FixedDispatch = FixedValues<1>;\n\n  MeanReducerGradient(\n      const Meta& /*meta*/,\n      const T* s_grad,\n      CPUContext* /*context*/)\n      : s_grad_(s_grad) {}\n\n  template <int FixedSize>\n  void fillGrad(\n      const Meta& meta,\n      T* data_grad,\n      TIndex offset,\n      Context* context,\n      const int length) {\n    
CAFFE_ENFORCE_GT(length, 0, \"Segment length must be > 0\");\n    if (meta.first_dim) {\n      math::ScaleFixedSize<T, CPUContext, FixedSize>(\n          meta.block_size, 1.0 / length, s_grad_, data_grad, context);\n    } else {\n      math::Set<T, CPUContext>(\n          length, s_grad_[offset] * 1.0f / length, data_grad, context);\n    }\n  }\n\n private:\n  const T* s_grad_;\n};\n\nstruct MeanReducerDef {\n  template <typename T, class Context>\n  using Reducer = MeanReducer<T, Context>;\n  template <typename T, class Context>\n  using ReducerGradient = MeanReducerGradient<T, Context>;\n  static constexpr const char* name = \"Mean\";\n  static constexpr const char* doc =\n      \"Mean computes the element-wise mean of the input slices. \"\n      \"Operation doesn't change the shape of the individual blocks.\";\n  static void PopulateSchema(OpSchema& /*schema*/) {}\n};\n\ntemplate <typename T, class Context>\nclass MaxReducer;\ntemplate <typename T, class Context>\nclass MaxReducerGradient;\n\ntemplate <typename T>\nclass MaxReducer<T, CPUContext> : public BaseReducer {\n public:\n  using FixedDispatch = FixedValues<1>;\n\n  MaxReducer(const Meta& meta, T* out, CPUContext* /*context*/)\n      : out_(out), current_size_(0) {}\n\n  template <int FixedSize>\n  void process(\n      const Meta& meta,\n      const T* in,\n      TIndex /*offset*/,\n      CPUContext* context) {\n    CAFFE_ENFORCE(\n        meta.first_dim,\n        \"MaxReducer implemented only for front dimensions reduction\");\n    if (current_size_ > 0) {\n      EigenVectorMap<T> output_vec(out_, meta.block_size);\n      output_vec =\n          output_vec.cwiseMax(ConstEigenVectorMap<T>(in, meta.block_size));\n    } else {\n      memcpy(out_, in, sizeof(T) * meta.block_size);\n    }\n    ++current_size_;\n  }\n\n private:\n  T* out_;\n  int current_size_;\n};\n\ntemplate <typename T, class Context>\nclass MaxReducerGradient : public BaseReducerGradient {\n public:\n  static bool requiresDataInput(const 
OperatorDef& /*def*/) {\n    return true;\n  }\n\n  static bool requiresForwardOutput() {\n    return true;\n  }\n\n  using FixedDispatch = FixedValues<1>;\n\n  MaxReducerGradient(\n      const Meta& /*meta*/,\n      const T* s_grad,\n      CPUContext* /*context*/)\n      : s_grad_(s_grad) {}\n\n  template <int FixedSize>\n  void fillGradWithMainInputAndForwardOutput(\n      const Meta& meta,\n      const T* data,\n      T* data_grad,\n      const T* forward_output,\n      TIndex /*offset*/,\n      Context* /*context*/,\n      const int /*length*/) {\n    for (TIndex i = 0; i < meta.block_size; ++i) {\n      data_grad[i] = data[i] == forward_output[i] ? s_grad_[i] : 0;\n    }\n  }\n\n private:\n  const T* s_grad_;\n};\n\nstruct MaxReducerDef {\n  template <typename T, class Context>\n  using Reducer = MaxReducer<T, Context>;\n  template <typename T, class Context>\n  using ReducerGradient = MaxReducerGradient<T, Context>;\n  static constexpr const char* name = \"Max\";\n  static constexpr const char* doc =\n      \"Max computes the element-wise max of the input slices. \"\n      \"Operation doesn't change the shape of the individual blocks.\";\n  static void PopulateSchema(OpSchema& /*schema*/) {}\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_RECUDER_FUNCTORS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/reduction_front_back_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_REDUCTION_FRONT_BACK_OPS_H_\n#define CAFFE2_OPERATORS_REDUCTION_FRONT_BACK_OPS_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context, bool FIRSTDIMS>\nclass MaxReduceDimsOp final : public Operator<Context> {\n public:\n  MaxReduceDimsOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        num_reduce_dims_(\n            OperatorBase::GetSingleArgument<int32_t>(\"num_reduce_dim\", 1)) {}\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() {\n    auto& X = Input(0);\n    auto* Y = Output(0);\n\n    const int rows = FIRSTDIMS ? X.size_to_dim(num_reduce_dims_)\n                               : X.size_to_dim(X.ndim() - num_reduce_dims_);\n    const int cols = FIRSTDIMS ? X.size_from_dim(num_reduce_dims_)\n                               : X.size_from_dim(X.ndim() - num_reduce_dims_);\n\n    vector<TIndex> output_shape;\n    int start_index = FIRSTDIMS ? num_reduce_dims_ : 0;\n    int end_index =\n        FIRSTDIMS ? 
X.dims().size() : X.dims().size() - num_reduce_dims_;\n\n    for (int i = start_index; i < end_index; ++i) {\n      output_shape.push_back(X.dims()[i]);\n    }\n    Y->Resize(output_shape);\n\n    if (cols == 0 || rows == 0) {\n      return true;\n    }\n\n    const float* data = X.template data<float>();\n    float* out_data = Y->template mutable_data<float>();\n    Compute(rows, cols, data, out_data);\n    return true;\n  }\n\n protected:\n  void Compute(int rows, int cols, const float* data, float* out_data);\n\n  int num_reduce_dims_;\n};\n\ntemplate <typename T, class Context, bool FIRSTDIMS>\nclass MaxReduceDimsGradientOp final : public Operator<Context> {\n public:\n  MaxReduceDimsGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        num_reduce_dims_(\n            OperatorBase::GetSingleArgument<int32_t>(\"num_reduce_dim\", 1)) {}\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    auto& dY = Input(0);\n    auto& X = Input(1);\n    auto& Y = Input(2);\n    auto* dX = Output(0);\n\n    dX->ResizeLike(X);\n    const int rows = FIRSTDIMS ? X.size_to_dim(num_reduce_dims_)\n                               : X.size_to_dim(X.ndim() - num_reduce_dims_);\n    const int cols = FIRSTDIMS ? X.size_from_dim(num_reduce_dims_)\n                               : X.size_from_dim(X.ndim() - num_reduce_dims_);\n\n    const float* dYdata = dY.template data<float>();\n    const float* Xdata = X.template data<float>();\n    const float* Ydata = Y.template data<float>();\n\n    float* dXdata = dX->template mutable_data<float>();\n    Compute(rows, cols, dYdata, Xdata, Ydata, dXdata);\n    return true;\n  }\n\n protected:\n  void Compute(\n      int rows,\n      int cols,\n      const float* dYdata,\n      const float* Xdata,\n      const float* Ydata,\n      float* dXdata);\n\n  int num_reduce_dims_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_REDUCTION_FRONT_BACK_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/reduction_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_REDUCTION_OPS_H_\n#define CAFFE2_OPERATORS_REDUCTION_OPS_H_\n\n#include \"caffe2/core/common_omp.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass SumElementsOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  SumElementsOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        average_(OperatorBase::GetSingleArgument<bool>(\"average\", false)) {}\n  SumElementsOp(const OperatorDef& operator_def, Workspace* ws, bool average)\n      : Operator<Context>(operator_def, ws), average_(average) {}\n  ~SumElementsOp() {}\n\n  bool RunOnDevice() override\n// TODO: T21635002 fix float-divide-by-zero undefined behavior\n#if defined(__has_feature)\n#if __has_feature(__address_sanitizer__)\n      __attribute__((__no_sanitize__(\"float-divide-by-zero\")))\n#endif\n#endif\n  {\n    auto& X = Input(0);\n    auto* sum = Output(0);\n    sum->Resize(vector<TIndex>());\n    T* data = sum->template mutable_data<T>();\n    math::Sum<T, Context>(\n      X.size(), X.template data<T>(), data, &context_, &scratch_);\n    if (average_) {\n      math::Scale<T, Context>(\n          1,\n          static_cast<T>(1.) 
/ X.size(),\n          sum->template data<T>(),\n          data,\n          &context_);\n    }\n    return true;\n  }\n\n private:\n  bool average_;\n  Tensor<Context> scratch_;\n};\n\ntemplate <typename T, class Context>\nclass SumElementsGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  SumElementsGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        average_(OperatorBase::GetSingleArgument<bool>(\"average\", false)) {}\n  SumElementsGradientOp(\n      const OperatorDef& operator_def,\n      Workspace* ws,\n      bool average)\n      : Operator<Context>(operator_def, ws), average_(average) {}\n  ~SumElementsGradientOp() {}\n\n  bool RunOnDevice() override;\n\n private:\n  bool average_;\n};\n\ntemplate <class Context>\nclass SumSqrElementsOp : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(SumSqrElementsOp)\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<float>>::call(this, Input(0));\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    bool average = OperatorBase::GetSingleArgument<bool>(\"average\", false);\n    auto& X = Input(0);\n    auto* sum = Output(0);\n    sum->Resize(vector<TIndex>());\n    math::SumSqr<T, Context>(\n        X.size(),\n        X.template data<T>(),\n        sum->template mutable_data<T>(),\n        &context_,\n        &scratch_);\n    if (average) {\n      math::Scale<T, Context>(\n          1,\n          float(1.) 
/ X.size(),\n          sum->template data<T>(),\n          sum->template mutable_data<T>(),\n          &context_);\n    }\n    return true;\n  }\n\n private:\n  Tensor<Context> scratch_;\n};\n\ntemplate <typename T, class Context, bool ROWWISE>\nclass MaxReductionOp : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(MaxReductionOp)\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    auto& X = Input(0);\n    CAFFE_ENFORCE_EQ(X.ndim(), 3);\n\n    const int batch_size = X.dim32(0);\n    const int M = X.dim32(1);\n    const int N = X.dim32(2);\n\n    auto* Y = Output(0);\n    ROWWISE ? Y->Resize(batch_size, M) : Y->Resize(batch_size, N);\n\n    if (ROWWISE) {\n      math::RowwiseMax<T, Context>(\n          batch_size * M,\n          N,\n          X.template data<T>(),\n          Y->template mutable_data<T>(),\n          &context_);\n    } else {\n      const int input_size = N * M;\n      for (int i = 0; i < batch_size; ++i) {\n        math::ColwiseMax<T, Context>(\n            M,\n            N,\n            X.template data<T>() + i * input_size,\n            Y->template mutable_data<T>() + i * N,\n            &context_);\n      }\n    }\n    return true;\n  }\n};\n\ntemplate <typename T, class Context, bool ROWWISE>\nclass MaxReductionGradientOp : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(MaxReductionGradientOp)\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n};\n\n} // namespace caffe2\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/relu_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_RELU_OP_H_\n#define CAFFE2_OPERATORS_RELU_OP_H_\n\n#include \"caffe2/core/common_omp.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass ReluOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(ReluOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n};\n\ntemplate <typename T, class Context>\nclass ReluGradientOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(ReluGradientOp);\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  // Input: Y, dY; Output: dX\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_RELU_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/remove_data_blocks_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_REMOVE_DATA_BLOCKS_OP_H_\n#define CAFFE2_OPERATORS_REMOVE_DATA_BLOCKS_OP_H_\n\n#include <algorithm>\n#include <vector>\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass RemoveDataBlocksOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(RemoveDataBlocksOp);\n  USE_DISPATCH_HELPER;\n\n  bool RunOnDevice() override {\n    if (Input(INDICES).dims()[0] == 0) {\n      Output(0)->CopyFrom(Input(0));\n      return true;\n    } else {\n      return DispatchHelper<TensorTypes<int, long>>::call(this, Input(INDICES));\n    }\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    const auto& data = Input(DATA);\n    const auto& indices = Input(INDICES);\n    CAFFE_ENFORCE(data.ndim() > 0, \"DATA should be at leat 1-D.\");\n    CAFFE_ENFORCE(indices.ndim() == 1, \"INDICES should be 1-D.\");\n\n    const auto outer_size = data.dims()[0];\n    const auto block_size = data.size_from_dim(1);\n    const auto block_size_bytes = block_size * data.meta().itemsize();\n    auto indices_size = indices.dims()[0];\n    const char* data_ptr = (char*)data.raw_data();\n    const auto* ind_ptr = indices.template data<T>();\n\n    std::vector<T> ind_vec;\n    for (int64_t i = 0; i < indices_size; i++) {\n      ind_vec.push_back(ind_ptr[i]);\n    }\n    std::sort(ind_vec.begin(), ind_vec.end());\n    CAFFE_ENFORCE(ind_vec[0] >= 0, \"The min index should be larger than zero.\");\n    CAFFE_ENFORCE(\n        ind_vec[indices_size - 1] < outer_size,\n        \"The max index should be smaller than the data outer size.\");\n    // removes duplicate indices\n    ind_vec.erase(std::unique(ind_vec.begin(), ind_vec.end()), ind_vec.end());\n    indices_size = ind_vec.size();\n\n    auto* output = Output(0);\n    auto shape = data.dims();\n    shape[0] -= indices_size;\n    output->Resize(shape);\n    char* out_ptr = 
(char*)output->raw_mutable_data(data.meta());\n\n    ind_vec.insert(ind_vec.begin(), -1);\n    int64_t ind_vec_size = ind_vec.size();\n    for (auto i = 0; i < ind_vec_size; i++) {\n      int64_t interval_start = ind_vec[i] + 1;\n      int64_t interval_end =\n          (i == ind_vec_size - 1) ? outer_size : ind_vec[i + 1];\n      auto num_items = interval_end - interval_start;\n      context_.template CopyItems<Context, Context>(\n          data.meta(),\n          num_items * block_size,\n          data_ptr + block_size_bytes * interval_start,\n          out_ptr);\n      out_ptr += block_size_bytes * num_items;\n    }\n\n    return true;\n  }\n\n private:\n  INPUT_TAGS(DATA, INDICES);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_REMOVE_DATA_BLOCKS_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/replace_nan_op.h",
    "content": "#ifndef CAFFE_OPERATORS_REPLACE_NAN_OP_H_\n#define CAFFE_OPERATORS_REPLACE_NAN_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass ReplaceNaNOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  ReplaceNaNOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<float, double>>::call(this, Input(0));\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    T value = OperatorBase::GetSingleArgument<T>(\"value\", 0);\n\n    auto& input = Input(0);\n    auto* output = Output(0);\n    output->ResizeLike(input);\n\n    const T* input_data = input.template data<T>();\n    T* output_data = output->template mutable_data<T>();\n    for (TIndex i = 0; i < input.size(); i++) {\n      if (std::isnan(input_data[i])) {\n        output_data[i] = value;\n      } else {\n        output_data[i] = input_data[i];\n      }\n    }\n\n    return true;\n  }\n};\n\n} // namespace caffe2\n\n#endif // CAFFE_OPERATORS_REPLACE_NAN_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/reshape_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_RESHAPE_OP_H_\n#define CAFFE2_OPERATORS_RESHAPE_OP_H_\n\n#include \"caffe2/core/common_omp.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\n// Takes a shape and data tensor and reshapes it\ntemplate <typename F, class Context>\nclass ReshapeOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  ReshapeOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        new_shape_(OperatorBase::GetRepeatedArgument<int64_t>(\"shape\")) {}\n\n  bool RunOnDevice() override {\n    if (InputSize() == 2) {\n      return DispatchHelper<TensorTypes<int, int64_t>>::call(this, Input(1));\n    }\n    CAFFE_ENFORCE(\n        OperatorBase::HasArgument(\"shape\"), \"Argument `shape` is missing.\");\n    return this->template DoRunWithType<int64_t>();\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    auto& input = Input(0);\n\n    vector<int64_t> actual_new_shape = new_shape_;\n    if (InputSize() == 2) {\n      CAFFE_ENFORCE(\n          !OperatorBase::HasArgument(\"shape\"),\n          \"New shape is specified by the input blob, do not pass in \"\n          \"the argument `shape`.\");\n\n      auto& shape = Input(1);\n      CAFFE_ENFORCE(shape.ndim() == 1, \"Shape should be 1-D\");\n\n      const T* shape_data = shape.template data<T>();\n\n      // Bit awkward, but needed so works on both CPU and CUDA contexts\n      std::vector<T> tmpv(shape.size());\n      context_.template CopyBytes<Context, CPUContext>(\n          shape.size() * sizeof(T), shape_data, &tmpv[0]);\n      actual_new_shape.assign(tmpv.begin(), tmpv.begin() + shape.size());\n    }\n\n    // Copy over the dimensions for those that are specified zero.\n    for (int i = 0; i < actual_new_shape.size(); ++i) {\n      if (actual_new_shape[i] == 0) {\n        actual_new_shape[i] = input.dim(i);\n      }\n    }\n\n    
// Checks if the new shape is valid and fills in the missing dimension\n    // specified by -1.\n    // NOTE: At most one dimension can be -1.\n    auto total_size = input.size_from_dim(0);\n    T size = 1;\n    int unknown_idx = -1;\n    for (int i = 0; i < actual_new_shape.size(); ++i) {\n      const auto dim = actual_new_shape[i];\n      if (dim == -1) {\n        CAFFE_ENFORCE(\n            unknown_idx == -1,\n            \"Argument `shape` has more than one missing dimension.\");\n        unknown_idx = i;\n      } else {\n        size *= dim;\n      }\n    }\n\n    if (unknown_idx != -1) {\n      CAFFE_ENFORCE(\n          total_size % size == 0,\n          \"Argument `shape` does not agree with the input data.\",\n          \" (\",\n          total_size,\n          \" vs \",\n          size,\n          \")\");\n      actual_new_shape[unknown_idx] = total_size / size;\n    } else {\n      CAFFE_ENFORCE_EQ(\n          total_size,\n          size,\n          \"Argument `shape` does not agree with the input data.\",\n          \" (\",\n          total_size,\n          \" != \",\n          size,\n          \")\");\n    }\n\n    // Write the original shape to the second output.\n    auto* old_shape = Output(1);\n    old_shape->Resize(input.ndim());\n    T* old_shape_data = old_shape->template mutable_data<T>();\n    for (int i = 0; i < input.ndim(); ++i) {\n      math::Set<T, Context>(1, input.dim(i), old_shape_data + i, &context_);\n    }\n\n    auto* output = Output(0);\n    output->Resize(actual_new_shape);\n    if (output != &input) {\n      // If we are not doing in-place computation, a copy is needed.\n      context_.template CopyBytes<Context, Context>(\n          input.nbytes(),\n          input.raw_data(),\n          output->raw_mutable_data(input.meta()));\n    }\n\n    return true;\n  }\n\n private:\n  vector<int64_t> new_shape_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_RESHAPE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/resize_op.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass ResizeNearestOp final : public Operator<Context> {\n public:\n  ResizeNearestOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws), width_scale_(1), height_scale_(1) {\n    if (HasArgument(\"width_scale\")) {\n      width_scale_ = static_cast<T>(\n          OperatorBase::GetSingleArgument<float>(\"width_scale\", 1));\n    }\n    if (HasArgument(\"height_scale\")) {\n      height_scale_ = static_cast<T>(\n          OperatorBase::GetSingleArgument<float>(\"height_scale\", 1));\n    }\n    CAFFE_ENFORCE_GT(width_scale_, 0);\n    CAFFE_ENFORCE_GT(height_scale_, 0);\n  }\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  T width_scale_;\n  T height_scale_;\n};\n\ntemplate <typename T, class Context>\nclass ResizeNearestGradientOp final : public Operator<Context> {\n public:\n  ResizeNearestGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws), width_scale_(1), height_scale_(1) {\n    width_scale_ = static_cast<T>(\n        OperatorBase::GetSingleArgument<float>(\"width_scale\", 1));\n    height_scale_ = static_cast<T>(\n        OperatorBase::GetSingleArgument<float>(\"height_scale\", 1));\n    CAFFE_ENFORCE_GT(width_scale_, 0);\n    CAFFE_ENFORCE_GT(height_scale_, 0);\n  }\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  T width_scale_;\n  T height_scale_;\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/reverse_packed_segs_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_REVERSE_PACKED_SEGS_OP_H_\n#define CAFFE2_OPERATORS_REVERSE_PACKED_SEGS_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass ReversePackedSegsOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(ReversePackedSegsOp);\n  USE_DISPATCH_HELPER;\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<float, double, int, long, bool>>::call(\n        this, Input(DATA));\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    if (Input(LENGTHS).template IsType<int>()) {\n      DoRunWithLengthType<T, int>();\n    } else {\n      DoRunWithLengthType<T, long>();\n    }\n    return true;\n  }\n\n private:\n  INPUT_TAGS(DATA, LENGTHS);\n\n  template <typename T, typename LengthType>\n  void DoRunWithLengthType() {\n    const auto& data = Input(DATA);\n    const auto& lengths = Input(LENGTHS);\n\n    CAFFE_ENFORCE(\n        data.ndim() == 3,\n        \"DATA should be 3-D tensor <lengths, \"\n        \"segments, embeddings>\");\n    CAFFE_ENFORCE(lengths.ndim() == 1, \"LENGTH should be 1-D\");\n\n    auto* output = Output(0);\n    const auto& shape = data.dims();\n    output->Resize(shape);\n\n    const auto& max_length = data.dims()[0];\n    const auto& batch_size = data.dims()[1];\n    const auto& block_size = data.dims()[2];\n    CAFFE_ENFORCE(\n        lengths.dims()[0] == batch_size,\n        \"lenths size should be\"\n        \" equal to batch size\");\n\n    const T* data_ptr = data.template data<T>();\n    const LengthType* lengths_ptr = lengths.template data<LengthType>();\n\n    vector<LengthType> lengths_host(batch_size);\n    context_.template Copy<LengthType, Context, CPUContext>(\n        batch_size, lengths_ptr, &lengths_host[0]);\n    context_.FinishDeviceComputation();\n\n    T* rev_data_ptr = output->template mutable_data<T>();\n    for (TIndex i = 0; i < 
batch_size; i++) {\n      const auto& seg_length = lengths_host[i];\n      CAFFE_ENFORCE_LE(seg_length, max_length);\n      TIndex j = 0;\n      for (; j < seg_length; j++) {\n        const T* data_block_ptr = data_ptr + (j * batch_size + i) * block_size;\n        T* rev_data_block_ptr =\n            rev_data_ptr + ((seg_length - 1 - j) * batch_size + i) * block_size;\n        context_.template Copy<T, Context, Context>(\n            block_size, data_block_ptr, rev_data_block_ptr);\n      }\n      for (; j < max_length; j++) {\n        const T* data_block_ptr = data_ptr + (j * batch_size + i) * block_size;\n        T* rev_data_block_ptr =\n            rev_data_ptr + (j * batch_size + i) * block_size;\n        context_.template Copy<T, Context, Context>(\n            block_size, data_block_ptr, rev_data_block_ptr);\n      }\n    }\n  }\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_REVERSE_PACKED_SEGS_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/rmac_regions_op.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#ifndef CAFFE2_OPERATORS_RMAC_REGIONS_OP_H\n#define CAFFE2_OPERATORS_RMAC_REGIONS_OP_H\n\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass RMACRegionsOp final : public Operator<Context> {\n public:\n  RMACRegionsOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        scales_(OperatorBase::GetSingleArgument<int>(\"scales\", 3)),\n        overlap_(OperatorBase::GetSingleArgument<float>(\"overlap\", 0.4)) {}\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  int scales_;\n  float overlap_;\n  Tensor<Context> num_rois_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_RMAC_REGIONS_OP_H\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/roi_pool_op.h",
    "content": "#ifndef ROI_POOL_OP_H_\n#define ROI_POOL_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass RoIPoolOp final : public Operator<Context> {\n public:\n  RoIPoolOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        is_test_(OperatorBase::GetSingleArgument<int>(OpSchema::Arg_IsTest, 0)),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))),\n        pooled_height_(OperatorBase::GetSingleArgument<int>(\"pooled_h\", 1)),\n        pooled_width_(OperatorBase::GetSingleArgument<int>(\"pooled_w\", 1)),\n        spatial_scale_(\n            OperatorBase::GetSingleArgument<float>(\"spatial_scale\", 1.)) {\n    CAFFE_ENFORCE(\n        (is_test_ && OutputSize() == 1) || (!is_test_ && OutputSize() == 2),\n        \"Output size mismatch.\");\n    CAFFE_ENFORCE_GT(spatial_scale_, 0);\n    CAFFE_ENFORCE_GT(pooled_height_, 0);\n    CAFFE_ENFORCE_GT(pooled_width_, 0);\n    CAFFE_ENFORCE_EQ(\n        order_, StorageOrder::NCHW, \"Only NCHW order is supported right now.\");\n  }\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  bool is_test_;\n  StorageOrder order_;\n  int pooled_height_;\n  int pooled_width_;\n  float spatial_scale_;\n};\n\ntemplate <typename T, class Context>\nclass RoIPoolGradientOp final : public Operator<Context> {\n public:\n  RoIPoolGradientOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        spatial_scale_(\n            OperatorBase::GetSingleArgument<float>(\"spatial_scale\", 1.)),\n        pooled_height_(OperatorBase::GetSingleArgument<int>(\"pooled_h\", 1)),\n        pooled_width_(OperatorBase::GetSingleArgument<int>(\"pooled_w\", 1)),\n        order_(StringToStorageOrder(\n            
OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))) {\n    CAFFE_ENFORCE_GT(spatial_scale_, 0);\n    CAFFE_ENFORCE_GT(pooled_height_, 0);\n    CAFFE_ENFORCE_GT(pooled_width_, 0);\n    CAFFE_ENFORCE_EQ(\n        order_, StorageOrder::NCHW, \"Only NCHW order is supported right now.\");\n  }\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    CAFFE_NOT_IMPLEMENTED;\n  }\n\n protected:\n  float spatial_scale_;\n  int pooled_height_;\n  int pooled_width_;\n  StorageOrder order_;\n};\n\n} // namespace caffe2\n\n#endif // ROI_POOL_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/rowmul_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_ROW_MUL_H_\n#define CAFFE2_OPERATORS_ROW_MUL_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\n// A hacky version of Mul with broadcast\n// RowMul([mat, w], [output])\ntemplate <typename T, class Context>\nclass RowMulOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(RowMulOp);\n\n  bool RunOnDevice() override {\n    auto& mat = Input(0);\n    auto& w = Input(1);\n    auto* output = Output(0);\n\n    output->ResizeLike(mat);\n    T* output_data = output->template mutable_data<T>();\n    const T* mat_data = mat.template data<T>();\n    const T* w_data = w.template data<T>();\n\n    // Dimension checking\n    CAFFE_ENFORCE_EQ(\n        w.size(),\n        mat.dim32(0),\n        \"Length of w should be equal to the first dim of mat\");\n\n    auto block_size = mat.size_from_dim(1);\n    for (int i = 0; i < w.size(); i++) {\n      size_t offset = i * block_size;\n      for (int j = 0; j < block_size; j++) {\n        output_data[offset + j] = mat_data[offset + j] * w_data[i];\n      }\n    }\n\n    return true;\n  }\n};\n\n// A hacky version\ntemplate <typename T, class Context>\nclass ReduceTailSumOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(ReduceTailSumOp);\n\n  bool RunOnDevice() override {\n    auto& mat = Input(0);\n    auto* output = Output(0);\n\n    int N = mat.dim32(0);\n    int block_size = mat.size_from_dim(1);\n\n    output->Resize(N);\n    T* output_data = output->template mutable_data<T>();\n    const T* mat_data = mat.template data<T>();\n\n    for (int i = 0; i < N; i++) {\n      output_data[i] = 0;\n      size_t offset = i * block_size;\n      for (int j = 0; j < block_size; j++) {\n        output_data[i] += mat_data[offset + j];\n      }\n    }\n    return true;\n  }\n};\n\n} // 
namespace caffe2\n\n#endif // CAFFE2_OPERATORS_ROW_MUL_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/scale_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_SCALE_OP_H_\n#define CAFFE2_OPERATORS_SCALE_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass ScaleOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  ScaleOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        scale_(OperatorBase::GetSingleArgument<float>(\"scale\", 1.0)) {}\n\n  template <typename T>\n  bool DoRunWithType() {\n    auto& X = Input(0);\n    auto* Y = Output(0);\n    Y->ResizeLike(X);\n    math::Scale<T, Context>(\n        X.size(),\n        scale_,\n        X.template data<T>(),\n        Y->template mutable_data<T>(),\n        &context_);\n    return true;\n  }\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<float>>::call(this, Input(0));\n  }\n\n protected:\n  float scale_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_SCALE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/segment_reduction_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_SEGMENT_REDUCTION_OP_H_\n#define CAFFE2_OPERATORS_SEGMENT_REDUCTION_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/reducer_functors.h\"\n\nnamespace caffe2 {\n\ntemplate <typename TData>\nclass BaseInputAccessor {\n public:\n  BaseInputAccessor() {}\n\n  bool observeInput(const Tensor<CPUContext>& dataInput) {\n    data_ = dataInput.raw_data();\n    return dataInput.template IsType<TData>();\n  }\n\n  inline const TData*\n  getBlockPtr(TIndex in_block_size, TIndex idx, TIndex /* blocks */ = 1) {\n    return static_cast<const TData*>(data_) + in_block_size * idx;\n  }\n\n protected:\n  const void* data_ = nullptr;\n};\n\n////////////////////////////////////////////////////////////////////////////////\n// Range reducer ops: leverage that input segment is continuous and allow\n// reducer functors to do something special\n// Note: for now there are no real use cases for it yet :)\n// Also, doesn't support additional arguments for now\n////////////////////////////////////////////////////////////////////////////////\n\n/**\n * Base implementation for segment reduction op that leverages continuity of the\n * data\n *\n * Assumes that segments are sorted and there are no skip indices\n */\ntemplate <\n    typename T,\n    typename SIndex,\n    class Context,\n    class RangeReducer,\n    class InputAccessor = BaseInputAccessor<T>>\nclass AbstractSortedSegmentRangeOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(AbstractSortedSegmentRangeOp);\n\n  bool RunOnDevice() override {\n    auto& dataInput = Input(DATA);\n    auto& segment_ids = Input(SEGMENT_IDS);\n    auto* output = Output(0);\n\n    CAFFE_ENFORCE_EQ(1, segment_ids.ndim(), \"SEGMENT_IDS must be a vector\");\n    auto N = segment_ids.dim(0);\n    CAFFE_ENFORCE_EQ(\n        N,\n        dataInput.dim(0),\n        \"SEGMENT_IDS 
must have the same length as outer dimension of DATA\");\n\n    OPERATOR_NEEDS_FEATURE(\n        inputAccessor_.observeInput(dataInput),\n        \"Unsupported input type: \",\n        dataInput.meta().name(),\n        \".\");\n\n    const SIndex* s_ids = segment_ids.template data<SIndex>();\n\n    const SIndex K = N > 0 ? s_ids[N - 1] + 1 : 0;\n    auto shape = dataInput.dims();\n    shape[0] = K;\n    output->Resize(shape);\n\n    T* out = output->template mutable_data<T>();\n\n    if (N == 0) {\n      return true;\n    }\n\n    TIndex block_size = dataInput.size() / N;\n\n    // Assume the segments are sorted and there are no gaps\n    CAFFE_ENFORCE_EQ(0, s_ids[0], \"Indices must be sorted and not have gaps\");\n    for (TIndex i = 0; i < N;) {\n      TIndex start = i;\n      for (++i; i < N && s_ids[start] == s_ids[i]; ++i)\n        ;\n\n      RangeReducer()(\n          block_size,\n          i - start,\n          inputAccessor_.getBlockPtr(block_size, start, i - start),\n          out + block_size * s_ids[start],\n          &context_);\n\n      // check correctness of the next segment\n      if (i < N) {\n        CAFFE_ENFORCE_EQ(\n            s_ids[start] + 1,\n            s_ids[i],\n            \"Indices must be sorted and not have gaps\");\n      }\n    }\n    return true;\n  }\n\n  static constexpr int kNumInputs = 2;\n  INPUT_TAGS(DATA, SEGMENT_IDS);\n\n private:\n  InputAccessor inputAccessor_;\n};\n\ntemplate <\n    typename T,\n    typename SIndex,\n    class Context,\n    class RangeReducerGradient>\nclass AbstractSortedSegmentRangeGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(AbstractSortedSegmentRangeGradientOp);\n\n  bool RunOnDevice() override {\n    // TODO(azzolini): avoid using input/output if not used by a particular op\n    auto& data_in = Input(DATA_IN);\n    auto& data_out = Input(DATA_OUT);\n    auto& segment_grads = Input(SEGMENT_GRADS);\n    auto& segment_ids = 
Input(SEGMENT_IDS);\n    auto* data_grads = Output(0);\n\n    CAFFE_ENFORCE_EQ(1, segment_ids.ndim(), \"SEGMENT_IDS must be a vector\");\n    TIndex N = segment_ids.dim(0);\n\n    const SIndex* s_ids = segment_ids.template data<SIndex>();\n    const T* s_grads = segment_grads.template data<T>();\n    const T* d_in = data_in.template data<T>();\n    const T* d_out = data_out.template data<T>();\n\n    auto shape = segment_grads.dims();\n    shape[0] = N;\n    data_grads->Resize(shape);\n\n    const SIndex K = segment_grads.dim(0);\n    T* out = data_grads->template mutable_data<T>();\n\n    if (N == 0) {\n      return true;\n    }\n\n    TIndex block_size = segment_grads.size_from_dim(1);\n\n    // Assume the segments are sorted and there are no gaps\n    CAFFE_ENFORCE_EQ(0, s_ids[0], \"Indices must be sorted and not have gaps\");\n    // repeat the check from forward op\n    CAFFE_ENFORCE_EQ(\n        K - 1, s_ids[N - 1], \"Indices must be sorted and not have gaps\");\n    for (TIndex i = 0; i < N;) {\n      TIndex start = i;\n      for (++i; i < N && s_ids[start] == s_ids[i]; ++i)\n        ;\n\n      auto expanded_idx = block_size * start;\n      auto reduced_idx = block_size * s_ids[start];\n      RangeReducerGradient()(\n          block_size,\n          i - start,\n          s_grads + reduced_idx,\n          out + expanded_idx,\n          d_in + expanded_idx,\n          d_out + reduced_idx,\n          &context_);\n\n      // check correctness of the next segment\n      if (i < N) {\n        CAFFE_ENFORCE_EQ(\n            s_ids[start] + 1,\n            s_ids[i],\n            \"Indices must be sorted and not have gaps\");\n      }\n    }\n    return true;\n  }\n\n  static constexpr int kNumInputs = 4;\n  INPUT_TAGS(DATA_IN, DATA_OUT, SEGMENT_GRADS, SEGMENT_IDS);\n};\n\ntemplate <typename T, typename SIndex, typename Context, typename ReducerDef>\nstruct AbstractSortedSegmentRangeDef {\n  using OpDef = ReducerDef;\n  static constexpr const char* basename = 
\"SortedSegmentRange\";\n  static constexpr const char* doc = R\"DOC(\nApplies '{op}' to each segment of input tensor. In order to allow for more\nefficient implementation of '{op}', the input segments have to be contiguous\nand non-empty.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\n{op_doc}\n  )DOC\";\n  static void PopulateSchema(OpSchema& schema) {\n    schema.Input(0, \"DATA\", \"Input tensor to be aggregated\");\n    schema.Input(\n        1,\n        \"SEGMENT_IDS\",\n        \"Vector with the same length as the first dimension of DATA \"\n        \"and values in the range 0..K-1 and in increasing order that \"\n        \"maps each slice of DATA to one of the segments\");\n    schema.Output(\n        0,\n        \"OUTPUT\",\n        \"Aggregated tensor with the first dimension of K and the \"\n        \"other dimentsions inherited from DATA\");\n  }\n  using ForwardOp = AbstractSortedSegmentRangeOp<\n      T,\n      SIndex,\n      Context,\n      typename ReducerDef::template Reducer<T, Context>>;\n  using BackwardOp = AbstractSortedSegmentRangeGradientOp<\n      T,\n      SIndex,\n      Context,\n      typename ReducerDef::template ReducerGradient<T, Context>>;\n  struct GetGradient : public GradientMakerBase {\n    using GradientMakerBase::GradientMakerBase;\n    vector<OperatorDef> GetGradientDefs() override {\n      return SingleGradientDef(\n          string(basename) + ReducerDef::name + \"Gradient\",\n          \"\",\n          vector<string>{I(0), O(0), GO(0), I(1)},\n          // no gradient on segment_ids!\n          vector<string>{GI(0)});\n    }\n  };\n};\n\n////////////////////////////////////////////////////////////////////////////////\n// Incremental 
reducer ops: assume that reducer consumes pieces of data one by\n// one. Also, supports additional arguments passed to reducer, e.g. scalers for\n// weighted sum.\n//\n// Note: in current implementation additional inputs are considered auxiliary\n// constants and have limitations:\n// - there is no gradient computation for auxiliary inputs\n// - auxiliary inputs aren't affected by fused embedding lookup in operations\n// like sparse_sorted_segment\n////////////////////////////////////////////////////////////////////////////////\n\n/**\n * @brief Simple non-segmented reduction over the first few dimensions of the\n * tensor\n *\n * Inputs:\n *   0: DATA - input embedding to do lookups in\n *   1..P: AUX_ARG_<I> - optional additional arguments to be passed to the\n *                       reducer\n *\n * Args:\n *   num_reduce_dim (default 1) - the number of dims in front of the tensor to\n *                                reduce\n *\n * Output:\n *   Tensor without the first `num_dim` dimensions of DATA\n */\ntemplate <\n    typename T,\n    class Context,\n    class Reducer,\n    bool FirstDim,\n    class InputAccessor = BaseInputAccessor<T>>\nclass AbstractReduceFrontOrBackOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  AbstractReduceFrontOrBackOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        OP_SINGLE_ARG(int, \"num_reduce_dim\", num_reduce_dims_, 1) {}\n\n  bool RunOnDevice() override {\n    auto& data = Input(0);\n    // If more complicated fixed size logic becomes necessary, it can be moved\n    // to the reducer class\n    TIndex in_block_size = FirstDim\n        ? 
data.size_from_dim(num_reduce_dims_)\n        : data.size_to_dim(data.ndim() - num_reduce_dims_);\n    return DispatchHelper<typename Reducer::FixedDispatch>::call(\n        this, in_block_size);\n  }\n\n  template <int FixedSize>\n  bool DoRunWithValue() {\n    auto& data = Input(0);\n    auto* output = Output(0);\n\n    CAFFE_ENFORCE_LE(num_reduce_dims_, data.ndim());\n\n    typename Reducer::Meta ctx(FirstDim);\n    ctx.observeInput(0, data, num_reduce_dims_);\n    for (int i = 1; i < Reducer::kInputCount; ++i) {\n      auto& aux_in = Input(i);\n      ctx.observeInput(i, aux_in, num_reduce_dims_);\n    }\n\n    OPERATOR_NEEDS_FEATURE(\n        inputAccessor_.observeInput(data),\n        \"Unsupported input type: \",\n        data.meta().name(),\n        \".\");\n\n    vector<TIndex> shape;\n    ctx.appendOutputShape(&shape);\n    output->Resize(shape);\n\n    T* out = output->template mutable_data<T>();\n\n    const int block_size = FirstDim\n        ? data.size_from_dim(num_reduce_dims_)\n        : data.size_from_dim(data.ndim() - num_reduce_dims_);\n\n    const int num_blocks = block_size > 0 ? 
data.size() / block_size : 0;\n\n    Reducer r(ctx, out, &context_);\n    for (TIndex i = 0; i < num_blocks; ++i) {\n      r.template process<FixedSize>(\n          ctx, inputAccessor_.getBlockPtr(block_size, i), i, &context_);\n    }\n    r.template finish<FixedSize>(ctx, &context_);\n    return true;\n  }\n\n  static constexpr int kNumInputs = Reducer::kInputCount;\n\n private:\n  int num_reduce_dims_;\n  InputAccessor inputAccessor_;\n};\n\ntemplate <\n    typename T,\n    class Context,\n    class ReducerGradient,\n    bool FirstDim = true>\nclass AbstractReduceFrontOrBackGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  AbstractReduceFrontOrBackGradientOp(\n      const OperatorDef& operator_def,\n      Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        OP_SINGLE_ARG(int, \"num_reduce_dim\", num_reduce_dims_, 1) {}\n\n  bool RunOnDevice() override {\n    // If more complicated fixed size logic becomes necessary, it can be moved\n    // to the reducer class\n    TIndex grad_block_size = Input(REDUCTION_GRAD).size();\n    return DispatchHelper<typename ReducerGradient::FixedDispatch>::call(\n        this, grad_block_size);\n  }\n\n  template <int FixedSize>\n  bool DoRunWithValue() {\n    auto& reduction_grad = Input(REDUCTION_GRAD);\n    auto& source_shape = OperatorBase::Input<TensorCPU>(SOURCE_SHAPE);\n\n    auto* data_grads = Output(0);\n\n    typename ReducerGradient::Meta ctx(reduction_grad, 0, FirstDim);\n    for (int i = 0; i < ReducerGradient::originalInputs().size(); ++i) {\n      auto& aux_in = Input(i);\n      ctx.observeOriginalInput(\n          ReducerGradient::originalInputs()[i],\n          aux_in,\n          nullptr, /*no grad*/\n          num_reduce_dims_);\n    }\n\n    const T* r_grad = reduction_grad.template data<T>();\n\n    CAFFE_ENFORCE_LE(num_reduce_dims_, source_shape.size());\n\n    vector<TIndex> shape(\n        source_shape.template data<TIndex>(),\n        
source_shape.template data<TIndex>() + source_shape.size());\n\n    data_grads->Resize(shape);\n\n    TIndex block_size = FirstDim\n        ? data_grads->size_from_dim(num_reduce_dims_)\n        : data_grads->size_from_dim(data_grads->ndim() - num_reduce_dims_);\n    TIndex block_num = block_size > 0 ? data_grads->size() / block_size : 0;\n\n    T* out = data_grads->template mutable_data<T>();\n\n    ReducerGradient r(ctx, r_grad, &context_);\n    for (TIndex i = 0; i < block_num; ++i) {\n      r.template fillGrad<FixedSize>(\n          ctx,\n          out + block_size * i,\n          i,\n          &context_,\n          FirstDim ? block_num : block_size);\n    }\n    return true;\n  }\n\n  static constexpr int kNumInputs =\n      ReducerGradient::originalInputs().size() + 2;\n  enum _InputTags {\n    REDUCTION_GRAD = ReducerGradient::originalInputs().size(),\n    SOURCE_SHAPE\n  };\n\n private:\n  int num_reduce_dims_;\n};\n\ntemplate <typename T, typename Context, typename ReducerDef>\nstruct AbstractReduceFrontDef {\n  using OpDef = ReducerDef;\n  static constexpr const char* basename = \"ReduceFront\";\n  static constexpr const char* doc = R\"DOC(\nReduces the input tensor along the first dimension of the input tensor by\napplying '{op}'. 
This op acts in a similar way to SortedSegment{op} and\nUnsortedSegment{op} but as if all input slices belong to a single segment.\n\n{op_doc}\n  )DOC\";\n  static void PopulateSchema(OpSchema& schema) {\n    schema.Input(\n        0, \"DATA\", \"Input tensor to be reduced on the first dimension\");\n    schema.TensorInferenceFunction([](const OperatorDef& def,\n                                      const vector<TensorShape>& in) {\n      CAFFE_ENFORCE_EQ(1, in.size());\n      ArgumentHelper helper(def);\n      int num_reduce_dims = helper.GetSingleArgument<int>(\"num_reduce_dim\", 1);\n      typename ReducerDef::template Reducer<T, Context>::Meta ctx(true);\n      vector<TIndex> out_dims = ctx.getOutputShape(in[0], num_reduce_dims);\n      return vector<TensorShape>{\n          CreateTensorShape(out_dims, in[0].data_type())};\n    });\n    ReducerDef::PopulateSchema(schema);\n  }\n  using ReducerGradient =\n      typename ReducerDef::template ReducerGradient<T, Context>;\n  using ForwardOp = AbstractReduceFrontOrBackOp<\n      T,\n      Context,\n      typename ReducerDef::template Reducer<T, Context>,\n      true>;\n  using BackwardOp =\n      AbstractReduceFrontOrBackGradientOp<T, Context, ReducerGradient, true>;\n  struct GetGradient : public GradientMakerBase {\n    using GradientMakerBase::GradientMakerBase;\n    vector<OperatorDef> GetGradientDefs() override {\n      // Have utility function generating these names?\n      string tmp_dims = \"_\" + O(0) + \"_dims\";\n\n      vector<string> grad_ins;\n      for (const int i : ReducerGradient::originalInputs()) {\n        grad_ins.push_back(I(i));\n      }\n      grad_ins.push_back(GO(0));\n      grad_ins.push_back(tmp_dims);\n\n      vector<Argument> args;\n      if (ArgumentHelper::HasArgument(def_, \"num_reduce_dim\")) {\n        args.push_back(GetArgument(def_, \"num_reduce_dim\"));\n      }\n      // FIXME: pass in num_reduce_dims?!\n      return vector<OperatorDef>{\n          CreateOperatorDef(\n         
     \"Shape\", \"\", vector<string>{I(0)}, vector<string>{tmp_dims}),\n          CreateOperatorDef(\n              string(basename) + ReducerDef::name + \"Gradient\",\n              \"\",\n              grad_ins,\n              // no gradient on auxiliary inputs for now\n              vector<string>{GI(0)}),\n      };\n    }\n  };\n};\n\ntemplate <typename T, typename Context, typename ReducerDef>\nstruct AbstractReduceBackDef {\n  using OpDef = ReducerDef;\n  static constexpr const char* basename = \"ReduceBack\";\n  static constexpr const char* doc = R\"DOC(\nReduces the input tensor along the last dimension of the input tensor by\napplying '{op}'. This op acts in a similar way to SortedSegment{op} and\nUnsortedSegment{op} but as if all input slices belong to a single segment.\n\n{op_doc}\n  )DOC\";\n  static void PopulateSchema(OpSchema& schema) {\n    schema.Input(\n        0, \"DATA\", \"Input tensor to be reduced on the first dimension\");\n    schema.TensorInferenceFunction([](const OperatorDef& def,\n                                      const vector<TensorShape>& in) {\n      CAFFE_ENFORCE_EQ(1, in.size());\n      ArgumentHelper helper(def);\n      int num_reduce_dims = helper.GetSingleArgument<int>(\"num_reduce_dim\", 1);\n      typename ReducerDef::template Reducer<T, Context>::Meta ctx(false);\n      vector<TIndex> out_dims = ctx.getOutputShape(in[0], num_reduce_dims);\n      return vector<TensorShape>{\n          CreateTensorShape(out_dims, in[0].data_type())};\n    });\n    ReducerDef::PopulateSchema(schema);\n  }\n  using ReducerGradient =\n      typename ReducerDef::template ReducerGradient<T, Context>;\n  using ForwardOp = AbstractReduceFrontOrBackOp<\n      T,\n      Context,\n      typename ReducerDef::template Reducer<T, Context>,\n      false>;\n  using BackwardOp =\n      AbstractReduceFrontOrBackGradientOp<T, Context, ReducerGradient, false>;\n  struct GetGradient : public GradientMakerBase {\n    using 
GradientMakerBase::GradientMakerBase;\n    vector<OperatorDef> GetGradientDefs() override {\n      // Have utility function generating these names?\n      string tmp_dims = \"_\" + O(0) + \"_dims\";\n\n      vector<string> grad_ins;\n      for (const int i : ReducerGradient::originalInputs()) {\n        grad_ins.push_back(I(i));\n      }\n      grad_ins.push_back(GO(0));\n      grad_ins.push_back(tmp_dims);\n\n      vector<Argument> args;\n      if (ArgumentHelper::HasArgument(def_, \"num_reduce_dim\")) {\n        args.push_back(GetArgument(def_, \"num_reduce_dim\"));\n      }\n      // FIXME: pass in num_reduce_dims?!\n      return vector<OperatorDef>{\n          CreateOperatorDef(\n              \"Shape\", \"\", vector<string>{I(0)}, vector<string>{tmp_dims}),\n          CreateOperatorDef(\n              string(basename) + ReducerDef::name + \"Gradient\",\n              \"\",\n              grad_ins,\n              // no gradient on auxiliary inputs for now\n              vector<string>{GI(0)}),\n      };\n    }\n  };\n};\n\n/**\n * @brief Segment reduction op with optional fused embedding lookup\n *\n * Base implementation for SortedSegmentXXX and SparseSortedSegmentXXX depending\n * on SparseFused static argument.\n *\n * Inputs:\n *   0: DATA - input embedding to do lookups in\n *   1..P: AUX_ARG_<I> - optional additional arguments to be passed to the\n *                       reducer, should have the same first dimension as\n *                       SEGMENT_IDS (e.g. scalars in WeightedSum)\n *   # if SparseFused == true:\n *   P+1: INDICES - 1-D vector with indices to look up in DATA. Should have the\n *                  same dimension as SEGMENT_IDS\n *   # P+1 if SparseFused == false:\n *   P+1 or P+2: SEGMENT_IDS - sorted segment ids 1-D vector\n *\n * Output:\n *   Tensor with first dimension of K, where K is the max segment id + 1. 
Rest\n *   of dimensions are decided by reducer but usually are the same size as extra\n *   dimensions of DATA\n */\ntemplate <\n    typename T,\n    typename SIndex,\n    class Context,\n    class Reducer,\n    bool SparseFused = true,\n    class InputAccessor = BaseInputAccessor<T>>\nclass AbstractSortedSegmentOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(AbstractSortedSegmentOp);\n\n  bool RunOnDevice() override {\n    if (SparseFused) {\n      return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n          this, Input(INDICES));\n    } else {\n      // type doesn't matter\n      return DoRunWithType<TIndex>();\n    }\n  }\n\n  template <typename IndexType>\n  bool DoRunWithType() {\n    // If more complicated fixed size logic becomes necessary, it can be moved\n    // to the reducer class\n    TIndex in_block_size = Input(0).size_from_dim(1);\n    return DispatchHelper<typename Reducer::FixedDispatch, IndexType>::call(\n        this, in_block_size);\n  }\n\n  template <typename IndexType, int FixedSize>\n  bool DoRunWithValue() {\n    auto& dataInput = Input(0);\n    auto& segment_ids = Input(SEGMENT_IDS);\n    auto* output = Output(0);\n\n    CAFFE_ENFORCE_EQ(1, segment_ids.ndim(), \"SEGMENT_IDS must be a vector\");\n    TIndex N = segment_ids.dim(0);\n    const TIndex M = dataInput.dim(0);\n\n    const IndexType* idxs;\n    if (SparseFused) { // static if\n      auto& indices = Input(INDICES);\n      CAFFE_ENFORCE_EQ(1, indices.ndim(), \"INDICES must be a vector\");\n      CAFFE_ENFORCE_EQ(\n          N,\n          indices.dim(0),\n          \"SEGMENT_IDS must have the same length as INDICES\");\n      idxs = indices.template data<IndexType>();\n    } else {\n      CAFFE_ENFORCE_EQ(\n          N, M, \"DATA must have the same first dimension as SEGMENT_IDS\");\n    }\n\n    // It would probably look nicer with varargs templates but it's too much\n    // metaprogramming\n    typename Reducer::Meta 
ctx;\n    ctx.observeInput(0, dataInput, 1);\n    for (int i = 1; i < Reducer::kInputCount; ++i) {\n      auto& aux_in = Input(i);\n      CAFFE_ENFORCE_EQ(\n          N,\n          aux_in.dim(0),\n          \"Input \",\n          i,\n          \" must have the same first dim as SEGMENT_IDS\");\n      ctx.observeInput(i, aux_in, 1);\n    }\n\n    OPERATOR_NEEDS_FEATURE(\n        inputAccessor_.observeInput(dataInput),\n        \"Unsupported input type: \",\n        dataInput.meta().name(),\n        \".\");\n\n    const SIndex* s_ids = segment_ids.template data<SIndex>();\n\n    const SIndex K = N > 0 ? s_ids[N - 1] + 1 : 0;\n    vector<TIndex> shape;\n    shape.push_back(K);\n    ctx.appendOutputShape(&shape);\n    output->Resize(shape);\n\n    T* out = output->template mutable_data<T>();\n    if (N == 0) {\n      return true;\n    }\n    TIndex in_block_size = dataInput.size_from_dim(1);\n    TIndex out_block_size = output->size_from_dim(1);\n\n    // Assume the segments are sorted and there are no gaps\n    CAFFE_ENFORCE_EQ(0, s_ids[0], \"Indices must be sorted and not have gaps\");\n    for (TIndex i = 0; i < N;) {\n      TIndex start = i;\n\n      Reducer r(ctx, out + out_block_size * s_ids[start], &context_);\n      for (; i < N && s_ids[start] == s_ids[i]; ++i) {\n        IndexType idx;\n        if (SparseFused) { // static if\n          CAFFE_ENFORCE(\n              0 <= idxs[i] && idxs[i] < M,\n              \"Index out of bounds: \",\n              idxs[i],\n              \", range 0 to \",\n              M);\n          idx = idxs[i];\n        } else {\n          idx = i;\n        }\n        r.template process<FixedSize>(\n            ctx, inputAccessor_.getBlockPtr(in_block_size, idx), i, &context_);\n      }\n\n      r.template finish<FixedSize>(ctx, &context_);\n      // check correctness of the next segment\n      if (i < N) {\n        CAFFE_ENFORCE_EQ(\n            s_ids[start] + 1,\n            s_ids[i],\n            \"Indices must be sorted and not 
have gaps\");\n      }\n    }\n    return true;\n  }\n\n  enum {\n    INDICES = Reducer::kInputCount,\n    SEGMENT_IDS = Reducer::kInputCount + (SparseFused ? 1 : 0)\n  };\n  static constexpr int kSelfInputs = SparseFused ? 2 : 1;\n  static constexpr int kNumInputs = Reducer::kInputCount + kSelfInputs;\n\n private:\n  InputAccessor inputAccessor_;\n};\n\n// Gradient actually doesn't depend on whether sparse lookup is fused or not\ntemplate <typename T, typename SIndex, class Context, class ReducerGradient>\nclass AbstractSortedSegmentGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(AbstractSortedSegmentGradientOp);\n\n  bool RunOnDevice() override {\n    // If more complicated fixed size logic becomes necessary, it can be moved\n    // to the reducer class\n    TIndex grad_block_size = Input(SEGMENT_GRADS).size_from_dim(1);\n    return DispatchHelper<typename ReducerGradient::FixedDispatch>::call(\n        this, grad_block_size);\n  }\n\n  template <int FixedSize>\n  bool DoRunWithValue() {\n    auto& segment_grads = Input(SEGMENT_GRADS);\n    auto& segment_ids = Input(SEGMENT_IDS);\n    auto* data_grads = Output(0);\n\n    CAFFE_ENFORCE_EQ(1, segment_ids.ndim(), \"SEGMENT_IDS must be a vector\");\n    TIndex N = segment_ids.dim(0);\n\n    typename ReducerGradient::Meta ctx(segment_grads, 1);\n    for (int i = 0; i < ReducerGradient::originalInputs().size(); ++i) {\n      auto& aux_in = Input(i);\n      CAFFE_ENFORCE_EQ(\n          N,\n          aux_in.dim(0),\n          \"Input \",\n          i,\n          \" must have the same first dim as SEGMENT_IDS\");\n      ctx.observeOriginalInput(\n          ReducerGradient::originalInputs()[i], aux_in, nullptr /*no grad*/, 1);\n    }\n\n    const SIndex* s_ids = segment_ids.template data<SIndex>();\n    const T* s_grads = segment_grads.template data<T>();\n\n    vector<TIndex> shape;\n    shape.push_back(N);\n    ctx.appendGradShape(&shape);\n    
data_grads->Resize(shape);\n\n    TIndex d_block_size = data_grads->size_from_dim(1);\n    const SIndex K = segment_grads.dim(0);\n    TIndex s_block_size = segment_grads.size_from_dim(1);\n    T* out = data_grads->template mutable_data<T>();\n\n    if (N == 0) {\n      return true;\n    }\n\n    // Assume the segments are sorted and there are no gaps\n    CAFFE_ENFORCE_EQ(0, s_ids[0], \"Indices must be sorted and not have gaps\");\n    // repeat the check from forward op\n    CAFFE_ENFORCE_EQ(\n        K - 1, s_ids[N - 1], \"Indices must be sorted and not have gaps\");\n    for (TIndex i = 0; i < N;) {\n      TIndex start = i;\n      TIndex end = start;\n\n      if (ReducerGradient::computeLength()) {\n        for (; end < N && s_ids[start] == s_ids[end]; ++end) {\n        }\n      }\n\n      ReducerGradient r(ctx, s_grads + s_block_size * s_ids[start], &context_);\n      for (; i < N && s_ids[start] == s_ids[i]; ++i) {\n        r.template fillGrad<FixedSize>(\n            ctx, out + d_block_size * i, i, &context_, end - start);\n      }\n\n      // check correctness of the next segment\n      if (i < N) {\n        CAFFE_ENFORCE_EQ(\n            s_ids[start] + 1,\n            s_ids[i],\n            \"Indices must be sorted and not have gaps\");\n      }\n    }\n    return true;\n  }\n\n  // Input layout:\n  //   orig_arg1, orig_arg2, ..., orig_argN, SEGMENT_GRADS, SEGMENT_IDS\n  // orig_argXs represent original op's inputs and will be passed to the reducer\n  // directly\n  static constexpr int kNumInputs =\n      ReducerGradient::originalInputs().size() + 2;\n  enum _InputTags {\n    SEGMENT_GRADS = ReducerGradient::originalInputs().size(),\n    SEGMENT_IDS\n  };\n};\n\n// base implementation of sorted/unsorted sparse/non-sparse gradient computation\ntemplate <\n    typename ForwardOp,\n    typename ReducerDef,\n    typename ReducerGradient,\n    bool Sorted,\n    bool SparseFused>\nstruct SegmentOpGetGradient : public GradientMakerBase {\n  using 
GradientMakerBase::GradientMakerBase;\n  vector<OperatorDef> GetGradientDefs() override {\n    CAFFE_ENFORCE(\n        !ReducerGradient::requiresDataInput(Def()),\n        \"grads on aux inputs are not yet implemented for Segment operators.\");\n    vector<string> grad_ins;\n    for (const int i : ReducerGradient::originalInputs()) {\n      grad_ins.push_back(I(i));\n    }\n    grad_ins.push_back(GO(0));\n    grad_ins.push_back(I(ForwardOp::SEGMENT_IDS));\n    vector<OperatorDef> r{CreateOperatorDef(\n        string(Sorted ? \"SortedSegment\" : \"UnsortedSegment\") +\n            ReducerDef::name + \"Gradient\",\n        \"\",\n        grad_ins,\n        // no gradient on segment_ids or auxiliary inputs for now\n        vector<string>{SparseFused ? GI_V(0) : GI(0)})};\n    if (SparseFused) {\n      SetSparse(0, I(ForwardOp::INDICES), GI_V(0));\n    }\n    return r;\n  }\n};\n\ntemplate <typename T, typename SIndex, typename Context, typename ReducerDef>\nstruct AbstractSortedSegmentDef {\n  using OpDef = ReducerDef;\n  static constexpr const char* basename = \"SortedSegment\";\n  static constexpr const char* doc = R\"DOC(\nApplies '{op}' to each segment of input tensor. Segments need to be sorted and\ncontiguous. See also UnsortedSegment{op} that doesn't have this requirement.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. 
Other dimensions are inherited from the input tensor.\n\n{op_doc}\n  )DOC\";\n  static void PopulateSchema(OpSchema& schema) {\n    schema.Input(0, \"DATA\", \"Input tensor, slices of which are aggregated.\");\n    schema.Input(\n        Reducer::kInputCount,\n        \"SEGMENT_IDS\",\n        \"Vector with the same length as the first dimension of DATA \"\n        \"and values in the range 0..K-1 and in increasing order that \"\n        \"maps each slice of DATA to one of the segments\");\n    schema.Output(\n        0,\n        \"OUTPUT\",\n        \"Aggregated output tensor. Has the first dimension of K \"\n        \"(the number of segments).\");\n    ReducerDef::PopulateSchema(schema);\n  }\n  using Reducer = typename ReducerDef::template Reducer<T, Context>;\n  using ReducerGradient =\n      typename ReducerDef::template ReducerGradient<T, Context>;\n  using ForwardOp = AbstractSortedSegmentOp<T, SIndex, Context, Reducer, false>;\n  using BackwardOp =\n      AbstractSortedSegmentGradientOp<T, SIndex, Context, ReducerGradient>;\n  using GetGradient = SegmentOpGetGradient<\n      ForwardOp,\n      ReducerDef,\n      ReducerGradient,\n      true /*Sorted*/,\n      false /*SparseFused*/>;\n};\n\ntemplate <typename T, typename SIndex, typename Context, typename ReducerDef>\nstruct AbstractSparseSortedSegmentDef {\n  using OpDef = ReducerDef;\n  static constexpr const char* basename = \"SparseSortedSegment\";\n  static constexpr const char* doc = R\"DOC(\nPulls in slices of the input tensor, groups them into segments and applies\n'{op}' to each segment. Segments need to be sorted and contiguous. See also\nSparseUnsortedSegment{op} that doesn't have this requirement.\n\nThis op is basically Gather and SortedSegment{op} fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. 
INDICES represent which slices of DATA need to be pulled in.\n\nSEGMENT_IDS is a vector that maps each referenced slice of the DATA to a\nparticular group (segment). Values belonging to the same segment are aggregated\ntogether. SEGMENT_IDS should have the same dimension as INDICES.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\n{op_doc}\n  )DOC\";\n  static void PopulateSchema(OpSchema& schema) {\n    schema.Input(0, \"DATA\", \"Input tensor, slices of which are aggregated.\");\n    schema.Input(\n        Reducer::kInputCount,\n        \"INDICES\",\n        \"Integer vector containing indices of the first dimension of DATA for \"\n        \"the slices that are being aggregated\");\n    schema.Input(\n        Reducer::kInputCount + 1,\n        \"SEGMENT_IDS\",\n        \"Vector with the same length as INDICES and values in the range \"\n        \"0..K-1 and in increasing order that maps each slice of DATA referenced\"\n        \" by INDICES to one of the segments\");\n    schema.Output(\n        0,\n        \"OUTPUT\",\n        \"Aggregated output tensor. 
Has the first dimension of K \"\n        \"(the number of segments).\");\n    ReducerDef::PopulateSchema(schema);\n  }\n  using Reducer = typename ReducerDef::template Reducer<T, Context>;\n  using ReducerGradient =\n      typename ReducerDef::template ReducerGradient<T, Context>;\n  using ForwardOp = AbstractSortedSegmentOp<T, SIndex, Context, Reducer>;\n  // TODO(dzhulgakov): we're registering the same class twice here,\n  // consider avoiding op duplication here\n  using BackwardOp =\n      AbstractSortedSegmentGradientOp<T, SIndex, Context, ReducerGradient>;\n  using GetGradient = SegmentOpGetGradient<\n      ForwardOp,\n      ReducerDef,\n      ReducerGradient,\n      true /*Sorted*/,\n      true /*SparseFused*/>;\n};\n\n/**\n * @brief Unsorted segment reduction op with optional fused embedding lookup\n *\n * Base implementation for UnsortedSegmentXXX and UnsparseSortedSegmentXXX\n * depending on SparseFused static argument.\n *\n * Unlike the sorted version it allows to have \"gaps\" in segment ids.\n *\n * Inputs:\n *   0: DATA - input embedding to do lookups in\n *   1..P: AUX_ARG_<I> - optional additional arguments to be passed to the\n *                       reducer, should have the same first dimension as\n *                       SEGMENT_IDS (e.g. scalars in WeightedSum)\n *   # if SparseFused == true:\n *   P+1: INDICES - 1-D vector with indices to look up in DATA. Should have the\n *                  same dimension as SEGMENT_IDS\n *   # P+1 if SparseFused == false:\n *   P+1 or P+2: SEGMENT_IDS - unsorted segment ids 1-D vector\n *\n * Args:\n *   num_segments - allows to override the dimension of the output. If not set\n *                  it would be inferred from segment_ids tensor.\n *\n *\n * Output:\n *   Tensor with first dimension of K, where K is the max segment id + 1. 
Rest\n *   of dimensions are decided by reducer but usually are the same size as extra\n *   dimensions of DATA\n */\ntemplate <\n    typename T,\n    typename SIndex,\n    class Context,\n    class Reducer,\n    bool SparseFused = true,\n    class InputAccessor = BaseInputAccessor<T>>\nclass AbstractUnsortedSegmentOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  AbstractUnsortedSegmentOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        OP_SINGLE_ARG(int, \"num_segments\", num_segments_, -1) {}\n\n  bool RunOnDevice() override {\n    if (SparseFused) {\n      return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n          this, Input(INDICES));\n    } else {\n      // type doesn't matter\n      return DoRunWithType<TIndex>();\n    }\n  }\n\n  template <typename IndexType>\n  bool DoRunWithType() {\n    // If more complicated fixed size logic becomes necessary, it can be moved\n    // to the reducer class\n    TIndex in_block_size = Input(0).size_from_dim(1);\n    return DispatchHelper<typename Reducer::FixedDispatch, IndexType>::call(\n        this, in_block_size);\n  }\n\n  template <typename IndexType, int FixedSize>\n  bool DoRunWithValue() {\n    auto& data = Input(0);\n    auto& segment_ids = Input(SEGMENT_IDS);\n    auto* output = Output(0);\n\n    CAFFE_ENFORCE_EQ(1, segment_ids.ndim(), \"SEGMENT_IDS must be a vector\");\n    TIndex N = segment_ids.dim(0);\n    const TIndex M = data.dim(0);\n\n    const IndexType* idxs;\n    if (SparseFused) { // static if\n      auto& indices = Input(INDICES);\n      CAFFE_ENFORCE_EQ(1, indices.ndim(), \"INDICES must be a vector\");\n      CAFFE_ENFORCE_EQ(\n          N,\n          indices.dim(0),\n          \"SEGMENT_IDS must have the same length as INDICES\");\n      idxs = indices.template data<IndexType>();\n    } else {\n      CAFFE_ENFORCE_EQ(\n          N, M, \"DATA must have the same first dimension as SEGMENT_IDS\");\n    
}\n\n    // It would probably look nicer with varargs templates but it's too much\n    // metaprogramming\n    typename Reducer::Meta ctx;\n    ctx.observeInput(0, data, 1);\n    for (int i = 1; i < Reducer::kInputCount; ++i) {\n      auto& aux_in = Input(i);\n      CAFFE_ENFORCE_EQ(\n          N,\n          aux_in.dim(0),\n          \"Input \",\n          i,\n          \" must have the same first dim as SEGMENT_IDS\");\n      ctx.observeInput(i, aux_in, 1);\n    }\n\n    const SIndex* s_ids = segment_ids.template data<SIndex>();\n    OPERATOR_NEEDS_FEATURE(\n        inputAccessor_.observeInput(data),\n        \"Unsupported input type: \",\n        data.meta().name(),\n        \".\");\n\n    // determine the number of segments\n    SIndex K;\n    if (num_segments_ != -1) {\n      K = num_segments_;\n    } else {\n      K = 0;\n      for (TIndex i = 0; i < N; ++i) {\n        K = std::max(K, s_ids[i] + 1);\n      }\n    }\n\n    vector<TIndex> shape;\n    shape.push_back(K);\n    ctx.appendOutputShape(&shape);\n    output->Resize(shape);\n\n    TIndex in_block_size = data.size_from_dim(1);\n    TIndex out_block_size = output->size_from_dim(1);\n    T* out = output->template mutable_data<T>();\n\n    reducers_.clear();\n    reducers_.reserve(K);\n    for (TIndex i = 0; i < K; ++i) {\n      reducers_.emplace_back(ctx, out + out_block_size * i, &context_);\n    }\n\n    for (TIndex i = 0; i < N; ++i) {\n      auto s_id = s_ids[i];\n      CAFFE_ENFORCE(\n          0 <= s_id && s_id < K,\n          \"Segment id out of range: \",\n          s_id,\n          \", range 0 to \",\n          K);\n      IndexType idx;\n      if (SparseFused) { // static if\n        CAFFE_ENFORCE(\n            0 <= idxs[i] && idxs[i] < M,\n            \"Index out of bounds: \",\n            idxs[i],\n            \", range 0 to \",\n            M);\n        idx = idxs[i];\n      } else {\n        idx = i;\n      }\n      reducers_[s_id].template process<FixedSize>(\n          ctx, 
inputAccessor_.getBlockPtr(in_block_size, idx), i, &context_);\n    }\n\n    for (TIndex i = 0; i < K; ++i) {\n      reducers_[i].template finish<FixedSize>(ctx, &context_);\n    }\n    // call reducers destructors (if there is any)\n    reducers_.clear();\n    return true;\n  }\n\n  enum {\n    INDICES = Reducer::kInputCount,\n    SEGMENT_IDS = Reducer::kInputCount + (SparseFused ? 1 : 0)\n  };\n  static constexpr int kSelfInputs = SparseFused ? 2 : 1;\n  static constexpr int kNumInputs = Reducer::kInputCount + kSelfInputs;\n\n private:\n  TIndex num_segments_;\n  // member field to reuse memory\n  vector<Reducer> reducers_;\n  InputAccessor inputAccessor_;\n};\n\n// Gradient actually doesn't depend on whether sparse lookup is fused or not\ntemplate <typename T, typename SIndex, class Context, class ReducerGradient>\nclass AbstractUnsortedSegmentGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(AbstractUnsortedSegmentGradientOp);\n\n  bool RunOnDevice() override {\n    // If more complicated fixed size logic becomes necessary, it can be moved\n    // to the reducer class\n    TIndex grad_block_size = Input(SEGMENT_GRADS).size_from_dim(1);\n    return DispatchHelper<typename ReducerGradient::FixedDispatch>::call(\n        this, grad_block_size);\n  }\n\n  template <int FixedSize>\n  bool DoRunWithValue() {\n    auto& segment_grads = Input(SEGMENT_GRADS);\n    auto& segment_ids = Input(SEGMENT_IDS);\n    auto* data_grads = Output(0);\n\n    CAFFE_ENFORCE_EQ(1, segment_ids.ndim(), \"SEGMENT_IDS must be a vector\");\n    TIndex N = segment_ids.dim(0);\n\n    typename ReducerGradient::Meta ctx(segment_grads, 1);\n    for (int i = 0; i < ReducerGradient::originalInputs().size(); ++i) {\n      auto& aux_in = Input(i);\n      CAFFE_ENFORCE_EQ(\n          N,\n          aux_in.dim(0),\n          \"Input \",\n          i,\n          \" must have the same first dim as SEGMENT_IDS\");\n      ctx.observeOriginalInput(\n 
         ReducerGradient::originalInputs()[i], aux_in, nullptr /*no grad*/, 1);\n    }\n\n    const SIndex* s_ids = segment_ids.template data<SIndex>();\n    const T* s_grads = segment_grads.template data<T>();\n\n    vector<TIndex> shape;\n    shape.push_back(N);\n    ctx.appendGradShape(&shape);\n    data_grads->Resize(shape);\n\n    TIndex d_block_size = data_grads->size_from_dim(1);\n    const SIndex K = segment_grads.dim(0);\n    TIndex s_block_size = segment_grads.size_from_dim(1);\n    T* out = data_grads->template mutable_data<T>();\n\n    if (ReducerGradient::computeLength()) {\n      segment_length_.resize(K, 0);\n      for (int i = 0; i < N; ++i) {\n        auto s_id = s_ids[i];\n        CAFFE_ENFORCE(\n            0 <= s_id && s_id < K,\n            \"Segment id out of range: \",\n            s_id,\n            \", range 0 to \",\n            K);\n        segment_length_[s_ids[i]]++;\n      }\n    }\n\n    reducers_.clear();\n    reducers_.reserve(K);\n    for (SIndex i = 0; i < K; ++i) {\n      reducers_.emplace_back(ctx, s_grads + s_block_size * i, &context_);\n    }\n\n    for (TIndex i = 0; i < N; ++i) {\n      auto s_id = s_ids[i];\n      if (ReducerGradient::computeLength()) {\n        reducers_[s_id].template fillGrad<FixedSize>(\n            ctx, out + d_block_size * i, i, &context_, segment_length_[s_id]);\n      } else {\n        reducers_[s_id].template fillGrad<FixedSize>(\n            ctx, out + d_block_size * i, i, &context_, 0);\n      }\n    }\n    // call reducers destructors (if there is any)\n    reducers_.clear();\n    return true;\n  }\n\n  // Input layout:\n  //   orig_arg1, orig_arg2, ..., orig_argN, SEGMENT_GRADS, SEGMENT_IDS\n  // orig_argXs represent original op's inputs and will be passed to the reducer\n  // directly\n  static constexpr int kNumInputs =\n      ReducerGradient::originalInputs().size() + 2;\n  enum _InputTags {\n    SEGMENT_GRADS = ReducerGradient::originalInputs().size(),\n    SEGMENT_IDS\n  };\n\n private:\n  
// member field to reuse memory\n  vector<ReducerGradient> reducers_;\n  vector<int> segment_length_;\n};\n\ntemplate <typename T, typename SIndex, typename Context, typename ReducerDef>\nstruct AbstractUnsortedSegmentDef {\n  using OpDef = ReducerDef;\n  static constexpr const char* basename = \"UnsortedSegment\";\n  static constexpr const char* doc = R\"DOC(\nApplies '{op}' to each segment of input tensor. Segments ids can appear in\narbitrary order (unlike in SortedSegment{op}).\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nIf `num_segments` argument is passed it would be used as a first dimension for\nthe output. Otherwise, it'd be dynamically calculated from as the max value of\nSEGMENT_IDS plus one. Other output dimensions are inherited from the input\ntensor.\n\n{op_doc}\n  )DOC\";\n  static void PopulateSchema(OpSchema& schema) {\n    schema.Arg(\n        \"num_segments\",\n        \"Optional int argument specifying the number of output segments and \"\n        \"thus the first dimension of the output\");\n    schema.Input(0, \"DATA\", \"Input tensor, slices of which are aggregated.\");\n    schema.Input(\n        Reducer::kInputCount,\n        \"SEGMENT_IDS\",\n        \"Integer vector with the same length as the first dimension of DATA \"\n        \"that maps each slice of DATA to one of the segments\");\n    schema.Output(\n        0,\n        \"OUTPUT\",\n        \"Aggregated output tensor. 
Has the first dimension of equal to the \"\n        \"number of segments.\");\n    ReducerDef::PopulateSchema(schema);\n  }\n  using Reducer = typename ReducerDef::template Reducer<T, Context>;\n  using ReducerGradient =\n      typename ReducerDef::template ReducerGradient<T, Context>;\n  using ForwardOp = AbstractUnsortedSegmentOp<\n      T,\n      SIndex,\n      Context,\n      typename ReducerDef::template Reducer<T, Context>,\n      false>;\n  using BackwardOp =\n      AbstractUnsortedSegmentGradientOp<T, SIndex, Context, ReducerGradient>;\n  using GetGradient = SegmentOpGetGradient<\n      ForwardOp,\n      ReducerDef,\n      ReducerGradient,\n      false /*Sorted*/,\n      false /*SparseFused*/>;\n};\n\ntemplate <typename T, typename SIndex, typename Context, typename ReducerDef>\nstruct AbstractSparseUnsortedSegmentDef {\n  using OpDef = ReducerDef;\n  static constexpr const char* basename = \"SparseUnsortedSegment\";\n  static constexpr const char* doc = R\"DOC(\nPulls in slices of the input tensor, groups them into segments and applies\n'{op}' to each segment. Segments ids can appear in arbitrary order (unlike in\nSparseSortedSegment{op}).\n\nThis op is basically Gather and UnsortedSegment{op} fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nSEGMENT_IDS is a vector that maps each referenced slice of the DATA to a\nparticular group (segment). Values belonging to the same segment are aggregated\ntogether. SEGMENT_IDS should have the same dimension as INDICES.\n\nIf `num_segments` argument is passed it would be used as a first dimension for\nthe output. Otherwise, it'd be dynamically calculated from as the max value of\nSEGMENT_IDS plus one. 
Other output dimensions are inherited from the input\ntensor.\n\n{op_doc}\n  )DOC\";\n  static void PopulateSchema(OpSchema& schema) {\n    schema.Input(0, \"DATA\", \"Input tensor, slices of which are aggregated.\");\n    schema.Input(\n        Reducer::kInputCount,\n        \"INDICES\",\n        \"Integer vector containing indices of the first dimension of DATA for \"\n        \"the slices that are being aggregated\");\n    schema.Input(\n        Reducer::kInputCount + 1,\n        \"SEGMENT_IDS\",\n        \"Integer vector with the same length as INDICES that maps each slice \"\n        \"of DATA referenced by INDICES to one of the segments\");\n    schema.Output(\n        0,\n        \"OUTPUT\",\n        \"Aggregated output tensor. Has the first dimension of equal to the \"\n        \"number of segments.\");\n    ReducerDef::PopulateSchema(schema);\n  }\n  using Reducer = typename ReducerDef::template Reducer<T, Context>;\n  using ReducerGradient =\n      typename ReducerDef::template ReducerGradient<T, Context>;\n  using ForwardOp = AbstractUnsortedSegmentOp<T, SIndex, Context, Reducer>;\n  // TODO(dzhulgakov): we're registering the same class twice here,\n  // consider avoiding op duplication here\n  using BackwardOp =\n      AbstractUnsortedSegmentGradientOp<T, SIndex, Context, ReducerGradient>;\n  using GetGradient = SegmentOpGetGradient<\n      ForwardOp,\n      ReducerDef,\n      ReducerGradient,\n      false /*Sorted*/,\n      true /*SparseFused*/>;\n};\n\n/**\n * @brief Segment reduction op with optional fused embedding lookup\n *\n * Base implementation for LengthsXXX and SparseLengthsXXX depending\n * on SparseFused static argument.\n *\n * Inputs:\n *   0: DATA - input embedding to do lookups in\n *   1..P: AUX_ARG_<I> - optional additional arguments to be passed to the\n *                       reducer, should have the same first dimension as\n *                       LENGTHS (e.g. 
scalars in WeightedSum)\n *   # if SparseFused == true:\n *   P+1: INDICES - 1-D vector with indices to look up in DATA. Should have the\n *                  same dimension as LENGTHS\n *   # P+1 if SparseFused == false:\n *   P+1 or P+2: LENGTHS - lengths on indecies vector\n *\n * Output:\n *   Tensor with first dimension of K, where K = len(LENGTHS). Rest\n *   of dimensions are decided by reducer but usually are the same size as extra\n *   dimensions of DATA\n */\n// TODO(dzhulgakov): for now it's implemented with incremental reducers because\n// of fused sparse support. But using \"lengths\" representation actually implies\n// continuous segments and thus range reducers can be used for non-sparse\n// version.\n\ntemplate <\n    typename TData,\n    typename TLengths,\n    class Context,\n    class Reducer,\n    bool SparseFused = true,\n    class InputAccessor = BaseInputAccessor<TData>>\nclass AbstractLengthsOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(AbstractLengthsOp);\n\n  bool RunOnDevice() override {\n    if (SparseFused) {\n      return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n          this, Input(INDICES));\n    } else {\n      // type doesn't matter\n      return DoRunWithType<TIndex>();\n    }\n  }\n\n  template <typename IndexType>\n  bool DoRunWithType() {\n    // If more complicated fixed size logic becomes necessary, it can be moved\n    // to the reducer class\n    TIndex in_block_size = Input(0).size_from_dim(1);\n    return DispatchHelper<typename Reducer::FixedDispatch, IndexType>::call(\n        this, in_block_size);\n  }\n\n  template <typename IndexType, int FixedSize>\n  bool DoRunWithValue() {\n    auto& dataInput = Input(0);\n    auto& lengthsInput = Input(LENGTHS);\n    auto* output = Output(0);\n\n    CAFFE_ENFORCE_EQ(1, lengthsInput.ndim(), \"LENGTHS must be a vector\");\n    const TIndex dataSize = dataInput.dim(0);\n    // Either first dim the data or how much 
we pull in indexies from it\n    TIndex dataToReduceSize;\n    const TIndex outputSize = lengthsInput.dim(0);\n\n    const IndexType* indices;\n    if (SparseFused) { // static if\n      auto& indicesInput = Input(INDICES);\n      CAFFE_ENFORCE_EQ(1, indicesInput.ndim(), \"INDICES must be a vector\");\n      indices = indicesInput.template data<IndexType>();\n      dataToReduceSize = indicesInput.dim(0);\n    } else {\n      dataToReduceSize = dataSize;\n    }\n\n    typename Reducer::Meta ctx;\n    ctx.observeInput(0, dataInput, 1);\n    for (int i = 1; i < Reducer::kInputCount; ++i) {\n      auto& aux_in = Input(i);\n      CAFFE_ENFORCE(\n          dataToReduceSize == aux_in.dim(0),\n          \"Input \",\n          i,\n          \" must have the same first dim as SEGMENT_IDS\");\n      ctx.observeInput(i, aux_in, 1);\n    }\n\n    const TLengths* lengths = lengthsInput.template data<TLengths>();\n\n    OPERATOR_NEEDS_FEATURE(\n        inputAccessor_.observeInput(dataInput),\n        \"Unsupported input type: \",\n        dataInput.meta().name(),\n        \".\");\n\n    vector<TIndex> shape{outputSize};\n    ctx.appendOutputShape(&shape);\n    output->Resize(shape);\n\n    TIndex in_block_size = dataInput.size_from_dim(1);\n    TIndex out_block_size = output->size_from_dim(1);\n    TData* out = output->template mutable_data<TData>();\n\n    TIndex dataIndex = 0;\n    for (TIndex rangeIndex = 0; rangeIndex < outputSize; ++rangeIndex) {\n      Reducer reducer(ctx, out + out_block_size * rangeIndex, &context_);\n      for (TIndex start = dataIndex; dataIndex < start + lengths[rangeIndex];\n           ++dataIndex) {\n        IndexType idx;\n        if (SparseFused) { // static if\n          idx = indices[dataIndex];\n          CAFFE_ENFORCE(\n              0 <= idx && idx < dataSize,\n              \"Index \",\n              dataIndex,\n              \" is out of bounds: \",\n              idx,\n              \", range 0 to \",\n              dataSize);\n        } 
else {\n          idx = dataIndex;\n          CAFFE_ENFORCE(\n              idx < dataSize,\n              \"Range \",\n              rangeIndex,\n              \" of length \",\n              lengths[rangeIndex],\n              \" is out of bound \",\n              dataSize);\n        }\n\n        const TData* input = inputAccessor_.getBlockPtr(in_block_size, idx);\n        reducer.template process<FixedSize>(ctx, input, dataIndex, &context_);\n      }\n      reducer.template finish<FixedSize>(ctx, &context_);\n    }\n    CAFFE_ENFORCE(\n        dataIndex == dataToReduceSize, dataIndex, \" != \", dataToReduceSize);\n\n    return true;\n  }\n\n  enum {\n    INDICES = Reducer::kInputCount,\n    LENGTHS = Reducer::kInputCount + (SparseFused ? 1 : 0)\n  };\n  static constexpr int kSelfInputs = SparseFused ? 2 : 1;\n  static constexpr int kNumInputs = Reducer::kInputCount + kSelfInputs;\n\n private:\n  InputAccessor inputAccessor_;\n};\n\n/*\n * Some notice:\n * 1. Gradient actually doesn't depend on whether sparse lookup is fused or not\n * 2. INDICES are not used in CPU version, but they are needed in async CUDA\n *    version. So we register 3 input version for CPU as gradient op for\n *    GPU/CPU convert. 
We then register 2 input version for CPU for backward\n *    compatibility with older nets.\n */\ntemplate <\n    typename T,\n    typename TLengths,\n    class Context,\n    class ReducerGradient,\n    bool GradientNeedIndices = false>\nclass AbstractLengthsGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(AbstractLengthsGradientOp);\n\n  bool RunOnDevice() override {\n    // If more complicated fixed size logic becomes necessary, it can be moved\n    // to the reducer class\n    TIndex gradBlockSize = Input(SEGMENT_GRADS).size_from_dim(1);\n    return DispatchHelper<typename ReducerGradient::FixedDispatch>::call(\n        this, gradBlockSize);\n  }\n\n  template <int FixedSize>\n  bool DoRunWithValue() {\n    auto& segmentGradsInput = Input(SEGMENT_GRADS);\n    auto& lengthsInput = Input(LENGTHS);\n    auto* dataGradsOutput = Output(0);\n\n    CAFFE_ENFORCE(lengthsInput.ndim() == 1, \"LENGTHS must be a vector\");\n    TIndex reducedDataSize = 0;\n    TIndex numSegments = lengthsInput.dim(0);\n    CAFFE_ENFORCE(segmentGradsInput.ndim() > 0);\n    CAFFE_ENFORCE(numSegments == segmentGradsInput.dim(0));\n    const TLengths* lengths = lengthsInput.template data<TLengths>();\n    for (TIndex i = 0; i < numSegments; ++i) {\n      reducedDataSize += lengths[i];\n    }\n\n    typename ReducerGradient::Meta ctx(segmentGradsInput, 1);\n    for (int i = 0; i < ReducerGradient::originalInputs().size(); ++i) {\n      auto& aux_in = Input(i);\n      CAFFE_ENFORCE_EQ(\n          reducedDataSize,\n          aux_in.dim(0),\n          \"Input \",\n          i,\n          \" must have the same first dim as SEGMENT_IDS\");\n      ctx.observeOriginalInput(\n          ReducerGradient::originalInputs()[i], aux_in, nullptr /*no grad*/, 1);\n    }\n\n    const T* segmentGrads = segmentGradsInput.template data<T>();\n\n    vector<TIndex> shape;\n    shape.push_back(reducedDataSize);\n    ctx.appendGradShape(&shape);\n    
dataGradsOutput->Resize(shape);\n\n    TIndex dataGradsBlockSize = dataGradsOutput->size_from_dim(1);\n    TIndex segmentBlockSize = segmentGradsInput.size_from_dim(1);\n    T* dataGrads = dataGradsOutput->template mutable_data<T>();\n\n    TIndex dataIndex = 0;\n    for (TIndex rangeIndex = 0; rangeIndex < numSegments; ++rangeIndex) {\n      ReducerGradient reducer(\n          ctx, segmentGrads + segmentBlockSize * rangeIndex, &context_);\n      for (TIndex start = dataIndex; dataIndex < start + lengths[rangeIndex];\n           ++dataIndex) {\n        reducer.template fillGrad<FixedSize>(\n            ctx,\n            dataGrads + dataGradsBlockSize * dataIndex,\n            dataIndex,\n            &context_,\n            lengths[rangeIndex]);\n      }\n    }\n    CAFFE_ENFORCE(\n        dataIndex == reducedDataSize, dataIndex, \" != \", reducedDataSize);\n    return true;\n  }\n\n  // Input layout:\n  //   orig_arg1, orig_arg2, ..., orig_argN, SEGMENT_GRADS, SEGMENT_IDS\n  // orig_argXs represent original op's inputs and will be passed to the reducer\n  // directly\n  static constexpr int kNumInputs = ReducerGradient::originalInputs().size() +\n      2 + (GradientNeedIndices ? 
1 : 0);\n  enum _InputTags {\n    SEGMENT_GRADS = ReducerGradient::originalInputs().size(),\n    LENGTHS,\n    INDICES\n  };\n};\n\n// Version of gradient that requires the main input and thus needs to receive\n// length, indices and other stuff\ntemplate <\n    typename T,\n    typename TLengths,\n    class Context,\n    class ReducerGradient,\n    bool SparseFused = true,\n    bool GradientNeedIndices = false>\nclass AbstractLengthsWithMainInputGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(AbstractLengthsWithMainInputGradientOp);\n\n  bool RunOnDevice() override {\n    if (SparseFused) {\n      return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n          this, Input(INDICES));\n    } else {\n      // type doesn't matter\n      return DoRunWithType<TIndex>();\n    }\n  }\n\n  template <typename IndexType>\n  bool DoRunWithType() {\n    // If more complicated fixed size logic becomes necessary, it can be moved\n    // to the reducer class\n    TIndex in_block_size = Input(SEGMENT_GRADS).size_from_dim(1);\n    return DispatchHelper<typename ReducerGradient::FixedDispatch, IndexType>::\n        call(this, in_block_size);\n  }\n\n  template <typename IndexType, int FixedSize>\n  bool DoRunWithValue() {\n    auto& dataInput = Input(DATA_INPUT);\n    auto& segmentGradsInput = Input(SEGMENT_GRADS);\n    auto& lengthsInput = Input(LENGTHS);\n    auto* dataGradsOutput = Output(0);\n\n    CAFFE_ENFORCE(lengthsInput.ndim() == 1, \"LENGTHS must be a vector\");\n    TIndex numSegments = lengthsInput.dim(0);\n    CAFFE_ENFORCE(segmentGradsInput.ndim() > 0);\n    CAFFE_ENFORCE(numSegments == segmentGradsInput.dim(0));\n    const TLengths* lengths = lengthsInput.template data<TLengths>();\n\n    typename ReducerGradient::Meta ctx(segmentGradsInput, 1);\n    for (int i = 0; i < ReducerGradient::originalInputs().size(); ++i) {\n      int aux_num = ReducerGradient::originalInputs()[i];\n      auto& aux_in = 
Input(i);\n      auto* aux_grad = aux_num < OutputSize() ? Output(aux_num) : nullptr;\n      ctx.observeOriginalInput(aux_num, aux_in, aux_grad, 1);\n    }\n\n    // Either first dim the data or how much we pull in indexies from it\n    TIndex dataToReduceSize;\n    const IndexType* indices = nullptr;\n    if (SparseFused) { // static if\n      auto& indicesInput = Input(INDICES);\n      indices = indicesInput.template data<IndexType>();\n      dataToReduceSize = indicesInput.dim(0);\n    } else {\n      dataToReduceSize = dataInput.dim(0);\n    }\n\n    const T* segmentGrads = segmentGradsInput.template data<T>();\n\n    vector<TIndex> shape;\n    shape.push_back(dataToReduceSize);\n    ctx.appendGradShape(&shape);\n    dataGradsOutput->Resize(shape);\n\n    TIndex dataGradsBlockSize = dataGradsOutput->size_from_dim(1);\n    TIndex segmentBlockSize = segmentGradsInput.size_from_dim(1);\n    T* dataGrads = dataGradsOutput->template mutable_data<T>();\n\n    const T* data = dataInput.template data<T>();\n\n    TIndex dataIndex = 0;\n    for (TIndex rangeIndex = 0; rangeIndex < numSegments; ++rangeIndex) {\n      ReducerGradient reducer(\n          ctx, segmentGrads + segmentBlockSize * rangeIndex, &context_);\n      for (TIndex start = dataIndex; dataIndex < start + lengths[rangeIndex];\n           ++dataIndex) {\n        IndexType data_pos;\n        // No range checking, should've been verified in forward pass\n        if (SparseFused) { // static if\n          data_pos = indices[dataIndex];\n        } else {\n          data_pos = dataIndex;\n        }\n        reducer.template fillGradWithMainInput<FixedSize>(\n            ctx,\n            data + dataGradsBlockSize * data_pos,\n            dataGrads + dataGradsBlockSize * dataIndex,\n            dataIndex,\n            &context_,\n            lengths[rangeIndex]);\n      }\n    }\n    return true;\n  }\n\n  // Input layout:\n  //   orig_arg1, orig_arg2, ..., orig_argN, DATA_INPUT, SEGMENT_GRADS,\n  //      
SEGMENT_LEGNTHS, [INDICES]\n  // orig_argXs represent original op's inputs and will be passed to the reducer\n  // directly\n  static constexpr int kNumInputs = ReducerGradient::originalInputs().size() +\n      3 + (SparseFused ? 1 : 0) + (GradientNeedIndices ? 1 : 0);\n  enum _InputTags {\n    SEGMENT_GRADS = ReducerGradient::originalInputs().size(),\n    LENGTHS,\n    DATA_INPUT,\n    INDICES,\n  };\n};\n\n// Version of gradient that requires the main input as well as the output of the\n// forward op.\ntemplate <typename T, typename TLengths, class Context, class ReducerGradient>\nclass AbstractLengthsWithMainInputAndForwardOutputGradientOp\n    : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(AbstractLengthsWithMainInputAndForwardOutputGradientOp);\n\n  bool RunOnDevice() override {\n    // If more complicated fixed size logic becomes necessary, it can be moved\n    // to the reducer class.\n    TIndex in_block_size = Input(SEGMENT_GRADS).size_from_dim(1);\n    return DispatchHelper<typename ReducerGradient::FixedDispatch>::call(\n        this, in_block_size);\n  }\n\n  template <int FixedSize>\n  bool DoRunWithValue() {\n    auto& dataInput = Input(DATA_INPUT);\n    auto& segmentGradsInput = Input(SEGMENT_GRADS);\n    auto& lengthsInput = Input(LENGTHS);\n    auto& forwardOutputInput = Input(FORWARD_OUTPUT);\n    auto* dataGradsOutput = Output(0);\n\n    CAFFE_ENFORCE(lengthsInput.ndim() == 1, \"LENGTHS must be a vector\");\n    TIndex numSegments = lengthsInput.dim(0);\n    CAFFE_ENFORCE(segmentGradsInput.ndim() > 0);\n    CAFFE_ENFORCE(numSegments == segmentGradsInput.dim(0));\n    const TLengths* lengths = lengthsInput.template data<TLengths>();\n\n    typename ReducerGradient::Meta ctx(segmentGradsInput, 1);\n    for (int i = 0; i < ReducerGradient::originalInputs().size(); ++i) {\n      int aux_num = ReducerGradient::originalInputs()[i];\n      auto& aux_in = Input(i);\n      auto* aux_grad = aux_num < 
OutputSize() ? Output(aux_num) : nullptr;\n      ctx.observeOriginalInput(aux_num, aux_in, aux_grad, 1);\n    }\n\n    CAFFE_ENFORCE(forwardOutputInput.ndim() > 0);\n    CAFFE_ENFORCE(numSegments == forwardOutputInput.dim(0));\n    const T* forwardOutput = forwardOutputInput.template data<T>();\n\n    TIndex dataToReduceSize = dataInput.dim(0);\n\n    const T* segmentGrads = segmentGradsInput.template data<T>();\n\n    vector<TIndex> shape;\n    shape.push_back(dataToReduceSize);\n    ctx.appendGradShape(&shape);\n    dataGradsOutput->Resize(shape);\n\n    TIndex dataGradsBlockSize = dataGradsOutput->size_from_dim(1);\n    TIndex segmentBlockSize = segmentGradsInput.size_from_dim(1);\n    T* dataGrads = dataGradsOutput->template mutable_data<T>();\n\n    const T* data = dataInput.template data<T>();\n\n    TIndex dataIndex = 0;\n    for (TIndex rangeIndex = 0; rangeIndex < numSegments; ++rangeIndex) {\n      ReducerGradient reducer(\n          ctx, segmentGrads + segmentBlockSize * rangeIndex, &context_);\n      for (TIndex start = dataIndex; dataIndex < start + lengths[rangeIndex];\n           ++dataIndex) {\n        // No range checking, should've been verified in forward pass\n        reducer.template fillGradWithMainInputAndForwardOutput<FixedSize>(\n            ctx,\n            data + dataGradsBlockSize * dataIndex,\n            dataGrads + dataGradsBlockSize * dataIndex,\n            forwardOutput + segmentBlockSize * rangeIndex,\n            dataIndex,\n            &context_,\n            lengths[rangeIndex]);\n      }\n    }\n    return true;\n  }\n\n  // Input layout:\n  //   orig_arg1, orig_arg2, ..., orig_argN, FORWARD_OUTPUT, DATA_INPUT,\n  //      SEGMENT_GRADS, SEGMENT_LEGNTHS\n  // orig_argXs represent original op's inputs and will be passed to the reducer\n  // directly\n  static constexpr int kNumInputs =\n      ReducerGradient::originalInputs().size() + 4;\n  enum _InputTags {\n    FORWARD_OUTPUT = ReducerGradient::originalInputs().size(),\n    
SEGMENT_GRADS,\n    LENGTHS,\n    DATA_INPUT,\n  };\n};\n\n// base implementation of sparse/non-sparse gradient computation\ntemplate <\n    typename ForwardOp,\n    typename ReducerDef,\n    typename ReducerGradient,\n    bool SparseFused,\n    bool GradientNeedIndices = false>\nstruct LengthsOpGetGradient : public GradientMakerBase {\n  using GradientMakerBase::GradientMakerBase;\n  vector<OperatorDef> GetGradientDefs() override {\n    vector<string> grad_ins;\n    string suffix = \"Gradient\";\n    for (const int i : ReducerGradient::originalInputs()) {\n      grad_ins.push_back(I(i));\n    }\n    if (ReducerGradient::requiresForwardOutput()) {\n      grad_ins.push_back(O(0));\n      CAFFE_ENFORCE(\n          !SparseFused,\n          \"Forward pass output not yet supported as input for backward pass \"\n          \"for SparseLengthsXXX operators\");\n      suffix = \"AndForwardOutput\" + suffix;\n    }\n    grad_ins.push_back(GO(0));\n    grad_ins.push_back(I(ForwardOp::LENGTHS));\n    bool indices_pushed = false;\n    if (ReducerGradient::requiresDataInput(Def())) {\n      grad_ins.push_back(I(0));\n      if (SparseFused) {\n        grad_ins.push_back(I(ForwardOp::INDICES));\n        indices_pushed = true;\n      }\n      suffix = \"WithMainInput\" + suffix;\n    }\n    if (GradientNeedIndices && !indices_pushed) {\n      if (SparseFused) {\n        grad_ins.push_back(I(ForwardOp::INDICES));\n      } else {\n        // Hacky: using Input as Indices, remove this after we have specialized\n        // cuda LengthsIndicesInGradientSumGradient\n        grad_ins.push_back(I(0));\n      }\n    }\n    vector<string> grad_outs;\n    grad_outs.push_back({SparseFused ? GI_V(0) : GI(0)});\n    int aux_grads = ReducerGradient::numAuxInputsWithGrads(Def());\n    for (int i = 1; i <= aux_grads; ++i) {\n      grad_outs.push_back(GI(i));\n    }\n    vector<OperatorDef> r{CreateOperatorDef(\n        string(SparseFused ? 
\"SparseLengths\" : \"Lengths\") +\n            string(GradientNeedIndices ? \"IndicesInGradient\" : \"\") +\n            ReducerDef::name + suffix,\n        \"\",\n        grad_ins,\n        grad_outs)};\n    if (SparseFused) {\n      SetSparse(0, I(ForwardOp::INDICES), GI_V(0));\n    }\n    return r;\n  }\n};\n\ntemplate <\n    typename T,\n    typename SIndex,\n    typename Context,\n    typename ReducerDef,\n    bool GradientNeedIndices = false>\nstruct AbstractLengthsDef {\n  using OpDef = ReducerDef;\n  static constexpr const char* basename = \"Lengths\";\n  static constexpr const char* doc = R\"DOC(\nApplies '{op}' to each segment of the input tensor. Segments are defined\nby their LENGTHS.\n\nLENGTHS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nFor example LENGTHS = [2, 1] stands for segments DATA[0..1] and DATA[2]\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `len(LENGTHS)`. Other dimensions are inherited from the input tensor.\n\n{op_doc}\n  )DOC\";\n  static void PopulateSchema(OpSchema& schema) {\n    schema.Input(0, \"DATA\", \"Input tensor, slices of which are aggregated.\");\n    schema.Input(\n        Reducer::kInputCount,\n        \"LENGTHS\",\n        \"Vector with the same sum of elements as the first dimension of DATA\");\n    schema.Output(\n        0,\n        \"OUTPUT\",\n        \"Aggregated output tensor. 
Has the first dimension of len(LENGTHS) \");\n    schema.TensorInferenceFunction(\n        [](const OperatorDef& def, const vector<TensorShape>& in) {\n          vector<TensorShape> out(0);\n          TensorShape output;\n          for (int d : in[Reducer::kInputCount].dims()) {\n            output.add_dims(d);\n          }\n          for (int j = 1; j < in[0].dims_size(); j++) {\n            output.add_dims(in[0].dims(j));\n          }\n          output.set_data_type(in[0].data_type());\n          out.push_back(output);\n          return out;\n        });\n    ReducerDef::PopulateSchema(schema);\n  }\n  using Reducer = typename ReducerDef::template Reducer<T, Context>;\n  using ReducerGradient =\n      typename ReducerDef::template ReducerGradient<T, Context>;\n  using ForwardOp = AbstractLengthsOp<T, SIndex, Context, Reducer, false>;\n  using BackwardOp =\n      AbstractLengthsGradientOp<T, SIndex, Context, ReducerGradient>;\n  using WithMainInputBackwardOp = AbstractLengthsWithMainInputGradientOp<\n      T,\n      SIndex,\n      Context,\n      ReducerGradient,\n      false>;\n  using WithMainInputAndForwardOutputBackwardOp =\n      AbstractLengthsWithMainInputAndForwardOutputGradientOp<\n          T,\n          SIndex,\n          Context,\n          ReducerGradient>;\n  using GetGradient = LengthsOpGetGradient<\n      ForwardOp,\n      ReducerDef,\n      ReducerGradient,\n      false /*SparseFused*/,\n      GradientNeedIndices>;\n};\n\ntemplate <\n    typename T,\n    typename SIndex,\n    typename Context,\n    typename ReducerDef,\n    bool GradientNeedIndices = false>\nstruct AbstractSparseLengthsDef {\n  using OpDef = ReducerDef;\n  static constexpr const char* basename = \"SparseLengths\";\n  static constexpr const char* doc = R\"DOC(\nPulls in slices of the input tensor, groups them into segments and applies\n'{op}' to each segment. 
Segments are defined by their LENGTHS.\n\nThis op is basically Gather and Lengths{op} fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nLENGTHS is a vector that defines slice sizes by first dimention of DATA. Values\nbelonging to the same segment are aggregated together. sum(LENGTHS) has\nto match INDICES size.\n\nThe first dimension of the output is equal to the number of input segment,\ni.e. `len(LENGTHS)`. Other dimensions are inherited from the input tensor.\n\n{op_doc}\n  )DOC\";\n  static void PopulateSchema(OpSchema& schema) {\n    schema.Input(0, \"DATA\", \"Input tensor, slices of which are aggregated.\");\n    schema.Input(\n        Reducer::kInputCount,\n        \"INDICES\",\n        \"Integer vector containing indices of the first dimension of DATA for \"\n        \"the slices that are being aggregated\");\n    schema.Input(\n        Reducer::kInputCount + 1,\n        \"LENGTHS\",\n        \"Non negative vector with sum of elements equal to INDICES length\");\n    schema.Output(\n        0,\n        \"OUTPUT\",\n        \"Aggregated output tensor. Has the first dimension of K \"\n        \"(the number of segments).\");\n    ReducerDef::PopulateSchema(schema);\n  }\n  using Reducer = typename ReducerDef::template Reducer<T, Context>;\n  using ReducerGradient =\n      typename ReducerDef::template ReducerGradient<T, Context>;\n  using ForwardOp = AbstractLengthsOp<T, SIndex, Context, Reducer>;\n  // TODO(dzhulgakov): we're registering the same class twice here,\n  // consider avoiding op duplication here\n  // Note: registering 2 input version for now because of naming in the macro,\n  // will register 3 input version alone\n  /* INDICES are not used in CPU version, but they are needed in async CUDA\n   *    version. So we register 3 input version for CPU as gradient op for\n   *    GPU/CPU convert. 
We then register 2 input version for CPU for backward\n   *    compatibility with older nets.\n   */\n  using BackwardOp = AbstractLengthsGradientOp<\n      T,\n      SIndex,\n      Context,\n      ReducerGradient,\n      false /*GradientNeedIndices*/>;\n  using WithMainInputBackwardOp = AbstractLengthsWithMainInputGradientOp<\n      T,\n      SIndex,\n      Context,\n      ReducerGradient>;\n  // Will return 3 input version. This is aliging new CPU/GPU nets.\n  using GetGradient = LengthsOpGetGradient<\n      ForwardOp,\n      ReducerDef,\n      ReducerGradient,\n      true /*SparseFused*/,\n      GradientNeedIndices>;\n};\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_SEGMENT_REDUCTION_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/sequence_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_SEQUENCE_OPS_H_\n#define CAFFE2_OPERATORS_SEQUENCE_OPS_H_\n\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/tensor.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass GatherPaddingOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  GatherPaddingOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        startPaddingWidth_(\n            OperatorBase::GetSingleArgument<int>(\"padding_width\", 1)),\n        endPaddingWidth_(\n            OperatorBase::GetSingleArgument<int>(\"end_padding_width\", -1)) {\n    CAFFE_ENFORCE_GE(startPaddingWidth_, 0);\n    if (endPaddingWidth_ < 0) {\n      endPaddingWidth_ = startPaddingWidth_;\n    }\n  }\n\n  bool RunOnDevice() override {\n    if (startPaddingWidth_ == 0 && endPaddingWidth_ == 0) {\n      Output(0)->Resize(std::vector<TIndex>(0));\n      if (OutputSize() == 2) {\n        Output(1)->Resize(std::vector<TIndex>(0));\n      }\n      return true;\n    }\n    return DispatchHelper<TensorTypes<float, double, int, int64_t, bool>>::call(\n        this, Input(0));\n  }\n\n  template <typename T>\n  bool DoRunWithType();\n\n private:\n  int startPaddingWidth_;\n  int endPaddingWidth_;\n};\n\ntemplate <class Context>\nclass RemovePaddingOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  RemovePaddingOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        startPaddingWidth_(\n            OperatorBase::GetSingleArgument<int>(\"padding_width\", 1)),\n        endPaddingWidth_(\n            OperatorBase::GetSingleArgument<int>(\"end_padding_width\", -1)) {\n    CAFFE_ENFORCE_GE(startPaddingWidth_, 0);\n    if (endPaddingWidth_ < 0) {\n      endPaddingWidth_ = startPaddingWidth_;\n    }\n  }\n\n  bool RunOnDevice() override {\n    if (startPaddingWidth_ == 0 && endPaddingWidth_ == 0) {\n      
Output(0)->CopyFrom(Input(0), &context_);\n      if (OutputSize() == 2) {\n        Output(1)->CopyFrom(Input(1), &context_);\n      }\n      return true;\n    }\n    return DispatchHelper<TensorTypes<float, double, int, int64_t, bool>>::call(\n        this, Input(0));\n  }\n\n  template <typename T>\n  bool DoRunWithType();\n\n private:\n  int startPaddingWidth_;\n  int endPaddingWidth_;\n};\n\ntemplate <class Context>\nclass AddPaddingOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  AddPaddingOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        startPaddingWidth_(\n            OperatorBase::GetSingleArgument<int>(\"padding_width\", 1)),\n        endPaddingWidth_(\n            OperatorBase::GetSingleArgument<int>(\"end_padding_width\", -1)) {\n    CAFFE_ENFORCE_GE(startPaddingWidth_, 0);\n    if (endPaddingWidth_ < 0) {\n      endPaddingWidth_ = startPaddingWidth_;\n    }\n  }\n\n  bool RunOnDevice() override {\n    if (startPaddingWidth_ == 0 && endPaddingWidth_ == 0) {\n      Output(0)->CopyFrom(Input(0), &context_);\n      if (OutputSize() == 2) {\n        Output(1)->CopyFrom(Input(1), &context_);\n      }\n      return true;\n    }\n    return DispatchHelper<TensorTypes<float, double, int, int64_t, bool>>::call(\n        this, Input(0));\n  }\n\n  template <typename T>\n  bool DoRunWithType();\n\n private:\n  int startPaddingWidth_;\n  int endPaddingWidth_;\n};\n\ntemplate <class Context>\nclass PadEmptySamplesOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  PadEmptySamplesOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_SEQUENCE_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/shape_op.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\n// RecordShapeOp records the shape of the input tensor to a vector of int. You\n// mostly don't need this operator explicitly, and it is mostly used in the\n// autodiff process.\ntemplate <class Context>\nclass ShapeOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(ShapeOp);\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    auto* output = OperatorBase::Output<Tensor<Context>>(0);\n    output->Resize(input.ndim());\n    TIndex* output_data = output->template mutable_data<TIndex>();\n    context_.template CopyBytes<Context, Context>(\n        input.ndim() * sizeof(TIndex), input.dims().data(), output_data);\n    return true;\n  }\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/sinusoid_position_encoding_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_SINUSOID_POSITION_ENCODING_OP_H_\n#define CAFFE2_OPERATORS_SINUSOID_POSITION_ENCODING_OP_H_\n\n#ifdef _MSC_VER\n#define _USE_MATH_DEFINES\n#endif // _MSC_VER\n#include <cmath>\n\n#include \"caffe2/core/operator.h\"\n\n#include \"Eigen/Core\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass SinusoidPositionEncodingOp : public Operator<Context> {\n public:\n  SinusoidPositionEncodingOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        embedding_size_(OperatorBase::template GetSingleArgument<int>(\n            \"embedding_size\",\n            100)),\n        alpha_(OperatorBase::template GetSingleArgument<float>(\"alpha\", 10000)),\n        amplitude_(\n            OperatorBase::template GetSingleArgument<float>(\"amplitude\", 1)) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, OperatorBase::Input<TensorCPU>(0));\n  }\n\n  template <typename Index>\n  bool DoRunWithType() {\n    auto& positions = Input(0);\n    auto* output = Output(0);\n\n    CAFFE_ENFORCE_EQ(positions.ndim(), 2, \"POSITIONS should be a 2-D tensor\");\n\n    auto shape = positions.dims();\n    shape.push_back(embedding_size_);\n    output->Resize(shape);\n\n    int M = shape[0];\n    int K = shape[1];\n    const Index* idxs = positions.template data<Index>();\n    float* out = output->template mutable_data<float>();\n\n    float log_alpha = std::log(alpha_);\n    float max_alpha_pow =\n        ((float)embedding_size_ - 1.0f) / (float)embedding_size_;\n\n    for (int i = 0; i < M; ++i) {\n      float pos = (float)idxs[i * K];\n\n      // Compute the embedding for position i, example 0 first\n      float* row = &out[i * K * embedding_size_];\n      Eigen::Map<Eigen::VectorXf> row_map(row, embedding_size_, 1);\n      auto row_array = row_map.array();\n\n      float log_pos = std::log(pos);\n      
row_array.setLinSpaced(\n          embedding_size_, log_pos, log_pos - log_alpha * max_alpha_pow);\n      row_array = row_array.exp().eval();\n      // row_array[k] == pos / alpha^(k / embedding_size)\n\n      // Phase shift so that alternating elements are cosines\n      for (int k = 1; k < embedding_size_; k += 2) {\n        row[k] += (float)M_PI_2;\n      }\n      row_array = amplitude_ * row_array.sin().eval();\n\n      // Copy the embedding to position i in the other examples\n      for (int j = 1; j < K; ++j) {\n        int base = i * K * embedding_size_;\n        std::copy(\n            &out[base],\n            &out[base + embedding_size_],\n            &out[base + j * embedding_size_]);\n      }\n    }\n    return true;\n  }\n\n protected:\n  int embedding_size_;\n  float alpha_;\n  float amplitude_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_SINUSOID_POSITION_ENCODING_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/slice_op.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\nnamespace {\n\ntemplate <class SIndex, class Context>\nbool SliceImpl(\n    Tensor<Context>* output,\n    const Tensor<Context>& data,\n    const Tensor<Context>& starts,\n    const Tensor<Context>& ends,\n    Context* context,\n    Tensor<Context>* gdata = nullptr,\n    const Tensor<Context>* go = nullptr) {\n  bool backward = output == nullptr;\n\n  auto* starts_data = starts.template data<SIndex>();\n  auto* ends_data = ends.template data<SIndex>();\n\n  CAFFE_ENFORCE_EQ(starts.ndim(), 1);\n  CAFFE_ENFORCE_EQ(ends.ndim(), 1);\n  CAFFE_ENFORCE_GE(data.ndim(), starts.size());\n  CAFFE_ENFORCE_EQ(starts.size(), ends.size());\n\n  std::vector<SIndex> starts_idx(data.ndim());\n  std::vector<SIndex> ends_idx(data.ndim());\n  std::vector<SIndex> dst_sizes(data.ndim());\n\n  for (int i = 0; i < data.ndim(); ++i) {\n    if (i >= starts.size()) {\n      starts_idx[i] = 0;\n      ends_idx[i] = data.dims()[i];\n      continue;\n    }\n    if (data.dims()[i] > 0) {\n      auto start = starts_data[i];\n      auto end = ends_data[i];\n      if (start < 0) {\n        start = data.dims()[i] + 1 + start;\n      }\n      if (end < 0) {\n        end = data.dims()[i] + 1 + end;\n      }\n      CAFFE_ENFORCE_GE(start, 0);\n      CAFFE_ENFORCE_GE(end, 0);\n      CAFFE_ENFORCE_LT(start, data.dims()[i]);\n      CAFFE_ENFORCE_LE(end, data.dims()[i]);\n      CAFFE_ENFORCE_GE(end, start);\n      starts_idx[i] = start;\n      ends_idx[i] = end;\n      dst_sizes[i] = end - start;\n    } else {\n      starts_idx[i] = 0;\n      ends_idx[i] = 0;\n      dst_sizes[i] = 0;\n    }\n  }\n\n  if (data.size() <= 0) {\n    // When the input is empty, we do not need to do copy.\n    if (!backward) {\n      output->Resize(dst_sizes);\n      output->raw_mutable_data(data.meta());\n    }\n    return true;\n  }\n  // for 
now only supports slicing in 1 dimension\n  int dim = -1;\n  for (int i = 0; i < data.ndim(); ++i) {\n    if (starts_idx[i] > 0 || ends_idx[i] < data.dims()[i]) {\n      CAFFE_ENFORCE_EQ(\n          dim, -1, \"Currently only possible to slice in 1 dimension.\");\n      dim = i;\n    }\n  }\n  if (dim == -1) {\n    if (!backward) {\n      output->CopyFrom(data, context);\n    } else {\n      gdata->CopyFrom(*go, context);\n    }\n    return true;\n  }\n  size_t unit = std::accumulate(\n      data.dims().begin() + dim + 1,\n      data.dims().end(),\n      1,\n      std::multiplies<SIndex>());\n  size_t num_blocks = std::accumulate(\n      data.dims().begin(),\n      data.dims().begin() + dim,\n      1,\n      std::multiplies<SIndex>());\n  if (!backward) {\n    output->Resize(dst_sizes);\n  } else {\n    gdata->ResizeLike(data);\n  }\n\n  size_t itemsize = data.meta().itemsize();\n\n  if (!backward) {\n    char* src_bytes = (char*)data.raw_data();\n    char* dst_bytes = (char*)output->raw_mutable_data(data.meta());\n\n    size_t src_nbytes = data.nbytes();\n    size_t dst_nbytes = output->nbytes();\n\n    size_t src_block_size = unit * data.dims()[dim];\n    size_t dst_block_size = unit * (ends_idx[dim] - starts_idx[dim]);\n    size_t src_offset = unit * starts_idx[dim];\n\n    if (num_blocks == 0 || dst_block_size == 0) {\n      return true;\n    }\n\n    size_t src_block_size_bytes = itemsize * src_block_size;\n    size_t dst_block_size_bytes = itemsize * dst_block_size;\n\n    char* src_offset_bytes = src_bytes + itemsize * src_offset;\n    char* dst_offset_bytes = dst_bytes;\n    for (int i = 0; i < num_blocks; ++i) {\n      char* local_src_offset_bytes =\n          src_offset_bytes + i * src_block_size_bytes;\n      char* local_dst_offset_bytes =\n          dst_offset_bytes + i * dst_block_size_bytes;\n      DCHECK_LE(\n          static_cast<void*>(local_src_offset_bytes + dst_block_size_bytes),\n          static_cast<void*>(src_bytes + src_nbytes));\n      
DCHECK_LE(\n          static_cast<void*>(local_dst_offset_bytes + dst_block_size_bytes),\n          static_cast<void*>(dst_bytes + dst_nbytes));\n      context->template CopyItems<Context, Context>(\n          data.meta(),\n          dst_block_size,\n          (void*)local_src_offset_bytes,\n          (void*)local_dst_offset_bytes);\n    }\n  } else {\n    char* src_bytes = (char*)go->raw_data();\n    char* dst_bytes = (char*)gdata->raw_mutable_data(go->meta());\n\n    size_t src_nbytes = go->nbytes();\n    size_t dst_nbytes = gdata->nbytes();\n\n    size_t src_block_size = unit * (ends_idx[dim] - starts_idx[dim]);\n    size_t dst_block_size = unit * data.dims()[dim];\n    size_t dst_offset = unit * starts_idx[dim];\n\n    if (num_blocks == 0 || dst_block_size == 0) {\n      return true;\n    }\n\n    size_t src_block_size_bytes = itemsize * src_block_size;\n    size_t dst_block_size_bytes = itemsize * dst_block_size;\n\n    char* src_offset_bytes = src_bytes;\n    char* dst_offset_bytes = dst_bytes + itemsize * dst_offset;\n    // Zero out gradient blob before copy since we copy in fewer items than\n    // there is space for\n    math::Set<char, Context>(dst_nbytes, 0, dst_bytes, context);\n\n    // If output tensor is empty, just return zeroed gradient tensor\n    if (!src_bytes) {\n      return true;\n    }\n\n    for (int i = 0; i < num_blocks; ++i) {\n      char* local_src_offset_bytes =\n          src_offset_bytes + i * src_block_size_bytes;\n      char* local_dst_offset_bytes =\n          dst_offset_bytes + i * dst_block_size_bytes;\n      DCHECK_LE(\n          local_src_offset_bytes + src_block_size_bytes,\n          src_bytes + src_nbytes);\n      DCHECK_LE(\n          local_dst_offset_bytes + src_block_size_bytes,\n          dst_bytes + dst_nbytes);\n      context->template CopyItems<Context, Context>(\n          go->meta(),\n          src_block_size,\n          (void*)local_src_offset_bytes,\n          (void*)local_dst_offset_bytes);\n    }\n  }\n  
return true;\n}\n\n} // namespace\n\ntemplate <class SIndex, class Context>\nclass SliceOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SliceOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        starts_(OperatorBase::GetRepeatedArgument<SIndex>(\"starts\")),\n        ends_(OperatorBase::GetRepeatedArgument<SIndex>(\"ends\")),\n        statically_inited_(false) {}\n\n  bool RunOnDevice() override {\n    auto* output = Output(0);\n    auto& data = Input(0);\n\n    if (InputSize() > 1) {\n      starts_host_.template CopyFrom<Context>(Input(1));\n      ends_host_.template CopyFrom<Context>(Input(2));\n    } else {\n      if (!statically_inited_) {\n        CAFFE_ENFORCE(HasArgument(\"starts\"));\n        CAFFE_ENFORCE(HasArgument(\"ends\"));\n        CAFFE_ENFORCE_EQ(starts_.size(), ends_.size());\n\n        starts_host_.Resize(starts_.size());\n        ends_host_.Resize(ends_.size());\n\n        memcpy(\n            starts_host_.template mutable_data<SIndex>(),\n            starts_.data(),\n            sizeof(SIndex) * starts_.size());\n        memcpy(\n            ends_host_.template mutable_data<SIndex>(),\n            ends_.data(),\n            sizeof(SIndex) * ends_.size());\n        statically_inited_ = true;\n      }\n    }\n\n    return SliceImpl<SIndex, Context>(\n        output, data, starts_host_, ends_host_, &context_);\n  }\n\n  DISABLE_COPY_AND_ASSIGN(SliceOp);\n\n private:\n  std::vector<SIndex> starts_;\n  std::vector<SIndex> ends_;\n  bool statically_inited_;\n  TensorCPU starts_host_;\n  TensorCPU ends_host_;\n};\n\ntemplate <class SIndex, class Context>\nclass SliceGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SliceGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        starts_(OperatorBase::GetRepeatedArgument<SIndex>(\"starts\")),\n        
ends_(OperatorBase::GetRepeatedArgument<SIndex>(\"ends\")),\n        statically_inited_(false) {}\n\n  bool RunOnDevice() override {\n    auto* gdata = Output(0);\n    auto& data = Input(0);\n\n    if (InputSize() == 4) {\n      starts_host_.template CopyFrom<Context>(Input(1));\n      ends_host_.template CopyFrom<Context>(Input(2));\n\n      auto& go = Input(3);\n\n      return SliceImpl<SIndex, Context>(\n          nullptr, data, starts_host_, ends_host_, &context_, gdata, &go);\n    } else {\n      if (!statically_inited_) {\n        CAFFE_ENFORCE(HasArgument(\"starts\"));\n        CAFFE_ENFORCE(HasArgument(\"ends\"));\n        CAFFE_ENFORCE_EQ(starts_.size(), ends_.size());\n\n        starts_host_.Resize(starts_.size());\n        ends_host_.Resize(ends_.size());\n\n        memcpy(\n            starts_host_.template mutable_data<SIndex>(),\n            starts_.data(),\n            sizeof(SIndex) * starts_.size());\n        memcpy(\n            ends_host_.template mutable_data<SIndex>(),\n            ends_.data(),\n            sizeof(SIndex) * ends_.size());\n\n        statically_inited_ = true;\n      }\n      auto& go = Input(1);\n\n      return SliceImpl<SIndex, Context>(\n          nullptr, data, starts_host_, ends_host_, &context_, gdata, &go);\n    }\n  }\n\n  DISABLE_COPY_AND_ASSIGN(SliceGradientOp);\n\n private:\n  std::vector<SIndex> starts_;\n  std::vector<SIndex> ends_;\n  bool statically_inited_;\n  TensorCPU starts_host_;\n  TensorCPU ends_host_;\n};\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/softmax_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_SOFTMAX_OP_H_\n#define CAFFE2_OPERATORS_SOFTMAX_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass SoftmaxOp final : public Operator<Context> {\n public:\n  SoftmaxOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n      axis_(OperatorBase::GetSingleArgument<int>(\"axis\", 1)) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n protected:\n  int axis_;\n  Tensor<Context> scale_;\n  Tensor<Context> rowmax_;\n  Tensor<Context> sum_multiplier_;\n};\n\ntemplate <typename T, class Context>\nclass SoftmaxGradientOp final : public Operator<Context> {\n public:\n  SoftmaxGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        axis_(OperatorBase::GetSingleArgument<int>(\"axis\", 1)) {}\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n protected:\n  int axis_;\n  Tensor<Context> scale_;\n  Tensor<Context> sum_multiplier_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_SOFTMAX_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/softmax_shared.h",
    "content": "#ifndef CAFFE2_OPERATORS_SOFTMAX_SHARED_H_\n#define CAFFE2_OPERATORS_SOFTMAX_SHARED_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\nvoid SoftmaxCPU(\n    CPUContext& context,\n    const int N,\n    const int D,\n    const float* Xdata,\n    float* Ydata,\n    float* scale,\n    const float* sum_multiplier,\n    bool logarithmic,\n    float* rowmax);\n} // namespace caffe2\n\n#endif // #define CAFFE2_OPERATORS_SOFTMAX_SHARED_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/softmax_with_loss_op.h",
    "content": "#ifndef SOFTMAX_WITH_LOSS_OP_H_\n#define SOFTMAX_WITH_LOSS_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass SoftmaxWithLossOp final : public Operator<Context> {\n public:\n  SoftmaxWithLossOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        scale_(OperatorBase::GetSingleArgument<float>(\"scale\", 1.)),\n        label_prob_mode_(OperatorBase::GetSingleArgument<int>(\"label_prob\", 0)),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))),\n        axis_(OperatorBase::GetSingleArgument<int>(\"axis\", 1)) {\n    CAFFE_ENFORCE(scale_ >= 0);\n    CAFFE_ENFORCE_EQ(\n        order_, StorageOrder::NCHW, \"Only NCHW order is supported right now.\");\n  }\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  float scale_;\n  int label_prob_mode_;\n  StorageOrder order_;\n  int axis_;\n\n  Tensor<Context> losses_; // Per example loss\n  Tensor<Context> rowmax_; // per example row max\n  Tensor<Context> weights_; // unignored weights\n  Tensor<Context> sum_multiplier_; // Vector of ones for summing via dot prod\n  Tensor<Context> total_weight_ptr_;\n  Tensor<Context> scratch_;\n};\n\ntemplate <typename T, class Context>\nclass SoftmaxWithLossGradientOp final : public Operator<Context> {\n public:\n  SoftmaxWithLossGradientOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        scale_(OperatorBase::GetSingleArgument<float>(\"scale\", 1.)),\n        label_prob_mode_(OperatorBase::GetSingleArgument<int>(\"label_prob\", 0)),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))),\n        only_loss_(OperatorBase::GetSingleArgument<bool>(\"only_loss\", false)),\n  
      axis_(OperatorBase::GetSingleArgument<int>(\"axis\", 1)) {\n    CAFFE_ENFORCE(scale_ >= 0);\n    CAFFE_ENFORCE_EQ(\n        order_, StorageOrder::NCHW, \"Only NCHW order is supported right now.\");\n  }\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  float scale_;\n  int label_prob_mode_;\n  Tensor<Context> sum_multiplier_;\n  Tensor<Context> weights_; // unignored weights\n  Tensor<Context> total_weight_ptr_;\n  StorageOrder order_;\n  bool only_loss_;\n  int axis_;\n  Tensor<Context> scratch_;\n};\n\n} // namespace caffe2\n\n#endif // SOFTMAX_WITH_LOSS_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/softplus_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_SOFTPLUS_OP_H_\n#define CAFFE2_OPERATORS_SOFTPLUS_OP_H_\n\n#include \"caffe2/core/common_omp.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass SoftplusOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(SoftplusOp)\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n};\n\ntemplate <typename T, class Context>\nclass SoftplusGradientOp final : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(SoftplusGradientOp)\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  // Input: Y, dY; Output: dX\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_SOFTPLUS_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/space_batch_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_SPACE_BATCH_OP_H_\n#define CAFFE2_OPERATORS_SPACE_BATCH_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename Context>\nvoid spaceToBatch(\n    const Tensor<Context>& input,\n    int pad_t,\n    int pad_l,\n    int block_size,\n    Tensor<Context>* output,\n    Context* /*context*/) {\n  CAFFE_ENFORCE(input.ndim() == 4);\n  CAFFE_ENFORCE(output->ndim() == 4);\n\n  const int output_batch = output->dim32(0);\n  const int output_depth = output->dim32(1);\n  const int output_height = output->dim32(2);\n  const int output_width = output->dim32(3);\n\n  const int input_batch = input.dim32(0);\n  const int input_depth = input.dim32(1);\n  const int input_height = input.dim32(2);\n  const int input_width = input.dim32(3);\n\n  for (int out_b = 0; out_b < output_batch; ++out_b) {\n    const int in_b = out_b % input_batch;\n    const int offset_w = (out_b / input_batch) % block_size;\n    const int offset_h = (out_b / input_batch) / block_size;\n    for (int d = 0; d < input_depth; ++d) {\n      for (int out_h = 0; out_h < output_height; ++out_h) {\n        const int in_h = out_h * block_size + offset_h - pad_t;\n        for (int out_w = 0; out_w < output_width; ++out_w) {\n          const int in_w = out_w * block_size + offset_w - pad_l;\n          const auto output_offset =\n              ((out_b * output_depth + d) * output_height + out_h) *\n                  output_width +\n              out_w;\n          const auto input_offset =\n              ((in_b * input_depth + d) * input_height + in_h) * input_width +\n              in_w;\n          if (in_h >= 0 && in_w >= 0 && in_h < input_height &&\n              in_w < input_width) {\n            output->template mutable_data<float>()[output_offset] =\n                input.template data<float>()[input_offset];\n          } else {\n            
output->template mutable_data<float>()[output_offset] = 0.0;\n          }\n        }\n      }\n    }\n  }\n}\n\ntemplate <typename Context>\nvoid batchToSpace(\n    const Tensor<Context>& input,\n    int pad_t,\n    int pad_l,\n    int block_size,\n    Tensor<Context>* output,\n    Context* /*context*/) {\n  CAFFE_ENFORCE(input.ndim() == 4);\n  CAFFE_ENFORCE(output->ndim() == 4);\n\n  const int output_batch = output->dim32(0);\n  const int output_depth = output->dim32(1);\n  const int output_height = output->dim32(2);\n  const int output_width = output->dim32(3);\n\n  const int input_batch = input.dim32(0);\n  const int input_depth = input.dim32(1);\n  const int input_height = input.dim32(2);\n  const int input_width = input.dim32(3);\n\n  CAFFE_ENFORCE(input_depth == output_depth);\n  for (int in_b = 0; in_b < input_batch; ++in_b) {\n    const int out_b = in_b % output_batch;\n    const int offset_w = (in_b / output_batch) % block_size;\n    const int offset_h = (in_b / output_batch) / block_size;\n    for (int d = 0; d < input_depth; ++d) {\n      for (int in_h = 0; in_h < input_height; ++in_h) {\n        const int out_h = in_h * block_size + offset_h - pad_t;\n        for (int in_w = 0; in_w < input_width; ++in_w) {\n          const int out_w = in_w * block_size + offset_w - pad_l;\n          if (out_h >= 0 && out_w >= 0 && out_h < output_height &&\n              out_w < output_width) {\n            const auto output_offset =\n                ((out_b * output_depth + d) * output_height + out_h) *\n                    output_width +\n                out_w;\n            const auto input_offset =\n                ((in_b * input_depth + d) * input_height + in_h) * input_width +\n                in_w;\n            output->template mutable_data<float>()[output_offset] =\n                input.template data<float>()[input_offset];\n          }\n        }\n      }\n    }\n  }\n}\n\ntemplate <typename Context>\nclass SpaceBatchOpBase : public Operator<Context> {\n 
public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SpaceBatchOpBase(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        pad_(OperatorBase::GetSingleArgument<int>(\"pad\", 0)),\n        pad_t_(OperatorBase::GetSingleArgument<int>(\"pad_t\", pad_)),\n        pad_l_(OperatorBase::GetSingleArgument<int>(\"pad\", pad_)),\n        pad_b_(OperatorBase::GetSingleArgument<int>(\"pad\", pad_)),\n        pad_r_(OperatorBase::GetSingleArgument<int>(\"pad\", pad_)),\n        block_size_(OperatorBase::GetSingleArgument<int>(\"block_size\", 2)),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))) {\n    CAFFE_ENFORCE(order_ == StorageOrder::NCHW);\n  }\n\n protected:\n  int pad_;\n  int pad_t_;\n  int pad_l_;\n  int pad_b_;\n  int pad_r_;\n  int block_size_;\n  StorageOrder order_;\n};\n\ntemplate <typename Context>\nclass SpaceToBatchOp final : public SpaceBatchOpBase<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  using SpaceBatchOpBase<Context>::SpaceBatchOpBase;\n\n  bool RunOnDevice() override {\n    const auto& input = Input(0);\n    auto* output = Output(0);\n    const int batch = input.dim32(0);\n    const int depth = input.dim32(1);\n    const int height = this->pad_b_ + this->pad_t_ + input.dim32(2);\n    const int width = this->pad_l_ + this->pad_r_ + input.dim32(3);\n    CAFFE_ENFORCE(\n        height % this->block_size_ == 0,\n        \"Height: \",\n        height,\n        \", block size: \",\n        this->block_size_);\n    CAFFE_ENFORCE(width % this->block_size_ == 0);\n\n    const int output_batch = batch * this->block_size_ * this->block_size_;\n    const int output_height = height / this->block_size_;\n    const int output_width = width / this->block_size_;\n    Output(0)->Resize(output_batch, depth, output_height, output_width);\n\n    spaceToBatch<Context>(\n        input,\n        this->pad_t_,\n        this->pad_l_,\n        
this->block_size_,\n        output,\n        &context_);\n\n    return true;\n  }\n};\n\ntemplate <typename Context>\nclass BatchToSpaceOp final : public SpaceBatchOpBase<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  using SpaceBatchOpBase<Context>::SpaceBatchOpBase;\n\n  bool RunOnDevice() override {\n    const auto& input = Input(0);\n    auto* output = Output(0);\n    const int batch = input.dim32(0);\n    const int depth = input.dim32(1);\n    const int height = input.dim32(2);\n    const int width = input.dim32(3);\n\n    const int output_batch = batch / this->block_size_ / this->block_size_;\n    const int output_height =\n        height * this->block_size_ - this->pad_b_ - this->pad_t_;\n    const int output_width =\n        width * this->block_size_ - this->pad_l_ - this->pad_r_;\n    Output(0)->Resize(output_batch, depth, output_height, output_width);\n    batchToSpace<Context>(\n        input,\n        this->pad_t_,\n        this->pad_l_,\n        this->block_size_,\n        output,\n        &context_);\n    return true;\n  }\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_SPACE_BATCH_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/sparse_to_dense_mask_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_SPARSE_TO_DENSE_MASK_OP_H_\n#define CAFFE2_OPERATORS_SPARSE_TO_DENSE_MASK_OP_H_\n\n#include <algorithm>\n#include <unordered_map>\n#include <vector>\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass SparseToDenseMaskBase : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SparseToDenseMaskBase(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {\n    std::vector<int> mask =\n        OperatorBase::template GetRepeatedArgument<int>(\"mask\");\n    featuresCount_ = mask.size();\n    CAFFE_ENFORCE(!mask.empty(), \"mask can't be empty\");\n    auto biggest = *std::max_element(mask.begin(), mask.end());\n    dense_.assign(std::min(kMaxDenseSize, biggest + 1), -1);\n    for (int i = 0; i < mask.size(); i++) {\n      int id = mask[i];\n      CAFFE_ENFORCE_GE(id, 0, \"Only positive IDs are allowed.\");\n      if (id >= kMaxDenseSize) {\n        CAFFE_ENFORCE(sparse_.count(id) == 0, \"Duplicated id: \", id);\n        sparse_[id] = i;\n      } else {\n        CAFFE_ENFORCE(dense_[id] == -1, \"Duplicated id: \", id);\n        dense_[id] = i;\n      }\n    }\n  }\n\n protected:\n  const int kMaxDenseSize = 1024 * 128;\n\n  std::unordered_map<int, int> sparse_;\n  std::vector<int> dense_;\n  int featuresCount_;\n\n  inline int getFeatureIdx(int id) const {\n    if (id >= kMaxDenseSize) {\n      const auto& iter = sparse_.find(id);\n      if (iter == sparse_.end()) {\n        return -1;\n      } else {\n        return iter->second;\n      }\n    } else {\n      return (id >= dense_.size()) ? 
-1 : dense_[id];\n    }\n  }\n};\n\ntemplate <class Context>\nclass SparseToDenseMaskOp : public SparseToDenseMaskBase<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SparseToDenseMaskOp(const OperatorDef& operator_def, Workspace* ws)\n      : SparseToDenseMaskBase<Context>(operator_def, ws) {\n    returnPresenceMask_ = OperatorBase::template GetSingleArgument<bool>(\n        \"return_presence_mask\", false);\n  }\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, Input(INDICES));\n  }\n\n  template <typename TInd>\n  bool DoRunWithType() {\n    auto& sparse_indices = Input(INDICES);\n    CAFFE_ENFORCE_EQ(sparse_indices.ndim(), 1);\n    auto& sparse_values = Input(VALUES);\n    CAFFE_ENFORCE_GE(sparse_values.ndim(), 1);\n    CAFFE_ENFORCE_EQ(sparse_indices.size(), sparse_values.dim(0));\n    auto& default_value = Input(DEFAULT);\n    CAFFE_ENFORCE_EQ(default_value.ndim() + 1, sparse_values.ndim());\n    CAFFE_ENFORCE_EQ(default_value.size(), sparse_values.size_from_dim(1));\n    CAFFE_ENFORCE(sparse_values.meta() == default_value.meta());\n\n    const TInd* sparse_indices_vec = sparse_indices.template data<TInd>();\n    const char* sparse_values_vec =\n        static_cast<const char*>(sparse_values.raw_data());\n    const void* default_val = default_value.raw_data();\n\n    TIndex block_size = default_value.size();\n    size_t block_nbytes = default_value.nbytes();\n\n    const int cols = this->featuresCount_;\n    int rows = -1;\n    int32_t sparse_indices_length = sparse_indices.dim32(0);\n    const int32_t* lengths_vec = nullptr;\n    auto* output = Output(OUTPUTVALUE);\n    Tensor<Context>* presence_mask = nullptr;\n    if (returnPresenceMask_) {\n      presence_mask = Output(PRESENCEMASK);\n    }\n    vector<TIndex> shape;\n    if (InputSize() == 4) {\n      auto& lengths = Input(LENGTHS);\n      CAFFE_ENFORCE_EQ(lengths.ndim(), 1);\n      lengths_vec = lengths.template 
data<int32_t>();\n      rows = lengths.dim32(0);\n    }\n    if (rows == -1) {\n      // if the LENGTHS is not set, the output will be a vector\n      rows = 1;\n      lengths_vec = &sparse_indices_length;\n    } else {\n      shape.push_back(rows);\n    }\n    shape.push_back(cols);\n    if (returnPresenceMask_) {\n      presence_mask->Resize(shape);\n    }\n    shape.insert(\n        shape.end(), default_value.dims().begin(), default_value.dims().end());\n    output->Resize(shape);\n\n    // init\n    // TODO: consider unrolling CopyItems to make elemental types copy faster\n    char* output_data =\n        static_cast<char*>(output->raw_mutable_data(sparse_values.meta()));\n    for (int i = 0; i < cols * rows; i++) {\n      context_.template CopyItems<Context, Context>(\n          default_value.meta(),\n          block_size,\n          default_val,\n          output_data + i * block_nbytes);\n    }\n    bool* presence_mask_data = nullptr;\n    if (returnPresenceMask_) {\n      presence_mask_data = presence_mask->template mutable_data<bool>();\n      math::Set<bool, Context>(\n          rows * cols, false, presence_mask_data, &context_);\n    }\n\n    CAFFE_ENFORCE(\n        (ConstEigenVectorArrayMap<TInd>(\n             sparse_indices_vec, sparse_indices_length) <\n         std::numeric_limits<int32_t>::max())\n                .all() &&\n            (ConstEigenVectorArrayMap<TInd>(\n                 sparse_indices_vec, sparse_indices_length) >= 0)\n                .all(),\n        \"All indices must be representable as non-negative int32_t numbers\");\n\n    int32_t offset = 0;\n    for (int r = 0; r < rows; r++) {\n      for (int c = 0; c < lengths_vec[r]; c++) {\n        int idx = this->getFeatureIdx(sparse_indices_vec[offset + c]);\n        if (idx != -1) {\n          context_.template CopyItems<Context, Context>(\n              sparse_values.meta(),\n              block_size,\n              sparse_values_vec + (offset + c) * block_nbytes,\n              
output_data + (r * cols + idx) * block_nbytes);\n          if (returnPresenceMask_) {\n            presence_mask_data[r * cols + idx] = true;\n          }\n        }\n      }\n      offset += lengths_vec[r];\n    }\n\n    return true;\n  }\n\n private:\n  bool returnPresenceMask_;\n\n  INPUT_TAGS(INDICES, VALUES, DEFAULT, LENGTHS);\n  OUTPUT_TAGS(OUTPUTVALUE, PRESENCEMASK);\n};\n\ntemplate <class Context>\nclass SparseToDenseMaskGradientOp : public SparseToDenseMaskBase<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SparseToDenseMaskGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : SparseToDenseMaskBase<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, Input(INDICES));\n  }\n\n  template <typename TInd>\n  bool DoRunWithType() {\n    auto& sparse_indices = Input(INDICES);\n    CAFFE_ENFORCE_EQ(sparse_indices.ndim(), 1);\n    auto& gradient_output = Input(GOUTPUT);\n\n    TIndex block_size = gradient_output.size_from_dim(1);\n    size_t block_nbytes = gradient_output.itemsize() * block_size;\n\n    const int cols = this->featuresCount_;\n    int rows = -1;\n    int iter_offset = 1;\n    int32_t default_length = sparse_indices.dim32(0);\n    const int32_t* lengths_vec = nullptr;\n    auto* output = Output(GVALUES);\n    vector<TIndex> shape;\n    if (InputSize() > LENGTHS) {\n      // if the LENGTHS is set, the gradient_output has dim:\n      // lengths * mask.size() * feature_dim\n      auto& lengths = Input(LENGTHS);\n      lengths_vec = lengths.template data<int32_t>();\n      rows = lengths.dim32(0);\n      CAFFE_ENFORCE_EQ(lengths.ndim(), 1);\n      CAFFE_ENFORCE_GE(gradient_output.ndim(), 2);\n      CAFFE_ENFORCE_EQ(gradient_output.dim(0), rows);\n      CAFFE_ENFORCE_EQ(gradient_output.dim(1), cols);\n      block_nbytes /= gradient_output.dim(1);\n      block_size /= gradient_output.dim(1);\n      iter_offset += 1;\n    }\n    if (rows == 
-1) {\n      // if the LENGTHS is not set, the gradient_output has dim:\n      // mask.size() * feature_dim\n      rows = 1;\n      lengths_vec = &default_length;\n      CAFFE_ENFORCE_GE(gradient_output.ndim(), 1);\n      CAFFE_ENFORCE_EQ(gradient_output.dim(0), cols);\n    }\n    shape.push_back(default_length);\n    // insert feature_dim\n    shape.insert(\n        shape.end(),\n        gradient_output.dims().begin() + iter_offset,\n        gradient_output.dims().end());\n    output->Resize(shape);\n\n    const TInd* sparse_indices_vec = sparse_indices.template data<TInd>();\n    const char* gradient_output_vec =\n        static_cast<const char*>(gradient_output.raw_data());\n\n    char* output_data =\n        static_cast<char*>(output->raw_mutable_data(gradient_output.meta()));\n    math::Set<char, Context>(\n        default_length * gradient_output.itemsize(), 0, output_data, &context_);\n\n    int32_t offset = 0;\n    // SparseToDenseMask is not injective; gradient_used records\n    // if the gradient is used for other input value from the same row\n    vector<bool> gradient_used(cols, false);\n    for (int r = 0; r < rows; r++) {\n      std::fill(gradient_used.begin(), gradient_used.end(), false);\n      for (int c = lengths_vec[r] - 1; c >= 0; c--) {\n        int idx = this->getFeatureIdx(sparse_indices_vec[offset + c]);\n        if (idx != -1 && !gradient_used[idx]) {\n          gradient_used[idx] = true;\n          context_.template CopyItems<Context, Context>(\n              gradient_output.meta(),\n              block_size,\n              gradient_output_vec + (r * cols + idx) * block_nbytes,\n              output_data + (offset + c) * block_nbytes);\n        }\n      }\n      offset += lengths_vec[r];\n    }\n    return true;\n  }\n\n private:\n  INPUT_TAGS(INDICES, GOUTPUT, LENGTHS);\n  OUTPUT_TAGS(GVALUES);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_SPARSE_TO_DENSE_MASK_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/sparse_to_dense_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_SPARSE_TO_DENSE_OP_H_\n#define CAFFE2_OPERATORS_SPARSE_TO_DENSE_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass SparseToDenseOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_DISPATCH_HELPER;\n\n  SparseToDenseOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        output_first_dim_(\n            OperatorBase::GetSingleArgument<int>(\"output_first_dim\", 0)) {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, Input(INDICES));\n  }\n\n private:\n  template <typename TInd>\n  int GetOutputFirstDim(\n      const TInd* sparse_indices_vec,\n      const int32_t sparse_indices_len) {\n    if (output_first_dim_ > 0) {\n      CAFFE_ENFORCE_EQ(InputSize(), 2);\n      return output_first_dim_;\n    }\n    if (InputSize() == 3) {\n      auto& data_to_infer_dim = Input(DATA_TO_INFER_DIM);\n      CAFFE_ENFORCE_GE(data_to_infer_dim.ndim(), 1);\n      return data_to_infer_dim.dim32(0);\n    }\n    if (sparse_indices_len <= 0) {\n      return 0;\n    }\n\n    // Awkward way to get the max element to make it work with both CUDA\n    // and CPU.\n    max_element_.Resize(1);\n    TInd* max_element_ptr = max_element_.template mutable_data<TInd>();\n    math::ReduceMax<TInd>(sparse_indices_len, sparse_indices_vec, max_element_ptr,\n          &scratch_, &context_);\n    max_element_host_.CopyFrom(max_element_);\n    return 1 + max_element_host_.template data<TInd>()[0];\n  }\n\n  template <typename TInd>\n  bool DoRunWithType() {\n    return DispatchHelper<\n        TensorTypes2<\n            float,\n            int32_t,\n            int64_t,\n            GenericTensorImplementation>,\n        TInd>::call(this, Input(VALUES));\n  }\n\n  template <typename TInd, typename 
TData>\n  bool DoRunWithType2() {\n    auto& sparse_indices = Input(INDICES);\n    CAFFE_ENFORCE_EQ(sparse_indices.ndim(), 1);\n    auto& sparse_values = Input(VALUES);\n    CAFFE_ENFORCE_GE(sparse_values.ndim(), 1);\n    CAFFE_ENFORCE_EQ(sparse_indices.size(), sparse_values.dim(0));\n\n    const TInd* sparse_indices_vec = sparse_indices.template data<TInd>();\n    const int32_t sparse_indices_len = sparse_indices.dim32(0);\n    const int output_first_dim =\n        GetOutputFirstDim(sparse_indices_vec, sparse_indices_len);\n\n    auto shape = sparse_values.dims();\n    shape[0] = output_first_dim;\n    auto* output = Output(0);\n    output->Resize(shape);\n\n    TData* output_data = output->template mutable_data<TData>();\n    memset(output_data, 0, output->nbytes());\n    const auto block_nitems = sparse_values.size_from_dim(1);\n    const TData* sparse_values_vec = sparse_values.template data<TData>();\n\n    for (int32_t i = 0; i < sparse_indices_len; i++) {\n      const TInd idx = sparse_indices_vec[i];\n      CAFFE_ENFORCE_GE(idx, 0);\n      CAFFE_ENFORCE_LT(idx, output_first_dim);\n      math::Add(\n          block_nitems,\n          output_data + idx * block_nitems,\n          sparse_values_vec + i * block_nitems,\n          output_data + idx * block_nitems,\n          &context_);\n    }\n    return true;\n  }\n\n  template <typename TInd>\n  bool DoRunWithOtherType2() {\n    CAFFE_THROW(\n        \"SparseToDense is not implemented on tensor of type \",\n        Input(VALUES).meta().name(),\n        \"Consider adding it a type in the list DispatchHelper or implementing \"\n        \"a generic version (which won't work for duplicated indices though)\");\n  }\n\n private:\n  int output_first_dim_;\n  Tensor<Context> scratch_;\n  Tensor<CPUContext> max_element_host_;\n  Tensor<Context> max_element_;\n\n  INPUT_TAGS(INDICES, VALUES, DATA_TO_INFER_DIM);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_SPARSE_TO_DENSE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/spatial_batch_norm_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_SPATIAL_BATCH_NORM_OP_H_\n#define CAFFE2_OPERATORS_SPATIAL_BATCH_NORM_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass SpatialBNOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SpatialBNOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        is_test_(OperatorBase::GetSingleArgument<int>(OpSchema::Arg_IsTest, 0)),\n        epsilon_(OperatorBase::GetSingleArgument<float>(\"epsilon\", 1e-5)),\n        momentum_(OperatorBase::GetSingleArgument<float>(\"momentum\", 0.9)),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))) {\n    // TODO(jiayq): update the input and output size checks.\n    CAFFE_ENFORCE(\n        (is_test_ && OutputSize() == 1) || (!is_test_ && OutputSize() == 5));\n    CAFFE_ENFORCE_GT(epsilon_, 0);\n    CAFFE_ENFORCE_GE(momentum_, 0);\n    CAFFE_ENFORCE_LE(momentum_, 1);\n  }\n  ~SpatialBNOp() {}\n\n  bool RunOnDevice() override {\n    return true;\n  }\n\n protected:\n  bool is_test_;\n  double epsilon_;\n  double momentum_;\n  StorageOrder order_;\n  INPUT_TAGS(INPUT, SCALE, BIAS, EST_MEAN, EST_VAR);\n  OUTPUT_TAGS(OUTPUT, RUNNING_MEAN, RUNNING_VAR, SAVED_MEAN, SAVED_INV_VAR);\n};\n\ntemplate <class Context>\nclass SpatialBNGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SpatialBNGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        is_test_(OperatorBase::GetSingleArgument<int>(OpSchema::Arg_IsTest, 0)),\n        epsilon_(OperatorBase::GetSingleArgument<float>(\"epsilon\", 1e-5)),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))) {\n    CAFFE_ENFORCE(InputSize() == 5);\n    
CAFFE_ENFORCE(OutputSize() == 3);\n  }\n  ~SpatialBNGradientOp() {}\n\n  bool RunOnDevice() override {\n    return true;\n  }\n\n protected:\n  bool is_test_;\n  double epsilon_;\n  StorageOrder order_;\n\n  INPUT_TAGS(INPUT, SCALE, OUTPUT_GRAD, SAVED_MEAN, SAVED_INV_VAR);\n  OUTPUT_TAGS(INPUT_GRAD, SCALE_GRAD, BIAS_GRAD);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_SPATIAL_BATCH_NORM_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/spatial_softmax_with_loss_op.h",
    "content": "#ifndef SPATIAL_SOFTMAX_WITH_LOSS_OP_H_\n#define SPATIAL_SOFTMAX_WITH_LOSS_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass SpatialSoftmaxWithLossOp final : public Operator<Context> {\n public:\n  SpatialSoftmaxWithLossOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        scale_(OperatorBase::GetSingleArgument<float>(\"scale\", 1.)),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))) {\n    CAFFE_ENFORCE(scale_ >= 0);\n    CAFFE_ENFORCE_EQ(\n        order_, StorageOrder::NCHW, \"Only NCHW order is supported right now.\");\n  }\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  float scale_;\n  StorageOrder order_;\n\n  Tensor<Context> losses_; // Per example loss\n  Tensor<Context> rowmax_; // per example row max\n  Tensor<Context> weights_; // unignored weights\n  Tensor<Context> sum_multiplier_; // Vector of ones for summing via dot prod\n  Tensor<Context> total_weight_ptr_;\n  Tensor<Context> scratch_;\n};\n\ntemplate <typename T, class Context>\nclass SpatialSoftmaxWithLossGradientOp final : public Operator<Context> {\n public:\n  SpatialSoftmaxWithLossGradientOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        scale_(OperatorBase::GetSingleArgument<float>(\"scale\", 1.)),\n        order_(StringToStorageOrder(\n            OperatorBase::GetSingleArgument<string>(\"order\", \"NCHW\"))),\n        only_loss_(OperatorBase::GetSingleArgument<bool>(\"only_loss\", false)) {\n    CAFFE_ENFORCE(scale_ >= 0);\n    CAFFE_ENFORCE_EQ(\n        order_, StorageOrder::NCHW, \"Only NCHW order is supported right now.\");\n  }\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override;\n\n protected:\n  
float scale_;\n  Tensor<Context> sum_multiplier_;\n  Tensor<Context> weights_; // unignored weights\n  Tensor<Context> total_weight_ptr_;\n  StorageOrder order_;\n  bool only_loss_;\n  Tensor<Context> scratch_;\n};\n\n} // namespace caffe2\n\n#endif // SPATIAL_SOFTMAX_WITH_LOSS_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/square_root_divide_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_SQUARE_ROOT_DIVIDE_OP_H_\n#define CAFFE2_OPERATORS_SQUARE_ROOT_DIVIDE_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass SquareRootDivideOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_DISPATCH_HELPER;\n\n  SquareRootDivideOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<float>>::call(this, Input(DATA));\n  }\n\n private:\n  template <typename TData>\n  bool DoRunWithType() {\n    return DispatchHelper<TensorTypes2<float, int32_t, int64_t>, TData>::call(\n        this, Input(SCALE));\n  }\n\n  template <typename TData, typename TScale>\n  bool DoRunWithType2() {\n    auto& data = Input(DATA);\n    auto& scale = Input(SCALE);\n    auto* Y = Output(0);\n    Y->ResizeLike(data);\n    size_t batchSize = data.dim(0);\n    size_t exampleSize = data.size_from_dim(1);\n    CAFFE_ENFORCE(batchSize == scale.dim(0), batchSize, \" != \", scale.dim(0));\n    auto* scalePtr = scale.template data<TScale>();\n    auto* dataPtr = data.template data<TData>();\n    auto* yPtr = Y->template mutable_data<TData>();\n    for (auto i = 0; i < batchSize; ++i) {\n      auto scale = scalePtr[i];\n      CAFFE_ENFORCE(scale >= 0, scale, \" < 0\");\n      auto multiplier = scale == 0 ? 1.0 : 1 / std::sqrt(scale);\n      math::Scale<TData, Context>(\n          exampleSize,\n          multiplier,\n          dataPtr + i * exampleSize,\n          yPtr + i * exampleSize,\n          &context_);\n    }\n    return true;\n  }\n\n  INPUT_TAGS(DATA, SCALE);\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_SQUARE_ROOT_DIVIDE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/stop_gradient.h",
    "content": "#ifndef CAFFE2_OPERATORS_STOP_GRADIENT_H_\n#define CAFFE2_OPERATORS_STOP_GRADIENT_H_\n\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass StopGradientOp : public Operator<Context> {\n public:\n  USE_SIMPLE_CTOR_DTOR(StopGradientOp)\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override {\n    const auto& in = Input(0);\n    auto* out = Output(0);\n    if (out != &in) {\n      out->CopyFrom(in, &context_);\n    }\n    return true;\n  }\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_STOP_GRADIENT_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/string_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_STRING_OPS_H_\n#define CAFFE2_OPERATORS_STRING_OPS_H_\n\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/operators/elementwise_op.h\"\n\nnamespace caffe2 {\n\n/**\n * ForEach is a unary functor that forwards each element of the input array\n * into the elementwise Functor provided, and gathers the results of each\n * call into the resulting array. Use it as an adaptor if you want to create\n * a UnaryElementwiseOp that acts on each element of the tensor per function\n * call -- this is resonable for complex types where vectorization wouldn't\n * be much of a gain, performance-wise.\n */\ntemplate <typename Functor>\nstruct ForEach {\n  explicit ForEach(OperatorBase& op) : functor(op) {}\n\n  template <typename In, typename Out, typename Context>\n  void operator()(int n, const In* in, Out* out, Context* /*c*/) {\n    for (int i = 0; i < n; ++i) {\n      out[i] = functor(in[i]);\n    }\n  }\n  Functor functor;\n};\n\ntemplate <typename ScalarFunctor, typename TypeMap = FixedType<std::string>>\nusing StringElementwiseOp = UnaryElementwiseWithArgsOp<\n    TensorTypes<std::string>,\n    CPUContext,\n    ForEach<ScalarFunctor>,\n    TypeMap>;\n\ntemplate <class Context>\nclass StringJoinOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  StringJoinOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        delimiter_(\n            OperatorBase::GetSingleArgument<std::string>(\"delimiter\", \",\")),\n        axis_(OperatorBase::GetSingleArgument<int>(\"axis\", 0)) {\n    CAFFE_ENFORCE(axis_ == 0 || axis_ == 1);\n  }\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<\n        float,\n        double,\n        int8_t,\n        uint8_t,\n        int16_t,\n        uint16_t,\n        int32_t,\n        int64_t,\n        std::string,\n        bool>>::call(this, Input(0));\n  }\n\n  template <typename T>\n  bool 
DoRunWithType();\n\n protected:\n  std::string delimiter_;\n  int axis_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_STRING_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/summarize_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_SUMMARIZE_OP_H_\n#define CAFFE2_OPERATORS_SUMMARIZE_OP_H_\n\n#include <fstream>\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\nconstexpr char kSummaryzeOpExtension[] = \".summary\";\n\ntemplate <typename T, class Context>\nclass SummarizeOp final : public Operator<Context> {\n public:\n  SummarizeOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        to_file_(OperatorBase::GetSingleArgument<int>(\"to_file\", 0)) {\n    if (to_file_) {\n      // We will output to file instead of printing on screen.\n      const string& target_folder = ws->RootFolder();\n      // We will write each individual tensor to its individual file.\n      // Also, since the namescope is currently represented by \"/\", we will\n      // need to replace it with a symbol that does not conflict with the\n      // folder separator in Linux.\n      string proper_name = def.input(0);\n      std::replace(proper_name.begin(), proper_name.end(), '/', '#');\n      log_file_.reset(new std::ofstream(\n          target_folder + \"/\" + proper_name + kSummaryzeOpExtension,\n          std::ofstream::out | std::ofstream::trunc));\n      CAFFE_ENFORCE(\n          log_file_->good(),\n          \"Failed to open summarize file for tensor \",\n          def.input(0),\n          \". rdstate() = \",\n          log_file_->rdstate());\n    }\n  }\n  ~SummarizeOp() {\n    if (to_file_)\n      log_file_->close();\n  }\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n  static constexpr int MIN_IDX = 0;\n  static constexpr int MAX_IDX = 1;\n  static constexpr int MEAN_IDX = 2;\n  static constexpr int STD_IDX = 3;\n\n  static constexpr int NUM_STATS = 4;\n\n protected:\n  bool to_file_;\n  std::unique_ptr<std::ofstream> log_file_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_SUMMARIZE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/tensor_protos_db_input.h",
    "content": "#ifndef CAFFE2_OPERATORS_TENSOR_PROTOS_DB_INPUT_H_\n#define CAFFE2_OPERATORS_TENSOR_PROTOS_DB_INPUT_H_\n\n#include <iostream>\n#include <mutex>\n\n#include \"caffe2/core/db.h\"\n#include \"caffe2/operators/prefetch_op.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass TensorProtosDBInput final : public PrefetchOperator<Context> {\n public:\n  using OperatorBase::OutputSize;\n  using PrefetchOperator<Context>::prefetch_thread_;\n  explicit TensorProtosDBInput(const OperatorDef& operator_def, Workspace* ws);\n  ~TensorProtosDBInput() {\n    PrefetchOperator<Context>::Finalize();\n  }\n\n  bool Prefetch() override;\n  bool CopyPrefetched() override;\n\n private:\n  // Prefetch will always just happen on the CPU side.\n  vector<Blob> prefetched_blobs_;\n  int batch_size_;\n  bool shape_inferred_ = false;\n  string key_;\n  string value_;\n};\n\ntemplate <class Context>\nTensorProtosDBInput<Context>::TensorProtosDBInput(\n    const OperatorDef& operator_def,\n    Workspace* ws)\n    : PrefetchOperator<Context>(operator_def, ws),\n      prefetched_blobs_(operator_def.output_size()),\n      batch_size_(\n          OperatorBase::template GetSingleArgument<int>(\"batch_size\", 0)) {}\n\ntemplate <class Context>\nbool TensorProtosDBInput<Context>::Prefetch() {\n  const db::DBReader& reader = OperatorBase::Input<db::DBReader>(0);\n  TensorDeserializer<CPUContext> deserializer;\n  if (batch_size_ == 0) {\n    // We do not need to construct a batch. 
As a result, we will simply\n    // deserialize everything into the target prefetched blob.\n    reader.Read(&key_, &value_);\n    TensorProtos protos;\n    CAFFE_ENFORCE(protos.ParseFromString(value_));\n    CAFFE_ENFORCE(protos.protos_size() == OutputSize());\n    for (int i = 0; i < protos.protos_size(); ++i) {\n      if (protos.protos(i).has_device_detail()) {\n        protos.mutable_protos(i)->clear_device_detail();\n      }\n      deserializer.Deserialize(\n          protos.protos(i),\n          prefetched_blobs_[i].template GetMutable<TensorCPU>());\n    }\n  } else {\n    vector<TensorCPU> temp_tensors(OutputSize());\n    for (int item_id = 0; item_id < batch_size_; ++item_id) {\n      reader.Read(&key_, &value_);\n      TensorProtos protos;\n      CAFFE_ENFORCE(protos.ParseFromString(value_));\n      CAFFE_ENFORCE(protos.protos_size() == OutputSize());\n      if (!shape_inferred_) {\n        // First, set the shape of all the blobs.\n        for (int i = 0; i < protos.protos_size(); ++i) {\n          vector<int> dims(\n              protos.protos(i).dims().begin(), protos.protos(i).dims().end());\n          dims.insert(dims.begin(), batch_size_);\n          prefetched_blobs_[i].template GetMutable<TensorCPU>()->Resize(dims);\n        }\n      }\n      for (int i = 0; i < protos.protos_size(); ++i) {\n        TensorCPU* dst = prefetched_blobs_[i].template GetMutable<TensorCPU>();\n        TensorCPU& src = temp_tensors[i];\n        if (protos.protos(i).has_device_detail()) {\n          protos.mutable_protos(i)->clear_device_detail();\n        }\n        deserializer.Deserialize(protos.protos(i), &src);\n        DCHECK_EQ(src.size() * batch_size_, dst->size());\n        this->context_.template CopyItems<CPUContext, CPUContext>(\n            src.meta(),\n            src.size(),\n            src.raw_data(),\n            static_cast<char*>(dst->raw_mutable_data(src.meta())) +\n                src.nbytes() * item_id);\n      }\n    }\n  }\n  return 
true;\n}\n\ntemplate <class Context>\nbool TensorProtosDBInput<Context>::CopyPrefetched() {\n  for (int i = 0; i < OutputSize(); ++i) {\n    OperatorBase::Output<Tensor<Context>>(i)->CopyFrom(\n        prefetched_blobs_[i].template Get<TensorCPU>(), &this->context_);\n  }\n  return true;\n}\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_TENSOR_PROTOS_DB_INPUT_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/text_file_reader_utils.h",
    "content": "#ifndef CAFFE2_OPERATORS_TEXT_FILE_READER_UTILS_H\n#define CAFFE2_OPERATORS_TEXT_FILE_READER_UTILS_H\n\n#include <memory>\n#include <string>\n#include <vector>\n\n#include \"caffe2/core/common.h\"\n\nnamespace caffe2 {\n\nstruct Token {\n  int startDelimId;\n  const char* start;\n  const char* end;\n};\n\nclass TokenizedString {\n  // holder for strings that have been modified\n  std::vector<std::unique_ptr<std::string>> modifiedStrings_;\n  std::vector<Token> tokens_;\n  int lastDelim_;\n\n public:\n  const std::vector<Token>& tokens() const {\n    return tokens_;\n  }\n  const int lastDelim() const {\n    return lastDelim_;\n  }\n  friend class Tokenizer;\n};\n\nclass Tokenizer {\n private:\n  int startDelimId_;\n  // state of the tokenizer\n  std::string leftover_;\n  // if we need to skip the first characters of the next batch because\n  // e.g. an escape char that was the last character of the last batch.\n  int toBeSkipped_;\n  int delimTable_[256];\n  const char escape_;\n\n public:\n  Tokenizer(const std::vector<char>& delimiters, char escape);\n  void reset();\n  void next(char* start, char* end, TokenizedString& tokenized);\n};\n\nstruct CharRange {\n  char* start;\n  char* end;\n};\n\nstruct StringProvider {\n  virtual void operator()(CharRange&) = 0;\n  virtual void reset() = 0;\n  virtual ~StringProvider() {}\n};\n\nclass BufferedTokenizer {\n public:\n  BufferedTokenizer(const Tokenizer& t, StringProvider* p, int numPasses = 1)\n      : provider_(p), tokenizer_(t), tokenIndex_(0), numPasses_(numPasses) {}\n\n  bool next(Token& token) {\n    CharRange range;\n    while (tokenIndex_ >= tokenized_.tokens().size()) {\n      range.start = nullptr;\n      while (range.start == nullptr && pass_ < numPasses_) {\n        (*provider_)(range);\n        if (range.start == nullptr) {\n          ++pass_;\n          if (pass_ < numPasses_) {\n            provider_->reset();\n            tokenizer_.reset();\n          }\n        }\n      }\n      if 
(range.start == nullptr) {\n        return false;\n      }\n      tokenizer_.next(range.start, range.end, tokenized_);\n      tokenIndex_ = 0;\n    }\n    token = tokenized_.tokens()[tokenIndex_++];\n    return true;\n  };\n\n  int endDelim() const {\n    if (tokenIndex_ + 1 < tokenized_.tokens().size()) {\n      return tokenized_.tokens()[tokenIndex_ + 1].startDelimId;\n    }\n    return tokenized_.lastDelim();\n  }\n\n private:\n  StringProvider* provider_;\n  Tokenizer tokenizer_;\n  TokenizedString tokenized_;\n  int tokenIndex_;\n  int numPasses_;\n  int pass_{0};\n};\n\nclass FileReader : public StringProvider {\n public:\n  explicit FileReader(const std::string& path, size_t bufferSize = 65536);\n  ~FileReader();\n  void operator()(CharRange& range) override;\n  void reset() override;\n\n private:\n  const size_t bufferSize_;\n  int fd_;\n  std::unique_ptr<char[]> buffer_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_TEXT_FILE_READER_UTILS_H\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/tile_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_TILE_OP_H_\n#define CAFFE2_OPERATORS_TILE_OP_H_\n\n#include \"caffe2/core/common_omp.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\n// Copy a Blob n times along a specified axis.\ntemplate <class Context>\nclass TileOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  TileOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        tiles_(OperatorBase::GetSingleArgument<int32_t>(\"tiles\", 1)),\n        axis_(OperatorBase::GetSingleArgument<int32_t>(\"axis\", 0)) {}\n  ~TileOp() {}\n\n  bool RunOnDevice() override {\n    const auto& input = Input(0);\n    std::array<int32_t, 2> temp_params = {{tiles_, axis_}};\n    if (InputSize() > 1) {\n      // We potentially have tiles and/or axis specified as inputs\n      // as well. We will check for them in that order. 
In other words:\n      // InputSize() == 2: tiles is specified\n      // InputSize() == 3: tiles is specified and axis.\n      // Anything specified as input will override the arguments\n      CAFFE_ENFORCE(\n          Input(1).ndim() == 1 && Input(1).size() == 1,\n          \"Input `tiles` should be a vector of size 1.\");\n\n      const auto& input1 = Input(1);\n      context_.template CopyItems<Context, CPUContext>(\n          input1.meta(),\n          1,\n          static_cast<const char*>(input1.raw_data()),\n          &(temp_params[0]));\n\n      if (InputSize() > 2) {\n        CAFFE_ENFORCE(\n            Input(2).ndim() == 1 && Input(2).size() == 1,\n            \"Input `axis` should be a vector of size 1.\");\n\n        const auto& input2 = Input(2);\n        context_.template CopyItems<Context, CPUContext>(\n            input2.meta(),\n            1,\n            static_cast<const char*>(input2.raw_data()),\n            &(temp_params[1]));\n      } else {\n        CAFFE_ENFORCE(\n            OperatorBase::HasArgument(\"axis\"),\n            \"Argument `axis` is missing and was not specified as input.\");\n      }\n    } else {\n      CAFFE_ENFORCE(\n          OperatorBase::HasArgument(\"tiles\"),\n          \"Argument `tiles` is missing and was not specified as input.\");\n      CAFFE_ENFORCE(\n          OperatorBase::HasArgument(\"axis\"),\n          \"Argument `axis` is missing and was not specified as input.\");\n    }\n\n    tiles_ = temp_params[0];\n    axis_ = temp_params[1];\n\n    auto* output = Output(0);\n    const auto axis = input.canonical_axis_index(axis_);\n\n    // reshape output to be input tiled along the axis\n    vector<TIndex> output_dims(input.dims());\n    output_dims[axis_] = output_dims[axis_] * tiles_;\n    output->Resize(output_dims);\n\n    // size up to (and not including) axis\n    const auto outer_dim = input.size_to_dim(axis);\n    // size from axis up\n    const auto inner_dim = input.size_from_dim(axis);\n\n    /**\n     * 
How this works:\n     * Imagine a 2D tensor (matrix) of size 3x10, tiled 2 times.\n     * - Tiling along axis 0 (row) means copying the entire 3x10 Matrix 2\n     * times. outer_dim = 0, inner_dim = 30.\n     * - Tiling along axis 1 (column) means copying each row 2 times, then\n     * proceed to the next row, until the end. outer_dim = 3, inner_dim = 10.\n     */\n    const char* input_data = static_cast<const char*>(input.raw_data());\n    char* output_data =\n        static_cast<char*>(output->raw_mutable_data(input.meta()));\n\n    DoTile(\n        input.meta(),\n        input.itemsize(),\n        outer_dim,\n        inner_dim,\n        input_data,\n        output_data);\n\n    return true;\n  }\n\n private:\n  void DoTile(\n      const TypeMeta& meta,\n      int item_size,\n      int outer_dim,\n      int inner_dim,\n      const char* input_data,\n      char* output_data) {\n    for (auto i = 0; i < outer_dim; ++i) {\n      for (auto t = 0; t < tiles_; ++t) {\n        context_.template CopyItems<Context, Context>(\n            meta, inner_dim, input_data, output_data);\n        output_data += inner_dim * item_size;\n      }\n      input_data += inner_dim * item_size;\n    }\n  }\n\n  int32_t tiles_;\n  int32_t axis_;\n};\n\ntemplate <typename T, class Context>\nclass TileGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  TileGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        tiles_(OperatorBase::GetSingleArgument<int32_t>(\"tiles\", 1)),\n        axis_(OperatorBase::GetSingleArgument<int32_t>(\"axis\", 0)) {}\n  ~TileGradientOp() {}\n\n  bool RunOnDevice() override {\n    std::array<int32_t, 2> temp_params = {{tiles_, axis_}};\n    if (InputSize() > 1) {\n      // We potentially have tiles and/or axis specified as inputs\n      // as well. We will check for them in that order. 
In other words:\n      // InputSize() == 2: tiles is specified\n      // InputSize() == 3: tiles is specified and axis.\n      // Anything specified as input will override the arguments\n      CAFFE_ENFORCE(\n          Input(1).ndim() == 1 && Input(1).size() == 1,\n          \"Input `tiles` should be a vector of size 1.\");\n\n      const auto& input1 = Input(1);\n      context_.template CopyItems<Context, CPUContext>(\n          input1.meta(),\n          1,\n          static_cast<const char*>(input1.raw_data()),\n          &(temp_params[0]));\n\n      if (InputSize() > 2) {\n        CAFFE_ENFORCE(\n            Input(2).ndim() == 1 && Input(2).size() == 1,\n            \"Input `axis` should be a vector of size 1.\");\n\n        const auto& input2 = Input(2);\n        context_.template CopyItems<Context, CPUContext>(\n            input2.meta(),\n            1,\n            static_cast<const char*>(input2.raw_data()),\n            &(temp_params[1]));\n      } else {\n        CAFFE_ENFORCE(\n            OperatorBase::HasArgument(\"axis\"),\n            \"Argument `axis` is missing and was not specified as input.\");\n      }\n    } else {\n      CAFFE_ENFORCE(\n          OperatorBase::HasArgument(\"tiles\"),\n          \"Argument `tiles` is missing and was not specified as input.\");\n      CAFFE_ENFORCE(\n          OperatorBase::HasArgument(\"axis\"),\n          \"Argument `axis` is missing and was not specified as input.\");\n    }\n\n    tiles_ = temp_params[0];\n    axis_ = temp_params[1];\n\n    const auto& input = Input(0);\n    auto* output = Output(0);\n    const auto axis = input.canonical_axis_index(axis_);\n\n    // reshape output to be input \"untiled\" along the axis\n    vector<TIndex> output_dims(input.dims());\n    output_dims[axis_] = output_dims[axis_] / tiles_;\n    output->Resize(output_dims);\n\n    // size up to (and not including) axis\n    const auto outer_dim = output->size_to_dim(axis);\n    // size from axis up\n    const auto inner_dim = 
output->size_from_dim(axis);\n\n    /**\n     * How this works:\n     * Imagine a 2D tensor (matrix) of size 3x10, tiled 2 times along axis 1\n     * (column).\n     * This is equivalent to multiplying by a vector of 1s transposed.\n     * The gradient of this is all 1s in the shape of the input matrix\n     * (call it X).\n     * So the output gradient should be the matrix multipication result\n     * of input gradient (gradient of tiled tensor output) and X.\n     */\n    const char* input_data = static_cast<const char*>(input.raw_data());\n    char* output_data =\n        static_cast<char*>(output->raw_mutable_data(input.meta()));\n\n    DoTileGradient(\n        input.meta(),\n        input.itemsize(),\n        outer_dim,\n        inner_dim,\n        input_data,\n        output_data);\n\n    return true;\n  }\n\n private:\n  void DoTileGradient(\n      const TypeMeta& meta,\n      int item_size,\n      int outer_dim,\n      int inner_dim,\n      const char* input_data,\n      char* output_data) {\n    for (auto i = 0; i < outer_dim; ++i) {\n      context_.template CopyItems<Context, Context>(\n          meta, inner_dim, input_data, output_data);\n      input_data += inner_dim * item_size;\n      for (auto t = 1; t < tiles_; ++t) {\n        math::Axpy<T, Context>(\n            inner_dim,\n            T(1),\n            reinterpret_cast<const T*>(input_data),\n            reinterpret_cast<T*>(output_data),\n            &context_);\n        input_data += inner_dim * item_size;\n      }\n      output_data += inner_dim * item_size;\n    }\n  }\n\n  int32_t tiles_;\n  int32_t axis_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_TILE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/top_k.h",
    "content": "#ifndef CAFFE2_OPERATORS_TOP_K_H_\n#define CAFFE2_OPERATORS_TOP_K_H_\n\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass TopKOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  TopKOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws), OP_SINGLE_ARG(int, \"k\", k_, -1) {\n    CAFFE_ENFORCE(k_ >= 1, \"k argument must be >= 1\");\n  }\n\n  bool RunOnDevice() override;\n\n private:\n  int k_;\n};\n\ntemplate <typename T, class Context>\nclass TopKGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  TopKGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_TOP_K_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/transpose_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_TRANSPOSE_H_\n#define CAFFE2_OPERATORS_TRANSPOSE_H_\n#define MAX_BLOB_NUM 1024\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass TransposeOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_DISPATCH_HELPER;\n  TransposeOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        axes_(OperatorBase::GetRepeatedArgument<int>(\"axes\")) {\n    // We will check the legality of axes_: it should be from 0 to axes_.size().\n    std::vector<int> axes_sorted(axes_);\n    std::sort(axes_sorted.begin(), axes_sorted.end());\n    for (int i = 0; i < axes_sorted.size(); ++i) {\n      if (axes_sorted[i] != i) {\n        CAFFE_THROW(\"Axes should be a permutation of 0 to ndim.\");\n      }\n    }\n  }\n  ~TransposeOp() {}\n\n  bool RunOnDevice() override {\n    const auto& X = Input(0);\n    auto* Y = Output(0);\n    new_dims_.resize(X.ndim());\n    if (axes_.size() == 0) {\n      axes_.resize(X.ndim());\n      for (int i = 0; i < axes_.size(); ++i) {\n        axes_[i] = axes_.size() - 1 - i;\n      }\n      new_dims_.assign(X.dims().rbegin(), X.dims().rend());\n    } else {\n      CAFFE_ENFORCE_EQ(X.ndim(), axes_.size());\n      for (int i = 0; i < new_dims_.size(); ++i) {\n        new_dims_[i] = X.dim(axes_[i]);\n      }\n    }\n    Y->Resize(new_dims_);\n    // Do the actual transpose, which is implemented in DoRunWithType().\n    return DispatchHelper<TensorTypes<float, double, int, long>>::call(\n        this, Input(0));\n  }\n\n protected:\n  template <typename T>\n  bool DoRunWithType();\n\n  std::vector<int> axes_;\n  std::vector<TIndex> new_dims_;\n  // buffer_ is used in TransposeOp<CUDAContext> so we can obtain a consistent\n  // buffer on the GPU. 
It is not used in the CPUContext implementation.\n  Tensor<Context> buffer_;\n  TensorCPU buffer_cpu_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_TRANSPOSE_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/tt_linear_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_TT_LINEAR_OP_H_\n#define CAFFE2_OPERATORS_TT_LINEAR_OP_H_\n\n#ifdef CAFFE2_USE_MKL\n#include <mkl.h>\n#endif // CAFFE2_USE_MKL\n\n#include \"Eigen/Core\"\n#include \"Eigen/Dense\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context, class Engine = DefaultEngine>\nclass TTLinearOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  TTLinearOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        inp_sizes_(OperatorBase::GetRepeatedArgument<int>(\"inp_sizes\")),\n        out_sizes_(OperatorBase::GetRepeatedArgument<int>(\"out_sizes\")),\n        tt_ranks_(OperatorBase::GetRepeatedArgument<int>(\"tt_ranks\")),\n        Y_temp_(unique_ptr<Blob>(new Blob())) {}\n  ~TTLinearOp() {}\n\n  bool RunOnDevice() override {\n    const auto& X = Input(0); // Input array\n    const auto& b = Input(1); // Bias array\n    const auto& cores = Input(2); // 1D array containing the TT-cores\n    auto* Y = Output(0);\n\n    CAFFE_ENFORCE(X.ndim() > 1, \"Number of dimensions in X: \", X.ndim());\n    CAFFE_ENFORCE(b.ndim() == 1, \"Number of dimensions in b: \", b.ndim());\n    CAFFE_ENFORCE(\n        inp_sizes_.size() == out_sizes_.size(),\n        \"inp_sizes has size: \",\n        inp_sizes_.size(),\n        \", out_sizes has size: \",\n        out_sizes_.size());\n    CAFFE_ENFORCE(\n        cores.ndim() == 1, \"Number of dimensions in cores: \", cores.ndim());\n    // batch size\n    const int batch_size = X.ndim() > 1 ? 
X.dim32(0) : 1;\n\n    // dimension d of tensors\n    const int d = inp_sizes_.size();\n\n    // Keep track of index of current core in multiplication\n    int cores_idx = 0;\n\n    // Temporary buffer to facilitate multiplication of TT-cores with input\n    auto Y_buf = Y_temp_->GetMutable<Tensor<Context>>();\n    Y_buf->ResizeLike(X);\n    Y_buf->CopyFrom(X);\n\n    // The overall forward pass involves multiplication with each core, where\n    // each core has sizes dictated by inp_sizes_ and out_sizes_. Each core thus\n    // has size inp_sizes_[i] * tt_ranks_[i] * tt_ranks_[i + 1] * out_sizes_[i].\n    for (int i = (d - 1); i >= 0; --i) {\n      int curr_rows = inp_sizes_[i] * tt_ranks_[i + 1];\n      int curr_cols = tt_ranks_[i] * out_sizes_[i];\n\n      // TODO Replace by Reshape(), once wrappers are written\n      Y_buf->Resize(Y_buf->size() / curr_rows, curr_rows);\n      Y->Resize(Y_buf->size() / curr_rows, curr_cols);\n\n      // Defensive checks\n      CAFFE_ENFORCE(Y_buf->size() % curr_rows == 0, Y_buf->size(), curr_rows);\n      CAFFE_ENFORCE(\n          cores_idx + curr_rows * curr_cols <= cores.size(),\n          cores_idx + curr_rows * curr_cols,\n          cores.size());\n\n      // Multiply ith core with the intermediate output\n      math::Gemm<float, Context, Engine>(\n          CblasNoTrans,\n          CblasNoTrans,\n          Y_buf->size() / curr_rows,\n          curr_cols,\n          curr_rows,\n          1,\n          Y_buf->template data<float>(),\n          cores.template data<float>() + cores_idx,\n          0,\n          Y->template mutable_data<float>(),\n          &context_);\n\n      CAFFE_ENFORCE(Y->size() % out_sizes_[i] == 0, Y->size(), out_sizes_[i]);\n\n      // TODO Add GPU support by writing a generic wrapper.\n      auto Y_mat = EigenMatrixMap<float>(\n          Y->template mutable_data<float>(),\n          Y->size() / out_sizes_[i],\n          out_sizes_[i]);\n      Y_mat = ConstEigenMatrixMap<float>(\n                  
Y->template data<float>(),\n                  out_sizes_[i],\n                  Y->size() / out_sizes_[i])\n                  .transpose()\n                  .eval();\n\n      // Resize operation\n      Y_buf->Resize(Y->dim32(0), Y->dim32(1));\n      context_.template Copy<float, CPUContext, CPUContext>(\n          Y->size(),\n          Y->template data<float>(),\n          Y_buf->template mutable_data<float>());\n\n      cores_idx += curr_rows * curr_cols;\n    }\n\n    // TODO Add GPU support by writing a generic wrapper.\n    auto Y_mat = EigenMatrixMap<float>(\n        Y->template mutable_data<float>(), batch_size, Y->size() / batch_size);\n    Y_mat = ConstEigenMatrixMap<float>(\n                Y->template data<float>(), Y->size() / batch_size, batch_size)\n                .transpose()\n                .eval();\n    // TODO Replace by Reshape(), once wrappers are written\n    Y->Resize(batch_size, Y->size() / batch_size);\n\n    // Check that output size of Y is the element-wise product of out_sizes\n    int prod_out_sizes = 1;\n    for (int i = 0; i < out_sizes_.size(); i++) {\n      prod_out_sizes *= out_sizes_[i];\n    }\n    CAFFE_ENFORCE(\n        Y->dim32(1) == prod_out_sizes,\n        \"Output dimension of Y: \",\n        Y->dim32(1),\n        \", product of out_sizes: \",\n        prod_out_sizes);\n\n    // Add bias term\n    if (bias_multiplier_.size() != batch_size) {\n      // If the helper bias multiplier is not M, reshape and fill it with one.\n      bias_multiplier_.Resize(batch_size);\n      math::Set<T, Context>(\n          batch_size,\n          static_cast<T>(1),\n          bias_multiplier_.template mutable_data<T>(),\n          &context_);\n    }\n    math::Gemm<T, Context, Engine>(\n        CblasNoTrans,\n        CblasNoTrans,\n        Y->dim32(0),\n        Y->dim32(1),\n        1,\n        1,\n        bias_multiplier_.template data<T>(),\n        b.template data<T>(),\n        1,\n        Y->template mutable_data<T>(),\n        
&context_);\n    return true;\n  }\n\n protected:\n  Tensor<Context> bias_multiplier_;\n  std::vector<int> inp_sizes_;\n  std::vector<int> out_sizes_;\n  std::vector<int> tt_ranks_;\n  std::unique_ptr<Blob> Y_temp_;\n};\n\n// TODO: Complete after verifying utility of TT-layer's forward pass.\ntemplate <typename T, class Context, class Engine = DefaultEngine>\nclass TTLinearGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  TTLinearGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n  ~TTLinearGradientOp() {}\n\n  bool RunOnDevice() override {\n    return false;\n  }\n\n protected:\n  Tensor<Context> bias_multiplier_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_TT_LINEAR_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/utility_ops.h",
    "content": "#ifndef CAFFE2_OPERATORS_UTILITY_OPS_H_\n#define CAFFE2_OPERATORS_UTILITY_OPS_H_\n\n#include <math.h>\n\n#include \"caffe2/core/common_omp.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/types.h\"\n#include \"caffe2/utils/math.h\"\n\n#include <map>\n#include <utility>\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass NanCheckOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  NanCheckOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override;\n\n private:\n  TensorPrinter tensorPrinter_;\n  Tensor<Context> scratch_;\n};\n\nstruct GetNanCheckGradient : public GradientMakerBase {\n  using GradientMakerBase::GradientMakerBase;\n  std::vector<OperatorDef> GetGradientDefs() override {\n    return {CreateOperatorDef(\n        \"NanCheck\",\n        \"\",\n        std::vector<string>{GO(0)},\n        std::vector<string>{GI(0)})};\n  }\n};\n\ntemplate <class Context>\nclass WallClockTimeOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  WallClockTimeOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    int64_t nanoseconds = static_cast<long int>(\n        std::chrono::duration_cast<std::chrono::nanoseconds>(\n            std::chrono::high_resolution_clock::now().time_since_epoch())\n            .count());\n\n    TensorCPU* output = OperatorBase::Output<TensorCPU>(0);\n    output->Resize();\n    *output->template mutable_data<int64_t>() = nanoseconds;\n\n    return true;\n  }\n};\n\nconst char kPrintFileExtension[] = \".log\";\n\ntemplate <class Context>\nclass PrintOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_DISPATCH_HELPER;\n  PrintOp(const OperatorDef& operator_def, Workspace* ws)\n      : 
Operator<Context>(operator_def, ws),\n        tensor_printer_(\n            operator_def.input(0),\n            OperatorBase::GetSingleArgument<int>(\"to_file\", 0)\n                ? ws->RootFolder() + \"/\" + operator_def.input(0) +\n                    kPrintFileExtension\n                : \"\",\n            OperatorBase::GetSingleArgument<int>(\"limit\", 0)),\n        every_n_(OperatorBase::GetSingleArgument<int>(\"every_n\", 1)) {\n    CAFFE_ENFORCE_GE(every_n_, 1);\n  }\n\n  bool RunOnDevice() override {\n    if (++occurrences_mod_n_ > every_n_) {\n      occurrences_mod_n_ -= every_n_;\n    }\n    if (occurrences_mod_n_ != 1) {\n      return true;\n    }\n\n    if (!OperatorBase::InputIsType<Tensor<Context>>(0) &&\n        !OperatorBase::InputIsType<TensorCPU>(0)) {\n      LOG(INFO) << \"Blob of type: \"\n                << OperatorBase::Inputs().at(0)->meta().name();\n      return true;\n    }\n    // special-case empty tensors since they may have no meta()\n    if (Input(0).size() == 0) {\n      tensor_printer_.PrintMeta(Input(0));\n      return true;\n    }\n\n    using Types = TensorTypes<\n        float,\n        double,\n        int,\n        long,\n        bool,\n        char,\n        unsigned char,\n        std::string>;\n\n    if (OperatorBase::InputIsType<TensorCPU>(0)) {\n      return DispatchHelper<Types>::call(\n          this, OperatorBase::Input<TensorCPU>(0));\n    } else {\n      return DispatchHelper<Types>::call(this, Input(0));\n    }\n  }\n\n private:\n  template <typename T>\n  bool DoRunWithType() {\n    // A simple strategy to copy tensor if needed, and have the tensor pointer\n    // pointing to the right instantiation. 
Note that tensor_copy_if_needed\n    // will handle memory deallocation itself so no smart pointer is needed.\n    const TensorCPU* tensor;\n    TensorCPU tensor_copy_if_needed;\n    if (OperatorBase::InputIsType<TensorCPU>(0)) {\n      tensor = &OperatorBase::Input<TensorCPU>(0);\n    } else {\n      tensor_copy_if_needed.CopyFrom(Input(0), &context_);\n      // Make sure that the copy is finished.\n      context_.FinishDeviceComputation();\n      tensor = &tensor_copy_if_needed;\n    }\n    tensor_printer_.Print<T>(*tensor);\n    return true;\n  }\n\n private:\n  TensorPrinter tensor_printer_;\n  int every_n_;\n  int occurrences_mod_n_{0};\n};\n\n/**\n * @brief Alias op makes the output and the input share the same underlying\n * storage.\n *\n * WARNING: in general, in caffe2's operator interface different tensors should\n * have different underlying storage, which is the assumption made by\n * components such as the dependency engine and memory optimization. Thus, in\n * normal situations you should not use the AliasOp, especially in a normal\n * forward-backward pass.\n *\n * The Alias op is provided so one can achieve true asynchrony, such as\n * Hogwild, in a graph. 
But make sure you understand all the implications\n * similar to multi-thread computation before you use it explicitly.\n */\ntemplate <class Context>\nclass AliasOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(AliasOp);\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    CAFFE_ENFORCE_GE(input.size(), 0, \"Tensor is not initialized\");\n    Output(0)->ResizeLike(input);\n    Output(0)->ShareData(input);\n    return true;\n  }\n};\n\n/**\n * @brief Pass inputs to outputs.\n * Input:\n *   DATA - dense tensor.\n * Output:\n *   DATA - same tensor as input.\n */\ntemplate <class Context>\nclass EnsureDenseOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(EnsureDenseOp)\n\n  bool RunOnDevice() override {\n    const auto& input = Input(0);\n    auto* output = Output(0);\n    CAFFE_ENFORCE_GT(input.ndim(), 0, \"Input has to be at least a vector.\");\n    // it is allowed to have the output inplace overwrite the input but also\n    // allow the output to be copied from the input\n    if (&input != output) {\n      output->ResizeLike(input);\n      output->CopyFrom(input, &context_);\n    }\n    return true;\n  }\n};\n\ntemplate <class Context>\nclass FlattenOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(FlattenOp);\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    auto* output = Output(0);\n    CAFFE_ENFORCE_GE(\n        input.dims().size(), 2, \"The rank of the tensor must be >= 2.\");\n    output->Resize(input.dim(0), input.size_from_dim(1));\n    context_.template CopyItems<Context, Context>(\n        input.meta(),\n        input.size(),\n        input.raw_data(),\n        output->raw_mutable_data(input.meta()));\n    return true;\n  }\n};\n\ntemplate <class Context>\nclass FlattenToVecOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  
USE_SIMPLE_CTOR_DTOR(FlattenToVecOp);\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    auto* output = Output(0);\n    CAFFE_ENFORCE_GE(\n        input.dims().size(), 1, \"The rank of the tensor must be >= 1.\");\n    output->Resize(input.size());\n\n    context_.template CopyItems<Context, Context>(\n        input.meta(),\n        input.size(),\n        input.raw_data(),\n        output->raw_mutable_data(input.meta()));\n    return true;\n  }\n};\n\n// Output gets the data of input(0), but reshapes it like input(1).\ntemplate <class Context>\nclass ResizeLikeOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(ResizeLikeOp);\n\n  bool RunOnDevice() override {\n    auto& input0 = Input(0);\n    auto& input1 = Input(1);\n    auto* output = Output(0);\n    CAFFE_ENFORCE_EQ(input0.size(), input1.size());\n    output->ResizeLike(Input(1));\n    context_.template CopyItems<Context, Context>(\n        input0.meta(),\n        input0.size(),\n        input0.raw_data(),\n        output->raw_mutable_data(input0.meta()));\n    return true;\n  }\n};\n\ntemplate <class Context>\nclass SumOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(SumOp);\n\n  template <typename T, typename M>\n  bool DoRunWithType() {\n    auto& input0 = Input(0);\n    auto* output = Output(0);\n    if (InputSize() == 1) {\n      output->CopyFrom(input0, &context_);\n      return true;\n    }\n    output->ResizeLike(input0);\n    T* output_data = output->template mutable_data<T>();\n    // Dimension checking\n    for (int i = 1; i < InputSize(); ++i) {\n      if (output->dims() != Input(i).dims()) {\n        CAFFE_THROW(\n            \"Check failed: output->dims() == Input(i).dims().\",\n            \"Description: Input #\",\n            i,\n            \", input dimension:\",\n            Input(i).dims(),\n            \" should match output dimension: \",\n            output->dims());\n  
    }\n    }\n\n    // Add the first two - works if in-place or not.\n    math::Add(\n        output->size(),\n        input0.template data<T>(),\n        Input(1).template data<T>(),\n        output_data,\n        &context_);\n    // Add remaining.\n    for (int i = 2; i < InputSize(); ++i) {\n      math::Add(\n          output->size(),\n          output_data,\n          Input(i).template data<T>(),\n          output_data,\n          &context_);\n    }\n    return true;\n  }\n\n  bool RunOnDevice() override {\n    if (Input(0).template IsType<float>()) {\n      return DoRunWithType<float, float>();\n    } else if (Input(0).template IsType<int>()) {\n      return DoRunWithType<int, int>();\n    } else {\n      CAFFE_THROW(\n          \"Sum operator only supports 32-bit float and ints, but\",\n          \" input was of type \",\n          Input(0).meta().name());\n    }\n  }\n};\n\n// WeightedSumOp computes the weighted sum of several tensors. The input should\n// be in the form X_0, weight_0, X_1, weight_1, ... where X_i all have the same\n// shape, and weight_i are size 1 tensors that specifies the weight of each\n// vector. 
Note that if one wants to do in-place computation, it could only be\n// done with X_0 also as the output, but not other X_i.\ntemplate <class Context>\nclass WeightedSumOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(WeightedSumOp);\n\n  template <typename DstType>\n  bool DoRunWithType() {\n    CAFFE_ENFORCE_EQ(InputSize() % 2, 0);\n    auto& X0 = Input(0);\n    auto& weight0 = Input(1);\n    CAFFE_ENFORCE_GT(X0.size(), 0);\n    CAFFE_ENFORCE_EQ(weight0.size(), 1);\n    int size = X0.size();\n    auto* output = Output(0);\n    output->ResizeLike(X0);\n    math::Scale<DstType, Context>(\n        size,\n        weight0.template data<float>(),\n        X0.template data<DstType>(),\n        output->template mutable_data<DstType>(),\n        &context_);\n    for (int i = 2; i < InputSize(); i += 2) {\n      auto& X = Input(i);\n      // Do a check: if the input is the same as output, we have a problem -\n      // in-place update should always only happen with the zeroth input.\n      if (&X == output) {\n        LOG(ERROR) << \"Input #\" << i << \" is the same as output. \"\n                   << \"If you want to do in-place updates, put the output as \"\n                   << \"input #0.\";\n        return false;\n      }\n      auto& weight = Input(i + 1);\n      CAFFE_ENFORCE_EQ(X.size(), size);\n      CAFFE_ENFORCE_EQ(weight.size(), 1);\n      math::Axpy<DstType, Context>(\n          size,\n          weight.template data<float>(),\n          X.template data<DstType>(),\n          output->template mutable_data<DstType>(),\n          &context_);\n    }\n    return true;\n  }\n  bool RunOnDevice() override;\n};\n\n/**\n * @brief Update slices of the tensor in-place with weighted sum.\n *\n * ScatterWeightedSumOp is similar to WeightedSum and computes the weighted sum\n * of several tensors. 
The first tensor has to be in-place and only slices of it\n * on the first dimension as indexed by INDICES will be updated.\n *\n * Input:\n *   X_0 - tensor to be updated\n *   weight_0 - scalar weight for X_0, applied only to slices affected,\n *   INDICES - 1-D list of indices on the first dimension of X_0 that need to be\n * updated\n *   X_1 - update slices, has to have shape of len(INDICES) + shape(X_0)[1:]\n *   weight_1 - scalar weight for X_1 update\n *   X_2, weight_2, ...\n *\n * Output:\n *   X_0 - has to be exactly the same tensor as the input 0\n *\n * Note: The op pretty much ignores the exact shapes of the input arguments and\n * cares only about sizes. It's done for performance consideration to avoid\n * unnecessary reshapes. Only first dimension of X_0 is important, let's call it\n * N. If M is the total size of X_0 and K is the size of INDICES then X_i is\n * assumed to be of shape K x (M / N) regardless of the real shape.\n *\n * Note: Each update in INDICES is applied independently which means that if\n * duplicated elements are present in INDICES the corresponding slice of X_0\n * will be scaled multiple times. Manual collapsing of INDICES is required\n * beforehand if necessary.\n *\n * Note: Updates are applied sequentially by inputs which might have undesired\n * consequences if the input tensor is accessed concurrently by different op\n * (e.g. when doing Hogwild). Other threads might see intermediate results even\n * on individual slice level, e.g. 
X_0 scaled by weight_0 but without any\n * updates applied.\n *\n * For now really works only on CPU because of INDICES access\n */\ntemplate <typename T, class Context>\nclass ScatterWeightedSumOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(ScatterWeightedSumOp);\n  USE_DISPATCH_HELPER;\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(2));\n  }\n\n private:\n  template <typename Index>\n  bool DoRunWithType() {\n    TIndex block_size = Input(0).size_from_dim(1);\n    return DispatchHelper<FixedValues<1>, Index>::call(this, block_size);\n  }\n\n  template <typename Index, int FixedSize>\n  bool DoRunWithValue() {\n    CAFFE_ENFORCE_EQ(InputSize() % 2, 1);\n    auto& X0 = Input(0);\n    auto& weight0 = Input(1);\n    auto& indices = Input(2);\n    auto* output = Output(0);\n    CAFFE_ENFORCE_EQ(&X0, output, \"In place operation is required\");\n\n    CAFFE_ENFORCE_GT(X0.size(), 0);\n    CAFFE_ENFORCE_GT(X0.ndim(), 0, \"X0 has to be at least the vector\");\n    CAFFE_ENFORCE_EQ(weight0.size(), 1);\n    TIndex M = X0.size();\n    TIndex N = X0.dim(0);\n    TIndex K = indices.size();\n    TIndex block_size = M / N;\n    T* data = output->template mutable_data<T>();\n    const Index* idxs = indices.template data<Index>();\n    T w0 = *weight0.template data<T>();\n    // It's most likely a constant so exact comparison is fine\n    if (w0 != 1.0) {\n      for (int i = 0; i < K; ++i) {\n        Index idx = idxs[i];\n        CAFFE_ENFORCE(\n            0 <= idx && idx < N,\n            \"Index out of bounds: \",\n            idx,\n            \", range 0 to \",\n            N);\n        math::ScaleFixedSize<T, Context, FixedSize>(\n            block_size,\n            w0,\n            data + block_size * idx,\n            data + block_size * idx,\n            &context_);\n      }\n    }\n    for (int inp = 3; inp < InputSize(); inp += 2) {\n      auto& X = 
Input(inp);\n      auto& weight = Input(inp + 1);\n      CAFFE_ENFORCE_EQ(X.size(), block_size * K);\n      CAFFE_ENFORCE_EQ(weight.size(), 1);\n      const T* x_data = X.template data<T>();\n      T w = *weight.template data<T>();\n      for (int i = 0; i < K; ++i) {\n        Index idx = idxs[i];\n        // double-checking the indices, but it's fine as it's DCHECK only\n        DCHECK(0 <= idx && idx < N) << \"Index out of bounds: \" << idx\n                                    << \", range 0 to \" << N;\n        math::AxpyFixedSize<T, Context, FixedSize>(\n            block_size,\n            w,\n            x_data + block_size * i,\n            data + block_size * idx,\n            &context_);\n      }\n    }\n    return true;\n  }\n  Tensor<CPUContext> x_data_host_;\n  Tensor<CPUContext> weights_host_;\n  Tensor<Context> x_data_device_;\n  Tensor<Context> weights_device_;\n};\n\n\ntemplate <typename T, class Context>\nclass MaxOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(MaxOp);\n\n  bool RunOnDevice() override {\n    auto& input0 = Input(0);\n    auto* output = Output(0);\n\n    output->ResizeLike(input0);\n    output->CopyFrom(input0, &context_);\n\n    if (InputSize() == 1) {\n      return true;\n    }\n\n    // Dimension checking\n    for (int i = 1; i < InputSize(); ++i) {\n      CAFFE_ENFORCE_EQ(\n          output->dims(),\n          Input(i).dims(),\n          \"Description: Input #\",\n          i,\n          \", input dimension:\",\n          Input(i).dims(),\n          \" should match output dimension: \",\n          output->dims());\n    }\n\n    return Compute();\n  }\n\n  virtual bool Compute();\n};\n\ntemplate <typename T, class Context>\nclass MaxGradientOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(MaxGradientOp);\n\n  bool RunOnDevice() override;\n};\n\n/**\n * @brief Update slices of the tensor in-place by overriding.\n *\n * Input:\n 
*   DATA - tensor to be updated\n *   INDICES - 1-D list of indices on the first dimension of X_0 that need to be\n *             updated\n *   SLICES - update slices, has to have shape of len(INDICES) + shape(X_0)[1:]\n *\n * Output:\n *   DATA - has to be exactly the same tensor as the input 0\n *\n * Note: The op pretty much ignores the exact shapes of the input arguments and\n * cares only about sizes. It's done for performance consideration to avoid\n * unnecessary reshapes. Only first dimension of X_0 is important, let's call it\n * N. If M is the total size of X_0 and K is the size of INDICES then X_i is\n * assumed to be of shape K x (M / N) regardless of the real shape.\n *\n * Note: Each update in INDICES is applied independently which means that if\n * duplicated elements are present in INDICES arbitrary one will win.\n *\n * For now really works only on CPU because of INDICES access\n */\ntemplate <class Context>\nclass ScatterAssignOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  virtual ~ScatterAssignOp() {}\n\n  ScatterAssignOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        runners_({{{TensorProto_DataType_INT32, TensorProto_DataType_FLOAT},\n                   &ScatterAssignOp::DoRun<int32_t, float>},\n                  {{TensorProto_DataType_INT32, TensorProto_DataType_FLOAT16},\n                   &ScatterAssignOp::DoRun<int32_t, float16>},\n                  {{TensorProto_DataType_INT64, TensorProto_DataType_FLOAT},\n                   &ScatterAssignOp::DoRun<int64_t, float>},\n                  {{TensorProto_DataType_INT64, TensorProto_DataType_FLOAT16},\n                   &ScatterAssignOp::DoRun<int64_t, float16>}}) {}\n\n  bool RunOnDevice() override {\n    const auto& data = Input(DATA);\n    const auto& slices = Input(SLICES);\n    auto& indices = Input(INDICES);\n\n    const auto dataType = TypeMetaToDataType(data.meta());\n    const auto slicesType = 
TypeMetaToDataType(slices.meta());\n    const auto indicesType = TypeMetaToDataType(indices.meta());\n    auto* output = Output(0);\n\n    auto runner = GetRunner(dataType, slicesType, indicesType);\n    (this->*runner)();\n    return true;\n  }\n\n private:\n  typedef void (ScatterAssignOp::*RunnerType)();\n  typedef std::\n      map<std::pair<TensorProto_DataType, TensorProto_DataType>, RunnerType>\n          RunnerMap;\n\n  RunnerMap runners_;\n\n  RunnerType GetRunner(\n      const TensorProto_DataType dataType,\n      const TensorProto_DataType slicesType,\n      const TensorProto_DataType indicesType) {\n    CAFFE_ENFORCE_EQ(dataType, slicesType, \"Data and slice types must match\");\n    auto it = runners_.find({indicesType, dataType});\n    CAFFE_ENFORCE(\n        it != runners_.end(),\n        \"Could not find the runner corresponding to indicesType, dataType = \",\n        indicesType,\n        \" \",\n        dataType);\n    return it->second;\n  }\n\n  template <typename Index, typename T>\n  void DoRun() {\n    auto& input = Input(DATA);\n    auto& indices = Input(INDICES);\n    auto& slices = Input(SLICES);\n    auto* output = Output(0);\n    CAFFE_ENFORCE_EQ(&input, output, \"In place operation is required\");\n\n    CAFFE_ENFORCE_GT(input.ndim(), 0, \"X0 has to be at least the vector\");\n    TIndex M = input.size();\n    TIndex N = input.dim(0);\n    TIndex K = indices.size();\n    TIndex block_size = M / N;\n    CAFFE_ENFORCE_EQ(slices.size(), block_size * K);\n    // TODO(dzhulgakov): it can be made to work with arbitrary data type by\n    // using raw_mutable_data\n    T* data = output->template mutable_data<T>();\n    const Index* idxs = indices.template data<Index>();\n    const T* slicesData = slices.template data<T>();\n    for (int i = 0; i < K; ++i) {\n      Index idx = idxs[i];\n      // double-checking the indices, but it's fine as it's DCHECK only\n      DCHECK(0 <= idx && idx < N) << \"Index out of bounds: \" << idx\n                   
               << \", range 0 to \" << N;\n      context_.template Copy<T, Context, Context>(\n          block_size, slicesData + block_size * i, data + block_size * idx);\n    }\n  }\n\n  INPUT_TAGS(DATA, INDICES, SLICES);\n};\n\ntemplate <class Context, class DstContext, class SrcContext>\nclass CopyOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(CopyOp);\n\n  bool RunOnDevice() override {\n    auto& input = OperatorBase::Input<Tensor<SrcContext>>(0);\n    auto* output = OperatorBase::Output<Tensor<DstContext>>(0);\n    output->ResizeLike(input);\n    this->context_.template CopyItems<SrcContext, DstContext>(\n        input.meta(),\n        input.size(),\n        input.raw_data(),\n        output->raw_mutable_data(input.meta()));\n    return true;\n  }\n};\n\ntemplate <class Context, class DstContext, class SrcContext>\nclass CopyOnDeviceLikeOp : public CopyOp<Context, DstContext, SrcContext> {\n public:\n  CopyOnDeviceLikeOp(const OperatorDef& operator_def, Workspace* ws)\n      : CopyOp<Context, DstContext, SrcContext>(operator_def, ws) {}\n};\n\ntemplate <class Context>\nclass LengthsToSegmentIdsOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(LengthsToSegmentIdsOp);\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    auto* output = Output(0);\n    auto* input_data = input.template data<int32_t>();\n\n    CAFFE_ENFORCE(input.dims().size() == 1, \"Input must be a vector.\");\n    auto total_length =\n        std::accumulate(input_data, input_data + input.size(), 0);\n\n    output->Resize(total_length);\n    auto* output_data = output->template mutable_data<int32_t>();\n\n    for (int i = 0; i < input.size(); ++i) {\n      auto len = input_data[i];\n      std::fill(output_data, output_data + len, i);\n      output_data += len;\n    }\n    return true;\n  }\n};\n\ntemplate <class Context>\nclass LengthsToRangesOp : public Operator<Context> {\n 
public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(LengthsToRangesOp);\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    auto* output = Output(0);\n    auto* input_data = input.template data<int32_t>();\n\n    CAFFE_ENFORCE(input.dims().size() == 1, \"Input must be a vector.\");\n    auto size = input.size();\n\n    output->Resize(size, 2);\n    auto* output_data = output->template mutable_data<int32_t>();\n\n    int32_t offset = 0;\n    for (int i = 0; i < size; ++i) {\n      auto len = input_data[i];\n      output_data[i * 2] = offset;\n      output_data[i * 2 + 1] = len;\n      offset += len;\n    }\n    return true;\n  }\n};\n\ntemplate <class Context>\nclass SegmentIdsToLengthsOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(SegmentIdsToLengthsOp);\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(0));\n  }\n\n  template <typename Index>\n  bool DoRunWithType() {\n    auto& input = Input(0);\n    if (input.ndim() == 2) {\n      CAFFE_ENFORCE(\n          input.dim32(0) == 1 || input.dim32(1) == 1,\n          \"Input must be a vector.\");\n    } else {\n      CAFFE_ENFORCE_EQ(input.ndim(), 1, \"Input must be a vector.\");\n    }\n    auto* input_data = input.template data<Index>();\n    auto input_size = input.size();\n    auto* output = Output(0);\n    // segment id starts from 0\n    auto num_segments = input_size ? 
input_data[input_size - 1] + 1 : 0;\n    if (InputSize() > 1) {\n      CAFFE_ENFORCE_GE(Input(1).ndim(), 1);\n      CAFFE_ENFORCE_LE(\n          num_segments,\n          Input(1).dim(0),\n          \"The number of segments inferred should *NOT* be larger \"\n          \"than the size of Input(1)'s first dimension\");\n      num_segments = Input(1).dim(0);\n    }\n    CAFFE_ENFORCE(0 <= num_segments, \"Indices must be in 0..K-1 range\");\n    output->Resize(num_segments);\n    auto* output_data = output->template mutable_data<int32_t>();\n    if (num_segments == 0) {\n      return true;\n    }\n    std::fill(output_data, output_data + num_segments, 0);\n    Index prev = 0; // Assume that segment_id >= 0.\n    for (int64_t i = 0; i < input_size; i++) {\n      CAFFE_ENFORCE(\n          prev <= input_data[i],\n          \"Segment ids must be sorted: \",\n          prev,\n          \" vs \",\n          input_data[i]);\n      prev = input_data[i];\n      output_data[input_data[i]] += 1;\n    }\n\n    return true;\n  }\n};\n\ntemplate <class Context>\nclass SegmentIdsToRangesOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(SegmentIdsToRangesOp);\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(0));\n  }\n\n  template <typename Index>\n  bool DoRunWithType() {\n    auto& input = Input(0);\n    CAFFE_ENFORCE(input.dims().size() == 1, \"Input must be a vector.\");\n    auto* input_data = input.template data<Index>();\n    auto input_size = input.size();\n    auto* output = Output(0);\n    // segment id starts from 0\n    auto num_segments = input_size ? 
input_data[input_size - 1] + 1 : 0;\n    if (InputSize() > 1) {\n      CAFFE_ENFORCE_GE(Input(1).ndim(), 1);\n      CAFFE_ENFORCE_LE(\n          num_segments,\n          Input(1).dim(0),\n          \"The number of segments inferred should *NOT* be larger \"\n          \"than the size of Input(1)'s first dimension\");\n      num_segments = Input(1).dim(0);\n    }\n    CAFFE_ENFORCE(0 <= num_segments, \"Indices must be in 0..K-1 range\");\n    output->Resize(num_segments, 2);\n    auto* output_data = output->template mutable_data<int32_t>();\n    if (num_segments == 0) {\n      return true;\n    }\n    std::fill(output_data, output_data + num_segments * 2, 0);\n    Index prev = input_data[0];\n    for (int64_t i = 0; i < input_size; i++) {\n      CAFFE_ENFORCE(\n          prev <= input_data[i],\n          \"Segment ids must be sorted: \",\n          prev,\n          \" vs \",\n          input_data[i]);\n      while (prev != input_data[i]) {\n        ++prev;\n        output_data[prev * 2] = i;\n      }\n      output_data[input_data[i] * 2 + 1] += 1;\n    }\n\n    return true;\n  }\n};\n\ntemplate <class Context>\nclass LengthsToWeightsOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  LengthsToWeightsOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        power_(OperatorBase::GetSingleArgument<float>(\"power\", 0.5)) {}\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(0));\n  }\n\n  template <typename Index>\n  bool DoRunWithType() {\n    auto& input = Input(0);\n    CAFFE_ENFORCE(input.dims().size() == 1, \"Input must be a vector.\");\n    auto* input_data = input.template data<Index>();\n    auto input_size = input.size();\n    auto* output = Output(0);\n\n    int64_t output_size = 0;\n    for (auto i = 0; i < input_size; i++) {\n      CAFFE_ENFORCE_GE(input_data[i], 0, \"unexpected negative length value\");\n      output_size 
+= input_data[i];\n    }\n\n    std::function<float(const int64_t& length, const float& power)> getWeight;\n    if (power_ == 0.5) {\n      getWeight = [](const int64_t& length, const float& /*power*/) {\n        return 1.0 / std::sqrt(length);\n      };\n    } else if (power_ == 1) {\n      getWeight = [](const int64_t& length, const float& /*power*/) {\n        return 1.0 / length;\n      };\n    } else {\n      getWeight = [](const int64_t& length, const float& power) {\n        return 1.0 / std::pow(length, power);\n      };\n    }\n\n    output->Resize(output_size);\n    auto* output_data = output->template mutable_data<float>();\n    int64_t cnt = 0;\n    for (auto i = 0; i < input_size; i++) {\n      auto len = input_data[i];\n      if (len == 0) {\n        continue;\n      }\n      CAFFE_ENFORCE_LE(cnt + len, output_size, \"unexpected lengths value\");\n\n      float weight_value = getWeight(len, power_);\n      std::fill(output_data + cnt, output_data + cnt + len, weight_value);\n      cnt += len;\n    }\n\n    return true;\n  }\n\n private:\n  float power_;\n};\n\ntemplate <class Context>\nclass HasElementsOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(HasElementsOp);\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    auto* output = OperatorBase::Output<TensorCPU>(0);\n    output->Resize(std::vector<TIndex>{});\n    *output->template mutable_data<bool>() = input.size() > 0;\n    return true;\n  }\n};\n\ntemplate <class Context>\nclass IsEmptyOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(IsEmptyOp);\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    auto* output = OperatorBase::Output<TensorCPU>(0);\n    output->Resize(std::vector<TIndex>{});\n    *output->template mutable_data<bool>() = (input.size() == 0);\n    return true;\n  }\n};\n\n// Return the size of a tensor\ntemplate <class Context>\nclass SizeOp : 
public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(SizeOp);\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    auto* output = Output(0);\n\n    output->Resize(vector<TIndex>());\n    auto* output_data = output->template mutable_data<int64_t>();\n\n    auto size = input.size();\n    math::Set<int64_t, Context>(\n        1, static_cast<int64_t>(size), output_data, &context_);\n\n    return true;\n  }\n};\n\n// returns a shape to be passed to Reshape\ntemplate <class Context>\nclass LengthsToShapeOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(LengthsToShapeOp);\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n\n    CAFFE_ENFORCE(input.dims().size() == 1, \"Input must be a vector.\");\n    auto* output = Output(0);\n    auto* input_data = input.template data<int32_t>();\n\n    auto size = input.size();\n    auto first = input_data[0];\n\n    for (int i = 1; i < size; i++) {\n      CAFFE_ENFORCE(\n          input_data[i] == first, \"All elements of input must be same \");\n    }\n\n    output->Resize(2);\n    auto* output_data = output->template mutable_data<int32_t>();\n    output_data[0] = size;\n    output_data[1] = first;\n\n    return true;\n  }\n};\n\ntemplate <class Context>\nclass SqueezeOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SqueezeOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        dims_(OperatorBase::GetRepeatedArgument<int>(\"dims\")) {\n    auto originalSize = dims_.size();\n    CAFFE_ENFORCE(originalSize > 0, \"Parameter `dims` must be provided.\");\n\n    std::sort(dims_.begin(), dims_.end());\n    dims_.erase(std::unique(dims_.begin(), dims_.end()), dims_.end());\n    if (dims_.size() < originalSize) {\n      LOG(WARNING) << \"Parameter `dims` has repeated dimensions.\";\n    }\n    CAFFE_ENFORCE(dims_.front() >= 0, \"Dimension 
ids must be non-negative.\");\n  }\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    auto* output = Output(0);\n    output->CopyFrom(input, &context_);\n\n    CAFFE_ENFORCE_GT(\n        input.ndim(),\n        dims_.back(),\n        \"Input needs at least \",\n        (dims_.back() + 1),\n        \" dimensions.\");\n\n    std::vector<int> newDims = ComputeDims(input.dims(), dims_);\n    output->Reshape(newDims);\n    return true;\n  }\n\n  static std::vector<int> ComputeDims(\n      std::vector<TIndex> inputDims,\n      std::vector<int> dims) {\n    int j = 0;\n    std::vector<int> newDims;\n    for (int i = 0; i < inputDims.size(); ++i) {\n      if (j < dims.size() && dims[j] == i) {\n        CAFFE_ENFORCE_EQ(\n            inputDims[i],\n            1,\n            \"Dimension \",\n            i,\n            \" of input must be 1\",\n            \" instead of \",\n            inputDims[i],\n            \".\");\n        ++j;\n        continue;\n      }\n      newDims.push_back(inputDims.at(i));\n    }\n    return newDims;\n  }\n\n private:\n  vector<int> dims_;\n\n public:\n  DISABLE_COPY_AND_ASSIGN(SqueezeOp);\n};\n\ntemplate <class Context>\nclass ExpandDimsOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  ExpandDimsOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        dims_(OperatorBase::GetRepeatedArgument<int>(\"dims\")) {\n    auto originalSize = dims_.size();\n    CAFFE_ENFORCE(originalSize > 0, \"Parameter `dims` must be provided.\");\n    std::sort(dims_.begin(), dims_.end());\n    dims_.erase(std::unique(dims_.begin(), dims_.end()), dims_.end());\n    if (dims_.size() < originalSize) {\n      LOG(WARNING) << \"Parameter `dims` has repeated dimensions.\";\n    }\n    CAFFE_ENFORCE(dims_.front() >= 0, \"Dimension ids must be non-negative.\");\n  }\n\n  bool RunOnDevice() override {\n    auto& input = Input(0);\n    auto* output = Output(0);\n    
output->CopyFrom(input, &context_);\n    if (dims_.empty()) {\n      return true;\n    }\n\n    auto newDims = input.dims();\n    CAFFE_ENFORCE_GE(\n        input.dims().size() + dims_.size(),\n        dims_.back() + 1,\n        \"Input needs at least \",\n        (1 + dims_.back() - dims_.size()),\n        \" dimensions given `dims`.\");\n    for (const auto dim : dims_) {\n      newDims.insert(newDims.begin() + dim, 1);\n    }\n    output->Reshape(newDims);\n    return true;\n  }\n\n private:\n  vector<int> dims_;\n};\n\ntemplate <class Context>\nclass GatherOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(GatherOp);\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, OperatorBase::Input<TensorCPU>(INDICES));\n  }\n\n  template <typename Index>\n  bool DoRunWithType() {\n    // If we endup using it on GPU doing O(N) memcpy is probably not best :)\n    // TODO: implement prefetching if it starts mattering (TF does it)\n    auto& data = Input(DATA);\n    auto& indices = Input(INDICES);\n    auto* output = Output(0);\n\n    CAFFE_ENFORCE_GE(data.ndim(), 1, \"DATA should be at least 1-D\");\n    auto shape = indices.dims();\n    shape.insert(shape.end(), data.dims().begin() + 1, data.dims().end());\n    output->Resize(shape);\n\n    int block_size = data.size_from_dim(1);\n    auto block_bytesize = data.size_from_dim(1) * data.meta().itemsize();\n    int N = indices.size();\n\n    auto src_base = static_cast<const char*>(data.raw_data());\n    const Index* idxs = indices.template data<Index>();\n    auto out = static_cast<char*>(output->raw_mutable_data(data.meta()));\n\n    for (int i = 0; i < N; ++i) {\n      auto idx = idxs[i];\n      CAFFE_ENFORCE(\n          0 <= idx && idx < data.dim(0),\n          \"INDICES element is out of DATA bounds, id=\",\n          idx,\n          \" data_dim=\",\n          data.dim(0));\n      auto src = src_base + idx * 
block_bytesize;\n      context_.template CopyItems<Context, Context>(\n          data.meta(), block_size, src, out + block_bytesize * i);\n    }\n    return true;\n  }\n\n  INPUT_TAGS(DATA, INDICES);\n};\n\ntemplate <class Context>\nclass GatherRangesOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(GatherRangesOp);\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, OperatorBase::Input<TensorCPU>(RANGES));\n  }\n\n  template <typename Index>\n  bool DoRunWithType() {\n    auto& data = Input(DATA);\n    auto& ranges = Input(RANGES);\n    auto* outputData = Output(0);\n    auto* outputLengths = Output(1);\n\n    auto batchSize = ranges.dim(0);\n    CAFFE_ENFORCE(data.ndim() == 1, \"Data has to be 1-D\");\n    CAFFE_ENFORCE(ranges.ndim() == 3, \"Ranges must be 3-D\");\n    CAFFE_ENFORCE(ranges.dim(1) > 0, \"There has to be at least one range\");\n    CAFFE_ENFORCE_EQ(\n        ranges.dim(2), 2, \"Ranges last dimention should be of size 2\");\n\n    auto* rawData = static_cast<const char*>(data.raw_data());\n    auto* rangesData = ranges.template data<Index>();\n\n    outputLengths->Resize(batchSize);\n    auto* outputLengthsPtr = outputLengths->template mutable_data<int32_t>();\n    size_t start = 0;\n    size_t blockSize = ranges.size_from_dim(1);\n    for (size_t i = 0; i < batchSize; ++i) {\n      auto end = start + blockSize;\n      outputLengthsPtr[i] = accumulate(rangesData, start, end);\n      start = end;\n    }\n\n    size_t outputSize = accumulate(rangesData, 0, ranges.size());\n    outputData->Resize(outputSize);\n\n    auto outputRawData =\n        static_cast<char*>(outputData->raw_mutable_data(data.meta()));\n    VLOG(1) << \"Copying data\";\n    size_t outputOffsetBytes = 0;\n    auto itemsize = data.meta().itemsize();\n    for (int i = 0; i < ranges.size(); i += 2) {\n      auto rangeStart = rangesData[i];\n      auto rangeLength = 
rangesData[i + 1];\n      if (!rangeLength) {\n        continue;\n      }\n      auto rangeSizeBytes = rangeLength * itemsize;\n      CAFFE_ENFORCE(outputOffsetBytes < outputSize * itemsize);\n      CAFFE_ENFORCE(rangeStart + rangeLength <= data.size());\n      context_.template CopyItems<Context, Context>(\n          data.meta(),\n          rangeLength,\n          rawData + rangeStart * itemsize,\n          outputRawData + outputOffsetBytes);\n      outputOffsetBytes += rangeSizeBytes;\n    }\n    CAFFE_ENFORCE(outputOffsetBytes == outputSize * itemsize);\n    return true;\n  }\n\n  INPUT_TAGS(DATA, RANGES, LENGTHS);\n\n private:\n  template <typename Index>\n  size_t accumulate(Index* ranges, size_t start, size_t end) {\n    size_t result = 0;\n    for (int i = start + 1; i < end; i += 2) {\n      result += ranges[i];\n    }\n    return result;\n  }\n};\n\ntemplate <class Context>\nclass LengthsGatherOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(LengthsGatherOp);\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, OperatorBase::Input<TensorCPU>(INDICES));\n  }\n\n  template <typename Index>\n  bool DoRunWithType() {\n    auto& items = Input(ITEMS);\n    auto& lengths = Input(LENGTHS);\n    auto& indices = Input(INDICES);\n    auto* output = Output(0);\n\n    CAFFE_ENFORCE_GE(items.ndim(), 1, \"ITEMS should be at least 1-D\");\n    CAFFE_ENFORCE_EQ(lengths.ndim(), 1, \"LENGTHS should be 1-D\");\n    CAFFE_ENFORCE_EQ(indices.ndim(), 1, \"INDICES should be 1-D\");\n\n    const auto* lengths_data = lengths.template data<int32_t>();\n    const auto* indices_data = indices.template data<Index>();\n\n    TIndex total_length = 0;\n    for (size_t i = 0; i < indices.size(); ++i) {\n      auto idx = indices_data[i];\n      CAFFE_ENFORCE_LT(idx, lengths.size());\n      total_length += lengths_data[idx];\n    }\n    auto shape = items.dims();\n    shape[0] = 
total_length;\n    output->Resize(shape);\n\n    offsets_.clear();\n    TIndex running_offset = 0;\n    offsets_.reserve(lengths.size());\n    for (size_t i = 0; i < lengths.size(); ++i) {\n      offsets_.push_back(running_offset);\n      running_offset += lengths_data[i];\n    }\n    CAFFE_ENFORCE_EQ(\n        items.dim(0),\n        running_offset,\n        \"LENGTHS must match the first dimension of ITEMS\");\n\n    auto src_base = static_cast<const char*>(items.raw_data());\n    auto block_size = items.size_from_dim(1);\n    auto block_bytesize = block_size * items.itemsize();\n    auto out = static_cast<char*>(output->raw_mutable_data(items.meta()));\n\n    for (size_t i = 0; i < indices.size(); ++i) {\n      auto idx = indices_data[i];\n      auto length = lengths_data[idx];\n      context_.template CopyItems<Context, Context>(\n          items.meta(),\n          length * block_size,\n          src_base + offsets_[idx] * block_bytesize,\n          out);\n      out += length * block_bytesize;\n    }\n    return true;\n  }\n\n  std::vector<TIndex> offsets_;\n\n  INPUT_TAGS(ITEMS, LENGTHS, INDICES);\n};\n\n// Since we just do copying, consider untemplating it on T and using raw_data()\n/**\n * Deduplicates input indices vector and optionally produces reverse remapping.\n * Current implementation produces a sorted list but it's not guaranteed in\n * general.\n */\ntemplate <class Context>\nclass UniqueOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(UniqueOp);\n\n  bool RunOnDevice() override {\n    // Use run-time polymorphism\n    auto& input = Input(0);\n    if (input.template IsType<int32_t>()) {\n      DoRun<int32_t>();\n    } else if (input.template IsType<int64_t>()) {\n      DoRun<int64_t>();\n    } else {\n      LOG(FATAL) << \"Unsupported type of input in Unique: \"\n                 << input.meta().name();\n    }\n    return true;\n  }\n\n private:\n  vector<int> order_;\n  Tensor<Context> 
thrust_unique_buffer_;\n  Tensor<Context> cuda_order_buffer_;\n  Tensor<Context> second_order_buffer_;\n\n  template <typename T>\n  void DoRun();\n\n public:\n  OUTPUT_TAGS(UNIQUE, REMAPPING);\n};\n\ntemplate <class Context>\nclass UnsafeCoalesceOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  using Operator<Context>::Operator;\n\n  bool RunOnDevice() override {\n    size_t coalesced_size = 0;\n    for (int i = 0; i < InputSize(); ++i) {\n      CAFFE_ENFORCE(\n          !Input(i).meta().ctor(),\n          \"Must only coalesce fundamental types, error at input: \",\n          i);\n    }\n\n    auto roundToAlignment = [](size_t bytes) -> size_t {\n      return ((bytes + gCaffe2Alignment - 1) / gCaffe2Alignment) *\n          gCaffe2Alignment;\n    };\n\n    for (int i = 0; i < InputSize(); ++i) {\n      coalesced_size += roundToAlignment(Input(i).nbytes());\n    }\n\n    auto* coalesced = Output(OutputSize() - 1);\n    coalesced->Resize(coalesced_size);\n    math::Set<uint8_t, Context>(\n        coalesced_size,\n        0.0,\n        coalesced->template mutable_data<uint8_t>(),\n        &context_);\n\n    size_t coalesced_offset = 0;\n    for (auto i = 0; i < InputSize(); ++i) {\n      const auto input_nbytes = Input(i).nbytes();\n      context_.template CopyBytes<Context, Context>(\n          input_nbytes,\n          (const uint8_t*)Input(i).raw_data(),\n          coalesced->template mutable_data<uint8_t>() + coalesced_offset);\n\n      // Note: this could cause Input(i) to free it's data if\n      // Output(i) and Input(i) alias each other. 
This is safe on a\n      // GPU (as the copy will happen-before the free), but it's\n      // worth mentioning.\n\n      Output(i)->ResizeLike(Input(i));\n      Output(i)->ShareExternalPointer(\n          static_cast<void*>(\n              coalesced->template mutable_data<uint8_t>() + coalesced_offset),\n          Input(i).meta(),\n          input_nbytes);\n      coalesced_offset += roundToAlignment(input_nbytes);\n    }\n    return true;\n  }\n};\n\ntemplate <typename T, class Context>\nclass AccumulateHistogramOp : public Operator<Context> {\n public:\n  AccumulateHistogramOp(const OperatorDef& def, Workspace* ws)\n      : Operator<Context>(def, ws),\n        lower_bound_(\n            OperatorBase::GetSingleArgument<float>(\"lower_bound\", 0.0)),\n        upper_bound_(\n            OperatorBase::GetSingleArgument<float>(\"upper_bound\", 1.0)),\n        num_buckets_(OperatorBase::GetSingleArgument<int>(\"num_buckets\", 1)) {\n    CAFFE_ENFORCE_GT(num_buckets_, 0);\n    // 2 more for histograms < lower_bound, >= upper_bound respectively\n    num_output_buckets_ = num_buckets_ + 2;\n    accumulate_hist_ = std::vector<int64_t>(num_output_buckets_, 0);\n  }\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    auto& X = Input(X_IN);\n    auto* X_data = X.template data<T>();\n    int N = X.size();\n    auto* cur_hist = Output(CUR_HIST);\n    auto* acc_hist = Output(ACC_HIST);\n    cur_hist->Resize(num_output_buckets_);\n    acc_hist->Resize(num_output_buckets_);\n    auto* cur_hist_data = cur_hist->template mutable_data<int64_t>();\n    auto* acc_hist_data = acc_hist->template mutable_data<int64_t>();\n    auto segment = (upper_bound_ - lower_bound_) / num_buckets_;\n    math::Set<int64_t, Context>(\n        num_output_buckets_, 0, cur_hist_data, &context_);\n\n    for (int i = 0; i < N; i++) {\n      int bucket_index = -1;\n      if (X_data[i] < lower_bound_) {\n        bucket_index = 0;\n      } else if (X_data[i] >= upper_bound_) {\n        
bucket_index = num_buckets_ + 1;\n      } else {\n        bucket_index = (int)((X_data[i] - lower_bound_) / segment) + 1;\n      }\n      cur_hist_data[bucket_index] += 1;\n      accumulate_hist_[bucket_index] += 1;\n    }\n\n    for (int i = 0; i < num_output_buckets_; i++) {\n      acc_hist_data[i] = accumulate_hist_[i];\n    }\n\n    return true;\n  }\n\n private:\n  float lower_bound_;\n  float upper_bound_;\n  int num_buckets_;\n  int num_output_buckets_;\n  std::vector<int64_t> accumulate_hist_;\n\n  INPUT_TAGS(X_IN);\n  OUTPUT_TAGS(CUR_HIST, ACC_HIST);\n};\n\ntemplate <class Context>\nclass RangeOp : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(RangeOp)\n\n  bool RunOnDevice() override {\n    return DispatchHelper<TensorTypes<int32_t, int64_t, float, double>>::call(\n        this, Input(0));\n  }\n\n  template <typename T>\n  T readScalarInput(const int index) {\n    if (std::is_same<Context, TensorCPU>::value) {\n      return Input(index).template data<T>()[0];\n    } else {\n      local_.template CopyFrom<Context>(Input(index));\n      return local_.template data<T>()[0];\n    }\n  }\n\n  template <typename T>\n  bool DoRunWithType() {\n    T stop = 0;\n    T start = 0;\n    T step = 1;\n\n    for (int i = 0; i < InputSize(); ++i) {\n      CAFFE_ENFORCE_EQ(Input(0).ndim(), 0, \"All inputs must be scalar.\");\n    }\n\n    switch (InputSize()) {\n      case 1:\n        stop = readScalarInput<T>(0);\n        break;\n      case 2:\n        start = readScalarInput<T>(0);\n        stop = readScalarInput<T>(1);\n        break;\n      case 3:\n        step = readScalarInput<T>(2);\n        start = readScalarInput<T>(0);\n        stop = readScalarInput<T>(1);\n        break;\n    }\n    CAFFE_ENFORCE_NE(step, 0, \"Step size cannot be 0.\");\n    int length;\n    auto diff = stop - start;\n    if (std::is_integral<T>::value) {\n      // Avoid casting to and from floats in case it introduces rounding and\n      // 
avoid mod because the compiler doesn't strip unused code until later.\n      length = diff / step;\n      if (length * step < diff) {\n        length += 1;\n      }\n    } else {\n      length = static_cast<int>(ceil(diff / step));\n    }\n    auto* output = Output(0);\n    // Match numpy's behavior here.\n    if (length <= 0) {\n      output->Resize(0);\n      // Called for the side effect of setting the data.\n      output->template mutable_data<T>();\n      return true;\n    } else {\n      output->Resize(length);\n      return DoRunOnDevice<T>(start, step, output);\n    }\n  }\n\n  template <typename T>\n  bool DoRunOnDevice(const T& start, const T& step, Tensor<Context>* output);\n\n private:\n  // local CPU tensor for copying constants.\n  TensorCPU local_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_UTILITY_OPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/while_op.h",
    "content": "#ifndef CAFFE2_OPERATORS_WHILE_OP_H_\n#define CAFFE2_OPERATORS_WHILE_OP_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass WhileOp final : public Operator<Context> {\n public:\n  WhileOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {\n    CAFFE_ENFORCE(\n        this->template HasSingleArgumentOfType<NetDef>(\"loop_net\"),\n        \"loop_net must be specified in While operator\");\n    loop_net_def_ =\n        this->template GetSingleArgument<NetDef>(\"loop_net\", NetDef());\n    loop_net_ = CreateNet(loop_net_def_, ws);\n    CAFFE_ENFORCE(loop_net_, \"Failed to initialize loop subnet\");\n\n    cond_net_ = nullptr;\n    bool has_cond_net =\n        this->template HasSingleArgumentOfType<NetDef>(\"cond_net\");\n    if (has_cond_net) {\n      cond_net_def_ =\n          this->template GetSingleArgument<NetDef>(\"cond_net\", NetDef());\n      cond_net_ = CreateNet(cond_net_def_, ws);\n      CAFFE_ENFORCE(cond_net_, \"Failed to initialize condition subnet\");\n    }\n  }\n\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  bool RunOnDevice() override;\n\n private:\n  NetDef loop_net_def_;\n  std::unique_ptr<NetBase> loop_net_;\n\n  NetDef cond_net_def_;\n  std::unique_ptr<NetBase> cond_net_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_OPERATORS_WHILE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/operators/zero_gradient_op.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass ZeroGradientOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  USE_SIMPLE_CTOR_DTOR(ZeroGradientOp);\n\n  bool RunOnDevice() override {\n    return true;\n  }\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/perfkernels/common.h",
    "content": "// Common utilities for writing performance kernels and easy dispatching of\n// different backends.\n/*\nThe general workflow shall be as follows, say we want to\nimplement a functionality called void foo(int a, float b).\n\nIn foo.h, do:\n   void foo(int a, float b);\n\nIn foo_avx2.cc, do:\n   void foo__avx2(int a, float b) {\n     [actual avx2 implementation]\n   }\n\nIn foo_avx.cc, do:\n   void foo__avx(int a, float b) {\n     [actual avx implementation]\n   }\n\nIn foo.cc, do:\n   // The base implementation should *always* be provided.\n   void foo__base(int a, float b) {\n     [base, possibly slow implementation]\n   }\n   void foo(int a, float b) {\n     // You should always order things by their preference, faster\n     // implementations earlier in the function.\n     AVX2_DO(foo, a, b);\n     AVX_DO(foo, a, b);\n     BASE_DO(foo, a, b);\n   }\n\n*/\n// Details: this functionality basically covers the cases for both build time\n// and run time architecture support.\n//\n// During build time:\n//    The build system should provide flags CAFFE2_PERF_WITH_AVX2 and\n//    CAFFE2_PERF_WITH_AVX that corresponds to the __AVX__ and __AVX2__ flags\n//    the compiler provides. Note that we do not use the compiler flags but\n//    rely on the build system flags, because the common files (like foo.cc\n//    above) will always be built without __AVX__ and __AVX2__.\n// During run time:\n//    we use cpuid to identify cpu support and run the proper functions.\n\n#pragma once\n\n// DO macros: these should be used in your entry function, similar to foo()\n// above, that routes implementations based on CPU capability.\n\n#define BASE_DO(funcname, ...) return funcname##__base(__VA_ARGS__);\n\n#ifdef CAFFE2_PERF_WITH_AVX2\n#define AVX2_DO(funcname, ...)                 
\\\n  decltype(funcname##__base) funcname##__avx2; \\\n  if (GetCpuId().avx2()) {                     \\\n    return funcname##__avx2(__VA_ARGS__);      \\\n  }\n#define AVX2_FMA_DO(funcname, ...)                 \\\n  decltype(funcname##__base) funcname##__avx2_fma; \\\n  if (GetCpuId().avx2() && GetCpuId().fma()) {     \\\n    return funcname##__avx2_fma(__VA_ARGS__);      \\\n  }\n#else // CAFFE2_PERF_WITH_AVX2\n#define AVX2_DO(funcname, ...)\n#define AVX2_FMA_DO(funcname, ...)\n#endif // CAFFE2_PERF_WITH_AVX2\n\n#ifdef CAFFE2_PERF_WITH_AVX\n#define AVX_DO(funcname, ...)                 \\\n  decltype(funcname##__base) funcname##__avx; \\\n  if (GetCpuId().avx()) {                     \\\n    return funcname##__avx(__VA_ARGS__);      \\\n  }\n#define AVX_F16C_DO(funcname, ...)                 \\\n  decltype(funcname##__base) funcname##__avx_f16c; \\\n  if (GetCpuId().avx() && GetCpuId().f16c()) {     \\\n    return funcname##__avx_f16c(__VA_ARGS__);      \\\n  }\n#else // CAFFE2_PERF_WITH_AVX\n#define AVX_DO(funcname, ...)\n#define AVX_F16C_DO(funcname, ...)\n#endif // CAFFE2_PERF_WITH_AVX\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/perfkernels/cvtsh_ss_bugfix.h",
    "content": "#pragma once\n\n#if defined(__APPLE__) && ((__clang_major__ < 8) || ((__clang_major__ == 8) && (__clang_minor__ < 1)))\n\n#include <emmintrin.h>\n\n// This version of apple clang has a bug that _cvtsh_ss is not defined, see\n// https://reviews.llvm.org/D16177\nstatic __inline float\n    __attribute__((__always_inline__, __nodebug__, __target__(\"f16c\")))\n_cvtsh_ss(unsigned short a)\n{\n  __v8hi v = {(short)a, 0, 0, 0, 0, 0, 0, 0};\n  __v4sf r = __builtin_ia32_vcvtph2ps(v);\n  return r[0];\n}\n\n#endif // defined(__APPLE__) && (__clang_major__ < 8)\n\n#ifdef _MSC_VER\n\n// It seems that microsoft msvc does not have a _cvtsh_ss implementation so\n// we will add a dummy version to it.\n\nstatic inline float\n_cvtsh_ss(unsigned short x) {\n  union {\n    uint32_t intval;\n    float floatval;\n  } t1;\n  uint32_t t2, t3;\n  t1.intval = x & 0x7fff; // Non-sign bits\n  t2 = x & 0x8000; // Sign bit\n  t3 = x & 0x7c00; // Exponent\n  t1.intval <<= 13; // Align mantissa on MSB\n  t2 <<= 16; // Shift sign bit into position\n  t1.intval += 0x38000000; // Adjust bias\n  t1.intval = (t3 == 0 ? 0 : t1.intval); // Denormals-as-zero\n  t1.intval |= t2; // Re-insert sign bit\n  return t1.floatval;\n}\n\n#endif // _MSC_VER\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/perfkernels/embedding_lookup.h",
    "content": "#pragma once\n\n#include \"caffe2/core/common.h\"\n\nnamespace caffe2 {\n\n/**\n * Embedding lookup with reduction.\n *\n * `input` of size data_size * block_size\n * `indices` of size index_size\n * `lengths` of size output_size\n * `weights` nullptr or array of size index_size\n * `out` of size output_size * block_size\n * sum(lengths[i]) == index_size\n *\n * Behavior is roughly equivalent to pseudocode:\n *\n * pos = 0\n * for (i = 0..index_size-1)\n *   for (k = 0..block_size-1)\n *     out[i*block_size + k] = 0\n *   for (j = 0..lengths[i]-1)\n *     for (k = 0..block_size-1)\n *       out[i*block_size + k] += input[indices[pos]*block_size + k] *\n *                                (weights ? weights[pos] : 1.0)\n *     pos += 1\n *   if (normalize_weights && lengths[i] > 0)\n *     for (k = 0..block_size-1)\n *       out[i*block_size + k] /= lengths[i]\n *\n */\ntemplate <typename IndexType, typename InType, typename OutType>\nvoid EmbeddingLookup(\n    const TIndex block_size,\n    const TIndex output_size,\n    const TIndex index_size,\n    const TIndex data_size,\n    const InType* input,\n    const IndexType* indices,\n    const int* lengths,\n    const float* weights, // optional, can be null for non-weighted sum\n    const float* scale_bias, // optional scale & bias params for uint8 input\n    bool normalize_by_lengths,\n    OutType* out);\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/perfkernels/typed_axpy.h",
    "content": "#pragma once\n\nnamespace caffe2 {\n\n// Similar to Axpy that calculate y = a * x + y, but allowing x and y to be\n// of different data types.\n// It also provides a performance optimization hint (use_a) to see if a is going\n// to be 1 or not.\ntemplate <typename IN, typename OUT>\nvoid TypedAxpy(int N, const OUT a, const IN* x, OUT* y);\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/proto/caffe2.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: caffe2/proto/caffe2.proto\n\n#ifndef PROTOBUF_caffe2_2fproto_2fcaffe2_2eproto__INCLUDED\n#define PROTOBUF_caffe2_2fproto_2fcaffe2_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/generated_enum_reflection.h>\n#include <google/protobuf/unknown_field_set.h>\n// @@protoc_insertion_point(includes)\n\nnamespace caffe2 {\n\n// Internal implementation detail -- do not call these.\nvoid protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto();\nvoid protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto();\nvoid protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\nvoid protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\nclass Argument;\nclass BlobProto;\nclass DBReaderProto;\nclass DeviceOption;\nclass ExecutionStep;\nclass NetDef;\nclass OperatorDef;\nclass PlanDef;\nclass QTensorProto;\nclass TensorProto;\nclass TensorProto_Segment;\nclass TensorProtos;\nclass TensorShape;\nclass TensorShapes;\n\nenum TensorProto_DataType {\n  TensorProto_DataType_UNDEFINED = 0,\n  TensorProto_DataType_FLOAT = 1,\n  TensorProto_DataType_INT32 = 2,\n  TensorProto_DataType_BYTE = 3,\n  
TensorProto_DataType_STRING = 4,\n  TensorProto_DataType_BOOL = 5,\n  TensorProto_DataType_UINT8 = 6,\n  TensorProto_DataType_INT8 = 7,\n  TensorProto_DataType_UINT16 = 8,\n  TensorProto_DataType_INT16 = 9,\n  TensorProto_DataType_INT64 = 10,\n  TensorProto_DataType_FLOAT16 = 12,\n  TensorProto_DataType_DOUBLE = 13\n};\nbool TensorProto_DataType_IsValid(int value);\nconst TensorProto_DataType TensorProto_DataType_DataType_MIN = TensorProto_DataType_UNDEFINED;\nconst TensorProto_DataType TensorProto_DataType_DataType_MAX = TensorProto_DataType_DOUBLE;\nconst int TensorProto_DataType_DataType_ARRAYSIZE = TensorProto_DataType_DataType_MAX + 1;\n\nconst ::google::protobuf::EnumDescriptor* TensorProto_DataType_descriptor();\ninline const ::std::string& TensorProto_DataType_Name(TensorProto_DataType value) {\n  return ::google::protobuf::internal::NameOfEnum(\n    TensorProto_DataType_descriptor(), value);\n}\ninline bool TensorProto_DataType_Parse(\n    const ::std::string& name, TensorProto_DataType* value) {\n  return ::google::protobuf::internal::ParseNamedEnum<TensorProto_DataType>(\n    TensorProto_DataType_descriptor(), name, value);\n}\nenum DeviceType {\n  CPU = 0,\n  CUDA = 1,\n  MKLDNN = 2,\n  COMPILE_TIME_MAX_DEVICE_TYPES = 3,\n  ONLY_FOR_TEST = 20901701\n};\nbool DeviceType_IsValid(int value);\nconst DeviceType DeviceType_MIN = CPU;\nconst DeviceType DeviceType_MAX = ONLY_FOR_TEST;\nconst int DeviceType_ARRAYSIZE = DeviceType_MAX + 1;\n\nconst ::google::protobuf::EnumDescriptor* DeviceType_descriptor();\ninline const ::std::string& DeviceType_Name(DeviceType value) {\n  return ::google::protobuf::internal::NameOfEnum(\n    DeviceType_descriptor(), value);\n}\ninline bool DeviceType_Parse(\n    const ::std::string& name, DeviceType* value) {\n  return ::google::protobuf::internal::ParseNamedEnum<DeviceType>(\n    DeviceType_descriptor(), name, value);\n}\n// ===================================================================\n\nclass TensorProto_Segment : 
public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.TensorProto.Segment) */ {\n public:\n  TensorProto_Segment();\n  virtual ~TensorProto_Segment();\n\n  TensorProto_Segment(const TensorProto_Segment& from);\n\n  inline TensorProto_Segment& operator=(const TensorProto_Segment& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const TensorProto_Segment& default_instance();\n\n  static const TensorProto_Segment* internal_default_instance();\n\n  void Swap(TensorProto_Segment* other);\n\n  // implements Message ----------------------------------------------\n\n  inline TensorProto_Segment* New() const { return New(NULL); }\n\n  TensorProto_Segment* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const TensorProto_Segment& from);\n  void MergeFrom(const TensorProto_Segment& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void 
SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(TensorProto_Segment* other);\n  void UnsafeMergeFrom(const TensorProto_Segment& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // required int64 begin = 1;\n  bool has_begin() const;\n  void clear_begin();\n  static const int kBeginFieldNumber = 1;\n  ::google::protobuf::int64 begin() const;\n  void set_begin(::google::protobuf::int64 value);\n\n  // required int64 end = 2;\n  bool has_end() const;\n  void clear_end();\n  static const int kEndFieldNumber = 2;\n  ::google::protobuf::int64 end() const;\n  void set_end(::google::protobuf::int64 value);\n\n  // @@protoc_insertion_point(class_scope:caffe2.TensorProto.Segment)\n private:\n  inline void set_has_begin();\n  inline void clear_has_begin();\n  inline void set_has_end();\n  inline void clear_has_end();\n\n  // helper for ByteSizeLong()\n  size_t RequiredFieldsByteSizeFallback() const;\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::int64 begin_;\n  ::google::protobuf::int64 end_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<TensorProto_Segment> 
TensorProto_Segment_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass TensorProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.TensorProto) */ {\n public:\n  TensorProto();\n  virtual ~TensorProto();\n\n  TensorProto(const TensorProto& from);\n\n  inline TensorProto& operator=(const TensorProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const TensorProto& default_instance();\n\n  static const TensorProto* internal_default_instance();\n\n  void Swap(TensorProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline TensorProto* New() const { return New(NULL); }\n\n  TensorProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const TensorProto& from);\n  void MergeFrom(const TensorProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void 
SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(TensorProto* other);\n  void UnsafeMergeFrom(const TensorProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  typedef TensorProto_Segment Segment;\n\n  typedef TensorProto_DataType DataType;\n  static const DataType UNDEFINED =\n    TensorProto_DataType_UNDEFINED;\n  static const DataType FLOAT =\n    TensorProto_DataType_FLOAT;\n  static const DataType INT32 =\n    TensorProto_DataType_INT32;\n  static const DataType BYTE =\n    TensorProto_DataType_BYTE;\n  static const DataType STRING =\n    TensorProto_DataType_STRING;\n  static const DataType BOOL =\n    TensorProto_DataType_BOOL;\n  static const DataType UINT8 =\n    TensorProto_DataType_UINT8;\n  static const DataType INT8 =\n    TensorProto_DataType_INT8;\n  static const DataType UINT16 =\n    TensorProto_DataType_UINT16;\n  static const DataType INT16 =\n    TensorProto_DataType_INT16;\n  static const DataType INT64 =\n    TensorProto_DataType_INT64;\n  static const DataType FLOAT16 =\n    TensorProto_DataType_FLOAT16;\n  static const DataType DOUBLE =\n    TensorProto_DataType_DOUBLE;\n  static inline bool DataType_IsValid(int value) {\n    return TensorProto_DataType_IsValid(value);\n  }\n  static const DataType DataType_MIN =\n    TensorProto_DataType_DataType_MIN;\n  static const DataType DataType_MAX =\n    TensorProto_DataType_DataType_MAX;\n  static const int DataType_ARRAYSIZE =\n    TensorProto_DataType_DataType_ARRAYSIZE;\n  static inline const ::google::protobuf::EnumDescriptor*\n  DataType_descriptor() {\n    return TensorProto_DataType_descriptor();\n  }\n  static inline const 
::std::string& DataType_Name(DataType value) {\n    return TensorProto_DataType_Name(value);\n  }\n  static inline bool DataType_Parse(const ::std::string& name,\n      DataType* value) {\n    return TensorProto_DataType_Parse(name, value);\n  }\n\n  // accessors -------------------------------------------------------\n\n  // repeated int64 dims = 1;\n  int dims_size() const;\n  void clear_dims();\n  static const int kDimsFieldNumber = 1;\n  ::google::protobuf::int64 dims(int index) const;\n  void set_dims(int index, ::google::protobuf::int64 value);\n  void add_dims(::google::protobuf::int64 value);\n  const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&\n      dims() const;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*\n      mutable_dims();\n\n  // optional .caffe2.TensorProto.DataType data_type = 2 [default = FLOAT];\n  bool has_data_type() const;\n  void clear_data_type();\n  static const int kDataTypeFieldNumber = 2;\n  ::caffe2::TensorProto_DataType data_type() const;\n  void set_data_type(::caffe2::TensorProto_DataType value);\n\n  // repeated float float_data = 3 [packed = true];\n  int float_data_size() const;\n  void clear_float_data();\n  static const int kFloatDataFieldNumber = 3;\n  float float_data(int index) const;\n  void set_float_data(int index, float value);\n  void add_float_data(float value);\n  const ::google::protobuf::RepeatedField< float >&\n      float_data() const;\n  ::google::protobuf::RepeatedField< float >*\n      mutable_float_data();\n\n  // repeated int32 int32_data = 4 [packed = true];\n  int int32_data_size() const;\n  void clear_int32_data();\n  static const int kInt32DataFieldNumber = 4;\n  ::google::protobuf::int32 int32_data(int index) const;\n  void set_int32_data(int index, ::google::protobuf::int32 value);\n  void add_int32_data(::google::protobuf::int32 value);\n  const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\n      int32_data() const;\n  
::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\n      mutable_int32_data();\n\n  // optional bytes byte_data = 5;\n  bool has_byte_data() const;\n  void clear_byte_data();\n  static const int kByteDataFieldNumber = 5;\n  const ::std::string& byte_data() const;\n  void set_byte_data(const ::std::string& value);\n  void set_byte_data(const char* value);\n  void set_byte_data(const void* value, size_t size);\n  ::std::string* mutable_byte_data();\n  ::std::string* release_byte_data();\n  void set_allocated_byte_data(::std::string* byte_data);\n\n  // repeated bytes string_data = 6;\n  int string_data_size() const;\n  void clear_string_data();\n  static const int kStringDataFieldNumber = 6;\n  const ::std::string& string_data(int index) const;\n  ::std::string* mutable_string_data(int index);\n  void set_string_data(int index, const ::std::string& value);\n  void set_string_data(int index, const char* value);\n  void set_string_data(int index, const void* value, size_t size);\n  ::std::string* add_string_data();\n  void add_string_data(const ::std::string& value);\n  void add_string_data(const char* value);\n  void add_string_data(const void* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& string_data() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_string_data();\n\n  // repeated double double_data = 9 [packed = true];\n  int double_data_size() const;\n  void clear_double_data();\n  static const int kDoubleDataFieldNumber = 9;\n  double double_data(int index) const;\n  void set_double_data(int index, double value);\n  void add_double_data(double value);\n  const ::google::protobuf::RepeatedField< double >&\n      double_data() const;\n  ::google::protobuf::RepeatedField< double >*\n      mutable_double_data();\n\n  // repeated int64 int64_data = 10 [packed = true];\n  int int64_data_size() const;\n  void clear_int64_data();\n  static const int kInt64DataFieldNumber = 10;\n  
::google::protobuf::int64 int64_data(int index) const;\n  void set_int64_data(int index, ::google::protobuf::int64 value);\n  void add_int64_data(::google::protobuf::int64 value);\n  const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&\n      int64_data() const;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*\n      mutable_int64_data();\n\n  // optional string name = 7;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 7;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // optional .caffe2.DeviceOption device_detail = 8;\n  bool has_device_detail() const;\n  void clear_device_detail();\n  static const int kDeviceDetailFieldNumber = 8;\n  const ::caffe2::DeviceOption& device_detail() const;\n  ::caffe2::DeviceOption* mutable_device_detail();\n  ::caffe2::DeviceOption* release_device_detail();\n  void set_allocated_device_detail(::caffe2::DeviceOption* device_detail);\n\n  // optional .caffe2.TensorProto.Segment segment = 11;\n  bool has_segment() const;\n  void clear_segment();\n  static const int kSegmentFieldNumber = 11;\n  const ::caffe2::TensorProto_Segment& segment() const;\n  ::caffe2::TensorProto_Segment* mutable_segment();\n  ::caffe2::TensorProto_Segment* release_segment();\n  void set_allocated_segment(::caffe2::TensorProto_Segment* segment);\n\n  // @@protoc_insertion_point(class_scope:caffe2.TensorProto)\n private:\n  inline void set_has_data_type();\n  inline void clear_has_data_type();\n  inline void set_has_byte_data();\n  inline void clear_has_byte_data();\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_device_detail();\n  inline void clear_has_device_detail();\n  inline void set_has_segment();\n  
inline void clear_has_segment();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int64 > dims_;\n  ::google::protobuf::RepeatedField< float > float_data_;\n  mutable int _float_data_cached_byte_size_;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 > int32_data_;\n  mutable int _int32_data_cached_byte_size_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> string_data_;\n  ::google::protobuf::RepeatedField< double > double_data_;\n  mutable int _double_data_cached_byte_size_;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int64 > int64_data_;\n  mutable int _int64_data_cached_byte_size_;\n  ::google::protobuf::internal::ArenaStringPtr byte_data_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::caffe2::DeviceOption* device_detail_;\n  ::caffe2::TensorProto_Segment* segment_;\n  int data_type_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<TensorProto> TensorProto_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass QTensorProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.QTensorProto) */ {\n public:\n  QTensorProto();\n  virtual ~QTensorProto();\n\n  QTensorProto(const QTensorProto& from);\n\n  inline QTensorProto& operator=(const QTensorProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return 
_internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const QTensorProto& default_instance();\n\n  static const QTensorProto* internal_default_instance();\n\n  void Swap(QTensorProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline QTensorProto* New() const { return New(NULL); }\n\n  QTensorProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const QTensorProto& from);\n  void MergeFrom(const QTensorProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(QTensorProto* other);\n  void UnsafeMergeFrom(const QTensorProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  
// accessors -------------------------------------------------------\n\n  // repeated int64 dims = 1;\n  int dims_size() const;\n  void clear_dims();\n  static const int kDimsFieldNumber = 1;\n  ::google::protobuf::int64 dims(int index) const;\n  void set_dims(int index, ::google::protobuf::int64 value);\n  void add_dims(::google::protobuf::int64 value);\n  const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&\n      dims() const;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*\n      mutable_dims();\n\n  // required int32 precision = 2;\n  bool has_precision() const;\n  void clear_precision();\n  static const int kPrecisionFieldNumber = 2;\n  ::google::protobuf::int32 precision() const;\n  void set_precision(::google::protobuf::int32 value);\n\n  // required double scale = 3;\n  bool has_scale() const;\n  void clear_scale();\n  static const int kScaleFieldNumber = 3;\n  double scale() const;\n  void set_scale(double value);\n\n  // required double bias = 4;\n  bool has_bias() const;\n  void clear_bias();\n  static const int kBiasFieldNumber = 4;\n  double bias() const;\n  void set_bias(double value);\n\n  // required bool is_signed = 5;\n  bool has_is_signed() const;\n  void clear_is_signed();\n  static const int kIsSignedFieldNumber = 5;\n  bool is_signed() const;\n  void set_is_signed(bool value);\n\n  // repeated int32 data = 6 [packed = true];\n  int data_size() const;\n  void clear_data();\n  static const int kDataFieldNumber = 6;\n  ::google::protobuf::int32 data(int index) const;\n  void set_data(int index, ::google::protobuf::int32 value);\n  void add_data(::google::protobuf::int32 value);\n  const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\n      data() const;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\n      mutable_data();\n\n  // optional string name = 7;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 7;\n  const ::std::string& name() 
const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // @@protoc_insertion_point(class_scope:caffe2.QTensorProto)\n private:\n  inline void set_has_precision();\n  inline void clear_has_precision();\n  inline void set_has_scale();\n  inline void clear_has_scale();\n  inline void set_has_bias();\n  inline void clear_has_bias();\n  inline void set_has_is_signed();\n  inline void clear_has_is_signed();\n  inline void set_has_name();\n  inline void clear_has_name();\n\n  // helper for ByteSizeLong()\n  size_t RequiredFieldsByteSizeFallback() const;\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int64 > dims_;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 > data_;\n  mutable int _data_cached_byte_size_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  double scale_;\n  ::google::protobuf::int32 precision_;\n  bool is_signed_;\n  double bias_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<QTensorProto> QTensorProto_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass TensorProtos : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.TensorProtos) */ {\n public:\n  TensorProtos();\n  virtual ~TensorProtos();\n\n  TensorProtos(const 
TensorProtos& from);\n\n  inline TensorProtos& operator=(const TensorProtos& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const TensorProtos& default_instance();\n\n  static const TensorProtos* internal_default_instance();\n\n  void Swap(TensorProtos* other);\n\n  // implements Message ----------------------------------------------\n\n  inline TensorProtos* New() const { return New(NULL); }\n\n  TensorProtos* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const TensorProtos& from);\n  void MergeFrom(const TensorProtos& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(TensorProtos* other);\n  void UnsafeMergeFrom(const TensorProtos& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* 
MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // repeated .caffe2.TensorProto protos = 1;\n  int protos_size() const;\n  void clear_protos();\n  static const int kProtosFieldNumber = 1;\n  const ::caffe2::TensorProto& protos(int index) const;\n  ::caffe2::TensorProto* mutable_protos(int index);\n  ::caffe2::TensorProto* add_protos();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::TensorProto >*\n      mutable_protos();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::TensorProto >&\n      protos() const;\n\n  // @@protoc_insertion_point(class_scope:caffe2.TensorProtos)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::TensorProto > protos_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<TensorProtos> TensorProtos_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass TensorShape : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.TensorShape) */ {\n public:\n  TensorShape();\n  virtual ~TensorShape();\n\n  TensorShape(const TensorShape& from);\n\n  inline TensorShape& operator=(const TensorShape& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& 
unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const TensorShape& default_instance();\n\n  static const TensorShape* internal_default_instance();\n\n  void Swap(TensorShape* other);\n\n  // implements Message ----------------------------------------------\n\n  inline TensorShape* New() const { return New(NULL); }\n\n  TensorShape* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const TensorShape& from);\n  void MergeFrom(const TensorShape& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(TensorShape* other);\n  void UnsafeMergeFrom(const TensorShape& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types 
----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // repeated int64 dims = 1;\n  int dims_size() const;\n  void clear_dims();\n  static const int kDimsFieldNumber = 1;\n  ::google::protobuf::int64 dims(int index) const;\n  void set_dims(int index, ::google::protobuf::int64 value);\n  void add_dims(::google::protobuf::int64 value);\n  const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&\n      dims() const;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*\n      mutable_dims();\n\n  // optional .caffe2.TensorProto.DataType data_type = 2 [default = FLOAT];\n  bool has_data_type() const;\n  void clear_data_type();\n  static const int kDataTypeFieldNumber = 2;\n  ::caffe2::TensorProto_DataType data_type() const;\n  void set_data_type(::caffe2::TensorProto_DataType value);\n\n  // repeated int32 unknown_dims = 3;\n  int unknown_dims_size() const;\n  void clear_unknown_dims();\n  static const int kUnknownDimsFieldNumber = 3;\n  ::google::protobuf::int32 unknown_dims(int index) const;\n  void set_unknown_dims(int index, ::google::protobuf::int32 value);\n  void add_unknown_dims(::google::protobuf::int32 value);\n  const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\n      unknown_dims() const;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\n      mutable_unknown_dims();\n\n  // optional bool unknown_shape = 4 [default = false];\n  bool has_unknown_shape() const;\n  void clear_unknown_shape();\n  static const int kUnknownShapeFieldNumber = 4;\n  bool unknown_shape() const;\n  void set_unknown_shape(bool value);\n\n  // optional string name = 5;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 5;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* 
mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // @@protoc_insertion_point(class_scope:caffe2.TensorShape)\n private:\n  inline void set_has_data_type();\n  inline void clear_has_data_type();\n  inline void set_has_unknown_shape();\n  inline void clear_has_unknown_shape();\n  inline void set_has_name();\n  inline void clear_has_name();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int64 > dims_;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 > unknown_dims_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  bool unknown_shape_;\n  int data_type_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<TensorShape> TensorShape_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass TensorShapes : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.TensorShapes) */ {\n public:\n  TensorShapes();\n  virtual ~TensorShapes();\n\n  TensorShapes(const TensorShapes& from);\n\n  inline TensorShapes& operator=(const TensorShapes& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* 
descriptor();\n  static const TensorShapes& default_instance();\n\n  static const TensorShapes* internal_default_instance();\n\n  void Swap(TensorShapes* other);\n\n  // implements Message ----------------------------------------------\n\n  inline TensorShapes* New() const { return New(NULL); }\n\n  TensorShapes* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const TensorShapes& from);\n  void MergeFrom(const TensorShapes& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(TensorShapes* other);\n  void UnsafeMergeFrom(const TensorShapes& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // repeated .caffe2.TensorShape shapes = 1;\n  int shapes_size() const;\n  void clear_shapes();\n  static const int kShapesFieldNumber = 1;\n  const 
::caffe2::TensorShape& shapes(int index) const;\n  ::caffe2::TensorShape* mutable_shapes(int index);\n  ::caffe2::TensorShape* add_shapes();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::TensorShape >*\n      mutable_shapes();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::TensorShape >&\n      shapes() const;\n\n  // @@protoc_insertion_point(class_scope:caffe2.TensorShapes)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::TensorShape > shapes_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<TensorShapes> TensorShapes_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass Argument : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.Argument) */ {\n public:\n  Argument();\n  virtual ~Argument();\n\n  Argument(const Argument& from);\n\n  inline Argument& operator=(const Argument& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Argument& default_instance();\n\n  static const Argument* internal_default_instance();\n\n  void Swap(Argument* other);\n\n  // implements Message 
----------------------------------------------\n\n  inline Argument* New() const { return New(NULL); }\n\n  Argument* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Argument& from);\n  void MergeFrom(const Argument& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Argument* other);\n  void UnsafeMergeFrom(const Argument& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void 
set_allocated_name(::std::string* name);\n\n  // optional float f = 2;\n  bool has_f() const;\n  void clear_f();\n  static const int kFFieldNumber = 2;\n  float f() const;\n  void set_f(float value);\n\n  // optional int64 i = 3;\n  bool has_i() const;\n  void clear_i();\n  static const int kIFieldNumber = 3;\n  ::google::protobuf::int64 i() const;\n  void set_i(::google::protobuf::int64 value);\n\n  // optional bytes s = 4;\n  bool has_s() const;\n  void clear_s();\n  static const int kSFieldNumber = 4;\n  const ::std::string& s() const;\n  void set_s(const ::std::string& value);\n  void set_s(const char* value);\n  void set_s(const void* value, size_t size);\n  ::std::string* mutable_s();\n  ::std::string* release_s();\n  void set_allocated_s(::std::string* s);\n\n  // optional .caffe2.NetDef n = 8;\n  bool has_n() const;\n  void clear_n();\n  static const int kNFieldNumber = 8;\n  const ::caffe2::NetDef& n() const;\n  ::caffe2::NetDef* mutable_n();\n  ::caffe2::NetDef* release_n();\n  void set_allocated_n(::caffe2::NetDef* n);\n\n  // repeated float floats = 5;\n  int floats_size() const;\n  void clear_floats();\n  static const int kFloatsFieldNumber = 5;\n  float floats(int index) const;\n  void set_floats(int index, float value);\n  void add_floats(float value);\n  const ::google::protobuf::RepeatedField< float >&\n      floats() const;\n  ::google::protobuf::RepeatedField< float >*\n      mutable_floats();\n\n  // repeated int64 ints = 6;\n  int ints_size() const;\n  void clear_ints();\n  static const int kIntsFieldNumber = 6;\n  ::google::protobuf::int64 ints(int index) const;\n  void set_ints(int index, ::google::protobuf::int64 value);\n  void add_ints(::google::protobuf::int64 value);\n  const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&\n      ints() const;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*\n      mutable_ints();\n\n  // repeated bytes strings = 7;\n  int strings_size() const;\n  void clear_strings();\n 
 static const int kStringsFieldNumber = 7;\n  const ::std::string& strings(int index) const;\n  ::std::string* mutable_strings(int index);\n  void set_strings(int index, const ::std::string& value);\n  void set_strings(int index, const char* value);\n  void set_strings(int index, const void* value, size_t size);\n  ::std::string* add_strings();\n  void add_strings(const ::std::string& value);\n  void add_strings(const char* value);\n  void add_strings(const void* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& strings() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_strings();\n\n  // repeated .caffe2.NetDef nets = 9;\n  int nets_size() const;\n  void clear_nets();\n  static const int kNetsFieldNumber = 9;\n  const ::caffe2::NetDef& nets(int index) const;\n  ::caffe2::NetDef* mutable_nets(int index);\n  ::caffe2::NetDef* add_nets();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::NetDef >*\n      mutable_nets();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::NetDef >&\n      nets() const;\n\n  // @@protoc_insertion_point(class_scope:caffe2.Argument)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_f();\n  inline void clear_has_f();\n  inline void set_has_i();\n  inline void clear_has_i();\n  inline void set_has_s();\n  inline void clear_has_s();\n  inline void set_has_n();\n  inline void clear_has_n();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedField< float > floats_;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int64 > ints_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> strings_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::NetDef > nets_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::internal::ArenaStringPtr s_;\n  
::caffe2::NetDef* n_;\n  ::google::protobuf::int64 i_;\n  float f_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Argument> Argument_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass DeviceOption : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.DeviceOption) */ {\n public:\n  DeviceOption();\n  virtual ~DeviceOption();\n\n  DeviceOption(const DeviceOption& from);\n\n  inline DeviceOption& operator=(const DeviceOption& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const DeviceOption& default_instance();\n\n  static const DeviceOption* internal_default_instance();\n\n  void Swap(DeviceOption* other);\n\n  // implements Message ----------------------------------------------\n\n  inline DeviceOption* New() const { return New(NULL); }\n\n  DeviceOption* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const DeviceOption& from);\n  void MergeFrom(const DeviceOption& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void 
SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(DeviceOption* other);\n  void UnsafeMergeFrom(const DeviceOption& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional int32 device_type = 1 [default = 0];\n  bool has_device_type() const;\n  void clear_device_type();\n  static const int kDeviceTypeFieldNumber = 1;\n  ::google::protobuf::int32 device_type() const;\n  void set_device_type(::google::protobuf::int32 value);\n\n  // optional int32 cuda_gpu_id = 2;\n  bool has_cuda_gpu_id() const;\n  void clear_cuda_gpu_id();\n  static const int kCudaGpuIdFieldNumber = 2;\n  ::google::protobuf::int32 cuda_gpu_id() const;\n  void set_cuda_gpu_id(::google::protobuf::int32 value);\n\n  // optional uint32 random_seed = 3;\n  bool has_random_seed() const;\n  void clear_random_seed();\n  static const int kRandomSeedFieldNumber = 3;\n  ::google::protobuf::uint32 random_seed() const;\n  void set_random_seed(::google::protobuf::uint32 value);\n\n  // optional string node_name = 4;\n  bool has_node_name() const;\n  void clear_node_name();\n  static const int 
kNodeNameFieldNumber = 4;\n  const ::std::string& node_name() const;\n  void set_node_name(const ::std::string& value);\n  void set_node_name(const char* value);\n  void set_node_name(const char* value, size_t size);\n  ::std::string* mutable_node_name();\n  ::std::string* release_node_name();\n  void set_allocated_node_name(::std::string* node_name);\n\n  // @@protoc_insertion_point(class_scope:caffe2.DeviceOption)\n private:\n  inline void set_has_device_type();\n  inline void clear_has_device_type();\n  inline void set_has_cuda_gpu_id();\n  inline void clear_has_cuda_gpu_id();\n  inline void set_has_random_seed();\n  inline void clear_has_random_seed();\n  inline void set_has_node_name();\n  inline void clear_has_node_name();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::ArenaStringPtr node_name_;\n  ::google::protobuf::int32 device_type_;\n  ::google::protobuf::int32 cuda_gpu_id_;\n  ::google::protobuf::uint32 random_seed_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<DeviceOption> DeviceOption_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass OperatorDef : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.OperatorDef) */ {\n public:\n  OperatorDef();\n  virtual ~OperatorDef();\n\n  OperatorDef(const OperatorDef& from);\n\n  inline OperatorDef& operator=(const OperatorDef& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& 
unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const OperatorDef& default_instance();\n\n  static const OperatorDef* internal_default_instance();\n\n  void Swap(OperatorDef* other);\n\n  // implements Message ----------------------------------------------\n\n  inline OperatorDef* New() const { return New(NULL); }\n\n  OperatorDef* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const OperatorDef& from);\n  void MergeFrom(const OperatorDef& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(OperatorDef* other);\n  void UnsafeMergeFrom(const OperatorDef& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types 
----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // repeated string input = 1;\n  int input_size() const;\n  void clear_input();\n  static const int kInputFieldNumber = 1;\n  const ::std::string& input(int index) const;\n  ::std::string* mutable_input(int index);\n  void set_input(int index, const ::std::string& value);\n  void set_input(int index, const char* value);\n  void set_input(int index, const char* value, size_t size);\n  ::std::string* add_input();\n  void add_input(const ::std::string& value);\n  void add_input(const char* value);\n  void add_input(const char* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& input() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_input();\n\n  // repeated string output = 2;\n  int output_size() const;\n  void clear_output();\n  static const int kOutputFieldNumber = 2;\n  const ::std::string& output(int index) const;\n  ::std::string* mutable_output(int index);\n  void set_output(int index, const ::std::string& value);\n  void set_output(int index, const char* value);\n  void set_output(int index, const char* value, size_t size);\n  ::std::string* add_output();\n  void add_output(const ::std::string& value);\n  void add_output(const char* value);\n  void add_output(const char* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& output() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_output();\n\n  // optional string name = 3;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 3;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // optional string type 
= 4;\n  bool has_type() const;\n  void clear_type();\n  static const int kTypeFieldNumber = 4;\n  const ::std::string& type() const;\n  void set_type(const ::std::string& value);\n  void set_type(const char* value);\n  void set_type(const char* value, size_t size);\n  ::std::string* mutable_type();\n  ::std::string* release_type();\n  void set_allocated_type(::std::string* type);\n\n  // repeated .caffe2.Argument arg = 5;\n  int arg_size() const;\n  void clear_arg();\n  static const int kArgFieldNumber = 5;\n  const ::caffe2::Argument& arg(int index) const;\n  ::caffe2::Argument* mutable_arg(int index);\n  ::caffe2::Argument* add_arg();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::Argument >*\n      mutable_arg();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::Argument >&\n      arg() const;\n\n  // optional .caffe2.DeviceOption device_option = 6;\n  bool has_device_option() const;\n  void clear_device_option();\n  static const int kDeviceOptionFieldNumber = 6;\n  const ::caffe2::DeviceOption& device_option() const;\n  ::caffe2::DeviceOption* mutable_device_option();\n  ::caffe2::DeviceOption* release_device_option();\n  void set_allocated_device_option(::caffe2::DeviceOption* device_option);\n\n  // optional string engine = 7;\n  bool has_engine() const;\n  void clear_engine();\n  static const int kEngineFieldNumber = 7;\n  const ::std::string& engine() const;\n  void set_engine(const ::std::string& value);\n  void set_engine(const char* value);\n  void set_engine(const char* value, size_t size);\n  ::std::string* mutable_engine();\n  ::std::string* release_engine();\n  void set_allocated_engine(::std::string* engine);\n\n  // repeated string control_input = 8;\n  int control_input_size() const;\n  void clear_control_input();\n  static const int kControlInputFieldNumber = 8;\n  const ::std::string& control_input(int index) const;\n  ::std::string* mutable_control_input(int index);\n  void set_control_input(int index, const ::std::string& value);\n  
void set_control_input(int index, const char* value);\n  void set_control_input(int index, const char* value, size_t size);\n  ::std::string* add_control_input();\n  void add_control_input(const ::std::string& value);\n  void add_control_input(const char* value);\n  void add_control_input(const char* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& control_input() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_control_input();\n\n  // optional bool is_gradient_op = 9 [default = false];\n  bool has_is_gradient_op() const;\n  void clear_is_gradient_op();\n  static const int kIsGradientOpFieldNumber = 9;\n  bool is_gradient_op() const;\n  void set_is_gradient_op(bool value);\n\n  // @@protoc_insertion_point(class_scope:caffe2.OperatorDef)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_type();\n  inline void clear_has_type();\n  inline void set_has_device_option();\n  inline void clear_has_device_option();\n  inline void set_has_engine();\n  inline void clear_has_engine();\n  inline void set_has_is_gradient_op();\n  inline void clear_has_is_gradient_op();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> input_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> output_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::Argument > arg_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> control_input_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::internal::ArenaStringPtr type_;\n  ::google::protobuf::internal::ArenaStringPtr engine_;\n  ::caffe2::DeviceOption* device_option_;\n  bool is_gradient_op_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void  
protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<OperatorDef> OperatorDef_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass NetDef : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.NetDef) */ {\n public:\n  NetDef();\n  virtual ~NetDef();\n\n  NetDef(const NetDef& from);\n\n  inline NetDef& operator=(const NetDef& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const NetDef& default_instance();\n\n  static const NetDef* internal_default_instance();\n\n  void Swap(NetDef* other);\n\n  // implements Message ----------------------------------------------\n\n  inline NetDef* New() const { return New(NULL); }\n\n  NetDef* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const NetDef& from);\n  void MergeFrom(const NetDef& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  
::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(NetDef* other);\n  void UnsafeMergeFrom(const NetDef& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // repeated .caffe2.OperatorDef op = 2;\n  int op_size() const;\n  void clear_op();\n  static const int kOpFieldNumber = 2;\n  const ::caffe2::OperatorDef& op(int index) const;\n  ::caffe2::OperatorDef* mutable_op(int index);\n  ::caffe2::OperatorDef* add_op();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::OperatorDef >*\n      mutable_op();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::OperatorDef >&\n      op() const;\n\n  // optional string type = 3;\n  bool has_type() const;\n  void clear_type();\n  static const int kTypeFieldNumber = 3;\n  const ::std::string& type() const;\n  void set_type(const ::std::string& value);\n  void set_type(const char* value);\n  void set_type(const char* value, size_t size);\n  
::std::string* mutable_type();\n  ::std::string* release_type();\n  void set_allocated_type(::std::string* type);\n\n  // optional int32 num_workers = 4 [deprecated = true];\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR bool has_num_workers() const;\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR void clear_num_workers();\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR static const int kNumWorkersFieldNumber = 4;\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR ::google::protobuf::int32 num_workers() const;\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR void set_num_workers(::google::protobuf::int32 value);\n\n  // optional .caffe2.DeviceOption device_option = 5;\n  bool has_device_option() const;\n  void clear_device_option();\n  static const int kDeviceOptionFieldNumber = 5;\n  const ::caffe2::DeviceOption& device_option() const;\n  ::caffe2::DeviceOption* mutable_device_option();\n  ::caffe2::DeviceOption* release_device_option();\n  void set_allocated_device_option(::caffe2::DeviceOption* device_option);\n\n  // repeated .caffe2.Argument arg = 6;\n  int arg_size() const;\n  void clear_arg();\n  static const int kArgFieldNumber = 6;\n  const ::caffe2::Argument& arg(int index) const;\n  ::caffe2::Argument* mutable_arg(int index);\n  ::caffe2::Argument* add_arg();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::Argument >*\n      mutable_arg();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::Argument >&\n      arg() const;\n\n  // repeated string external_input = 7;\n  int external_input_size() const;\n  void clear_external_input();\n  static const int kExternalInputFieldNumber = 7;\n  const ::std::string& external_input(int index) const;\n  ::std::string* mutable_external_input(int index);\n  void set_external_input(int index, const ::std::string& value);\n  void set_external_input(int index, const char* value);\n  void set_external_input(int index, const char* value, size_t size);\n  ::std::string* add_external_input();\n  void add_external_input(const ::std::string& value);\n  void add_external_input(const 
char* value);\n  void add_external_input(const char* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& external_input() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_external_input();\n\n  // repeated string external_output = 8;\n  int external_output_size() const;\n  void clear_external_output();\n  static const int kExternalOutputFieldNumber = 8;\n  const ::std::string& external_output(int index) const;\n  ::std::string* mutable_external_output(int index);\n  void set_external_output(int index, const ::std::string& value);\n  void set_external_output(int index, const char* value);\n  void set_external_output(int index, const char* value, size_t size);\n  ::std::string* add_external_output();\n  void add_external_output(const ::std::string& value);\n  void add_external_output(const char* value);\n  void add_external_output(const char* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& external_output() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_external_output();\n\n  // @@protoc_insertion_point(class_scope:caffe2.NetDef)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_type();\n  inline void clear_has_type();\n  inline void set_has_num_workers();\n  inline void clear_has_num_workers();\n  inline void set_has_device_option();\n  inline void clear_has_device_option();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::OperatorDef > op_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::Argument > arg_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> external_input_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> external_output_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  
::google::protobuf::internal::ArenaStringPtr type_;\n  ::caffe2::DeviceOption* device_option_;\n  ::google::protobuf::int32 num_workers_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<NetDef> NetDef_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass ExecutionStep : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.ExecutionStep) */ {\n public:\n  ExecutionStep();\n  virtual ~ExecutionStep();\n\n  ExecutionStep(const ExecutionStep& from);\n\n  inline ExecutionStep& operator=(const ExecutionStep& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const ExecutionStep& default_instance();\n\n  static const ExecutionStep* internal_default_instance();\n\n  void Swap(ExecutionStep* other);\n\n  // implements Message ----------------------------------------------\n\n  inline ExecutionStep* New() const { return New(NULL); }\n\n  ExecutionStep* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const ExecutionStep& from);\n  void MergeFrom(const ExecutionStep& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool 
MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(ExecutionStep* other);\n  void UnsafeMergeFrom(const ExecutionStep& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // repeated .caffe2.ExecutionStep substep = 2;\n  int substep_size() const;\n  void clear_substep();\n  static const int kSubstepFieldNumber = 2;\n  const ::caffe2::ExecutionStep& substep(int index) const;\n  ::caffe2::ExecutionStep* mutable_substep(int index);\n  ::caffe2::ExecutionStep* add_substep();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::ExecutionStep >*\n      mutable_substep();\n  
const ::google::protobuf::RepeatedPtrField< ::caffe2::ExecutionStep >&\n      substep() const;\n\n  // repeated string network = 3;\n  int network_size() const;\n  void clear_network();\n  static const int kNetworkFieldNumber = 3;\n  const ::std::string& network(int index) const;\n  ::std::string* mutable_network(int index);\n  void set_network(int index, const ::std::string& value);\n  void set_network(int index, const char* value);\n  void set_network(int index, const char* value, size_t size);\n  ::std::string* add_network();\n  void add_network(const ::std::string& value);\n  void add_network(const char* value);\n  void add_network(const char* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& network() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_network();\n\n  // optional int64 num_iter = 4;\n  bool has_num_iter() const;\n  void clear_num_iter();\n  static const int kNumIterFieldNumber = 4;\n  ::google::protobuf::int64 num_iter() const;\n  void set_num_iter(::google::protobuf::int64 value);\n\n  // optional string criteria_network = 5 [deprecated = true];\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR bool has_criteria_network() const;\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR void clear_criteria_network();\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR static const int kCriteriaNetworkFieldNumber = 5;\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR const ::std::string& criteria_network() const;\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR void set_criteria_network(const ::std::string& value);\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR void set_criteria_network(const char* value);\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR void set_criteria_network(const char* value, size_t size);\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR ::std::string* mutable_criteria_network();\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR ::std::string* release_criteria_network();\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR void set_allocated_criteria_network(::std::string* criteria_network);\n\n  // optional string 
report_net = 7;\n  bool has_report_net() const;\n  void clear_report_net();\n  static const int kReportNetFieldNumber = 7;\n  const ::std::string& report_net() const;\n  void set_report_net(const ::std::string& value);\n  void set_report_net(const char* value);\n  void set_report_net(const char* value, size_t size);\n  ::std::string* mutable_report_net();\n  ::std::string* release_report_net();\n  void set_allocated_report_net(::std::string* report_net);\n\n  // optional int32 report_interval = 8;\n  bool has_report_interval() const;\n  void clear_report_interval();\n  static const int kReportIntervalFieldNumber = 8;\n  ::google::protobuf::int32 report_interval() const;\n  void set_report_interval(::google::protobuf::int32 value);\n\n  // optional int64 run_every_ms = 11;\n  bool has_run_every_ms() const;\n  void clear_run_every_ms();\n  static const int kRunEveryMsFieldNumber = 11;\n  ::google::protobuf::int64 run_every_ms() const;\n  void set_run_every_ms(::google::protobuf::int64 value);\n\n  // optional bool concurrent_substeps = 6;\n  bool has_concurrent_substeps() const;\n  void clear_concurrent_substeps();\n  static const int kConcurrentSubstepsFieldNumber = 6;\n  bool concurrent_substeps() const;\n  void set_concurrent_substeps(bool value);\n\n  // optional string should_stop_blob = 9;\n  bool has_should_stop_blob() const;\n  void clear_should_stop_blob();\n  static const int kShouldStopBlobFieldNumber = 9;\n  const ::std::string& should_stop_blob() const;\n  void set_should_stop_blob(const ::std::string& value);\n  void set_should_stop_blob(const char* value);\n  void set_should_stop_blob(const char* value, size_t size);\n  ::std::string* mutable_should_stop_blob();\n  ::std::string* release_should_stop_blob();\n  void set_allocated_should_stop_blob(::std::string* should_stop_blob);\n\n  // optional bool only_once = 10;\n  bool has_only_once() const;\n  void clear_only_once();\n  static const int kOnlyOnceFieldNumber = 10;\n  bool only_once() const;\n  
void set_only_once(bool value);\n\n  // optional bool create_workspace = 12;\n  bool has_create_workspace() const;\n  void clear_create_workspace();\n  static const int kCreateWorkspaceFieldNumber = 12;\n  bool create_workspace() const;\n  void set_create_workspace(bool value);\n\n  // optional int32 num_concurrent_instances = 13;\n  bool has_num_concurrent_instances() const;\n  void clear_num_concurrent_instances();\n  static const int kNumConcurrentInstancesFieldNumber = 13;\n  ::google::protobuf::int32 num_concurrent_instances() const;\n  void set_num_concurrent_instances(::google::protobuf::int32 value);\n\n  // @@protoc_insertion_point(class_scope:caffe2.ExecutionStep)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_num_iter();\n  inline void clear_has_num_iter();\n  inline void set_has_criteria_network();\n  inline void clear_has_criteria_network();\n  inline void set_has_report_net();\n  inline void clear_has_report_net();\n  inline void set_has_report_interval();\n  inline void clear_has_report_interval();\n  inline void set_has_run_every_ms();\n  inline void clear_has_run_every_ms();\n  inline void set_has_concurrent_substeps();\n  inline void clear_has_concurrent_substeps();\n  inline void set_has_should_stop_blob();\n  inline void clear_has_should_stop_blob();\n  inline void set_has_only_once();\n  inline void clear_has_only_once();\n  inline void set_has_create_workspace();\n  inline void clear_has_create_workspace();\n  inline void set_has_num_concurrent_instances();\n  inline void clear_has_num_concurrent_instances();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::ExecutionStep > substep_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> network_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  
::google::protobuf::internal::ArenaStringPtr criteria_network_;\n  ::google::protobuf::internal::ArenaStringPtr report_net_;\n  ::google::protobuf::internal::ArenaStringPtr should_stop_blob_;\n  ::google::protobuf::int64 num_iter_;\n  ::google::protobuf::int64 run_every_ms_;\n  ::google::protobuf::int32 report_interval_;\n  bool concurrent_substeps_;\n  bool only_once_;\n  bool create_workspace_;\n  ::google::protobuf::int32 num_concurrent_instances_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<ExecutionStep> ExecutionStep_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass PlanDef : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.PlanDef) */ {\n public:\n  PlanDef();\n  virtual ~PlanDef();\n\n  PlanDef(const PlanDef& from);\n\n  inline PlanDef& operator=(const PlanDef& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const PlanDef& default_instance();\n\n  static const PlanDef* internal_default_instance();\n\n  void Swap(PlanDef* other);\n\n  // implements Message ----------------------------------------------\n\n  inline PlanDef* New() const { return New(NULL); }\n\n  PlanDef* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void 
MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const PlanDef& from);\n  void MergeFrom(const PlanDef& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(PlanDef* other);\n  void UnsafeMergeFrom(const PlanDef& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // repeated .caffe2.NetDef network = 2;\n  int network_size() const;\n  void clear_network();\n  static const int kNetworkFieldNumber = 2;\n  const ::caffe2::NetDef& network(int index) const;\n 
 ::caffe2::NetDef* mutable_network(int index);\n  ::caffe2::NetDef* add_network();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::NetDef >*\n      mutable_network();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::NetDef >&\n      network() const;\n\n  // repeated .caffe2.ExecutionStep execution_step = 3;\n  int execution_step_size() const;\n  void clear_execution_step();\n  static const int kExecutionStepFieldNumber = 3;\n  const ::caffe2::ExecutionStep& execution_step(int index) const;\n  ::caffe2::ExecutionStep* mutable_execution_step(int index);\n  ::caffe2::ExecutionStep* add_execution_step();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::ExecutionStep >*\n      mutable_execution_step();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::ExecutionStep >&\n      execution_step() const;\n\n  // @@protoc_insertion_point(class_scope:caffe2.PlanDef)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::NetDef > network_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::ExecutionStep > execution_step_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<PlanDef> PlanDef_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass BlobProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.BlobProto) */ {\n public:\n  BlobProto();\n  
virtual ~BlobProto();\n\n  BlobProto(const BlobProto& from);\n\n  inline BlobProto& operator=(const BlobProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const BlobProto& default_instance();\n\n  static const BlobProto* internal_default_instance();\n\n  void Swap(BlobProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline BlobProto* New() const { return New(NULL); }\n\n  BlobProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const BlobProto& from);\n  void MergeFrom(const BlobProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(BlobProto* other);\n  void UnsafeMergeFrom(const BlobProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline 
void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // optional string type = 2;\n  bool has_type() const;\n  void clear_type();\n  static const int kTypeFieldNumber = 2;\n  const ::std::string& type() const;\n  void set_type(const ::std::string& value);\n  void set_type(const char* value);\n  void set_type(const char* value, size_t size);\n  ::std::string* mutable_type();\n  ::std::string* release_type();\n  void set_allocated_type(::std::string* type);\n\n  // optional .caffe2.TensorProto tensor = 3;\n  bool has_tensor() const;\n  void clear_tensor();\n  static const int kTensorFieldNumber = 3;\n  const ::caffe2::TensorProto& tensor() const;\n  ::caffe2::TensorProto* mutable_tensor();\n  ::caffe2::TensorProto* release_tensor();\n  void set_allocated_tensor(::caffe2::TensorProto* tensor);\n\n  // optional bytes content = 4;\n  bool has_content() const;\n  void clear_content();\n  static const int kContentFieldNumber = 4;\n  const ::std::string& content() const;\n  void set_content(const ::std::string& value);\n  void set_content(const char* value);\n  void set_content(const void* value, size_t size);\n  ::std::string* mutable_content();\n  ::std::string* release_content();\n  void set_allocated_content(::std::string* content);\n\n  // optional .caffe2.QTensorProto qtensor = 5;\n  bool has_qtensor() const;\n  void 
clear_qtensor();\n  static const int kQtensorFieldNumber = 5;\n  const ::caffe2::QTensorProto& qtensor() const;\n  ::caffe2::QTensorProto* mutable_qtensor();\n  ::caffe2::QTensorProto* release_qtensor();\n  void set_allocated_qtensor(::caffe2::QTensorProto* qtensor);\n\n  // optional int32 content_num_chunks = 6;\n  bool has_content_num_chunks() const;\n  void clear_content_num_chunks();\n  static const int kContentNumChunksFieldNumber = 6;\n  ::google::protobuf::int32 content_num_chunks() const;\n  void set_content_num_chunks(::google::protobuf::int32 value);\n\n  // optional int32 content_chunk_id = 7;\n  bool has_content_chunk_id() const;\n  void clear_content_chunk_id();\n  static const int kContentChunkIdFieldNumber = 7;\n  ::google::protobuf::int32 content_chunk_id() const;\n  void set_content_chunk_id(::google::protobuf::int32 value);\n\n  // @@protoc_insertion_point(class_scope:caffe2.BlobProto)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_type();\n  inline void clear_has_type();\n  inline void set_has_tensor();\n  inline void clear_has_tensor();\n  inline void set_has_content();\n  inline void clear_has_content();\n  inline void set_has_qtensor();\n  inline void clear_has_qtensor();\n  inline void set_has_content_num_chunks();\n  inline void clear_has_content_num_chunks();\n  inline void set_has_content_chunk_id();\n  inline void clear_has_content_chunk_id();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::internal::ArenaStringPtr type_;\n  ::google::protobuf::internal::ArenaStringPtr content_;\n  ::caffe2::TensorProto* tensor_;\n  ::caffe2::QTensorProto* qtensor_;\n  ::google::protobuf::int32 content_num_chunks_;\n  ::google::protobuf::int32 content_chunk_id_;\n  friend void  
protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<BlobProto> BlobProto_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass DBReaderProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.DBReaderProto) */ {\n public:\n  DBReaderProto();\n  virtual ~DBReaderProto();\n\n  DBReaderProto(const DBReaderProto& from);\n\n  inline DBReaderProto& operator=(const DBReaderProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const DBReaderProto& default_instance();\n\n  static const DBReaderProto* internal_default_instance();\n\n  void Swap(DBReaderProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline DBReaderProto* New() const { return New(NULL); }\n\n  DBReaderProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const DBReaderProto& from);\n  void MergeFrom(const DBReaderProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* 
output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(DBReaderProto* other);\n  void UnsafeMergeFrom(const DBReaderProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // optional string source = 2;\n  bool has_source() const;\n  void clear_source();\n  static const int kSourceFieldNumber = 2;\n  const ::std::string& source() const;\n  void set_source(const ::std::string& value);\n  void set_source(const char* value);\n  void set_source(const char* value, size_t size);\n  ::std::string* mutable_source();\n  ::std::string* release_source();\n  void set_allocated_source(::std::string* source);\n\n  // optional string db_type = 3;\n  bool has_db_type() const;\n  void clear_db_type();\n  static const int kDbTypeFieldNumber = 3;\n  
const ::std::string& db_type() const;\n  void set_db_type(const ::std::string& value);\n  void set_db_type(const char* value);\n  void set_db_type(const char* value, size_t size);\n  ::std::string* mutable_db_type();\n  ::std::string* release_db_type();\n  void set_allocated_db_type(::std::string* db_type);\n\n  // optional string key = 4;\n  bool has_key() const;\n  void clear_key();\n  static const int kKeyFieldNumber = 4;\n  const ::std::string& key() const;\n  void set_key(const ::std::string& value);\n  void set_key(const char* value);\n  void set_key(const char* value, size_t size);\n  ::std::string* mutable_key();\n  ::std::string* release_key();\n  void set_allocated_key(::std::string* key);\n\n  // @@protoc_insertion_point(class_scope:caffe2.DBReaderProto)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_source();\n  inline void clear_has_source();\n  inline void set_has_db_type();\n  inline void clear_has_db_type();\n  inline void set_has_key();\n  inline void clear_has_key();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::internal::ArenaStringPtr source_;\n  ::google::protobuf::internal::ArenaStringPtr db_type_;\n  ::google::protobuf::internal::ArenaStringPtr key_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fcaffe2_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<DBReaderProto> DBReaderProto_default_instance_;\n\n// ===================================================================\n\n\n// 
===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// TensorProto_Segment\n\n// required int64 begin = 1;\ninline bool TensorProto_Segment::has_begin() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void TensorProto_Segment::set_has_begin() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void TensorProto_Segment::clear_has_begin() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void TensorProto_Segment::clear_begin() {\n  begin_ = GOOGLE_LONGLONG(0);\n  clear_has_begin();\n}\ninline ::google::protobuf::int64 TensorProto_Segment::begin() const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorProto.Segment.begin)\n  return begin_;\n}\ninline void TensorProto_Segment::set_begin(::google::protobuf::int64 value) {\n  set_has_begin();\n  begin_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.TensorProto.Segment.begin)\n}\n\n// required int64 end = 2;\ninline bool TensorProto_Segment::has_end() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void TensorProto_Segment::set_has_end() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void TensorProto_Segment::clear_has_end() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void TensorProto_Segment::clear_end() {\n  end_ = GOOGLE_LONGLONG(0);\n  clear_has_end();\n}\ninline ::google::protobuf::int64 TensorProto_Segment::end() const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorProto.Segment.end)\n  return end_;\n}\ninline void TensorProto_Segment::set_end(::google::protobuf::int64 value) {\n  set_has_end();\n  end_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.TensorProto.Segment.end)\n}\n\ninline const TensorProto_Segment* TensorProto_Segment::internal_default_instance() {\n  return &TensorProto_Segment_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// TensorProto\n\n// repeated int64 dims = 1;\ninline int TensorProto::dims_size() const {\n  return 
dims_.size();\n}\ninline void TensorProto::clear_dims() {\n  dims_.Clear();\n}\ninline ::google::protobuf::int64 TensorProto::dims(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorProto.dims)\n  return dims_.Get(index);\n}\ninline void TensorProto::set_dims(int index, ::google::protobuf::int64 value) {\n  dims_.Set(index, value);\n  // @@protoc_insertion_point(field_set:caffe2.TensorProto.dims)\n}\ninline void TensorProto::add_dims(::google::protobuf::int64 value) {\n  dims_.Add(value);\n  // @@protoc_insertion_point(field_add:caffe2.TensorProto.dims)\n}\ninline const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&\nTensorProto::dims() const {\n  // @@protoc_insertion_point(field_list:caffe2.TensorProto.dims)\n  return dims_;\n}\ninline ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*\nTensorProto::mutable_dims() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.TensorProto.dims)\n  return &dims_;\n}\n\n// optional .caffe2.TensorProto.DataType data_type = 2 [default = FLOAT];\ninline bool TensorProto::has_data_type() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void TensorProto::set_has_data_type() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void TensorProto::clear_has_data_type() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void TensorProto::clear_data_type() {\n  data_type_ = 1;\n  clear_has_data_type();\n}\ninline ::caffe2::TensorProto_DataType TensorProto::data_type() const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorProto.data_type)\n  return static_cast< ::caffe2::TensorProto_DataType >(data_type_);\n}\ninline void TensorProto::set_data_type(::caffe2::TensorProto_DataType value) {\n  assert(::caffe2::TensorProto_DataType_IsValid(value));\n  set_has_data_type();\n  data_type_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.TensorProto.data_type)\n}\n\n// repeated float float_data = 3 [packed = true];\ninline int TensorProto::float_data_size() const {\n  
return float_data_.size();\n}\ninline void TensorProto::clear_float_data() {\n  float_data_.Clear();\n}\ninline float TensorProto::float_data(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorProto.float_data)\n  return float_data_.Get(index);\n}\ninline void TensorProto::set_float_data(int index, float value) {\n  float_data_.Set(index, value);\n  // @@protoc_insertion_point(field_set:caffe2.TensorProto.float_data)\n}\ninline void TensorProto::add_float_data(float value) {\n  float_data_.Add(value);\n  // @@protoc_insertion_point(field_add:caffe2.TensorProto.float_data)\n}\ninline const ::google::protobuf::RepeatedField< float >&\nTensorProto::float_data() const {\n  // @@protoc_insertion_point(field_list:caffe2.TensorProto.float_data)\n  return float_data_;\n}\ninline ::google::protobuf::RepeatedField< float >*\nTensorProto::mutable_float_data() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.TensorProto.float_data)\n  return &float_data_;\n}\n\n// repeated int32 int32_data = 4 [packed = true];\ninline int TensorProto::int32_data_size() const {\n  return int32_data_.size();\n}\ninline void TensorProto::clear_int32_data() {\n  int32_data_.Clear();\n}\ninline ::google::protobuf::int32 TensorProto::int32_data(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorProto.int32_data)\n  return int32_data_.Get(index);\n}\ninline void TensorProto::set_int32_data(int index, ::google::protobuf::int32 value) {\n  int32_data_.Set(index, value);\n  // @@protoc_insertion_point(field_set:caffe2.TensorProto.int32_data)\n}\ninline void TensorProto::add_int32_data(::google::protobuf::int32 value) {\n  int32_data_.Add(value);\n  // @@protoc_insertion_point(field_add:caffe2.TensorProto.int32_data)\n}\ninline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\nTensorProto::int32_data() const {\n  // @@protoc_insertion_point(field_list:caffe2.TensorProto.int32_data)\n  return int32_data_;\n}\ninline 
::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\nTensorProto::mutable_int32_data() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.TensorProto.int32_data)\n  return &int32_data_;\n}\n\n// optional bytes byte_data = 5;\ninline bool TensorProto::has_byte_data() const {\n  return (_has_bits_[0] & 0x00000010u) != 0;\n}\ninline void TensorProto::set_has_byte_data() {\n  _has_bits_[0] |= 0x00000010u;\n}\ninline void TensorProto::clear_has_byte_data() {\n  _has_bits_[0] &= ~0x00000010u;\n}\ninline void TensorProto::clear_byte_data() {\n  byte_data_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_byte_data();\n}\ninline const ::std::string& TensorProto::byte_data() const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorProto.byte_data)\n  return byte_data_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void TensorProto::set_byte_data(const ::std::string& value) {\n  set_has_byte_data();\n  byte_data_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.TensorProto.byte_data)\n}\ninline void TensorProto::set_byte_data(const char* value) {\n  set_has_byte_data();\n  byte_data_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.TensorProto.byte_data)\n}\ninline void TensorProto::set_byte_data(const void* value, size_t size) {\n  set_has_byte_data();\n  byte_data_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.TensorProto.byte_data)\n}\ninline ::std::string* TensorProto::mutable_byte_data() {\n  set_has_byte_data();\n  // @@protoc_insertion_point(field_mutable:caffe2.TensorProto.byte_data)\n  return 
byte_data_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* TensorProto::release_byte_data() {\n  // @@protoc_insertion_point(field_release:caffe2.TensorProto.byte_data)\n  clear_has_byte_data();\n  return byte_data_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void TensorProto::set_allocated_byte_data(::std::string* byte_data) {\n  if (byte_data != NULL) {\n    set_has_byte_data();\n  } else {\n    clear_has_byte_data();\n  }\n  byte_data_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), byte_data);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.TensorProto.byte_data)\n}\n\n// repeated bytes string_data = 6;\ninline int TensorProto::string_data_size() const {\n  return string_data_.size();\n}\ninline void TensorProto::clear_string_data() {\n  string_data_.Clear();\n}\ninline const ::std::string& TensorProto::string_data(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorProto.string_data)\n  return string_data_.Get(index);\n}\ninline ::std::string* TensorProto::mutable_string_data(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.TensorProto.string_data)\n  return string_data_.Mutable(index);\n}\ninline void TensorProto::set_string_data(int index, const ::std::string& value) {\n  // @@protoc_insertion_point(field_set:caffe2.TensorProto.string_data)\n  string_data_.Mutable(index)->assign(value);\n}\ninline void TensorProto::set_string_data(int index, const char* value) {\n  string_data_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:caffe2.TensorProto.string_data)\n}\ninline void TensorProto::set_string_data(int index, const void* value, size_t size) {\n  string_data_.Mutable(index)->assign(\n    reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_set_pointer:caffe2.TensorProto.string_data)\n}\ninline ::std::string* 
TensorProto::add_string_data() {\n  // @@protoc_insertion_point(field_add_mutable:caffe2.TensorProto.string_data)\n  return string_data_.Add();\n}\ninline void TensorProto::add_string_data(const ::std::string& value) {\n  string_data_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:caffe2.TensorProto.string_data)\n}\ninline void TensorProto::add_string_data(const char* value) {\n  string_data_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add_char:caffe2.TensorProto.string_data)\n}\ninline void TensorProto::add_string_data(const void* value, size_t size) {\n  string_data_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:caffe2.TensorProto.string_data)\n}\ninline const ::google::protobuf::RepeatedPtrField< ::std::string>&\nTensorProto::string_data() const {\n  // @@protoc_insertion_point(field_list:caffe2.TensorProto.string_data)\n  return string_data_;\n}\ninline ::google::protobuf::RepeatedPtrField< ::std::string>*\nTensorProto::mutable_string_data() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.TensorProto.string_data)\n  return &string_data_;\n}\n\n// repeated double double_data = 9 [packed = true];\ninline int TensorProto::double_data_size() const {\n  return double_data_.size();\n}\ninline void TensorProto::clear_double_data() {\n  double_data_.Clear();\n}\ninline double TensorProto::double_data(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorProto.double_data)\n  return double_data_.Get(index);\n}\ninline void TensorProto::set_double_data(int index, double value) {\n  double_data_.Set(index, value);\n  // @@protoc_insertion_point(field_set:caffe2.TensorProto.double_data)\n}\ninline void TensorProto::add_double_data(double value) {\n  double_data_.Add(value);\n  // @@protoc_insertion_point(field_add:caffe2.TensorProto.double_data)\n}\ninline const ::google::protobuf::RepeatedField< double >&\nTensorProto::double_data() const {\n  // 
@@protoc_insertion_point(field_list:caffe2.TensorProto.double_data)\n  return double_data_;\n}\ninline ::google::protobuf::RepeatedField< double >*\nTensorProto::mutable_double_data() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.TensorProto.double_data)\n  return &double_data_;\n}\n\n// repeated int64 int64_data = 10 [packed = true];\ninline int TensorProto::int64_data_size() const {\n  return int64_data_.size();\n}\ninline void TensorProto::clear_int64_data() {\n  int64_data_.Clear();\n}\ninline ::google::protobuf::int64 TensorProto::int64_data(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorProto.int64_data)\n  return int64_data_.Get(index);\n}\ninline void TensorProto::set_int64_data(int index, ::google::protobuf::int64 value) {\n  int64_data_.Set(index, value);\n  // @@protoc_insertion_point(field_set:caffe2.TensorProto.int64_data)\n}\ninline void TensorProto::add_int64_data(::google::protobuf::int64 value) {\n  int64_data_.Add(value);\n  // @@protoc_insertion_point(field_add:caffe2.TensorProto.int64_data)\n}\ninline const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&\nTensorProto::int64_data() const {\n  // @@protoc_insertion_point(field_list:caffe2.TensorProto.int64_data)\n  return int64_data_;\n}\ninline ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*\nTensorProto::mutable_int64_data() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.TensorProto.int64_data)\n  return &int64_data_;\n}\n\n// optional string name = 7;\ninline bool TensorProto::has_name() const {\n  return (_has_bits_[0] & 0x00000100u) != 0;\n}\ninline void TensorProto::set_has_name() {\n  _has_bits_[0] |= 0x00000100u;\n}\ninline void TensorProto::clear_has_name() {\n  _has_bits_[0] &= ~0x00000100u;\n}\ninline void TensorProto::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& TensorProto::name() const {\n 
 // @@protoc_insertion_point(field_get:caffe2.TensorProto.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void TensorProto::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.TensorProto.name)\n}\ninline void TensorProto::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.TensorProto.name)\n}\ninline void TensorProto::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.TensorProto.name)\n}\ninline ::std::string* TensorProto::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:caffe2.TensorProto.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* TensorProto::release_name() {\n  // @@protoc_insertion_point(field_release:caffe2.TensorProto.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void TensorProto::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.TensorProto.name)\n}\n\n// optional .caffe2.DeviceOption device_detail = 8;\ninline bool TensorProto::has_device_detail() const {\n  return (_has_bits_[0] & 0x00000200u) != 0;\n}\ninline void TensorProto::set_has_device_detail() {\n  
_has_bits_[0] |= 0x00000200u;\n}\ninline void TensorProto::clear_has_device_detail() {\n  _has_bits_[0] &= ~0x00000200u;\n}\ninline void TensorProto::clear_device_detail() {\n  if (device_detail_ != NULL) device_detail_->::caffe2::DeviceOption::Clear();\n  clear_has_device_detail();\n}\ninline const ::caffe2::DeviceOption& TensorProto::device_detail() const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorProto.device_detail)\n  return device_detail_ != NULL ? *device_detail_\n                         : *::caffe2::DeviceOption::internal_default_instance();\n}\ninline ::caffe2::DeviceOption* TensorProto::mutable_device_detail() {\n  set_has_device_detail();\n  if (device_detail_ == NULL) {\n    device_detail_ = new ::caffe2::DeviceOption;\n  }\n  // @@protoc_insertion_point(field_mutable:caffe2.TensorProto.device_detail)\n  return device_detail_;\n}\ninline ::caffe2::DeviceOption* TensorProto::release_device_detail() {\n  // @@protoc_insertion_point(field_release:caffe2.TensorProto.device_detail)\n  clear_has_device_detail();\n  ::caffe2::DeviceOption* temp = device_detail_;\n  device_detail_ = NULL;\n  return temp;\n}\ninline void TensorProto::set_allocated_device_detail(::caffe2::DeviceOption* device_detail) {\n  delete device_detail_;\n  device_detail_ = device_detail;\n  if (device_detail) {\n    set_has_device_detail();\n  } else {\n    clear_has_device_detail();\n  }\n  // @@protoc_insertion_point(field_set_allocated:caffe2.TensorProto.device_detail)\n}\n\n// optional .caffe2.TensorProto.Segment segment = 11;\ninline bool TensorProto::has_segment() const {\n  return (_has_bits_[0] & 0x00000400u) != 0;\n}\ninline void TensorProto::set_has_segment() {\n  _has_bits_[0] |= 0x00000400u;\n}\ninline void TensorProto::clear_has_segment() {\n  _has_bits_[0] &= ~0x00000400u;\n}\ninline void TensorProto::clear_segment() {\n  if (segment_ != NULL) segment_->::caffe2::TensorProto_Segment::Clear();\n  clear_has_segment();\n}\ninline const 
::caffe2::TensorProto_Segment& TensorProto::segment() const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorProto.segment)\n  return segment_ != NULL ? *segment_\n                         : *::caffe2::TensorProto_Segment::internal_default_instance();\n}\ninline ::caffe2::TensorProto_Segment* TensorProto::mutable_segment() {\n  set_has_segment();\n  if (segment_ == NULL) {\n    segment_ = new ::caffe2::TensorProto_Segment;\n  }\n  // @@protoc_insertion_point(field_mutable:caffe2.TensorProto.segment)\n  return segment_;\n}\ninline ::caffe2::TensorProto_Segment* TensorProto::release_segment() {\n  // @@protoc_insertion_point(field_release:caffe2.TensorProto.segment)\n  clear_has_segment();\n  ::caffe2::TensorProto_Segment* temp = segment_;\n  segment_ = NULL;\n  return temp;\n}\ninline void TensorProto::set_allocated_segment(::caffe2::TensorProto_Segment* segment) {\n  delete segment_;\n  segment_ = segment;\n  if (segment) {\n    set_has_segment();\n  } else {\n    clear_has_segment();\n  }\n  // @@protoc_insertion_point(field_set_allocated:caffe2.TensorProto.segment)\n}\n\ninline const TensorProto* TensorProto::internal_default_instance() {\n  return &TensorProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// QTensorProto\n\n// repeated int64 dims = 1;\ninline int QTensorProto::dims_size() const {\n  return dims_.size();\n}\ninline void QTensorProto::clear_dims() {\n  dims_.Clear();\n}\ninline ::google::protobuf::int64 QTensorProto::dims(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.QTensorProto.dims)\n  return dims_.Get(index);\n}\ninline void QTensorProto::set_dims(int index, ::google::protobuf::int64 value) {\n  dims_.Set(index, value);\n  // @@protoc_insertion_point(field_set:caffe2.QTensorProto.dims)\n}\ninline void QTensorProto::add_dims(::google::protobuf::int64 value) {\n  dims_.Add(value);\n  // @@protoc_insertion_point(field_add:caffe2.QTensorProto.dims)\n}\ninline 
const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&\nQTensorProto::dims() const {\n  // @@protoc_insertion_point(field_list:caffe2.QTensorProto.dims)\n  return dims_;\n}\ninline ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*\nQTensorProto::mutable_dims() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.QTensorProto.dims)\n  return &dims_;\n}\n\n// required int32 precision = 2;\ninline bool QTensorProto::has_precision() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void QTensorProto::set_has_precision() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void QTensorProto::clear_has_precision() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void QTensorProto::clear_precision() {\n  precision_ = 0;\n  clear_has_precision();\n}\ninline ::google::protobuf::int32 QTensorProto::precision() const {\n  // @@protoc_insertion_point(field_get:caffe2.QTensorProto.precision)\n  return precision_;\n}\ninline void QTensorProto::set_precision(::google::protobuf::int32 value) {\n  set_has_precision();\n  precision_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.QTensorProto.precision)\n}\n\n// required double scale = 3;\ninline bool QTensorProto::has_scale() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void QTensorProto::set_has_scale() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void QTensorProto::clear_has_scale() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void QTensorProto::clear_scale() {\n  scale_ = 0;\n  clear_has_scale();\n}\ninline double QTensorProto::scale() const {\n  // @@protoc_insertion_point(field_get:caffe2.QTensorProto.scale)\n  return scale_;\n}\ninline void QTensorProto::set_scale(double value) {\n  set_has_scale();\n  scale_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.QTensorProto.scale)\n}\n\n// required double bias = 4;\ninline bool QTensorProto::has_bias() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void QTensorProto::set_has_bias() 
{\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void QTensorProto::clear_has_bias() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void QTensorProto::clear_bias() {\n  bias_ = 0;\n  clear_has_bias();\n}\ninline double QTensorProto::bias() const {\n  // @@protoc_insertion_point(field_get:caffe2.QTensorProto.bias)\n  return bias_;\n}\ninline void QTensorProto::set_bias(double value) {\n  set_has_bias();\n  bias_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.QTensorProto.bias)\n}\n\n// required bool is_signed = 5;\ninline bool QTensorProto::has_is_signed() const {\n  return (_has_bits_[0] & 0x00000010u) != 0;\n}\ninline void QTensorProto::set_has_is_signed() {\n  _has_bits_[0] |= 0x00000010u;\n}\ninline void QTensorProto::clear_has_is_signed() {\n  _has_bits_[0] &= ~0x00000010u;\n}\ninline void QTensorProto::clear_is_signed() {\n  is_signed_ = false;\n  clear_has_is_signed();\n}\ninline bool QTensorProto::is_signed() const {\n  // @@protoc_insertion_point(field_get:caffe2.QTensorProto.is_signed)\n  return is_signed_;\n}\ninline void QTensorProto::set_is_signed(bool value) {\n  set_has_is_signed();\n  is_signed_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.QTensorProto.is_signed)\n}\n\n// repeated int32 data = 6 [packed = true];\ninline int QTensorProto::data_size() const {\n  return data_.size();\n}\ninline void QTensorProto::clear_data() {\n  data_.Clear();\n}\ninline ::google::protobuf::int32 QTensorProto::data(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.QTensorProto.data)\n  return data_.Get(index);\n}\ninline void QTensorProto::set_data(int index, ::google::protobuf::int32 value) {\n  data_.Set(index, value);\n  // @@protoc_insertion_point(field_set:caffe2.QTensorProto.data)\n}\ninline void QTensorProto::add_data(::google::protobuf::int32 value) {\n  data_.Add(value);\n  // @@protoc_insertion_point(field_add:caffe2.QTensorProto.data)\n}\ninline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 
>&\nQTensorProto::data() const {\n  // @@protoc_insertion_point(field_list:caffe2.QTensorProto.data)\n  return data_;\n}\ninline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\nQTensorProto::mutable_data() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.QTensorProto.data)\n  return &data_;\n}\n\n// optional string name = 7;\ninline bool QTensorProto::has_name() const {\n  return (_has_bits_[0] & 0x00000040u) != 0;\n}\ninline void QTensorProto::set_has_name() {\n  _has_bits_[0] |= 0x00000040u;\n}\ninline void QTensorProto::clear_has_name() {\n  _has_bits_[0] &= ~0x00000040u;\n}\ninline void QTensorProto::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& QTensorProto::name() const {\n  // @@protoc_insertion_point(field_get:caffe2.QTensorProto.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void QTensorProto::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.QTensorProto.name)\n}\ninline void QTensorProto::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.QTensorProto.name)\n}\ninline void QTensorProto::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.QTensorProto.name)\n}\ninline ::std::string* QTensorProto::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:caffe2.QTensorProto.name)\n  return 
name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* QTensorProto::release_name() {\n  // @@protoc_insertion_point(field_release:caffe2.QTensorProto.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void QTensorProto::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.QTensorProto.name)\n}\n\ninline const QTensorProto* QTensorProto::internal_default_instance() {\n  return &QTensorProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// TensorProtos\n\n// repeated .caffe2.TensorProto protos = 1;\ninline int TensorProtos::protos_size() const {\n  return protos_.size();\n}\ninline void TensorProtos::clear_protos() {\n  protos_.Clear();\n}\ninline const ::caffe2::TensorProto& TensorProtos::protos(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorProtos.protos)\n  return protos_.Get(index);\n}\ninline ::caffe2::TensorProto* TensorProtos::mutable_protos(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.TensorProtos.protos)\n  return protos_.Mutable(index);\n}\ninline ::caffe2::TensorProto* TensorProtos::add_protos() {\n  // @@protoc_insertion_point(field_add:caffe2.TensorProtos.protos)\n  return protos_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::TensorProto >*\nTensorProtos::mutable_protos() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.TensorProtos.protos)\n  return &protos_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::TensorProto >&\nTensorProtos::protos() const {\n  // @@protoc_insertion_point(field_list:caffe2.TensorProtos.protos)\n  return 
protos_;\n}\n\ninline const TensorProtos* TensorProtos::internal_default_instance() {\n  return &TensorProtos_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// TensorShape\n\n// repeated int64 dims = 1;\ninline int TensorShape::dims_size() const {\n  return dims_.size();\n}\ninline void TensorShape::clear_dims() {\n  dims_.Clear();\n}\ninline ::google::protobuf::int64 TensorShape::dims(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorShape.dims)\n  return dims_.Get(index);\n}\ninline void TensorShape::set_dims(int index, ::google::protobuf::int64 value) {\n  dims_.Set(index, value);\n  // @@protoc_insertion_point(field_set:caffe2.TensorShape.dims)\n}\ninline void TensorShape::add_dims(::google::protobuf::int64 value) {\n  dims_.Add(value);\n  // @@protoc_insertion_point(field_add:caffe2.TensorShape.dims)\n}\ninline const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&\nTensorShape::dims() const {\n  // @@protoc_insertion_point(field_list:caffe2.TensorShape.dims)\n  return dims_;\n}\ninline ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*\nTensorShape::mutable_dims() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.TensorShape.dims)\n  return &dims_;\n}\n\n// optional .caffe2.TensorProto.DataType data_type = 2 [default = FLOAT];\ninline bool TensorShape::has_data_type() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void TensorShape::set_has_data_type() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void TensorShape::clear_has_data_type() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void TensorShape::clear_data_type() {\n  data_type_ = 1;\n  clear_has_data_type();\n}\ninline ::caffe2::TensorProto_DataType TensorShape::data_type() const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorShape.data_type)\n  return static_cast< ::caffe2::TensorProto_DataType >(data_type_);\n}\ninline void 
TensorShape::set_data_type(::caffe2::TensorProto_DataType value) {\n  assert(::caffe2::TensorProto_DataType_IsValid(value));\n  set_has_data_type();\n  data_type_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.TensorShape.data_type)\n}\n\n// repeated int32 unknown_dims = 3;\ninline int TensorShape::unknown_dims_size() const {\n  return unknown_dims_.size();\n}\ninline void TensorShape::clear_unknown_dims() {\n  unknown_dims_.Clear();\n}\ninline ::google::protobuf::int32 TensorShape::unknown_dims(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorShape.unknown_dims)\n  return unknown_dims_.Get(index);\n}\ninline void TensorShape::set_unknown_dims(int index, ::google::protobuf::int32 value) {\n  unknown_dims_.Set(index, value);\n  // @@protoc_insertion_point(field_set:caffe2.TensorShape.unknown_dims)\n}\ninline void TensorShape::add_unknown_dims(::google::protobuf::int32 value) {\n  unknown_dims_.Add(value);\n  // @@protoc_insertion_point(field_add:caffe2.TensorShape.unknown_dims)\n}\ninline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\nTensorShape::unknown_dims() const {\n  // @@protoc_insertion_point(field_list:caffe2.TensorShape.unknown_dims)\n  return unknown_dims_;\n}\ninline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\nTensorShape::mutable_unknown_dims() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.TensorShape.unknown_dims)\n  return &unknown_dims_;\n}\n\n// optional bool unknown_shape = 4 [default = false];\ninline bool TensorShape::has_unknown_shape() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void TensorShape::set_has_unknown_shape() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void TensorShape::clear_has_unknown_shape() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void TensorShape::clear_unknown_shape() {\n  unknown_shape_ = false;\n  clear_has_unknown_shape();\n}\ninline bool TensorShape::unknown_shape() const {\n  // 
@@protoc_insertion_point(field_get:caffe2.TensorShape.unknown_shape)\n  return unknown_shape_;\n}\ninline void TensorShape::set_unknown_shape(bool value) {\n  set_has_unknown_shape();\n  unknown_shape_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.TensorShape.unknown_shape)\n}\n\n// optional string name = 5;\ninline bool TensorShape::has_name() const {\n  return (_has_bits_[0] & 0x00000010u) != 0;\n}\ninline void TensorShape::set_has_name() {\n  _has_bits_[0] |= 0x00000010u;\n}\ninline void TensorShape::clear_has_name() {\n  _has_bits_[0] &= ~0x00000010u;\n}\ninline void TensorShape::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& TensorShape::name() const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorShape.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void TensorShape::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.TensorShape.name)\n}\ninline void TensorShape::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.TensorShape.name)\n}\ninline void TensorShape::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.TensorShape.name)\n}\ninline ::std::string* TensorShape::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:caffe2.TensorShape.name)\n  return 
name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* TensorShape::release_name() {\n  // @@protoc_insertion_point(field_release:caffe2.TensorShape.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void TensorShape::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.TensorShape.name)\n}\n\ninline const TensorShape* TensorShape::internal_default_instance() {\n  return &TensorShape_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// TensorShapes\n\n// repeated .caffe2.TensorShape shapes = 1;\ninline int TensorShapes::shapes_size() const {\n  return shapes_.size();\n}\ninline void TensorShapes::clear_shapes() {\n  shapes_.Clear();\n}\ninline const ::caffe2::TensorShape& TensorShapes::shapes(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.TensorShapes.shapes)\n  return shapes_.Get(index);\n}\ninline ::caffe2::TensorShape* TensorShapes::mutable_shapes(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.TensorShapes.shapes)\n  return shapes_.Mutable(index);\n}\ninline ::caffe2::TensorShape* TensorShapes::add_shapes() {\n  // @@protoc_insertion_point(field_add:caffe2.TensorShapes.shapes)\n  return shapes_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::TensorShape >*\nTensorShapes::mutable_shapes() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.TensorShapes.shapes)\n  return &shapes_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::TensorShape >&\nTensorShapes::shapes() const {\n  // @@protoc_insertion_point(field_list:caffe2.TensorShapes.shapes)\n  return shapes_;\n}\n\ninline 
const TensorShapes* TensorShapes::internal_default_instance() {\n  return &TensorShapes_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// Argument\n\n// optional string name = 1;\ninline bool Argument::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void Argument::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void Argument::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void Argument::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& Argument::name() const {\n  // @@protoc_insertion_point(field_get:caffe2.Argument.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Argument::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.Argument.name)\n}\ninline void Argument::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.Argument.name)\n}\ninline void Argument::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.Argument.name)\n}\ninline ::std::string* Argument::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:caffe2.Argument.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* Argument::release_name() {\n  // @@protoc_insertion_point(field_release:caffe2.Argument.name)\n  
clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Argument::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.Argument.name)\n}\n\n// optional float f = 2;\ninline bool Argument::has_f() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void Argument::set_has_f() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void Argument::clear_has_f() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void Argument::clear_f() {\n  f_ = 0;\n  clear_has_f();\n}\ninline float Argument::f() const {\n  // @@protoc_insertion_point(field_get:caffe2.Argument.f)\n  return f_;\n}\ninline void Argument::set_f(float value) {\n  set_has_f();\n  f_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.Argument.f)\n}\n\n// optional int64 i = 3;\ninline bool Argument::has_i() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void Argument::set_has_i() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void Argument::clear_has_i() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void Argument::clear_i() {\n  i_ = GOOGLE_LONGLONG(0);\n  clear_has_i();\n}\ninline ::google::protobuf::int64 Argument::i() const {\n  // @@protoc_insertion_point(field_get:caffe2.Argument.i)\n  return i_;\n}\ninline void Argument::set_i(::google::protobuf::int64 value) {\n  set_has_i();\n  i_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.Argument.i)\n}\n\n// optional bytes s = 4;\ninline bool Argument::has_s() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void Argument::set_has_s() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void Argument::clear_has_s() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void Argument::clear_s() {\n  
s_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_s();\n}\ninline const ::std::string& Argument::s() const {\n  // @@protoc_insertion_point(field_get:caffe2.Argument.s)\n  return s_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Argument::set_s(const ::std::string& value) {\n  set_has_s();\n  s_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.Argument.s)\n}\ninline void Argument::set_s(const char* value) {\n  set_has_s();\n  s_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.Argument.s)\n}\ninline void Argument::set_s(const void* value, size_t size) {\n  set_has_s();\n  s_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.Argument.s)\n}\ninline ::std::string* Argument::mutable_s() {\n  set_has_s();\n  // @@protoc_insertion_point(field_mutable:caffe2.Argument.s)\n  return s_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* Argument::release_s() {\n  // @@protoc_insertion_point(field_release:caffe2.Argument.s)\n  clear_has_s();\n  return s_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Argument::set_allocated_s(::std::string* s) {\n  if (s != NULL) {\n    set_has_s();\n  } else {\n    clear_has_s();\n  }\n  s_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), s);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.Argument.s)\n}\n\n// optional .caffe2.NetDef n = 8;\ninline bool Argument::has_n() const {\n  return (_has_bits_[0] & 0x00000010u) != 0;\n}\ninline void Argument::set_has_n() {\n  _has_bits_[0] |= 
0x00000010u;\n}\ninline void Argument::clear_has_n() {\n  _has_bits_[0] &= ~0x00000010u;\n}\ninline void Argument::clear_n() {\n  if (n_ != NULL) n_->::caffe2::NetDef::Clear();\n  clear_has_n();\n}\ninline const ::caffe2::NetDef& Argument::n() const {\n  // @@protoc_insertion_point(field_get:caffe2.Argument.n)\n  return n_ != NULL ? *n_\n                         : *::caffe2::NetDef::internal_default_instance();\n}\ninline ::caffe2::NetDef* Argument::mutable_n() {\n  set_has_n();\n  if (n_ == NULL) {\n    n_ = new ::caffe2::NetDef;\n  }\n  // @@protoc_insertion_point(field_mutable:caffe2.Argument.n)\n  return n_;\n}\ninline ::caffe2::NetDef* Argument::release_n() {\n  // @@protoc_insertion_point(field_release:caffe2.Argument.n)\n  clear_has_n();\n  ::caffe2::NetDef* temp = n_;\n  n_ = NULL;\n  return temp;\n}\ninline void Argument::set_allocated_n(::caffe2::NetDef* n) {\n  delete n_;\n  n_ = n;\n  if (n) {\n    set_has_n();\n  } else {\n    clear_has_n();\n  }\n  // @@protoc_insertion_point(field_set_allocated:caffe2.Argument.n)\n}\n\n// repeated float floats = 5;\ninline int Argument::floats_size() const {\n  return floats_.size();\n}\ninline void Argument::clear_floats() {\n  floats_.Clear();\n}\ninline float Argument::floats(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.Argument.floats)\n  return floats_.Get(index);\n}\ninline void Argument::set_floats(int index, float value) {\n  floats_.Set(index, value);\n  // @@protoc_insertion_point(field_set:caffe2.Argument.floats)\n}\ninline void Argument::add_floats(float value) {\n  floats_.Add(value);\n  // @@protoc_insertion_point(field_add:caffe2.Argument.floats)\n}\ninline const ::google::protobuf::RepeatedField< float >&\nArgument::floats() const {\n  // @@protoc_insertion_point(field_list:caffe2.Argument.floats)\n  return floats_;\n}\ninline ::google::protobuf::RepeatedField< float >*\nArgument::mutable_floats() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.Argument.floats)\n  
return &floats_;\n}\n\n// repeated int64 ints = 6;\ninline int Argument::ints_size() const {\n  return ints_.size();\n}\ninline void Argument::clear_ints() {\n  ints_.Clear();\n}\ninline ::google::protobuf::int64 Argument::ints(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.Argument.ints)\n  return ints_.Get(index);\n}\ninline void Argument::set_ints(int index, ::google::protobuf::int64 value) {\n  ints_.Set(index, value);\n  // @@protoc_insertion_point(field_set:caffe2.Argument.ints)\n}\ninline void Argument::add_ints(::google::protobuf::int64 value) {\n  ints_.Add(value);\n  // @@protoc_insertion_point(field_add:caffe2.Argument.ints)\n}\ninline const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >&\nArgument::ints() const {\n  // @@protoc_insertion_point(field_list:caffe2.Argument.ints)\n  return ints_;\n}\ninline ::google::protobuf::RepeatedField< ::google::protobuf::int64 >*\nArgument::mutable_ints() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.Argument.ints)\n  return &ints_;\n}\n\n// repeated bytes strings = 7;\ninline int Argument::strings_size() const {\n  return strings_.size();\n}\ninline void Argument::clear_strings() {\n  strings_.Clear();\n}\ninline const ::std::string& Argument::strings(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.Argument.strings)\n  return strings_.Get(index);\n}\ninline ::std::string* Argument::mutable_strings(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.Argument.strings)\n  return strings_.Mutable(index);\n}\ninline void Argument::set_strings(int index, const ::std::string& value) {\n  // @@protoc_insertion_point(field_set:caffe2.Argument.strings)\n  strings_.Mutable(index)->assign(value);\n}\ninline void Argument::set_strings(int index, const char* value) {\n  strings_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:caffe2.Argument.strings)\n}\ninline void Argument::set_strings(int index, const void* value, 
size_t size) {\n  strings_.Mutable(index)->assign(\n    reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_set_pointer:caffe2.Argument.strings)\n}\ninline ::std::string* Argument::add_strings() {\n  // @@protoc_insertion_point(field_add_mutable:caffe2.Argument.strings)\n  return strings_.Add();\n}\ninline void Argument::add_strings(const ::std::string& value) {\n  strings_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:caffe2.Argument.strings)\n}\ninline void Argument::add_strings(const char* value) {\n  strings_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add_char:caffe2.Argument.strings)\n}\ninline void Argument::add_strings(const void* value, size_t size) {\n  strings_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:caffe2.Argument.strings)\n}\ninline const ::google::protobuf::RepeatedPtrField< ::std::string>&\nArgument::strings() const {\n  // @@protoc_insertion_point(field_list:caffe2.Argument.strings)\n  return strings_;\n}\ninline ::google::protobuf::RepeatedPtrField< ::std::string>*\nArgument::mutable_strings() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.Argument.strings)\n  return &strings_;\n}\n\n// repeated .caffe2.NetDef nets = 9;\ninline int Argument::nets_size() const {\n  return nets_.size();\n}\ninline void Argument::clear_nets() {\n  nets_.Clear();\n}\ninline const ::caffe2::NetDef& Argument::nets(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.Argument.nets)\n  return nets_.Get(index);\n}\ninline ::caffe2::NetDef* Argument::mutable_nets(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.Argument.nets)\n  return nets_.Mutable(index);\n}\ninline ::caffe2::NetDef* Argument::add_nets() {\n  // @@protoc_insertion_point(field_add:caffe2.Argument.nets)\n  return nets_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::NetDef >*\nArgument::mutable_nets() {\n  // 
@@protoc_insertion_point(field_mutable_list:caffe2.Argument.nets)\n  return &nets_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::NetDef >&\nArgument::nets() const {\n  // @@protoc_insertion_point(field_list:caffe2.Argument.nets)\n  return nets_;\n}\n\ninline const Argument* Argument::internal_default_instance() {\n  return &Argument_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// DeviceOption\n\n// optional int32 device_type = 1 [default = 0];\ninline bool DeviceOption::has_device_type() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void DeviceOption::set_has_device_type() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void DeviceOption::clear_has_device_type() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void DeviceOption::clear_device_type() {\n  device_type_ = 0;\n  clear_has_device_type();\n}\ninline ::google::protobuf::int32 DeviceOption::device_type() const {\n  // @@protoc_insertion_point(field_get:caffe2.DeviceOption.device_type)\n  return device_type_;\n}\ninline void DeviceOption::set_device_type(::google::protobuf::int32 value) {\n  set_has_device_type();\n  device_type_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.DeviceOption.device_type)\n}\n\n// optional int32 cuda_gpu_id = 2;\ninline bool DeviceOption::has_cuda_gpu_id() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void DeviceOption::set_has_cuda_gpu_id() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void DeviceOption::clear_has_cuda_gpu_id() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void DeviceOption::clear_cuda_gpu_id() {\n  cuda_gpu_id_ = 0;\n  clear_has_cuda_gpu_id();\n}\ninline ::google::protobuf::int32 DeviceOption::cuda_gpu_id() const {\n  // @@protoc_insertion_point(field_get:caffe2.DeviceOption.cuda_gpu_id)\n  return cuda_gpu_id_;\n}\ninline void DeviceOption::set_cuda_gpu_id(::google::protobuf::int32 value) {\n  set_has_cuda_gpu_id();\n  cuda_gpu_id_ = 
value;\n  // @@protoc_insertion_point(field_set:caffe2.DeviceOption.cuda_gpu_id)\n}\n\n// optional uint32 random_seed = 3;\ninline bool DeviceOption::has_random_seed() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void DeviceOption::set_has_random_seed() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void DeviceOption::clear_has_random_seed() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void DeviceOption::clear_random_seed() {\n  random_seed_ = 0u;\n  clear_has_random_seed();\n}\ninline ::google::protobuf::uint32 DeviceOption::random_seed() const {\n  // @@protoc_insertion_point(field_get:caffe2.DeviceOption.random_seed)\n  return random_seed_;\n}\ninline void DeviceOption::set_random_seed(::google::protobuf::uint32 value) {\n  set_has_random_seed();\n  random_seed_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.DeviceOption.random_seed)\n}\n\n// optional string node_name = 4;\ninline bool DeviceOption::has_node_name() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void DeviceOption::set_has_node_name() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void DeviceOption::clear_has_node_name() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void DeviceOption::clear_node_name() {\n  node_name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_node_name();\n}\ninline const ::std::string& DeviceOption::node_name() const {\n  // @@protoc_insertion_point(field_get:caffe2.DeviceOption.node_name)\n  return node_name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void DeviceOption::set_node_name(const ::std::string& value) {\n  set_has_node_name();\n  node_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.DeviceOption.node_name)\n}\ninline void DeviceOption::set_node_name(const char* value) {\n  set_has_node_name();\n  
node_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.DeviceOption.node_name)\n}\ninline void DeviceOption::set_node_name(const char* value, size_t size) {\n  set_has_node_name();\n  node_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.DeviceOption.node_name)\n}\ninline ::std::string* DeviceOption::mutable_node_name() {\n  set_has_node_name();\n  // @@protoc_insertion_point(field_mutable:caffe2.DeviceOption.node_name)\n  return node_name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* DeviceOption::release_node_name() {\n  // @@protoc_insertion_point(field_release:caffe2.DeviceOption.node_name)\n  clear_has_node_name();\n  return node_name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void DeviceOption::set_allocated_node_name(::std::string* node_name) {\n  if (node_name != NULL) {\n    set_has_node_name();\n  } else {\n    clear_has_node_name();\n  }\n  node_name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), node_name);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.DeviceOption.node_name)\n}\n\ninline const DeviceOption* DeviceOption::internal_default_instance() {\n  return &DeviceOption_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// OperatorDef\n\n// repeated string input = 1;\ninline int OperatorDef::input_size() const {\n  return input_.size();\n}\ninline void OperatorDef::clear_input() {\n  input_.Clear();\n}\ninline const ::std::string& OperatorDef::input(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.OperatorDef.input)\n  return input_.Get(index);\n}\ninline ::std::string* 
OperatorDef::mutable_input(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.OperatorDef.input)\n  return input_.Mutable(index);\n}\ninline void OperatorDef::set_input(int index, const ::std::string& value) {\n  // @@protoc_insertion_point(field_set:caffe2.OperatorDef.input)\n  input_.Mutable(index)->assign(value);\n}\ninline void OperatorDef::set_input(int index, const char* value) {\n  input_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:caffe2.OperatorDef.input)\n}\ninline void OperatorDef::set_input(int index, const char* value, size_t size) {\n  input_.Mutable(index)->assign(\n    reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_set_pointer:caffe2.OperatorDef.input)\n}\ninline ::std::string* OperatorDef::add_input() {\n  // @@protoc_insertion_point(field_add_mutable:caffe2.OperatorDef.input)\n  return input_.Add();\n}\ninline void OperatorDef::add_input(const ::std::string& value) {\n  input_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:caffe2.OperatorDef.input)\n}\ninline void OperatorDef::add_input(const char* value) {\n  input_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add_char:caffe2.OperatorDef.input)\n}\ninline void OperatorDef::add_input(const char* value, size_t size) {\n  input_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:caffe2.OperatorDef.input)\n}\ninline const ::google::protobuf::RepeatedPtrField< ::std::string>&\nOperatorDef::input() const {\n  // @@protoc_insertion_point(field_list:caffe2.OperatorDef.input)\n  return input_;\n}\ninline ::google::protobuf::RepeatedPtrField< ::std::string>*\nOperatorDef::mutable_input() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.OperatorDef.input)\n  return &input_;\n}\n\n// repeated string output = 2;\ninline int OperatorDef::output_size() const {\n  return output_.size();\n}\ninline void OperatorDef::clear_output() {\n  
output_.Clear();\n}\ninline const ::std::string& OperatorDef::output(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.OperatorDef.output)\n  return output_.Get(index);\n}\ninline ::std::string* OperatorDef::mutable_output(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.OperatorDef.output)\n  return output_.Mutable(index);\n}\ninline void OperatorDef::set_output(int index, const ::std::string& value) {\n  // @@protoc_insertion_point(field_set:caffe2.OperatorDef.output)\n  output_.Mutable(index)->assign(value);\n}\ninline void OperatorDef::set_output(int index, const char* value) {\n  output_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:caffe2.OperatorDef.output)\n}\ninline void OperatorDef::set_output(int index, const char* value, size_t size) {\n  output_.Mutable(index)->assign(\n    reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_set_pointer:caffe2.OperatorDef.output)\n}\ninline ::std::string* OperatorDef::add_output() {\n  // @@protoc_insertion_point(field_add_mutable:caffe2.OperatorDef.output)\n  return output_.Add();\n}\ninline void OperatorDef::add_output(const ::std::string& value) {\n  output_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:caffe2.OperatorDef.output)\n}\ninline void OperatorDef::add_output(const char* value) {\n  output_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add_char:caffe2.OperatorDef.output)\n}\ninline void OperatorDef::add_output(const char* value, size_t size) {\n  output_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:caffe2.OperatorDef.output)\n}\ninline const ::google::protobuf::RepeatedPtrField< ::std::string>&\nOperatorDef::output() const {\n  // @@protoc_insertion_point(field_list:caffe2.OperatorDef.output)\n  return output_;\n}\ninline ::google::protobuf::RepeatedPtrField< ::std::string>*\nOperatorDef::mutable_output() {\n  // 
@@protoc_insertion_point(field_mutable_list:caffe2.OperatorDef.output)\n  return &output_;\n}\n\n// optional string name = 3;\ninline bool OperatorDef::has_name() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void OperatorDef::set_has_name() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void OperatorDef::clear_has_name() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void OperatorDef::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& OperatorDef::name() const {\n  // @@protoc_insertion_point(field_get:caffe2.OperatorDef.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void OperatorDef::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.OperatorDef.name)\n}\ninline void OperatorDef::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.OperatorDef.name)\n}\ninline void OperatorDef::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.OperatorDef.name)\n}\ninline ::std::string* OperatorDef::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:caffe2.OperatorDef.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* OperatorDef::release_name() {\n  // @@protoc_insertion_point(field_release:caffe2.OperatorDef.name)\n  clear_has_name();\n  return 
name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void OperatorDef::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.OperatorDef.name)\n}\n\n// optional string type = 4;\ninline bool OperatorDef::has_type() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void OperatorDef::set_has_type() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void OperatorDef::clear_has_type() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void OperatorDef::clear_type() {\n  type_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_type();\n}\ninline const ::std::string& OperatorDef::type() const {\n  // @@protoc_insertion_point(field_get:caffe2.OperatorDef.type)\n  return type_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void OperatorDef::set_type(const ::std::string& value) {\n  set_has_type();\n  type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.OperatorDef.type)\n}\ninline void OperatorDef::set_type(const char* value) {\n  set_has_type();\n  type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.OperatorDef.type)\n}\ninline void OperatorDef::set_type(const char* value, size_t size) {\n  set_has_type();\n  type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.OperatorDef.type)\n}\ninline ::std::string* OperatorDef::mutable_type() {\n  set_has_type();\n  // 
@@protoc_insertion_point(field_mutable:caffe2.OperatorDef.type)\n  return type_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* OperatorDef::release_type() {\n  // @@protoc_insertion_point(field_release:caffe2.OperatorDef.type)\n  clear_has_type();\n  return type_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void OperatorDef::set_allocated_type(::std::string* type) {\n  if (type != NULL) {\n    set_has_type();\n  } else {\n    clear_has_type();\n  }\n  type_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), type);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.OperatorDef.type)\n}\n\n// repeated .caffe2.Argument arg = 5;\ninline int OperatorDef::arg_size() const {\n  return arg_.size();\n}\ninline void OperatorDef::clear_arg() {\n  arg_.Clear();\n}\ninline const ::caffe2::Argument& OperatorDef::arg(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.OperatorDef.arg)\n  return arg_.Get(index);\n}\ninline ::caffe2::Argument* OperatorDef::mutable_arg(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.OperatorDef.arg)\n  return arg_.Mutable(index);\n}\ninline ::caffe2::Argument* OperatorDef::add_arg() {\n  // @@protoc_insertion_point(field_add:caffe2.OperatorDef.arg)\n  return arg_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::Argument >*\nOperatorDef::mutable_arg() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.OperatorDef.arg)\n  return &arg_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::Argument >&\nOperatorDef::arg() const {\n  // @@protoc_insertion_point(field_list:caffe2.OperatorDef.arg)\n  return arg_;\n}\n\n// optional .caffe2.DeviceOption device_option = 6;\ninline bool OperatorDef::has_device_option() const {\n  return (_has_bits_[0] & 0x00000020u) != 0;\n}\ninline void OperatorDef::set_has_device_option() {\n  _has_bits_[0] |= 
0x00000020u;\n}\ninline void OperatorDef::clear_has_device_option() {\n  _has_bits_[0] &= ~0x00000020u;\n}\ninline void OperatorDef::clear_device_option() {\n  if (device_option_ != NULL) device_option_->::caffe2::DeviceOption::Clear();\n  clear_has_device_option();\n}\ninline const ::caffe2::DeviceOption& OperatorDef::device_option() const {\n  // @@protoc_insertion_point(field_get:caffe2.OperatorDef.device_option)\n  return device_option_ != NULL ? *device_option_\n                         : *::caffe2::DeviceOption::internal_default_instance();\n}\ninline ::caffe2::DeviceOption* OperatorDef::mutable_device_option() {\n  set_has_device_option();\n  if (device_option_ == NULL) {\n    device_option_ = new ::caffe2::DeviceOption;\n  }\n  // @@protoc_insertion_point(field_mutable:caffe2.OperatorDef.device_option)\n  return device_option_;\n}\ninline ::caffe2::DeviceOption* OperatorDef::release_device_option() {\n  // @@protoc_insertion_point(field_release:caffe2.OperatorDef.device_option)\n  clear_has_device_option();\n  ::caffe2::DeviceOption* temp = device_option_;\n  device_option_ = NULL;\n  return temp;\n}\ninline void OperatorDef::set_allocated_device_option(::caffe2::DeviceOption* device_option) {\n  delete device_option_;\n  device_option_ = device_option;\n  if (device_option) {\n    set_has_device_option();\n  } else {\n    clear_has_device_option();\n  }\n  // @@protoc_insertion_point(field_set_allocated:caffe2.OperatorDef.device_option)\n}\n\n// optional string engine = 7;\ninline bool OperatorDef::has_engine() const {\n  return (_has_bits_[0] & 0x00000040u) != 0;\n}\ninline void OperatorDef::set_has_engine() {\n  _has_bits_[0] |= 0x00000040u;\n}\ninline void OperatorDef::clear_has_engine() {\n  _has_bits_[0] &= ~0x00000040u;\n}\ninline void OperatorDef::clear_engine() {\n  engine_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_engine();\n}\ninline const ::std::string& OperatorDef::engine() const {\n  // 
@@protoc_insertion_point(field_get:caffe2.OperatorDef.engine)\n  return engine_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void OperatorDef::set_engine(const ::std::string& value) {\n  set_has_engine();\n  engine_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.OperatorDef.engine)\n}\ninline void OperatorDef::set_engine(const char* value) {\n  set_has_engine();\n  engine_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.OperatorDef.engine)\n}\ninline void OperatorDef::set_engine(const char* value, size_t size) {\n  set_has_engine();\n  engine_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.OperatorDef.engine)\n}\ninline ::std::string* OperatorDef::mutable_engine() {\n  set_has_engine();\n  // @@protoc_insertion_point(field_mutable:caffe2.OperatorDef.engine)\n  return engine_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* OperatorDef::release_engine() {\n  // @@protoc_insertion_point(field_release:caffe2.OperatorDef.engine)\n  clear_has_engine();\n  return engine_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void OperatorDef::set_allocated_engine(::std::string* engine) {\n  if (engine != NULL) {\n    set_has_engine();\n  } else {\n    clear_has_engine();\n  }\n  engine_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), engine);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.OperatorDef.engine)\n}\n\n// repeated string control_input = 8;\ninline int OperatorDef::control_input_size() const {\n  return control_input_.size();\n}\ninline void 
OperatorDef::clear_control_input() {\n  control_input_.Clear();\n}\ninline const ::std::string& OperatorDef::control_input(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.OperatorDef.control_input)\n  return control_input_.Get(index);\n}\ninline ::std::string* OperatorDef::mutable_control_input(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.OperatorDef.control_input)\n  return control_input_.Mutable(index);\n}\ninline void OperatorDef::set_control_input(int index, const ::std::string& value) {\n  // @@protoc_insertion_point(field_set:caffe2.OperatorDef.control_input)\n  control_input_.Mutable(index)->assign(value);\n}\ninline void OperatorDef::set_control_input(int index, const char* value) {\n  control_input_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:caffe2.OperatorDef.control_input)\n}\ninline void OperatorDef::set_control_input(int index, const char* value, size_t size) {\n  control_input_.Mutable(index)->assign(\n    reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_set_pointer:caffe2.OperatorDef.control_input)\n}\ninline ::std::string* OperatorDef::add_control_input() {\n  // @@protoc_insertion_point(field_add_mutable:caffe2.OperatorDef.control_input)\n  return control_input_.Add();\n}\ninline void OperatorDef::add_control_input(const ::std::string& value) {\n  control_input_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:caffe2.OperatorDef.control_input)\n}\ninline void OperatorDef::add_control_input(const char* value) {\n  control_input_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add_char:caffe2.OperatorDef.control_input)\n}\ninline void OperatorDef::add_control_input(const char* value, size_t size) {\n  control_input_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:caffe2.OperatorDef.control_input)\n}\ninline const ::google::protobuf::RepeatedPtrField< 
::std::string>&\nOperatorDef::control_input() const {\n  // @@protoc_insertion_point(field_list:caffe2.OperatorDef.control_input)\n  return control_input_;\n}\ninline ::google::protobuf::RepeatedPtrField< ::std::string>*\nOperatorDef::mutable_control_input() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.OperatorDef.control_input)\n  return &control_input_;\n}\n\n// optional bool is_gradient_op = 9 [default = false];\ninline bool OperatorDef::has_is_gradient_op() const {\n  return (_has_bits_[0] & 0x00000100u) != 0;\n}\ninline void OperatorDef::set_has_is_gradient_op() {\n  _has_bits_[0] |= 0x00000100u;\n}\ninline void OperatorDef::clear_has_is_gradient_op() {\n  _has_bits_[0] &= ~0x00000100u;\n}\ninline void OperatorDef::clear_is_gradient_op() {\n  is_gradient_op_ = false;\n  clear_has_is_gradient_op();\n}\ninline bool OperatorDef::is_gradient_op() const {\n  // @@protoc_insertion_point(field_get:caffe2.OperatorDef.is_gradient_op)\n  return is_gradient_op_;\n}\ninline void OperatorDef::set_is_gradient_op(bool value) {\n  set_has_is_gradient_op();\n  is_gradient_op_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.OperatorDef.is_gradient_op)\n}\n\ninline const OperatorDef* OperatorDef::internal_default_instance() {\n  return &OperatorDef_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// NetDef\n\n// optional string name = 1;\ninline bool NetDef::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void NetDef::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void NetDef::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void NetDef::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& NetDef::name() const {\n  // @@protoc_insertion_point(field_get:caffe2.NetDef.name)\n  return 
name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void NetDef::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.NetDef.name)\n}\ninline void NetDef::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.NetDef.name)\n}\ninline void NetDef::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.NetDef.name)\n}\ninline ::std::string* NetDef::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:caffe2.NetDef.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* NetDef::release_name() {\n  // @@protoc_insertion_point(field_release:caffe2.NetDef.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void NetDef::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.NetDef.name)\n}\n\n// repeated .caffe2.OperatorDef op = 2;\ninline int NetDef::op_size() const {\n  return op_.size();\n}\ninline void NetDef::clear_op() {\n  op_.Clear();\n}\ninline const ::caffe2::OperatorDef& NetDef::op(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.NetDef.op)\n  return op_.Get(index);\n}\ninline ::caffe2::OperatorDef* 
NetDef::mutable_op(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.NetDef.op)\n  return op_.Mutable(index);\n}\ninline ::caffe2::OperatorDef* NetDef::add_op() {\n  // @@protoc_insertion_point(field_add:caffe2.NetDef.op)\n  return op_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::OperatorDef >*\nNetDef::mutable_op() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.NetDef.op)\n  return &op_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::OperatorDef >&\nNetDef::op() const {\n  // @@protoc_insertion_point(field_list:caffe2.NetDef.op)\n  return op_;\n}\n\n// optional string type = 3;\ninline bool NetDef::has_type() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void NetDef::set_has_type() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void NetDef::clear_has_type() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void NetDef::clear_type() {\n  type_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_type();\n}\ninline const ::std::string& NetDef::type() const {\n  // @@protoc_insertion_point(field_get:caffe2.NetDef.type)\n  return type_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void NetDef::set_type(const ::std::string& value) {\n  set_has_type();\n  type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.NetDef.type)\n}\ninline void NetDef::set_type(const char* value) {\n  set_has_type();\n  type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.NetDef.type)\n}\ninline void NetDef::set_type(const char* value, size_t size) {\n  set_has_type();\n  type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // 
@@protoc_insertion_point(field_set_pointer:caffe2.NetDef.type)\n}\ninline ::std::string* NetDef::mutable_type() {\n  set_has_type();\n  // @@protoc_insertion_point(field_mutable:caffe2.NetDef.type)\n  return type_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* NetDef::release_type() {\n  // @@protoc_insertion_point(field_release:caffe2.NetDef.type)\n  clear_has_type();\n  return type_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void NetDef::set_allocated_type(::std::string* type) {\n  if (type != NULL) {\n    set_has_type();\n  } else {\n    clear_has_type();\n  }\n  type_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), type);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.NetDef.type)\n}\n\n// optional int32 num_workers = 4 [deprecated = true];\ninline bool NetDef::has_num_workers() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void NetDef::set_has_num_workers() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void NetDef::clear_has_num_workers() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void NetDef::clear_num_workers() {\n  num_workers_ = 0;\n  clear_has_num_workers();\n}\ninline ::google::protobuf::int32 NetDef::num_workers() const {\n  // @@protoc_insertion_point(field_get:caffe2.NetDef.num_workers)\n  return num_workers_;\n}\ninline void NetDef::set_num_workers(::google::protobuf::int32 value) {\n  set_has_num_workers();\n  num_workers_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.NetDef.num_workers)\n}\n\n// optional .caffe2.DeviceOption device_option = 5;\ninline bool NetDef::has_device_option() const {\n  return (_has_bits_[0] & 0x00000010u) != 0;\n}\ninline void NetDef::set_has_device_option() {\n  _has_bits_[0] |= 0x00000010u;\n}\ninline void NetDef::clear_has_device_option() {\n  _has_bits_[0] &= ~0x00000010u;\n}\ninline void NetDef::clear_device_option() {\n  if 
(device_option_ != NULL) device_option_->::caffe2::DeviceOption::Clear();\n  clear_has_device_option();\n}\ninline const ::caffe2::DeviceOption& NetDef::device_option() const {\n  // @@protoc_insertion_point(field_get:caffe2.NetDef.device_option)\n  return device_option_ != NULL ? *device_option_\n                         : *::caffe2::DeviceOption::internal_default_instance();\n}\ninline ::caffe2::DeviceOption* NetDef::mutable_device_option() {\n  set_has_device_option();\n  if (device_option_ == NULL) {\n    device_option_ = new ::caffe2::DeviceOption;\n  }\n  // @@protoc_insertion_point(field_mutable:caffe2.NetDef.device_option)\n  return device_option_;\n}\ninline ::caffe2::DeviceOption* NetDef::release_device_option() {\n  // @@protoc_insertion_point(field_release:caffe2.NetDef.device_option)\n  clear_has_device_option();\n  ::caffe2::DeviceOption* temp = device_option_;\n  device_option_ = NULL;\n  return temp;\n}\ninline void NetDef::set_allocated_device_option(::caffe2::DeviceOption* device_option) {\n  delete device_option_;\n  device_option_ = device_option;\n  if (device_option) {\n    set_has_device_option();\n  } else {\n    clear_has_device_option();\n  }\n  // @@protoc_insertion_point(field_set_allocated:caffe2.NetDef.device_option)\n}\n\n// repeated .caffe2.Argument arg = 6;\ninline int NetDef::arg_size() const {\n  return arg_.size();\n}\ninline void NetDef::clear_arg() {\n  arg_.Clear();\n}\ninline const ::caffe2::Argument& NetDef::arg(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.NetDef.arg)\n  return arg_.Get(index);\n}\ninline ::caffe2::Argument* NetDef::mutable_arg(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.NetDef.arg)\n  return arg_.Mutable(index);\n}\ninline ::caffe2::Argument* NetDef::add_arg() {\n  // @@protoc_insertion_point(field_add:caffe2.NetDef.arg)\n  return arg_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::Argument >*\nNetDef::mutable_arg() {\n  // 
@@protoc_insertion_point(field_mutable_list:caffe2.NetDef.arg)\n  return &arg_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::Argument >&\nNetDef::arg() const {\n  // @@protoc_insertion_point(field_list:caffe2.NetDef.arg)\n  return arg_;\n}\n\n// repeated string external_input = 7;\ninline int NetDef::external_input_size() const {\n  return external_input_.size();\n}\ninline void NetDef::clear_external_input() {\n  external_input_.Clear();\n}\ninline const ::std::string& NetDef::external_input(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.NetDef.external_input)\n  return external_input_.Get(index);\n}\ninline ::std::string* NetDef::mutable_external_input(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.NetDef.external_input)\n  return external_input_.Mutable(index);\n}\ninline void NetDef::set_external_input(int index, const ::std::string& value) {\n  // @@protoc_insertion_point(field_set:caffe2.NetDef.external_input)\n  external_input_.Mutable(index)->assign(value);\n}\ninline void NetDef::set_external_input(int index, const char* value) {\n  external_input_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:caffe2.NetDef.external_input)\n}\ninline void NetDef::set_external_input(int index, const char* value, size_t size) {\n  external_input_.Mutable(index)->assign(\n    reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_set_pointer:caffe2.NetDef.external_input)\n}\ninline ::std::string* NetDef::add_external_input() {\n  // @@protoc_insertion_point(field_add_mutable:caffe2.NetDef.external_input)\n  return external_input_.Add();\n}\ninline void NetDef::add_external_input(const ::std::string& value) {\n  external_input_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:caffe2.NetDef.external_input)\n}\ninline void NetDef::add_external_input(const char* value) {\n  external_input_.Add()->assign(value);\n  // 
@@protoc_insertion_point(field_add_char:caffe2.NetDef.external_input)\n}\ninline void NetDef::add_external_input(const char* value, size_t size) {\n  external_input_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:caffe2.NetDef.external_input)\n}\ninline const ::google::protobuf::RepeatedPtrField< ::std::string>&\nNetDef::external_input() const {\n  // @@protoc_insertion_point(field_list:caffe2.NetDef.external_input)\n  return external_input_;\n}\ninline ::google::protobuf::RepeatedPtrField< ::std::string>*\nNetDef::mutable_external_input() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.NetDef.external_input)\n  return &external_input_;\n}\n\n// repeated string external_output = 8;\ninline int NetDef::external_output_size() const {\n  return external_output_.size();\n}\ninline void NetDef::clear_external_output() {\n  external_output_.Clear();\n}\ninline const ::std::string& NetDef::external_output(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.NetDef.external_output)\n  return external_output_.Get(index);\n}\ninline ::std::string* NetDef::mutable_external_output(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.NetDef.external_output)\n  return external_output_.Mutable(index);\n}\ninline void NetDef::set_external_output(int index, const ::std::string& value) {\n  // @@protoc_insertion_point(field_set:caffe2.NetDef.external_output)\n  external_output_.Mutable(index)->assign(value);\n}\ninline void NetDef::set_external_output(int index, const char* value) {\n  external_output_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:caffe2.NetDef.external_output)\n}\ninline void NetDef::set_external_output(int index, const char* value, size_t size) {\n  external_output_.Mutable(index)->assign(\n    reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_set_pointer:caffe2.NetDef.external_output)\n}\ninline 
::std::string* NetDef::add_external_output() {\n  // @@protoc_insertion_point(field_add_mutable:caffe2.NetDef.external_output)\n  return external_output_.Add();\n}\ninline void NetDef::add_external_output(const ::std::string& value) {\n  external_output_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:caffe2.NetDef.external_output)\n}\ninline void NetDef::add_external_output(const char* value) {\n  external_output_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add_char:caffe2.NetDef.external_output)\n}\ninline void NetDef::add_external_output(const char* value, size_t size) {\n  external_output_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:caffe2.NetDef.external_output)\n}\ninline const ::google::protobuf::RepeatedPtrField< ::std::string>&\nNetDef::external_output() const {\n  // @@protoc_insertion_point(field_list:caffe2.NetDef.external_output)\n  return external_output_;\n}\ninline ::google::protobuf::RepeatedPtrField< ::std::string>*\nNetDef::mutable_external_output() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.NetDef.external_output)\n  return &external_output_;\n}\n\ninline const NetDef* NetDef::internal_default_instance() {\n  return &NetDef_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// ExecutionStep\n\n// optional string name = 1;\ninline bool ExecutionStep::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void ExecutionStep::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void ExecutionStep::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void ExecutionStep::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& ExecutionStep::name() const {\n  // @@protoc_insertion_point(field_get:caffe2.ExecutionStep.name)\n  return 
name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ExecutionStep::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.ExecutionStep.name)\n}\ninline void ExecutionStep::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.ExecutionStep.name)\n}\ninline void ExecutionStep::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.ExecutionStep.name)\n}\ninline ::std::string* ExecutionStep::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:caffe2.ExecutionStep.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* ExecutionStep::release_name() {\n  // @@protoc_insertion_point(field_release:caffe2.ExecutionStep.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ExecutionStep::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.ExecutionStep.name)\n}\n\n// repeated .caffe2.ExecutionStep substep = 2;\ninline int ExecutionStep::substep_size() const {\n  return substep_.size();\n}\ninline void ExecutionStep::clear_substep() {\n  substep_.Clear();\n}\ninline const ::caffe2::ExecutionStep& ExecutionStep::substep(int index) 
const {\n  // @@protoc_insertion_point(field_get:caffe2.ExecutionStep.substep)\n  return substep_.Get(index);\n}\ninline ::caffe2::ExecutionStep* ExecutionStep::mutable_substep(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.ExecutionStep.substep)\n  return substep_.Mutable(index);\n}\ninline ::caffe2::ExecutionStep* ExecutionStep::add_substep() {\n  // @@protoc_insertion_point(field_add:caffe2.ExecutionStep.substep)\n  return substep_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::ExecutionStep >*\nExecutionStep::mutable_substep() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.ExecutionStep.substep)\n  return &substep_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::ExecutionStep >&\nExecutionStep::substep() const {\n  // @@protoc_insertion_point(field_list:caffe2.ExecutionStep.substep)\n  return substep_;\n}\n\n// repeated string network = 3;\ninline int ExecutionStep::network_size() const {\n  return network_.size();\n}\ninline void ExecutionStep::clear_network() {\n  network_.Clear();\n}\ninline const ::std::string& ExecutionStep::network(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.ExecutionStep.network)\n  return network_.Get(index);\n}\ninline ::std::string* ExecutionStep::mutable_network(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.ExecutionStep.network)\n  return network_.Mutable(index);\n}\ninline void ExecutionStep::set_network(int index, const ::std::string& value) {\n  // @@protoc_insertion_point(field_set:caffe2.ExecutionStep.network)\n  network_.Mutable(index)->assign(value);\n}\ninline void ExecutionStep::set_network(int index, const char* value) {\n  network_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:caffe2.ExecutionStep.network)\n}\ninline void ExecutionStep::set_network(int index, const char* value, size_t size) {\n  network_.Mutable(index)->assign(\n    reinterpret_cast<const char*>(value), size);\n  // 
@@protoc_insertion_point(field_set_pointer:caffe2.ExecutionStep.network)\n}\ninline ::std::string* ExecutionStep::add_network() {\n  // @@protoc_insertion_point(field_add_mutable:caffe2.ExecutionStep.network)\n  return network_.Add();\n}\ninline void ExecutionStep::add_network(const ::std::string& value) {\n  network_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:caffe2.ExecutionStep.network)\n}\ninline void ExecutionStep::add_network(const char* value) {\n  network_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add_char:caffe2.ExecutionStep.network)\n}\ninline void ExecutionStep::add_network(const char* value, size_t size) {\n  network_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:caffe2.ExecutionStep.network)\n}\ninline const ::google::protobuf::RepeatedPtrField< ::std::string>&\nExecutionStep::network() const {\n  // @@protoc_insertion_point(field_list:caffe2.ExecutionStep.network)\n  return network_;\n}\ninline ::google::protobuf::RepeatedPtrField< ::std::string>*\nExecutionStep::mutable_network() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.ExecutionStep.network)\n  return &network_;\n}\n\n// optional int64 num_iter = 4;\ninline bool ExecutionStep::has_num_iter() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void ExecutionStep::set_has_num_iter() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void ExecutionStep::clear_has_num_iter() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void ExecutionStep::clear_num_iter() {\n  num_iter_ = GOOGLE_LONGLONG(0);\n  clear_has_num_iter();\n}\ninline ::google::protobuf::int64 ExecutionStep::num_iter() const {\n  // @@protoc_insertion_point(field_get:caffe2.ExecutionStep.num_iter)\n  return num_iter_;\n}\ninline void ExecutionStep::set_num_iter(::google::protobuf::int64 value) {\n  set_has_num_iter();\n  num_iter_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.ExecutionStep.num_iter)\n}\n\n// 
optional string criteria_network = 5 [deprecated = true];\ninline bool ExecutionStep::has_criteria_network() const {\n  return (_has_bits_[0] & 0x00000010u) != 0;\n}\ninline void ExecutionStep::set_has_criteria_network() {\n  _has_bits_[0] |= 0x00000010u;\n}\ninline void ExecutionStep::clear_has_criteria_network() {\n  _has_bits_[0] &= ~0x00000010u;\n}\ninline void ExecutionStep::clear_criteria_network() {\n  criteria_network_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_criteria_network();\n}\ninline const ::std::string& ExecutionStep::criteria_network() const {\n  // @@protoc_insertion_point(field_get:caffe2.ExecutionStep.criteria_network)\n  return criteria_network_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ExecutionStep::set_criteria_network(const ::std::string& value) {\n  set_has_criteria_network();\n  criteria_network_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.ExecutionStep.criteria_network)\n}\ninline void ExecutionStep::set_criteria_network(const char* value) {\n  set_has_criteria_network();\n  criteria_network_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.ExecutionStep.criteria_network)\n}\ninline void ExecutionStep::set_criteria_network(const char* value, size_t size) {\n  set_has_criteria_network();\n  criteria_network_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.ExecutionStep.criteria_network)\n}\ninline ::std::string* ExecutionStep::mutable_criteria_network() {\n  set_has_criteria_network();\n  // @@protoc_insertion_point(field_mutable:caffe2.ExecutionStep.criteria_network)\n  return 
criteria_network_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* ExecutionStep::release_criteria_network() {\n  // @@protoc_insertion_point(field_release:caffe2.ExecutionStep.criteria_network)\n  clear_has_criteria_network();\n  return criteria_network_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ExecutionStep::set_allocated_criteria_network(::std::string* criteria_network) {\n  if (criteria_network != NULL) {\n    set_has_criteria_network();\n  } else {\n    clear_has_criteria_network();\n  }\n  criteria_network_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), criteria_network);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.ExecutionStep.criteria_network)\n}\n\n// optional string report_net = 7;\ninline bool ExecutionStep::has_report_net() const {\n  return (_has_bits_[0] & 0x00000020u) != 0;\n}\ninline void ExecutionStep::set_has_report_net() {\n  _has_bits_[0] |= 0x00000020u;\n}\ninline void ExecutionStep::clear_has_report_net() {\n  _has_bits_[0] &= ~0x00000020u;\n}\ninline void ExecutionStep::clear_report_net() {\n  report_net_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_report_net();\n}\ninline const ::std::string& ExecutionStep::report_net() const {\n  // @@protoc_insertion_point(field_get:caffe2.ExecutionStep.report_net)\n  return report_net_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ExecutionStep::set_report_net(const ::std::string& value) {\n  set_has_report_net();\n  report_net_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.ExecutionStep.report_net)\n}\ninline void ExecutionStep::set_report_net(const char* value) {\n  set_has_report_net();\n  
report_net_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.ExecutionStep.report_net)\n}\ninline void ExecutionStep::set_report_net(const char* value, size_t size) {\n  set_has_report_net();\n  report_net_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.ExecutionStep.report_net)\n}\ninline ::std::string* ExecutionStep::mutable_report_net() {\n  set_has_report_net();\n  // @@protoc_insertion_point(field_mutable:caffe2.ExecutionStep.report_net)\n  return report_net_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* ExecutionStep::release_report_net() {\n  // @@protoc_insertion_point(field_release:caffe2.ExecutionStep.report_net)\n  clear_has_report_net();\n  return report_net_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ExecutionStep::set_allocated_report_net(::std::string* report_net) {\n  if (report_net != NULL) {\n    set_has_report_net();\n  } else {\n    clear_has_report_net();\n  }\n  report_net_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), report_net);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.ExecutionStep.report_net)\n}\n\n// optional int32 report_interval = 8;\ninline bool ExecutionStep::has_report_interval() const {\n  return (_has_bits_[0] & 0x00000040u) != 0;\n}\ninline void ExecutionStep::set_has_report_interval() {\n  _has_bits_[0] |= 0x00000040u;\n}\ninline void ExecutionStep::clear_has_report_interval() {\n  _has_bits_[0] &= ~0x00000040u;\n}\ninline void ExecutionStep::clear_report_interval() {\n  report_interval_ = 0;\n  clear_has_report_interval();\n}\ninline ::google::protobuf::int32 ExecutionStep::report_interval() const {\n  // 
@@protoc_insertion_point(field_get:caffe2.ExecutionStep.report_interval)\n  return report_interval_;\n}\ninline void ExecutionStep::set_report_interval(::google::protobuf::int32 value) {\n  set_has_report_interval();\n  report_interval_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.ExecutionStep.report_interval)\n}\n\n// optional int64 run_every_ms = 11;\ninline bool ExecutionStep::has_run_every_ms() const {\n  return (_has_bits_[0] & 0x00000080u) != 0;\n}\ninline void ExecutionStep::set_has_run_every_ms() {\n  _has_bits_[0] |= 0x00000080u;\n}\ninline void ExecutionStep::clear_has_run_every_ms() {\n  _has_bits_[0] &= ~0x00000080u;\n}\ninline void ExecutionStep::clear_run_every_ms() {\n  run_every_ms_ = GOOGLE_LONGLONG(0);\n  clear_has_run_every_ms();\n}\ninline ::google::protobuf::int64 ExecutionStep::run_every_ms() const {\n  // @@protoc_insertion_point(field_get:caffe2.ExecutionStep.run_every_ms)\n  return run_every_ms_;\n}\ninline void ExecutionStep::set_run_every_ms(::google::protobuf::int64 value) {\n  set_has_run_every_ms();\n  run_every_ms_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.ExecutionStep.run_every_ms)\n}\n\n// optional bool concurrent_substeps = 6;\ninline bool ExecutionStep::has_concurrent_substeps() const {\n  return (_has_bits_[0] & 0x00000100u) != 0;\n}\ninline void ExecutionStep::set_has_concurrent_substeps() {\n  _has_bits_[0] |= 0x00000100u;\n}\ninline void ExecutionStep::clear_has_concurrent_substeps() {\n  _has_bits_[0] &= ~0x00000100u;\n}\ninline void ExecutionStep::clear_concurrent_substeps() {\n  concurrent_substeps_ = false;\n  clear_has_concurrent_substeps();\n}\ninline bool ExecutionStep::concurrent_substeps() const {\n  // @@protoc_insertion_point(field_get:caffe2.ExecutionStep.concurrent_substeps)\n  return concurrent_substeps_;\n}\ninline void ExecutionStep::set_concurrent_substeps(bool value) {\n  set_has_concurrent_substeps();\n  concurrent_substeps_ = value;\n  // 
@@protoc_insertion_point(field_set:caffe2.ExecutionStep.concurrent_substeps)\n}\n\n// optional string should_stop_blob = 9;\ninline bool ExecutionStep::has_should_stop_blob() const {\n  return (_has_bits_[0] & 0x00000200u) != 0;\n}\ninline void ExecutionStep::set_has_should_stop_blob() {\n  _has_bits_[0] |= 0x00000200u;\n}\ninline void ExecutionStep::clear_has_should_stop_blob() {\n  _has_bits_[0] &= ~0x00000200u;\n}\ninline void ExecutionStep::clear_should_stop_blob() {\n  should_stop_blob_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_should_stop_blob();\n}\ninline const ::std::string& ExecutionStep::should_stop_blob() const {\n  // @@protoc_insertion_point(field_get:caffe2.ExecutionStep.should_stop_blob)\n  return should_stop_blob_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ExecutionStep::set_should_stop_blob(const ::std::string& value) {\n  set_has_should_stop_blob();\n  should_stop_blob_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.ExecutionStep.should_stop_blob)\n}\ninline void ExecutionStep::set_should_stop_blob(const char* value) {\n  set_has_should_stop_blob();\n  should_stop_blob_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.ExecutionStep.should_stop_blob)\n}\ninline void ExecutionStep::set_should_stop_blob(const char* value, size_t size) {\n  set_has_should_stop_blob();\n  should_stop_blob_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.ExecutionStep.should_stop_blob)\n}\ninline ::std::string* ExecutionStep::mutable_should_stop_blob() {\n  set_has_should_stop_blob();\n  // 
@@protoc_insertion_point(field_mutable:caffe2.ExecutionStep.should_stop_blob)\n  return should_stop_blob_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* ExecutionStep::release_should_stop_blob() {\n  // @@protoc_insertion_point(field_release:caffe2.ExecutionStep.should_stop_blob)\n  clear_has_should_stop_blob();\n  return should_stop_blob_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ExecutionStep::set_allocated_should_stop_blob(::std::string* should_stop_blob) {\n  if (should_stop_blob != NULL) {\n    set_has_should_stop_blob();\n  } else {\n    clear_has_should_stop_blob();\n  }\n  should_stop_blob_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), should_stop_blob);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.ExecutionStep.should_stop_blob)\n}\n\n// optional bool only_once = 10;\ninline bool ExecutionStep::has_only_once() const {\n  return (_has_bits_[0] & 0x00000400u) != 0;\n}\ninline void ExecutionStep::set_has_only_once() {\n  _has_bits_[0] |= 0x00000400u;\n}\ninline void ExecutionStep::clear_has_only_once() {\n  _has_bits_[0] &= ~0x00000400u;\n}\ninline void ExecutionStep::clear_only_once() {\n  only_once_ = false;\n  clear_has_only_once();\n}\ninline bool ExecutionStep::only_once() const {\n  // @@protoc_insertion_point(field_get:caffe2.ExecutionStep.only_once)\n  return only_once_;\n}\ninline void ExecutionStep::set_only_once(bool value) {\n  set_has_only_once();\n  only_once_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.ExecutionStep.only_once)\n}\n\n// optional bool create_workspace = 12;\ninline bool ExecutionStep::has_create_workspace() const {\n  return (_has_bits_[0] & 0x00000800u) != 0;\n}\ninline void ExecutionStep::set_has_create_workspace() {\n  _has_bits_[0] |= 0x00000800u;\n}\ninline void ExecutionStep::clear_has_create_workspace() {\n  _has_bits_[0] &= ~0x00000800u;\n}\ninline void 
ExecutionStep::clear_create_workspace() {\n  create_workspace_ = false;\n  clear_has_create_workspace();\n}\ninline bool ExecutionStep::create_workspace() const {\n  // @@protoc_insertion_point(field_get:caffe2.ExecutionStep.create_workspace)\n  return create_workspace_;\n}\ninline void ExecutionStep::set_create_workspace(bool value) {\n  set_has_create_workspace();\n  create_workspace_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.ExecutionStep.create_workspace)\n}\n\n// optional int32 num_concurrent_instances = 13;\ninline bool ExecutionStep::has_num_concurrent_instances() const {\n  return (_has_bits_[0] & 0x00001000u) != 0;\n}\ninline void ExecutionStep::set_has_num_concurrent_instances() {\n  _has_bits_[0] |= 0x00001000u;\n}\ninline void ExecutionStep::clear_has_num_concurrent_instances() {\n  _has_bits_[0] &= ~0x00001000u;\n}\ninline void ExecutionStep::clear_num_concurrent_instances() {\n  num_concurrent_instances_ = 0;\n  clear_has_num_concurrent_instances();\n}\ninline ::google::protobuf::int32 ExecutionStep::num_concurrent_instances() const {\n  // @@protoc_insertion_point(field_get:caffe2.ExecutionStep.num_concurrent_instances)\n  return num_concurrent_instances_;\n}\ninline void ExecutionStep::set_num_concurrent_instances(::google::protobuf::int32 value) {\n  set_has_num_concurrent_instances();\n  num_concurrent_instances_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.ExecutionStep.num_concurrent_instances)\n}\n\ninline const ExecutionStep* ExecutionStep::internal_default_instance() {\n  return &ExecutionStep_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// PlanDef\n\n// optional string name = 1;\ninline bool PlanDef::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void PlanDef::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void PlanDef::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void PlanDef::clear_name() {\n  
name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& PlanDef::name() const {\n  // @@protoc_insertion_point(field_get:caffe2.PlanDef.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void PlanDef::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.PlanDef.name)\n}\ninline void PlanDef::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.PlanDef.name)\n}\ninline void PlanDef::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.PlanDef.name)\n}\ninline ::std::string* PlanDef::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:caffe2.PlanDef.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* PlanDef::release_name() {\n  // @@protoc_insertion_point(field_release:caffe2.PlanDef.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void PlanDef::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PlanDef.name)\n}\n\n// repeated .caffe2.NetDef network = 2;\ninline int PlanDef::network_size() const {\n  return 
network_.size();\n}\ninline void PlanDef::clear_network() {\n  network_.Clear();\n}\ninline const ::caffe2::NetDef& PlanDef::network(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.PlanDef.network)\n  return network_.Get(index);\n}\ninline ::caffe2::NetDef* PlanDef::mutable_network(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.PlanDef.network)\n  return network_.Mutable(index);\n}\ninline ::caffe2::NetDef* PlanDef::add_network() {\n  // @@protoc_insertion_point(field_add:caffe2.PlanDef.network)\n  return network_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::NetDef >*\nPlanDef::mutable_network() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.PlanDef.network)\n  return &network_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::NetDef >&\nPlanDef::network() const {\n  // @@protoc_insertion_point(field_list:caffe2.PlanDef.network)\n  return network_;\n}\n\n// repeated .caffe2.ExecutionStep execution_step = 3;\ninline int PlanDef::execution_step_size() const {\n  return execution_step_.size();\n}\ninline void PlanDef::clear_execution_step() {\n  execution_step_.Clear();\n}\ninline const ::caffe2::ExecutionStep& PlanDef::execution_step(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.PlanDef.execution_step)\n  return execution_step_.Get(index);\n}\ninline ::caffe2::ExecutionStep* PlanDef::mutable_execution_step(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.PlanDef.execution_step)\n  return execution_step_.Mutable(index);\n}\ninline ::caffe2::ExecutionStep* PlanDef::add_execution_step() {\n  // @@protoc_insertion_point(field_add:caffe2.PlanDef.execution_step)\n  return execution_step_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::ExecutionStep >*\nPlanDef::mutable_execution_step() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.PlanDef.execution_step)\n  return &execution_step_;\n}\ninline const 
::google::protobuf::RepeatedPtrField< ::caffe2::ExecutionStep >&\nPlanDef::execution_step() const {\n  // @@protoc_insertion_point(field_list:caffe2.PlanDef.execution_step)\n  return execution_step_;\n}\n\ninline const PlanDef* PlanDef::internal_default_instance() {\n  return &PlanDef_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// BlobProto\n\n// optional string name = 1;\ninline bool BlobProto::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void BlobProto::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void BlobProto::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void BlobProto::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& BlobProto::name() const {\n  // @@protoc_insertion_point(field_get:caffe2.BlobProto.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void BlobProto::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.BlobProto.name)\n}\ninline void BlobProto::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.BlobProto.name)\n}\ninline void BlobProto::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.BlobProto.name)\n}\ninline ::std::string* BlobProto::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:caffe2.BlobProto.name)\n  return 
name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* BlobProto::release_name() {\n  // @@protoc_insertion_point(field_release:caffe2.BlobProto.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void BlobProto::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.BlobProto.name)\n}\n\n// optional string type = 2;\ninline bool BlobProto::has_type() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void BlobProto::set_has_type() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void BlobProto::clear_has_type() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void BlobProto::clear_type() {\n  type_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_type();\n}\ninline const ::std::string& BlobProto::type() const {\n  // @@protoc_insertion_point(field_get:caffe2.BlobProto.type)\n  return type_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void BlobProto::set_type(const ::std::string& value) {\n  set_has_type();\n  type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.BlobProto.type)\n}\ninline void BlobProto::set_type(const char* value) {\n  set_has_type();\n  type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.BlobProto.type)\n}\ninline void BlobProto::set_type(const char* value, size_t size) {\n  set_has_type();\n  type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const 
char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.BlobProto.type)\n}\ninline ::std::string* BlobProto::mutable_type() {\n  set_has_type();\n  // @@protoc_insertion_point(field_mutable:caffe2.BlobProto.type)\n  return type_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* BlobProto::release_type() {\n  // @@protoc_insertion_point(field_release:caffe2.BlobProto.type)\n  clear_has_type();\n  return type_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void BlobProto::set_allocated_type(::std::string* type) {\n  if (type != NULL) {\n    set_has_type();\n  } else {\n    clear_has_type();\n  }\n  type_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), type);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.BlobProto.type)\n}\n\n// optional .caffe2.TensorProto tensor = 3;\ninline bool BlobProto::has_tensor() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void BlobProto::set_has_tensor() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void BlobProto::clear_has_tensor() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void BlobProto::clear_tensor() {\n  if (tensor_ != NULL) tensor_->::caffe2::TensorProto::Clear();\n  clear_has_tensor();\n}\ninline const ::caffe2::TensorProto& BlobProto::tensor() const {\n  // @@protoc_insertion_point(field_get:caffe2.BlobProto.tensor)\n  return tensor_ != NULL ? 
*tensor_\n                         : *::caffe2::TensorProto::internal_default_instance();\n}\ninline ::caffe2::TensorProto* BlobProto::mutable_tensor() {\n  set_has_tensor();\n  if (tensor_ == NULL) {\n    tensor_ = new ::caffe2::TensorProto;\n  }\n  // @@protoc_insertion_point(field_mutable:caffe2.BlobProto.tensor)\n  return tensor_;\n}\ninline ::caffe2::TensorProto* BlobProto::release_tensor() {\n  // @@protoc_insertion_point(field_release:caffe2.BlobProto.tensor)\n  clear_has_tensor();\n  ::caffe2::TensorProto* temp = tensor_;\n  tensor_ = NULL;\n  return temp;\n}\ninline void BlobProto::set_allocated_tensor(::caffe2::TensorProto* tensor) {\n  delete tensor_;\n  tensor_ = tensor;\n  if (tensor) {\n    set_has_tensor();\n  } else {\n    clear_has_tensor();\n  }\n  // @@protoc_insertion_point(field_set_allocated:caffe2.BlobProto.tensor)\n}\n\n// optional bytes content = 4;\ninline bool BlobProto::has_content() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void BlobProto::set_has_content() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void BlobProto::clear_has_content() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void BlobProto::clear_content() {\n  content_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_content();\n}\ninline const ::std::string& BlobProto::content() const {\n  // @@protoc_insertion_point(field_get:caffe2.BlobProto.content)\n  return content_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void BlobProto::set_content(const ::std::string& value) {\n  set_has_content();\n  content_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.BlobProto.content)\n}\ninline void BlobProto::set_content(const char* value) {\n  set_has_content();\n  content_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // 
@@protoc_insertion_point(field_set_char:caffe2.BlobProto.content)\n}\ninline void BlobProto::set_content(const void* value, size_t size) {\n  set_has_content();\n  content_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.BlobProto.content)\n}\ninline ::std::string* BlobProto::mutable_content() {\n  set_has_content();\n  // @@protoc_insertion_point(field_mutable:caffe2.BlobProto.content)\n  return content_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* BlobProto::release_content() {\n  // @@protoc_insertion_point(field_release:caffe2.BlobProto.content)\n  clear_has_content();\n  return content_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void BlobProto::set_allocated_content(::std::string* content) {\n  if (content != NULL) {\n    set_has_content();\n  } else {\n    clear_has_content();\n  }\n  content_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), content);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.BlobProto.content)\n}\n\n// optional .caffe2.QTensorProto qtensor = 5;\ninline bool BlobProto::has_qtensor() const {\n  return (_has_bits_[0] & 0x00000010u) != 0;\n}\ninline void BlobProto::set_has_qtensor() {\n  _has_bits_[0] |= 0x00000010u;\n}\ninline void BlobProto::clear_has_qtensor() {\n  _has_bits_[0] &= ~0x00000010u;\n}\ninline void BlobProto::clear_qtensor() {\n  if (qtensor_ != NULL) qtensor_->::caffe2::QTensorProto::Clear();\n  clear_has_qtensor();\n}\ninline const ::caffe2::QTensorProto& BlobProto::qtensor() const {\n  // @@protoc_insertion_point(field_get:caffe2.BlobProto.qtensor)\n  return qtensor_ != NULL ? 
*qtensor_\n                         : *::caffe2::QTensorProto::internal_default_instance();\n}\ninline ::caffe2::QTensorProto* BlobProto::mutable_qtensor() {\n  set_has_qtensor();\n  if (qtensor_ == NULL) {\n    qtensor_ = new ::caffe2::QTensorProto;\n  }\n  // @@protoc_insertion_point(field_mutable:caffe2.BlobProto.qtensor)\n  return qtensor_;\n}\ninline ::caffe2::QTensorProto* BlobProto::release_qtensor() {\n  // @@protoc_insertion_point(field_release:caffe2.BlobProto.qtensor)\n  clear_has_qtensor();\n  ::caffe2::QTensorProto* temp = qtensor_;\n  qtensor_ = NULL;\n  return temp;\n}\ninline void BlobProto::set_allocated_qtensor(::caffe2::QTensorProto* qtensor) {\n  delete qtensor_;\n  qtensor_ = qtensor;\n  if (qtensor) {\n    set_has_qtensor();\n  } else {\n    clear_has_qtensor();\n  }\n  // @@protoc_insertion_point(field_set_allocated:caffe2.BlobProto.qtensor)\n}\n\n// optional int32 content_num_chunks = 6;\ninline bool BlobProto::has_content_num_chunks() const {\n  return (_has_bits_[0] & 0x00000020u) != 0;\n}\ninline void BlobProto::set_has_content_num_chunks() {\n  _has_bits_[0] |= 0x00000020u;\n}\ninline void BlobProto::clear_has_content_num_chunks() {\n  _has_bits_[0] &= ~0x00000020u;\n}\ninline void BlobProto::clear_content_num_chunks() {\n  content_num_chunks_ = 0;\n  clear_has_content_num_chunks();\n}\ninline ::google::protobuf::int32 BlobProto::content_num_chunks() const {\n  // @@protoc_insertion_point(field_get:caffe2.BlobProto.content_num_chunks)\n  return content_num_chunks_;\n}\ninline void BlobProto::set_content_num_chunks(::google::protobuf::int32 value) {\n  set_has_content_num_chunks();\n  content_num_chunks_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.BlobProto.content_num_chunks)\n}\n\n// optional int32 content_chunk_id = 7;\ninline bool BlobProto::has_content_chunk_id() const {\n  return (_has_bits_[0] & 0x00000040u) != 0;\n}\ninline void BlobProto::set_has_content_chunk_id() {\n  _has_bits_[0] |= 0x00000040u;\n}\ninline void 
BlobProto::clear_has_content_chunk_id() {\n  _has_bits_[0] &= ~0x00000040u;\n}\ninline void BlobProto::clear_content_chunk_id() {\n  content_chunk_id_ = 0;\n  clear_has_content_chunk_id();\n}\ninline ::google::protobuf::int32 BlobProto::content_chunk_id() const {\n  // @@protoc_insertion_point(field_get:caffe2.BlobProto.content_chunk_id)\n  return content_chunk_id_;\n}\ninline void BlobProto::set_content_chunk_id(::google::protobuf::int32 value) {\n  set_has_content_chunk_id();\n  content_chunk_id_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.BlobProto.content_chunk_id)\n}\n\ninline const BlobProto* BlobProto::internal_default_instance() {\n  return &BlobProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// DBReaderProto\n\n// optional string name = 1;\ninline bool DBReaderProto::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void DBReaderProto::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void DBReaderProto::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void DBReaderProto::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& DBReaderProto::name() const {\n  // @@protoc_insertion_point(field_get:caffe2.DBReaderProto.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void DBReaderProto::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.DBReaderProto.name)\n}\ninline void DBReaderProto::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.DBReaderProto.name)\n}\ninline void 
DBReaderProto::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.DBReaderProto.name)\n}\ninline ::std::string* DBReaderProto::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:caffe2.DBReaderProto.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* DBReaderProto::release_name() {\n  // @@protoc_insertion_point(field_release:caffe2.DBReaderProto.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void DBReaderProto::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.DBReaderProto.name)\n}\n\n// optional string source = 2;\ninline bool DBReaderProto::has_source() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void DBReaderProto::set_has_source() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void DBReaderProto::clear_has_source() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void DBReaderProto::clear_source() {\n  source_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_source();\n}\ninline const ::std::string& DBReaderProto::source() const {\n  // @@protoc_insertion_point(field_get:caffe2.DBReaderProto.source)\n  return source_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void DBReaderProto::set_source(const ::std::string& value) {\n  set_has_source();\n  source_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), 
value);\n  // @@protoc_insertion_point(field_set:caffe2.DBReaderProto.source)\n}\ninline void DBReaderProto::set_source(const char* value) {\n  set_has_source();\n  source_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.DBReaderProto.source)\n}\ninline void DBReaderProto::set_source(const char* value, size_t size) {\n  set_has_source();\n  source_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.DBReaderProto.source)\n}\ninline ::std::string* DBReaderProto::mutable_source() {\n  set_has_source();\n  // @@protoc_insertion_point(field_mutable:caffe2.DBReaderProto.source)\n  return source_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* DBReaderProto::release_source() {\n  // @@protoc_insertion_point(field_release:caffe2.DBReaderProto.source)\n  clear_has_source();\n  return source_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void DBReaderProto::set_allocated_source(::std::string* source) {\n  if (source != NULL) {\n    set_has_source();\n  } else {\n    clear_has_source();\n  }\n  source_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), source);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.DBReaderProto.source)\n}\n\n// optional string db_type = 3;\ninline bool DBReaderProto::has_db_type() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void DBReaderProto::set_has_db_type() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void DBReaderProto::clear_has_db_type() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void DBReaderProto::clear_db_type() {\n  db_type_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  
clear_has_db_type();\n}\ninline const ::std::string& DBReaderProto::db_type() const {\n  // @@protoc_insertion_point(field_get:caffe2.DBReaderProto.db_type)\n  return db_type_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void DBReaderProto::set_db_type(const ::std::string& value) {\n  set_has_db_type();\n  db_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.DBReaderProto.db_type)\n}\ninline void DBReaderProto::set_db_type(const char* value) {\n  set_has_db_type();\n  db_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.DBReaderProto.db_type)\n}\ninline void DBReaderProto::set_db_type(const char* value, size_t size) {\n  set_has_db_type();\n  db_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.DBReaderProto.db_type)\n}\ninline ::std::string* DBReaderProto::mutable_db_type() {\n  set_has_db_type();\n  // @@protoc_insertion_point(field_mutable:caffe2.DBReaderProto.db_type)\n  return db_type_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* DBReaderProto::release_db_type() {\n  // @@protoc_insertion_point(field_release:caffe2.DBReaderProto.db_type)\n  clear_has_db_type();\n  return db_type_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void DBReaderProto::set_allocated_db_type(::std::string* db_type) {\n  if (db_type != NULL) {\n    set_has_db_type();\n  } else {\n    clear_has_db_type();\n  }\n  db_type_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), db_type);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.DBReaderProto.db_type)\n}\n\n// optional 
string key = 4;\ninline bool DBReaderProto::has_key() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void DBReaderProto::set_has_key() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void DBReaderProto::clear_has_key() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void DBReaderProto::clear_key() {\n  key_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_key();\n}\ninline const ::std::string& DBReaderProto::key() const {\n  // @@protoc_insertion_point(field_get:caffe2.DBReaderProto.key)\n  return key_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void DBReaderProto::set_key(const ::std::string& value) {\n  set_has_key();\n  key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.DBReaderProto.key)\n}\ninline void DBReaderProto::set_key(const char* value) {\n  set_has_key();\n  key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.DBReaderProto.key)\n}\ninline void DBReaderProto::set_key(const char* value, size_t size) {\n  set_has_key();\n  key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.DBReaderProto.key)\n}\ninline ::std::string* DBReaderProto::mutable_key() {\n  set_has_key();\n  // @@protoc_insertion_point(field_mutable:caffe2.DBReaderProto.key)\n  return key_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* DBReaderProto::release_key() {\n  // @@protoc_insertion_point(field_release:caffe2.DBReaderProto.key)\n  clear_has_key();\n  return key_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void DBReaderProto::set_allocated_key(::std::string* 
key) {\n  if (key != NULL) {\n    set_has_key();\n  } else {\n    clear_has_key();\n  }\n  key_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), key);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.DBReaderProto.key)\n}\n\ninline const DBReaderProto* DBReaderProto::internal_default_instance() {\n  return &DBReaderProto_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace caffe2\n\n#ifndef SWIG\nnamespace google {\nnamespace protobuf {\n\ntemplate <> struct is_proto_enum< ::caffe2::TensorProto_DataType> : ::google::protobuf::internal::true_type {};\ntemplate <>\ninline const EnumDescriptor* GetEnumDescriptor< ::caffe2::TensorProto_DataType>() {\n  return ::caffe2::TensorProto_DataType_descriptor();\n}\ntemplate <> struct is_proto_enum< ::caffe2::DeviceType> : ::google::protobuf::internal::true_type {};\ntemplate <>\ninline const EnumDescriptor* GetEnumDescriptor< ::caffe2::DeviceType>() {\n  
return ::caffe2::DeviceType_descriptor();\n}\n\n}  // namespace protobuf\n}  // namespace google\n#endif  // SWIG\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_caffe2_2fproto_2fcaffe2_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/proto/caffe2_legacy.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: caffe2/proto/caffe2_legacy.proto\n\n#ifndef PROTOBUF_caffe2_2fproto_2fcaffe2_5flegacy_2eproto__INCLUDED\n#define PROTOBUF_caffe2_2fproto_2fcaffe2_5flegacy_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/generated_enum_reflection.h>\n// @@protoc_insertion_point(includes)\n\nnamespace caffe2 {\n\n// Internal implementation detail -- do not call these.\nvoid protobuf_AddDesc_caffe2_2fproto_2fcaffe2_5flegacy_2eproto();\nvoid protobuf_InitDefaults_caffe2_2fproto_2fcaffe2_5flegacy_2eproto();\nvoid protobuf_AssignDesc_caffe2_2fproto_2fcaffe2_5flegacy_2eproto();\nvoid protobuf_ShutdownFile_caffe2_2fproto_2fcaffe2_5flegacy_2eproto();\n\n\nenum LegacyPadding {\n  NOTSET = 0,\n  VALID = 1,\n  SAME = 2,\n  CAFFE_LEGACY_POOLING = 3\n};\nbool LegacyPadding_IsValid(int value);\nconst LegacyPadding LegacyPadding_MIN = NOTSET;\nconst LegacyPadding LegacyPadding_MAX = CAFFE_LEGACY_POOLING;\nconst int LegacyPadding_ARRAYSIZE = LegacyPadding_MAX + 1;\n\nconst ::google::protobuf::EnumDescriptor* LegacyPadding_descriptor();\ninline const ::std::string& LegacyPadding_Name(LegacyPadding value) {\n  return 
::google::protobuf::internal::NameOfEnum(\n    LegacyPadding_descriptor(), value);\n}\ninline bool LegacyPadding_Parse(\n    const ::std::string& name, LegacyPadding* value) {\n  return ::google::protobuf::internal::ParseNamedEnum<LegacyPadding>(\n    LegacyPadding_descriptor(), name, value);\n}\n// ===================================================================\n\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace caffe2\n\n#ifndef SWIG\nnamespace google {\nnamespace protobuf {\n\ntemplate <> struct is_proto_enum< ::caffe2::LegacyPadding> : ::google::protobuf::internal::true_type {};\ntemplate <>\ninline const EnumDescriptor* GetEnumDescriptor< ::caffe2::LegacyPadding>() {\n  return ::caffe2::LegacyPadding_descriptor();\n}\n\n}  // namespace protobuf\n}  // namespace google\n#endif  // SWIG\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_caffe2_2fproto_2fcaffe2_5flegacy_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/proto/hsm.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: caffe2/proto/hsm.proto\n\n#ifndef PROTOBUF_caffe2_2fproto_2fhsm_2eproto__INCLUDED\n#define PROTOBUF_caffe2_2fproto_2fhsm_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/unknown_field_set.h>\n// @@protoc_insertion_point(includes)\n\nnamespace caffe2 {\n\n// Internal implementation detail -- do not call these.\nvoid protobuf_AddDesc_caffe2_2fproto_2fhsm_2eproto();\nvoid protobuf_InitDefaults_caffe2_2fproto_2fhsm_2eproto();\nvoid protobuf_AssignDesc_caffe2_2fproto_2fhsm_2eproto();\nvoid protobuf_ShutdownFile_caffe2_2fproto_2fhsm_2eproto();\n\nclass HierarchyProto;\nclass NodeProto;\nclass PathNodeProto;\nclass PathProto;\nclass TreeProto;\n\n// ===================================================================\n\nclass NodeProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.NodeProto) */ {\n public:\n  NodeProto();\n  virtual ~NodeProto();\n\n  NodeProto(const NodeProto& from);\n\n  inline NodeProto& operator=(const NodeProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const 
::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const NodeProto& default_instance();\n\n  static const NodeProto* internal_default_instance();\n\n  void Swap(NodeProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline NodeProto* New() const { return New(NULL); }\n\n  NodeProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const NodeProto& from);\n  void MergeFrom(const NodeProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(NodeProto* other);\n  void UnsafeMergeFrom(const NodeProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types 
----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // repeated .caffe2.NodeProto children = 1;\n  int children_size() const;\n  void clear_children();\n  static const int kChildrenFieldNumber = 1;\n  const ::caffe2::NodeProto& children(int index) const;\n  ::caffe2::NodeProto* mutable_children(int index);\n  ::caffe2::NodeProto* add_children();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::NodeProto >*\n      mutable_children();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::NodeProto >&\n      children() const;\n\n  // repeated int32 word_ids = 2;\n  int word_ids_size() const;\n  void clear_word_ids();\n  static const int kWordIdsFieldNumber = 2;\n  ::google::protobuf::int32 word_ids(int index) const;\n  void set_word_ids(int index, ::google::protobuf::int32 value);\n  void add_word_ids(::google::protobuf::int32 value);\n  const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\n      word_ids() const;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\n      mutable_word_ids();\n\n  // optional int32 offset = 3;\n  bool has_offset() const;\n  void clear_offset();\n  static const int kOffsetFieldNumber = 3;\n  ::google::protobuf::int32 offset() const;\n  void set_offset(::google::protobuf::int32 value);\n\n  // optional string name = 4;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 4;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // repeated float scores = 5;\n  int scores_size() const;\n  void clear_scores();\n  static const int kScoresFieldNumber = 5;\n  float scores(int index) const;\n  void set_scores(int index, float value);\n  void add_scores(float 
value);\n  const ::google::protobuf::RepeatedField< float >&\n      scores() const;\n  ::google::protobuf::RepeatedField< float >*\n      mutable_scores();\n\n  // @@protoc_insertion_point(class_scope:caffe2.NodeProto)\n private:\n  inline void set_has_offset();\n  inline void clear_has_offset();\n  inline void set_has_name();\n  inline void clear_has_name();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::NodeProto > children_;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 > word_ids_;\n  ::google::protobuf::RepeatedField< float > scores_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::int32 offset_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fhsm_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fhsm_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fhsm_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fhsm_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<NodeProto> NodeProto_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass TreeProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.TreeProto) */ {\n public:\n  TreeProto();\n  virtual ~TreeProto();\n\n  TreeProto(const TreeProto& from);\n\n  inline TreeProto& operator=(const TreeProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const 
TreeProto& default_instance();\n\n  static const TreeProto* internal_default_instance();\n\n  void Swap(TreeProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline TreeProto* New() const { return New(NULL); }\n\n  TreeProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const TreeProto& from);\n  void MergeFrom(const TreeProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(TreeProto* other);\n  void UnsafeMergeFrom(const TreeProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional .caffe2.NodeProto root_node = 1;\n  bool has_root_node() const;\n  void clear_root_node();\n  static const int kRootNodeFieldNumber = 1;\n  const ::caffe2::NodeProto& root_node() const;\n  
::caffe2::NodeProto* mutable_root_node();\n  ::caffe2::NodeProto* release_root_node();\n  void set_allocated_root_node(::caffe2::NodeProto* root_node);\n\n  // @@protoc_insertion_point(class_scope:caffe2.TreeProto)\n private:\n  inline void set_has_root_node();\n  inline void clear_has_root_node();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::caffe2::NodeProto* root_node_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fhsm_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fhsm_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fhsm_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fhsm_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<TreeProto> TreeProto_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass HierarchyProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.HierarchyProto) */ {\n public:\n  HierarchyProto();\n  virtual ~HierarchyProto();\n\n  HierarchyProto(const HierarchyProto& from);\n\n  inline HierarchyProto& operator=(const HierarchyProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const HierarchyProto& default_instance();\n\n  static const HierarchyProto* internal_default_instance();\n\n  void Swap(HierarchyProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline HierarchyProto* New() const { return New(NULL); }\n\n  
HierarchyProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const HierarchyProto& from);\n  void MergeFrom(const HierarchyProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(HierarchyProto* other);\n  void UnsafeMergeFrom(const HierarchyProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional int32 size = 1;\n  bool has_size() const;\n  void clear_size();\n  static const int kSizeFieldNumber = 1;\n  ::google::protobuf::int32 size() const;\n  void set_size(::google::protobuf::int32 value);\n\n  // repeated .caffe2.PathProto paths = 2;\n  int paths_size() const;\n  void clear_paths();\n  static const int kPathsFieldNumber = 2;\n  const ::caffe2::PathProto& paths(int index) const;\n  ::caffe2::PathProto* mutable_paths(int 
index);\n  ::caffe2::PathProto* add_paths();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::PathProto >*\n      mutable_paths();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::PathProto >&\n      paths() const;\n\n  // @@protoc_insertion_point(class_scope:caffe2.HierarchyProto)\n private:\n  inline void set_has_size();\n  inline void clear_has_size();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::PathProto > paths_;\n  ::google::protobuf::int32 size_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fhsm_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fhsm_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fhsm_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fhsm_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<HierarchyProto> HierarchyProto_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass PathProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.PathProto) */ {\n public:\n  PathProto();\n  virtual ~PathProto();\n\n  PathProto(const PathProto& from);\n\n  inline PathProto& operator=(const PathProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const PathProto& default_instance();\n\n  static const PathProto* internal_default_instance();\n\n  void Swap(PathProto* other);\n\n  // implements Message 
----------------------------------------------\n\n  inline PathProto* New() const { return New(NULL); }\n\n  PathProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const PathProto& from);\n  void MergeFrom(const PathProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(PathProto* other);\n  void UnsafeMergeFrom(const PathProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional int32 word_id = 1;\n  bool has_word_id() const;\n  void clear_word_id();\n  static const int kWordIdFieldNumber = 1;\n  ::google::protobuf::int32 word_id() const;\n  void set_word_id(::google::protobuf::int32 value);\n\n  // repeated .caffe2.PathNodeProto path_nodes = 2;\n  int path_nodes_size() const;\n  void clear_path_nodes();\n  static const int 
kPathNodesFieldNumber = 2;\n  const ::caffe2::PathNodeProto& path_nodes(int index) const;\n  ::caffe2::PathNodeProto* mutable_path_nodes(int index);\n  ::caffe2::PathNodeProto* add_path_nodes();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::PathNodeProto >*\n      mutable_path_nodes();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::PathNodeProto >&\n      path_nodes() const;\n\n  // @@protoc_insertion_point(class_scope:caffe2.PathProto)\n private:\n  inline void set_has_word_id();\n  inline void clear_has_word_id();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::PathNodeProto > path_nodes_;\n  ::google::protobuf::int32 word_id_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fhsm_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fhsm_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fhsm_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fhsm_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<PathProto> PathProto_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass PathNodeProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.PathNodeProto) */ {\n public:\n  PathNodeProto();\n  virtual ~PathNodeProto();\n\n  PathNodeProto(const PathNodeProto& from);\n\n  inline PathNodeProto& operator=(const PathNodeProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* 
descriptor();\n  static const PathNodeProto& default_instance();\n\n  static const PathNodeProto* internal_default_instance();\n\n  void Swap(PathNodeProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline PathNodeProto* New() const { return New(NULL); }\n\n  PathNodeProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const PathNodeProto& from);\n  void MergeFrom(const PathNodeProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(PathNodeProto* other);\n  void UnsafeMergeFrom(const PathNodeProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional int32 index = 1;\n  bool has_index() const;\n  void clear_index();\n  static const int kIndexFieldNumber = 1;\n  
::google::protobuf::int32 index() const;\n  void set_index(::google::protobuf::int32 value);\n\n  // optional int32 length = 2;\n  bool has_length() const;\n  void clear_length();\n  static const int kLengthFieldNumber = 2;\n  ::google::protobuf::int32 length() const;\n  void set_length(::google::protobuf::int32 value);\n\n  // optional int32 target = 3;\n  bool has_target() const;\n  void clear_target();\n  static const int kTargetFieldNumber = 3;\n  ::google::protobuf::int32 target() const;\n  void set_target(::google::protobuf::int32 value);\n\n  // @@protoc_insertion_point(class_scope:caffe2.PathNodeProto)\n private:\n  inline void set_has_index();\n  inline void clear_has_index();\n  inline void set_has_length();\n  inline void clear_has_length();\n  inline void set_has_target();\n  inline void clear_has_target();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::int32 index_;\n  ::google::protobuf::int32 length_;\n  ::google::protobuf::int32 target_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fhsm_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fhsm_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fhsm_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fhsm_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<PathNodeProto> PathNodeProto_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// NodeProto\n\n// repeated .caffe2.NodeProto children = 1;\ninline int NodeProto::children_size() const {\n  return children_.size();\n}\ninline void NodeProto::clear_children() {\n  children_.Clear();\n}\ninline const ::caffe2::NodeProto& NodeProto::children(int 
index) const {\n  // @@protoc_insertion_point(field_get:caffe2.NodeProto.children)\n  return children_.Get(index);\n}\ninline ::caffe2::NodeProto* NodeProto::mutable_children(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.NodeProto.children)\n  return children_.Mutable(index);\n}\ninline ::caffe2::NodeProto* NodeProto::add_children() {\n  // @@protoc_insertion_point(field_add:caffe2.NodeProto.children)\n  return children_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::NodeProto >*\nNodeProto::mutable_children() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.NodeProto.children)\n  return &children_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::NodeProto >&\nNodeProto::children() const {\n  // @@protoc_insertion_point(field_list:caffe2.NodeProto.children)\n  return children_;\n}\n\n// repeated int32 word_ids = 2;\ninline int NodeProto::word_ids_size() const {\n  return word_ids_.size();\n}\ninline void NodeProto::clear_word_ids() {\n  word_ids_.Clear();\n}\ninline ::google::protobuf::int32 NodeProto::word_ids(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.NodeProto.word_ids)\n  return word_ids_.Get(index);\n}\ninline void NodeProto::set_word_ids(int index, ::google::protobuf::int32 value) {\n  word_ids_.Set(index, value);\n  // @@protoc_insertion_point(field_set:caffe2.NodeProto.word_ids)\n}\ninline void NodeProto::add_word_ids(::google::protobuf::int32 value) {\n  word_ids_.Add(value);\n  // @@protoc_insertion_point(field_add:caffe2.NodeProto.word_ids)\n}\ninline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\nNodeProto::word_ids() const {\n  // @@protoc_insertion_point(field_list:caffe2.NodeProto.word_ids)\n  return word_ids_;\n}\ninline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\nNodeProto::mutable_word_ids() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.NodeProto.word_ids)\n  return &word_ids_;\n}\n\n// optional int32 
offset = 3;\ninline bool NodeProto::has_offset() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void NodeProto::set_has_offset() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void NodeProto::clear_has_offset() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void NodeProto::clear_offset() {\n  offset_ = 0;\n  clear_has_offset();\n}\ninline ::google::protobuf::int32 NodeProto::offset() const {\n  // @@protoc_insertion_point(field_get:caffe2.NodeProto.offset)\n  return offset_;\n}\ninline void NodeProto::set_offset(::google::protobuf::int32 value) {\n  set_has_offset();\n  offset_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.NodeProto.offset)\n}\n\n// optional string name = 4;\ninline bool NodeProto::has_name() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void NodeProto::set_has_name() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void NodeProto::clear_has_name() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void NodeProto::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& NodeProto::name() const {\n  // @@protoc_insertion_point(field_get:caffe2.NodeProto.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void NodeProto::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.NodeProto.name)\n}\ninline void NodeProto::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.NodeProto.name)\n}\ninline void NodeProto::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      
::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.NodeProto.name)\n}\ninline ::std::string* NodeProto::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:caffe2.NodeProto.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* NodeProto::release_name() {\n  // @@protoc_insertion_point(field_release:caffe2.NodeProto.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void NodeProto::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.NodeProto.name)\n}\n\n// repeated float scores = 5;\ninline int NodeProto::scores_size() const {\n  return scores_.size();\n}\ninline void NodeProto::clear_scores() {\n  scores_.Clear();\n}\ninline float NodeProto::scores(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.NodeProto.scores)\n  return scores_.Get(index);\n}\ninline void NodeProto::set_scores(int index, float value) {\n  scores_.Set(index, value);\n  // @@protoc_insertion_point(field_set:caffe2.NodeProto.scores)\n}\ninline void NodeProto::add_scores(float value) {\n  scores_.Add(value);\n  // @@protoc_insertion_point(field_add:caffe2.NodeProto.scores)\n}\ninline const ::google::protobuf::RepeatedField< float >&\nNodeProto::scores() const {\n  // @@protoc_insertion_point(field_list:caffe2.NodeProto.scores)\n  return scores_;\n}\ninline ::google::protobuf::RepeatedField< float >*\nNodeProto::mutable_scores() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.NodeProto.scores)\n  return &scores_;\n}\n\ninline const NodeProto* NodeProto::internal_default_instance() {\n  return 
&NodeProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// TreeProto\n\n// optional .caffe2.NodeProto root_node = 1;\ninline bool TreeProto::has_root_node() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void TreeProto::set_has_root_node() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void TreeProto::clear_has_root_node() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void TreeProto::clear_root_node() {\n  if (root_node_ != NULL) root_node_->::caffe2::NodeProto::Clear();\n  clear_has_root_node();\n}\ninline const ::caffe2::NodeProto& TreeProto::root_node() const {\n  // @@protoc_insertion_point(field_get:caffe2.TreeProto.root_node)\n  return root_node_ != NULL ? *root_node_\n                         : *::caffe2::NodeProto::internal_default_instance();\n}\ninline ::caffe2::NodeProto* TreeProto::mutable_root_node() {\n  set_has_root_node();\n  if (root_node_ == NULL) {\n    root_node_ = new ::caffe2::NodeProto;\n  }\n  // @@protoc_insertion_point(field_mutable:caffe2.TreeProto.root_node)\n  return root_node_;\n}\ninline ::caffe2::NodeProto* TreeProto::release_root_node() {\n  // @@protoc_insertion_point(field_release:caffe2.TreeProto.root_node)\n  clear_has_root_node();\n  ::caffe2::NodeProto* temp = root_node_;\n  root_node_ = NULL;\n  return temp;\n}\ninline void TreeProto::set_allocated_root_node(::caffe2::NodeProto* root_node) {\n  delete root_node_;\n  root_node_ = root_node;\n  if (root_node) {\n    set_has_root_node();\n  } else {\n    clear_has_root_node();\n  }\n  // @@protoc_insertion_point(field_set_allocated:caffe2.TreeProto.root_node)\n}\n\ninline const TreeProto* TreeProto::internal_default_instance() {\n  return &TreeProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// HierarchyProto\n\n// optional int32 size = 1;\ninline bool HierarchyProto::has_size() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline 
void HierarchyProto::set_has_size() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void HierarchyProto::clear_has_size() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void HierarchyProto::clear_size() {\n  size_ = 0;\n  clear_has_size();\n}\ninline ::google::protobuf::int32 HierarchyProto::size() const {\n  // @@protoc_insertion_point(field_get:caffe2.HierarchyProto.size)\n  return size_;\n}\ninline void HierarchyProto::set_size(::google::protobuf::int32 value) {\n  set_has_size();\n  size_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.HierarchyProto.size)\n}\n\n// repeated .caffe2.PathProto paths = 2;\ninline int HierarchyProto::paths_size() const {\n  return paths_.size();\n}\ninline void HierarchyProto::clear_paths() {\n  paths_.Clear();\n}\ninline const ::caffe2::PathProto& HierarchyProto::paths(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.HierarchyProto.paths)\n  return paths_.Get(index);\n}\ninline ::caffe2::PathProto* HierarchyProto::mutable_paths(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.HierarchyProto.paths)\n  return paths_.Mutable(index);\n}\ninline ::caffe2::PathProto* HierarchyProto::add_paths() {\n  // @@protoc_insertion_point(field_add:caffe2.HierarchyProto.paths)\n  return paths_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::PathProto >*\nHierarchyProto::mutable_paths() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.HierarchyProto.paths)\n  return &paths_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::PathProto >&\nHierarchyProto::paths() const {\n  // @@protoc_insertion_point(field_list:caffe2.HierarchyProto.paths)\n  return paths_;\n}\n\ninline const HierarchyProto* HierarchyProto::internal_default_instance() {\n  return &HierarchyProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// PathProto\n\n// optional int32 word_id = 1;\ninline bool PathProto::has_word_id() const {\n  return 
(_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void PathProto::set_has_word_id() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void PathProto::clear_has_word_id() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void PathProto::clear_word_id() {\n  word_id_ = 0;\n  clear_has_word_id();\n}\ninline ::google::protobuf::int32 PathProto::word_id() const {\n  // @@protoc_insertion_point(field_get:caffe2.PathProto.word_id)\n  return word_id_;\n}\ninline void PathProto::set_word_id(::google::protobuf::int32 value) {\n  set_has_word_id();\n  word_id_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.PathProto.word_id)\n}\n\n// repeated .caffe2.PathNodeProto path_nodes = 2;\ninline int PathProto::path_nodes_size() const {\n  return path_nodes_.size();\n}\ninline void PathProto::clear_path_nodes() {\n  path_nodes_.Clear();\n}\ninline const ::caffe2::PathNodeProto& PathProto::path_nodes(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.PathProto.path_nodes)\n  return path_nodes_.Get(index);\n}\ninline ::caffe2::PathNodeProto* PathProto::mutable_path_nodes(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.PathProto.path_nodes)\n  return path_nodes_.Mutable(index);\n}\ninline ::caffe2::PathNodeProto* PathProto::add_path_nodes() {\n  // @@protoc_insertion_point(field_add:caffe2.PathProto.path_nodes)\n  return path_nodes_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::PathNodeProto >*\nPathProto::mutable_path_nodes() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.PathProto.path_nodes)\n  return &path_nodes_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::PathNodeProto >&\nPathProto::path_nodes() const {\n  // @@protoc_insertion_point(field_list:caffe2.PathProto.path_nodes)\n  return path_nodes_;\n}\n\ninline const PathProto* PathProto::internal_default_instance() {\n  return &PathProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// 
PathNodeProto\n\n// optional int32 index = 1;\ninline bool PathNodeProto::has_index() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void PathNodeProto::set_has_index() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void PathNodeProto::clear_has_index() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void PathNodeProto::clear_index() {\n  index_ = 0;\n  clear_has_index();\n}\ninline ::google::protobuf::int32 PathNodeProto::index() const {\n  // @@protoc_insertion_point(field_get:caffe2.PathNodeProto.index)\n  return index_;\n}\ninline void PathNodeProto::set_index(::google::protobuf::int32 value) {\n  set_has_index();\n  index_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.PathNodeProto.index)\n}\n\n// optional int32 length = 2;\ninline bool PathNodeProto::has_length() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void PathNodeProto::set_has_length() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void PathNodeProto::clear_has_length() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void PathNodeProto::clear_length() {\n  length_ = 0;\n  clear_has_length();\n}\ninline ::google::protobuf::int32 PathNodeProto::length() const {\n  // @@protoc_insertion_point(field_get:caffe2.PathNodeProto.length)\n  return length_;\n}\ninline void PathNodeProto::set_length(::google::protobuf::int32 value) {\n  set_has_length();\n  length_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.PathNodeProto.length)\n}\n\n// optional int32 target = 3;\ninline bool PathNodeProto::has_target() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void PathNodeProto::set_has_target() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void PathNodeProto::clear_has_target() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void PathNodeProto::clear_target() {\n  target_ = 0;\n  clear_has_target();\n}\ninline ::google::protobuf::int32 PathNodeProto::target() const {\n  // @@protoc_insertion_point(field_get:caffe2.PathNodeProto.target)\n  return 
target_;\n}\ninline void PathNodeProto::set_target(::google::protobuf::int32 value) {\n  set_has_target();\n  target_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.PathNodeProto.target)\n}\n\ninline const PathNodeProto* PathNodeProto::internal_default_instance() {\n  return &PathNodeProto_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace caffe2\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_caffe2_2fproto_2fhsm_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/proto/metanet.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: caffe2/proto/metanet.proto\n\n#ifndef PROTOBUF_caffe2_2fproto_2fmetanet_2eproto__INCLUDED\n#define PROTOBUF_caffe2_2fproto_2fmetanet_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/unknown_field_set.h>\n#include \"caffe2/proto/caffe2.pb.h\"\n// @@protoc_insertion_point(includes)\n\nnamespace caffe2 {\n\n// Internal implementation detail -- do not call these.\nvoid protobuf_AddDesc_caffe2_2fproto_2fmetanet_2eproto();\nvoid protobuf_InitDefaults_caffe2_2fproto_2fmetanet_2eproto();\nvoid protobuf_AssignDesc_caffe2_2fproto_2fmetanet_2eproto();\nvoid protobuf_ShutdownFile_caffe2_2fproto_2fmetanet_2eproto();\n\nclass BlobsMap;\nclass MetaNetDef;\nclass ModelInfo;\nclass NetsMap;\nclass PlansMap;\nclass StringMap;\n\n// ===================================================================\n\nclass ModelInfo : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.ModelInfo) */ {\n public:\n  ModelInfo();\n  virtual ~ModelInfo();\n\n  ModelInfo(const ModelInfo& from);\n\n  inline ModelInfo& operator=(const ModelInfo& from) {\n    
CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const ModelInfo& default_instance();\n\n  static const ModelInfo* internal_default_instance();\n\n  void Swap(ModelInfo* other);\n\n  // implements Message ----------------------------------------------\n\n  inline ModelInfo* New() const { return New(NULL); }\n\n  ModelInfo* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const ModelInfo& from);\n  void MergeFrom(const ModelInfo& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(ModelInfo* other);\n  void UnsafeMergeFrom(const ModelInfo& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  
::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string project = 1;\n  bool has_project() const;\n  void clear_project();\n  static const int kProjectFieldNumber = 1;\n  const ::std::string& project() const;\n  void set_project(const ::std::string& value);\n  void set_project(const char* value);\n  void set_project(const char* value, size_t size);\n  ::std::string* mutable_project();\n  ::std::string* release_project();\n  void set_allocated_project(::std::string* project);\n\n  // optional string modelClass = 2;\n  bool has_modelclass() const;\n  void clear_modelclass();\n  static const int kModelClassFieldNumber = 2;\n  const ::std::string& modelclass() const;\n  void set_modelclass(const ::std::string& value);\n  void set_modelclass(const char* value);\n  void set_modelclass(const char* value, size_t size);\n  ::std::string* mutable_modelclass();\n  ::std::string* release_modelclass();\n  void set_allocated_modelclass(::std::string* modelclass);\n\n  // optional string version = 3;\n  bool has_version() const;\n  void clear_version();\n  static const int kVersionFieldNumber = 3;\n  const ::std::string& version() const;\n  void set_version(const ::std::string& value);\n  void set_version(const char* value);\n  void set_version(const char* value, size_t size);\n  ::std::string* mutable_version();\n  ::std::string* release_version();\n  void set_allocated_version(::std::string* version);\n\n  // optional string predictorType = 4 [default = \"SINGLE_PREDICTOR\"];\n  bool has_predictortype() const;\n  void clear_predictortype();\n  static const int kPredictorTypeFieldNumber = 4;\n  const ::std::string& predictortype() const;\n  void set_predictortype(const ::std::string& value);\n  void set_predictortype(const char* value);\n  void set_predictortype(const char* value, size_t size);\n  ::std::string* 
mutable_predictortype();\n  ::std::string* release_predictortype();\n  void set_allocated_predictortype(::std::string* predictortype);\n\n  // optional string modelId = 5;\n  bool has_modelid() const;\n  void clear_modelid();\n  static const int kModelIdFieldNumber = 5;\n  const ::std::string& modelid() const;\n  void set_modelid(const ::std::string& value);\n  void set_modelid(const char* value);\n  void set_modelid(const char* value, size_t size);\n  ::std::string* mutable_modelid();\n  ::std::string* release_modelid();\n  void set_allocated_modelid(::std::string* modelid);\n\n  // @@protoc_insertion_point(class_scope:caffe2.ModelInfo)\n private:\n  inline void set_has_project();\n  inline void clear_has_project();\n  inline void set_has_modelclass();\n  inline void clear_has_modelclass();\n  inline void set_has_version();\n  inline void clear_has_version();\n  inline void set_has_predictortype();\n  inline void clear_has_predictortype();\n  inline void set_has_modelid();\n  inline void clear_has_modelid();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::ArenaStringPtr project_;\n  ::google::protobuf::internal::ArenaStringPtr modelclass_;\n  ::google::protobuf::internal::ArenaStringPtr version_;\n  static ::std::string* _default_predictortype_;\n  ::google::protobuf::internal::ArenaStringPtr predictortype_;\n  ::google::protobuf::internal::ArenaStringPtr modelid_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fmetanet_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fmetanet_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fmetanet_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fmetanet_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<ModelInfo> ModelInfo_default_instance_;\n\n// 
-------------------------------------------------------------------\n\nclass BlobsMap : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.BlobsMap) */ {\n public:\n  BlobsMap();\n  virtual ~BlobsMap();\n\n  BlobsMap(const BlobsMap& from);\n\n  inline BlobsMap& operator=(const BlobsMap& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const BlobsMap& default_instance();\n\n  static const BlobsMap* internal_default_instance();\n\n  void Swap(BlobsMap* other);\n\n  // implements Message ----------------------------------------------\n\n  inline BlobsMap* New() const { return New(NULL); }\n\n  BlobsMap* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const BlobsMap& from);\n  void MergeFrom(const BlobsMap& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void 
InternalSwap(BlobsMap* other);\n  void UnsafeMergeFrom(const BlobsMap& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // required string key = 1;\n  bool has_key() const;\n  void clear_key();\n  static const int kKeyFieldNumber = 1;\n  const ::std::string& key() const;\n  void set_key(const ::std::string& value);\n  void set_key(const char* value);\n  void set_key(const char* value, size_t size);\n  ::std::string* mutable_key();\n  ::std::string* release_key();\n  void set_allocated_key(::std::string* key);\n\n  // repeated string value = 2;\n  int value_size() const;\n  void clear_value();\n  static const int kValueFieldNumber = 2;\n  const ::std::string& value(int index) const;\n  ::std::string* mutable_value(int index);\n  void set_value(int index, const ::std::string& value);\n  void set_value(int index, const char* value);\n  void set_value(int index, const char* value, size_t size);\n  ::std::string* add_value();\n  void add_value(const ::std::string& value);\n  void add_value(const char* value);\n  void add_value(const char* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& value() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_value();\n\n  // @@protoc_insertion_point(class_scope:caffe2.BlobsMap)\n private:\n  inline void set_has_key();\n  inline void clear_has_key();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> value_;\n  
::google::protobuf::internal::ArenaStringPtr key_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fmetanet_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fmetanet_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fmetanet_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fmetanet_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<BlobsMap> BlobsMap_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass NetsMap : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.NetsMap) */ {\n public:\n  NetsMap();\n  virtual ~NetsMap();\n\n  NetsMap(const NetsMap& from);\n\n  inline NetsMap& operator=(const NetsMap& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const NetsMap& default_instance();\n\n  static const NetsMap* internal_default_instance();\n\n  void Swap(NetsMap* other);\n\n  // implements Message ----------------------------------------------\n\n  inline NetsMap* New() const { return New(NULL); }\n\n  NetsMap* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const NetsMap& from);\n  void MergeFrom(const NetsMap& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(NetsMap* other);\n  void UnsafeMergeFrom(const NetsMap& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // required string key = 1;\n  bool has_key() const;\n  void clear_key();\n  static const int kKeyFieldNumber = 1;\n  const ::std::string& key() const;\n  void set_key(const ::std::string& value);\n  void set_key(const char* value);\n  void set_key(const char* value, size_t size);\n  ::std::string* mutable_key();\n  ::std::string* release_key();\n  void set_allocated_key(::std::string* key);\n\n  // required .caffe2.NetDef value = 2;\n  bool has_value() const;\n  void clear_value();\n  static const int kValueFieldNumber = 2;\n  const ::caffe2::NetDef& value() const;\n  ::caffe2::NetDef* mutable_value();\n  ::caffe2::NetDef* release_value();\n  void set_allocated_value(::caffe2::NetDef* value);\n\n  // @@protoc_insertion_point(class_scope:caffe2.NetsMap)\n private:\n  inline void set_has_key();\n  inline void clear_has_key();\n  inline void set_has_value();\n  inline void clear_has_value();\n\n  // helper for ByteSizeLong()\n  size_t RequiredFieldsByteSizeFallback() const;\n\n  
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::ArenaStringPtr key_;\n  ::caffe2::NetDef* value_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fmetanet_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fmetanet_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fmetanet_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fmetanet_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<NetsMap> NetsMap_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass PlansMap : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.PlansMap) */ {\n public:\n  PlansMap();\n  virtual ~PlansMap();\n\n  PlansMap(const PlansMap& from);\n\n  inline PlansMap& operator=(const PlansMap& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const PlansMap& default_instance();\n\n  static const PlansMap* internal_default_instance();\n\n  void Swap(PlansMap* other);\n\n  // implements Message ----------------------------------------------\n\n  inline PlansMap* New() const { return New(NULL); }\n\n  PlansMap* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const PlansMap& from);\n  void MergeFrom(const PlansMap& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() 
const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(PlansMap* other);\n  void UnsafeMergeFrom(const PlansMap& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // required string key = 1;\n  bool has_key() const;\n  void clear_key();\n  static const int kKeyFieldNumber = 1;\n  const ::std::string& key() const;\n  void set_key(const ::std::string& value);\n  void set_key(const char* value);\n  void set_key(const char* value, size_t size);\n  ::std::string* mutable_key();\n  ::std::string* release_key();\n  void set_allocated_key(::std::string* key);\n\n  // required .caffe2.PlanDef value = 2;\n  bool has_value() const;\n  void clear_value();\n  static const int kValueFieldNumber = 2;\n  const ::caffe2::PlanDef& value() const;\n  ::caffe2::PlanDef* mutable_value();\n  ::caffe2::PlanDef* release_value();\n  void set_allocated_value(::caffe2::PlanDef* value);\n\n  // @@protoc_insertion_point(class_scope:caffe2.PlansMap)\n private:\n  inline void 
set_has_key();\n  inline void clear_has_key();\n  inline void set_has_value();\n  inline void clear_has_value();\n\n  // helper for ByteSizeLong()\n  size_t RequiredFieldsByteSizeFallback() const;\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::ArenaStringPtr key_;\n  ::caffe2::PlanDef* value_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fmetanet_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fmetanet_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fmetanet_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fmetanet_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<PlansMap> PlansMap_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass StringMap : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.StringMap) */ {\n public:\n  StringMap();\n  virtual ~StringMap();\n\n  StringMap(const StringMap& from);\n\n  inline StringMap& operator=(const StringMap& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const StringMap& default_instance();\n\n  static const StringMap* internal_default_instance();\n\n  void Swap(StringMap* other);\n\n  // implements Message ----------------------------------------------\n\n  inline StringMap* New() const { return New(NULL); }\n\n  StringMap* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& 
from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const StringMap& from);\n  void MergeFrom(const StringMap& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(StringMap* other);\n  void UnsafeMergeFrom(const StringMap& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // required string key = 1;\n  bool has_key() const;\n  void clear_key();\n  static const int kKeyFieldNumber = 1;\n  const ::std::string& key() const;\n  void set_key(const ::std::string& value);\n  void set_key(const char* value);\n  void set_key(const char* value, size_t size);\n  ::std::string* mutable_key();\n  ::std::string* release_key();\n  void set_allocated_key(::std::string* key);\n\n  // required string value = 2;\n  bool has_value() const;\n  void clear_value();\n  static const int kValueFieldNumber = 2;\n  const ::std::string& value() const;\n  void 
set_value(const ::std::string& value);\n  void set_value(const char* value);\n  void set_value(const char* value, size_t size);\n  ::std::string* mutable_value();\n  ::std::string* release_value();\n  void set_allocated_value(::std::string* value);\n\n  // @@protoc_insertion_point(class_scope:caffe2.StringMap)\n private:\n  inline void set_has_key();\n  inline void clear_has_key();\n  inline void set_has_value();\n  inline void clear_has_value();\n\n  // helper for ByteSizeLong()\n  size_t RequiredFieldsByteSizeFallback() const;\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::ArenaStringPtr key_;\n  ::google::protobuf::internal::ArenaStringPtr value_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fmetanet_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fmetanet_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fmetanet_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fmetanet_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<StringMap> StringMap_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass MetaNetDef : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.MetaNetDef) */ {\n public:\n  MetaNetDef();\n  virtual ~MetaNetDef();\n\n  MetaNetDef(const MetaNetDef& from);\n\n  inline MetaNetDef& operator=(const MetaNetDef& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static 
const MetaNetDef& default_instance();\n\n  static const MetaNetDef* internal_default_instance();\n\n  void Swap(MetaNetDef* other);\n\n  // implements Message ----------------------------------------------\n\n  inline MetaNetDef* New() const { return New(NULL); }\n\n  MetaNetDef* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const MetaNetDef& from);\n  void MergeFrom(const MetaNetDef& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(MetaNetDef* other);\n  void UnsafeMergeFrom(const MetaNetDef& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // repeated .caffe2.BlobsMap blobs = 1;\n  int blobs_size() const;\n  void clear_blobs();\n  static const int kBlobsFieldNumber = 1;\n  const ::caffe2::BlobsMap& blobs(int index) const;\n  
::caffe2::BlobsMap* mutable_blobs(int index);\n  ::caffe2::BlobsMap* add_blobs();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::BlobsMap >*\n      mutable_blobs();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::BlobsMap >&\n      blobs() const;\n\n  // repeated .caffe2.NetsMap nets = 2;\n  int nets_size() const;\n  void clear_nets();\n  static const int kNetsFieldNumber = 2;\n  const ::caffe2::NetsMap& nets(int index) const;\n  ::caffe2::NetsMap* mutable_nets(int index);\n  ::caffe2::NetsMap* add_nets();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::NetsMap >*\n      mutable_nets();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::NetsMap >&\n      nets() const;\n\n  // optional .caffe2.ModelInfo modelInfo = 3;\n  bool has_modelinfo() const;\n  void clear_modelinfo();\n  static const int kModelInfoFieldNumber = 3;\n  const ::caffe2::ModelInfo& modelinfo() const;\n  ::caffe2::ModelInfo* mutable_modelinfo();\n  ::caffe2::ModelInfo* release_modelinfo();\n  void set_allocated_modelinfo(::caffe2::ModelInfo* modelinfo);\n\n  // repeated .caffe2.PlansMap plans = 4;\n  int plans_size() const;\n  void clear_plans();\n  static const int kPlansFieldNumber = 4;\n  const ::caffe2::PlansMap& plans(int index) const;\n  ::caffe2::PlansMap* mutable_plans(int index);\n  ::caffe2::PlansMap* add_plans();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::PlansMap >*\n      mutable_plans();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::PlansMap >&\n      plans() const;\n\n  // repeated .caffe2.StringMap applicationSpecificInfo = 5;\n  int applicationspecificinfo_size() const;\n  void clear_applicationspecificinfo();\n  static const int kApplicationSpecificInfoFieldNumber = 5;\n  const ::caffe2::StringMap& applicationspecificinfo(int index) const;\n  ::caffe2::StringMap* mutable_applicationspecificinfo(int index);\n  ::caffe2::StringMap* add_applicationspecificinfo();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::StringMap >*\n      
mutable_applicationspecificinfo();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::StringMap >&\n      applicationspecificinfo() const;\n\n  // @@protoc_insertion_point(class_scope:caffe2.MetaNetDef)\n private:\n  inline void set_has_modelinfo();\n  inline void clear_has_modelinfo();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::BlobsMap > blobs_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::NetsMap > nets_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::PlansMap > plans_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::StringMap > applicationspecificinfo_;\n  ::caffe2::ModelInfo* modelinfo_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fmetanet_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fmetanet_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fmetanet_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fmetanet_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<MetaNetDef> MetaNetDef_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// ModelInfo\n\n// optional string project = 1;\ninline bool ModelInfo::has_project() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void ModelInfo::set_has_project() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void ModelInfo::clear_has_project() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void ModelInfo::clear_project() {\n  project_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_project();\n}\ninline const ::std::string& ModelInfo::project() const {\n  // 
@@protoc_insertion_point(field_get:caffe2.ModelInfo.project)\n  return project_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ModelInfo::set_project(const ::std::string& value) {\n  set_has_project();\n  project_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.ModelInfo.project)\n}\ninline void ModelInfo::set_project(const char* value) {\n  set_has_project();\n  project_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.ModelInfo.project)\n}\ninline void ModelInfo::set_project(const char* value, size_t size) {\n  set_has_project();\n  project_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.ModelInfo.project)\n}\ninline ::std::string* ModelInfo::mutable_project() {\n  set_has_project();\n  // @@protoc_insertion_point(field_mutable:caffe2.ModelInfo.project)\n  return project_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* ModelInfo::release_project() {\n  // @@protoc_insertion_point(field_release:caffe2.ModelInfo.project)\n  clear_has_project();\n  return project_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ModelInfo::set_allocated_project(::std::string* project) {\n  if (project != NULL) {\n    set_has_project();\n  } else {\n    clear_has_project();\n  }\n  project_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), project);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.ModelInfo.project)\n}\n\n// optional string modelClass = 2;\ninline bool ModelInfo::has_modelclass() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void 
ModelInfo::set_has_modelclass() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void ModelInfo::clear_has_modelclass() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void ModelInfo::clear_modelclass() {\n  modelclass_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_modelclass();\n}\ninline const ::std::string& ModelInfo::modelclass() const {\n  // @@protoc_insertion_point(field_get:caffe2.ModelInfo.modelClass)\n  return modelclass_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ModelInfo::set_modelclass(const ::std::string& value) {\n  set_has_modelclass();\n  modelclass_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.ModelInfo.modelClass)\n}\ninline void ModelInfo::set_modelclass(const char* value) {\n  set_has_modelclass();\n  modelclass_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.ModelInfo.modelClass)\n}\ninline void ModelInfo::set_modelclass(const char* value, size_t size) {\n  set_has_modelclass();\n  modelclass_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.ModelInfo.modelClass)\n}\ninline ::std::string* ModelInfo::mutable_modelclass() {\n  set_has_modelclass();\n  // @@protoc_insertion_point(field_mutable:caffe2.ModelInfo.modelClass)\n  return modelclass_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* ModelInfo::release_modelclass() {\n  // @@protoc_insertion_point(field_release:caffe2.ModelInfo.modelClass)\n  clear_has_modelclass();\n  return modelclass_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void 
ModelInfo::set_allocated_modelclass(::std::string* modelclass) {\n  if (modelclass != NULL) {\n    set_has_modelclass();\n  } else {\n    clear_has_modelclass();\n  }\n  modelclass_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), modelclass);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.ModelInfo.modelClass)\n}\n\n// optional string version = 3;\ninline bool ModelInfo::has_version() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void ModelInfo::set_has_version() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void ModelInfo::clear_has_version() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void ModelInfo::clear_version() {\n  version_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_version();\n}\ninline const ::std::string& ModelInfo::version() const {\n  // @@protoc_insertion_point(field_get:caffe2.ModelInfo.version)\n  return version_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ModelInfo::set_version(const ::std::string& value) {\n  set_has_version();\n  version_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.ModelInfo.version)\n}\ninline void ModelInfo::set_version(const char* value) {\n  set_has_version();\n  version_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.ModelInfo.version)\n}\ninline void ModelInfo::set_version(const char* value, size_t size) {\n  set_has_version();\n  version_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.ModelInfo.version)\n}\ninline ::std::string* ModelInfo::mutable_version() {\n  set_has_version();\n  // 
@@protoc_insertion_point(field_mutable:caffe2.ModelInfo.version)\n  return version_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* ModelInfo::release_version() {\n  // @@protoc_insertion_point(field_release:caffe2.ModelInfo.version)\n  clear_has_version();\n  return version_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ModelInfo::set_allocated_version(::std::string* version) {\n  if (version != NULL) {\n    set_has_version();\n  } else {\n    clear_has_version();\n  }\n  version_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), version);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.ModelInfo.version)\n}\n\n// optional string predictorType = 4 [default = \"SINGLE_PREDICTOR\"];\ninline bool ModelInfo::has_predictortype() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void ModelInfo::set_has_predictortype() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void ModelInfo::clear_has_predictortype() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void ModelInfo::clear_predictortype() {\n  predictortype_.ClearToDefaultNoArena(_default_predictortype_);\n  clear_has_predictortype();\n}\ninline const ::std::string& ModelInfo::predictortype() const {\n  // @@protoc_insertion_point(field_get:caffe2.ModelInfo.predictorType)\n  return predictortype_.GetNoArena(_default_predictortype_);\n}\ninline void ModelInfo::set_predictortype(const ::std::string& value) {\n  set_has_predictortype();\n  predictortype_.SetNoArena(_default_predictortype_, value);\n  // @@protoc_insertion_point(field_set:caffe2.ModelInfo.predictorType)\n}\ninline void ModelInfo::set_predictortype(const char* value) {\n  set_has_predictortype();\n  predictortype_.SetNoArena(_default_predictortype_, ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.ModelInfo.predictorType)\n}\ninline void ModelInfo::set_predictortype(const 
char* value, size_t size) {\n  set_has_predictortype();\n  predictortype_.SetNoArena(_default_predictortype_,\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.ModelInfo.predictorType)\n}\ninline ::std::string* ModelInfo::mutable_predictortype() {\n  set_has_predictortype();\n  // @@protoc_insertion_point(field_mutable:caffe2.ModelInfo.predictorType)\n  return predictortype_.MutableNoArena(_default_predictortype_);\n}\ninline ::std::string* ModelInfo::release_predictortype() {\n  // @@protoc_insertion_point(field_release:caffe2.ModelInfo.predictorType)\n  clear_has_predictortype();\n  return predictortype_.ReleaseNoArena(_default_predictortype_);\n}\ninline void ModelInfo::set_allocated_predictortype(::std::string* predictortype) {\n  if (predictortype != NULL) {\n    set_has_predictortype();\n  } else {\n    clear_has_predictortype();\n  }\n  predictortype_.SetAllocatedNoArena(_default_predictortype_, predictortype);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.ModelInfo.predictorType)\n}\n\n// optional string modelId = 5;\ninline bool ModelInfo::has_modelid() const {\n  return (_has_bits_[0] & 0x00000010u) != 0;\n}\ninline void ModelInfo::set_has_modelid() {\n  _has_bits_[0] |= 0x00000010u;\n}\ninline void ModelInfo::clear_has_modelid() {\n  _has_bits_[0] &= ~0x00000010u;\n}\ninline void ModelInfo::clear_modelid() {\n  modelid_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_modelid();\n}\ninline const ::std::string& ModelInfo::modelid() const {\n  // @@protoc_insertion_point(field_get:caffe2.ModelInfo.modelId)\n  return modelid_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ModelInfo::set_modelid(const ::std::string& value) {\n  set_has_modelid();\n  modelid_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // 
@@protoc_insertion_point(field_set:caffe2.ModelInfo.modelId)\n}\ninline void ModelInfo::set_modelid(const char* value) {\n  set_has_modelid();\n  modelid_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.ModelInfo.modelId)\n}\ninline void ModelInfo::set_modelid(const char* value, size_t size) {\n  set_has_modelid();\n  modelid_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.ModelInfo.modelId)\n}\ninline ::std::string* ModelInfo::mutable_modelid() {\n  set_has_modelid();\n  // @@protoc_insertion_point(field_mutable:caffe2.ModelInfo.modelId)\n  return modelid_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* ModelInfo::release_modelid() {\n  // @@protoc_insertion_point(field_release:caffe2.ModelInfo.modelId)\n  clear_has_modelid();\n  return modelid_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ModelInfo::set_allocated_modelid(::std::string* modelid) {\n  if (modelid != NULL) {\n    set_has_modelid();\n  } else {\n    clear_has_modelid();\n  }\n  modelid_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), modelid);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.ModelInfo.modelId)\n}\n\ninline const ModelInfo* ModelInfo::internal_default_instance() {\n  return &ModelInfo_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// BlobsMap\n\n// required string key = 1;\ninline bool BlobsMap::has_key() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void BlobsMap::set_has_key() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void BlobsMap::clear_has_key() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void 
BlobsMap::clear_key() {\n  key_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_key();\n}\ninline const ::std::string& BlobsMap::key() const {\n  // @@protoc_insertion_point(field_get:caffe2.BlobsMap.key)\n  return key_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void BlobsMap::set_key(const ::std::string& value) {\n  set_has_key();\n  key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.BlobsMap.key)\n}\ninline void BlobsMap::set_key(const char* value) {\n  set_has_key();\n  key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.BlobsMap.key)\n}\ninline void BlobsMap::set_key(const char* value, size_t size) {\n  set_has_key();\n  key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.BlobsMap.key)\n}\ninline ::std::string* BlobsMap::mutable_key() {\n  set_has_key();\n  // @@protoc_insertion_point(field_mutable:caffe2.BlobsMap.key)\n  return key_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* BlobsMap::release_key() {\n  // @@protoc_insertion_point(field_release:caffe2.BlobsMap.key)\n  clear_has_key();\n  return key_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void BlobsMap::set_allocated_key(::std::string* key) {\n  if (key != NULL) {\n    set_has_key();\n  } else {\n    clear_has_key();\n  }\n  key_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), key);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.BlobsMap.key)\n}\n\n// repeated string value = 2;\ninline int BlobsMap::value_size() const {\n  return 
value_.size();\n}\ninline void BlobsMap::clear_value() {\n  value_.Clear();\n}\ninline const ::std::string& BlobsMap::value(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.BlobsMap.value)\n  return value_.Get(index);\n}\ninline ::std::string* BlobsMap::mutable_value(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.BlobsMap.value)\n  return value_.Mutable(index);\n}\ninline void BlobsMap::set_value(int index, const ::std::string& value) {\n  // @@protoc_insertion_point(field_set:caffe2.BlobsMap.value)\n  value_.Mutable(index)->assign(value);\n}\ninline void BlobsMap::set_value(int index, const char* value) {\n  value_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:caffe2.BlobsMap.value)\n}\ninline void BlobsMap::set_value(int index, const char* value, size_t size) {\n  value_.Mutable(index)->assign(\n    reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_set_pointer:caffe2.BlobsMap.value)\n}\ninline ::std::string* BlobsMap::add_value() {\n  // @@protoc_insertion_point(field_add_mutable:caffe2.BlobsMap.value)\n  return value_.Add();\n}\ninline void BlobsMap::add_value(const ::std::string& value) {\n  value_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:caffe2.BlobsMap.value)\n}\ninline void BlobsMap::add_value(const char* value) {\n  value_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add_char:caffe2.BlobsMap.value)\n}\ninline void BlobsMap::add_value(const char* value, size_t size) {\n  value_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:caffe2.BlobsMap.value)\n}\ninline const ::google::protobuf::RepeatedPtrField< ::std::string>&\nBlobsMap::value() const {\n  // @@protoc_insertion_point(field_list:caffe2.BlobsMap.value)\n  return value_;\n}\ninline ::google::protobuf::RepeatedPtrField< ::std::string>*\nBlobsMap::mutable_value() {\n  // 
@@protoc_insertion_point(field_mutable_list:caffe2.BlobsMap.value)\n  return &value_;\n}\n\ninline const BlobsMap* BlobsMap::internal_default_instance() {\n  return &BlobsMap_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// NetsMap\n\n// required string key = 1;\ninline bool NetsMap::has_key() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void NetsMap::set_has_key() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void NetsMap::clear_has_key() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void NetsMap::clear_key() {\n  key_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_key();\n}\ninline const ::std::string& NetsMap::key() const {\n  // @@protoc_insertion_point(field_get:caffe2.NetsMap.key)\n  return key_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void NetsMap::set_key(const ::std::string& value) {\n  set_has_key();\n  key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.NetsMap.key)\n}\ninline void NetsMap::set_key(const char* value) {\n  set_has_key();\n  key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.NetsMap.key)\n}\ninline void NetsMap::set_key(const char* value, size_t size) {\n  set_has_key();\n  key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.NetsMap.key)\n}\ninline ::std::string* NetsMap::mutable_key() {\n  set_has_key();\n  // @@protoc_insertion_point(field_mutable:caffe2.NetsMap.key)\n  return key_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* NetsMap::release_key() {\n  // 
@@protoc_insertion_point(field_release:caffe2.NetsMap.key)\n  clear_has_key();\n  return key_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void NetsMap::set_allocated_key(::std::string* key) {\n  if (key != NULL) {\n    set_has_key();\n  } else {\n    clear_has_key();\n  }\n  key_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), key);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.NetsMap.key)\n}\n\n// required .caffe2.NetDef value = 2;\ninline bool NetsMap::has_value() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void NetsMap::set_has_value() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void NetsMap::clear_has_value() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void NetsMap::clear_value() {\n  if (value_ != NULL) value_->::caffe2::NetDef::Clear();\n  clear_has_value();\n}\ninline const ::caffe2::NetDef& NetsMap::value() const {\n  // @@protoc_insertion_point(field_get:caffe2.NetsMap.value)\n  return value_ != NULL ? 
*value_\n                         : *::caffe2::NetDef::internal_default_instance();\n}\ninline ::caffe2::NetDef* NetsMap::mutable_value() {\n  set_has_value();\n  if (value_ == NULL) {\n    value_ = new ::caffe2::NetDef;\n  }\n  // @@protoc_insertion_point(field_mutable:caffe2.NetsMap.value)\n  return value_;\n}\ninline ::caffe2::NetDef* NetsMap::release_value() {\n  // @@protoc_insertion_point(field_release:caffe2.NetsMap.value)\n  clear_has_value();\n  ::caffe2::NetDef* temp = value_;\n  value_ = NULL;\n  return temp;\n}\ninline void NetsMap::set_allocated_value(::caffe2::NetDef* value) {\n  delete value_;\n  value_ = value;\n  if (value) {\n    set_has_value();\n  } else {\n    clear_has_value();\n  }\n  // @@protoc_insertion_point(field_set_allocated:caffe2.NetsMap.value)\n}\n\ninline const NetsMap* NetsMap::internal_default_instance() {\n  return &NetsMap_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// PlansMap\n\n// required string key = 1;\ninline bool PlansMap::has_key() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void PlansMap::set_has_key() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void PlansMap::clear_has_key() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void PlansMap::clear_key() {\n  key_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_key();\n}\ninline const ::std::string& PlansMap::key() const {\n  // @@protoc_insertion_point(field_get:caffe2.PlansMap.key)\n  return key_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void PlansMap::set_key(const ::std::string& value) {\n  set_has_key();\n  key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.PlansMap.key)\n}\ninline void PlansMap::set_key(const char* value) {\n  set_has_key();\n  
key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.PlansMap.key)\n}\ninline void PlansMap::set_key(const char* value, size_t size) {\n  set_has_key();\n  key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.PlansMap.key)\n}\ninline ::std::string* PlansMap::mutable_key() {\n  set_has_key();\n  // @@protoc_insertion_point(field_mutable:caffe2.PlansMap.key)\n  return key_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* PlansMap::release_key() {\n  // @@protoc_insertion_point(field_release:caffe2.PlansMap.key)\n  clear_has_key();\n  return key_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void PlansMap::set_allocated_key(::std::string* key) {\n  if (key != NULL) {\n    set_has_key();\n  } else {\n    clear_has_key();\n  }\n  key_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), key);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PlansMap.key)\n}\n\n// required .caffe2.PlanDef value = 2;\ninline bool PlansMap::has_value() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void PlansMap::set_has_value() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void PlansMap::clear_has_value() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void PlansMap::clear_value() {\n  if (value_ != NULL) value_->::caffe2::PlanDef::Clear();\n  clear_has_value();\n}\ninline const ::caffe2::PlanDef& PlansMap::value() const {\n  // @@protoc_insertion_point(field_get:caffe2.PlansMap.value)\n  return value_ != NULL ? 
*value_\n                         : *::caffe2::PlanDef::internal_default_instance();\n}\ninline ::caffe2::PlanDef* PlansMap::mutable_value() {\n  set_has_value();\n  if (value_ == NULL) {\n    value_ = new ::caffe2::PlanDef;\n  }\n  // @@protoc_insertion_point(field_mutable:caffe2.PlansMap.value)\n  return value_;\n}\ninline ::caffe2::PlanDef* PlansMap::release_value() {\n  // @@protoc_insertion_point(field_release:caffe2.PlansMap.value)\n  clear_has_value();\n  ::caffe2::PlanDef* temp = value_;\n  value_ = NULL;\n  return temp;\n}\ninline void PlansMap::set_allocated_value(::caffe2::PlanDef* value) {\n  delete value_;\n  value_ = value;\n  if (value) {\n    set_has_value();\n  } else {\n    clear_has_value();\n  }\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PlansMap.value)\n}\n\ninline const PlansMap* PlansMap::internal_default_instance() {\n  return &PlansMap_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// StringMap\n\n// required string key = 1;\ninline bool StringMap::has_key() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void StringMap::set_has_key() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void StringMap::clear_has_key() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void StringMap::clear_key() {\n  key_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_key();\n}\ninline const ::std::string& StringMap::key() const {\n  // @@protoc_insertion_point(field_get:caffe2.StringMap.key)\n  return key_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void StringMap::set_key(const ::std::string& value) {\n  set_has_key();\n  key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.StringMap.key)\n}\ninline void StringMap::set_key(const char* value) {\n  set_has_key();\n  
key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.StringMap.key)\n}\ninline void StringMap::set_key(const char* value, size_t size) {\n  set_has_key();\n  key_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.StringMap.key)\n}\ninline ::std::string* StringMap::mutable_key() {\n  set_has_key();\n  // @@protoc_insertion_point(field_mutable:caffe2.StringMap.key)\n  return key_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* StringMap::release_key() {\n  // @@protoc_insertion_point(field_release:caffe2.StringMap.key)\n  clear_has_key();\n  return key_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void StringMap::set_allocated_key(::std::string* key) {\n  if (key != NULL) {\n    set_has_key();\n  } else {\n    clear_has_key();\n  }\n  key_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), key);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.StringMap.key)\n}\n\n// required string value = 2;\ninline bool StringMap::has_value() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void StringMap::set_has_value() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void StringMap::clear_has_value() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void StringMap::clear_value() {\n  value_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_value();\n}\ninline const ::std::string& StringMap::value() const {\n  // @@protoc_insertion_point(field_get:caffe2.StringMap.value)\n  return value_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void StringMap::set_value(const ::std::string& value) {\n  
set_has_value();\n  value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.StringMap.value)\n}\ninline void StringMap::set_value(const char* value) {\n  set_has_value();\n  value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.StringMap.value)\n}\ninline void StringMap::set_value(const char* value, size_t size) {\n  set_has_value();\n  value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.StringMap.value)\n}\ninline ::std::string* StringMap::mutable_value() {\n  set_has_value();\n  // @@protoc_insertion_point(field_mutable:caffe2.StringMap.value)\n  return value_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* StringMap::release_value() {\n  // @@protoc_insertion_point(field_release:caffe2.StringMap.value)\n  clear_has_value();\n  return value_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void StringMap::set_allocated_value(::std::string* value) {\n  if (value != NULL) {\n    set_has_value();\n  } else {\n    clear_has_value();\n  }\n  value_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.StringMap.value)\n}\n\ninline const StringMap* StringMap::internal_default_instance() {\n  return &StringMap_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// MetaNetDef\n\n// repeated .caffe2.BlobsMap blobs = 1;\ninline int MetaNetDef::blobs_size() const {\n  return blobs_.size();\n}\ninline void MetaNetDef::clear_blobs() {\n  blobs_.Clear();\n}\ninline const ::caffe2::BlobsMap& MetaNetDef::blobs(int 
index) const {\n  // @@protoc_insertion_point(field_get:caffe2.MetaNetDef.blobs)\n  return blobs_.Get(index);\n}\ninline ::caffe2::BlobsMap* MetaNetDef::mutable_blobs(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.MetaNetDef.blobs)\n  return blobs_.Mutable(index);\n}\ninline ::caffe2::BlobsMap* MetaNetDef::add_blobs() {\n  // @@protoc_insertion_point(field_add:caffe2.MetaNetDef.blobs)\n  return blobs_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::BlobsMap >*\nMetaNetDef::mutable_blobs() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.MetaNetDef.blobs)\n  return &blobs_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::BlobsMap >&\nMetaNetDef::blobs() const {\n  // @@protoc_insertion_point(field_list:caffe2.MetaNetDef.blobs)\n  return blobs_;\n}\n\n// repeated .caffe2.NetsMap nets = 2;\ninline int MetaNetDef::nets_size() const {\n  return nets_.size();\n}\ninline void MetaNetDef::clear_nets() {\n  nets_.Clear();\n}\ninline const ::caffe2::NetsMap& MetaNetDef::nets(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.MetaNetDef.nets)\n  return nets_.Get(index);\n}\ninline ::caffe2::NetsMap* MetaNetDef::mutable_nets(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.MetaNetDef.nets)\n  return nets_.Mutable(index);\n}\ninline ::caffe2::NetsMap* MetaNetDef::add_nets() {\n  // @@protoc_insertion_point(field_add:caffe2.MetaNetDef.nets)\n  return nets_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::NetsMap >*\nMetaNetDef::mutable_nets() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.MetaNetDef.nets)\n  return &nets_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::NetsMap >&\nMetaNetDef::nets() const {\n  // @@protoc_insertion_point(field_list:caffe2.MetaNetDef.nets)\n  return nets_;\n}\n\n// optional .caffe2.ModelInfo modelInfo = 3;\ninline bool MetaNetDef::has_modelinfo() const {\n  return (_has_bits_[0] & 0x00000004u) != 
0;\n}\ninline void MetaNetDef::set_has_modelinfo() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void MetaNetDef::clear_has_modelinfo() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void MetaNetDef::clear_modelinfo() {\n  if (modelinfo_ != NULL) modelinfo_->::caffe2::ModelInfo::Clear();\n  clear_has_modelinfo();\n}\ninline const ::caffe2::ModelInfo& MetaNetDef::modelinfo() const {\n  // @@protoc_insertion_point(field_get:caffe2.MetaNetDef.modelInfo)\n  return modelinfo_ != NULL ? *modelinfo_\n                         : *::caffe2::ModelInfo::internal_default_instance();\n}\ninline ::caffe2::ModelInfo* MetaNetDef::mutable_modelinfo() {\n  set_has_modelinfo();\n  if (modelinfo_ == NULL) {\n    modelinfo_ = new ::caffe2::ModelInfo;\n  }\n  // @@protoc_insertion_point(field_mutable:caffe2.MetaNetDef.modelInfo)\n  return modelinfo_;\n}\ninline ::caffe2::ModelInfo* MetaNetDef::release_modelinfo() {\n  // @@protoc_insertion_point(field_release:caffe2.MetaNetDef.modelInfo)\n  clear_has_modelinfo();\n  ::caffe2::ModelInfo* temp = modelinfo_;\n  modelinfo_ = NULL;\n  return temp;\n}\ninline void MetaNetDef::set_allocated_modelinfo(::caffe2::ModelInfo* modelinfo) {\n  delete modelinfo_;\n  modelinfo_ = modelinfo;\n  if (modelinfo) {\n    set_has_modelinfo();\n  } else {\n    clear_has_modelinfo();\n  }\n  // @@protoc_insertion_point(field_set_allocated:caffe2.MetaNetDef.modelInfo)\n}\n\n// repeated .caffe2.PlansMap plans = 4;\ninline int MetaNetDef::plans_size() const {\n  return plans_.size();\n}\ninline void MetaNetDef::clear_plans() {\n  plans_.Clear();\n}\ninline const ::caffe2::PlansMap& MetaNetDef::plans(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.MetaNetDef.plans)\n  return plans_.Get(index);\n}\ninline ::caffe2::PlansMap* MetaNetDef::mutable_plans(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.MetaNetDef.plans)\n  return plans_.Mutable(index);\n}\ninline ::caffe2::PlansMap* MetaNetDef::add_plans() {\n  // 
@@protoc_insertion_point(field_add:caffe2.MetaNetDef.plans)\n  return plans_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::PlansMap >*\nMetaNetDef::mutable_plans() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.MetaNetDef.plans)\n  return &plans_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::PlansMap >&\nMetaNetDef::plans() const {\n  // @@protoc_insertion_point(field_list:caffe2.MetaNetDef.plans)\n  return plans_;\n}\n\n// repeated .caffe2.StringMap applicationSpecificInfo = 5;\ninline int MetaNetDef::applicationspecificinfo_size() const {\n  return applicationspecificinfo_.size();\n}\ninline void MetaNetDef::clear_applicationspecificinfo() {\n  applicationspecificinfo_.Clear();\n}\ninline const ::caffe2::StringMap& MetaNetDef::applicationspecificinfo(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.MetaNetDef.applicationSpecificInfo)\n  return applicationspecificinfo_.Get(index);\n}\ninline ::caffe2::StringMap* MetaNetDef::mutable_applicationspecificinfo(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.MetaNetDef.applicationSpecificInfo)\n  return applicationspecificinfo_.Mutable(index);\n}\ninline ::caffe2::StringMap* MetaNetDef::add_applicationspecificinfo() {\n  // @@protoc_insertion_point(field_add:caffe2.MetaNetDef.applicationSpecificInfo)\n  return applicationspecificinfo_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::StringMap >*\nMetaNetDef::mutable_applicationspecificinfo() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.MetaNetDef.applicationSpecificInfo)\n  return &applicationspecificinfo_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::StringMap >&\nMetaNetDef::applicationspecificinfo() const {\n  // @@protoc_insertion_point(field_list:caffe2.MetaNetDef.applicationSpecificInfo)\n  return applicationspecificinfo_;\n}\n\ninline const MetaNetDef* MetaNetDef::internal_default_instance() {\n  return 
&MetaNetDef_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace caffe2\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_caffe2_2fproto_2fmetanet_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/proto/predictor_consts.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: caffe2/proto/predictor_consts.proto\n\n#ifndef PROTOBUF_caffe2_2fproto_2fpredictor_5fconsts_2eproto__INCLUDED\n#define PROTOBUF_caffe2_2fproto_2fpredictor_5fconsts_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/unknown_field_set.h>\n// @@protoc_insertion_point(includes)\n\nnamespace caffe2 {\n\n// Internal implementation detail -- do not call these.\nvoid protobuf_AddDesc_caffe2_2fproto_2fpredictor_5fconsts_2eproto();\nvoid protobuf_InitDefaults_caffe2_2fproto_2fpredictor_5fconsts_2eproto();\nvoid protobuf_AssignDesc_caffe2_2fproto_2fpredictor_5fconsts_2eproto();\nvoid protobuf_ShutdownFile_caffe2_2fproto_2fpredictor_5fconsts_2eproto();\n\nclass PredictorConsts;\n\n// ===================================================================\n\nclass PredictorConsts : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.PredictorConsts) */ {\n public:\n  PredictorConsts();\n  virtual ~PredictorConsts();\n\n  PredictorConsts(const PredictorConsts& from);\n\n  inline PredictorConsts& operator=(const PredictorConsts& from) {\n    
CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const PredictorConsts& default_instance();\n\n  static const PredictorConsts* internal_default_instance();\n\n  void Swap(PredictorConsts* other);\n\n  // implements Message ----------------------------------------------\n\n  inline PredictorConsts* New() const { return New(NULL); }\n\n  PredictorConsts* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const PredictorConsts& from);\n  void MergeFrom(const PredictorConsts& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(PredictorConsts* other);\n  void UnsafeMergeFrom(const PredictorConsts& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return 
_internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string META_NET_DEF = 1 [default = \"!!META_NET_DEF\"];\n  bool has_meta_net_def() const;\n  void clear_meta_net_def();\n  static const int kMETANETDEFFieldNumber = 1;\n  const ::std::string& meta_net_def() const;\n  void set_meta_net_def(const ::std::string& value);\n  void set_meta_net_def(const char* value);\n  void set_meta_net_def(const char* value, size_t size);\n  ::std::string* mutable_meta_net_def();\n  ::std::string* release_meta_net_def();\n  void set_allocated_meta_net_def(::std::string* meta_net_def);\n\n  // optional string PREDICTOR_DBREADER = 2 [default = \"!!PREDICTOR_DBREADER\"];\n  bool has_predictor_dbreader() const;\n  void clear_predictor_dbreader();\n  static const int kPREDICTORDBREADERFieldNumber = 2;\n  const ::std::string& predictor_dbreader() const;\n  void set_predictor_dbreader(const ::std::string& value);\n  void set_predictor_dbreader(const char* value);\n  void set_predictor_dbreader(const char* value, size_t size);\n  ::std::string* mutable_predictor_dbreader();\n  ::std::string* release_predictor_dbreader();\n  void set_allocated_predictor_dbreader(::std::string* predictor_dbreader);\n\n  // optional string PARAMETERS_BLOB_TYPE = 3 [default = \"PARAMETERS_BLOB_TYPE\"];\n  bool has_parameters_blob_type() const;\n  void clear_parameters_blob_type();\n  static const int kPARAMETERSBLOBTYPEFieldNumber = 3;\n  const ::std::string& parameters_blob_type() const;\n  void set_parameters_blob_type(const ::std::string& value);\n  void set_parameters_blob_type(const char* value);\n  void set_parameters_blob_type(const char* value, size_t size);\n  ::std::string* mutable_parameters_blob_type();\n  ::std::string* release_parameters_blob_type();\n  void 
set_allocated_parameters_blob_type(::std::string* parameters_blob_type);\n\n  // optional string INPUTS_BLOB_TYPE = 4 [default = \"INPUTS_BLOB_TYPE\"];\n  bool has_inputs_blob_type() const;\n  void clear_inputs_blob_type();\n  static const int kINPUTSBLOBTYPEFieldNumber = 4;\n  const ::std::string& inputs_blob_type() const;\n  void set_inputs_blob_type(const ::std::string& value);\n  void set_inputs_blob_type(const char* value);\n  void set_inputs_blob_type(const char* value, size_t size);\n  ::std::string* mutable_inputs_blob_type();\n  ::std::string* release_inputs_blob_type();\n  void set_allocated_inputs_blob_type(::std::string* inputs_blob_type);\n\n  // optional string OUTPUTS_BLOB_TYPE = 5 [default = \"OUTPUTS_BLOB_TYPE\"];\n  bool has_outputs_blob_type() const;\n  void clear_outputs_blob_type();\n  static const int kOUTPUTSBLOBTYPEFieldNumber = 5;\n  const ::std::string& outputs_blob_type() const;\n  void set_outputs_blob_type(const ::std::string& value);\n  void set_outputs_blob_type(const char* value);\n  void set_outputs_blob_type(const char* value, size_t size);\n  ::std::string* mutable_outputs_blob_type();\n  ::std::string* release_outputs_blob_type();\n  void set_allocated_outputs_blob_type(::std::string* outputs_blob_type);\n\n  // optional string GLOBAL_INIT_NET_TYPE = 6 [default = \"GLOBAL_INIT_NET_TYPE\"];\n  bool has_global_init_net_type() const;\n  void clear_global_init_net_type();\n  static const int kGLOBALINITNETTYPEFieldNumber = 6;\n  const ::std::string& global_init_net_type() const;\n  void set_global_init_net_type(const ::std::string& value);\n  void set_global_init_net_type(const char* value);\n  void set_global_init_net_type(const char* value, size_t size);\n  ::std::string* mutable_global_init_net_type();\n  ::std::string* release_global_init_net_type();\n  void set_allocated_global_init_net_type(::std::string* global_init_net_type);\n\n  // optional string PREDICT_INIT_NET_TYPE = 7 [default = \"PREDICT_INIT_NET_TYPE\"];\n  bool 
has_predict_init_net_type() const;\n  void clear_predict_init_net_type();\n  static const int kPREDICTINITNETTYPEFieldNumber = 7;\n  const ::std::string& predict_init_net_type() const;\n  void set_predict_init_net_type(const ::std::string& value);\n  void set_predict_init_net_type(const char* value);\n  void set_predict_init_net_type(const char* value, size_t size);\n  ::std::string* mutable_predict_init_net_type();\n  ::std::string* release_predict_init_net_type();\n  void set_allocated_predict_init_net_type(::std::string* predict_init_net_type);\n\n  // optional string PREDICT_NET_TYPE = 8 [default = \"PREDICT_NET_TYPE\"];\n  bool has_predict_net_type() const;\n  void clear_predict_net_type();\n  static const int kPREDICTNETTYPEFieldNumber = 8;\n  const ::std::string& predict_net_type() const;\n  void set_predict_net_type(const ::std::string& value);\n  void set_predict_net_type(const char* value);\n  void set_predict_net_type(const char* value, size_t size);\n  ::std::string* mutable_predict_net_type();\n  ::std::string* release_predict_net_type();\n  void set_allocated_predict_net_type(::std::string* predict_net_type);\n\n  // optional string SINGLE_PREDICTOR = 9 [default = \"SINGLE_PREDICTOR\"];\n  bool has_single_predictor() const;\n  void clear_single_predictor();\n  static const int kSINGLEPREDICTORFieldNumber = 9;\n  const ::std::string& single_predictor() const;\n  void set_single_predictor(const ::std::string& value);\n  void set_single_predictor(const char* value);\n  void set_single_predictor(const char* value, size_t size);\n  ::std::string* mutable_single_predictor();\n  ::std::string* release_single_predictor();\n  void set_allocated_single_predictor(::std::string* single_predictor);\n\n  // optional string MULTI_PREDICTOR = 10 [default = \"MULTI_PREDICTOR\"];\n  bool has_multi_predictor() const;\n  void clear_multi_predictor();\n  static const int kMULTIPREDICTORFieldNumber = 10;\n  const ::std::string& multi_predictor() const;\n  void 
set_multi_predictor(const ::std::string& value);\n  void set_multi_predictor(const char* value);\n  void set_multi_predictor(const char* value, size_t size);\n  ::std::string* mutable_multi_predictor();\n  ::std::string* release_multi_predictor();\n  void set_allocated_multi_predictor(::std::string* multi_predictor);\n\n  // optional string TRAIN_INIT_PLAN_TYPE = 11 [default = \"TRAIN_INIT_PLAN_TYPE\"];\n  bool has_train_init_plan_type() const;\n  void clear_train_init_plan_type();\n  static const int kTRAININITPLANTYPEFieldNumber = 11;\n  const ::std::string& train_init_plan_type() const;\n  void set_train_init_plan_type(const ::std::string& value);\n  void set_train_init_plan_type(const char* value);\n  void set_train_init_plan_type(const char* value, size_t size);\n  ::std::string* mutable_train_init_plan_type();\n  ::std::string* release_train_init_plan_type();\n  void set_allocated_train_init_plan_type(::std::string* train_init_plan_type);\n\n  // optional string TRAIN_PLAN_TYPE = 12 [default = \"TRAIN_PLAN_TYPE\"];\n  bool has_train_plan_type() const;\n  void clear_train_plan_type();\n  static const int kTRAINPLANTYPEFieldNumber = 12;\n  const ::std::string& train_plan_type() const;\n  void set_train_plan_type(const ::std::string& value);\n  void set_train_plan_type(const char* value);\n  void set_train_plan_type(const char* value, size_t size);\n  ::std::string* mutable_train_plan_type();\n  ::std::string* release_train_plan_type();\n  void set_allocated_train_plan_type(::std::string* train_plan_type);\n\n  // @@protoc_insertion_point(class_scope:caffe2.PredictorConsts)\n private:\n  inline void set_has_meta_net_def();\n  inline void clear_has_meta_net_def();\n  inline void set_has_predictor_dbreader();\n  inline void clear_has_predictor_dbreader();\n  inline void set_has_parameters_blob_type();\n  inline void clear_has_parameters_blob_type();\n  inline void set_has_inputs_blob_type();\n  inline void clear_has_inputs_blob_type();\n  inline void 
set_has_outputs_blob_type();\n  inline void clear_has_outputs_blob_type();\n  inline void set_has_global_init_net_type();\n  inline void clear_has_global_init_net_type();\n  inline void set_has_predict_init_net_type();\n  inline void clear_has_predict_init_net_type();\n  inline void set_has_predict_net_type();\n  inline void clear_has_predict_net_type();\n  inline void set_has_single_predictor();\n  inline void clear_has_single_predictor();\n  inline void set_has_multi_predictor();\n  inline void clear_has_multi_predictor();\n  inline void set_has_train_init_plan_type();\n  inline void clear_has_train_init_plan_type();\n  inline void set_has_train_plan_type();\n  inline void clear_has_train_plan_type();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  static ::std::string* _default_meta_net_def_;\n  ::google::protobuf::internal::ArenaStringPtr meta_net_def_;\n  static ::std::string* _default_predictor_dbreader_;\n  ::google::protobuf::internal::ArenaStringPtr predictor_dbreader_;\n  static ::std::string* _default_parameters_blob_type_;\n  ::google::protobuf::internal::ArenaStringPtr parameters_blob_type_;\n  static ::std::string* _default_inputs_blob_type_;\n  ::google::protobuf::internal::ArenaStringPtr inputs_blob_type_;\n  static ::std::string* _default_outputs_blob_type_;\n  ::google::protobuf::internal::ArenaStringPtr outputs_blob_type_;\n  static ::std::string* _default_global_init_net_type_;\n  ::google::protobuf::internal::ArenaStringPtr global_init_net_type_;\n  static ::std::string* _default_predict_init_net_type_;\n  ::google::protobuf::internal::ArenaStringPtr predict_init_net_type_;\n  static ::std::string* _default_predict_net_type_;\n  ::google::protobuf::internal::ArenaStringPtr predict_net_type_;\n  static ::std::string* _default_single_predictor_;\n  ::google::protobuf::internal::ArenaStringPtr single_predictor_;\n  static 
::std::string* _default_multi_predictor_;\n  ::google::protobuf::internal::ArenaStringPtr multi_predictor_;\n  static ::std::string* _default_train_init_plan_type_;\n  ::google::protobuf::internal::ArenaStringPtr train_init_plan_type_;\n  static ::std::string* _default_train_plan_type_;\n  ::google::protobuf::internal::ArenaStringPtr train_plan_type_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fpredictor_5fconsts_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fpredictor_5fconsts_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fpredictor_5fconsts_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fpredictor_5fconsts_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<PredictorConsts> PredictorConsts_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// PredictorConsts\n\n// optional string META_NET_DEF = 1 [default = \"!!META_NET_DEF\"];\ninline bool PredictorConsts::has_meta_net_def() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void PredictorConsts::set_has_meta_net_def() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void PredictorConsts::clear_has_meta_net_def() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void PredictorConsts::clear_meta_net_def() {\n  meta_net_def_.ClearToDefaultNoArena(_default_meta_net_def_);\n  clear_has_meta_net_def();\n}\ninline const ::std::string& PredictorConsts::meta_net_def() const {\n  // @@protoc_insertion_point(field_get:caffe2.PredictorConsts.META_NET_DEF)\n  return meta_net_def_.GetNoArena(_default_meta_net_def_);\n}\ninline void PredictorConsts::set_meta_net_def(const ::std::string& value) {\n  set_has_meta_net_def();\n  meta_net_def_.SetNoArena(_default_meta_net_def_, value);\n  // 
@@protoc_insertion_point(field_set:caffe2.PredictorConsts.META_NET_DEF)\n}\ninline void PredictorConsts::set_meta_net_def(const char* value) {\n  set_has_meta_net_def();\n  meta_net_def_.SetNoArena(_default_meta_net_def_, ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.PredictorConsts.META_NET_DEF)\n}\ninline void PredictorConsts::set_meta_net_def(const char* value, size_t size) {\n  set_has_meta_net_def();\n  meta_net_def_.SetNoArena(_default_meta_net_def_,\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.PredictorConsts.META_NET_DEF)\n}\ninline ::std::string* PredictorConsts::mutable_meta_net_def() {\n  set_has_meta_net_def();\n  // @@protoc_insertion_point(field_mutable:caffe2.PredictorConsts.META_NET_DEF)\n  return meta_net_def_.MutableNoArena(_default_meta_net_def_);\n}\ninline ::std::string* PredictorConsts::release_meta_net_def() {\n  // @@protoc_insertion_point(field_release:caffe2.PredictorConsts.META_NET_DEF)\n  clear_has_meta_net_def();\n  return meta_net_def_.ReleaseNoArena(_default_meta_net_def_);\n}\ninline void PredictorConsts::set_allocated_meta_net_def(::std::string* meta_net_def) {\n  if (meta_net_def != NULL) {\n    set_has_meta_net_def();\n  } else {\n    clear_has_meta_net_def();\n  }\n  meta_net_def_.SetAllocatedNoArena(_default_meta_net_def_, meta_net_def);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PredictorConsts.META_NET_DEF)\n}\n\n// optional string PREDICTOR_DBREADER = 2 [default = \"!!PREDICTOR_DBREADER\"];\ninline bool PredictorConsts::has_predictor_dbreader() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void PredictorConsts::set_has_predictor_dbreader() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void PredictorConsts::clear_has_predictor_dbreader() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void PredictorConsts::clear_predictor_dbreader() {\n  
predictor_dbreader_.ClearToDefaultNoArena(_default_predictor_dbreader_);\n  clear_has_predictor_dbreader();\n}\ninline const ::std::string& PredictorConsts::predictor_dbreader() const {\n  // @@protoc_insertion_point(field_get:caffe2.PredictorConsts.PREDICTOR_DBREADER)\n  return predictor_dbreader_.GetNoArena(_default_predictor_dbreader_);\n}\ninline void PredictorConsts::set_predictor_dbreader(const ::std::string& value) {\n  set_has_predictor_dbreader();\n  predictor_dbreader_.SetNoArena(_default_predictor_dbreader_, value);\n  // @@protoc_insertion_point(field_set:caffe2.PredictorConsts.PREDICTOR_DBREADER)\n}\ninline void PredictorConsts::set_predictor_dbreader(const char* value) {\n  set_has_predictor_dbreader();\n  predictor_dbreader_.SetNoArena(_default_predictor_dbreader_, ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.PredictorConsts.PREDICTOR_DBREADER)\n}\ninline void PredictorConsts::set_predictor_dbreader(const char* value, size_t size) {\n  set_has_predictor_dbreader();\n  predictor_dbreader_.SetNoArena(_default_predictor_dbreader_,\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.PredictorConsts.PREDICTOR_DBREADER)\n}\ninline ::std::string* PredictorConsts::mutable_predictor_dbreader() {\n  set_has_predictor_dbreader();\n  // @@protoc_insertion_point(field_mutable:caffe2.PredictorConsts.PREDICTOR_DBREADER)\n  return predictor_dbreader_.MutableNoArena(_default_predictor_dbreader_);\n}\ninline ::std::string* PredictorConsts::release_predictor_dbreader() {\n  // @@protoc_insertion_point(field_release:caffe2.PredictorConsts.PREDICTOR_DBREADER)\n  clear_has_predictor_dbreader();\n  return predictor_dbreader_.ReleaseNoArena(_default_predictor_dbreader_);\n}\ninline void PredictorConsts::set_allocated_predictor_dbreader(::std::string* predictor_dbreader) {\n  if (predictor_dbreader != NULL) {\n    set_has_predictor_dbreader();\n  } else {\n    
clear_has_predictor_dbreader();\n  }\n  predictor_dbreader_.SetAllocatedNoArena(_default_predictor_dbreader_, predictor_dbreader);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PredictorConsts.PREDICTOR_DBREADER)\n}\n\n// optional string PARAMETERS_BLOB_TYPE = 3 [default = \"PARAMETERS_BLOB_TYPE\"];\ninline bool PredictorConsts::has_parameters_blob_type() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void PredictorConsts::set_has_parameters_blob_type() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void PredictorConsts::clear_has_parameters_blob_type() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void PredictorConsts::clear_parameters_blob_type() {\n  parameters_blob_type_.ClearToDefaultNoArena(_default_parameters_blob_type_);\n  clear_has_parameters_blob_type();\n}\ninline const ::std::string& PredictorConsts::parameters_blob_type() const {\n  // @@protoc_insertion_point(field_get:caffe2.PredictorConsts.PARAMETERS_BLOB_TYPE)\n  return parameters_blob_type_.GetNoArena(_default_parameters_blob_type_);\n}\ninline void PredictorConsts::set_parameters_blob_type(const ::std::string& value) {\n  set_has_parameters_blob_type();\n  parameters_blob_type_.SetNoArena(_default_parameters_blob_type_, value);\n  // @@protoc_insertion_point(field_set:caffe2.PredictorConsts.PARAMETERS_BLOB_TYPE)\n}\ninline void PredictorConsts::set_parameters_blob_type(const char* value) {\n  set_has_parameters_blob_type();\n  parameters_blob_type_.SetNoArena(_default_parameters_blob_type_, ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.PredictorConsts.PARAMETERS_BLOB_TYPE)\n}\ninline void PredictorConsts::set_parameters_blob_type(const char* value, size_t size) {\n  set_has_parameters_blob_type();\n  parameters_blob_type_.SetNoArena(_default_parameters_blob_type_,\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.PredictorConsts.PARAMETERS_BLOB_TYPE)\n}\ninline 
::std::string* PredictorConsts::mutable_parameters_blob_type() {\n  set_has_parameters_blob_type();\n  // @@protoc_insertion_point(field_mutable:caffe2.PredictorConsts.PARAMETERS_BLOB_TYPE)\n  return parameters_blob_type_.MutableNoArena(_default_parameters_blob_type_);\n}\ninline ::std::string* PredictorConsts::release_parameters_blob_type() {\n  // @@protoc_insertion_point(field_release:caffe2.PredictorConsts.PARAMETERS_BLOB_TYPE)\n  clear_has_parameters_blob_type();\n  return parameters_blob_type_.ReleaseNoArena(_default_parameters_blob_type_);\n}\ninline void PredictorConsts::set_allocated_parameters_blob_type(::std::string* parameters_blob_type) {\n  if (parameters_blob_type != NULL) {\n    set_has_parameters_blob_type();\n  } else {\n    clear_has_parameters_blob_type();\n  }\n  parameters_blob_type_.SetAllocatedNoArena(_default_parameters_blob_type_, parameters_blob_type);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PredictorConsts.PARAMETERS_BLOB_TYPE)\n}\n\n// optional string INPUTS_BLOB_TYPE = 4 [default = \"INPUTS_BLOB_TYPE\"];\ninline bool PredictorConsts::has_inputs_blob_type() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void PredictorConsts::set_has_inputs_blob_type() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void PredictorConsts::clear_has_inputs_blob_type() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void PredictorConsts::clear_inputs_blob_type() {\n  inputs_blob_type_.ClearToDefaultNoArena(_default_inputs_blob_type_);\n  clear_has_inputs_blob_type();\n}\ninline const ::std::string& PredictorConsts::inputs_blob_type() const {\n  // @@protoc_insertion_point(field_get:caffe2.PredictorConsts.INPUTS_BLOB_TYPE)\n  return inputs_blob_type_.GetNoArena(_default_inputs_blob_type_);\n}\ninline void PredictorConsts::set_inputs_blob_type(const ::std::string& value) {\n  set_has_inputs_blob_type();\n  inputs_blob_type_.SetNoArena(_default_inputs_blob_type_, value);\n  // 
@@protoc_insertion_point(field_set:caffe2.PredictorConsts.INPUTS_BLOB_TYPE)\n}\ninline void PredictorConsts::set_inputs_blob_type(const char* value) {\n  set_has_inputs_blob_type();\n  inputs_blob_type_.SetNoArena(_default_inputs_blob_type_, ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.PredictorConsts.INPUTS_BLOB_TYPE)\n}\ninline void PredictorConsts::set_inputs_blob_type(const char* value, size_t size) {\n  set_has_inputs_blob_type();\n  inputs_blob_type_.SetNoArena(_default_inputs_blob_type_,\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.PredictorConsts.INPUTS_BLOB_TYPE)\n}\ninline ::std::string* PredictorConsts::mutable_inputs_blob_type() {\n  set_has_inputs_blob_type();\n  // @@protoc_insertion_point(field_mutable:caffe2.PredictorConsts.INPUTS_BLOB_TYPE)\n  return inputs_blob_type_.MutableNoArena(_default_inputs_blob_type_);\n}\ninline ::std::string* PredictorConsts::release_inputs_blob_type() {\n  // @@protoc_insertion_point(field_release:caffe2.PredictorConsts.INPUTS_BLOB_TYPE)\n  clear_has_inputs_blob_type();\n  return inputs_blob_type_.ReleaseNoArena(_default_inputs_blob_type_);\n}\ninline void PredictorConsts::set_allocated_inputs_blob_type(::std::string* inputs_blob_type) {\n  if (inputs_blob_type != NULL) {\n    set_has_inputs_blob_type();\n  } else {\n    clear_has_inputs_blob_type();\n  }\n  inputs_blob_type_.SetAllocatedNoArena(_default_inputs_blob_type_, inputs_blob_type);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PredictorConsts.INPUTS_BLOB_TYPE)\n}\n\n// optional string OUTPUTS_BLOB_TYPE = 5 [default = \"OUTPUTS_BLOB_TYPE\"];\ninline bool PredictorConsts::has_outputs_blob_type() const {\n  return (_has_bits_[0] & 0x00000010u) != 0;\n}\ninline void PredictorConsts::set_has_outputs_blob_type() {\n  _has_bits_[0] |= 0x00000010u;\n}\ninline void PredictorConsts::clear_has_outputs_blob_type() {\n  _has_bits_[0] &= 
~0x00000010u;\n}\ninline void PredictorConsts::clear_outputs_blob_type() {\n  outputs_blob_type_.ClearToDefaultNoArena(_default_outputs_blob_type_);\n  clear_has_outputs_blob_type();\n}\ninline const ::std::string& PredictorConsts::outputs_blob_type() const {\n  // @@protoc_insertion_point(field_get:caffe2.PredictorConsts.OUTPUTS_BLOB_TYPE)\n  return outputs_blob_type_.GetNoArena(_default_outputs_blob_type_);\n}\ninline void PredictorConsts::set_outputs_blob_type(const ::std::string& value) {\n  set_has_outputs_blob_type();\n  outputs_blob_type_.SetNoArena(_default_outputs_blob_type_, value);\n  // @@protoc_insertion_point(field_set:caffe2.PredictorConsts.OUTPUTS_BLOB_TYPE)\n}\ninline void PredictorConsts::set_outputs_blob_type(const char* value) {\n  set_has_outputs_blob_type();\n  outputs_blob_type_.SetNoArena(_default_outputs_blob_type_, ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.PredictorConsts.OUTPUTS_BLOB_TYPE)\n}\ninline void PredictorConsts::set_outputs_blob_type(const char* value, size_t size) {\n  set_has_outputs_blob_type();\n  outputs_blob_type_.SetNoArena(_default_outputs_blob_type_,\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.PredictorConsts.OUTPUTS_BLOB_TYPE)\n}\ninline ::std::string* PredictorConsts::mutable_outputs_blob_type() {\n  set_has_outputs_blob_type();\n  // @@protoc_insertion_point(field_mutable:caffe2.PredictorConsts.OUTPUTS_BLOB_TYPE)\n  return outputs_blob_type_.MutableNoArena(_default_outputs_blob_type_);\n}\ninline ::std::string* PredictorConsts::release_outputs_blob_type() {\n  // @@protoc_insertion_point(field_release:caffe2.PredictorConsts.OUTPUTS_BLOB_TYPE)\n  clear_has_outputs_blob_type();\n  return outputs_blob_type_.ReleaseNoArena(_default_outputs_blob_type_);\n}\ninline void PredictorConsts::set_allocated_outputs_blob_type(::std::string* outputs_blob_type) {\n  if (outputs_blob_type != NULL) {\n    
set_has_outputs_blob_type();\n  } else {\n    clear_has_outputs_blob_type();\n  }\n  outputs_blob_type_.SetAllocatedNoArena(_default_outputs_blob_type_, outputs_blob_type);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PredictorConsts.OUTPUTS_BLOB_TYPE)\n}\n\n// optional string GLOBAL_INIT_NET_TYPE = 6 [default = \"GLOBAL_INIT_NET_TYPE\"];\ninline bool PredictorConsts::has_global_init_net_type() const {\n  return (_has_bits_[0] & 0x00000020u) != 0;\n}\ninline void PredictorConsts::set_has_global_init_net_type() {\n  _has_bits_[0] |= 0x00000020u;\n}\ninline void PredictorConsts::clear_has_global_init_net_type() {\n  _has_bits_[0] &= ~0x00000020u;\n}\ninline void PredictorConsts::clear_global_init_net_type() {\n  global_init_net_type_.ClearToDefaultNoArena(_default_global_init_net_type_);\n  clear_has_global_init_net_type();\n}\ninline const ::std::string& PredictorConsts::global_init_net_type() const {\n  // @@protoc_insertion_point(field_get:caffe2.PredictorConsts.GLOBAL_INIT_NET_TYPE)\n  return global_init_net_type_.GetNoArena(_default_global_init_net_type_);\n}\ninline void PredictorConsts::set_global_init_net_type(const ::std::string& value) {\n  set_has_global_init_net_type();\n  global_init_net_type_.SetNoArena(_default_global_init_net_type_, value);\n  // @@protoc_insertion_point(field_set:caffe2.PredictorConsts.GLOBAL_INIT_NET_TYPE)\n}\ninline void PredictorConsts::set_global_init_net_type(const char* value) {\n  set_has_global_init_net_type();\n  global_init_net_type_.SetNoArena(_default_global_init_net_type_, ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.PredictorConsts.GLOBAL_INIT_NET_TYPE)\n}\ninline void PredictorConsts::set_global_init_net_type(const char* value, size_t size) {\n  set_has_global_init_net_type();\n  global_init_net_type_.SetNoArena(_default_global_init_net_type_,\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // 
@@protoc_insertion_point(field_set_pointer:caffe2.PredictorConsts.GLOBAL_INIT_NET_TYPE)\n}\ninline ::std::string* PredictorConsts::mutable_global_init_net_type() {\n  set_has_global_init_net_type();\n  // @@protoc_insertion_point(field_mutable:caffe2.PredictorConsts.GLOBAL_INIT_NET_TYPE)\n  return global_init_net_type_.MutableNoArena(_default_global_init_net_type_);\n}\ninline ::std::string* PredictorConsts::release_global_init_net_type() {\n  // @@protoc_insertion_point(field_release:caffe2.PredictorConsts.GLOBAL_INIT_NET_TYPE)\n  clear_has_global_init_net_type();\n  return global_init_net_type_.ReleaseNoArena(_default_global_init_net_type_);\n}\ninline void PredictorConsts::set_allocated_global_init_net_type(::std::string* global_init_net_type) {\n  if (global_init_net_type != NULL) {\n    set_has_global_init_net_type();\n  } else {\n    clear_has_global_init_net_type();\n  }\n  global_init_net_type_.SetAllocatedNoArena(_default_global_init_net_type_, global_init_net_type);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PredictorConsts.GLOBAL_INIT_NET_TYPE)\n}\n\n// optional string PREDICT_INIT_NET_TYPE = 7 [default = \"PREDICT_INIT_NET_TYPE\"];\ninline bool PredictorConsts::has_predict_init_net_type() const {\n  return (_has_bits_[0] & 0x00000040u) != 0;\n}\ninline void PredictorConsts::set_has_predict_init_net_type() {\n  _has_bits_[0] |= 0x00000040u;\n}\ninline void PredictorConsts::clear_has_predict_init_net_type() {\n  _has_bits_[0] &= ~0x00000040u;\n}\ninline void PredictorConsts::clear_predict_init_net_type() {\n  predict_init_net_type_.ClearToDefaultNoArena(_default_predict_init_net_type_);\n  clear_has_predict_init_net_type();\n}\ninline const ::std::string& PredictorConsts::predict_init_net_type() const {\n  // @@protoc_insertion_point(field_get:caffe2.PredictorConsts.PREDICT_INIT_NET_TYPE)\n  return predict_init_net_type_.GetNoArena(_default_predict_init_net_type_);\n}\ninline void PredictorConsts::set_predict_init_net_type(const 
::std::string& value) {\n  set_has_predict_init_net_type();\n  predict_init_net_type_.SetNoArena(_default_predict_init_net_type_, value);\n  // @@protoc_insertion_point(field_set:caffe2.PredictorConsts.PREDICT_INIT_NET_TYPE)\n}\ninline void PredictorConsts::set_predict_init_net_type(const char* value) {\n  set_has_predict_init_net_type();\n  predict_init_net_type_.SetNoArena(_default_predict_init_net_type_, ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.PredictorConsts.PREDICT_INIT_NET_TYPE)\n}\ninline void PredictorConsts::set_predict_init_net_type(const char* value, size_t size) {\n  set_has_predict_init_net_type();\n  predict_init_net_type_.SetNoArena(_default_predict_init_net_type_,\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.PredictorConsts.PREDICT_INIT_NET_TYPE)\n}\ninline ::std::string* PredictorConsts::mutable_predict_init_net_type() {\n  set_has_predict_init_net_type();\n  // @@protoc_insertion_point(field_mutable:caffe2.PredictorConsts.PREDICT_INIT_NET_TYPE)\n  return predict_init_net_type_.MutableNoArena(_default_predict_init_net_type_);\n}\ninline ::std::string* PredictorConsts::release_predict_init_net_type() {\n  // @@protoc_insertion_point(field_release:caffe2.PredictorConsts.PREDICT_INIT_NET_TYPE)\n  clear_has_predict_init_net_type();\n  return predict_init_net_type_.ReleaseNoArena(_default_predict_init_net_type_);\n}\ninline void PredictorConsts::set_allocated_predict_init_net_type(::std::string* predict_init_net_type) {\n  if (predict_init_net_type != NULL) {\n    set_has_predict_init_net_type();\n  } else {\n    clear_has_predict_init_net_type();\n  }\n  predict_init_net_type_.SetAllocatedNoArena(_default_predict_init_net_type_, predict_init_net_type);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PredictorConsts.PREDICT_INIT_NET_TYPE)\n}\n\n// optional string PREDICT_NET_TYPE = 8 [default = \"PREDICT_NET_TYPE\"];\ninline bool 
PredictorConsts::has_predict_net_type() const {\n  return (_has_bits_[0] & 0x00000080u) != 0;\n}\ninline void PredictorConsts::set_has_predict_net_type() {\n  _has_bits_[0] |= 0x00000080u;\n}\ninline void PredictorConsts::clear_has_predict_net_type() {\n  _has_bits_[0] &= ~0x00000080u;\n}\ninline void PredictorConsts::clear_predict_net_type() {\n  predict_net_type_.ClearToDefaultNoArena(_default_predict_net_type_);\n  clear_has_predict_net_type();\n}\ninline const ::std::string& PredictorConsts::predict_net_type() const {\n  // @@protoc_insertion_point(field_get:caffe2.PredictorConsts.PREDICT_NET_TYPE)\n  return predict_net_type_.GetNoArena(_default_predict_net_type_);\n}\ninline void PredictorConsts::set_predict_net_type(const ::std::string& value) {\n  set_has_predict_net_type();\n  predict_net_type_.SetNoArena(_default_predict_net_type_, value);\n  // @@protoc_insertion_point(field_set:caffe2.PredictorConsts.PREDICT_NET_TYPE)\n}\ninline void PredictorConsts::set_predict_net_type(const char* value) {\n  set_has_predict_net_type();\n  predict_net_type_.SetNoArena(_default_predict_net_type_, ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.PredictorConsts.PREDICT_NET_TYPE)\n}\ninline void PredictorConsts::set_predict_net_type(const char* value, size_t size) {\n  set_has_predict_net_type();\n  predict_net_type_.SetNoArena(_default_predict_net_type_,\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.PredictorConsts.PREDICT_NET_TYPE)\n}\ninline ::std::string* PredictorConsts::mutable_predict_net_type() {\n  set_has_predict_net_type();\n  // @@protoc_insertion_point(field_mutable:caffe2.PredictorConsts.PREDICT_NET_TYPE)\n  return predict_net_type_.MutableNoArena(_default_predict_net_type_);\n}\ninline ::std::string* PredictorConsts::release_predict_net_type() {\n  // @@protoc_insertion_point(field_release:caffe2.PredictorConsts.PREDICT_NET_TYPE)\n  
clear_has_predict_net_type();\n  return predict_net_type_.ReleaseNoArena(_default_predict_net_type_);\n}\ninline void PredictorConsts::set_allocated_predict_net_type(::std::string* predict_net_type) {\n  if (predict_net_type != NULL) {\n    set_has_predict_net_type();\n  } else {\n    clear_has_predict_net_type();\n  }\n  predict_net_type_.SetAllocatedNoArena(_default_predict_net_type_, predict_net_type);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PredictorConsts.PREDICT_NET_TYPE)\n}\n\n// optional string SINGLE_PREDICTOR = 9 [default = \"SINGLE_PREDICTOR\"];\ninline bool PredictorConsts::has_single_predictor() const {\n  return (_has_bits_[0] & 0x00000100u) != 0;\n}\ninline void PredictorConsts::set_has_single_predictor() {\n  _has_bits_[0] |= 0x00000100u;\n}\ninline void PredictorConsts::clear_has_single_predictor() {\n  _has_bits_[0] &= ~0x00000100u;\n}\ninline void PredictorConsts::clear_single_predictor() {\n  single_predictor_.ClearToDefaultNoArena(_default_single_predictor_);\n  clear_has_single_predictor();\n}\ninline const ::std::string& PredictorConsts::single_predictor() const {\n  // @@protoc_insertion_point(field_get:caffe2.PredictorConsts.SINGLE_PREDICTOR)\n  return single_predictor_.GetNoArena(_default_single_predictor_);\n}\ninline void PredictorConsts::set_single_predictor(const ::std::string& value) {\n  set_has_single_predictor();\n  single_predictor_.SetNoArena(_default_single_predictor_, value);\n  // @@protoc_insertion_point(field_set:caffe2.PredictorConsts.SINGLE_PREDICTOR)\n}\ninline void PredictorConsts::set_single_predictor(const char* value) {\n  set_has_single_predictor();\n  single_predictor_.SetNoArena(_default_single_predictor_, ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.PredictorConsts.SINGLE_PREDICTOR)\n}\ninline void PredictorConsts::set_single_predictor(const char* value, size_t size) {\n  set_has_single_predictor();\n  single_predictor_.SetNoArena(_default_single_predictor_,\n      
::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.PredictorConsts.SINGLE_PREDICTOR)\n}\ninline ::std::string* PredictorConsts::mutable_single_predictor() {\n  set_has_single_predictor();\n  // @@protoc_insertion_point(field_mutable:caffe2.PredictorConsts.SINGLE_PREDICTOR)\n  return single_predictor_.MutableNoArena(_default_single_predictor_);\n}\ninline ::std::string* PredictorConsts::release_single_predictor() {\n  // @@protoc_insertion_point(field_release:caffe2.PredictorConsts.SINGLE_PREDICTOR)\n  clear_has_single_predictor();\n  return single_predictor_.ReleaseNoArena(_default_single_predictor_);\n}\ninline void PredictorConsts::set_allocated_single_predictor(::std::string* single_predictor) {\n  if (single_predictor != NULL) {\n    set_has_single_predictor();\n  } else {\n    clear_has_single_predictor();\n  }\n  single_predictor_.SetAllocatedNoArena(_default_single_predictor_, single_predictor);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PredictorConsts.SINGLE_PREDICTOR)\n}\n\n// optional string MULTI_PREDICTOR = 10 [default = \"MULTI_PREDICTOR\"];\ninline bool PredictorConsts::has_multi_predictor() const {\n  return (_has_bits_[0] & 0x00000200u) != 0;\n}\ninline void PredictorConsts::set_has_multi_predictor() {\n  _has_bits_[0] |= 0x00000200u;\n}\ninline void PredictorConsts::clear_has_multi_predictor() {\n  _has_bits_[0] &= ~0x00000200u;\n}\ninline void PredictorConsts::clear_multi_predictor() {\n  multi_predictor_.ClearToDefaultNoArena(_default_multi_predictor_);\n  clear_has_multi_predictor();\n}\ninline const ::std::string& PredictorConsts::multi_predictor() const {\n  // @@protoc_insertion_point(field_get:caffe2.PredictorConsts.MULTI_PREDICTOR)\n  return multi_predictor_.GetNoArena(_default_multi_predictor_);\n}\ninline void PredictorConsts::set_multi_predictor(const ::std::string& value) {\n  set_has_multi_predictor();\n  
multi_predictor_.SetNoArena(_default_multi_predictor_, value);\n  // @@protoc_insertion_point(field_set:caffe2.PredictorConsts.MULTI_PREDICTOR)\n}\ninline void PredictorConsts::set_multi_predictor(const char* value) {\n  set_has_multi_predictor();\n  multi_predictor_.SetNoArena(_default_multi_predictor_, ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.PredictorConsts.MULTI_PREDICTOR)\n}\ninline void PredictorConsts::set_multi_predictor(const char* value, size_t size) {\n  set_has_multi_predictor();\n  multi_predictor_.SetNoArena(_default_multi_predictor_,\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.PredictorConsts.MULTI_PREDICTOR)\n}\ninline ::std::string* PredictorConsts::mutable_multi_predictor() {\n  set_has_multi_predictor();\n  // @@protoc_insertion_point(field_mutable:caffe2.PredictorConsts.MULTI_PREDICTOR)\n  return multi_predictor_.MutableNoArena(_default_multi_predictor_);\n}\ninline ::std::string* PredictorConsts::release_multi_predictor() {\n  // @@protoc_insertion_point(field_release:caffe2.PredictorConsts.MULTI_PREDICTOR)\n  clear_has_multi_predictor();\n  return multi_predictor_.ReleaseNoArena(_default_multi_predictor_);\n}\ninline void PredictorConsts::set_allocated_multi_predictor(::std::string* multi_predictor) {\n  if (multi_predictor != NULL) {\n    set_has_multi_predictor();\n  } else {\n    clear_has_multi_predictor();\n  }\n  multi_predictor_.SetAllocatedNoArena(_default_multi_predictor_, multi_predictor);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PredictorConsts.MULTI_PREDICTOR)\n}\n\n// optional string TRAIN_INIT_PLAN_TYPE = 11 [default = \"TRAIN_INIT_PLAN_TYPE\"];\ninline bool PredictorConsts::has_train_init_plan_type() const {\n  return (_has_bits_[0] & 0x00000400u) != 0;\n}\ninline void PredictorConsts::set_has_train_init_plan_type() {\n  _has_bits_[0] |= 0x00000400u;\n}\ninline void 
PredictorConsts::clear_has_train_init_plan_type() {\n  _has_bits_[0] &= ~0x00000400u;\n}\ninline void PredictorConsts::clear_train_init_plan_type() {\n  train_init_plan_type_.ClearToDefaultNoArena(_default_train_init_plan_type_);\n  clear_has_train_init_plan_type();\n}\ninline const ::std::string& PredictorConsts::train_init_plan_type() const {\n  // @@protoc_insertion_point(field_get:caffe2.PredictorConsts.TRAIN_INIT_PLAN_TYPE)\n  return train_init_plan_type_.GetNoArena(_default_train_init_plan_type_);\n}\ninline void PredictorConsts::set_train_init_plan_type(const ::std::string& value) {\n  set_has_train_init_plan_type();\n  train_init_plan_type_.SetNoArena(_default_train_init_plan_type_, value);\n  // @@protoc_insertion_point(field_set:caffe2.PredictorConsts.TRAIN_INIT_PLAN_TYPE)\n}\ninline void PredictorConsts::set_train_init_plan_type(const char* value) {\n  set_has_train_init_plan_type();\n  train_init_plan_type_.SetNoArena(_default_train_init_plan_type_, ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.PredictorConsts.TRAIN_INIT_PLAN_TYPE)\n}\ninline void PredictorConsts::set_train_init_plan_type(const char* value, size_t size) {\n  set_has_train_init_plan_type();\n  train_init_plan_type_.SetNoArena(_default_train_init_plan_type_,\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.PredictorConsts.TRAIN_INIT_PLAN_TYPE)\n}\ninline ::std::string* PredictorConsts::mutable_train_init_plan_type() {\n  set_has_train_init_plan_type();\n  // @@protoc_insertion_point(field_mutable:caffe2.PredictorConsts.TRAIN_INIT_PLAN_TYPE)\n  return train_init_plan_type_.MutableNoArena(_default_train_init_plan_type_);\n}\ninline ::std::string* PredictorConsts::release_train_init_plan_type() {\n  // @@protoc_insertion_point(field_release:caffe2.PredictorConsts.TRAIN_INIT_PLAN_TYPE)\n  clear_has_train_init_plan_type();\n  return 
train_init_plan_type_.ReleaseNoArena(_default_train_init_plan_type_);\n}\ninline void PredictorConsts::set_allocated_train_init_plan_type(::std::string* train_init_plan_type) {\n  if (train_init_plan_type != NULL) {\n    set_has_train_init_plan_type();\n  } else {\n    clear_has_train_init_plan_type();\n  }\n  train_init_plan_type_.SetAllocatedNoArena(_default_train_init_plan_type_, train_init_plan_type);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PredictorConsts.TRAIN_INIT_PLAN_TYPE)\n}\n\n// optional string TRAIN_PLAN_TYPE = 12 [default = \"TRAIN_PLAN_TYPE\"];\ninline bool PredictorConsts::has_train_plan_type() const {\n  return (_has_bits_[0] & 0x00000800u) != 0;\n}\ninline void PredictorConsts::set_has_train_plan_type() {\n  _has_bits_[0] |= 0x00000800u;\n}\ninline void PredictorConsts::clear_has_train_plan_type() {\n  _has_bits_[0] &= ~0x00000800u;\n}\ninline void PredictorConsts::clear_train_plan_type() {\n  train_plan_type_.ClearToDefaultNoArena(_default_train_plan_type_);\n  clear_has_train_plan_type();\n}\ninline const ::std::string& PredictorConsts::train_plan_type() const {\n  // @@protoc_insertion_point(field_get:caffe2.PredictorConsts.TRAIN_PLAN_TYPE)\n  return train_plan_type_.GetNoArena(_default_train_plan_type_);\n}\ninline void PredictorConsts::set_train_plan_type(const ::std::string& value) {\n  set_has_train_plan_type();\n  train_plan_type_.SetNoArena(_default_train_plan_type_, value);\n  // @@protoc_insertion_point(field_set:caffe2.PredictorConsts.TRAIN_PLAN_TYPE)\n}\ninline void PredictorConsts::set_train_plan_type(const char* value) {\n  set_has_train_plan_type();\n  train_plan_type_.SetNoArena(_default_train_plan_type_, ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.PredictorConsts.TRAIN_PLAN_TYPE)\n}\ninline void PredictorConsts::set_train_plan_type(const char* value, size_t size) {\n  set_has_train_plan_type();\n  train_plan_type_.SetNoArena(_default_train_plan_type_,\n      
::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.PredictorConsts.TRAIN_PLAN_TYPE)\n}\ninline ::std::string* PredictorConsts::mutable_train_plan_type() {\n  set_has_train_plan_type();\n  // @@protoc_insertion_point(field_mutable:caffe2.PredictorConsts.TRAIN_PLAN_TYPE)\n  return train_plan_type_.MutableNoArena(_default_train_plan_type_);\n}\ninline ::std::string* PredictorConsts::release_train_plan_type() {\n  // @@protoc_insertion_point(field_release:caffe2.PredictorConsts.TRAIN_PLAN_TYPE)\n  clear_has_train_plan_type();\n  return train_plan_type_.ReleaseNoArena(_default_train_plan_type_);\n}\ninline void PredictorConsts::set_allocated_train_plan_type(::std::string* train_plan_type) {\n  if (train_plan_type != NULL) {\n    set_has_train_plan_type();\n  } else {\n    clear_has_train_plan_type();\n  }\n  train_plan_type_.SetAllocatedNoArena(_default_train_plan_type_, train_plan_type);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.PredictorConsts.TRAIN_PLAN_TYPE)\n}\n\ninline const PredictorConsts* PredictorConsts::internal_default_instance() {\n  return &PredictorConsts_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace caffe2\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_caffe2_2fproto_2fpredictor_5fconsts_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/proto/prof_dag.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: caffe2/proto/prof_dag.proto\n\n#ifndef PROTOBUF_caffe2_2fproto_2fprof_5fdag_2eproto__INCLUDED\n#define PROTOBUF_caffe2_2fproto_2fprof_5fdag_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/unknown_field_set.h>\n// @@protoc_insertion_point(includes)\n\nnamespace caffe2 {\n\n// Internal implementation detail -- do not call these.\nvoid protobuf_AddDesc_caffe2_2fproto_2fprof_5fdag_2eproto();\nvoid protobuf_InitDefaults_caffe2_2fproto_2fprof_5fdag_2eproto();\nvoid protobuf_AssignDesc_caffe2_2fproto_2fprof_5fdag_2eproto();\nvoid protobuf_ShutdownFile_caffe2_2fproto_2fprof_5fdag_2eproto();\n\nclass ProfDAGProto;\nclass ProfDAGProtos;\n\n// ===================================================================\n\nclass ProfDAGProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.ProfDAGProto) */ {\n public:\n  ProfDAGProto();\n  virtual ~ProfDAGProto();\n\n  ProfDAGProto(const ProfDAGProto& from);\n\n  inline ProfDAGProto& operator=(const ProfDAGProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const 
::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const ProfDAGProto& default_instance();\n\n  static const ProfDAGProto* internal_default_instance();\n\n  void Swap(ProfDAGProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline ProfDAGProto* New() const { return New(NULL); }\n\n  ProfDAGProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const ProfDAGProto& from);\n  void MergeFrom(const ProfDAGProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(ProfDAGProto* other);\n  void UnsafeMergeFrom(const ProfDAGProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  
// nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // required string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // required float mean = 2;\n  bool has_mean() const;\n  void clear_mean();\n  static const int kMeanFieldNumber = 2;\n  float mean() const;\n  void set_mean(float value);\n\n  // required float stddev = 3;\n  bool has_stddev() const;\n  void clear_stddev();\n  static const int kStddevFieldNumber = 3;\n  float stddev() const;\n  void set_stddev(float value);\n\n  // @@protoc_insertion_point(class_scope:caffe2.ProfDAGProto)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_mean();\n  inline void clear_has_mean();\n  inline void set_has_stddev();\n  inline void clear_has_stddev();\n\n  // helper for ByteSizeLong()\n  size_t RequiredFieldsByteSizeFallback() const;\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  float mean_;\n  float stddev_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fprof_5fdag_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fprof_5fdag_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fprof_5fdag_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fprof_5fdag_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<ProfDAGProto> ProfDAGProto_default_instance_;\n\n// 
-------------------------------------------------------------------\n\nclass ProfDAGProtos : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:caffe2.ProfDAGProtos) */ {\n public:\n  ProfDAGProtos();\n  virtual ~ProfDAGProtos();\n\n  ProfDAGProtos(const ProfDAGProtos& from);\n\n  inline ProfDAGProtos& operator=(const ProfDAGProtos& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const ProfDAGProtos& default_instance();\n\n  static const ProfDAGProtos* internal_default_instance();\n\n  void Swap(ProfDAGProtos* other);\n\n  // implements Message ----------------------------------------------\n\n  inline ProfDAGProtos* New() const { return New(NULL); }\n\n  ProfDAGProtos* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const ProfDAGProtos& from);\n  void MergeFrom(const ProfDAGProtos& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n 
 void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(ProfDAGProtos* other);\n  void UnsafeMergeFrom(const ProfDAGProtos& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // repeated .caffe2.ProfDAGProto stats = 1;\n  int stats_size() const;\n  void clear_stats();\n  static const int kStatsFieldNumber = 1;\n  const ::caffe2::ProfDAGProto& stats(int index) const;\n  ::caffe2::ProfDAGProto* mutable_stats(int index);\n  ::caffe2::ProfDAGProto* add_stats();\n  ::google::protobuf::RepeatedPtrField< ::caffe2::ProfDAGProto >*\n      mutable_stats();\n  const ::google::protobuf::RepeatedPtrField< ::caffe2::ProfDAGProto >&\n      stats() const;\n\n  // @@protoc_insertion_point(class_scope:caffe2.ProfDAGProtos)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::caffe2::ProfDAGProto > stats_;\n  friend void  protobuf_InitDefaults_caffe2_2fproto_2fprof_5fdag_2eproto_impl();\n  friend void  protobuf_AddDesc_caffe2_2fproto_2fprof_5fdag_2eproto_impl();\n  friend void protobuf_AssignDesc_caffe2_2fproto_2fprof_5fdag_2eproto();\n  friend void protobuf_ShutdownFile_caffe2_2fproto_2fprof_5fdag_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<ProfDAGProtos> ProfDAGProtos_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if 
!PROTOBUF_INLINE_NOT_IN_HEADERS\n// ProfDAGProto\n\n// required string name = 1;\ninline bool ProfDAGProto::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void ProfDAGProto::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void ProfDAGProto::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void ProfDAGProto::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& ProfDAGProto::name() const {\n  // @@protoc_insertion_point(field_get:caffe2.ProfDAGProto.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ProfDAGProto::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:caffe2.ProfDAGProto.name)\n}\ninline void ProfDAGProto::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:caffe2.ProfDAGProto.name)\n}\ninline void ProfDAGProto::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:caffe2.ProfDAGProto.name)\n}\ninline ::std::string* ProfDAGProto::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:caffe2.ProfDAGProto.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* ProfDAGProto::release_name() {\n  // @@protoc_insertion_point(field_release:caffe2.ProfDAGProto.name)\n  clear_has_name();\n  return 
name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ProfDAGProto::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:caffe2.ProfDAGProto.name)\n}\n\n// required float mean = 2;\ninline bool ProfDAGProto::has_mean() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void ProfDAGProto::set_has_mean() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void ProfDAGProto::clear_has_mean() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void ProfDAGProto::clear_mean() {\n  mean_ = 0;\n  clear_has_mean();\n}\ninline float ProfDAGProto::mean() const {\n  // @@protoc_insertion_point(field_get:caffe2.ProfDAGProto.mean)\n  return mean_;\n}\ninline void ProfDAGProto::set_mean(float value) {\n  set_has_mean();\n  mean_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.ProfDAGProto.mean)\n}\n\n// required float stddev = 3;\ninline bool ProfDAGProto::has_stddev() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void ProfDAGProto::set_has_stddev() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void ProfDAGProto::clear_has_stddev() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void ProfDAGProto::clear_stddev() {\n  stddev_ = 0;\n  clear_has_stddev();\n}\ninline float ProfDAGProto::stddev() const {\n  // @@protoc_insertion_point(field_get:caffe2.ProfDAGProto.stddev)\n  return stddev_;\n}\ninline void ProfDAGProto::set_stddev(float value) {\n  set_has_stddev();\n  stddev_ = value;\n  // @@protoc_insertion_point(field_set:caffe2.ProfDAGProto.stddev)\n}\n\ninline const ProfDAGProto* ProfDAGProto::internal_default_instance() {\n  return &ProfDAGProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// ProfDAGProtos\n\n// repeated 
.caffe2.ProfDAGProto stats = 1;\ninline int ProfDAGProtos::stats_size() const {\n  return stats_.size();\n}\ninline void ProfDAGProtos::clear_stats() {\n  stats_.Clear();\n}\ninline const ::caffe2::ProfDAGProto& ProfDAGProtos::stats(int index) const {\n  // @@protoc_insertion_point(field_get:caffe2.ProfDAGProtos.stats)\n  return stats_.Get(index);\n}\ninline ::caffe2::ProfDAGProto* ProfDAGProtos::mutable_stats(int index) {\n  // @@protoc_insertion_point(field_mutable:caffe2.ProfDAGProtos.stats)\n  return stats_.Mutable(index);\n}\ninline ::caffe2::ProfDAGProto* ProfDAGProtos::add_stats() {\n  // @@protoc_insertion_point(field_add:caffe2.ProfDAGProtos.stats)\n  return stats_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::caffe2::ProfDAGProto >*\nProfDAGProtos::mutable_stats() {\n  // @@protoc_insertion_point(field_mutable_list:caffe2.ProfDAGProtos.stats)\n  return &stats_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::caffe2::ProfDAGProto >&\nProfDAGProtos::stats() const {\n  // @@protoc_insertion_point(field_list:caffe2.ProfDAGProtos.stats)\n  return stats_;\n}\n\ninline const ProfDAGProtos* ProfDAGProtos::internal_default_instance() {\n  return &ProfDAGProtos_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n// -------------------------------------------------------------------\n\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace caffe2\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_caffe2_2fproto_2fprof_5fdag_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/python/pybind_state.h",
    "content": "#pragma once\n\n#include <unordered_map>\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/init.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/memonger.h\"\n#include \"caffe2/core/net.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/scope_guard.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/core/types.h\"\n#include \"caffe2/core/workspace.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\n\n#include <Python.h>\n#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION\n#define PY_ARRAY_UNIQUE_SYMBOL caffe2_python_ARRAY_API\n#include <numpy/arrayobject.h>\n\n// Temporary solution for numpy < 1.7 versions: old macro, no promises.\n// You're strongly advised to upgrade to >= 1.7.\n#ifndef NPY_ARRAY_C_CONTIGUOUS\n#define NPY_ARRAY_C_CONTIGUOUS NPY_C_CONTIGUOUS\n#define PyArray_SetBaseObject(arr, x) (PyArray_BASE(arr) = (x))\n#endif\n\nnamespace caffe2 {\nnamespace python {\n\nnamespace py = pybind11;\n\n// Add methods common to both CPU and GPU mode.\nvoid addGlobalMethods(pybind11::module& m);\n// Expose Workspace, Net, Blob\nvoid addObjectMethods(pybind11::module& m);\n\nclass BlobFetcherBase {\n public:\n  struct FetchedBlob {\n    pybind11::object obj;\n    bool copied;\n  };\n  virtual ~BlobFetcherBase();\n  virtual pybind11::object Fetch(const Blob& blob) = 0;\n};\n\nclass BlobFeederBase {\n public:\n  virtual ~BlobFeederBase();\n  virtual void\n  Feed(const DeviceOption& option, PyArrayObject* array, Blob* blob) = 0;\n};\n\nCAFFE_DECLARE_TYPED_REGISTRY(BlobFetcherRegistry, CaffeTypeId, BlobFetcherBase);\n#define REGISTER_BLOB_FETCHER(id, ...) \\\n  CAFFE_REGISTER_TYPED_CLASS(BlobFetcherRegistry, id, __VA_ARGS__)\ninline unique_ptr<BlobFetcherBase> CreateFetcher(CaffeTypeId id) {\n  return BlobFetcherRegistry()->Create(id);\n}\n\nCAFFE_DECLARE_TYPED_REGISTRY(BlobFeederRegistry, int, BlobFeederBase);\n#define REGISTER_BLOB_FEEDER(device_type, ...) 
\\\n  CAFFE_REGISTER_TYPED_CLASS(BlobFeederRegistry, device_type, __VA_ARGS__)\ninline unique_ptr<BlobFeederBase> CreateFeeder(int device_type) {\n  return BlobFeederRegistry()->Create(device_type);\n}\n\nstatic_assert(\n    sizeof(int) == sizeof(int32_t),\n    \"We make an assumption that int is always int32 for numpy \"\n    \"type mapping.\");\n\nint CaffeToNumpyType(const TypeMeta& meta);\nconst TypeMeta& NumpyTypeToCaffe(int numpy_type);\n\ntemplate <class Context>\nclass TensorFetcher : public BlobFetcherBase {\n public:\n  pybind11::object Fetch(const Blob& blob) override {\n    return FetchTensor(blob.Get<Tensor<Context>>(), true).obj;\n  }\n\n  bool NeedsCopy(const TypeMeta& meta) const {\n    return !std::is_same<Context, CPUContext>::value ||\n        CaffeToNumpyType(meta) == NPY_OBJECT;\n  }\n\n  FetchedBlob FetchTensor(const Tensor<Context>& tensor, bool force_copy) {\n    FetchedBlob result;\n    CAFFE_ENFORCE_GE(tensor.size(), 0, \"Trying to fetch unitilized tensor\");\n    const int numpy_type = CaffeToNumpyType(tensor.meta());\n    CAFFE_ENFORCE(\n        numpy_type != -1,\n        \"This tensor's data type is not supported: \",\n        tensor.meta().name(),\n        \".\");\n    std::vector<npy_intp> npy_dims;\n    for (const auto dim : tensor.dims()) {\n      npy_dims.push_back(dim);\n    }\n    result.copied = force_copy || NeedsCopy(tensor.meta());\n    void* outPtr;\n    if (result.copied) {\n      result.obj = pybind11::object(\n          PyArray_SimpleNew(tensor.ndim(), npy_dims.data(), numpy_type),\n          /* borrowed */ false);\n      outPtr = static_cast<void*>(\n          PyArray_DATA(reinterpret_cast<PyArrayObject*>(result.obj.ptr())));\n    } else {\n      outPtr = const_cast<Tensor<Context>&>(tensor).raw_mutable_data();\n      result.obj = pybind11::object(\n          PyArray_SimpleNewFromData(\n              tensor.ndim(), npy_dims.data(), numpy_type, outPtr),\n          /* borrowed */ false);\n    }\n\n    if (numpy_type == 
NPY_OBJECT) {\n      PyObject** outObj = reinterpret_cast<PyObject**>(outPtr);\n      auto* str = tensor.template data<std::string>();\n      for (int i = 0; i < tensor.size(); ++i) {\n        outObj[i] = PyBytes_FromStringAndSize(str->data(), str->size());\n        str++;\n        // cleanup on failure\n        if (outObj[i] == nullptr) {\n          for (int j = 0; j < i; ++j) {\n            Py_DECREF(outObj[j]);\n          }\n          CAFFE_THROW(\"Failed to allocate string for ndarray of strings.\");\n        }\n      }\n      return result;\n    }\n\n    if (result.copied) {\n      Context context;\n      context.template CopyBytes<Context, CPUContext>(\n          tensor.nbytes(), tensor.raw_data(), outPtr);\n      context.FinishDeviceComputation();\n    }\n    return result;\n  }\n};\n\ntemplate <class Context>\nclass TensorFeeder : public BlobFeederBase {\n public:\n  void FeedTensor(\n      const DeviceOption& option,\n      PyArrayObject* original_array,\n      Tensor<Context>* tensor) {\n    PyArrayObject* array = PyArray_GETCONTIGUOUS(original_array);\n    auto g = MakeGuard([&]() { Py_XDECREF(array); });\n\n    const auto npy_type = PyArray_TYPE(array);\n    const TypeMeta& meta = NumpyTypeToCaffe(npy_type);\n    CAFFE_ENFORCE(\n        meta.id() != 0,\n        \"This numpy data type is not supported: \",\n        PyArray_TYPE(array),\n        \".\");\n    Context context(option);\n    context.SwitchToDevice();\n    // numpy requires long int as its dims.\n    int ndim = PyArray_NDIM(array);\n    npy_intp* npy_dims = PyArray_DIMS(array);\n    std::vector<TIndex> dims;\n    for (int i = 0; i < ndim; ++i) {\n      dims.push_back(npy_dims[i]);\n    }\n    tensor->Resize(dims);\n\n    // Now, copy the data to the tensor.\n    switch (npy_type) {\n      case NPY_OBJECT: {\n        PyObject** input = reinterpret_cast<PyObject**>(PyArray_DATA(array));\n        auto* outPtr = tensor->template mutable_data<std::string>();\n        for (int i = 0; i < 
tensor->size(); ++i) {\n          char* str;\n          Py_ssize_t strSize;\n#if PY_MAJOR_VERSION > 2\n          if (PyBytes_Check(input[i])) {\n            CAFFE_ENFORCE(\n                PyBytes_AsStringAndSize(input[i], &str, &strSize) != -1,\n                \"Had a PyBytes object but cannot convert it to a string.\");\n          } else if (PyUnicode_Check(input[i])) { // string\n            str = PyUnicode_AsUTF8AndSize(input[i], &strSize);\n            CAFFE_ENFORCE(\n                str,\n                \"Had a PyUnicode object but cannot convert it to a string.\");\n          } else {\n            CAFFE_THROW(\"Unsupported python object type passed into ndarray.\");\n          }\n#else\n          CAFFE_ENFORCE(\n              PyBytes_AsStringAndSize(input[i], &str, &strSize) != -1,\n              \"Unsupported python object type passed into ndarray.\");\n#endif // PY_MAJOR_VERSION > 2\n          outPtr[i] = std::string(str, strSize);\n        }\n        break;\n      }\n      case NPY_UNICODE:\n        CAFFE_THROW(\n            \"You are feeding in a numpy array of unicode. Caffe2 C++ does not \"\n            \"support unicode yet. 
Please ensure that you are passing in bytes \"\n            \"instead of unicode strings.\");\n        break;\n      default:\n        context.template CopyBytes<CPUContext, Context>(\n            tensor->size() * meta.itemsize(),\n            static_cast<void*>(PyArray_DATA(array)),\n            tensor->raw_mutable_data(meta));\n    }\n    context.FinishDeviceComputation();\n  }\n\n  virtual void\n  Feed(const DeviceOption& option, PyArrayObject* original_array, Blob* blob) {\n    FeedTensor(option, original_array, blob->GetMutable<Tensor<Context>>());\n  }\n};\n\nnamespace python_detail {\nstruct Func;\n}\n\nclass PythonOpBase : public Operator<CPUContext> {\n public:\n  PythonOpBase(\n      const OperatorDef& operator_def,\n      Workspace* ws,\n      const std::string& pickled_builder_arg_name);\n\n  bool RunOnDevice() override final;\n  virtual ~PythonOpBase();\n\n protected:\n  virtual const python_detail::Func& getFunc(const std::string& token) = 0;\n  Workspace* ws_;\n\n private:\n  const std::string token_;\n  std::unique_ptr<python_detail::Func> built_func_;\n};\n\nclass PythonOp final : public PythonOpBase {\n public:\n  PythonOp(const OperatorDef& operator_def, Workspace* ws)\n      : PythonOpBase(operator_def, ws, \"pickled_builder\") {}\n\n protected:\n  const python_detail::Func& getFunc(const std::string& token) override;\n};\n\nclass PythonGradientOp final : public PythonOpBase {\n public:\n  PythonGradientOp(const OperatorDef& operator_def, Workspace* ws)\n      : PythonOpBase(operator_def, ws, \"pickled_grad_builder\") {}\n\n protected:\n  const python_detail::Func& getFunc(const std::string& token) override;\n};\n\n} // namespace python\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/queue/blobs_queue.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <condition_variable>\n#include <memory>\n#include <mutex>\n#include <queue>\n\n#include \"caffe2/core/blob_stats.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/stats.h\"\n#include \"caffe2/core/tensor.h\"\n#include \"caffe2/core/workspace.h\"\n\nnamespace caffe2 {\n\n// A thread-safe, bounded, blocking queue.\n// Modelled as a circular buffer.\n\n// Containing blobs are owned by the workspace.\n// On read, we swap out the underlying data for the blob passed in for blobs\n\nclass BlobsQueue : public std::enable_shared_from_this<BlobsQueue> {\n public:\n  BlobsQueue(\n      Workspace* ws,\n      const std::string& queueName,\n      size_t capacity,\n      size_t numBlobs,\n      bool enforceUniqueName,\n      const std::vector<std::string>& fieldNames = {});\n\n  ~BlobsQueue() {\n    close();\n  }\n\n  bool blockingRead(\n      const std::vector<Blob*>& inputs,\n      float timeout_secs = 0.0f);\n  bool tryWrite(const std::vector<Blob*>& inputs);\n  bool blockingWrite(const std::vector<Blob*>& inputs);\n  void close();\n  size_t getNumBlobs() const {\n    return numBlobs_;\n  }\n\n private:\n  bool canWrite();\n  void doWrite(const std::vector<Blob*>& inputs);\n\n  std::atomic<bool> closing_{false};\n\n  size_t numBlobs_;\n  std::mutex mutex_; // protects all variables in the class.\n  std::condition_variable cv_;\n  int64_t reader_{0};\n  int64_t writer_{0};\n  std::vector<std::vector<Blob*>> queue_;\n  const std::string name_;\n\n  struct QueueStats {\n    CAFFE_STAT_CTOR(QueueStats);\n    CAFFE_EXPORTED_STAT(queue_balance);\n    CAFFE_EXPORTED_STAT(queue_dequeued_records);\n    CAFFE_DETAILED_EXPORTED_STAT(queue_dequeued_bytes);\n  } stats_;\n};\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/queue/blobs_queue_db.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include <chrono>\n#include <string>\n\n#include \"caffe2/core/db.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/stats.h\"\n#include \"caffe2/queue/blobs_queue.h\"\n\nnamespace caffe2 {\nnamespace db {\n\nnamespace {\nconst std::string& GetStringFromBlob(Blob* blob) {\n  if (blob->template IsType<string>()) {\n    return blob->template Get<string>();\n  } else if (blob->template IsType<Tensor<CPUContext>>()) {\n    return *blob->template Get<Tensor<CPUContext>>().template data<string>();\n  } else {\n    CAFFE_THROW(\"Unsupported Blob type\");\n  }\n}\n}\n\nclass BlobsQueueDBCursor : public Cursor {\n public:\n  explicit BlobsQueueDBCursor(\n      std::shared_ptr<BlobsQueue> queue,\n      int key_blob_index,\n      int value_blob_index,\n      float timeout_secs)\n      : queue_(queue),\n        key_blob_index_(key_blob_index),\n        value_blob_index_(value_blob_index),\n        timeout_secs_(timeout_secs),\n        inited_(false),\n        valid_(false) {\n    LOG(INFO) << \"BlobsQueueDBCursor constructed\";\n    CAFFE_ENFORCE(queue_ != nullptr, \"queue is null\");\n    CAFFE_ENFORCE(value_blob_index_ >= 0, \"value_blob_index < 0\");\n  }\n\n  virtual ~BlobsQueueDBCursor() {}\n\n  void Seek(const string& /* unused */) override {\n    CAFFE_THROW(\"Seek is not supported.\");\n  }\n\n  bool SupportsSeek() override {\n    return false;\n  }\n\n  void SeekToFirst() override {\n    // not applicable\n  }\n\n  void Next() override {\n    unique_ptr<Blob> blob = make_unique<Blob>();\n    vector<Blob*> blob_vector{blob.get()};\n    auto success = queue_->blockingRead(blob_vector, timeout_secs_);\n    if (!success) {\n      LOG(ERROR) << \"Timed out reading from BlobsQueue or it is closed\";\n      valid_ = false;\n      return;\n    }\n\n    if (key_blob_index_ >= 0) {\n      key_ = GetStringFromBlob(blob_vector[key_blob_index_]);\n    }\n    value_ = 
GetStringFromBlob(blob_vector[value_blob_index_]);\n    valid_ = true;\n  }\n\n  string key() override {\n    if (!inited_) {\n      Next();\n      inited_ = true;\n    }\n    return key_;\n  }\n\n  string value() override {\n    if (!inited_) {\n      Next();\n      inited_ = true;\n    }\n    return value_;\n  }\n\n  bool Valid() override {\n    return valid_;\n  }\n\n private:\n  std::shared_ptr<BlobsQueue> queue_;\n  int key_blob_index_;\n  int value_blob_index_;\n  float timeout_secs_;\n  bool inited_;\n  string key_;\n  string value_;\n  bool valid_;\n};\n\nclass BlobsQueueDB : public DB {\n public:\n  BlobsQueueDB(\n      const string& source,\n      Mode mode,\n      std::shared_ptr<BlobsQueue> queue,\n      int key_blob_index = -1,\n      int value_blob_index = 0,\n      float timeout_secs = 0.0)\n      : DB(source, mode),\n        queue_(queue),\n        key_blob_index_(key_blob_index),\n        value_blob_index_(value_blob_index),\n        timeout_secs_(timeout_secs) {\n    LOG(INFO) << \"BlobsQueueDB constructed\";\n  }\n\n  virtual ~BlobsQueueDB() {\n    Close();\n  }\n\n  void Close() override {}\n  unique_ptr<Cursor> NewCursor() override {\n    return make_unique<BlobsQueueDBCursor>(\n        queue_, key_blob_index_, value_blob_index_, timeout_secs_);\n  }\n\n  unique_ptr<Transaction> NewTransaction() override {\n    CAFFE_THROW(\"Not implemented.\");\n  }\n\n private:\n  std::shared_ptr<BlobsQueue> queue_;\n  int key_blob_index_;\n  int value_blob_index_;\n  float timeout_secs_;\n};\n} // namespace db\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/queue/queue_ops.h",
    "content": "#pragma once\n\n#include <memory>\n#include \"blobs_queue.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename Context>\nclass CreateBlobsQueueOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  CreateBlobsQueueOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        ws_(ws),\n        name(operator_def.output().Get(0)) {}\n\n  bool RunOnDevice() override {\n    const auto capacity =\n        OperatorBase::template GetSingleArgument<int>(\"capacity\", 1);\n    const auto numBlobs =\n        OperatorBase::template GetSingleArgument<int>(\"num_blobs\", 1);\n    const auto enforceUniqueName =\n        OperatorBase::template GetSingleArgument<int>(\n            \"enforce_unique_name\", false);\n    const auto fieldNames =\n        OperatorBase::template GetRepeatedArgument<std::string>(\"field_names\");\n    CAFFE_ENFORCE_EQ(this->OutputSize(), 1);\n    auto queuePtr = Operator<Context>::Outputs()[0]\n                        ->template GetMutable<std::shared_ptr<BlobsQueue>>();\n    CAFFE_ENFORCE(queuePtr);\n    *queuePtr = std::make_shared<BlobsQueue>(\n        ws_, name, capacity, numBlobs, enforceUniqueName, fieldNames);\n    return true;\n  }\n\n private:\n  Workspace* ws_{nullptr};\n  const std::string name;\n};\n\ntemplate <typename Context>\nclass EnqueueBlobsOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  using Operator<Context>::Operator;\n  bool RunOnDevice() override {\n    CAFFE_ENFORCE(InputSize() > 1);\n    auto queue = Operator<Context>::Inputs()[0]\n                     ->template Get<std::shared_ptr<BlobsQueue>>();\n    CAFFE_ENFORCE(queue && OutputSize() == queue->getNumBlobs());\n    return queue->blockingWrite(this->Outputs());\n  }\n\n private:\n};\n\ntemplate <typename Context>\nclass DequeueBlobsOp final : public Operator<Context> {\n public:\n  
USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  DequeueBlobsOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {\n    timeout_secs_ = OperatorBase::GetSingleArgument<float>(\"timeout_secs\", 0);\n  }\n\n  bool RunOnDevice() override {\n    CAFFE_ENFORCE(InputSize() == 1);\n    auto queue =\n        OperatorBase::Inputs()[0]->template Get<std::shared_ptr<BlobsQueue>>();\n    CAFFE_ENFORCE(queue && OutputSize() == queue->getNumBlobs());\n    return queue->blockingRead(this->Outputs(), timeout_secs_);\n  }\n\n private:\n  float timeout_secs_;\n};\n\ntemplate <typename Context>\nclass CloseBlobsQueueOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  using Operator<Context>::Operator;\n  bool RunOnDevice() override {\n    CAFFE_ENFORCE_EQ(InputSize(), 1);\n    auto queue =\n        OperatorBase::Inputs()[0]->template Get<std::shared_ptr<BlobsQueue>>();\n    CAFFE_ENFORCE(queue);\n    queue->close();\n    return true;\n  }\n\n private:\n};\n\ntemplate <typename Context>\nclass SafeEnqueueBlobsOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  using Operator<Context>::Operator;\n  bool RunOnDevice() override {\n    auto queue = Operator<Context>::Inputs()[0]\n                     ->template Get<std::shared_ptr<BlobsQueue>>();\n    CAFFE_ENFORCE(queue);\n    auto size = queue->getNumBlobs();\n    CAFFE_ENFORCE(\n        OutputSize() == size + 1,\n        \"Expected \" + caffe2::to_string(size + 1) + \", \" + \" got: \" +\n            caffe2::to_string(size));\n    bool status = queue->blockingWrite(this->Outputs());\n    Output(size)->Resize();\n    math::Set<bool, Context>(\n        1, !status, Output(size)->template mutable_data<bool>(), &context_);\n    return true;\n  }\n};\n\ntemplate <typename Context>\nclass SafeDequeueBlobsOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  using Operator<Context>::Operator;\n\n  
SafeDequeueBlobsOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        numRecords_(OperatorBase::GetSingleArgument<int>(\"num_records\", 1)) {\n    CAFFE_ENFORCE_GT(numRecords_, 0);\n  }\n\n  bool dequeueMany(std::shared_ptr<BlobsQueue>& queue) {\n    auto size = queue->getNumBlobs();\n\n    if (blobs_.size() != size) {\n      blobs_.resize(size);\n      blobPtrs_.resize(size);\n      for (int col = 0; col < size; ++col) {\n        blobPtrs_.at(col) = &blobs_.at(col);\n      }\n    }\n\n    const int kTensorGrowthPct = 40;\n    for (int i = 0; i < numRecords_; ++i) {\n      if (!queue->blockingRead(blobPtrs_)) {\n        // if we read at least one record, status is still true\n        return i > 0;\n      }\n      for (int col = 0; col < size; ++col) {\n        auto* out = this->Output(col);\n        const auto& in = blobPtrs_.at(col)->template Get<Tensor<Context>>();\n        if (i == 0) {\n          out->CopyFrom(in);\n        } else {\n          auto oldSize = out->size();\n          out->Extend(in.dims()[0], kTensorGrowthPct, &context_);\n          auto* dst =\n              (char*)out->raw_mutable_data() + oldSize * in.meta().itemsize();\n          context_.template CopyItems<Context, Context>(\n              in.meta(), in.size(), in.raw_data(), dst);\n        }\n      }\n    }\n    return true;\n  }\n\n  bool dequeueOne(std::shared_ptr<BlobsQueue>& queue) {\n    return queue->blockingRead(this->Outputs());\n  }\n\n  bool RunOnDevice() override {\n    CAFFE_ENFORCE(InputSize() == 1);\n    auto queue = Operator<Context>::Inputs()[0]\n                     ->template Get<std::shared_ptr<BlobsQueue>>();\n    CAFFE_ENFORCE(queue);\n\n    auto size = queue->getNumBlobs();\n    CAFFE_ENFORCE_EQ(OutputSize(), size + 1);\n\n    bool status = numRecords_ > 1 ? 
dequeueMany(queue) : dequeueOne(queue);\n\n    Output(size)->Resize();\n    math::Set<bool, Context>(\n        1, !status, Output(size)->template mutable_data<bool>(), &context_);\n    return true;\n  }\n\n private:\n  int numRecords_;\n  std::vector<Blob> blobs_;\n  std::vector<Blob*> blobPtrs_;\n};\n\ntemplate <typename Context>\nclass WeightedSampleDequeueBlobsOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  WeightedSampleDequeueBlobsOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {\n    vector<float> weights = OperatorBase::GetRepeatedArgument<float>(\"weights\");\n    if (weights.empty()) {\n      weights.resize(InputSize(), 1.0f);\n    }\n    CAFFE_ENFORCE_EQ(InputSize(), weights.size());\n\n    float sum = accumulate(weights.begin(), weights.end(), 0.0f);\n    CAFFE_ENFORCE(sum > 0.0f, \"Sum of weights must be positive\");\n    cumProbs_.resize(weights.size());\n    for (int i = 0; i < weights.size(); i++) {\n      cumProbs_[i] = weights[i] / sum;\n      CAFFE_ENFORCE_GE(\n          cumProbs_[i], 0.0f, \"Each probability must be non-negative\");\n    }\n    std::partial_sum(cumProbs_.begin(), cumProbs_.end(), cumProbs_.begin());\n    // Put last value to be 1.0001 to avoid numerical issues.\n    cumProbs_.back() = 1.0001;\n\n    LOG(INFO) << \"Dequeue weights: \" << weights;\n    LOG(INFO) << \"cumProbs: \" << cumProbs_;\n  }\n\n  bool RunOnDevice() override {\n    float r;\n    math::RandUniform<float, Context>(1, 0.0f, 1.0f, &r, &context_);\n    auto lb = lower_bound(cumProbs_.begin(), cumProbs_.end(), r);\n    CAFFE_ENFORCE(lb != cumProbs_.end(), \"Cannot find \", r, \" in cumProbs_.\");\n\n    auto queue = Operator<Context>::Inputs()[lb - cumProbs_.begin()]\n                     ->template Get<std::shared_ptr<BlobsQueue>>();\n\n    CAFFE_ENFORCE(queue);\n    auto size = queue->getNumBlobs();\n    CAFFE_ENFORCE_EQ(OutputSize(), size + 1);\n    bool status = 
queue->blockingRead(this->Outputs());\n    Output(size)->Resize();\n    math::Set<bool, Context>(\n        1, !status, Output(size)->template mutable_data<bool>(), &context_);\n    return true;\n  }\n\n private:\n  vector<float> cumProbs_;\n};\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/queue/rebatching_queue.h",
    "content": "#pragma once\n\n#include <atomic>\n#include <condition_variable>\n#include <memory>\n#include <mutex>\n#include <queue>\n\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/core/stats.h\"\n#include \"caffe2/core/tensor.h\"\n\nnamespace caffe2 {\n\n// TODO: This is a very naive implementation with a single mutex. We can do the\n// atomic index + circular queue optimizations or pull something more\n// heavy-weight later\n\nclass RebatchingQueue {\n public:\n  RebatchingQueue(size_t capacity, size_t numBlobs);\n\n  ~RebatchingQueue();\n\n  bool enqueueOne(\n      CPUContext& context,\n      const std::vector<const TensorCPU*>& inputs);\n\n  bool enqueueMany(\n      CPUContext& context,\n      const std::vector<const TensorCPU*>& inputs);\n\n  bool dequeue(\n      CPUContext& context,\n      size_t numElements,\n      const std::vector<TensorCPU*>& outputs);\n\n  size_t capacity() const;\n\n  size_t numBlobs() const;\n\n  bool isClosed() const;\n\n  void close();\n\n private:\n  bool enqueue(std::vector<std::vector<TensorCPU>> splittedInputs);\n\n  bool canWrite() const;\n  bool canRead() const;\n\n  const size_t capacity_;\n  const size_t numBlobs_;\n\n  mutable std::mutex mutex_;\n\n  bool isClosed_{false};\n\n  uint64_t head_{0};\n  uint64_t tail_{0};\n\n  std::condition_variable cvEmpty_;\n  std::condition_variable cvOverflow_;\n\n  std::vector<std::vector<TensorCPU>> queue_;\n};\n} // caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/queue/rebatching_queue_ops.h",
    "content": "#pragma once\n\n#include \"rebatching_queue.h\"\n\nnamespace caffe2 {\n\nusing RebatchingQueuePtr = std::unique_ptr<RebatchingQueue>;\n\nclass CreateRebatchingQueueOp : public Operator<CPUContext> {\n public:\n  CreateRebatchingQueueOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    *OperatorBase::Output<RebatchingQueuePtr>(0) =\n        RebatchingQueuePtr(new RebatchingQueue(\n            OperatorBase::GetSingleArgument<int>(\"capacity\", 1),\n            OperatorBase::GetSingleArgument<int>(\"num_blobs\", 1)));\n    return true;\n  }\n};\n\nclass EnqueueRebatchingQueueOp : public Operator<CPUContext> {\n public:\n  EnqueueRebatchingQueueOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator(operator_def, ws),\n        enqueueBatch_(\n            OperatorBase::GetSingleArgument<bool>(\"enqueue_batch\", false)) {}\n  bool RunOnDevice() override {\n    auto& queue = Inputs()[0]->template Get<RebatchingQueuePtr>();\n    CHECK(queue);\n    CAFFE_ENFORCE_EQ(InputSize(), queue->numBlobs() + 1);\n    std::vector<const TensorCPU*> inputTensors;\n    inputTensors.reserve(InputSize() - 1);\n    for (int i = 1; i < InputSize(); ++i) {\n      inputTensors.push_back(&Input(i));\n    }\n\n    return enqueueBatch_ ? 
queue->enqueueMany(context_, inputTensors)\n                         : queue->enqueueOne(context_, inputTensors);\n  }\n\n private:\n  const bool enqueueBatch_;\n};\n\nclass DequeueRebatchingQueueOp : public Operator<CPUContext> {\n public:\n  DequeueRebatchingQueueOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator(operator_def, ws),\n        numElements_(OperatorBase::GetSingleArgument<int>(\"num_elements\", 1)) {}\n\n  bool RunOnDevice() override {\n    auto& queue = Inputs()[0]->template Get<RebatchingQueuePtr>();\n    CHECK(queue);\n\n    std::vector<TensorCPU*> outputTensors;\n    outputTensors.reserve(OutputSize());\n    for (int i = 0; i < OutputSize(); ++i) {\n      outputTensors.push_back(Output(i));\n    }\n\n    return queue->dequeue(context_, numElements_, outputTensors);\n  }\n\n private:\n  int numElements_;\n};\n\nclass CloseRebatchingQueueOp : public Operator<CPUContext> {\n public:\n  CloseRebatchingQueueOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    CAFFE_ENFORCE_EQ(InputSize(), 1);\n    auto& queue = Inputs()[0]->template Get<RebatchingQueuePtr>();\n    CAFFE_ENFORCE(queue);\n    queue->close();\n    return true;\n  }\n};\n} // caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/sgd/adagrad_op.h",
    "content": "#pragma once\n\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename Context>\nvoid adagrad_update(\n    int N,\n    const float* w,\n    const float* g,\n    const float* h,\n    float* nw,\n    float* nh,\n    float epsilon,\n    float decay,\n    const float* lr,\n    Context* /*context*/) {\n  for (auto i = 0; i < N; ++i) {\n    float gi = g[i];\n    float hi = nh[i] = decay * h[i] + gi * gi;\n    nw[i] = w[i] + lr[0] * gi / (std::sqrt(hi) + epsilon);\n  }\n}\n\ntemplate <typename T, class Context>\nclass AdagradOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  AdagradOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        epsilon_(OperatorBase::GetSingleArgument<T>(\"epsilon\", 1e-5f)),\n        decay_(OperatorBase::GetSingleArgument<T>(\"decay\", 1.0f)) {}\n\n  bool RunOnDevice() override {\n    CAFFE_ENFORCE(Input(GRAD).size() == Input(MOMENT_1).size());\n    CAFFE_ENFORCE(Input(GRAD).size() == Input(PARAM).size());\n    Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));\n    Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1));\n    adagrad_update<Context>(\n        Input(GRAD).size(),\n        Input(PARAM).template data<T>(),\n        Input(GRAD).template data<T>(),\n        Input(MOMENT_1).template data<T>(),\n        Output(OUTPUT_PARAM)->template mutable_data<T>(),\n        Output(OUTPUT_MOMENT_1)->template mutable_data<T>(),\n        epsilon_,\n        decay_,\n        Input(LR).template data<T>(),\n        &context_);\n    return true;\n  }\n\n protected:\n  T epsilon_;\n  T decay_;\n  INPUT_TAGS(PARAM, MOMENT_1, GRAD, LR);\n  OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1);\n};\n\ntemplate <typename T, class Context>\nclass SparseAdagradOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SparseAdagradOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n  
      epsilon_(OperatorBase::GetSingleArgument<float>(\"epsilon\", 1e-5f)) {}\n\n  bool RunOnDevice() override {\n    // Enforce shapes\n    CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_1).size());\n    CAFFE_ENFORCE_EQ(Input(LR).size(), 1);\n    CAFFE_ENFORCE_EQ(\n        Input(PARAM).size_from_dim(1),\n        Input(GRAD).size_from_dim(Input(INDICES).ndim()));\n\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, Input(INDICES));\n  }\n\n  template <typename SIndex>\n  bool DoRunWithType() {\n    const auto* lr = Input(LR).template data<T>();\n    const auto* indices = Input(INDICES).template data<SIndex>();\n    const auto* gradIn = Input(GRAD).template data<T>();\n    const auto* paramIn = Input(PARAM).template data<T>();\n    const auto* momentIn = Input(MOMENT_1).template data<T>();\n    auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<T>();\n    auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>();\n\n    auto n = Input(INDICES).size();\n    if (n == 0) {\n      return true;\n    }\n\n    auto block_size = Input(GRAD).size() / n;\n    for (auto i = 0; i < n; ++i) {\n      auto idx = indices[i];\n      if (block_size == 1) {\n        float gi = gradIn[i];\n        float hi = momentOut[idx] = momentIn[idx] + gi * gi;\n        paramOut[idx] = paramIn[idx] + lr[0] * gi / (std::sqrt(hi) + epsilon_);\n      } else {\n        auto offsetI = i * block_size;\n        auto offsetIdx = idx * block_size;\n\n#ifndef NDEBUG\n        CAFFE_ENFORCE_GE(\n            Input(PARAM).size(),\n            block_size + offsetIdx,\n            this->debug_def().input(PARAM),\n            \", out of bound,  idx:\",\n            idx,\n            \" for input i:\",\n            i,\n            \" and block size:\",\n            block_size);\n        CAFFE_ENFORCE_GE(\n            Input(GRAD).size(),\n            block_size + offsetI,\n            this->debug_def().input(GRAD),\n            \", out of bound idx, idx:\",\n   
         idx,\n            \" for input i:\",\n            i);\n#endif\n        adagrad_update(\n            block_size,\n            paramIn + offsetIdx,\n            gradIn + offsetI,\n            momentIn + offsetIdx,\n            paramOut + offsetIdx,\n            momentOut + offsetIdx,\n            epsilon_,\n            1.0f,\n            lr,\n            &context_);\n      }\n    }\n    return true;\n  }\n\n protected:\n  T epsilon_;\n  INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR);\n  OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1);\n};\n\ntemplate <typename T, class Context>\nclass RowWiseSparseAdagradOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  RowWiseSparseAdagradOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        epsilon_(OperatorBase::GetSingleArgument<float>(\"epsilon\", 1e-5f)) {}\n\n  bool RunOnDevice() override {\n    // Enforce shapes\n    CAFFE_ENFORCE_EQ(Input(PARAM).dims()[0], Input(MOMENT_1).size());\n    CAFFE_ENFORCE_EQ(Input(LR).size(), 1);\n    CAFFE_ENFORCE_EQ(\n        Input(PARAM).size_from_dim(1),\n        Input(GRAD).size_from_dim(Input(INDICES).ndim()));\n\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, Input(INDICES));\n  }\n\n  template <typename SIndex>\n  bool DoRunWithType() {\n    const auto* lr = Input(LR).template data<T>();\n    const auto* indices = Input(INDICES).template data<SIndex>();\n    const auto* gradIn = Input(GRAD).template data<T>();\n    const auto* paramIn = Input(PARAM).template data<T>();\n    const auto* momentIn = Input(MOMENT_1).template data<T>();\n    auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<T>();\n    auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>();\n\n    auto n = Input(INDICES).size();\n    if (n == 0) {\n      return true;\n    }\n\n    auto block_size = Input(GRAD).size() / n;\n\n    for (auto i = 0; i < n; ++i) {\n      auto idx = 
indices[i];\n      if (block_size == 1) {\n        float gi = gradIn[i];\n        float hi = momentOut[idx] = momentIn[idx] + gi * gi;\n        paramOut[idx] = paramIn[idx] + lr[0] * gi / (std::sqrt(hi) + epsilon_);\n      } else {\n        auto offsetI = i * block_size;\n        auto offsetIdx = idx * block_size;\n\n#ifndef NDEBUG\n        CAFFE_ENFORCE_GE(\n            Input(PARAM).size(),\n            block_size + offsetIdx,\n            this->debug_def().input(PARAM),\n            \", out of bound,  idx:\",\n            idx,\n            \" for input i:\",\n            i,\n            \" and block size:\",\n            block_size);\n        CAFFE_ENFORCE_GE(\n            Input(GRAD).size(),\n            block_size + offsetI,\n            this->debug_def().input(GRAD),\n            \", out of bound idx, idx:\",\n            idx,\n            \" for input i:\",\n            i);\n#endif\n\n        const float* w = paramIn + offsetIdx;\n        const float* g = gradIn + offsetI;\n        const float* h = momentIn + idx;\n        float* nw = paramOut + offsetIdx;\n        float* nh = momentOut + idx;\n        float hs = 0.;\n        for (auto j = 0; j < block_size; ++j) {\n          float gj = g[j];\n          hs += gj * gj;\n        }\n        float hi = nh[0] = h[0] + hs / block_size;\n        for (auto j = 0; j < block_size; ++j) {\n          nw[j] = w[j] + lr[0] * g[j] / (std::sqrt(hi) + epsilon_);\n        }\n      }\n    }\n    return true;\n  }\n\n protected:\n  T epsilon_;\n  INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR);\n  OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1);\n};\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/sgd/adam_op.h",
    "content": "#pragma once\n\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename Context>\nvoid adam_update(\n    int N,\n    const float* g,\n    const float* m,\n    const float* v,\n    float* ng,\n    float* nm,\n    float* nv,\n    float beta1,\n    float beta2,\n    float eps_hat,\n    float correction,\n    const float* lr,\n    Context* /*context*/) {\n  for (auto i = 0; i < N; ++i) {\n    float gi = g[i];\n    float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1);\n    float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2);\n    ng[i] = lr[0] * correction * mi / (std::sqrt(vi) + eps_hat);\n  }\n}\n\ntemplate <typename Context>\nvoid adam_compute(\n    int N,\n    const float* w,\n    const float* g,\n    const float* m,\n    const float* v,\n    float* nw,\n    float* nm,\n    float* nv,\n    float beta1,\n    float beta2,\n    float eps_hat,\n    float correction,\n    const float* lr,\n    Context* /*context*/) {\n  for (auto i = 0; i < N; ++i) {\n    float gi = g[i];\n    float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1);\n    float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2);\n    float ng = lr[0] * correction * mi / (std::sqrt(vi) + eps_hat);\n    nw[i] = w[i] + ng;\n  }\n}\n\ntemplate <typename T, class Context>\nclass AdamOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  AdamOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        beta1_(OperatorBase::GetSingleArgument<float>(\"beta1\", 0.9f)),\n        beta2_(OperatorBase::GetSingleArgument<float>(\"beta2\", 0.999f)),\n        epsilon_(OperatorBase::GetSingleArgument<float>(\"epsilon\", 1e-5f)) {}\n  bool RunOnDevice() override {\n    // Iter live on the CPU\n    CAFFE_ENFORCE(OperatorBase::InputIsType<TensorCPU>(ITER));\n    CAFFE_ENFORCE(Input(LR).size() == 1);\n    CAFFE_ENFORCE(Input(GRAD).size() == Input(PARAM).size());\n    CAFFE_ENFORCE(Input(GRAD).size() == 
Input(MOMENT_1).size());\n    CAFFE_ENFORCE(Input(GRAD).size() == Input(MOMENT_2).size());\n    Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));\n    Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1));\n    Output(OUTPUT_MOMENT_2)->ResizeLike(Input(MOMENT_2));\n\n    const auto iter =\n        OperatorBase::Input<TensorCPU>(ITER).template data<int64_t>()[0];\n\n    const auto t = iter + 1;\n    const auto correction =\n        std::sqrt(T(1.) - std::pow(beta2_, t)) / (T(1.) - std::pow(beta1_, t));\n    adam_compute<Context>(\n        Input(GRAD).size(),\n        Input(PARAM).template data<T>(),\n        Input(GRAD).template data<T>(),\n        Input(MOMENT_1).template data<T>(),\n        Input(MOMENT_2).template data<T>(),\n        Output(OUTPUT_PARAM)->template mutable_data<T>(),\n        Output(OUTPUT_MOMENT_1)->template mutable_data<T>(),\n        Output(OUTPUT_MOMENT_2)->template mutable_data<T>(),\n        beta1_,\n        beta2_,\n        epsilon_,\n        correction,\n        Input(LR).template data<T>(),\n        &context_);\n    return true;\n  }\n\n protected:\n  T beta1_{0.9};\n  T beta2_{0.999};\n  T epsilon_{1e-8};\n  INPUT_TAGS(PARAM, MOMENT_1, MOMENT_2, GRAD, LR, ITER);\n  OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1, OUTPUT_MOMENT_2);\n};\n\ntemplate <typename T, class Context>\nclass SparseAdamOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SparseAdamOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        beta1_(OperatorBase::GetSingleArgument<float>(\"beta1\", 0.9f)),\n        beta2_(OperatorBase::GetSingleArgument<float>(\"beta2\", 0.999f)),\n        epsilon_(OperatorBase::GetSingleArgument<float>(\"epsilon\", 1e-5f)) {}\n\n  bool RunOnDevice() override {\n    // Enforce shapes\n    CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_1).size());\n    CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_2).size());\n    
CAFFE_ENFORCE_EQ(Input(PARAM).size_from_dim(1),\n        Input(GRAD).size_from_dim(Input(INDICES).ndim()));\n    CAFFE_ENFORCE_EQ(Input(LR).size(), 1);\n\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, Input(INDICES));\n  }\n\n  template <typename SIndex>\n  bool DoRunWithType() {\n    const auto* lr = Input(LR).template data<T>();\n    const auto iter =\n        OperatorBase::Input<TensorCPU>(ITER).template data<int64_t>()[0];\n\n    const auto t = iter + 1;\n    const auto correction =\n        std::sqrt(T(1.) - std::pow(beta2_, t)) / (T(1.) - std::pow(beta1_, t));\n\n    auto block_size = Input(PARAM).size() / Input(PARAM).dim(0);\n    auto n = Input(GRAD).size() / block_size;\n\n    const auto* paramIn = Input(PARAM).template data<T>();\n    const auto* indices = Input(INDICES).template data<SIndex>();\n    const auto* gradIn = Input(GRAD).template data<T>();\n    const auto* moment1In = Input(MOMENT_1).template data<T>();\n    const auto* moment2In = Input(MOMENT_2).template data<T>();\n    auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<T>();\n    auto* moment1Out = Output(OUTPUT_MOMENT_1)->template mutable_data<T>();\n    auto* moment2Out = Output(OUTPUT_MOMENT_2)->template mutable_data<T>();\n\n    for (auto i = 0; i < n; ++i) {\n      auto idx = indices[i];\n\n      if (block_size == 1) {\n        float gi = gradIn[i];\n        float mi = moment1Out[idx] =\n            moment1In[idx] * beta1_ + gi * (1 - beta1_);\n        float vi = moment2Out[idx] =\n            moment2In[idx] * beta2_ + gi * gi * (1 - beta2_);\n        paramOut[idx] =\n            paramIn[idx] + lr[0] * correction * mi / (std::sqrt(vi) + epsilon_);\n\n      } else {\n        auto offsetI = i * block_size;\n        auto offsetIdx = idx * block_size;\n\n#ifndef NDEBUG\n        CAFFE_ENFORCE_GE(\n            Input(PARAM).size(),\n            block_size + offsetIdx,\n            this->debug_def().input(PARAM),\n            \", out of bound,  
idx:\",\n            idx,\n            \" for input i:\",\n            i,\n            \" and block size:\",\n            block_size);\n        CAFFE_ENFORCE_GE(\n            Input(GRAD).size(),\n            block_size + offsetI,\n            this->debug_def().input(GRAD),\n            \", out of bound idx, idx:\",\n            idx,\n            \" for input i:\",\n            i);\n#endif\n\n        adam_compute(\n            block_size,\n            paramIn + offsetIdx,\n            gradIn + offsetI,\n            moment1In + offsetIdx,\n            moment2In + offsetIdx,\n            paramOut + offsetIdx,\n            moment1Out + offsetIdx,\n            moment2Out + offsetIdx,\n            beta1_,\n            beta2_,\n            epsilon_,\n            correction,\n            lr,\n            &context_);\n      }\n    }\n    return true;\n  }\n\n protected:\n  T beta1_;\n  T beta2_;\n  T epsilon_;\n  INPUT_TAGS(PARAM, MOMENT_1, MOMENT_2, INDICES, GRAD, LR, ITER);\n  OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1, OUTPUT_MOMENT_2);\n};\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/sgd/ftrl_op.h",
    "content": "#pragma once\n\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T>\nstruct FtrlParams {\n  explicit FtrlParams(OperatorBase* op)\n      : alphaInv(1.0 / op->GetSingleArgument<float>(\"alpha\", 0.005f)),\n        beta(op->GetSingleArgument<float>(\"beta\", 1.0f)),\n        lambda1(op->GetSingleArgument<float>(\"lambda1\", 0.001f)),\n        lambda2(op->GetSingleArgument<float>(\"lambda2\", 0.001f)) {}\n  T alphaInv;\n  T beta;\n  T lambda1;\n  T lambda2;\n};\n\n// TODO(dzhulgakov): implement GPU version if necessary\ntemplate <typename T, class Context>\nclass FtrlOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  FtrlOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws), params_(this) {\n    CAFFE_ENFORCE(\n        !HasArgument(\"alpha\") || ALPHA >= InputSize(),\n        \"Cannot specify alpha by both input and argument\");\n  }\n  bool RunOnDevice() override;\n\n protected:\n  FtrlParams<T> params_;\n  INPUT_TAGS(VAR, N_Z, GRAD, ALPHA);\n  OUTPUT_TAGS(OUTPUT_VAR, OUTPUT_N_Z);\n};\n\ntemplate <typename T>\nclass SparseFtrlOp final : public Operator<CPUContext> {\n public:\n  SparseFtrlOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<CPUContext>(operator_def, ws), params_(this) {\n    CAFFE_ENFORCE(\n        !HasArgument(\"alpha\") || ALPHA >= InputSize(),\n        \"Cannot specify alpha by both input and argument\");\n  }\n\n  bool RunOnDevice() override {\n    // run time learning rate override\n    if (ALPHA < InputSize()) {\n      CAFFE_ENFORCE_EQ(Input(ALPHA).size(), 1, \"alpha should be real-valued\");\n      params_.alphaInv = 1.0 / *(Input(ALPHA).template data<T>());\n    }\n    // Use run-time polymorphism\n    auto& indices = Input(INDICES);\n    if (indices.template IsType<int32_t>()) {\n      DoRun<int32_t>();\n    } else if (indices.template IsType<int64_t>()) {\n      DoRun<int64_t>();\n    } else {\n      
LOG(FATAL) << \"Unsupported type of INDICES in SparseFtrlOp: \"\n                      << indices.meta().name();\n    }\n    return true;\n  }\n\n protected:\n  FtrlParams<T> params_;\n  INPUT_TAGS(VAR, N_Z, INDICES, GRAD, ALPHA);\n  OUTPUT_TAGS(OUTPUT_VAR, OUTPUT_N_Z);\n\n private:\n  template <typename SIndex>\n  void DoRun();\n};\n\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/sgd/iter_op.h",
    "content": "#ifndef CAFFE2_SGD_ITER_OP_H_\n#define CAFFE2_SGD_ITER_OP_H_\n\n#include <limits>\n#include <mutex>\n\n#include \"caffe2/core/blob_serialization.h\"\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ninline void IncrementIter(TensorCPU* output) {\n  CAFFE_ENFORCE_EQ(\n      output->size(),\n      1,\n      \"The output of IterOp exists, but not of the right size.\");\n  int64_t* iter = output->template mutable_data<int64_t>();\n  CAFFE_ENFORCE(*iter >= 0, \"Previous iteration number is negative.\");\n  CAFFE_ENFORCE(\n      *iter < std::numeric_limits<int64_t>::max(), \"Overflow will happen!\");\n  (*iter)++;\n}\n\n// IterOp runs an iteration counter. I cannot think of a case where we would\n// need to access the iter variable on device, so this will always produce a\n// tensor on the CPU side. If the blob already exists and is a tensor<int64_t>\n// object, we will simply increment it (this emulates the case when we want to\n// resume training). Otherwise we will have the iter starting with 0.\ntemplate <class Context>\nclass IterOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  IterOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    if (InputSize() == 0) {\n      if (!OperatorBase::OutputIsType<TensorCPU>(0)) {\n        // This is the first run; set the iter to start with 0.\n        LOG(ERROR) << \"You are using an old definition of IterOp that will \"\n                      \"be deprecated soon. 
More specifically, IterOp now \"\n                      \"requires an explicit in-place input and output.\";\n\n        auto* output = OperatorBase::Output<TensorCPU>(0);\n        VLOG(1) << \"Initializing iter counter.\";\n        output->Resize(1);\n        output->template mutable_data<int64_t>()[0] = 0;\n      }\n    }\n    IncrementIter(OperatorBase::Output<TensorCPU>(0));\n    return true;\n  }\n};\n\ntemplate <class Context>\nclass AtomicIterOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  AtomicIterOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws) {}\n\n  bool RunOnDevice() override {\n    auto& mutex = OperatorBase::Input<std::unique_ptr<std::mutex>>(0);\n    std::lock_guard<std::mutex> lg(*mutex);\n    IncrementIter(OperatorBase::Output<TensorCPU>(0));\n    return true;\n  }\n};\n\nclass MutexSerializer : public BlobSerializerBase {\n public:\n  /**\n   * Serializes a std::unique_ptr<std::mutex>. Note that this blob has to\n   * contain std::unique_ptr<std::mutex>, otherwise this function produces a\n   * fatal error.\n   */\n  void Serialize(\n      const Blob& blob,\n      const string& name,\n      BlobSerializerBase::SerializationAcceptor acceptor) override;\n};\n\nclass MutexDeserializer : public BlobDeserializerBase {\n public:\n  void Deserialize(const BlobProto& proto, Blob* blob) override;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_SGD_ITER_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/sgd/learning_rate_functors.h",
    "content": "#ifndef CAFFE2_SGD_LEARNING_RATE_FUNCTORS_H_\n#define CAFFE2_SGD_LEARNING_RATE_FUNCTORS_H_\n\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\n// LearningRateFunctor is a functor that when fed with an iter number, produces\n// the learning rate for the corresponding iteration.\ntemplate <typename T>\nclass LearningRateFunctor {\n public:\n  virtual ~LearningRateFunctor() {}\n  virtual T operator()(const int64_t iter) const = 0;\n};\n\n// Fixed: not changing the learning rate at all.\ntemplate <typename T>\nclass FixedLearningRate : public LearningRateFunctor<T> {\n public:\n  T operator()(const int64_t /*iter*/) const override {\n    return 1.;\n  }\n};\n\n// Step: return gamma ^ (floor(iter / step))\ntemplate <typename T>\nclass StepLearningRate : public LearningRateFunctor<T> {\n public:\n  StepLearningRate(const int stepsize, const T gamma)\n      : stepsize_(stepsize), gamma_(gamma) {}\n  T operator()(const int64_t iter) const override {\n    return std::pow(gamma_, static_cast<T>(iter / stepsize_));\n  }\n\n  int stepsize_;\n  T gamma_;\n};\n\n// Exp: return gamma ^ iter\ntemplate <typename T>\nclass ExpLearningRate : public LearningRateFunctor<T> {\n public:\n  explicit ExpLearningRate(const T gamma) : gamma_(gamma) {}\n  T operator()(const int64_t iter) const override {\n    return std::pow(gamma_, static_cast<T>(iter));\n  }\n\n  T gamma_;\n};\n\n// Inv: return (1 + gamma * iter) ^ (-power)\ntemplate <typename T>\nclass InvLearningRate : public LearningRateFunctor<T> {\n public:\n  InvLearningRate(const T gamma, const T power)\n      : gamma_(gamma), power_(power) {}\n  T operator()(const int64_t iter) const override {\n    return std::pow(T(1) + gamma_ * iter, -power_);\n  }\n  T gamma_;\n  T power_;\n};\n\n// Poly: return (1 - iter/max_iter) ^ (power)\ntemplate <typename T>\nclass PolyLearningRate : public LearningRateFunctor<T> {\n public:\n  PolyLearningRate(const T power, const int64_t 
max_iter) \n      : power_(power), max_iter_(max_iter) {}\n  T operator()(const int64_t iter) const override {\n    return std::pow(1 - T(iter)/T(max_iter_), power_);\n  }\n  T power_;\n  uint64_t max_iter_;\n};\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_SGD_LEARNING_RATE_FUNCTORS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/sgd/learning_rate_op.h",
    "content": "#ifndef CAFFE2_SGD_LEARNING_RATE_OP_H_\n#define CAFFE2_SGD_LEARNING_RATE_OP_H_\n\n#include <cfloat>\n#include <cmath>\n#include \"caffe2/core/context.h\"\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/sgd/learning_rate_functors.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass LearningRateOp final : public Operator<Context> {\n public:\n  LearningRateOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws), functor_(nullptr),\n        base_lr_(\n            OperatorBase::template GetSingleArgument<float>(\n                \"base_lr\", FLT_MAX)) {\n    CAFFE_ENFORCE_NE(base_lr_, FLT_MAX, \"Base learning rate must be set.\");\n    const string policy = OperatorBase::GetSingleArgument<string>(\"policy\", \"\");\n    CAFFE_ENFORCE(policy.size(), \"Must specify a learning rate policy.\");\n    if (policy == \"fixed\") {\n      functor_.reset(new FixedLearningRate<T>());\n    } else if (policy == \"step\") {\n      int stepsize =\n          OperatorBase::template GetSingleArgument<int>(\"stepsize\", 0);\n      T gamma = OperatorBase::template GetSingleArgument<float>(\"gamma\", 0);\n      DCHECK_GT(stepsize, 0);\n      DCHECK_GT(gamma, 0);\n      functor_.reset(new StepLearningRate<T>(stepsize, gamma));\n    } else if (policy == \"exp\") {\n      T gamma = OperatorBase::template GetSingleArgument<float>(\"gamma\", 0);\n      DCHECK_GT(gamma, 0);\n      functor_.reset(new ExpLearningRate<T>(gamma));\n    } else if (policy == \"inv\") {\n      T gamma = OperatorBase::template GetSingleArgument<float>(\"gamma\", 0);\n      T power = OperatorBase::template GetSingleArgument<float>(\"power\", 0);\n      DCHECK_GT(gamma, 0);\n      DCHECK_GT(power, 0);\n      functor_.reset(new InvLearningRate<T>(gamma, power));\n    } else if (policy == \"poly\") {\n      int max_iter = OperatorBase::template GetSingleArgument<int>(\"max_iter\", -1);\n      T power = OperatorBase::template 
GetSingleArgument<float>(\"power\", 0);\n      DCHECK_GT(power, 0);\n      functor_.reset(new PolyLearningRate<T>(power, max_iter));\n    } else {\n      LOG(FATAL) << \"Unknown learning rate policy: \" << policy;\n    }\n  }\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n\n  bool RunOnDevice() override {\n    int64_t iter =\n        OperatorBase::Input<TensorCPU>(0).template data<int64_t>()[0];\n    T learning_rate = base_lr_ * (*functor_)(iter);\n    // Write to output.\n    auto* output = Output(0);\n    output->Resize(vector<TIndex>());\n    context_.template Copy<T, CPUContext, Context>(\n        1, &learning_rate, Output(0)->template mutable_data<T>());\n    return true;\n  }\n\n private:\n  unique_ptr<LearningRateFunctor<T> > functor_;\n  T base_lr_;\n\n};\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_SGD_LEARNING_RATE_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/sgd/momentum_sgd_op.h",
    "content": "#pragma once\n\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename Context>\nvoid momentum_sgd_update(\n    const int N,\n    const float* g,\n    const float* m,\n    float* ng,\n    float* nm,\n    const float* lr,\n    const float momentum,\n    const bool nesterov,\n    float* param,\n    Context* /*context*/) {\n  const float LR = lr[0];\n  for (auto i = 0; i < N; ++i) {\n    if (!nesterov) {\n      const float adjusted_gradient = LR * g[i] + momentum * m[i];\n      nm[i] = adjusted_gradient;\n      ng[i] = adjusted_gradient;\n    } else {\n      const float mi = m[i];\n      const float mi_new = momentum * mi + LR * g[i];\n      nm[i] = mi_new;\n      ng[i] = (1 + momentum) * mi_new - momentum * mi;\n    }\n\n    if (param) {\n      param[i] -= ng[i];\n    }\n  }\n}\n\ntemplate <typename T, class Context>\nclass MomentumSGDOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  MomentumSGDOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        momentum_(OperatorBase::GetSingleArgument<T>(\"momentum\", 0.0)),\n        nesterov_(OperatorBase::GetSingleArgument<int>(\"nesterov\", 0)) {}\n\n  bool RunOnDevice() override {\n    // Iter live on the CPU\n    CAFFE_ENFORCE(OperatorBase::InputIsType<Tensor<Context>>(GRAD));\n    CAFFE_ENFORCE(OperatorBase::InputIsType<Tensor<Context>>(MOMENTUM));\n    CAFFE_ENFORCE(Input(LR).size() == 1);\n    CAFFE_ENFORCE(Input(GRAD).size() == Input(MOMENTUM).size());\n    Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));\n    Output(OUTPUT_MOMENTUM)->ResizeLike(Input(MOMENTUM));\n\n    momentum_sgd_update<Context>(\n        Input(GRAD).size(),\n        Input(GRAD).template data<T>(),\n        Input(MOMENTUM).template data<T>(),\n        Output(OUTPUT_GRAD)->template mutable_data<T>(),\n        Output(OUTPUT_MOMENTUM)->template mutable_data<T>(),\n        Input(LR).template data<T>(),\n        momentum_,\n        
nesterov_,\n        NULL,\n        &context_);\n    return true;\n  }\n\n protected:\n  T momentum_{0.9};\n  bool nesterov_;\n  INPUT_TAGS(GRAD, MOMENTUM, LR);\n  OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MOMENTUM);\n};\n\ntemplate <typename T, class Context>\nclass MomentumSGDUpdateOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  MomentumSGDUpdateOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        momentum_(OperatorBase::GetSingleArgument<T>(\"momentum\", 0.0)),\n        nesterov_(OperatorBase::GetSingleArgument<int>(\"nesterov\", 0)) {}\n\n  bool RunOnDevice() override {\n    // Iter live on the CPU\n    CAFFE_ENFORCE(OperatorBase::InputIsType<Tensor<Context>>(GRAD));\n    CAFFE_ENFORCE(OperatorBase::InputIsType<Tensor<Context>>(MOMENTUM));\n    CAFFE_ENFORCE(Input(LR).size() == 1);\n    CAFFE_ENFORCE(Input(GRAD).size() == Input(MOMENTUM).size());\n    Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));\n    Output(OUTPUT_MOMENTUM)->ResizeLike(Input(MOMENTUM));\n\n    momentum_sgd_update<Context>(\n        Input(GRAD).size(),\n        Input(GRAD).template data<T>(),\n        Input(MOMENTUM).template data<T>(),\n        Output(OUTPUT_GRAD)->template mutable_data<T>(),\n        Output(OUTPUT_MOMENTUM)->template mutable_data<T>(),\n        Input(LR).template data<T>(),\n        momentum_,\n        nesterov_,\n        Output(OUTPUT_PARAM)->template mutable_data<T>(),\n        &context_);\n    return true;\n  }\n\n protected:\n  T momentum_{0.9};\n  bool nesterov_;\n  INPUT_TAGS(GRAD, MOMENTUM, LR, PARAM);\n  OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MOMENTUM, OUTPUT_PARAM);\n};\n\ntemplate <typename T, class Context>\nclass SparseMomentumSGDUpdateOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  SparseMomentumSGDUpdateOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        
momentum_(OperatorBase::GetSingleArgument<T>(\"momentum\", 0.0)),\n        nesterov_(OperatorBase::GetSingleArgument<int>(\"nesterov\", 0)) {}\n\n  bool RunOnDevice() override {\n    // Resize [potentially] out-of-place blobs\n    Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));\n\n    // Enforce shapes\n    CAFFE_ENFORCE_EQ(Input(LR).size(), 1);\n    CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENTUM).size());\n    CAFFE_ENFORCE_EQ(Input(PARAM).size_from_dim(1),\n        Input(GRAD).size_from_dim(Input(INDICES).ndim()));\n\n    return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(\n        this, Input(INDICES));\n  }\n\n  template <typename SIndex>\n  bool DoRunWithType() {\n    auto block_size = Input(PARAM).size() / Input(PARAM).dim(0);\n    auto n = Input(GRAD).size() / block_size;\n\n    const auto* gradIn = Input(GRAD).template data<T>();\n    const auto* momentumIn = Input(MOMENTUM).template data<T>();\n    const auto* lr = Input(LR).template data<T>();\n    const auto* paramIn = Input(PARAM).template data<T>();\n    const auto* indices = Input(INDICES).template data<SIndex>();\n\n    auto* gradOut = Output(OUTPUT_GRAD)->template mutable_data<T>();\n    auto* momentumOut = Output(OUTPUT_MOMENTUM)->template mutable_data<T>();\n    auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<T>();\n\n    for (auto i = 0; i < n; ++i) {\n      auto idx = indices[i];\n      auto offsetI = i * block_size;\n      auto offsetIdx = idx * block_size;\n\n      CAFFE_ENFORCE(offsetIdx + block_size <= Input(PARAM).size());\n      CAFFE_ENFORCE(offsetI + block_size <= Input(GRAD).size());\n\n      momentum_sgd_update<Context>(\n          block_size,\n          gradIn + offsetI,\n          momentumIn + offsetIdx,\n          gradOut + offsetI,\n          momentumOut + offsetIdx,\n          lr,\n          momentum_,\n          nesterov_,\n          paramOut + offsetIdx,\n          &context_);\n    }\n    return true;\n  }\n\n protected:\n  T momentum_;\n  bool 
nesterov_;\n  INPUT_TAGS(GRAD, MOMENTUM, LR, PARAM, INDICES);\n  OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MOMENTUM, OUTPUT_PARAM);\n};\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/sgd/rmsprop_op.h",
    "content": "#pragma once\n\n#include \"caffe2/core/common_omp.h\"\n#include \"caffe2/core/operator.h\"\n\nnamespace caffe2 {\n\ntemplate <typename Context>\nvoid rmsprop_update(\n    int N,\n    const float* g,\n    const float* ms,\n    const float* mom,\n    float* ng,\n    float* nms,\n    float* nmom,\n    float decay,\n    float momentum,\n    float epsilon,\n    const float* lr,\n    Context* context);\n\ntemplate <typename T, class Context>\nclass RmsPropOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  RmsPropOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        decay_(OperatorBase::GetSingleArgument<float>(\"decay\", 0.9f)),\n        momentum_(OperatorBase::GetSingleArgument<float>(\"momentum\", 0.0f)),\n        epsilon_(OperatorBase::GetSingleArgument<float>(\"epsilon\", 1e-5f)) {}\n  bool RunOnDevice() override {\n    CAFFE_ENFORCE(Input(LR).size() == 1);\n    CAFFE_ENFORCE(Input(GRAD).size() == Input(MEAN_SQUARES).size());\n    CAFFE_ENFORCE(Input(GRAD).size() == Input(OUTPUT_MOMENTUM).size());\n    Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));\n    Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));\n    Output(OUTPUT_MEAN_SQUARES)->ResizeLike(Input(MEAN_SQUARES));\n    Output(OUTPUT_MOMENTUM)->ResizeLike(Input(MOMENTUM));\n    rmsprop_update<Context>(\n        Input(GRAD).size(),\n        Input(GRAD).template data<T>(),\n        Input(MEAN_SQUARES).template data<T>(),\n        Input(MOMENTUM).template data<T>(),\n        Output(OUTPUT_GRAD)->template mutable_data<T>(),\n        Output(OUTPUT_MEAN_SQUARES)->template mutable_data<T>(),\n        Output(OUTPUT_MOMENTUM)->template mutable_data<T>(),\n        decay_,\n        momentum_,\n        epsilon_,\n        Input(LR).template data<T>(),\n        &context_);\n    return true;\n  }\n\n protected:\n  T decay_{0.9};\n  T momentum_{0.0};\n  T epsilon_{1e-8};\n  INPUT_TAGS(GRAD, MEAN_SQUARES, MOMENTUM, LR);\n  
OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MEAN_SQUARES, OUTPUT_MOMENTUM);\n};\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/sgd/yellowfin_op.h",
    "content": "// YellowFin: An automatic tuner for momentum SGD\n// (https://arxiv.org/abs/1706.03471)\n// The YellowFinOp tunes learning rate and momentum and performs momentum SGD\n// steps. The learning rate and momentum are separate for any matrix of\n// parameters.\n\n#pragma once\n\n#include <cmath>\n#include <cstring>\n#include \"caffe2/core/operator.h\"\n#include \"caffe2/utils/math.h\"\n\nnamespace caffe2 {\n\ntemplate <typename T, class Context>\nclass YellowFinOp final : public Operator<Context> {\n public:\n  USE_OPERATOR_CONTEXT_FUNCTIONS;\n  YellowFinOp(const OperatorDef& operator_def, Workspace* ws)\n      : Operator<Context>(operator_def, ws),\n        curv_win_width_(\n            OperatorBase::GetSingleArgument<int>(\"curv_win_width\", 20)),\n        nesterov_(OperatorBase::GetSingleArgument<int>(\"nesterov\", false)),\n        zero_debias_(\n            OperatorBase::GetSingleArgument<bool>(\"zero_debias\", true)),\n        epsilon_(OperatorBase::GetSingleArgument<T>(\"epsilon\", 1e-6f)),\n        beta_(OperatorBase::GetSingleArgument<T>(\"beta\", 0.999f)) {}\n\n protected:\n  // GetLrMu and MomentumSgdUpdate have different implementations for GPU and\n  // CPU. 
All other methods are generic.\n  void GetLrMu();\n  void MomentumSgdUpdate();\n\n  void AfterApply() {\n    // g\n    MovingAverage(D_, grad_, g_avg_, g_avg_out_, g_deb_);\n    // g2\n    math::Mul(D_, grad_, grad_, aux_vector_, &context_);\n    MovingAverage(D_, aux_vector_, g2_avg_, g2_avg_out_, g2_deb_);\n    // g_norm2\n    math::Dot(D_, grad_, grad_, g_norm2_, &context_);\n    math::Maximum(1, epsilon_, g_norm2_, g_norm2_, &context_);\n    MovingAverage(1, g_norm2_, g_norm2_avg_, g_norm2_avg_out_, g_norm2_deb_);\n    // g_norm\n    math::Sqrt(1, g_norm2_, g_norm_, &context_);\n    MovingAverage(1, g_norm_, g_norm_avg_, g_norm_avg_out_, g_norm_deb_);\n    math::Maximum(1, epsilon_, g_norm_deb_, g_norm_deb_, &context_);\n    // Curvature range: g_norm2_min, g_norm2_max\n    math::CopyVector(curv_win_width_, curv_win_, curv_win_out_, &context_);\n    T* curv_win_cell = curv_win_out_ + (iter_ - 1) % curv_win_width_;\n    math::Log(1, g_norm2_, curv_win_cell, &context_);\n    int valid_end = std::min(curv_win_width_, iter_);\n    math::ReduceMin(\n        valid_end, curv_win_out_, g_norm2_min_, &scratch_tensor_, &context_);\n    math::ReduceMax(\n        valid_end, curv_win_out_, g_norm2_max_, &scratch_tensor_, &context_);\n    MovingAverage(\n        1,\n        g_norm2_min_,\n        g_norm2_min_avg_,\n        g_norm2_min_avg_out_,\n        g_norm2_min_deb_);\n    MovingAverage(\n        1,\n        g_norm2_max_,\n        g_norm2_max_avg_,\n        g_norm2_max_avg_out_,\n        g_norm2_max_deb_);\n    math::Exp(1, g_norm2_min_deb_, g_norm2_min_deb_, &context_);\n    math::Exp(1, g_norm2_max_deb_, g_norm2_max_deb_, &context_);\n    math::Maximum(1, epsilon_, g_norm2_min_deb_, g_norm2_min_deb_, &context_);\n    math::Maximum(1, epsilon_, g_norm2_max_deb_, g_norm2_max_deb_, &context_);\n    // Gradient variance\n    math::Dot(D_, g_deb_, g_deb_, aux_scalar_, &context_);\n\n    math::Sub(1, g_norm2_deb_, aux_scalar_, variance_, &context_);\n    math::Maximum(1, 
epsilon_, variance_, variance_, &context_);\n    // Distance to opt\n    math::Div(1, g_norm_avg_out_, g_norm2_avg_out_, distance_, &context_);\n    MovingAverage(\n        1, distance_, distance_avg_, distance_avg_out_, distance_deb_);\n    if (iter_ > 1) {\n      GetLrMu();\n    }\n  }\n\n  void MovingAverage(\n      const int N,\n      const T* elt,\n      const T* avg,\n      T* new_avg,\n      T* debias_avg) {\n    const T one = 1;\n    math::Scale(N, beta_, avg, new_avg, &context_);\n    math::Axpy(N, one - beta_, elt, new_avg, &context_);\n    math::Scale(N, debias_factor_, new_avg, debias_avg, &context_);\n  }\n\n  T ZeroDebiasFactor() {\n    if (zero_debias_) {\n      const T one = 1;\n      return one / (one - std::pow(beta_, iter_));\n    } else {\n      return 1;\n    }\n  }\n\n public:\n  bool RunOnDevice() override {\n// Iter live on the CPU\n\n#define CAFFE2_YF_READ_INPUT(INPUT_NAME, VAR_NAME)  \\\n  const auto VAR_NAME##_tensor = Input(INPUT_NAME); \\\n  VAR_NAME##_ = VAR_NAME##_tensor.template data<T>();\n\n    CAFFE2_YF_READ_INPUT(PARAM, param)\n    CAFFE2_YF_READ_INPUT(MOMENT, moment)\n    CAFFE2_YF_READ_INPUT(LR_AVG, lr_avg)\n    CAFFE2_YF_READ_INPUT(MU_AVG, mu_avg)\n    CAFFE2_YF_READ_INPUT(CURV_WIN, curv_win)\n    CAFFE2_YF_READ_INPUT(G_AVG, g_avg)\n    CAFFE2_YF_READ_INPUT(G2_AVG, g2_avg)\n    CAFFE2_YF_READ_INPUT(SCALARS_MEMORY, scalars_memory)\n    CAFFE2_YF_READ_INPUT(GRAD, grad)\n#undef CAFFE2_YF_READ_OUTPUT\n\n    CAFFE_ENFORCE(OperatorBase::InputIsType<TensorCPU>(ITER));\n    CAFFE_ENFORCE_EQ(lr_avg_tensor.size(), 1);\n    CAFFE_ENFORCE_EQ(mu_avg_tensor.size(), 1);\n    CAFFE_ENFORCE_EQ(param_tensor.ndim(), moment_tensor.ndim());\n    CAFFE_ENFORCE_EQ(param_tensor.ndim(), g_avg_tensor.ndim());\n    CAFFE_ENFORCE_EQ(param_tensor.ndim(), g2_avg_tensor.ndim());\n    CAFFE_ENFORCE_EQ(param_tensor.ndim(), grad_tensor.ndim());\n    for (int i = 0; i < param_tensor.ndim(); ++i) {\n      CAFFE_ENFORCE_EQ(param_tensor.dim32(i), 
moment_tensor.dim32(i));\n      CAFFE_ENFORCE_EQ(param_tensor.dim32(i), g_avg_tensor.dim32(i));\n      CAFFE_ENFORCE_EQ(param_tensor.dim32(i), g2_avg_tensor.dim32(i));\n      CAFFE_ENFORCE_EQ(param_tensor.dim32(i), grad_tensor.dim32(i));\n    }\n\n    iter_ = OperatorBase::Input<TensorCPU>(ITER).template data<int64_t>()[0];\n\n    D_ = param_tensor.size();\n\n    // Input data - persistent memory for internal scalars\n    // Note: Memory for these scalars is being allocated during initialization\n    //       of the network. If you want to add / remove a scalar, make a\n    //       suitable change of memory size in the initialization.\n    const T* memory_it = scalars_memory_ - 1;\n    g_norm_avg_ = ++memory_it;\n    g_norm2_avg_ = ++memory_it;\n    g_norm2_min_avg_ = ++memory_it;\n    g_norm2_max_avg_ = ++memory_it;\n    distance_avg_ = ++memory_it;\n\n// Output data\n\n#define CAFFE2_YF_READ_OUTPUT(OUTPUT_NAME, VAR_NAME)         \\\n  auto VAR_NAME##_out_tensor = Output(OUTPUT_##OUTPUT_NAME); \\\n  VAR_NAME##_out_tensor->ResizeLike(VAR_NAME##_tensor);      \\\n  VAR_NAME##_out_ = VAR_NAME##_out_tensor->template mutable_data<T>();\n\n    CAFFE2_YF_READ_OUTPUT(PARAM, param)\n    CAFFE2_YF_READ_OUTPUT(MOMENT, moment)\n    CAFFE2_YF_READ_OUTPUT(LR_AVG, lr_avg)\n    CAFFE2_YF_READ_OUTPUT(MU_AVG, mu_avg)\n    CAFFE2_YF_READ_OUTPUT(CURV_WIN, curv_win)\n    CAFFE2_YF_READ_OUTPUT(G_AVG, g_avg)\n    CAFFE2_YF_READ_OUTPUT(G2_AVG, g2_avg)\n    CAFFE2_YF_READ_OUTPUT(SCALARS_MEMORY, scalars_memory)\n#undef CAFFE2_YF_READ_OUTPUT\n\n    T* out_memory_it = scalars_memory_out_ - 1;\n    g_norm_avg_out_ = ++out_memory_it;\n    g_norm2_avg_out_ = ++out_memory_it;\n    g_norm2_min_avg_out_ = ++out_memory_it;\n    g_norm2_max_avg_out_ = ++out_memory_it;\n    distance_avg_out_ = ++out_memory_it;\n\n#define CAFFE2_YF_INIT_VECTOR(NAME) \\\n  NAME##_tensor_.Resize(D_);        \\\n  NAME##_ = NAME##_tensor_.template mutable_data<T>();\n\n    CAFFE2_YF_INIT_VECTOR(aux_vector)\n    
CAFFE2_YF_INIT_VECTOR(g_deb)\n    CAFFE2_YF_INIT_VECTOR(g2_deb)\n    CAFFE2_YF_INIT_VECTOR(g_deb2)\n#undef CAFFE2_YF_INIT_VECTOR\n\n#define CAFFE2_YF_INIT_SCALAR(NAME) \\\n  NAME##_tensor_.Resize(1);         \\\n  NAME##_ = NAME##_tensor_.template mutable_data<T>();\n\n    CAFFE2_YF_INIT_SCALAR(aux_scalar)\n    CAFFE2_YF_INIT_SCALAR(distance)\n    CAFFE2_YF_INIT_SCALAR(distance_deb)\n    CAFFE2_YF_INIT_SCALAR(g_norm)\n    CAFFE2_YF_INIT_SCALAR(g_norm_deb)\n    CAFFE2_YF_INIT_SCALAR(g_norm2)\n    CAFFE2_YF_INIT_SCALAR(g_norm2_max)\n    CAFFE2_YF_INIT_SCALAR(g_norm2_max_deb)\n    CAFFE2_YF_INIT_SCALAR(g_norm2_min)\n    CAFFE2_YF_INIT_SCALAR(g_norm2_min_deb)\n    CAFFE2_YF_INIT_SCALAR(g_norm2_deb)\n    CAFFE2_YF_INIT_SCALAR(lr)\n    CAFFE2_YF_INIT_SCALAR(lr_deb)\n    CAFFE2_YF_INIT_SCALAR(mu_deb)\n    CAFFE2_YF_INIT_SCALAR(mu)\n    CAFFE2_YF_INIT_SCALAR(variance)\n#undef CAFFE2_YF_INIT_SCALAR\n\n    debias_factor_ = ZeroDebiasFactor();\n    MomentumSgdUpdate();\n    AfterApply();\n    return true;\n  }\n\n protected:\n  int curv_win_width_;\n  bool nesterov_;\n  bool zero_debias_;\n\n  T epsilon_;\n  T beta_;\n  T debias_factor_;\n\n  int D_;\n\n// Temporary memory on device, listed all variables used in calculations\n#define CAFFE2_YF_DEFINE_TENSOR(NAME) \\\n  Tensor<Context> NAME##_tensor_;     \\\n  T* NAME##_;\n\n  CAFFE2_YF_DEFINE_TENSOR(aux_vector)\n  CAFFE2_YF_DEFINE_TENSOR(g_deb)\n  CAFFE2_YF_DEFINE_TENSOR(g2_deb)\n  CAFFE2_YF_DEFINE_TENSOR(g_deb2)\n\n  CAFFE2_YF_DEFINE_TENSOR(aux_scalar)\n  CAFFE2_YF_DEFINE_TENSOR(distance)\n  CAFFE2_YF_DEFINE_TENSOR(distance_deb)\n  CAFFE2_YF_DEFINE_TENSOR(g_norm)\n  CAFFE2_YF_DEFINE_TENSOR(g_norm_deb)\n  CAFFE2_YF_DEFINE_TENSOR(g_norm2)\n  CAFFE2_YF_DEFINE_TENSOR(g_norm2_deb)\n  CAFFE2_YF_DEFINE_TENSOR(g_norm2_max)\n  CAFFE2_YF_DEFINE_TENSOR(g_norm2_max_deb)\n  CAFFE2_YF_DEFINE_TENSOR(g_norm2_min)\n  CAFFE2_YF_DEFINE_TENSOR(g_norm2_min_deb)\n  CAFFE2_YF_DEFINE_TENSOR(lr)\n  CAFFE2_YF_DEFINE_TENSOR(lr_deb)\n  
CAFFE2_YF_DEFINE_TENSOR(mu)\n  CAFFE2_YF_DEFINE_TENSOR(mu_deb)\n  CAFFE2_YF_DEFINE_TENSOR(variance)\n\n  Tensor<Context> scratch_tensor_;\n\n#undef CAFFE2_YF_DEFINE_TENSOR\n\n  // Input tensors' data\n  const T* param_;\n  const T* moment_;\n  const T* lr_avg_;\n  const T* mu_avg_;\n  const T* curv_win_;\n  const T* g_avg_;\n  const T* g2_avg_;\n  const T* scalars_memory_;\n  const T* grad_;\n  int iter_;\n\n  // Scalar data from scalars_memory_ input tensor\n  const T* g_norm_avg_;\n  const T* g_norm2_avg_;\n  const T* g_norm2_min_avg_;\n  const T* g_norm2_max_avg_;\n  const T* distance_avg_;\n\n  // Output tensors' data\n\n  T* param_out_;\n  T* moment_out_;\n  T* lr_avg_out_;\n  T* mu_avg_out_;\n  T* curv_win_out_;\n  T* g_avg_out_;\n  T* g2_avg_out_;\n  T* scalars_memory_out_;\n\n  // Scalar data from scalars_memory_ output tensor\n  T* g_norm_avg_out_;\n  T* g_norm2_avg_out_;\n  T* g_norm2_min_avg_out_;\n  T* g_norm2_max_avg_out_;\n  T* distance_avg_out_;\n\n  INPUT_TAGS(\n      PARAM,\n      MOMENT,\n      LR_AVG,\n      MU_AVG,\n      CURV_WIN,\n      G_AVG,\n      G2_AVG,\n      SCALARS_MEMORY,\n      GRAD,\n      ITER);\n  OUTPUT_TAGS(\n      OUTPUT_PARAM,\n      OUTPUT_MOMENT,\n      OUTPUT_LR_AVG,\n      OUTPUT_MU_AVG,\n      OUTPUT_CURV_WIN,\n      OUTPUT_G_AVG,\n      OUTPUT_G2_AVG,\n      OUTPUT_SCALARS_MEMORY);\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/transforms/common_subexpression_elimination.h",
    "content": "// Copyright 2004-present Facebook. All Rights Reserved.\n\n#pragma once\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/transform.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n#include \"caffe2/utils/proto_utils.h\"\n\nnamespace caffe2 {\n\n/**\n * Common Subexpression Elimination\n *\n * This transforms looks for specific operators (denoted by whitelisted_ops_),\n * and removes unnecessary repetition of that operator.\n *\n * Consider some operator of X, that reads from blob b_ written to by W.\n * X_a and X_b read the output of X. However, another operator Y, is the same\n * type as X, has the same arguments as X, and reads from the same input b_,\n * written to by W. It's output is the same as X. Y_a, Y_b, and Y_c read from Y.\n *\n * Then, we can eliminate the common subexpressions X and Y, and merge them to\n * Z, where X_a, X_b, Y_a, Y_b, and Y_c all read from Z.\n *\n *\n * TODO(benz): Fix the error to not match nodes that write to external output.\n */\nclass CommonSubexpressionEliminationTransform : public Transform {\n public:\n  CommonSubexpressionEliminationTransform() {\n    SetPatternMatchType(SORTED_WRT_EXECUTION_ORDER);\n  }\n\n protected:\n  bool PatternRule(\n      const transform::Graph& g,\n      const std::vector<int>& subgraph,\n      int idx) override;\n  bool ValidatorRule(\n      const transform::Graph& g,\n      const std::vector<int>& subgraph) override;\n  bool ReplaceRule(const std::vector<int>& subgraph, transform::Graph* g_ptr)\n      override;\n\n private:\n  bool IsWhitelisted(string op_type) {\n    return whitelisted_ops_.count(op_type);\n  }\n  std::set<string> whitelisted_ops_ = {\"LearningRate\", \"FC\"};\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/transforms/conv_to_nnpack_transform.h",
    "content": "#pragma once\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n#include \"caffe2/transforms/single_op_transform.h\"\n#include \"caffe2/utils/proto_utils.h\"\n\nnamespace caffe2 {\n\nclass ConvToNNPackTransform : public SingleOpTransform {\n protected:\n  // Specify what the op needs to be to match the pattern.\n  bool MatchOperator(const OperatorDef& op) override {\n    return (\n        op.type() == \"Conv\" && op.device_option().device_type() == CPU &&\n        op.engine() != \"NNPACK\");\n  }\n\n  // Specify how the operator should be replaced.\n  void ReplaceOperator(OperatorDef* op) override {\n    op->set_engine(\"NNPACK\");\n  }\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/transforms/pattern_net_transform.h",
    "content": "#pragma once\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/transform.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n#include \"caffe2/utils/proto_utils.h\"\n\nnamespace caffe2 {\n\n/**\n * PatternNetTransform allows you to create transforms using a simple\n * interface.\n *\n * Simply provide a Pattern NetDef and a Replace NetDef,\n * and this Transform will find subgraphs which fit the pattern net,\n * and replace it with the replace net.\n */\nclass PatternNetTransform : public Transform {\n public:\n  PatternNetTransform(const NetDef& pattern_net, const NetDef& replace_net)\n      : p_(transform::Graph(pattern_net)), r_(transform::Graph(replace_net)) {\n    // external input and output must match!\n    CAFFE_ENFORCE(\n        p_.external_input() == r_.external_input(),\n        \"External inputs do not match!\");\n    CAFFE_ENFORCE(\n        p_.external_output() == r_.external_output(),\n        \"External outputs do not match!\");\n    ordered_ops_ = GetPatternTraversalOrder(p_);\n    inverse_ops_.resize(ordered_ops_.size());\n    for (int i = 0; i < ordered_ops_.size(); i++) {\n      inverse_ops_[ordered_ops_[i]] = i;\n    }\n  }\n\n  void EnableArgumentMatching() {\n    argument_match_ = true;\n  }\n\n  void DisableArgumentMatching() {\n    argument_match_ = false;\n  }\n\n protected:\n  /**\n   * We want to the final result of subgraph to match the PatternNet in the\n   * order of ordered_ops, operator by operator.\n   *\n   * [[[ ie. 
g.node(subgraph[i]) should match p.node(ordered_ops[i]) ]]]\n   *\n   * PatternRule for PatternNetTransform does the following:\n   *\n   * When trying to insert node idx into subgraph[p_idx],\n   * we need to see if the edges between index and the\n   * subgraph match the edges between p[ordered_ops[idx]]\n   * and p[ordered_ops[0]...ordered_ops[p_idx-1]].\n   */\n  bool PatternRule(\n      const transform::Graph& g,\n      const std::vector<int>& subgraph,\n      int idx) override;\n  /**\n   * ValidatorRule for PatternNetTransform does the following:\n   *\n   * Checks if the size of subgraph and p.size() are the same. That's it!\n   */\n  bool ValidatorRule(\n      const transform::Graph& g,\n      const std::vector<int>& subgraph) override;\n  /**\n   * ReplaceRule for PatternNet Transform does the following:\n   *\n   * 1) Figure out edge renamings for edges going into/out of the subgraph.\n   * That is, for each blob in the pattern graph, what is it called in the\n   * matched subgraph?\n   *\n   * 2) Remove the matched subgraph.\n   *\n   * 3) Append the replace graph's operators to the graph's operators, and use\n   *    the renamings to rename the blob names.\n   *\n   * 4) Create all the children/parent relationships within the replaced graph,\n   *    and stitch together the inputs and outputs into the rest of the graph,\n   *    matching the removed subgraph.\n   */\n  bool ReplaceRule(const std::vector<int>& subgraph, transform::Graph* g_ptr)\n      override;\n\n private:\n  /**\n   * This returns a permutation of the Pattern Net's operators.\n   * The permutation satisfies this property:\n   *    - For any index i, order(i) is a neighbor of some node from\n   *      {order(1), ..., order(i-1)}.\n   *\n   * Why is this important? 
Consider the following case:\n   * PatternNet: 0 ---> 2 <--- 1\n   *\n   * When we have matched onto [0], and trying to add [1] to our subgraph,\n   * we cannot, since PatternMatch only considers neighbors of the current\n   * subgraph as a candidate next node.\n   *\n   * Therefore, we must present the subgraph in an order such that each node is\n   * a neighbor of its prefix subgraph. One ordering for the above example is\n   * [0, 2, 1].\n   */\n  std::vector<int> GetPatternTraversalOrder(const transform::Graph& g);\n\n  // Graph of Pattern NetDef\n  transform::Graph p_;\n\n  // The Traversal Order of the Pattern Net's Operators\n  // This is a permutation of the numbers from {0, ..., p.size()-1}\n  std::vector<int> ordered_ops_;\n\n  // The Inverse of the Traversal Order of the Pattern Net's Operators\n  // That is, inverse_ops[ordered_ops[i]] == i is always true.\n  std::vector<int> inverse_ops_;\n\n  // Graph of Replace NetDef\n  transform::Graph r_;\n\n  // This flag determines if the transform will match operator arguments.\n  bool argument_match_ = false;\n\n  const string TransformBlobWrapper(const string& blob_name) {\n    return \"transform/\" + blob_name + \"_\" + caffe2::to_string(ssa_id_);\n  }\n\n  int ssa_id_ = 0;\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/transforms/single_op_transform.h",
    "content": "#pragma once\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/transform.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n#include \"caffe2/utils/proto_utils.h\"\n\nnamespace caffe2 {\n\n/**\n * Single Op Transform Base class\n *\n * A transform which is applied to a single node, in place.\n *\n * Transforms which derive from SingleOpTransform need to override:\n * ReplaceOperator and MatchOperator.\n */\nclass SingleOpTransform : public Transform {\n protected:\n  bool PatternRule(\n      const transform::Graph& g,\n      const std::vector<int>& subgraph,\n      int idx) override;\n  bool ValidatorRule(\n      const transform::Graph& g,\n      const std::vector<int>& subgraph) override;\n  bool ReplaceRule(const std::vector<int>& subgraph, transform::Graph* g_ptr)\n      override;\n\n  // Specify what the op needs to be to match the pattern.\n  virtual bool MatchOperator(const OperatorDef& op) = 0;\n\n  // Specify how the operator should be replaced.\n  virtual void ReplaceOperator(OperatorDef* op) = 0;\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/cast.h",
    "content": "#pragma once\n\n#include <caffe2/utils/proto_utils.h>\n\nnamespace caffe2 {\n\nnamespace cast {\n\ninline TensorProto_DataType GetCastDataType(const ArgumentHelper& helper, std::string arg) {\n  TensorProto_DataType to;\n  if (helper.HasSingleArgumentOfType<string>(arg)) {\n#ifndef CAFFE2_USE_LITE_PROTO\n    string s = helper.GetSingleArgument<string>(arg, \"float\");\n    std::transform(s.begin(), s.end(), s.begin(), ::toupper);\n    CAFFE_ENFORCE(TensorProto_DataType_Parse(s, &to), \"Unknown 'to' argument: \", s);\n#else\n    CAFFE_THROW(\"String cast op not supported\");\n#endif\n  } else {\n    to = static_cast<TensorProto_DataType>(\n        helper.GetSingleArgument<int>(arg, TensorProto_DataType_FLOAT));\n  }\n  return to;\n}\n\n};  // namespace cast\n\n};  // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/cblas.h",
    "content": "// This is the exact cblas.h header file, placed here purely in order to get\n// the enums.\n\n#include \"caffe2/core/macros.h\"\n\n#ifndef CBLAS_H\n#ifdef CAFFE2_USE_MKL\n#include <mkl_cblas.h>\n#else  // CAFFE2_USE_MKL\n\n#ifndef CBLAS_ENUM_DEFINED_H\n   #define CBLAS_ENUM_DEFINED_H\n   enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102 };\n   enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113,\n                         AtlasConj=114};\n   enum CBLAS_UPLO  {CblasUpper=121, CblasLower=122};\n   enum CBLAS_DIAG  {CblasNonUnit=131, CblasUnit=132};\n   enum CBLAS_SIDE  {CblasLeft=141, CblasRight=142};\n#endif\n\n#ifndef CBLAS_ENUM_ONLY\n#define CBLAS_H\n#define CBLAS_INDEX int\n\nint cblas_errprn(int ierr, int info, char *form, ...);\nvoid cblas_xerbla(int p, const char *rout, const char *form, ...);\n\n/*\n * ===========================================================================\n * Prototypes for level 1 BLAS functions (complex are recast as routines)\n * ===========================================================================\n */\nfloat  cblas_sdsdot(const int N, const float alpha, const float *X,\n                    const int incX, const float *Y, const int incY);\ndouble cblas_dsdot(const int N, const float *X, const int incX, const float *Y,\n                   const int incY);\nfloat  cblas_sdot(const int N, const float  *X, const int incX,\n                  const float  *Y, const int incY);\ndouble cblas_ddot(const int N, const double *X, const int incX,\n                  const double *Y, const int incY);\n/*\n * Functions having prefixes Z and C only\n */\nvoid   cblas_cdotu_sub(const int N, const void *X, const int incX,\n                       const void *Y, const int incY, void *dotu);\nvoid   cblas_cdotc_sub(const int N, const void *X, const int incX,\n                       const void *Y, const int incY, void *dotc);\n\nvoid   cblas_zdotu_sub(const int N, const void *X, const int incX,\n            
           const void *Y, const int incY, void *dotu);\nvoid   cblas_zdotc_sub(const int N, const void *X, const int incX,\n                       const void *Y, const int incY, void *dotc);\n\n\n/*\n * Functions having prefixes S D SC DZ\n */\nfloat  cblas_snrm2(const int N, const float *X, const int incX);\nfloat  cblas_sasum(const int N, const float *X, const int incX);\n\ndouble cblas_dnrm2(const int N, const double *X, const int incX);\ndouble cblas_dasum(const int N, const double *X, const int incX);\n\nfloat  cblas_scnrm2(const int N, const void *X, const int incX);\nfloat  cblas_scasum(const int N, const void *X, const int incX);\n\ndouble cblas_dznrm2(const int N, const void *X, const int incX);\ndouble cblas_dzasum(const int N, const void *X, const int incX);\n\n\n/*\n * Functions having standard 4 prefixes (S D C Z)\n */\nCBLAS_INDEX cblas_isamax(const int N, const float  *X, const int incX);\nCBLAS_INDEX cblas_idamax(const int N, const double *X, const int incX);\nCBLAS_INDEX cblas_icamax(const int N, const void   *X, const int incX);\nCBLAS_INDEX cblas_izamax(const int N, const void   *X, const int incX);\n\n/*\n * ===========================================================================\n * Prototypes for level 1 BLAS routines\n * ===========================================================================\n */\n\n/*\n * Routines with standard 4 prefixes (s, d, c, z)\n */\nvoid cblas_sswap(const int N, float *X, const int incX,\n                 float *Y, const int incY);\nvoid cblas_scopy(const int N, const float *X, const int incX,\n                 float *Y, const int incY);\nvoid cblas_saxpy(const int N, const float alpha, const float *X,\n                 const int incX, float *Y, const int incY);\nvoid catlas_saxpby(const int N, const float alpha, const float *X,\n                  const int incX, const float beta, float *Y, const int incY);\nvoid catlas_sset\n   (const int N, const float alpha, float *X, const int incX);\n\nvoid 
cblas_dswap(const int N, double *X, const int incX,\n                 double *Y, const int incY);\nvoid cblas_dcopy(const int N, const double *X, const int incX,\n                 double *Y, const int incY);\nvoid cblas_daxpy(const int N, const double alpha, const double *X,\n                 const int incX, double *Y, const int incY);\nvoid catlas_daxpby(const int N, const double alpha, const double *X,\n                  const int incX, const double beta, double *Y, const int incY);\nvoid catlas_dset\n   (const int N, const double alpha, double *X, const int incX);\n\nvoid cblas_cswap(const int N, void *X, const int incX,\n                 void *Y, const int incY);\nvoid cblas_ccopy(const int N, const void *X, const int incX,\n                 void *Y, const int incY);\nvoid cblas_caxpy(const int N, const void *alpha, const void *X,\n                 const int incX, void *Y, const int incY);\nvoid catlas_caxpby(const int N, const void *alpha, const void *X,\n                  const int incX, const void *beta, void *Y, const int incY);\nvoid catlas_cset\n   (const int N, const void *alpha, void *X, const int incX);\n\nvoid cblas_zswap(const int N, void *X, const int incX,\n                 void *Y, const int incY);\nvoid cblas_zcopy(const int N, const void *X, const int incX,\n                 void *Y, const int incY);\nvoid cblas_zaxpy(const int N, const void *alpha, const void *X,\n                 const int incX, void *Y, const int incY);\nvoid catlas_zaxpby(const int N, const void *alpha, const void *X,\n                  const int incX, const void *beta, void *Y, const int incY);\nvoid catlas_zset\n   (const int N, const void *alpha, void *X, const int incX);\n\n\n/*\n * Routines with S and D prefix only\n */\nvoid cblas_srotg(float *a, float *b, float *c, float *s);\nvoid cblas_srotmg(float *d1, float *d2, float *b1, const float b2, float *P);\nvoid cblas_srot(const int N, float *X, const int incX,\n                float *Y, const int incY, const float c, 
const float s);\nvoid cblas_srotm(const int N, float *X, const int incX,\n                float *Y, const int incY, const float *P);\n\nvoid cblas_drotg(double *a, double *b, double *c, double *s);\nvoid cblas_drotmg(double *d1, double *d2, double *b1, const double b2, double *P);\nvoid cblas_drot(const int N, double *X, const int incX,\n                double *Y, const int incY, const double c, const double s);\nvoid cblas_drotm(const int N, double *X, const int incX,\n                double *Y, const int incY, const double *P);\n\n\n/*\n * Routines with S D C Z CS and ZD prefixes\n */\nvoid cblas_sscal(const int N, const float alpha, float *X, const int incX);\nvoid cblas_dscal(const int N, const double alpha, double *X, const int incX);\nvoid cblas_cscal(const int N, const void *alpha, void *X, const int incX);\nvoid cblas_zscal(const int N, const void *alpha, void *X, const int incX);\nvoid cblas_csscal(const int N, const float alpha, void *X, const int incX);\nvoid cblas_zdscal(const int N, const double alpha, void *X, const int incX);\n\n/*\n * Extra reference routines provided by ATLAS, but not mandated by the standard\n */\nvoid cblas_crotg(void *a, void *b, void *c, void *s);\nvoid cblas_zrotg(void *a, void *b, void *c, void *s);\nvoid cblas_csrot(const int N, void *X, const int incX, void *Y, const int incY,\n                 const float c, const float s);\nvoid cblas_zdrot(const int N, void *X, const int incX, void *Y, const int incY,\n                 const double c, const double s);\n\n/*\n * ===========================================================================\n * Prototypes for level 2 BLAS\n * ===========================================================================\n */\n\n/*\n * Routines with standard 4 prefixes (S, D, C, Z)\n */\nvoid cblas_sgemv(const enum CBLAS_ORDER Order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const float alpha, const float *A, const int lda,\n                 
const float *X, const int incX, const float beta,\n                 float *Y, const int incY);\nvoid cblas_sgbmv(const enum CBLAS_ORDER Order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const int KL, const int KU, const float alpha,\n                 const float *A, const int lda, const float *X,\n                 const int incX, const float beta, float *Y, const int incY);\nvoid cblas_strmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const float *A, const int lda,\n                 float *X, const int incX);\nvoid cblas_stbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const float *A, const int lda,\n                 float *X, const int incX);\nvoid cblas_stpmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const float *Ap, float *X, const int incX);\nvoid cblas_strsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const float *A, const int lda, float *X,\n                 const int incX);\nvoid cblas_stbsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const float *A, const int lda,\n                 float *X, const int incX);\nvoid cblas_stpsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const float *Ap, float *X, const int incX);\n\nvoid 
cblas_dgemv(const enum CBLAS_ORDER Order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const double alpha, const double *A, const int lda,\n                 const double *X, const int incX, const double beta,\n                 double *Y, const int incY);\nvoid cblas_dgbmv(const enum CBLAS_ORDER Order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const int KL, const int KU, const double alpha,\n                 const double *A, const int lda, const double *X,\n                 const int incX, const double beta, double *Y, const int incY);\nvoid cblas_dtrmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const double *A, const int lda,\n                 double *X, const int incX);\nvoid cblas_dtbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const double *A, const int lda,\n                 double *X, const int incX);\nvoid cblas_dtpmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const double *Ap, double *X, const int incX);\nvoid cblas_dtrsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const double *A, const int lda, double *X,\n                 const int incX);\nvoid cblas_dtbsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const double *A, const int lda,\n                 double *X, const int incX);\nvoid cblas_dtpsv(const 
enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const double *Ap, double *X, const int incX);\n\nvoid cblas_cgemv(const enum CBLAS_ORDER Order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 const void *X, const int incX, const void *beta,\n                 void *Y, const int incY);\nvoid cblas_cgbmv(const enum CBLAS_ORDER Order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const int KL, const int KU, const void *alpha,\n                 const void *A, const int lda, const void *X,\n                 const int incX, const void *beta, void *Y, const int incY);\nvoid cblas_ctrmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *A, const int lda,\n                 void *X, const int incX);\nvoid cblas_ctbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const void *A, const int lda,\n                 void *X, const int incX);\nvoid cblas_ctpmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *Ap, void *X, const int incX);\nvoid cblas_ctrsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *A, const int lda, void *X,\n                 const int incX);\nvoid cblas_ctbsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE 
TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const void *A, const int lda,\n                 void *X, const int incX);\nvoid cblas_ctpsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *Ap, void *X, const int incX);\n\nvoid cblas_zgemv(const enum CBLAS_ORDER Order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 const void *X, const int incX, const void *beta,\n                 void *Y, const int incY);\nvoid cblas_zgbmv(const enum CBLAS_ORDER Order,\n                 const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\n                 const int KL, const int KU, const void *alpha,\n                 const void *A, const int lda, const void *X,\n                 const int incX, const void *beta, void *Y, const int incY);\nvoid cblas_ztrmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *A, const int lda,\n                 void *X, const int incX);\nvoid cblas_ztbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const void *A, const int lda,\n                 void *X, const int incX);\nvoid cblas_ztpmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *Ap, void *X, const int incX);\nvoid cblas_ztrsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *A, const 
int lda, void *X,\n                 const int incX);\nvoid cblas_ztbsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const int K, const void *A, const int lda,\n                 void *X, const int incX);\nvoid cblas_ztpsv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\n                 const int N, const void *Ap, void *X, const int incX);\n\n\n/*\n * Routines with S and D prefixes only\n */\nvoid cblas_ssymv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const int N, const float alpha, const float *A,\n                 const int lda, const float *X, const int incX,\n                 const float beta, float *Y, const int incY);\nvoid cblas_ssbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const int N, const int K, const float alpha, const float *A,\n                 const int lda, const float *X, const int incX,\n                 const float beta, float *Y, const int incY);\nvoid cblas_sspmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const int N, const float alpha, const float *Ap,\n                 const float *X, const int incX,\n                 const float beta, float *Y, const int incY);\nvoid cblas_sger(const enum CBLAS_ORDER Order, const int M, const int N,\n                const float alpha, const float *X, const int incX,\n                const float *Y, const int incY, float *A, const int lda);\nvoid cblas_ssyr(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                const int N, const float alpha, const float *X,\n                const int incX, float *A, const int lda);\nvoid cblas_sspr(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                const int N, const float alpha, const float *X,\n                const 
int incX, float *Ap);\nvoid cblas_ssyr2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                const int N, const float alpha, const float *X,\n                const int incX, const float *Y, const int incY, float *A,\n                const int lda);\nvoid cblas_sspr2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                const int N, const float alpha, const float *X,\n                const int incX, const float *Y, const int incY, float *A);\n\nvoid cblas_dsymv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const int N, const double alpha, const double *A,\n                 const int lda, const double *X, const int incX,\n                 const double beta, double *Y, const int incY);\nvoid cblas_dsbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const int N, const int K, const double alpha, const double *A,\n                 const int lda, const double *X, const int incX,\n                 const double beta, double *Y, const int incY);\nvoid cblas_dspmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const int N, const double alpha, const double *Ap,\n                 const double *X, const int incX,\n                 const double beta, double *Y, const int incY);\nvoid cblas_dger(const enum CBLAS_ORDER Order, const int M, const int N,\n                const double alpha, const double *X, const int incX,\n                const double *Y, const int incY, double *A, const int lda);\nvoid cblas_dsyr(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                const int N, const double alpha, const double *X,\n                const int incX, double *A, const int lda);\nvoid cblas_dspr(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                const int N, const double alpha, const double *X,\n                const int incX, double *Ap);\nvoid cblas_dsyr2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO 
Uplo,\n                const int N, const double alpha, const double *X,\n                const int incX, const double *Y, const int incY, double *A,\n                const int lda);\nvoid cblas_dspr2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                const int N, const double alpha, const double *X,\n                const int incX, const double *Y, const int incY, double *A);\n\n\n/*\n * Routines with C and Z prefixes only\n */\nvoid cblas_chemv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const int N, const void *alpha, const void *A,\n                 const int lda, const void *X, const int incX,\n                 const void *beta, void *Y, const int incY);\nvoid cblas_chbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const int N, const int K, const void *alpha, const void *A,\n                 const int lda, const void *X, const int incX,\n                 const void *beta, void *Y, const int incY);\nvoid cblas_chpmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const int N, const void *alpha, const void *Ap,\n                 const void *X, const int incX,\n                 const void *beta, void *Y, const int incY);\nvoid cblas_cgeru(const enum CBLAS_ORDER Order, const int M, const int N,\n                 const void *alpha, const void *X, const int incX,\n                 const void *Y, const int incY, void *A, const int lda);\nvoid cblas_cgerc(const enum CBLAS_ORDER Order, const int M, const int N,\n                 const void *alpha, const void *X, const int incX,\n                 const void *Y, const int incY, void *A, const int lda);\nvoid cblas_cher(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                const int N, const float alpha, const void *X, const int incX,\n                void *A, const int lda);\nvoid cblas_chpr(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                const int N, const 
float alpha, const void *X,\n                const int incX, void *A);\nvoid cblas_cher2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N,\n                const void *alpha, const void *X, const int incX,\n                const void *Y, const int incY, void *A, const int lda);\nvoid cblas_chpr2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N,\n                const void *alpha, const void *X, const int incX,\n                const void *Y, const int incY, void *Ap);\n\nvoid cblas_zhemv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const int N, const void *alpha, const void *A,\n                 const int lda, const void *X, const int incX,\n                 const void *beta, void *Y, const int incY);\nvoid cblas_zhbmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const int N, const int K, const void *alpha, const void *A,\n                 const int lda, const void *X, const int incX,\n                 const void *beta, void *Y, const int incY);\nvoid cblas_zhpmv(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const int N, const void *alpha, const void *Ap,\n                 const void *X, const int incX,\n                 const void *beta, void *Y, const int incY);\nvoid cblas_zgeru(const enum CBLAS_ORDER Order, const int M, const int N,\n                 const void *alpha, const void *X, const int incX,\n                 const void *Y, const int incY, void *A, const int lda);\nvoid cblas_zgerc(const enum CBLAS_ORDER Order, const int M, const int N,\n                 const void *alpha, const void *X, const int incX,\n                 const void *Y, const int incY, void *A, const int lda);\nvoid cblas_zher(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                const int N, const double alpha, const void *X, const int incX,\n                void *A, const int lda);\nvoid cblas_zhpr(const enum CBLAS_ORDER Order, const 
enum CBLAS_UPLO Uplo,\n                const int N, const double alpha, const void *X,\n                const int incX, void *A);\nvoid cblas_zher2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N,\n                const void *alpha, const void *X, const int incX,\n                const void *Y, const int incY, void *A, const int lda);\nvoid cblas_zhpr2(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo, const int N,\n                const void *alpha, const void *X, const int incX,\n                const void *Y, const int incY, void *Ap);\n\n/*\n * ===========================================================================\n * Prototypes for level 3 BLAS\n * ===========================================================================\n */\n\n/*\n * Routines with standard 4 prefixes (S, D, C, Z)\n */\nvoid cblas_sgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\n                 const int K, const float alpha, const float *A,\n                 const int lda, const float *B, const int ldb,\n                 const float beta, float *C, const int ldc);\nvoid cblas_ssymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const int M, const int N,\n                 const float alpha, const float *A, const int lda,\n                 const float *B, const int ldb, const float beta,\n                 float *C, const int ldc);\nvoid cblas_ssyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                 const float alpha, const float *A, const int lda,\n                 const float beta, float *C, const int ldc);\nvoid cblas_ssyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                  const float alpha, 
const float *A, const int lda,\n                  const float *B, const int ldb, const float beta,\n                  float *C, const int ldc);\nvoid cblas_strmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const float alpha, const float *A, const int lda,\n                 float *B, const int ldb);\nvoid cblas_strsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const float alpha, const float *A, const int lda,\n                 float *B, const int ldb);\n\nvoid cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\n                 const int K, const double alpha, const double *A,\n                 const int lda, const double *B, const int ldb,\n                 const double beta, double *C, const int ldc);\nvoid cblas_dsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const int M, const int N,\n                 const double alpha, const double *A, const int lda,\n                 const double *B, const int ldb, const double beta,\n                 double *C, const int ldc);\nvoid cblas_dsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                 const double alpha, const double *A, const int lda,\n                 const double beta, double *C, const int ldc);\nvoid cblas_dsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                  const double 
alpha, const double *A, const int lda,\n                  const double *B, const int ldb, const double beta,\n                  double *C, const int ldc);\nvoid cblas_dtrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const double alpha, const double *A, const int lda,\n                 double *B, const int ldb);\nvoid cblas_dtrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const double alpha, const double *A, const int lda,\n                 double *B, const int ldb);\n\nvoid cblas_cgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\n                 const int K, const void *alpha, const void *A,\n                 const int lda, const void *B, const int ldb,\n                 const void *beta, void *C, const int ldc);\nvoid cblas_csymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 const void *B, const int ldb, const void *beta,\n                 void *C, const int ldc);\nvoid cblas_csyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                 const void *alpha, const void *A, const int lda,\n                 const void *beta, void *C, const int ldc);\nvoid cblas_csyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                  const void *alpha, 
const void *A, const int lda,\n                  const void *B, const int ldb, const void *beta,\n                  void *C, const int ldc);\nvoid cblas_ctrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 void *B, const int ldb);\nvoid cblas_ctrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 void *B, const int ldb);\n\nvoid cblas_zgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\n                 const int K, const void *alpha, const void *A,\n                 const int lda, const void *B, const int ldb,\n                 const void *beta, void *C, const int ldc);\nvoid cblas_zsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 const void *B, const int ldb, const void *beta,\n                 void *C, const int ldc);\nvoid cblas_zsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                 const void *alpha, const void *A, const int lda,\n                 const void *beta, void *C, const int ldc);\nvoid cblas_zsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                  const void *alpha, const void *A, const int 
lda,\n                  const void *B, const int ldb, const void *beta,\n                  void *C, const int ldc);\nvoid cblas_ztrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 void *B, const int ldb);\nvoid cblas_ztrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\n                 const enum CBLAS_DIAG Diag, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 void *B, const int ldb);\n\n\n/*\n * Routines with prefixes C and Z only\n */\nvoid cblas_chemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const int M, const int N,\n                 const void *alpha, const void *A, const int lda,\n                 const void *B, const int ldb, const void *beta,\n                 void *C, const int ldc);\nvoid cblas_cherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                 const float alpha, const void *A, const int lda,\n                 const float beta, void *C, const int ldc);\nvoid cblas_cher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                  const void *alpha, const void *A, const int lda,\n                  const void *B, const int ldb, const float beta,\n                  void *C, const int ldc);\nvoid cblas_zhemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\n                 const enum CBLAS_UPLO Uplo, const int M, const int N,\n                 const void *alpha, const void *A, const int 
lda,\n                 const void *B, const int ldb, const void *beta,\n                 void *C, const int ldc);\nvoid cblas_zherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                 const double alpha, const void *A, const int lda,\n                 const double beta, void *C, const int ldc);\nvoid cblas_zher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\n                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\n                  const void *alpha, const void *A, const int lda,\n                  const void *B, const int ldb, const double beta,\n                  void *C, const int ldc);\n\nint cblas_errprn(int ierr, int info, char *form, ...);\n\n#endif  /* end #ifdef CBLAS_ENUM_ONLY */\n#endif  // CAFFE2_USE_MKL\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/conversions.h",
    "content": "#pragma once\n\n#include <caffe2/core/types.h>\n\n#ifdef __CUDA_ARCH__\n// Proxy for including cuda_fp16.h, because common_gpu.h\n// has necessary diagnostic guards.\n#include <caffe2/core/common_gpu.h>\n#endif\n\n#ifdef __CUDA_ARCH__\n#define CONVERSIONS_DECL __host__ __device__ inline\n#else\n#define CONVERSIONS_DECL inline\n#endif\n\nnamespace caffe2 {\n\nnamespace convert {\n\nnamespace {\ninline float16 cpu_float2half_rn(float f) {\n  float16 ret;\n\n  static_assert(\n      sizeof(unsigned int) == sizeof(float),\n      \"Programming error sizeof(unsigned int) != sizeof(float)\");\n\n  unsigned* xp = reinterpret_cast<unsigned int*>(&f);\n  unsigned x = *xp;\n  unsigned u = (x & 0x7fffffff), remainder, shift, lsb, lsb_s1, lsb_m1;\n  unsigned sign, exponent, mantissa;\n\n  // Get rid of +NaN/-NaN case first.\n  if (u > 0x7f800000) {\n    ret.x = 0x7fffU;\n    return ret;\n  }\n\n  sign = ((x >> 16) & 0x8000);\n\n  // Get rid of +Inf/-Inf, +0/-0.\n  if (u > 0x477fefff) {\n    ret.x = sign | 0x7c00U;\n    return ret;\n  }\n  if (u < 0x33000001) {\n    ret.x = (sign | 0x0000);\n    return ret;\n  }\n\n  exponent = ((u >> 23) & 0xff);\n  mantissa = (u & 0x7fffff);\n\n  if (exponent > 0x70) {\n    shift = 13;\n    exponent -= 0x70;\n  } else {\n    shift = 0x7e - exponent;\n    exponent = 0;\n    mantissa |= 0x800000;\n  }\n  lsb = (1 << shift);\n  lsb_s1 = (lsb >> 1);\n  lsb_m1 = (lsb - 1);\n\n  // Round to nearest even.\n  remainder = (mantissa & lsb_m1);\n  mantissa >>= shift;\n  if (remainder > lsb_s1 || (remainder == lsb_s1 && (mantissa & 0x1))) {\n    ++mantissa;\n    if (!(mantissa & 0x3ff)) {\n      ++exponent;\n      mantissa = 0;\n    }\n  }\n\n  ret.x = (sign | (exponent << 10) | mantissa);\n\n  return ret;\n}\n\ninline float cpu_half2float(float16 h) {\n  unsigned sign = ((h.x >> 15) & 1);\n  unsigned exponent = ((h.x >> 10) & 0x1f);\n  unsigned mantissa = ((h.x & 0x3ff) << 13);\n\n  if (exponent == 0x1f) { /* NaN or Inf */\n    mantissa = 
(mantissa ? (sign = 0, 0x7fffff) : 0);\n    exponent = 0xff;\n  } else if (!exponent) { /* Denorm or Zero */\n    if (mantissa) {\n      unsigned int msb;\n      exponent = 0x71;\n      do {\n        msb = (mantissa & 0x400000);\n        mantissa <<= 1; /* normalize */\n        --exponent;\n      } while (!msb);\n      mantissa &= 0x7fffff; /* 1.mantissa is implicit */\n    }\n  } else {\n    exponent += 0x70;\n  }\n\n  unsigned i = ((sign << 31) | (exponent << 23) | mantissa);\n  float ret;\n  memcpy(&ret, &i, sizeof(i));\n  return ret;\n}\n\n}; // anonymous\n\n#if __CUDACC__\n\n#if CUDA_VERSION >= 9000\nCONVERSIONS_DECL float16 halfToFloat16(half x) {\n#ifdef __GNUC__\n#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)\n#pragma GCC diagnostic push\n#endif\n#pragma GCC diagnostic ignored \"-Wstrict-aliasing\"\n#endif // __GNUC__\n  float16 r = *reinterpret_cast<float16*>(&x);\n#ifdef __GNUC__\n#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)\n#pragma GCC diagnostic pop\n#endif\n#endif // __GNUC__\n  return r;\n}\n\ninline half float16ToHalf(const float16 x) {\n  __half_raw hr;\n  hr.x = x.x;\n  half r(hr);\n  return r;\n}\n\ninline half floatToHalf(const float x) {\n  float16 xh = cpu_float2half_rn(x);\n  return float16ToHalf(xh);\n}\n\n#else\ninline float16 halfToFloat16(__half x) {\n  float16 r;\n  r.x = x.x;\n  return r;\n}\n\ninline __half float16ToHalf(const float16 x) {\n  __half r;\n  r.x = x.x;\n  return r;\n}\n\ninline half floatToHalf(const float x) {\n  float16 xh = cpu_float2half_rn(x);\n  return float16ToHalf(xh);\n}\n#endif // CUDA_VERSION\n\n#endif // __CUDACC__\n\n// general version: defer to static_cast\ntemplate <typename IN, typename OUT>\nCONVERSIONS_DECL OUT To(const IN in) {\n  return static_cast<OUT>(in);\n}\n\n// explicit for fp16\ntemplate <>\nCONVERSIONS_DECL float16 To(const float in) {\n#if __CUDA_ARCH__\n  // hacky interface between C2 fp16 and CUDA\n#if CUDA_VERSION >= 9000\n  half rh = static_cast<half>(in);\n  
return halfToFloat16(rh);\n#else\n  float16 ret;\n  ret.x = __float2half(in).x;\n  return ret;\n#endif // CUDA_VERSION >= 9000\n#else\n  return cpu_float2half_rn(in);\n#endif\n}\n\ntemplate <>\nCONVERSIONS_DECL float To(const float16 in) {\n#if __CUDA_ARCH__\n#if CUDA_VERSION >= 9000\n  __half_raw tmp;\n#else\n  __half tmp;\n#endif\n  tmp.x = in.x;\n  return __half2float(tmp);\n#else\n  return cpu_half2float(in);\n#endif\n};\n\ntemplate <>\nCONVERSIONS_DECL float To(const float in) {\n  return in;\n}\n\ntemplate <typename OUT, typename IN>\nCONVERSIONS_DECL OUT Get(IN x) {\n  return static_cast<OUT>(x);\n}\n\ntemplate <>\nCONVERSIONS_DECL float Get(float16 x) {\n  return To<float16, float>(x);\n}\n\ntemplate <>\nCONVERSIONS_DECL float16 Get(float x) {\n  return To<float, float16>(x);\n}\n\n}; // namespace convert\n\n}; // namespace caffe2\n\n#undef CONVERSIONS_DECL\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/cpu_neon.h",
    "content": "#ifndef CAFFE2_UTILS_CPU_NEON_H_\n#define CAFFE2_UTILS_CPU_NEON_H_\n\n// Provides a variety of ARM NEON-specific utility functions\n#ifdef __ARM_NEON__\n#include <arm_neon.h>\n\nnamespace caffe2 {\n\ntemplate <typename T>\ninline bool isPointerAligned(T* p, size_t align) {\n  return (reinterpret_cast<uintptr_t>(p) % align == 0);\n}\n\ninline float32x4_t vert_sum_f32(float32x4_t v0,\n                                float32x4_t v1,\n                                float32x4_t v2,\n                                float32x4_t v3) {\n  v0 = vaddq_f32(v0, v1);\n  v2 = vaddq_f32(v2, v3);\n  return vaddq_f32(v0, v2);\n}\n\ninline float horizontal_sum_f32(float32x4_t v0,\n                                float32x4_t v1,\n                                float32x4_t v2,\n                                float32x4_t v3) {\n  v0 = vert_sum_f32(v0, v1, v2, v3);\n  float32x2_t v = vadd_f32(vget_high_f32(v0), vget_low_f32(v0));\n  return vget_lane_f32(vpadd_f32(v, v), 0);\n}\n\n// Load/store functions that assume alignment\n\ninline float32x4_t vld1q_f32_aligned(const float* p) {\n  return vld1q_f32((const float*)\n                   __builtin_assume_aligned(p, sizeof(float32x4_t)));\n}\n\ninline void vst1q_f32_aligned(float* p, float32x4_t v) {\n  vst1q_f32((float*) __builtin_assume_aligned(p, sizeof(float32x4_t)), v);\n}\n\ninline void vst4_u8_aligned(uint8_t* p, uint8x8x4_t v) {\n  vst4_u8((uint8_t*)\n          __builtin_assume_aligned(p, sizeof(uint8x8x4_t)), v);\n}\n\n}  // namespace caffe2\n\n#endif // __ARM_NEON__\n\n#endif  // CAFFE2_UTILS_CPU_NEON_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/cpuid.h",
    "content": "#pragma once\n\n#include <cstdint>\n\n#ifdef _MSC_VER\n#include <intrin.h>\n#endif\n\nnamespace caffe2 {\n\nclass CpuId;\n\nconst CpuId& GetCpuId();\n\n///////////////////////////////////////////////////////////////////////////////\n// Implementation of CpuId that is borrowed from folly.\n///////////////////////////////////////////////////////////////////////////////\n\n/**\n * Identification of an Intel CPU.\n * Supports CPUID feature flags (EAX=1) and extended features (EAX=7, ECX=0).\n * Values from\n * http://www.intel.com/content/www/us/en/processors/processor-identification-cpuid-instruction-note.html\n */\nclass CpuId {\n public:\n  CpuId();\n\n#define X(name, r, bit)              \\\n  inline bool name() const {         \\\n    return ((r) & (1U << bit)) != 0; \\\n  }\n\n// cpuid(1): Processor Info and Feature Bits.\n#define C(name, bit) X(name, f1c_, bit)\n  C(sse3, 0)\n  C(pclmuldq, 1)\n  C(dtes64, 2)\n  C(monitor, 3)\n  C(dscpl, 4)\n  C(vmx, 5)\n  C(smx, 6)\n  C(eist, 7)\n  C(tm2, 8)\n  C(ssse3, 9)\n  C(cnxtid, 10)\n  C(fma, 12)\n  C(cx16, 13)\n  C(xtpr, 14)\n  C(pdcm, 15)\n  C(pcid, 17)\n  C(dca, 18)\n  C(sse41, 19)\n  C(sse42, 20)\n  C(x2apic, 21)\n  C(movbe, 22)\n  C(popcnt, 23)\n  C(tscdeadline, 24)\n  C(aes, 25)\n  C(xsave, 26)\n  C(osxsave, 27)\n  C(avx, 28)\n  C(f16c, 29)\n  C(rdrand, 30)\n#undef C\n\n#define D(name, bit) X(name, f1d_, bit)\n  D(fpu, 0)\n  D(vme, 1)\n  D(de, 2)\n  D(pse, 3)\n  D(tsc, 4)\n  D(msr, 5)\n  D(pae, 6)\n  D(mce, 7)\n  D(cx8, 8)\n  D(apic, 9)\n  D(sep, 11)\n  D(mtrr, 12)\n  D(pge, 13)\n  D(mca, 14)\n  D(cmov, 15)\n  D(pat, 16)\n  D(pse36, 17)\n  D(psn, 18)\n  D(clfsh, 19)\n  D(ds, 21)\n  D(acpi, 22)\n  D(mmx, 23)\n  D(fxsr, 24)\n  D(sse, 25)\n  D(sse2, 26)\n  D(ss, 27)\n  D(htt, 28)\n  D(tm, 29)\n  D(pbe, 31)\n#undef D\n\n// cpuid(7): Extended Features.\n#define B(name, bit) X(name, f7b_, bit)\n  B(bmi1, 3)\n  B(hle, 4)\n  B(avx2, 5)\n  B(smep, 7)\n  B(bmi2, 8)\n  B(erms, 9)\n  B(invpcid, 10)\n  B(rtm, 
11)\n  B(mpx, 14)\n  B(avx512f, 16)\n  B(avx512dq, 17)\n  B(rdseed, 18)\n  B(adx, 19)\n  B(smap, 20)\n  B(avx512ifma, 21)\n  B(pcommit, 22)\n  B(clflushopt, 23)\n  B(clwb, 24)\n  B(avx512pf, 26)\n  B(avx512er, 27)\n  B(avx512cd, 28)\n  B(sha, 29)\n  B(avx512bw, 30)\n  B(avx512vl, 31)\n#undef B\n\n#define E(name, bit) X(name, f7c_, bit)\n  E(prefetchwt1, 0)\n  E(avx512vbmi, 1)\n#undef E\n\n#undef X\n\n private:\n  static uint32_t f1c_;\n  static uint32_t f1d_;\n  static uint32_t f7b_;\n  static uint32_t f7c_;\n};\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/fixed_divisor.h",
    "content": "#ifndef CAFFE2_UTILS_FIXED_DIVISOR_H_\n#define CAFFE2_UTILS_FIXED_DIVISOR_H_\n\n#include <cstdlib>\n#include <stdint.h>\n\nnamespace caffe2 {\n\n// Utility class for quickly calculating quotients and remainders for\n// a known integer divisor\ntemplate <typename T>\nclass FixedDivisor {\n};\n\n// Works for any positive divisor, 1 to INT_MAX. One 64-bit\n// multiplication and one 64-bit shift is used to calculate the\n// result.\ntemplate <>\nclass FixedDivisor<int32_t> {\n public:\n  FixedDivisor(int32_t d) : d_(d) {\n    calcSignedMagic();\n  }\n\n  uint64_t getMagic() const {\n    return magic_;\n  }\n\n  int getShift() const {\n    return shift_;\n  }\n\n  /// Calculates `q = n / d`.\n  inline int32_t div(int32_t n) const {\n    // In lieu of a mulhi instruction being available, perform the\n    // work in uint64\n    uint64_t mul64 = magic_ * (uint64_t) n;\n    return (int32_t) (mul64 >> shift_);\n  }\n\n  /// Calculates `r = n % d`.\n  inline int32_t mod(int32_t n) const {\n    return n - d_ * div(n);\n  }\n\n  /// Calculates `q = n / d` and `r = n % d` together.\n  inline void divMod(int32_t n, int32_t& q, int32_t& r) const {\n    const int32_t quotient = div(n);\n    q = quotient;\n    r = n - d_ * quotient;\n  }\n\n private:\n  /**\n     Calculates magic multiplicative value and shift amount for\n     calculating `q = n / d` for signed 32-bit integers.\n     Implementation taken from Hacker's Delight section 10.\n  */\n  void calcSignedMagic() {\n    if (d_ == 1) {\n      magic_ = UINT64_C(0x1) << 32;\n      shift_ = 32;\n      return;\n    }\n\n    const uint32_t two31 = UINT32_C(0x80000000);\n    uint32_t ad = std::abs(d_);\n    uint32_t t = two31 + ((uint32_t) d_ >> 31);\n    uint32_t anc = t - 1 - t % ad;   // Absolute value of nc.\n    uint32_t p = 31;                 // Init. p.\n    uint32_t q1 = two31 / anc;       // Init. q1 = 2**p/|nc|.\n    uint32_t r1 = two31 - q1 * anc;  // Init. 
r1 = rem(2**p, |nc|).\n    uint32_t q2 = two31 / ad;        // Init. q2 = 2**p/|d|.\n    uint32_t r2 = two31 - q2 * ad;   // Init. r2 = rem(2**p, |d|).\n    uint32_t delta = 0;\n\n    do {\n      p = p + 1;\n      q1 = 2 * q1;         // Update q1 = 2**p/|nc|.\n      r1 = 2 * r1;         // Update r1 = rem(2**p, |nc|).\n\n      if (r1 >= anc) {     // (Must be an unsigned\n        q1 = q1 + 1;       // comparison here).\n        r1 = r1 - anc;\n      }\n\n      q2 = 2 * q2;         // Update q2 = 2**p/|d|.\n      r2 = 2 * r2;         // Update r2 = rem(2**p, |d|).\n\n      if (r2 >= ad) {      // (Must be an unsigned\n        q2 = q2 + 1;       // comparison here).\n        r2 = r2 - ad;\n      }\n\n      delta = ad - r2;\n    } while (q1 < delta || (q1 == delta && r1 == 0));\n\n    int32_t magic = q2 + 1;\n    if (d_ < 0) {\n      magic = -magic;\n    }\n    shift_ = p;\n    magic_ = (uint64_t) (uint32_t) magic;\n  }\n\n  int32_t d_;\n  uint64_t magic_;\n  int shift_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_UTILS_FIXED_DIVISOR_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/math-detail.h",
    "content": "#ifndef CAFFE2_UTILS_MATH_DETAIL_H_\n#define CAFFE2_UTILS_MATH_DETAIL_H_\nnamespace caffe2 {\n\nclass CPUContext;\n\nnamespace math {\nnamespace detail {\n\n// proxy to a class because of partial specialization limitations for functions\n\ntemplate<typename T, class Context, int FixedSize>\nstruct ScaleImpl {\n  inline void operator()(\n      const int N,\n      const float alpha,\n      const T* x,\n      T* y,\n      Context* context) {\n    Scale(N, alpha, x, y, context);\n  }\n};\n\n// Put light-weight implementations in .h file to enable inlining\ntemplate<typename T>\nstruct ScaleImpl<T, CPUContext, 1> {\n  inline void operator()(\n      const int N,\n      const float alpha,\n      const T* x,\n      T* y,\n      CPUContext* /*context*/) {\n    DCHECK_EQ(N, 1);\n    *y = *x * alpha;\n  }\n};\n\ntemplate<typename T, class Context, int FixedSize>\nstruct AxpyImpl {\n  inline void operator()(\n      const int N,\n      const float alpha,\n      const T* x,\n      T* y,\n      Context* context) {\n    Axpy(N, alpha, x, y, context);\n  }\n};\n\n// Put light-weight implementations in .h file to enable inlining\ntemplate<typename T>\nstruct AxpyImpl<T, CPUContext, 1> {\n  inline void operator()(\n      const int N,\n      const float alpha,\n      const T* x,\n      T* y,\n      CPUContext* /*context*/) {\n    DCHECK_EQ(N, 1);\n    *y += *x * alpha;\n  }\n};\n\n\n}  // namespace detail\n\ntemplate <typename T, class Context, int FixedSize>\ninline void ScaleFixedSize(\n    const int N,\n    const float alpha,\n    const T* x,\n    T* y,\n    Context* context) {\n  detail::ScaleImpl<T, Context, FixedSize>()(N, alpha, x, y, context);\n}\n\ntemplate <typename T, class Context, int FixedSize>\ninline void AxpyFixedSize(\n    const int N,\n    const float alpha,\n    const T* x,\n    T* y,\n    Context* context) {\n  detail::AxpyImpl<T, Context, FixedSize>()(N, alpha, x, y, context);\n}\n\n}  // namespace math\n}  // namespace caffe2\n\n#endif  // 
CAFFE2_UTILS_MATH_DETAIL_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/math.h",
    "content": "#ifndef CAFFE2_UTILS_MATH_H_\n#define CAFFE2_UTILS_MATH_H_\n// This is a simple translation from the old Caffe math interfaces. We aim to\n// still keep it simple, so all platforms would be able to support it fairly\n// easily.\n\n// We include the cblas header here so that we can obtain the macros from cblas.\nextern \"C\" {\n#include \"caffe2/utils/cblas.h\"\n}\n\n#ifdef CAFFE2_USE_ACCELERATE\n#include <Accelerate/Accelerate.h>\n#endif // CAFFE2_USE_ACCELERATE\n\n#include \"caffe2/core/common.h\"\n#include \"caffe2/core/types.h\"\n\n#ifndef __CUDACC__\n#include \"Eigen/Core\"\n#include \"Eigen/Dense\"\n#endif\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass Tensor;\n\n// An empty class as a placeholder for a math function that has no specific\n// engine specified.\nclass DefaultEngine {};\n\n#ifndef __CUDACC__\n// Common Eigen types that we will often use\ntemplate <typename T>\nusing EigenMatrixMap =\n    Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> >;\ntemplate <typename T>\nusing EigenArrayMap =\n    Eigen::Map<Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic> >;\ntemplate <typename T>\nusing EigenVectorMap = Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, 1> >;\ntemplate <typename T>\nusing EigenVectorArrayMap = Eigen::Map<Eigen::Array<T, Eigen::Dynamic, 1> >;\ntemplate <typename T>\nusing ConstEigenMatrixMap =\n    Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> >;\ntemplate <typename T>\nusing ConstEigenArrayMap =\n    Eigen::Map<const Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic> >;\ntemplate <typename T>\nusing ConstEigenVectorMap =\n    Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, 1> >;\ntemplate <typename T>\nusing ConstEigenVectorArrayMap =\n    Eigen::Map<const Eigen::Array<T, Eigen::Dynamic, 1> >;\n#endif\n\nnamespace math {\n\ntemplate <typename T, class Context>\nvoid Exp(const int N, const T* x, T* y, Context* context);\ntemplate <typename T, class Context>\nvoid Log(const int N, const T* 
x, T* y, Context* context);\ntemplate <typename T, class Context>\nvoid Cos(const int N, const T* x, T* y, Context* context);\ntemplate <typename T, class Context>\nvoid Sin(const int N, const T* x, T* y, Context* context);\ntemplate <typename T, class Context>\nvoid SinCos(const int N, const T* x, T* ys, T* yc, Context* context);\ntemplate <typename T, class Context>\nvoid Abs(const int N, const T* x, T* y, Context* context);\ntemplate <typename T, class Context>\nvoid Sqrt(const int N, const T* x, T* y, Context* context);\ntemplate <typename T, class Context>\nvoid InvSqrt(const int N, const T* x, T* y, Context* context);\ntemplate <typename T, class Context>\nvoid Sqr(const int N, const T* x, T* y, Context* context);\n\ntemplate <typename T, class Context>\nvoid Not(const int N, const T* x, T* y, Context* context);\n\ntemplate <typename T, class Context>\nvoid Powx(const int N, const T* a, const T b, T* y, Context* context);\n\n#define CAFFE2_DECLARE_BINARY_OP_BINARY_RESULT(name)                         \\\n  template <typename T, class Context>                                       \\\n  void name(const int N, const T* a, const T* b, bool* y, Context* context); \\\n  template <typename T, class Context>                                       \\\n  void name##ToRow(                                                          \\\n      const int M,                                                           \\\n      const int N,                                                           \\\n      const T* a,                                                            \\\n      const T* b,                                                            \\\n      bool* y,                                                               \\\n      Context* 
context);\n\nCAFFE2_DECLARE_BINARY_OP_BINARY_RESULT(LT);\nCAFFE2_DECLARE_BINARY_OP_BINARY_RESULT(LE);\nCAFFE2_DECLARE_BINARY_OP_BINARY_RESULT(GT);\nCAFFE2_DECLARE_BINARY_OP_BINARY_RESULT(GE);\n\nCAFFE2_DECLARE_BINARY_OP_BINARY_RESULT(And);\nCAFFE2_DECLARE_BINARY_OP_BINARY_RESULT(Or);\nCAFFE2_DECLARE_BINARY_OP_BINARY_RESULT(Xor);\n\n#undef CAFFE2_DECLARE_BINARY_OP_BINARY_RESULT\n\n#define CAFFE2_DECLARE_BINARY_OP(name)                                    \\\n  template <typename T, class Context>                                    \\\n  void name(const int N, const T* a, const T* b, T* y, Context* context); \\\n  template <typename T, class Context>                                    \\\n  void name##ToRow(                                                       \\\n      const int M,                                                        \\\n      const int N,                                                        \\\n      const T* a,                                                         \\\n      const T* b,                                                         \\\n      T* y,                                                               \\\n      Context* context);                                                  \\\n  template <typename T, class Context>                                    \\\n  void name##ToRow(                                                       \\\n      const int M, const int N, const T* x, T* y, Context* context);      \\\n  template <typename T, class Context>                                    \\\n  void name##ToCol(                                                       \\\n      const int M, const int N, const T* x, T* y, Context* context);\n\nCAFFE2_DECLARE_BINARY_OP(Add);\nCAFFE2_DECLARE_BINARY_OP(Sub);\nCAFFE2_DECLARE_BINARY_OP(Mul);\nCAFFE2_DECLARE_BINARY_OP(Div);\n\n#undef CAFFE2_DECLARE_BINARY_OP\n\ntemplate <typename T, class Context>\nvoid ReduceMin(\n    const int N,\n    const T* x,\n    T* y,\n    Tensor<Context>* 
scratch_ptr,\n    Context* context);\ntemplate <typename T, class Context>\nvoid ReduceMax(\n    const int N,\n    const T* x,\n    T* y,\n    Tensor<Context>* scratch_ptr,\n    Context* context);\n\n// Adds batch sub-tensors elementwise to output. Stripe is the stripe length\n// and N is the number of elements to add (size of Y).\ntemplate <typename T, class Context>\nvoid AddStripedBatch(\n    const int N,\n    const T* first,\n    T* y,\n    const int stripe,\n    const int batch,\n    Context* context);\n\n// Compute the row-wise max of a N*D matrix X, and write it to a N\n// dimensional vector y.\ntemplate <typename T, class Context>\nvoid RowwiseMax(const int N, const int D, const T* x, T* y,\n                Context* context);\n\n// Compute the column-wise max of a N*D matrix X, and write it to a D\n// dimensional vector y.\ntemplate <typename T, class Context>\nvoid ColwiseMax(const int N, const int D, const T* x, T* y,\n                Context* context);\n\n// Elemwise maximum of vector x and vector y. z[i] = max(x[i], y[i])\ntemplate <typename T, class Context>\nvoid ElemwiseMax(const int N, const T* x, const T* y, T* z, Context* context);\n\n// Elemwise maximum of vector x and scalar alpha. 
y[i] = max(x[i], alpha)\ntemplate <typename T, class Context>\nvoid Maximum(\n    const int N,\n    const float alpha,\n    const T* x,\n    T* y,\n    Context* context);\n\n// Decaf gemm provides a simpler interface to the gemm functions, with the\n// limitation that the data has to be contiguous in memory.\ntemplate <typename T, class Context, class Engine = DefaultEngine>\nvoid Gemm(\n    const CBLAS_TRANSPOSE TransA,\n    const CBLAS_TRANSPOSE TransB,\n    const int M,\n    const int N,\n    const int K,\n    const float alpha,\n    const T* A,\n    const T* B,\n    const float beta,\n    T* C,\n    Context* context,\n    TensorProto::DataType math_type = TensorProto_DataType_FLOAT);\n\n// We also provide a gemm that has explicit lda, ldb and ldc specified.\n// In most cases you probably want to use the function above, though.\ntemplate <typename T, class Context, class Engine = DefaultEngine>\nvoid GemmEx(\n    const CBLAS_TRANSPOSE TransA,\n    const CBLAS_TRANSPOSE TransB,\n    const int M,\n    const int N,\n    const int K,\n    const T alpha,\n    const T* A,\n    const int lda,\n    const T* B,\n    const int ldb,\n    const T beta,\n    T* C,\n    const int ldc,\n    Context* context);\n\n// GemmBatched provides a simple abstraction into library routines\ntemplate <typename T, class Context, class Engine = DefaultEngine>\nvoid GemmBatched(\n    const CBLAS_TRANSPOSE TransA,\n    const CBLAS_TRANSPOSE TransB,\n    const int A_size,\n    const int A_batches,\n    const int B_size,\n    const int B_batches,\n    const int M,\n    const int N,\n    const int K,\n    const float alpha,\n    const T* A,\n    const T* B,\n    const float beta,\n    T* C,\n    Context* context,\n    Tensor<Context>* scratch = nullptr,\n    TensorProto::DataType math_type = TensorProto_DataType_FLOAT);\n\n// Gemv always takes in a M*N matrix A, and depending on whether we set TransA\n// to Trans, the output is:\n// CblasNoTrans: x is an N dim vector and y is an M dim vector.\n// 
CblasTrans:   x is an M dim vector and y is an N dim vector.\ntemplate <typename T, class Context, class Engine = DefaultEngine>\nvoid Gemv(\n    const CBLAS_TRANSPOSE TransA,\n    const int M,\n    const int N,\n    const float alpha,\n    const T* A,\n    const T* x,\n    const float beta,\n    T* y,\n    Context* context,\n    TensorProto::DataType math_type = TensorProto_DataType_FLOAT);\n\ntemplate <typename T, class Context>\nvoid Set(const TIndex N, const T alpha, T* X, Context* context);\n\ntemplate <typename T, class Context>\nvoid RandUniform(const int n, const T a, const T b, T* r,\n                 Context* context);\n\ntemplate <typename T, class Context>\nvoid RandUniformUnique(\n    const size_t n,\n    const T a,\n    const T b,\n    T* r,\n    const size_t m,\n    const T* avoid,\n    Context* context);\n\ntemplate <typename T, class Context>\nvoid RandGaussian(\n    const int n,\n    const T mean,\n    const T std,\n    T* r,\n    Context* context);\n\n// Dot matrix of vector a and b, and writes the result to a single value y.\ntemplate <typename T, class Context>\nvoid Dot(const int N, const T* a, const T* b, T* y, Context* context);\n\n// Sum of vector x, and writes the result to a single value y.\ntemplate <typename T, class Context>\nvoid Sum(const int N, const T* x, T* y, Context* context,\n         Tensor<Context>* scratch_ptr = nullptr);\n\n// Sum of squares of vector x, and writes the result to a single value y.\ntemplate <typename T, class Context>\nvoid SumSqr(\n    const int N,\n    const T* x,\n    T* y,\n    Context* context,\n    Tensor<Context>* scratch_ptr = nullptr);\n\n// Select does index selection of the rows a N*D matrix x, and gives the N\n// dimensional vector y that contains the selected data.\ntemplate <typename T, class Context>\nvoid Select(const int N, const int D, const T* x, const int* idx, T* y,\n            Context* context);\n\ntemplate <typename T, class Context>\nvoid Scale(const int N, const float alpha, const 
T* x, T* y, Context* context);\n\n// Different from the Scale function above, if alpha is passed in\n// as a pointer, we will assume that it lives on the Context device,\n// for example on GPU.\ntemplate <typename T, class Context>\nvoid Scale(const int N, const float* alpha, const T* x, T* y, Context* context);\n\ntemplate <typename T, class Context>\nvoid Axpy(const int N, const float alpha, const T* x, T* y, Context* context);\n\n// Different from the Axpy function above, if alpha is passed in\n// as a pointer, we will assume that it lives on the Context device,\n// for example on GPU.\ntemplate <typename T, class Context>\nvoid Axpy(const int N, const float* alpha, const T* x, T* y, Context* context);\n\ntemplate <typename T, class Context>\nvoid Axpby(\n    const int N,\n    const float alpha,\n    const T* x,\n    const T b,\n    T* y,\n    Context* context);\n\ntemplate <typename T, class Context, int order>\nvoid Im2colNd(\n    const T* data_img,\n    const int* im_shape,\n    const int* col_shape,\n    const int img_size,\n    const int col_size,\n    const int* kernel_shape,\n    const int* stride,\n    const int* dilation,\n    const int* pad,\n    const int N,\n    T* data_col,\n    Context* context,\n    bool accumulate_output = false);\n\ntemplate <typename T, class Context, int order>\nvoid Col2imNd(\n    const T* data_col,\n    const int* img_shape,\n    const int* col_shape,\n    const int img_size,\n    const int col_size,\n    const int* kernel_shape,\n    const int* stride,\n    const int* dilation,\n    const int* pad,\n    const int N,\n    T* data_img,\n    Context* context);\n\ntemplate <typename T, class Context, int order>\nvoid Im2col(\n    const T* data_im,\n    const int channels,\n    const int height,\n    const int width,\n    const int kernel_h,\n    const int kernel_w,\n    const int dilation_h,\n    const int dilation_w,\n    const int pad_t,\n    const int pad_l,\n    const int pad_b,\n    const int pad_r,\n    const int 
stride_h,\n    const int stride_w,\n    T* data_col,\n    Context* context);\n\ntemplate <typename T, class Context, int order>\nvoid Col2im(\n    const T* data_col,\n    const int channels,\n    const int height,\n    const int width,\n    const int patch_h,\n    const int patch_w,\n    const int dilation_h,\n    const int dilation_w,\n    const int pad_t,\n    const int pad_l,\n    const int pad_b,\n    const int pad_r,\n    const int stride_h,\n    const int stride_w,\n    T* data_im,\n    Context* context);\n\n// Applies a per-channel bias value to each channel of the input\n// image. image_size is H * W\ntemplate <typename T, class Context>\nvoid BiasCHW(\n  const T* bias,\n  const int bias_channels,\n  const int image_size,\n  T* image,\n  Context* context);\n\ntemplate <class Context>\nvoid CopyMatrix(const size_t item_size, const int M, const int N, const void* A,\n                const int lda, void* B, const int ldb, Context* context);\n\ntemplate <typename T, class Context>\nvoid CopyVector(const int N, const T* A, T* B, Context* context);\n\nuint32_t randomNumberSeed();\n\n// Function uses casting from int to unsigned to compare if value of\n// parameter a is greater or equal to zero and lower than value of\n// parameter b. The b parameter is of type signed and is always\n// positive,\n// therefore its value is always lower than 0x800... where casting\n// negative value of a parameter converts it to value higher than\n// 0x800...\n// The casting allows to use one condition instead of two.\ninline bool is_a_ge_zero_and_a_lt_b(int a, int b) {\n  return static_cast<unsigned>(a) < static_cast<unsigned>(b);\n}\n\n// Calculates ceil(a / b). User must be careful to ensure that there\n// is no overflow or underflow in the calculation.\ntemplate <typename T>\nconstexpr T divUp(T a, T b) {\n  return (a + b - (T) 1) / b;\n}\n\n// Rounds a up to the next highest multiple of b. 
User must be careful\n// to ensure that there is no overflow or underflow in the calculation\n// of divUp.\ntemplate <typename T>\nconstexpr T roundUp(T a, T b) {\n  return divUp<T>(a, b) * b;\n}\n\n// Returns true if the given integer type is a power-of-2 (positive only)\n// Note(jiayq): windows reported an error per\n//     https://github.com/caffe2/caffe2/issues/997\n// and as a result will make it a macro.\n#ifdef _MSC_VER\n#define integerIsPowerOf2(v) ((v) && !((v) & ((v) - 1)))\n#else // _MSC_VER\ntemplate <typename T>\nconstexpr bool integerIsPowerOf2(T v) {\n  return (v && !(v & (v - 1)));\n}\n#endif // _MSC_VER\n\n// Returns log2(n) for a positive integer type\ntemplate <typename T>\nconstexpr int integerLog2(T n, int p = 0) {\n  return (n <= 1) ? p : integerLog2(n / 2, p + 1);\n}\n\n// Returns the next highest power-of-2 for an integer type\ntemplate <typename T>\nconstexpr T integerNextHighestPowerOf2(T v) {\n  return (integerIsPowerOf2(v) ? (T)2 * v : ((T)1 << (integerLog2(v) + 1)));\n}\n\n}  // namespace math\n}  // namespace caffe2\n\n#include \"caffe2/utils/math-detail.h\"\n#endif  // CAFFE2_UTILS_MATH_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/murmur_hash3.h",
    "content": "//-----------------------------------------------------------------------------\n// MurmurHash3 was written by Austin Appleby, and is placed in the public\n// domain. The author hereby disclaims copyright to this source code.\n\n#pragma once\n\n//-----------------------------------------------------------------------------\n// Platform-specific functions and macros\n\n// Microsoft Visual Studio\n\n#if defined(_MSC_VER) && (_MSC_VER < 1600)\n\ntypedef unsigned char uint8_t;\ntypedef unsigned int uint32_t;\ntypedef unsigned __int64 uint64_t;\n\n// Other compilers\n\n#else // defined(_MSC_VER)\n\n#include <stdint.h>\n\n#endif // !defined(_MSC_VER)\n\nnamespace caffe2 {\n\nvoid MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* out);\n\nvoid MurmurHash3_x86_128(const void* key, int len, uint32_t seed, void* out);\n\nvoid MurmurHash3_x64_128(const void* key, int len, uint32_t seed, void* out);\n\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/proto_utils.h",
    "content": "#ifndef CAFFE2_UTILS_PROTO_UTILS_H_\n#define CAFFE2_UTILS_PROTO_UTILS_H_\n\n#ifdef CAFFE2_USE_LITE_PROTO\n#include <google/protobuf/message_lite.h>\n#else // CAFFE2_USE_LITE_PROTO\n#include <google/protobuf/message.h>\n#endif  // !CAFFE2_USE_LITE_PROTO\n\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/proto/caffe2.pb.h\"\n\nnamespace caffe2 {\n\nusing std::string;\nusing ::google::protobuf::MessageLite;\n\n// A wrapper function to return device name string for use in blob serialization\n// / deserialization. This should have one to one correspondence with\n// caffe2/proto/caffe2.proto: enum DeviceType.\n//\n// Note that we can't use DeviceType_Name, because that is only available in\n// protobuf-full, and some platforms (like mobile) may want to use\n// protobuf-lite instead.\nstd::string DeviceTypeName(const int32_t& d);\n\n// Returns if the two DeviceOptions are pointing to the same device.\nbool IsSameDevice(const DeviceOption& lhs, const DeviceOption& rhs);\n\n// Common interfaces that reads file contents into a string.\nbool ReadStringFromFile(const char* filename, string* str);\nbool WriteStringToFile(const string& str, const char* filename);\n\n// Common interfaces that are supported by both lite and full protobuf.\nbool ReadProtoFromBinaryFile(const char* filename, MessageLite* proto);\ninline bool ReadProtoFromBinaryFile(const string filename, MessageLite* proto) {\n  return ReadProtoFromBinaryFile(filename.c_str(), proto);\n}\n\nvoid WriteProtoToBinaryFile(const MessageLite& proto, const char* filename);\ninline void WriteProtoToBinaryFile(const MessageLite& proto,\n                                   const string& filename) {\n  return WriteProtoToBinaryFile(proto, filename.c_str());\n}\n\n#ifdef CAFFE2_USE_LITE_PROTO\n\ninline string ProtoDebugString(const MessageLite& proto) {\n  return proto.SerializeAsString();\n}\n\n// Text format MessageLite wrappers: these functions do nothing but just\n// allowing things to compile. 
It will produce a runtime error if you are using\n// MessageLite but still want text support.\ninline bool ReadProtoFromTextFile(\n    const char* /*filename*/,\n    MessageLite* /*proto*/) {\n  LOG(FATAL) << \"If you are running lite version, you should not be \"\n                  << \"calling any text-format protobuffers.\";\n  return false;  // Just to suppress compiler warning.\n}\ninline bool ReadProtoFromTextFile(const string filename, MessageLite* proto) {\n  return ReadProtoFromTextFile(filename.c_str(), proto);\n}\n\ninline void WriteProtoToTextFile(\n    const MessageLite& /*proto*/,\n    const char* /*filename*/) {\n  LOG(FATAL) << \"If you are running lite version, you should not be \"\n                  << \"calling any text-format protobuffers.\";\n}\ninline void WriteProtoToTextFile(const MessageLite& proto,\n                                 const string& filename) {\n  return WriteProtoToTextFile(proto, filename.c_str());\n}\n\ninline bool ReadProtoFromFile(const char* filename, MessageLite* proto) {\n  return (ReadProtoFromBinaryFile(filename, proto) ||\n          ReadProtoFromTextFile(filename, proto));\n}\n\ninline bool ReadProtoFromFile(const string& filename, MessageLite* proto) {\n  return ReadProtoFromFile(filename.c_str(), proto);\n}\n\n#else  // CAFFE2_USE_LITE_PROTO\n\nusing ::google::protobuf::Message;\n\ninline string ProtoDebugString(const Message& proto) {\n  return proto.ShortDebugString();\n}\n\nbool ReadProtoFromTextFile(const char* filename, Message* proto);\ninline bool ReadProtoFromTextFile(const string filename, Message* proto) {\n  return ReadProtoFromTextFile(filename.c_str(), proto);\n}\n\nvoid WriteProtoToTextFile(const Message& proto, const char* filename);\ninline void WriteProtoToTextFile(const Message& proto, const string& filename) {\n  return WriteProtoToTextFile(proto, filename.c_str());\n}\n\n// Read Proto from a file, letting the code figure out if it is text or binary.\ninline bool ReadProtoFromFile(const char* 
filename, Message* proto) {\n  return (ReadProtoFromBinaryFile(filename, proto) ||\n          ReadProtoFromTextFile(filename, proto));\n}\n\ninline bool ReadProtoFromFile(const string& filename, Message* proto) {\n  return ReadProtoFromFile(filename.c_str(), proto);\n}\n\n#endif  // CAFFE2_USE_LITE_PROTO\n\ntemplate <\n    class IterableInputs = std::initializer_list<string>,\n    class IterableOutputs = std::initializer_list<string>,\n    class IterableArgs = std::initializer_list<Argument>>\nOperatorDef CreateOperatorDef(\n    const string& type,\n    const string& name,\n    const IterableInputs& inputs,\n    const IterableOutputs& outputs,\n    const IterableArgs& args,\n    const DeviceOption& device_option = DeviceOption(),\n    const string& engine = \"\") {\n  OperatorDef def;\n  def.set_type(type);\n  def.set_name(name);\n  for (const string& in : inputs) {\n    def.add_input(in);\n  }\n  for (const string& out : outputs) {\n    def.add_output(out);\n  }\n  for (const Argument& arg : args) {\n    def.add_arg()->CopyFrom(arg);\n  }\n  if (device_option.has_device_type()) {\n    def.mutable_device_option()->CopyFrom(device_option);\n  }\n  if (engine.size()) {\n    def.set_engine(engine);\n  }\n  return def;\n}\n\n// A simplified version compared to the full CreateOperator, if you do not need\n// to specify args.\ntemplate <\n    class IterableInputs = std::initializer_list<string>,\n    class IterableOutputs = std::initializer_list<string>>\ninline OperatorDef CreateOperatorDef(\n    const string& type,\n    const string& name,\n    const IterableInputs& inputs,\n    const IterableOutputs& outputs,\n    const DeviceOption& device_option = DeviceOption(),\n    const string& engine = \"\") {\n  return CreateOperatorDef(\n      type,\n      name,\n      inputs,\n      outputs,\n      std::vector<Argument>(),\n      device_option,\n      engine);\n}\n\nbool HasOutput(const OperatorDef& op, const std::string& output);\nbool HasInput(const OperatorDef& op, const 
std::string& input);\n\n/**\n * @brief A helper class to index into arguments.\n *\n * This helper helps us to more easily index into a set of arguments\n * that are present in the operator. To save memory, the argument helper\n * does not copy the operator def, so one would need to make sure that the\n * lifetime of the OperatorDef object outlives that of the ArgumentHelper.\n */\nclass ArgumentHelper {\n public:\n  template <typename Def>\n  static bool HasArgument(const Def& def, const string& name) {\n    return ArgumentHelper(def).HasArgument(name);\n  }\n\n  template <typename Def, typename T>\n  static T GetSingleArgument(\n      const Def& def,\n      const string& name,\n      const T& default_value) {\n    return ArgumentHelper(def).GetSingleArgument<T>(name, default_value);\n  }\n\n  template <typename Def, typename T>\n  static bool HasSingleArgumentOfType(const Def& def, const string& name) {\n    return ArgumentHelper(def).HasSingleArgumentOfType<T>(name);\n  }\n\n  template <typename Def, typename T>\n  static vector<T> GetRepeatedArgument(\n      const Def& def,\n      const string& name,\n      const std::vector<T>& default_value = std::vector<T>()) {\n    return ArgumentHelper(def).GetRepeatedArgument<T>(name, default_value);\n  }\n\n  template <typename Def, typename MessageType>\n  static MessageType GetMessageArgument(const Def& def, const string& name) {\n    return ArgumentHelper(def).GetMessageArgument<MessageType>(name);\n  }\n\n  template <typename Def, typename MessageType>\n  static vector<MessageType> GetRepeatedMessageArgument(\n      const Def& def,\n      const string& name) {\n    return ArgumentHelper(def).GetRepeatedMessageArgument<MessageType>(name);\n  }\n\n  explicit ArgumentHelper(const OperatorDef& def);\n  explicit ArgumentHelper(const NetDef& netdef);\n  bool HasArgument(const string& name) const;\n\n  template <typename T>\n  T GetSingleArgument(const string& name, const T& default_value) const;\n  template <typename T>\n  
bool HasSingleArgumentOfType(const string& name) const;\n  template <typename T>\n  vector<T> GetRepeatedArgument(\n      const string& name,\n      const std::vector<T>& default_value = std::vector<T>()) const;\n\n  template <typename MessageType>\n  MessageType GetMessageArgument(const string& name) const {\n    CAFFE_ENFORCE(arg_map_.count(name), \"Cannot find parameter named \", name);\n    MessageType message;\n    if (arg_map_.at(name).has_s()) {\n      CAFFE_ENFORCE(\n          message.ParseFromString(arg_map_.at(name).s()),\n          \"Faild to parse content from the string\");\n    } else {\n      VLOG(1) << \"Return empty message for parameter \" << name;\n    }\n    return message;\n  }\n\n  template <typename MessageType>\n  vector<MessageType> GetRepeatedMessageArgument(const string& name) const {\n    CAFFE_ENFORCE(arg_map_.count(name), \"Cannot find parameter named \", name);\n    vector<MessageType> messages(arg_map_.at(name).strings_size());\n    for (int i = 0; i < messages.size(); ++i) {\n      CAFFE_ENFORCE(\n          messages[i].ParseFromString(arg_map_.at(name).strings(i)),\n          \"Faild to parse content from the string\");\n    }\n    return messages;\n  }\n\n private:\n  CaffeMap<string, Argument> arg_map_;\n};\n\nconst Argument& GetArgument(const OperatorDef& def, const string& name);\nbool GetFlagArgument(\n    const OperatorDef& def,\n    const string& name,\n    bool def_value = false);\n\nArgument* GetMutableArgument(\n    const string& name,\n    const bool create_if_missing,\n    OperatorDef* def);\n\ntemplate <typename T>\nArgument MakeArgument(const string& name, const T& value);\n\ntemplate <typename T>\ninline void AddArgument(const string& name, const T& value, OperatorDef* def) {\n  GetMutableArgument(name, true, def)->CopyFrom(MakeArgument(name, value));\n}\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_UTILS_PROTO_UTILS_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/signal_handler.h",
    "content": "#pragma once\n\n#if defined(__APPLE__)\n#define CAFFE2_SUPPORTS_SIGNAL_HANDLER\n#elif defined(__linux__)\n#define CAFFE2_SUPPORTS_FATAL_SIGNAL_HANDLERS\n#define CAFFE2_SUPPORTS_SIGNAL_HANDLER\n#endif\n\nnamespace caffe2 {\n\nclass SignalHandler {\n public:\n  enum class Action {\n    NONE,\n    STOP\n  };\n\n  // Constructor. Specify what action to take when a signal is received.\n  SignalHandler(Action SIGINT_action,\n                Action SIGHUP_action);\n  ~SignalHandler();\n\n  Action CheckForSignals();\n\n private:\n  bool GotSIGINT();\n  bool GotSIGHUP();\n  Action SIGINT_action_;\n  Action SIGHUP_action_;\n  unsigned long my_sigint_count_;\n  unsigned long my_sighup_count_;\n};\n\n#if defined(CAFFE2_SUPPORTS_FATAL_SIGNAL_HANDLERS)\n// This works by setting up certain fatal signal handlers. Previous fatal\n// signal handlers will still be called when the signal is raised. Defaults\n// to being off.\nvoid setPrintStackTracesOnFatalSignal(bool print);\nbool printStackTracesOnFatalSignal();\n#endif // defined(CAFFE2_SUPPORTS_FATAL_SIGNAL_HANDLERS)\n\n}  // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/simple_queue.h",
    "content": "#ifndef CAFFE2_UTILS_SIMPLE_QUEUE_H_\n#define CAFFE2_UTILS_SIMPLE_QUEUE_H_\n\n#include <condition_variable>  // NOLINT\n#include <mutex>  // NOLINT\n#include <queue>\n\n#include \"caffe2/core/logging.h\"\n\nnamespace caffe2 {\n\n// This is a very simple queue that Yangqing wrote when bottlefeeding the baby,\n// so don't take it seriously. What it does is a minimal thread-safe queue that\n// allows me to run network as a DAG.\n//\n// A usual work pattern looks like this: one or multiple producers push jobs\n// into this queue, and one or multiple workers pops jobs from this queue. If\n// nothing is in the queue but NoMoreJobs() is not called yet, the pop calls\n// will wait. If NoMoreJobs() has been called, pop calls will return false,\n// which serves as a message to the workers that they should exit.\ntemplate <typename T>\nclass SimpleQueue {\n public:\n  SimpleQueue() : no_more_jobs_(false) {}\n\n  // Pops a value and writes it to the value pointer. If there is nothing in the\n  // queue, this will wait till a value is inserted to the queue. If there are\n  // no more jobs to pop, the function returns false. Otherwise, it returns\n  // true.\n  bool Pop(T* value) {\n    std::unique_lock<std::mutex> mutex_lock(mutex_);\n    while (queue_.size() == 0 && !no_more_jobs_) cv_.wait(mutex_lock);\n    if (queue_.size() == 0 && no_more_jobs_) return false;\n    *value = queue_.front();\n    queue_.pop();\n    return true;\n  }\n\n  int size() {\n    std::unique_lock<std::mutex> mutex_lock(mutex_);\n    return queue_.size();\n  }\n\n  // Push pushes a value to the queue.\n  void Push(const T& value) {\n    {\n      std::lock_guard<std::mutex> mutex_lock(mutex_);\n      CAFFE_ENFORCE(!no_more_jobs_, \"Cannot push to a closed queue.\");\n      queue_.push(value);\n    }\n    cv_.notify_one();\n  }\n\n  // NoMoreJobs() marks the close of this queue. 
It also notifies all waiting\n  // Pop() calls so that they either check out remaining jobs, or return false.\n  // After NoMoreJobs() is called, this queue is considered closed - no more\n  // Push() functions are allowed, and once existing items are all checked out\n  // by the Pop() functions, any more Pop() function will immediately return\n  // false with nothing set to the value.\n  void NoMoreJobs() {\n    {\n      std::lock_guard<std::mutex> mutex_lock(mutex_);\n      no_more_jobs_ = true;\n    }\n    cv_.notify_all();\n  }\n\n private:\n  std::mutex mutex_;\n  std::condition_variable cv_;\n  std::queue<T> queue_;\n  bool no_more_jobs_;\n  // We do not allow copy constructors.\n  SimpleQueue(const SimpleQueue& /*src*/) {}\n};\n\n}  // namespace caffe2\n\n#endif  // CAFFE2_UTILS_SIMPLE_QUEUE_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/smart_tensor_printer.h",
    "content": "#pragma once\n\n#include \"caffe2/core/tensor.h\"\n\nnamespace caffe2 {\n\n// This is a wrapper around the TensorPrinter that doesn't require the user to\n// explicitly specify the type of the tensor while calling the Print() method.\n// It also supports a convenience function with a default constructed printer as\n// a static method.\nclass SmartTensorPrinter {\n public:\n  // The proliferation of constructors is to give the feature parity with\n  // TensorPrinter\n  // yet not repeat the default arguments explicitly in case they change in the\n  // future.\n  SmartTensorPrinter() = default;\n\n  explicit SmartTensorPrinter(const std::string& tensor_name);\n\n  SmartTensorPrinter(\n      const std::string& tensor_name,\n      const std::string& file_name);\n\n  SmartTensorPrinter(\n      const std::string& tensor_name,\n      const std::string& file_name,\n      int limit);\n\n  void Print(const Tensor<CPUContext>& tensor);\n\n  template <class Context>\n  void PrintMeta(const Tensor<Context>& tensor) {\n    tensorPrinter_.PrintMeta(tensor);\n  }\n\n  // Uses a default constructed SmartTensorPrinter\n  static void PrintTensor(const Tensor<CPUContext>& tensor);\n\n  // Uses a default constructed SmartTensorPrinter\n  template <class Context>\n  void PrintTensorMeta(const Tensor<Context>& tensor) {\n    DefaultTensorPrinter().PrintMeta(tensor);\n  }\n\n private:\n  // Returns a thread local default constructed TensorPrinter\n  static SmartTensorPrinter& DefaultTensorPrinter();\n\n  TensorPrinter tensorPrinter_;\n};\n}\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/string_utils.h",
    "content": "#pragma once\n\n#include <memory>\n#include <string>\n#include <vector>\n\nnamespace caffe2 {\n\nstd::vector<std::string> split(char separator, const std::string& string);\nsize_t editDistance(\n  const std::string& s1, const std::string& s2, size_t max_distance = 0);\n\nint32_t editDistanceHelper(const char* s1,\n  size_t s1_len,\n  const char* s2,\n  size_t s2_len,\n  std::vector<size_t> &current,\n  std::vector<size_t> &previous,\n  std::vector<size_t> &previous1,\n  size_t max_distance);\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/thread_pool.h",
    "content": "#ifndef CAFFE2_UTILS_THREAD_POOL_H_\n#define CAFFE2_UTILS_THREAD_POOL_H_\n\n#include <condition_variable>\n#include <functional>\n#include <mutex>\n#include <queue>\n#include <thread>\n#include <utility>\n\nclass TaskThreadPool{\n private:\n    struct task_element_t {\n        bool run_with_id;\n        const std::function< void() > no_id;\n        const std::function< void(std::size_t) > with_id;\n\n        explicit task_element_t(const std::function< void() >& f) :\n            run_with_id(false), no_id(f), with_id(nullptr) { }\n        explicit task_element_t(const std::function< void(std::size_t) >& f) :\n            run_with_id(true), no_id(nullptr), with_id(f) { }\n    };\n    std::queue<task_element_t> tasks_;\n    std::vector<std::thread> threads_;\n    std::mutex mutex_;\n    std::condition_variable condition_;\n    std::condition_variable completed_;\n    bool running_;\n    bool complete_;\n    std::size_t available_;\n    std::size_t total_;\n\n public:\n    /// @brief Constructor.\n    explicit TaskThreadPool(std::size_t pool_size)\n        :  threads_(pool_size), running_(true), complete_(true),\n           available_(pool_size), total_(pool_size) {\n        for ( std::size_t i = 0; i < pool_size; ++i ) {\n            threads_[i] = std::thread(\n                std::bind(&TaskThreadPool::main_loop, this, i));\n        }\n    }\n\n    /// @brief Destructor.\n    ~TaskThreadPool() {\n        // Set running flag to false then notify all threads.\n        {\n            std::unique_lock< std::mutex > lock(mutex_);\n            running_ = false;\n            condition_.notify_all();\n        }\n\n        try {\n            for (auto& t : threads_) {\n              t.join();\n            }\n        }\n        // Suppress all exceptions.\n        catch (const std::exception&) {}\n    }\n\n    /// @brief Add task to the thread pool if a thread is currently available.\n    template <typename Task>\n    void runTask(Task task) {\n        
std::unique_lock<std::mutex> lock(mutex_);\n\n        // Set task and signal condition variable so that a worker thread will\n        // wake up and use the task.\n        tasks_.push(task_element_t(static_cast<std::function< void() >>(task)));\n        complete_ = false;\n        condition_.notify_one();\n    }\n\n    template <typename Task>\n    void runTaskWithID(Task task) {\n      std::unique_lock<std::mutex> lock(mutex_);\n\n      // Set task and signal condition variable so that a worker thread will\n      // wake up and use the task.\n      tasks_.push(task_element_t(static_cast<std::function< void(std::size_t) >>(\n                                   task)));\n      complete_ = false;\n      condition_.notify_one();\n    }\n\n    /// @brief Wait for queue to be empty\n    void waitWorkComplete() {\n        std::unique_lock<std::mutex> lock(mutex_);\n        while (!complete_)\n          completed_.wait(lock);\n    }\n\n private:\n    /// @brief Entry point for pool threads.\n    void main_loop(std::size_t index) {\n        while (running_) {\n            // Wait on condition variable while the task is empty and\n            // the pool is still running.\n            std::unique_lock<std::mutex> lock(mutex_);\n            while (tasks_.empty() && running_) {\n                condition_.wait(lock);\n            }\n            // If pool is no longer running, break out of loop.\n            if (!running_) break;\n\n            // Copy task locally and remove from the queue.  This is\n            // done within its own scope so that the task object is\n            // destructed immediately after running the task.  
This is\n            // useful in the event that the function contains\n            // shared_ptr arguments bound via bind.\n            {\n                auto tasks = tasks_.front();\n                tasks_.pop();\n                // Decrement count, indicating thread is no longer available.\n                --available_;\n\n                lock.unlock();\n\n                // Run the task.\n                try {\n                  if (tasks.run_with_id) {\n                      tasks.with_id(index);\n                  } else {\n                      tasks.no_id();\n                  }\n                }\n                // Suppress all exceptions.\n                catch ( const std::exception& ) {}\n\n                // Update status of empty, maybe\n                // Need to recover the lock first\n                lock.lock();\n\n                // Increment count, indicating thread is available.\n                ++available_;\n                if (tasks_.empty() && available_ == total_) {\n                    complete_ = true;\n                    completed_.notify_one();\n                }\n            }\n        }  // while running_\n    }\n};\n\n#endif\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/threadpool/ThreadPool.h",
    "content": "#ifndef CAFFE2_UTILS_THREADPOOL_H_\n#define CAFFE2_UTILS_THREADPOOL_H_\n\n#include \"ThreadPoolCommon.h\"\n\n#ifndef CAFFE2_THREADPOOL_MOBILE\n#error \"mobile build state not defined\"\n#endif\n\n// ThreadPool only used in mobile builds at the moment\n#if CAFFE2_THREADPOOL_MOBILE\n\n#include <memory>\n#include <mutex>\n#include <vector>\n\n//\n// A work-stealing threadpool loosely based off of pthreadpool\n//\n\nnamespace caffe2 {\n\nclass Task;\nclass WorkersPool;\n\nconstexpr size_t kCacheLineSize = 64;\n\nclass alignas(kCacheLineSize) ThreadPool {\n public:\n  // Constructs a work-stealing threadpool with the given number of\n  // threads\n  static std::unique_ptr<ThreadPool> defaultThreadPool();\n  ThreadPool(int numThreads);\n  ~ThreadPool();\n  // Returns the number of threads currently in use\n  int getNumThreads() const;\n\n  // Sets the minimum work size (range) for which to invoke the\n  // threadpool; work sizes smaller than this will just be run on the\n  // main (calling) thread\n  void setMinWorkSize(size_t size);\n  size_t getMinWorkSize() const { return minWorkSize_; }\n  void run(const std::function<void(int, size_t)>& fn, size_t range);\n\nprivate:\n  mutable std::mutex executionMutex_;\n  size_t minWorkSize_;\n  size_t numThreads_;\n  std::shared_ptr<WorkersPool> workersPool_;\n  std::vector<std::shared_ptr<Task>> tasks_;\n};\n\n} // namespace caffe2\n\n#endif // CAFFE2_THREADPOOL_MOBILE\n\n#endif // CAFFE2_UTILS_THREADPOOL_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/threadpool/ThreadPoolCommon.h",
    "content": "#ifndef CAFFE2_UTILS_THREADPOOL_COMMON_H_\n#define CAFFE2_UTILS_THREADPOOL_COMMON_H_\n\n#ifdef __APPLE__\n#include <TargetConditionals.h>\n#endif\n\n// caffe2 depends upon NNPACK, which depends upon this threadpool, so\n// unfortunately we can't reference core/common.h here\n\n// This is copied from core/common.h's definition of CAFFE2_MOBILE\n// Define enabled when building for iOS or Android devices\n#if !defined(CAFFE2_THREADPOOL_MOBILE)\n#if defined(__ANDROID__)\n#define CAFFE2_ANDROID 1\n#define CAFFE2_THREADPOOL_MOBILE 1\n#elif (defined(__APPLE__) &&                                            \\\n       (TARGET_IPHONE_SIMULATOR || TARGET_OS_SIMULATOR || TARGET_OS_IPHONE))\n#define CAFFE2_IOS 1\n#define CAFFE2_THREADPOOL_MOBILE 1\n#elif (defined(__APPLE__) && TARGET_OS_MAC)\n#define CAFFE2_IOS 1\n#define CAFFE2_THREADPOOL_MOBILE 1\n#else\n#define CAFFE2_THREADPOOL_MOBILE 0\n#endif // ANDROID / IOS / MACOS\n#endif // CAFFE2_THREADPOOL_MOBILE\n\n#endif  // CAFFE2_UTILS_THREADPOOL_COMMON_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/threadpool/WorkersPool.h",
    "content": "#include \"caffe2/core/common.h\"\n#include \"caffe2/core/logging.h\"\n#include <atomic>\n#include <thread>\n#include <condition_variable>\n\n#if defined(_MSC_VER)\n#include <intrin.h>\n#endif\n\nnamespace caffe2 {\n\n// Uses code derived from gemmlowp,\n// https://github.com/google/gemmlowp/blob/6c91e1ed0c2eff1182d804310b92911fe9c18019/internal/multi_thread_gemm.h\n// Changes:\n// - allocation-free execute()\n// - Use RAII where possible.\n// - Run the first task on the main thread (since that is the largest task).\n// - removed custom allocator.\n// - Removed some ifdef's\n// - cache-line align Worker.\n// - use std::atomic instead of volatile and custom barriers.\n// - use std::mutex/std::condition_variable instead of raw pthreads.\n\nconstexpr size_t kGEMMLOWPCacheLineSize = 64;\n\ntemplate <typename T>\nstruct AllocAligned {\n  // Allocate a T aligned at an `align` byte address\n  template <typename... Args>\n  static T* alloc(Args&&... args) {\n    void* p = nullptr;\n\n#if defined(__ANDROID__)\n    p = memalign(kGEMMLOWPCacheLineSize, sizeof(T));\n#elif defined(_MSC_VER)\n    p = _aligned_malloc(sizeof(T), kGEMMLOWPCacheLineSize);\n#else\n    posix_memalign((void**)&p, kGEMMLOWPCacheLineSize, sizeof(T));\n#endif\n\n    if (p) {\n      return new (p) T(std::forward<Args>(args)...);\n    }\n\n    return nullptr;\n  }\n\n  // Free a T previously allocated via AllocAligned<T>::alloc()\n  static void release(T* p) {\n    if (p) {\n      p->~T();\n      free((void*)p);\n    }\n  }\n};\n\n// Deleter object for unique_ptr for an aligned object\ntemplate <typename T>\nstruct AlignedDeleter {\n  void operator()(T* p) const { AllocAligned<T>::release(p); }\n};\n\n// make_unique that guarantees alignment\ntemplate <typename T>\nstruct MakeAligned {\n  template <typename... Args>\n  static std::unique_ptr<T, AlignedDeleter<T>> make(Args&&... 
args) {\n    return std::unique_ptr<T, AlignedDeleter<T>>(\n        AllocAligned<T>::alloc(std::forward<Args>(args)...));\n  }\n};\n\nconst int kMaxBusyWaitNOPs = 32 * 1000 * 1000;\n\n#if defined(_MSC_VER)\n#define GEMMLOWP_NOP __nop();\n#else\n#define GEMMLOWP_NOP \"nop\\n\"\n#endif\n\n#define GEMMLOWP_STRING_CONCAT_4(X) X X X X\n#define GEMMLOWP_NOP4 GEMMLOWP_STRING_CONCAT_4(GEMMLOWP_NOP)\n#define GEMMLOWP_NOP16 GEMMLOWP_STRING_CONCAT_4(GEMMLOWP_NOP4)\n#define GEMMLOWP_NOP64 GEMMLOWP_STRING_CONCAT_4(GEMMLOWP_NOP16)\n\ninline int Do256NOPs() {\n#if defined(_MSC_VER)\n  GEMMLOWP_NOP64;\n#else\n  asm volatile(GEMMLOWP_NOP64);\n#endif\n  return 64;\n}\n\n#undef GEMMLOWP_STRING_CONCAT_4\n#undef GEMMLOWP_NOP256\n#undef GEMMLOWP_NOP64\n#undef GEMMLOWP_NOP16\n#undef GEMMLOWP_NOP4\n#undef GEMMLOWP_NOP\n\n// Waits until *var != initial_value.\n//\n// Returns the new value of *var. The guarantee here is that\n// the return value is different from initial_value, and that that\n// new value has been taken by *var at some point during the\n// execution of this function. There is no guarantee that this is\n// still the value of *var when this function returns, since *var is\n// not assumed to be guarded by any lock.\n//\n// First does some busy-waiting for a fixed number of no-op cycles,\n// then falls back to passive waiting for the given condvar, guarded\n// by the given mutex.\n//\n// The idea of doing some initial busy-waiting is to help get\n// better and more consistent multithreading benefits for small GEMM sizes.\n// Busy-waiting help ensuring that if we need to wake up soon after having\n// started waiting, then we can wake up quickly (as opposed to, say,\n// having to wait to be scheduled again by the OS). On the other hand,\n// we must still eventually revert to passive waiting for longer waits\n// (e.g. 
worker threads having finished a GEMM and waiting until the next GEMM)\n// so as to avoid permanently spinning.\n//\ntemplate <typename T>\nT WaitForVariableChange(std::atomic<T>* var,\n                        T initial_value,\n                        std::condition_variable* cond,\n                        std::mutex* mutex) {\n  // If we are on a platform that supports it, spin for some time.\n  {\n    int nops = 0;\n    // First, trivial case where the variable already changed value.\n    T new_value = var->load(std::memory_order_relaxed);\n    if (new_value != initial_value) {\n      std::atomic_thread_fence(std::memory_order_acquire);\n      return new_value;\n    }\n    // Then try busy-waiting.\n    while (nops < kMaxBusyWaitNOPs) {\n      nops += Do256NOPs();\n      new_value = var->load(std::memory_order_relaxed);\n      if (new_value != initial_value) {\n        std::atomic_thread_fence(std::memory_order_acquire);\n        return new_value;\n      }\n    }\n  }\n\n  // Finally, do real passive waiting.\n  {\n    std::unique_lock<std::mutex> g(*mutex);\n    T new_value = var->load(std::memory_order_relaxed);\n    // Handle spurious wakeups.\n    cond->wait(g, [&]() {\n      new_value = var->load(std::memory_order_relaxed);\n      return new_value != initial_value;\n    });\n    DCHECK_NE(static_cast<size_t>(new_value), static_cast<size_t>(initial_value));\n    return new_value;\n  }\n}\n\n// A BlockingCounter lets one thread to wait for N events to occur.\n// This is how the master thread waits for all the worker threads\n// to have finished working.\nclass BlockingCounter {\n public:\n  // Sets/resets the counter; initial_count is the number of\n  // decrementing events that the Wait() call will be waiting for.\n  void Reset(std::size_t initial_count) {\n    std::lock_guard<std::mutex> g(mutex_);\n    DCHECK_EQ(count_, 0);\n    count_ = initial_count;\n  }\n\n  // Decrements the counter; if the counter hits zero, signals\n  // the thread that was waiting 
for that, and returns true.\n  // Otherwise (if the decremented count is still nonzero),\n  // returns false.\n  bool DecrementCount() {\n    const auto count_value = count_.fetch_sub(1, std::memory_order_relaxed) - 1;\n    DCHECK_GE(count_value, 0);\n    if (count_value == 0) {\n      std::lock_guard<std::mutex> g(mutex_);\n      cond_.notify_one();\n    }\n    bool retval = count_value == 0;\n    return retval;\n  }\n\n  // Waits for the N other threads (N having been set by Reset())\n  // to hit the BlockingCounter.\n  void Wait() {\n    while (size_t count_value = count_.load(std::memory_order_relaxed)) {\n      WaitForVariableChange(&count_, count_value, &cond_, &mutex_);\n    }\n  }\n\n private:\n  std::condition_variable cond_;\n  std::mutex mutex_;\n  std::atomic<std::size_t> count_{0};\n};\n\n// A workload for a worker.\nstruct Task {\n  Task() {}\n  virtual ~Task() {}\n  virtual void Run() = 0;\n};\n\n// A worker thread.\nclass alignas(kGEMMLOWPCacheLineSize) Worker {\n public:\n  enum class State : uint8_t {\n    ThreadStartup, // The initial state before the thread main loop runs.\n    Ready, // Is not working, has not yet received new work to do.\n    HasWork, // Has work to do.\n    ExitAsSoonAsPossible // Should exit at earliest convenience.\n  };\n\n  explicit Worker(BlockingCounter* counter_to_decrement_when_ready)\n      : task_(nullptr),\n        state_(State::ThreadStartup),\n        counter_to_decrement_when_ready_(counter_to_decrement_when_ready) {\n    thread_ = caffe2::make_unique<std::thread>([this]() { this->ThreadFunc(); });\n  }\n\n  ~Worker() {\n    ChangeState(State::ExitAsSoonAsPossible);\n    thread_->join();\n  }\n\n  // Changes State; may be called from either the worker thread\n  // or the master thread; however, not all state transitions are legal,\n  // which is guarded by assertions.\n  void ChangeState(State new_state) {\n    std::lock_guard<std::mutex> g(state_mutex_);\n    DCHECK(new_state != 
state_.load(std::memory_order_relaxed));\n    switch (state_.load(std::memory_order_relaxed)) {\n    case State::ThreadStartup:\n      DCHECK(new_state == State::Ready);\n      break;\n    case State::Ready:\n      DCHECK(new_state == State::HasWork || new_state == State::ExitAsSoonAsPossible);\n      break;\n    case State::HasWork:\n      DCHECK(new_state == State::Ready || new_state == State::ExitAsSoonAsPossible);\n      break;\n    default:\n      abort();\n    }\n    state_.store(new_state, std::memory_order_relaxed);\n    state_cond_.notify_one();\n    if (new_state == State::Ready) {\n      counter_to_decrement_when_ready_->DecrementCount();\n    }\n  }\n\n  // Thread entry point.\n  void ThreadFunc() {\n    ChangeState(State::Ready);\n\n    // Thread main loop\n    while (true) {\n      // Get a state to act on\n      // In the 'Ready' state, we have nothing to do but to wait until\n      // we switch to another state.\n      State state_to_act_upon =\n          WaitForVariableChange(&state_, State::Ready, &state_cond_, &state_mutex_);\n\n      // We now have a state to act on, so act.\n      switch (state_to_act_upon) {\n      case State::HasWork:\n        // Got work to do! 
So do it, and then revert to 'Ready' state.\n        DCHECK(task_);\n        task_->Run();\n        task_ = nullptr;\n        ChangeState(State::Ready);\n        break;\n      case State::ExitAsSoonAsPossible:\n        return;\n      default:\n        abort();\n      }\n    }\n  }\n\n  static void* ThreadFunc(void* arg) {\n    static_cast<Worker*>(arg)->ThreadFunc();\n    return nullptr;\n  }\n\n  // Called by the master thead to give this worker work to do.\n  // It is only legal to call this if the worker\n  void StartWork(Task* task) {\n    DCHECK(!task_);\n    task_ = task;\n    DCHECK(state_.load(std::memory_order_acquire) == State::Ready);\n    ChangeState(State::HasWork);\n  }\n\n private:\n  // The underlying thread.\n  std::unique_ptr<std::thread> thread_;\n\n  // The task to be worked on.\n  // Visibility of writes to task_ guarded by state_mutex_.\n  Task* task_;\n\n  // The condition variable and mutex guarding state changes.\n  std::condition_variable state_cond_;\n  std::mutex state_mutex_;\n\n  // The state enum tells if we're currently working, waiting for work, etc.\n  std::atomic<State> state_;\n\n  // pointer to the master's thread BlockingCounter object, to notify the\n  // master thread of when this worker switches to the 'Ready' state.\n  BlockingCounter* const counter_to_decrement_when_ready_;\n};\n\nclass WorkersPool {\n public:\n  WorkersPool() {}\n\n  void Execute(const std::vector<std::shared_ptr<Task>>& tasks) {\n    CAFFE_ENFORCE_GE(tasks.size(), 1);\n    // One of the tasks will be run on the current thread.\n    int workers_count = tasks.size() - 1;\n    CreateWorkers(workers_count);\n    DCHECK_LE(workers_count, workers_.size());\n    counter_to_decrement_when_ready_.Reset(workers_count);\n    for (auto task = 1; task < tasks.size(); ++task) {\n      workers_[task - 1]->StartWork(tasks[task].get());\n    }\n    // Execute the remaining workload immediately on the current thread.\n    auto& task = tasks.front();\n    task->Run();\n    
// Wait for the workers submitted above to finish.\n    counter_to_decrement_when_ready_.Wait();\n  }\n\n private:\n  // Ensures that the pool has at least the given count of workers.\n  // If any new worker has to be created, this function waits for it to\n  // be ready.\n  void CreateWorkers(std::size_t workers_count) {\n    if (workers_.size() >= workers_count) {\n      return;\n    }\n    counter_to_decrement_when_ready_.Reset(workers_count - workers_.size());\n    while (workers_.size() < workers_count) {\n      workers_.push_back(MakeAligned<Worker>::make(&counter_to_decrement_when_ready_));\n    }\n    counter_to_decrement_when_ready_.Wait();\n  }\n\n  DISABLE_COPY_AND_ASSIGN(WorkersPool);\n  std::vector<std::unique_ptr<Worker, AlignedDeleter<Worker>>> workers_;\n  // The BlockingCounter used to wait for the workers.\n  BlockingCounter counter_to_decrement_when_ready_;\n};\n} // namespace caffe2\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/threadpool/pthreadpool.h",
    "content": "// pthreadpool header from https://github.com/Maratyszcza/pthreadpool\n// for NNPACK\n#ifndef CAFFE2_UTILS_PTHREADPOOL_H_\n#define CAFFE2_UTILS_PTHREADPOOL_H_\n\n#include \"ThreadPoolCommon.h\"\n\n#ifndef CAFFE2_THREADPOOL_MOBILE\n#error \"mobile build state not defined\"\n#endif\n\n// ThreadPool only used in mobile builds at the moment\n#if CAFFE2_THREADPOOL_MOBILE\n\n#include <stddef.h> // for size_t\n\ntypedef struct pthreadpool* pthreadpool_t;\n\ntypedef void (*pthreadpool_function_1d_t)(void*, size_t);\ntypedef void (*pthreadpool_function_1d_tiled_t)(void*, size_t, size_t);\ntypedef void (*pthreadpool_function_2d_t)(void*, size_t, size_t);\ntypedef void (*pthreadpool_function_2d_tiled_t)(void*, size_t, size_t, size_t, size_t);\ntypedef void (*pthreadpool_function_3d_t)(void*, size_t, size_t, size_t);\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n/**\n * Creates a thread pool with the specified number of threads.\n *\n * @param[in]  threads_count  The number of threads in the thread pool.\n *    A value of 0 has special interpretation: it creates a thread for each\n *    processor core available in the system.\n *\n * @returns  A pointer to an opaque thread pool object.\n *    On error the function returns NULL and sets errno accordingly.\n */\npthreadpool_t pthreadpool_create(size_t threads_count);\n\n/**\n * Queries the number of threads in a thread pool.\n *\n * @param[in]  threadpool  The thread pool to query.\n *\n * @returns  The number of threads in the thread pool.\n */\nsize_t pthreadpool_get_threads_count(pthreadpool_t threadpool);\n\n\n/**\n * Processes items in parallel using threads from a thread pool.\n *\n * When the call returns, all items have been processed and the thread pool is\n * ready for a new task.\n *\n * @note If multiple threads call this function with the same thread pool, the\n *    calls are serialized.\n *\n * @param[in]  threadpool  The thread pool to use for parallelisation.\n * @param[in]  function    The 
function to call for each item.\n * @param[in]  argument    The first argument passed to the @a function.\n * @param[in]  items       The number of items to process. The @a function\n *    will be called once for each item.\n */\nvoid pthreadpool_compute_1d(\n    pthreadpool_t threadpool,\n    pthreadpool_function_1d_t function,\n    void* argument,\n    size_t range);\n\nvoid pthreadpool_compute_1d_tiled(\n    pthreadpool_t threadpool,\n    pthreadpool_function_1d_tiled_t function,\n    void* argument,\n    size_t range,\n    size_t tile);\n\nvoid pthreadpool_compute_2d(\n    pthreadpool_t threadpool,\n    pthreadpool_function_2d_t function,\n    void* argument,\n    size_t range_i,\n    size_t range_j);\n\nvoid pthreadpool_compute_2d_tiled(\n    pthreadpool_t threadpool,\n    pthreadpool_function_2d_tiled_t function,\n    void* argument,\n    size_t range_i,\n    size_t range_j,\n    size_t tile_i,\n    size_t tile_j);\n\n/**\n * Terminates threads in the thread pool and releases associated resources.\n *\n * @warning  Accessing the thread pool after a call to this function constitutes\n *    undefined behaviour and may cause data corruption.\n *\n * @param[in,out]  threadpool  The thread pool to destroy.\n */\nvoid pthreadpool_destroy(pthreadpool_t threadpool);\n\n#ifdef __cplusplus\n} /* extern \"C\" */\n#endif\n\n#endif // CAFFE2_THREADPOOL_MOBILE\n\n#endif // CAFFE2_UTILS_PTHREADPOOL_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/threadpool/pthreadpool_impl.h",
    "content": "#ifndef CAFFE2_UTILS_PTHREADPOOL_IMPL_H_\n#define CAFFE2_UTILS_PTHREADPOOL_IMPL_H_\n\n#include \"ThreadPoolCommon.h\"\n\n#ifndef CAFFE2_THREADPOOL_MOBILE\n#error \"mobile build state not defined\"\n#endif\n\n#if CAFFE2_THREADPOOL_MOBILE\n\nnamespace caffe2 {\n\nstruct ThreadPool;\n\n} // namespace caffe2\n\nextern \"C\" {\n\n// Wrapper for the caffe2 threadpool for the usage of NNPACK\nstruct pthreadpool {\n  pthreadpool(caffe2::ThreadPool* pool) : pool_(pool) {}\n  caffe2::ThreadPool* pool_;\n};\n\n} // extern \"C\"\n\n#endif // CAFFE2_THREADPOOL_MOBILE\n\n#endif  // CAFFE2_UTILS_PTHREADPOOL_IMPL_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/utils/zmq_helper.h",
    "content": "#ifndef CAFFE2_UTILS_ZMQ_HELPER_H_\n#define CAFFE2_UTILS_ZMQ_HELPER_H_\n\n#include <zmq.h>\n\n#include \"caffe2/core/logging.h\"\n\nnamespace caffe2 {\n\nclass ZmqContext {\n public:\n  explicit ZmqContext(int io_threads) : ptr_(zmq_ctx_new()) {\n    CAFFE_ENFORCE(ptr_ != nullptr, \"Failed to create zmq context.\");\n    int rc = zmq_ctx_set(ptr_, ZMQ_IO_THREADS, io_threads);\n    CAFFE_ENFORCE_EQ(rc, 0);\n    rc = zmq_ctx_set(ptr_, ZMQ_MAX_SOCKETS, ZMQ_MAX_SOCKETS_DFLT);\n    CAFFE_ENFORCE_EQ(rc, 0);\n  }\n  ~ZmqContext() {\n    int rc = zmq_ctx_destroy(ptr_);\n    CAFFE_ENFORCE_EQ(rc, 0);\n  }\n\n  void* ptr() { return ptr_; }\n\n private:\n  void* ptr_;\n\n  DISABLE_COPY_AND_ASSIGN(ZmqContext);\n};\n\nclass ZmqMessage {\n public:\n  ZmqMessage() {\n    int rc = zmq_msg_init(&msg_);\n    CAFFE_ENFORCE_EQ(rc, 0);\n  }\n\n  ~ZmqMessage() {\n    int rc = zmq_msg_close(&msg_);\n    CAFFE_ENFORCE_EQ(rc, 0);\n  }\n\n  zmq_msg_t* msg() { return &msg_; }\n\n  void* data() { return zmq_msg_data(&msg_); }\n  size_t size() { return zmq_msg_size(&msg_); }\n\n private:\n  zmq_msg_t msg_;\n  DISABLE_COPY_AND_ASSIGN(ZmqMessage);\n};\n\nclass ZmqSocket {\n public:\n  explicit ZmqSocket(int type)\n      : context_(1), ptr_(zmq_socket(context_.ptr(), type)) {\n    CAFFE_ENFORCE(ptr_ != nullptr, \"Faild to create zmq socket.\");\n  }\n\n  ~ZmqSocket() {\n    int rc = zmq_close(ptr_);\n    CAFFE_ENFORCE_EQ(rc, 0);\n  }\n\n  void Bind(const string& addr) {\n    int rc = zmq_bind(ptr_, addr.c_str());\n    CAFFE_ENFORCE_EQ(rc, 0);\n  }\n\n  void Unbind(const string& addr) {\n    int rc = zmq_unbind(ptr_, addr.c_str());\n    CAFFE_ENFORCE_EQ(rc, 0);\n  }\n\n  void Connect(const string& addr) {\n    int rc = zmq_connect(ptr_, addr.c_str());\n    CAFFE_ENFORCE_EQ(rc, 0);\n  }\n\n  void Disconnect(const string& addr) {\n    int rc = zmq_disconnect(ptr_, addr.c_str());\n    CAFFE_ENFORCE_EQ(rc, 0);\n  }\n\n  int Send(const string& msg, int flags) {\n    int nbytes = 
zmq_send(ptr_, msg.c_str(), msg.size(), flags);\n    if (nbytes) {\n      return nbytes;\n    } else if (zmq_errno() == EAGAIN) {\n      return 0;\n    } else {\n      LOG(FATAL) << \"Cannot send zmq message. Error number: \"\n                      << zmq_errno();\n      return 0;\n    }\n  }\n\n  int SendTillSuccess(const string& msg, int flags) {\n    CAFFE_ENFORCE(msg.size(), \"You cannot send an empty message.\");\n    int nbytes = 0;\n    do {\n      nbytes = Send(msg, flags);\n    } while (nbytes == 0);\n    return nbytes;\n  }\n\n  int Recv(ZmqMessage* msg) {\n    int nbytes = zmq_msg_recv(msg->msg(), ptr_, 0);\n    if (nbytes >= 0) {\n      return nbytes;\n    } else if (zmq_errno() == EAGAIN || zmq_errno() == EINTR) {\n      return 0;\n    } else {\n      LOG(FATAL) << \"Cannot receive zmq message. Error number: \"\n                      << zmq_errno();\n      return 0;\n    }\n  }\n\n  int RecvTillSuccess(ZmqMessage* msg) {\n    int nbytes = 0;\n    do {\n      nbytes = Recv(msg);\n    } while (nbytes == 0);\n    return nbytes;\n  }\n\n private:\n  ZmqContext context_;\n  void* ptr_;\n};\n\n}  // namespace caffe2\n\n\n#endif  // CAFFE2_UTILS_ZMQ_HELPER_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/video/video_decoder.h",
    "content": "#ifndef CAFFE2_VIDEO_VIDEO_DECODER_H_\n#define CAFFE2_VIDEO_VIDEO_DECODER_H_\n\n#include <stdio.h>\n#include <memory>\n#include <string>\n#include <vector>\n#include \"caffe2/core/logging.h\"\n\nextern \"C\" {\n#include <libavformat/avformat.h>\n#include <libavformat/avio.h>\n}\n\nnamespace caffe2 {\n\n#define VIO_BUFFER_SZ 32768\n#define MAX_DECODING_FRAMES 10000\n\n// enum to specify 3 special fps sampling behaviors:\n// 0: disable fps sampling, no frame sampled at all\n// -1: unlimited fps sampling, will sample at native video fps\n// -2: disable fps sampling, but will get the frame at specific timestamp\nenum SpecialFps {\n  SAMPLE_NO_FRAME = 0,\n  SAMPLE_ALL_FRAMES = -1,\n  SAMPLE_TIMESTAMP_ONLY = -2,\n};\n\n// sampling interval for fps starting at specified timestamp\n// use enum SpecialFps to set special fps decoding behavior\n// note sampled fps will not always accurately follow the target fps,\n// because sampled frame has to snap to actual frame timestamp,\n// e.g. 
video fps = 25, sample fps = 4 will sample every 0.28s, not 0.25\n// video fps = 25, sample fps = 5 will sample every 0.24s, not 0.2,\n// because of floating-point division accuracy (1 / 5.0 is not exactly 0.2)\nstruct SampleInterval {\n  double timestamp;\n  double fps;\n  SampleInterval() : timestamp(-1), fps(SpecialFps::SAMPLE_ALL_FRAMES) {}\n  SampleInterval(double ts, double f) : timestamp(ts), fps(f) {}\n  bool operator<(const SampleInterval& itvl) const {\n    return (timestamp < itvl.timestamp);\n  }\n};\n\nclass Params {\n public:\n  // return all key-frames regardless of specified fps\n  bool keyFrames_ = false;\n\n  // Output image pixel format\n  AVPixelFormat pixelFormat_ = AVPixelFormat::AV_PIX_FMT_RGB24;\n\n  // Index of stream to decode.\n  // -1 will automatically decode the first video stream.\n  int streamIndex_ = -1;\n\n  // How many frames to output at most from the video\n  // -1 no limit\n  int maximumOutputFrames_ = -1;\n\n  // Output video size, -1 to preserve origianl dimension\n  int outputWidth_ = -1;\n  int outputHeight_ = -1;\n\n  // max output dimension, -1 to preserve original size\n  // the larger dimension of the video will be scaled to this size,\n  // and the second dimension will be scaled to preserve aspect ratio\n  int maxOutputDimension_ = -1;\n\n  // intervals_ control variable sampling fps between different timestamps\n  // intervals_ must be ordered strictly ascending by timestamps\n  // the first interval must have a timestamp of zero\n  // fps must be either the 3 special fps defined in SpecialFps, or > 0\n  std::vector<SampleInterval> intervals_ = {{0, SpecialFps::SAMPLE_ALL_FRAMES}};\n\n  Params() {}\n\n  /**\n   * FPS of output frames\n   * setting here will reset intervals_ and force decoding at target FPS\n   * This can be used if user just want to decode at a steady fps\n   */\n  Params& fps(float v) {\n    intervals_.clear();\n    intervals_.emplace_back(0, v);\n    return *this;\n  }\n\n  /**\n   * Pixel format 
of output buffer, default PIX_FMT_RGB24\n   */\n  Params& pixelFormat(AVPixelFormat pixelFormat) {\n    pixelFormat_ = pixelFormat;\n    return *this;\n  }\n\n  /**\n   * Return all key-frames\n   */\n  Params& keyFrames(bool keyFrames) {\n    keyFrames_ = keyFrames;\n    return *this;\n  }\n\n  /**\n   * Index of video stream to process, defaults to the first video stream\n   */\n  Params& streamIndex(int index) {\n    streamIndex_ = index;\n    return *this;\n  }\n\n  /**\n   * Only output this many frames, default to no limit\n   */\n  Params& maxOutputFrames(int count) {\n    maximumOutputFrames_ = count;\n    return *this;\n  }\n\n  /**\n   * Output frame width, default to video width\n   */\n  Params& outputWidth(int width) {\n    outputWidth_ = width;\n    return *this;\n  }\n\n  /**\n   * Output frame height, default to video height\n   */\n  Params& outputHeight(int height) {\n    outputHeight_ = height;\n    return *this;\n  }\n\n  /**\n   * Max dimension of either width or height, if any is bigger\n   * it will be scaled down to this and econd dimension\n   * will be scaled down to maintain aspect ratio.\n   */\n  Params& maxOutputDimension(int size) {\n    maxOutputDimension_ = size;\n    return *this;\n  }\n};\n\n// data structure for storing decoded video frames\nclass DecodedFrame {\n public:\n  struct avDeleter {\n    void operator()(unsigned char* p) const {\n      av_free(p);\n    }\n  };\n  typedef std::unique_ptr<uint8_t, avDeleter> AvDataPtr;\n\n  // decoded data buffer\n  AvDataPtr data_;\n\n  // size in bytes\n  int size_ = 0;\n\n  // frame dimensions\n  int width_ = 0;\n  int height_ = 0;\n\n  // timestamp in seconds since beginning of video\n  double timestamp_ = 0;\n\n  // true if this is a key frame.\n  bool keyFrame_ = false;\n\n  // index of frame in video\n  int index_ = -1;\n\n  // Sequential number of outputted frame\n  int outputFrameIndex_ = -1;\n};\n\nclass VideoIOContext {\n public:\n  explicit VideoIOContext(const std::string 
fname)\n      : workBuffersize_(VIO_BUFFER_SZ),\n        workBuffer_((uint8_t*)av_malloc(workBuffersize_)),\n        inputFile_(nullptr),\n        inputBuffer_(nullptr),\n        inputBufferSize_(0) {\n    inputFile_ = fopen(fname.c_str(), \"rb\");\n    if (inputFile_ == nullptr) {\n      LOG(ERROR) << \"Error opening video file \" << fname;\n    }\n    ctx_ = avio_alloc_context(\n        static_cast<unsigned char*>(workBuffer_.get()),\n        workBuffersize_,\n        0,\n        this,\n        &VideoIOContext::readFile,\n        nullptr, // no write function\n        &VideoIOContext::seekFile);\n  }\n\n  explicit VideoIOContext(const char* buffer, int size)\n      : workBuffersize_(VIO_BUFFER_SZ),\n        workBuffer_((uint8_t*)av_malloc(workBuffersize_)),\n        inputFile_(nullptr),\n        inputBuffer_(buffer),\n        inputBufferSize_(size) {\n    ctx_ = avio_alloc_context(\n        static_cast<unsigned char*>(workBuffer_.get()),\n        workBuffersize_,\n        0,\n        this,\n        &VideoIOContext::readMemory,\n        nullptr, // no write function\n        &VideoIOContext::seekMemory);\n  }\n\n  ~VideoIOContext() {\n    av_free(ctx_);\n    if (inputFile_) {\n      fclose(inputFile_);\n    }\n  }\n\n  int read(unsigned char* buf, int buf_size) {\n    if (inputBuffer_) {\n      return readMemory(this, buf, buf_size);\n    } else if (inputFile_) {\n      return readFile(this, buf, buf_size);\n    } else {\n      return -1;\n    }\n  }\n\n  int64_t seek(int64_t offset, int whence) {\n    if (inputBuffer_) {\n      return seekMemory(this, offset, whence);\n    } else if (inputFile_) {\n      return seekFile(this, offset, whence);\n    } else {\n      return -1;\n    }\n  }\n\n  static int readFile(void* opaque, unsigned char* buf, int buf_size) {\n    VideoIOContext* h = static_cast<VideoIOContext*>(opaque);\n    if (feof(h->inputFile_)) {\n      return AVERROR_EOF;\n    }\n    size_t ret = fread(buf, 1, buf_size, h->inputFile_);\n    if (ret < 
buf_size) {\n      if (ferror(h->inputFile_)) {\n        return -1;\n      }\n    }\n    return ret;\n  }\n\n  static int64_t seekFile(void* opaque, int64_t offset, int whence) {\n    VideoIOContext* h = static_cast<VideoIOContext*>(opaque);\n    switch (whence) {\n      case SEEK_CUR: // from current position\n      case SEEK_END: // from eof\n      case SEEK_SET: // from beginning of file\n        return fseek(h->inputFile_, static_cast<long>(offset), whence);\n        break;\n      case AVSEEK_SIZE:\n        int64_t cur = ftell(h->inputFile_);\n        fseek(h->inputFile_, 0L, SEEK_END);\n        int64_t size = ftell(h->inputFile_);\n        fseek(h->inputFile_, cur, SEEK_SET);\n        return size;\n    }\n\n    return -1;\n  }\n\n  static int readMemory(void* opaque, unsigned char* buf, int buf_size) {\n    VideoIOContext* h = static_cast<VideoIOContext*>(opaque);\n    if (buf_size < 0) {\n      return -1;\n    }\n\n    int reminder = h->inputBufferSize_ - h->offset_;\n    int r = buf_size < reminder ? 
buf_size : reminder;\n    if (r < 0) {\n      return AVERROR_EOF;\n    }\n\n    memcpy(buf, h->inputBuffer_ + h->offset_, r);\n    h->offset_ += r;\n    return r;\n  }\n\n  static int64_t seekMemory(void* opaque, int64_t offset, int whence) {\n    VideoIOContext* h = static_cast<VideoIOContext*>(opaque);\n    switch (whence) {\n      case SEEK_CUR: // from current position\n        h->offset_ += offset;\n        break;\n      case SEEK_END: // from eof\n        h->offset_ = h->inputBufferSize_ + offset;\n        break;\n      case SEEK_SET: // from beginning of file\n        h->offset_ = offset;\n        break;\n      case AVSEEK_SIZE:\n        return h->inputBufferSize_;\n    }\n    return h->offset_;\n  }\n\n  AVIOContext* get_avio() {\n    return ctx_;\n  }\n\n private:\n  int workBuffersize_;\n  DecodedFrame::AvDataPtr workBuffer_;\n  // for file mode\n  FILE* inputFile_;\n\n  // for memory mode\n  const char* inputBuffer_;\n  int inputBufferSize_;\n  int offset_ = 0;\n\n  AVIOContext* ctx_;\n};\n\nstruct VideoMeta {\n  double fps;\n  int width;\n  int height;\n  enum AVMediaType codec_type;\n  AVPixelFormat pixFormat;\n  VideoMeta()\n      : fps(-1),\n        width(-1),\n        height(-1),\n        codec_type(AVMEDIA_TYPE_VIDEO),\n        pixFormat(AVPixelFormat::AV_PIX_FMT_RGB24) {}\n};\n\nclass VideoDecoder {\n public:\n  VideoDecoder();\n\n  void decodeFile(\n      const std::string filename,\n      const Params& params,\n      std::vector<std::unique_ptr<DecodedFrame>>& sampledFrames,\n      int maxFrames = 0, /* max frames we want decoded. 0 implies decode all */\n      bool decodeFromStart = true /* decode from start or randomly seek into\n                                     intermediate frame ? */\n      );\n\n  void decodeMemory(\n      const char* buffer,\n      const int size,\n      const Params& params,\n      std::vector<std::unique_ptr<DecodedFrame>>& sampledFrames,\n      int maxFrames = 0, /* max frames we want decoded. 
0 implies decode all */\n      bool decodeFromStart = true /* decode from start or randomly seek into\n                                     intermediate frame ? */\n      );\n\n private:\n  std::string ffmpegErrorStr(int result);\n\n  void decodeLoop(\n      const std::string& videoName,\n      VideoIOContext& ioctx,\n      const Params& params,\n      std::vector<std::unique_ptr<DecodedFrame>>& sampledFrames,\n      int maxFrames = 0, /* max frames we want decoded. 0 implies decode all */\n      bool decodeFromStart = true /* decode from start or randomly seek into\n                                     intermediate frame ? */\n      );\n};\n}\n\n#endif // CAFFE2_VIDEO_VIDEO_DECODER_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/video/video_input_op.h",
    "content": "#ifndef CAFFE2_VIDEO_VIDEO_INPUT_OP_H_\n#define CAFFE2_VIDEO_VIDEO_INPUT_OP_H_\n\n#include <iostream>\n#include <random>\n#include <string>\n\n#include <opencv2/opencv.hpp>\n\n#include \"caffe2/core/db.h\"\n#include \"caffe2/core/logging.h\"\n#include \"caffe2/operators/prefetch_op.h\"\n#include \"caffe2/utils/math.h\"\n#include \"caffe2/utils/thread_pool.h\"\n#include \"caffe2/video/video_io.h\"\n\nnamespace caffe2 {\n\ntemplate <class Context>\nclass VideoInputOp final : public PrefetchOperator<Context> {\n public:\n  using OperatorBase::OutputSize;\n  using PrefetchOperator<Context>::context_;\n  using PrefetchOperator<Context>::prefetch_thread_;\n  explicit VideoInputOp(const OperatorDef& operator_def, Workspace* ws);\n  ~VideoInputOp() {\n    PrefetchOperator<Context>::Finalize();\n  }\n\n  // override methods\n  bool Prefetch() override;\n  bool CopyPrefetched() override;\n\n private:\n  bool GetClipAndLabelFromDBValue(\n      const std::string& value,\n      float*& buffer,\n      int* label_data,\n      std::mt19937* randgen);\n\n  void DecodeAndTransform(\n      const std::string value,\n      float* clip_data,\n      int* label_data,\n      const int crop_size,\n      const bool mirror,\n      const float mean,\n      const float std,\n      std::mt19937* randgen,\n      std::bernoulli_distribution* mirror_this_clip);\n\n  const db::DBReader* reader_;\n  CPUContext cpu_context_;\n  TensorCPU prefetched_clip_;\n  TensorCPU prefetched_label_;\n  Tensor<Context> prefetched_clip_on_device_;\n  Tensor<Context> prefetched_label_on_device_;\n  int batch_size_;\n  float mean_;\n  float std_;\n  int crop_;\n  int scale_h_;\n  int scale_w_;\n  int length_;\n  int sampling_rate_;\n  bool mirror_;\n  bool temporal_jitter_;\n  bool use_image_;\n  bool multiple_label_;\n  int num_of_labels_;\n  bool use_local_file_;\n  bool is_test_;\n  std::string im_extension_;\n\n  // thread pool for parse + decode\n  int num_decode_threads_;\n  
std::shared_ptr<TaskThreadPool> thread_pool_;\n};\n\ntemplate <class Context>\nVideoInputOp<Context>::VideoInputOp(\n    const OperatorDef& operator_def,\n    Workspace* ws)\n    : PrefetchOperator<Context>(operator_def, ws),\n      reader_(nullptr),\n      batch_size_(\n          OperatorBase::template GetSingleArgument<int>(\"batch_size\", 0)),\n      mean_(OperatorBase::template GetSingleArgument<float>(\"mean\", 0.)),\n      std_(OperatorBase::template GetSingleArgument<float>(\"std\", 1.)),\n      crop_(OperatorBase::template GetSingleArgument<int>(\"crop\", -1)),\n      scale_h_(OperatorBase::template GetSingleArgument<int>(\"height\", 0)),\n      scale_w_(OperatorBase::template GetSingleArgument<int>(\"width\", 0)),\n      length_(OperatorBase::template GetSingleArgument<int>(\"length\", 0)),\n      sampling_rate_(\n          OperatorBase::template GetSingleArgument<int>(\"sampling_rate\", 1)),\n      mirror_(OperatorBase::template GetSingleArgument<int>(\"mirror\", 0)),\n      temporal_jitter_(\n          OperatorBase::template GetSingleArgument<int>(\"temporal_jitter\", 1)),\n      use_image_(OperatorBase::template GetSingleArgument<int>(\"use_image\", 0)),\n      multiple_label_(\n          OperatorBase::template GetSingleArgument<int>(\"multiple_label\", 0)),\n      num_of_labels_(\n          OperatorBase::template GetSingleArgument<int>(\"num_of_labels\", 0)),\n      use_local_file_(\n          OperatorBase::template GetSingleArgument<int>(\"use_local_file\", 0)),\n      is_test_(OperatorBase::template GetSingleArgument<int>(\n          OpSchema::Arg_IsTest,\n          0)),\n      im_extension_(\n          OperatorBase::template GetSingleArgument<string>(\"im_extension\", \"\")),\n      num_decode_threads_(\n          OperatorBase::template GetSingleArgument<int>(\"decode_threads\", 4)),\n\n      thread_pool_(new TaskThreadPool(num_decode_threads_)) {\n  CAFFE_ENFORCE_GT(batch_size_, 0, \"Batch size should be nonnegative.\");\n  
CAFFE_ENFORCE_GE(scale_h_, 0, \"Must provide the scale value.\");\n  CAFFE_ENFORCE_GE(scale_w_, 0, \"Must provide the cropping value.\");\n  CAFFE_ENFORCE_GT(length_, 0, \"Must provide the clip length value.\");\n  CAFFE_ENFORCE_GT(crop_, 0, \"Must provide the cropping value.\");\n  CAFFE_ENFORCE_GE(\n      scale_h_,\n      crop_,\n      \"The scaled height must be no smaller than the crop value.\");\n  CAFFE_ENFORCE_GE(\n      scale_w_,\n      crop_,\n      \"The scaled width must be no smaller than the crop value.\");\n  if (multiple_label_) {\n    CAFFE_ENFORCE_GT(\n        num_of_labels_,\n        0,\n        \"Number of labels must be set for using multiple label output.\");\n  }\n\n  // Always need a dbreader, even when using local video files\n  CAFFE_ENFORCE_GT(\n      operator_def.input_size(), 0, \"Need to have a DBReader blob input\");\n\n  LOG(INFO) << \"Creating a clip input op with the following setting: \";\n  LOG(INFO) << \"    Using \" << num_decode_threads_ << \" CPU threads;\";\n  if (temporal_jitter_) {\n    LOG(INFO) << \"  Using temporal jittering;\";\n  }\n  LOG(INFO) << \"    Outputting in batches of \" << batch_size_ << \" images;\";\n  LOG(INFO) << \"    Scaling image to \" << scale_h_ << \"x\" << scale_w_;\n\n  LOG(INFO) << \"    Cropping video frame to \" << crop_\n            << (mirror_ ? \" with \" : \" without \") << \"random mirroring;\";\n  LOG(INFO) << \"    Using \" << (is_test_ ? 
\"center\" : \"random\") << \" crop\";\n  LOG(INFO) << \"    Using a clip of \" << length_ << \" frames;\";\n  LOG(INFO) << \"    Using a sampling rate of 1:\" << sampling_rate_;\n  LOG(INFO) << \"    Subtract mean \" << mean_ << \" and divide by std \" << std_\n            << \".\";\n  vector<TIndex> data_shape(5);\n  vector<TIndex> label_shape(2);\n\n  data_shape[0] = batch_size_;\n  // Assume color videos, will convert to 3 channels, even with black & with\n  // input videos\n  data_shape[1] = 3;\n  data_shape[2] = length_;\n  data_shape[3] = crop_;\n  data_shape[4] = crop_;\n  prefetched_clip_.Resize(data_shape);\n\n  // If multiple label is used, outout label is a binary vector of length\n  // number of labels-dim in indicating which labels present\n  if (multiple_label_) {\n    label_shape[0] = batch_size_;\n    label_shape[1] = num_of_labels_;\n    prefetched_label_.Resize(label_shape);\n  } else {\n    prefetched_label_.Resize(vector<TIndex>(1, batch_size_));\n  }\n}\n\ntemplate <class Context>\nbool VideoInputOp<Context>::GetClipAndLabelFromDBValue(\n    const string& value,\n    float*& buffer,\n    int* label_data,\n    std::mt19937* randgen) {\n  TensorProtos protos;\n  CAFFE_ENFORCE(protos.ParseFromString(value));\n  const TensorProto& video_proto = protos.protos(0);\n  const TensorProto& label_proto = protos.protos(1);\n\n  int start_frm = -1;\n  if (!temporal_jitter_) {\n    const TensorProto& start_frm_proto = protos.protos(2);\n    start_frm = start_frm_proto.int32_data(0);\n  }\n\n  // assign labels\n  if (!multiple_label_) {\n    label_data[0] = label_proto.int32_data(0);\n  } else {\n    // For multiple label case, output label is a binary vector\n    // where presented concepts are makred 1\n    memset(label_data, 0, sizeof(int) * num_of_labels_);\n    for (int i = 0; i < label_proto.int32_data_size(); i++) {\n      label_data[label_proto.int32_data(i)] = 1;\n    }\n  }\n\n  if (use_local_file_) {\n    CAFFE_ENFORCE_EQ(\n        
video_proto.data_type(),\n        TensorProto::STRING,\n        \"Database with a file_list is expected to be string data\");\n  }\n\n  if (video_proto.data_type() == TensorProto::STRING) {\n    const string& encoded_video_str = video_proto.string_data(0);\n    int encoded_size = encoded_video_str.size();\n    if (!use_local_file_) {\n      DecodeClipFromMemoryBuffer(\n          const_cast<char*>(encoded_video_str.data()),\n          encoded_size,\n          start_frm,\n          length_,\n          scale_h_,\n          scale_w_,\n          sampling_rate_,\n          buffer,\n          randgen);\n    } else {\n      // encoded string contains an absolute path to a local file or folder\n      std::string filename = encoded_video_str;\n      if (use_image_) {\n        CAFFE_ENFORCE(\n          !temporal_jitter_,\n          \"Temporal jittering is not suported for image sequence input\"\n        );\n        CHECK(ReadClipFromFrames(\n            filename,\n            start_frm,\n            im_extension_,\n            length_,\n            scale_h_,\n            scale_w_,\n            sampling_rate_,\n            buffer));\n      } else {\n        if (temporal_jitter_) {\n          int num_of_frames = GetNumberOfFrames(filename);\n          start_frm = std::uniform_int_distribution<>(\n              0, num_of_frames - length_ * sampling_rate_ + 1)(*randgen);\n          CHECK(DecodeClipFromVideoFile(\n              filename,\n              start_frm,\n              length_,\n              scale_h_,\n              scale_w_,\n              sampling_rate_,\n              buffer));\n        } else {\n          CHECK(DecodeClipFromVideoFile(\n              filename,\n              start_frm,\n              length_,\n              scale_h_,\n              scale_w_,\n              sampling_rate_,\n              buffer));\n        }\n      }\n    }\n  } else if (video_proto.data_type() == TensorProto::BYTE) {\n    DecodeClipFromMemoryBuffer(\n        
video_proto.byte_data().data(),\n        video_proto.byte_data().size(),\n        start_frm,\n        length_,\n        scale_h_,\n        scale_w_,\n        sampling_rate_,\n        buffer,\n        randgen);\n  } else {\n    LOG(FATAL) << \"Unknown video data type.\";\n  }\n  return true;\n}\n\ntemplate <class Context>\nvoid VideoInputOp<Context>::DecodeAndTransform(\n    const std::string value,\n    float* clip_data,\n    int* label_data,\n    const int crop_size,\n    const bool mirror,\n    const float mean,\n    const float std,\n    std::mt19937* randgen,\n    std::bernoulli_distribution* mirror_this_clip) {\n  float* buffer = nullptr;\n\n  // Decode the video from memory or read from a local file\n  CHECK(GetClipAndLabelFromDBValue(value, buffer, label_data, randgen));\n\n  if (buffer) {\n    ClipTransform(\n        buffer,\n        3,\n        length_,\n        scale_h_,\n        scale_w_,\n        crop_size,\n        mirror,\n        mean,\n        std,\n        clip_data,\n        randgen,\n        mirror_this_clip,\n        is_test_);\n\n    delete[] buffer;\n  }\n}\n\ntemplate <class Context>\nbool VideoInputOp<Context>::Prefetch() {\n  // We will get the reader pointer from input.\n  // If we use local clips, db will store the list\n  reader_ = &OperatorBase::Input<db::DBReader>(0);\n\n  const int channels = 3;\n\n  // Call mutable_data() once to allocate the underlying memory.\n  prefetched_clip_.mutable_data<float>();\n  prefetched_label_.mutable_data<int>();\n\n  // Prefetching handled with a thread pool of \"decode_threads\" threads.\n  std::mt19937 meta_randgen(time(nullptr));\n  std::vector<std::mt19937> randgen_per_thread;\n  for (int i = 0; i < num_decode_threads_; ++i) {\n    randgen_per_thread.emplace_back(meta_randgen());\n  }\n\n  std::bernoulli_distribution mirror_this_clip(0.5);\n  for (int item_id = 0; item_id < batch_size_; ++item_id) {\n    std::mt19937* randgen = &randgen_per_thread[item_id % num_decode_threads_];\n\n    // get the 
label data pointer for the item_id -th example\n    int* label_data = prefetched_label_.mutable_data<int>() +\n        (multiple_label_ ? num_of_labels_ : 1) * item_id;\n\n    // get the clip data pointer for the item_id -th example\n    float* clip_data = prefetched_clip_.mutable_data<float>() +\n        crop_ * crop_ * length_ * channels * item_id;\n\n    std::string key, value;\n    // read data\n    reader_->Read(&key, &value);\n\n    thread_pool_->runTask(std::bind(\n        &VideoInputOp<Context>::DecodeAndTransform,\n        this,\n        std::string(value),\n        clip_data,\n        label_data,\n        crop_,\n        mirror_,\n        mean_,\n        std_,\n        randgen,\n        &mirror_this_clip));\n  } // for over the batch\n  thread_pool_->waitWorkComplete();\n\n  // If the context is not CPUContext, we will need to do a copy in the\n  // prefetch function as well.\n  if (!std::is_same<Context, CPUContext>::value) {\n    prefetched_clip_on_device_.CopyFrom(prefetched_clip_, &context_);\n    prefetched_label_on_device_.CopyFrom(prefetched_label_, &context_);\n  }\n  return true;\n}\n\ntemplate <class Context>\nbool VideoInputOp<Context>::CopyPrefetched() {\n  auto* clip_output = OperatorBase::Output<Tensor<Context>>(0);\n  auto* label_output = OperatorBase::Output<Tensor<Context>>(1);\n  if (std::is_same<Context, CPUContext>::value) {\n    clip_output->CopyFrom(prefetched_clip_, &context_);\n    label_output->CopyFrom(prefetched_label_, &context_);\n  } else {\n    clip_output->CopyFrom(prefetched_clip_on_device_, &context_);\n    label_output->CopyFrom(prefetched_label_on_device_, &context_);\n  }\n  return true;\n}\n\n} // namespace caffe2\n\n#endif // CAFFE2_VIDEO_VIDEO_INPUT_OP_H_\n"
  },
  {
    "path": "app/src/main/cpp/caffe2/video/video_io.h",
    "content": "#ifndef CAFFE2_VIDEO_VIDEO_IO_H_\n#define CAFFE2_VIDEO_VIDEO_IO_H_\n\n#include <opencv2/opencv.hpp>\n#include <random>\n#include \"caffe/proto/caffe.pb.h\"\n\n#include <iostream>\n\nnamespace caffe2 {\n\nvoid ImageChannelToBuffer(const cv::Mat* img, float* buffer, int c);\n\nvoid ImageDataToBuffer(\n    unsigned char* data_buffer,\n    int height,\n    int width,\n    float* buffer,\n    int c);\n\nint GetNumberOfFrames(std::string filename);\n\ndouble GetVideoFPS(std::string filename);\n\nvoid GetVideoMeta(std::string filename, int& number_of_frames, double& fps);\n\nvoid ClipTransform(\n    const float* clip_data,\n    const int channels,\n    const int length,\n    const int height,\n    const int width,\n    const int crop,\n    const bool mirror,\n    float mean,\n    float std,\n    float* transformed_clip,\n    std::mt19937* randgen,\n    std::bernoulli_distribution* mirror_this_clip,\n    const bool use_center_crop);\n\nbool ReadClipFromFrames(\n    std::string input_dir,\n    const int start_frm,\n    std::string file_extension,\n    const int length,\n    const int height,\n    const int width,\n    const int sampling_rate,\n    float*& buffer);\n\nbool ReadClipFromVideoLazzy(\n    std::string filename,\n    const int start_frm,\n    const int length,\n    const int height,\n    const int width,\n    const int sampling_rate,\n    float*& buffer);\n\nbool ReadClipFromVideoSequential(\n    std::string filename,\n    const int start_frm,\n    const int length,\n    const int height,\n    const int width,\n    const int sampling_rate,\n    float*& buffer);\n\nbool ReadClipFromVideo(\n    std::string filename,\n    const int start_frm,\n    const int length,\n    const int height,\n    const int width,\n    const int sampling_rate,\n    float*& buffer);\n\nbool DecodeClipFromVideoFile(\n    std::string filename,\n    const int start_frm,\n    const int length,\n    const int height,\n    const int width,\n    const int sampling_rate,\n    
float*& buffer);\n\nbool DecodeClipFromMemoryBuffer(\n    const char* video_buffer,\n    const int size,\n    const int start_frm,\n    const int length,\n    const int height,\n    const int width,\n    const int sampling_rate,\n    float*& buffer,\n    std::mt19937* randgen);\n}\n\n#endif // CAFFE2_VIDEO_VIDEO_IO_H_\n"
  },
  {
    "path": "app/src/main/cpp/classes.h",
    "content": "const char * imagenet_classes[] {\n\"tench, Tinca tinca\",\n\"goldfish, Carassius auratus\",\n\"great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias\",\n\"tiger shark, Galeocerdo cuvieri\",\n\"hammerhead, hammerhead shark\",\n\"electric ray, crampfish, numbfish, torpedo\",\n\"stingray\",\n\"cock\",\n\"hen\",\n\"ostrich, Struthio camelus\",\n\"brambling, Fringilla montifringilla\",\n\"goldfinch, Carduelis carduelis\",\n\"house finch, linnet, Carpodacus mexicanus\",\n\"junco, snowbird\",\n\"indigo bunting, indigo finch, indigo bird, Passerina cyanea\",\n\"robin, American robin, Turdus migratorius\",\n\"bulbul\",\n\"jay\",\n\"magpie\",\n\"chickadee\",\n\"water ouzel, dipper\",\n\"kite\",\n\"bald eagle, American eagle, Haliaeetus leucocephalus\",\n\"vulture\",\n\"great grey owl, great gray owl, Strix nebulosa\",\n\"European fire salamander, Salamandra salamandra\",\n\"common newt, Triturus vulgaris\",\n\"eft\",\n\"spotted salamander, Ambystoma maculatum\",\n\"axolotl, mud puppy, Ambystoma mexicanum\",\n\"bullfrog, Rana catesbeiana\",\n\"tree frog, tree-frog\",\n\"tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui\",\n\"loggerhead, loggerhead turtle, Caretta caretta\",\n\"leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea\",\n\"mud turtle\",\n\"terrapin\",\n\"box turtle, box tortoise\",\n\"banded gecko\",\n\"common iguana, iguana, Iguana iguana\",\n\"American chameleon, anole, Anolis carolinensis\",\n\"whiptail, whiptail lizard\",\n\"agama\",\n\"frilled lizard, Chlamydosaurus kingi\",\n\"alligator lizard\",\n\"Gila monster, Heloderma suspectum\",\n\"green lizard, Lacerta viridis\",\n\"African chameleon, Chamaeleo chamaeleon\",\n\"Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis\",\n\"African crocodile, Nile crocodile, Crocodylus niloticus\",\n\"American alligator, Alligator mississipiensis\",\n\"triceratops\",\n\"thunder snake, worm snake, Carphophis 
amoenus\",\n\"ringneck snake, ring-necked snake, ring snake\",\n\"hognose snake, puff adder, sand viper\",\n\"green snake, grass snake\",\n\"king snake, kingsnake\",\n\"garter snake, grass snake\",\n\"water snake\",\n\"vine snake\",\n\"night snake, Hypsiglena torquata\",\n\"boa constrictor, Constrictor constrictor\",\n\"rock python, rock snake, Python sebae\",\n\"Indian cobra, Naja naja\",\n\"green mamba\",\n\"sea snake\",\n\"horned viper, cerastes, sand viper, horned asp, Cerastes cornutus\",\n\"diamondback, diamondback rattlesnake, Crotalus adamanteus\",\n\"sidewinder, horned rattlesnake, Crotalus cerastes\",\n\"trilobite\",\n\"harvestman, daddy longlegs, Phalangium opilio\",\n\"scorpion\",\n\"black and gold garden spider, Argiope aurantia\",\n\"barn spider, Araneus cavaticus\",\n\"garden spider, Aranea diademata\",\n\"black widow, Latrodectus mactans\",\n\"tarantula\",\n\"wolf spider, hunting spider\",\n\"tick\",\n\"centipede\",\n\"black grouse\",\n\"ptarmigan\",\n\"ruffed grouse, partridge, Bonasa umbellus\",\n\"prairie chicken, prairie grouse, prairie fowl\",\n\"peacock\",\n\"quail\",\n\"partridge\",\n\"African grey, African gray, Psittacus erithacus\",\n\"macaw\",\n\"sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita\",\n\"lorikeet\",\n\"coucal\",\n\"bee eater\",\n\"hornbill\",\n\"hummingbird\",\n\"jacamar\",\n\"toucan\",\n\"drake\",\n\"red-breasted merganser, Mergus serrator\",\n\"goose\",\n\"black swan, Cygnus atratus\",\n\"tusker\",\n\"echidna, spiny anteater, anteater\",\n\"platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus\",\n\"wallaby, brush kangaroo\",\n\"koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus\",\n\"wombat\",\n\"jellyfish\",\n\"sea anemone, anemone\",\n\"brain coral\",\n\"flatworm, platyhelminth\",\n\"nematode, nematode worm, roundworm\",\n\"conch\",\n\"snail\",\n\"slug\",\n\"sea slug, nudibranch\",\n\"chiton, coat-of-mail shell, sea cradle, polyplacophore\",\n\"chambered 
nautilus, pearly nautilus, nautilus\",\n\"Dungeness crab, Cancer magister\",\n\"rock crab, Cancer irroratus\",\n\"fiddler crab\",\n\"king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica\",\n\"American lobster, Northern lobster, Maine lobster, Homarus americanus\",\n\"spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish\",\n\"crayfish, crawfish, crawdad, crawdaddy\",\n\"hermit crab\",\n\"isopod\",\n\"white stork, Ciconia ciconia\",\n\"black stork, Ciconia nigra\",\n\"spoonbill\",\n\"flamingo\",\n\"little blue heron, Egretta caerulea\",\n\"American egret, great white heron, Egretta albus\",\n\"bittern\",\n\"crane\",\n\"limpkin, Aramus pictus\",\n\"European gallinule, Porphyrio porphyrio\",\n\"American coot, marsh hen, mud hen, water hen, Fulica americana\",\n\"bustard\",\n\"ruddy turnstone, Arenaria interpres\",\n\"red-backed sandpiper, dunlin, Erolia alpina\",\n\"redshank, Tringa totanus\",\n\"dowitcher\",\n\"oystercatcher, oyster catcher\",\n\"pelican\",\n\"king penguin, Aptenodytes patagonica\",\n\"albatross, mollymawk\",\n\"grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus\",\n\"killer whale, killer, orca, grampus, sea wolf, Orcinus orca\",\n\"dugong, Dugong dugon\",\n\"sea lion\",\n\"Chihuahua\",\n\"Japanese spaniel\",\n\"Maltese dog, Maltese terrier, Maltese\",\n\"Pekinese, Pekingese, Peke\",\n\"Shih-Tzu\",\n\"Blenheim spaniel\",\n\"papillon\",\n\"toy terrier\",\n\"Rhodesian ridgeback\",\n\"Afghan hound, Afghan\",\n\"basset, basset hound\",\n\"beagle\",\n\"bloodhound, sleuthhound\",\n\"bluetick\",\n\"black-and-tan coonhound\",\n\"Walker hound, Walker foxhound\",\n\"English foxhound\",\n\"redbone\",\n\"borzoi, Russian wolfhound\",\n\"Irish wolfhound\",\n\"Italian greyhound\",\n\"whippet\",\n\"Ibizan hound, Ibizan Podenco\",\n\"Norwegian elkhound, elkhound\",\n\"otterhound, otter hound\",\n\"Saluki, gazelle hound\",\n\"Scottish deerhound, 
deerhound\",\n\"Weimaraner\",\n\"Staffordshire bullterrier, Staffordshire bull terrier\",\n\"American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier\",\n\"Bedlington terrier\",\n\"Border terrier\",\n\"Kerry blue terrier\",\n\"Irish terrier\",\n\"Norfolk terrier\",\n\"Norwich terrier\",\n\"Yorkshire terrier\",\n\"wire-haired fox terrier\",\n\"Lakeland terrier\",\n\"Sealyham terrier, Sealyham\",\n\"Airedale, Airedale terrier\",\n\"cairn, cairn terrier\",\n\"Australian terrier\",\n\"Dandie Dinmont, Dandie Dinmont terrier\",\n\"Boston bull, Boston terrier\",\n\"miniature schnauzer\",\n\"giant schnauzer\",\n\"standard schnauzer\",\n\"Scotch terrier, Scottish terrier, Scottie\",\n\"Tibetan terrier, chrysanthemum dog\",\n\"silky terrier, Sydney silky\",\n\"soft-coated wheaten terrier\",\n\"West Highland white terrier\",\n\"Lhasa, Lhasa apso\",\n\"flat-coated retriever\",\n\"curly-coated retriever\",\n\"golden retriever\",\n\"Labrador retriever\",\n\"Chesapeake Bay retriever\",\n\"German short-haired pointer\",\n\"vizsla, Hungarian pointer\",\n\"English setter\",\n\"Irish setter, red setter\",\n\"Gordon setter\",\n\"Brittany spaniel\",\n\"clumber, clumber spaniel\",\n\"English springer, English springer spaniel\",\n\"Welsh springer spaniel\",\n\"cocker spaniel, English cocker spaniel, cocker\",\n\"Sussex spaniel\",\n\"Irish water spaniel\",\n\"kuvasz\",\n\"schipperke\",\n\"groenendael\",\n\"malinois\",\n\"briard\",\n\"kelpie\",\n\"komondor\",\n\"Old English sheepdog, bobtail\",\n\"Shetland sheepdog, Shetland sheep dog, Shetland\",\n\"collie\",\n\"Border collie\",\n\"Bouvier des Flandres, Bouviers des Flandres\",\n\"Rottweiler\",\n\"German shepherd, German shepherd dog, German police dog, alsatian\",\n\"Doberman, Doberman pinscher\",\n\"miniature pinscher\",\n\"Greater Swiss Mountain dog\",\n\"Bernese mountain dog\",\n\"Appenzeller\",\n\"EntleBucher\",\n\"boxer\",\n\"bull mastiff\",\n\"Tibetan mastiff\",\n\"French 
bulldog\",\n\"Great Dane\",\n\"Saint Bernard, St Bernard\",\n\"Eskimo dog, husky\",\n\"malamute, malemute, Alaskan malamute\",\n\"Siberian husky\",\n\"dalmatian, coach dog, carriage dog\",\n\"affenpinscher, monkey pinscher, monkey dog\",\n\"basenji\",\n\"pug, pug-dog\",\n\"Leonberg\",\n\"Newfoundland, Newfoundland dog\",\n\"Great Pyrenees\",\n\"Samoyed, Samoyede\",\n\"Pomeranian\",\n\"chow, chow chow\",\n\"keeshond\",\n\"Brabancon griffon\",\n\"Pembroke, Pembroke Welsh corgi\",\n\"Cardigan, Cardigan Welsh corgi\",\n\"toy poodle\",\n\"miniature poodle\",\n\"standard poodle\",\n\"Mexican hairless\",\n\"timber wolf, grey wolf, gray wolf, Canis lupus\",\n\"white wolf, Arctic wolf, Canis lupus tundrarum\",\n\"red wolf, maned wolf, Canis rufus, Canis niger\",\n\"coyote, prairie wolf, brush wolf, Canis latrans\",\n\"dingo, warrigal, warragal, Canis dingo\",\n\"dhole, Cuon alpinus\",\n\"African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus\",\n\"hyena, hyaena\",\n\"red fox, Vulpes vulpes\",\n\"kit fox, Vulpes macrotis\",\n\"Arctic fox, white fox, Alopex lagopus\",\n\"grey fox, gray fox, Urocyon cinereoargenteus\",\n\"tabby, tabby cat\",\n\"tiger cat\",\n\"Persian cat\",\n\"Siamese cat, Siamese\",\n\"Egyptian cat\",\n\"cougar, puma, catamount, mountain lion, painter, panther, Felis concolor\",\n\"lynx, catamount\",\n\"leopard, Panthera pardus\",\n\"snow leopard, ounce, Panthera uncia\",\n\"jaguar, panther, Panthera onca, Felis onca\",\n\"lion, king of beasts, Panthera leo\",\n\"tiger, Panthera tigris\",\n\"cheetah, chetah, Acinonyx jubatus\",\n\"brown bear, bruin, Ursus arctos\",\n\"American black bear, black bear, Ursus americanus, Euarctos americanus\",\n\"ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus\",\n\"sloth bear, Melursus ursinus, Ursus ursinus\",\n\"mongoose\",\n\"meerkat, mierkat\",\n\"tiger beetle\",\n\"ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle\",\n\"ground beetle, carabid beetle\",\n\"long-horned beetle, longicorn, 
longicorn beetle\",\n\"leaf beetle, chrysomelid\",\n\"dung beetle\",\n\"rhinoceros beetle\",\n\"weevil\",\n\"fly\",\n\"bee\",\n\"ant, emmet, pismire\",\n\"grasshopper, hopper\",\n\"cricket\",\n\"walking stick, walkingstick, stick insect\",\n\"cockroach, roach\",\n\"mantis, mantid\",\n\"cicada, cicala\",\n\"leafhopper\",\n\"lacewing, lacewing fly\",\n\"dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk\",\n\"damselfly\",\n\"admiral\",\n\"ringlet, ringlet butterfly\",\n\"monarch, monarch butterfly, milkweed butterfly, Danaus plexippus\",\n\"cabbage butterfly\",\n\"sulphur butterfly, sulfur butterfly\",\n\"lycaenid, lycaenid butterfly\",\n\"starfish, sea star\",\n\"sea urchin\",\n\"sea cucumber, holothurian\",\n\"wood rabbit, cottontail, cottontail rabbit\",\n\"hare\",\n\"Angora, Angora rabbit\",\n\"hamster\",\n\"porcupine, hedgehog\",\n\"fox squirrel, eastern fox squirrel, Sciurus niger\",\n\"marmot\",\n\"beaver\",\n\"guinea pig, Cavia cobaya\",\n\"sorrel\",\n\"zebra\",\n\"hog, pig, grunter, squealer, Sus scrofa\",\n\"wild boar, boar, Sus scrofa\",\n\"warthog\",\n\"hippopotamus, hippo, river horse, Hippopotamus amphibius\",\n\"ox\",\n\"water buffalo, water ox, Asiatic buffalo, Bubalus bubalis\",\n\"bison\",\n\"ram, tup\",\n\"bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis\",\n\"ibex, Capra ibex\",\n\"hartebeest\",\n\"impala, Aepyceros melampus\",\n\"gazelle\",\n\"Arabian camel, dromedary, Camelus dromedarius\",\n\"llama\",\n\"weasel\",\n\"mink\",\n\"polecat, fitch, foulmart, foumart, Mustela putorius\",\n\"black-footed ferret, ferret, Mustela nigripes\",\n\"otter\",\n\"skunk, polecat, wood pussy\",\n\"badger\",\n\"armadillo\",\n\"three-toed sloth, ai, Bradypus tridactylus\",\n\"orangutan, orang, orangutang, Pongo pygmaeus\",\n\"gorilla, Gorilla gorilla\",\n\"chimpanzee, chimp, Pan troglodytes\",\n\"gibbon, Hylobates lar\",\n\"siamang, Hylobates 
syndactylus, Symphalangus syndactylus\",\n\"guenon, guenon monkey\",\n\"patas, hussar monkey, Erythrocebus patas\",\n\"baboon\",\n\"macaque\",\n\"langur\",\n\"colobus, colobus monkey\",\n\"proboscis monkey, Nasalis larvatus\",\n\"marmoset\",\n\"capuchin, ringtail, Cebus capucinus\",\n\"howler monkey, howler\",\n\"titi, titi monkey\",\n\"spider monkey, Ateles geoffroyi\",\n\"squirrel monkey, Saimiri sciureus\",\n\"Madagascar cat, ring-tailed lemur, Lemur catta\",\n\"indri, indris, Indri indri, Indri brevicaudatus\",\n\"Indian elephant, Elephas maximus\",\n\"African elephant, Loxodonta africana\",\n\"lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens\",\n\"giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca\",\n\"barracouta, snoek\",\n\"eel\",\n\"coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch\",\n\"rock beauty, Holocanthus tricolor\",\n\"anemone fish\",\n\"sturgeon\",\n\"gar, garfish, garpike, billfish, Lepisosteus osseus\",\n\"lionfish\",\n\"puffer, pufferfish, blowfish, globefish\",\n\"abacus\",\n\"abaya\",\n\"academic gown, academic robe, judge's robe\",\n\"accordion, piano accordion, squeeze box\",\n\"acoustic guitar\",\n\"aircraft carrier, carrier, flattop, attack aircraft carrier\",\n\"airliner\",\n\"airship, dirigible\",\n\"altar\",\n\"ambulance\",\n\"amphibian, amphibious vehicle\",\n\"analog clock\",\n\"apiary, bee house\",\n\"apron\",\n\"ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin\",\n\"assault rifle, assault gun\",\n\"backpack, back pack, knapsack, packsack, rucksack, haversack\",\n\"bakery, bakeshop, bakehouse\",\n\"balance beam, beam\",\n\"balloon\",\n\"ballpoint, ballpoint pen, ballpen, Biro\",\n\"Band Aid\",\n\"banjo\",\n\"bannister, banister, balustrade, balusters, handrail\",\n\"barbell\",\n\"barber chair\",\n\"barbershop\",\n\"barn\",\n\"barometer\",\n\"barrel, cask\",\n\"barrow, garden cart, lawn cart, 
wheelbarrow\",\n\"baseball\",\n\"basketball\",\n\"bassinet\",\n\"bassoon\",\n\"bathing cap, swimming cap\",\n\"bath towel\",\n\"bathtub, bathing tub, bath, tub\",\n\"beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon\",\n\"beacon, lighthouse, beacon light, pharos\",\n\"beaker\",\n\"bearskin, busby, shako\",\n\"beer bottle\",\n\"beer glass\",\n\"bell cote, bell cot\",\n\"bib\",\n\"bicycle-built-for-two, tandem bicycle, tandem\",\n\"bikini, two-piece\",\n\"binder, ring-binder\",\n\"binoculars, field glasses, opera glasses\",\n\"birdhouse\",\n\"boathouse\",\n\"bobsled, bobsleigh, bob\",\n\"bolo tie, bolo, bola tie, bola\",\n\"bonnet, poke bonnet\",\n\"bookcase\",\n\"bookshop, bookstore, bookstall\",\n\"bottlecap\",\n\"bow\",\n\"bow tie, bow-tie, bowtie\",\n\"brass, memorial tablet, plaque\",\n\"brassiere, bra, bandeau\",\n\"breakwater, groin, groyne, mole, bulwark, seawall, jetty\",\n\"breastplate, aegis, egis\",\n\"broom\",\n\"bucket, pail\",\n\"buckle\",\n\"bulletproof vest\",\n\"bullet train, bullet\",\n\"butcher shop, meat market\",\n\"cab, hack, taxi, taxicab\",\n\"caldron, cauldron\",\n\"candle, taper, wax light\",\n\"cannon\",\n\"canoe\",\n\"can opener, tin opener\",\n\"cardigan\",\n\"car mirror\",\n\"carousel, carrousel, merry-go-round, roundabout, whirligig\",\n\"carpenter's kit, tool kit\",\n\"carton\",\n\"car wheel\",\n\"cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM\",\n\"cassette\",\n\"cassette player\",\n\"castle\",\n\"catamaran\",\n\"CD player\",\n\"cello, violoncello\",\n\"cellular telephone, cellular phone, cellphone, cell, mobile phone\",\n\"chain\",\n\"chainlink fence\",\n\"chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour\",\n\"chain saw, chainsaw\",\n\"chest\",\n\"chiffonier, commode\",\n\"chime, bell, gong\",\n\"china cabinet, china closet\",\n\"Christmas stocking\",\n\"church, church building\",\n\"cinema, 
movie theater, movie theatre, movie house, picture palace\",\n\"cleaver, meat cleaver, chopper\",\n\"cliff dwelling\",\n\"cloak\",\n\"clog, geta, patten, sabot\",\n\"cocktail shaker\",\n\"coffee mug\",\n\"coffeepot\",\n\"coil, spiral, volute, whorl, helix\",\n\"combination lock\",\n\"computer keyboard, keypad\",\n\"confectionery, confectionary, candy store\",\n\"container ship, containership, container vessel\",\n\"convertible\",\n\"corkscrew, bottle screw\",\n\"cornet, horn, trumpet, trump\",\n\"cowboy boot\",\n\"cowboy hat, ten-gallon hat\",\n\"cradle\",\n\"crane\",\n\"crash helmet\",\n\"crate\",\n\"crib, cot\",\n\"Crock Pot\",\n\"croquet ball\",\n\"crutch\",\n\"cuirass\",\n\"dam, dike, dyke\",\n\"desk\",\n\"desktop computer\",\n\"dial telephone, dial phone\",\n\"diaper, nappy, napkin\",\n\"digital clock\",\n\"digital watch\",\n\"dining table, board\",\n\"dishrag, dishcloth\",\n\"dishwasher, dish washer, dishwashing machine\",\n\"disk brake, disc brake\",\n\"dock, dockage, docking facility\",\n\"dogsled, dog sled, dog sleigh\",\n\"dome\",\n\"doormat, welcome mat\",\n\"drilling platform, offshore rig\",\n\"drum, membranophone, tympan\",\n\"drumstick\",\n\"dumbbell\",\n\"Dutch oven\",\n\"electric fan, blower\",\n\"electric guitar\",\n\"electric locomotive\",\n\"entertainment center\",\n\"envelope\",\n\"espresso maker\",\n\"face powder\",\n\"feather boa, boa\",\n\"file, file cabinet, filing cabinet\",\n\"fireboat\",\n\"fire engine, fire truck\",\n\"fire screen, fireguard\",\n\"flagpole, flagstaff\",\n\"flute, transverse flute\",\n\"folding chair\",\n\"football helmet\",\n\"forklift\",\n\"fountain\",\n\"fountain pen\",\n\"four-poster\",\n\"freight car\",\n\"French horn, horn\",\n\"frying pan, frypan, skillet\",\n\"fur coat\",\n\"garbage truck, dustcart\",\n\"gasmask, respirator, gas helmet\",\n\"gas pump, gasoline pump, petrol pump, island dispenser\",\n\"goblet\",\n\"go-kart\",\n\"golf ball\",\n\"golfcart, golf cart\",\n\"gondola\",\n\"gong, 
tam-tam\",\n\"gown\",\n\"grand piano, grand\",\n\"greenhouse, nursery, glasshouse\",\n\"grille, radiator grille\",\n\"grocery store, grocery, food market, market\",\n\"guillotine\",\n\"hair slide\",\n\"hair spray\",\n\"half track\",\n\"hammer\",\n\"hamper\",\n\"hand blower, blow dryer, blow drier, hair dryer, hair drier\",\n\"hand-held computer, hand-held microcomputer\",\n\"handkerchief, hankie, hanky, hankey\",\n\"hard disc, hard disk, fixed disk\",\n\"harmonica, mouth organ, harp, mouth harp\",\n\"harp\",\n\"harvester, reaper\",\n\"hatchet\",\n\"holster\",\n\"home theater, home theatre\",\n\"honeycomb\",\n\"hook, claw\",\n\"hoopskirt, crinoline\",\n\"horizontal bar, high bar\",\n\"horse cart, horse-cart\",\n\"hourglass\",\n\"iPod\",\n\"iron, smoothing iron\",\n\"jack-o'-lantern\",\n\"jean, blue jean, denim\",\n\"jeep, landrover\",\n\"jersey, T-shirt, tee shirt\",\n\"jigsaw puzzle\",\n\"jinrikisha, ricksha, rickshaw\",\n\"joystick\",\n\"kimono\",\n\"knee pad\",\n\"knot\",\n\"lab coat, laboratory coat\",\n\"ladle\",\n\"lampshade, lamp shade\",\n\"laptop, laptop computer\",\n\"lawn mower, mower\",\n\"lens cap, lens cover\",\n\"letter opener, paper knife, paperknife\",\n\"library\",\n\"lifeboat\",\n\"lighter, light, igniter, ignitor\",\n\"limousine, limo\",\n\"liner, ocean liner\",\n\"lipstick, lip rouge\",\n\"Loafer\",\n\"lotion\",\n\"loudspeaker, speaker, speaker unit, loudspeaker system, speaker system\",\n\"loupe, jeweler's loupe\",\n\"lumbermill, sawmill\",\n\"magnetic compass\",\n\"mailbag, postbag\",\n\"mailbox, letter box\",\n\"maillot\",\n\"maillot, tank suit\",\n\"manhole cover\",\n\"maraca\",\n\"marimba, xylophone\",\n\"mask\",\n\"matchstick\",\n\"maypole\",\n\"maze, labyrinth\",\n\"measuring cup\",\n\"medicine chest, medicine cabinet\",\n\"megalith, megalithic structure\",\n\"microphone, mike\",\n\"microwave, microwave oven\",\n\"military uniform\",\n\"milk can\",\n\"minibus\",\n\"miniskirt, mini\",\n\"minivan\",\n\"missile\",\n\"mitten\",\n\"mixing 
bowl\",\n\"mobile home, manufactured home\",\n\"Model T\",\n\"modem\",\n\"monastery\",\n\"monitor\",\n\"moped\",\n\"mortar\",\n\"mortarboard\",\n\"mosque\",\n\"mosquito net\",\n\"motor scooter, scooter\",\n\"mountain bike, all-terrain bike, off-roader\",\n\"mountain tent\",\n\"mouse, computer mouse\",\n\"mousetrap\",\n\"moving van\",\n\"muzzle\",\n\"nail\",\n\"neck brace\",\n\"necklace\",\n\"nipple\",\n\"notebook, notebook computer\",\n\"obelisk\",\n\"oboe, hautboy, hautbois\",\n\"ocarina, sweet potato\",\n\"odometer, hodometer, mileometer, milometer\",\n\"oil filter\",\n\"organ, pipe organ\",\n\"oscilloscope, scope, cathode-ray oscilloscope, CRO\",\n\"overskirt\",\n\"oxcart\",\n\"oxygen mask\",\n\"packet\",\n\"paddle, boat paddle\",\n\"paddlewheel, paddle wheel\",\n\"padlock\",\n\"paintbrush\",\n\"pajama, pyjama, pj's, jammies\",\n\"palace\",\n\"panpipe, pandean pipe, syrinx\",\n\"paper towel\",\n\"parachute, chute\",\n\"parallel bars, bars\",\n\"park bench\",\n\"parking meter\",\n\"passenger car, coach, carriage\",\n\"patio, terrace\",\n\"pay-phone, pay-station\",\n\"pedestal, plinth, footstall\",\n\"pencil box, pencil case\",\n\"pencil sharpener\",\n\"perfume, essence\",\n\"Petri dish\",\n\"photocopier\",\n\"pick, plectrum, plectron\",\n\"pickelhaube\",\n\"picket fence, paling\",\n\"pickup, pickup truck\",\n\"pier\",\n\"piggy bank, penny bank\",\n\"pill bottle\",\n\"pillow\",\n\"ping-pong ball\",\n\"pinwheel\",\n\"pirate, pirate ship\",\n\"pitcher, ewer\",\n\"plane, carpenter's plane, woodworking plane\",\n\"planetarium\",\n\"plastic bag\",\n\"plate rack\",\n\"plow, plough\",\n\"plunger, plumber's helper\",\n\"Polaroid camera, Polaroid Land camera\",\n\"pole\",\n\"police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria\",\n\"poncho\",\n\"pool table, billiard table, snooker table\",\n\"pop bottle, soda bottle\",\n\"pot, flowerpot\",\n\"potter's wheel\",\n\"power drill\",\n\"prayer rug, prayer mat\",\n\"printer\",\n\"prison, prison 
house\",\n\"projectile, missile\",\n\"projector\",\n\"puck, hockey puck\",\n\"punching bag, punch bag, punching ball, punchball\",\n\"purse\",\n\"quill, quill pen\",\n\"quilt, comforter, comfort, puff\",\n\"racer, race car, racing car\",\n\"racket, racquet\",\n\"radiator\",\n\"radio, wireless\",\n\"radio telescope, radio reflector\",\n\"rain barrel\",\n\"recreational vehicle, RV, R.V.\",\n\"reel\",\n\"reflex camera\",\n\"refrigerator, icebox\",\n\"remote control, remote\",\n\"restaurant, eating house, eating place, eatery\",\n\"revolver, six-gun, six-shooter\",\n\"rifle\",\n\"rocking chair, rocker\",\n\"rotisserie\",\n\"rubber eraser, rubber, pencil eraser\",\n\"rugby ball\",\n\"rule, ruler\",\n\"running shoe\",\n\"safe\",\n\"safety pin\",\n\"saltshaker, salt shaker\",\n\"sandal\",\n\"sarong\",\n\"sax, saxophone\",\n\"scabbard\",\n\"scale, weighing machine\",\n\"school bus\",\n\"schooner\",\n\"scoreboard\",\n\"screen, CRT screen\",\n\"screw\",\n\"screwdriver\",\n\"seat belt, seatbelt\",\n\"sewing machine\",\n\"shield, buckler\",\n\"shoe shop, shoe-shop, shoe store\",\n\"shoji\",\n\"shopping basket\",\n\"shopping cart\",\n\"shovel\",\n\"shower cap\",\n\"shower curtain\",\n\"ski\",\n\"ski mask\",\n\"sleeping bag\",\n\"slide rule, slipstick\",\n\"sliding door\",\n\"slot, one-armed bandit\",\n\"snorkel\",\n\"snowmobile\",\n\"snowplow, snowplough\",\n\"soap dispenser\",\n\"soccer ball\",\n\"sock\",\n\"solar dish, solar collector, solar furnace\",\n\"sombrero\",\n\"soup bowl\",\n\"space bar\",\n\"space heater\",\n\"space shuttle\",\n\"spatula\",\n\"speedboat\",\n\"spider web, spider's web\",\n\"spindle\",\n\"sports car, sport car\",\n\"spotlight, spot\",\n\"stage\",\n\"steam locomotive\",\n\"steel arch bridge\",\n\"steel drum\",\n\"stethoscope\",\n\"stole\",\n\"stone wall\",\n\"stopwatch, stop watch\",\n\"stove\",\n\"strainer\",\n\"streetcar, tram, tramcar, trolley, trolley car\",\n\"stretcher\",\n\"studio couch, day bed\",\n\"stupa, tope\",\n\"submarine, pigboat, sub, 
U-boat\",\n\"suit, suit of clothes\",\n\"sundial\",\n\"sunglass\",\n\"sunglasses, dark glasses, shades\",\n\"sunscreen, sunblock, sun blocker\",\n\"suspension bridge\",\n\"swab, swob, mop\",\n\"sweatshirt\",\n\"swimming trunks, bathing trunks\",\n\"swing\",\n\"switch, electric switch, electrical switch\",\n\"syringe\",\n\"table lamp\",\n\"tank, army tank, armored combat vehicle, armoured combat vehicle\",\n\"tape player\",\n\"teapot\",\n\"teddy, teddy bear\",\n\"television, television system\",\n\"tennis ball\",\n\"thatch, thatched roof\",\n\"theater curtain, theatre curtain\",\n\"thimble\",\n\"thresher, thrasher, threshing machine\",\n\"throne\",\n\"tile roof\",\n\"toaster\",\n\"tobacco shop, tobacconist shop, tobacconist\",\n\"toilet seat\",\n\"torch\",\n\"totem pole\",\n\"tow truck, tow car, wrecker\",\n\"toyshop\",\n\"tractor\",\n\"trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi\",\n\"tray\",\n\"trench coat\",\n\"tricycle, trike, velocipede\",\n\"trimaran\",\n\"tripod\",\n\"triumphal arch\",\n\"trolleybus, trolley coach, trackless trolley\",\n\"trombone\",\n\"tub, vat\",\n\"turnstile\",\n\"typewriter keyboard\",\n\"umbrella\",\n\"unicycle, monocycle\",\n\"upright, upright piano\",\n\"vacuum, vacuum cleaner\",\n\"vase\",\n\"vault\",\n\"velvet\",\n\"vending machine\",\n\"vestment\",\n\"viaduct\",\n\"violin, fiddle\",\n\"volleyball\",\n\"waffle iron\",\n\"wall clock\",\n\"wallet, billfold, notecase, pocketbook\",\n\"wardrobe, closet, press\",\n\"warplane, military plane\",\n\"washbasin, handbasin, washbowl, lavabo, wash-hand basin\",\n\"washer, automatic washer, washing machine\",\n\"water bottle\",\n\"water jug\",\n\"water tower\",\n\"whiskey jug\",\n\"whistle\",\n\"wig\",\n\"window screen\",\n\"window shade\",\n\"Windsor tie\",\n\"wine bottle\",\n\"wing\",\n\"wok\",\n\"wooden spoon\",\n\"wool, woolen, woollen\",\n\"worm fence, snake fence, snake-rail fence, Virginia fence\",\n\"wreck\",\n\"yawl\",\n\"yurt\",\n\"web site, website, 
internet site, site\",\n\"comic book\",\n\"crossword puzzle, crossword\",\n\"street sign\",\n\"traffic light, traffic signal, stoplight\",\n\"book jacket, dust cover, dust jacket, dust wrapper\",\n\"menu\",\n\"plate\",\n\"guacamole\",\n\"consomme\",\n\"hot pot, hotpot\",\n\"trifle\",\n\"ice cream, icecream\",\n\"ice lolly, lolly, lollipop, popsicle\",\n\"French loaf\",\n\"bagel, beigel\",\n\"pretzel\",\n\"cheeseburger\",\n\"hotdog, hot dog, red hot\",\n\"mashed potato\",\n\"head cabbage\",\n\"broccoli\",\n\"cauliflower\",\n\"zucchini, courgette\",\n\"spaghetti squash\",\n\"acorn squash\",\n\"butternut squash\",\n\"cucumber, cuke\",\n\"artichoke, globe artichoke\",\n\"bell pepper\",\n\"cardoon\",\n\"mushroom\",\n\"Granny Smith\",\n\"strawberry\",\n\"orange\",\n\"lemon\",\n\"fig\",\n\"pineapple, ananas\",\n\"banana\",\n\"jackfruit, jak, jack\",\n\"custard apple\",\n\"pomegranate\",\n\"hay\",\n\"carbonara\",\n\"chocolate sauce, chocolate syrup\",\n\"dough\",\n\"meat loaf, meatloaf\",\n\"pizza, pizza pie\",\n\"potpie\",\n\"burrito\",\n\"red wine\",\n\"espresso\",\n\"cup\",\n\"eggnog\",\n\"alp\",\n\"bubble\",\n\"cliff, drop, drop-off\",\n\"coral reef\",\n\"geyser\",\n\"lakeside, lakeshore\",\n\"promontory, headland, head, foreland\",\n\"sandbar, sand bar\",\n\"seashore, coast, seacoast, sea-coast\",\n\"valley, vale\",\n\"volcano\",\n\"ballplayer, baseball player\",\n\"groom, bridegroom\",\n\"scuba diver\",\n\"rapeseed\",\n\"daisy\",\n\"yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum\",\n\"corn\",\n\"acorn\",\n\"hip, rose hip, rosehip\",\n\"buckeye, horse chestnut, conker\",\n\"coral fungus\",\n\"agaric\",\n\"gyromitra\",\n\"stinkhorn, carrion fungus\",\n\"earthstar\",\n\"hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa\",\n\"bolete\",\n\"ear, spike, capitulum\",\n\"toilet tissue, toilet paper, bathroom tissue\"\n};\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/any.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_ANY_H__\n#define GOOGLE_PROTOBUF_ANY_H__\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/descriptor.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/arenastring.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// Helper class used to implement google::protobuf::Any.\nclass LIBPROTOBUF_EXPORT AnyMetadata {\n  typedef ArenaStringPtr UrlType;\n  typedef ArenaStringPtr ValueType;\n public:\n  // AnyMetadata does not take ownership of \"type_url\" and \"value\".\n  AnyMetadata(UrlType* type_url, ValueType* value);\n\n  // Packs a message using the default type URL prefix: \"type.googleapis.com\".\n  // The resulted type URL will be \"type.googleapis.com/<message_full_name>\".\n  void PackFrom(const Message& message);\n  // Packs a message using the given type URL prefix. The type URL will be\n  // constructed by concatenating the message type's full name to the prefix\n  // with an optional \"/\" separator if the prefix doesn't already end up \"/\".\n  // For example, both PackFrom(message, \"type.googleapis.com\") and\n  // PackFrom(message, \"type.googleapis.com/\") yield the same result type\n  // URL: \"type.googleapis.com/<message_full_name>\".\n  void PackFrom(const Message& message, const string& type_url_prefix);\n\n  // Unpacks the payload into the given message. 
Returns false if the message's\n  // type doesn't match the type specified in the type URL (i.e., the full\n  // name after the last \"/\" of the type URL doesn't match the message's actaul\n  // full name) or parsing the payload has failed.\n  bool UnpackTo(Message* message) const;\n\n  // Checks whether the type specified in the type URL matches the given type.\n  // A type is consdiered matching if its full name matches the full name after\n  // the last \"/\" in the type URL.\n  template<typename T>\n  bool Is() const {\n    return InternalIs(T::default_instance().GetDescriptor());\n  }\n\n private:\n  bool InternalIs(const Descriptor* message) const;\n\n  UrlType* type_url_;\n  ValueType* value_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(AnyMetadata);\n};\n\nextern const char kAnyFullTypeName[];          // \"google.protobuf.Any\".\nextern const char kTypeGoogleApisComPrefix[];  // \"type.googleapis.com/\".\nextern const char kTypeGoogleProdComPrefix[];  // \"type.googleprod.com/\".\n\n// Get the proto type name from Any::type_url value. For example, passing\n// \"type.googleapis.com/rpc.QueryOrigin\" will return \"rpc.QueryOrigin\" in\n// *full_type_name. Returns false if type_url does not start with\n// \"type.googleapis.com\" or \"type.googleprod.com\".\nbool ParseAnyTypeUrl(const string& type_url, string* full_type_name);\n\n// See if message is of type google.protobuf.Any, if so, return the descriptors\n// for \"type_url\" and \"value\" fields.\nbool GetAnyFieldDescriptors(const Message& message,\n                            const FieldDescriptor** type_url_field,\n                            const FieldDescriptor** value_field);\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_ANY_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/any.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: google/protobuf/any.proto\n\n#ifndef PROTOBUF_google_2fprotobuf_2fany_2eproto__INCLUDED\n#define PROTOBUF_google_2fprotobuf_2fany_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/unknown_field_set.h>\n#include <google/protobuf/any.h>\n// @@protoc_insertion_point(includes)\n\nnamespace google {\nnamespace protobuf {\n\n// Internal implementation detail -- do not call these.\nvoid LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fany_2eproto();\nvoid LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fany_2eproto();\nvoid protobuf_AssignDesc_google_2fprotobuf_2fany_2eproto();\nvoid protobuf_ShutdownFile_google_2fprotobuf_2fany_2eproto();\n\nclass Any;\n\n// ===================================================================\n\nclass LIBPROTOBUF_EXPORT Any : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Any) */ {\n public:\n  Any();\n  virtual ~Any();\n\n  Any(const Any& from);\n\n  inline Any& operator=(const Any& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  static const 
::google::protobuf::Descriptor* descriptor();\n  static const Any& default_instance();\n\n  static const Any* internal_default_instance();\n\n  // implements Any -----------------------------------------------\n\n  void PackFrom(const ::google::protobuf::Message& message);\n  void PackFrom(const ::google::protobuf::Message& message,\n                const ::std::string& type_url_prefix);\n  bool UnpackTo(::google::protobuf::Message* message) const;\n  template<typename T> bool Is() const {\n    return _any_metadata_.Is<T>();\n  }\n\n  void Swap(Any* other);\n\n  // implements Message ----------------------------------------------\n\n  inline Any* New() const { return New(NULL); }\n\n  Any* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Any& from);\n  void MergeFrom(const Any& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Any* other);\n  void UnsafeMergeFrom(const Any& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  
::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string type_url = 1;\n  void clear_type_url();\n  static const int kTypeUrlFieldNumber = 1;\n  const ::std::string& type_url() const;\n  void set_type_url(const ::std::string& value);\n  void set_type_url(const char* value);\n  void set_type_url(const char* value, size_t size);\n  ::std::string* mutable_type_url();\n  ::std::string* release_type_url();\n  void set_allocated_type_url(::std::string* type_url);\n\n  // optional bytes value = 2;\n  void clear_value();\n  static const int kValueFieldNumber = 2;\n  const ::std::string& value() const;\n  void set_value(const ::std::string& value);\n  void set_value(const char* value);\n  void set_value(const void* value, size_t size);\n  ::std::string* mutable_value();\n  ::std::string* release_value();\n  void set_allocated_value(::std::string* value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.Any)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::ArenaStringPtr type_url_;\n  ::google::protobuf::internal::ArenaStringPtr value_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::AnyMetadata _any_metadata_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fany_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fany_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fany_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fany_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Any> Any_default_instance_;\n\n// ===================================================================\n\n\n// 
===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// Any\n\n// optional string type_url = 1;\ninline void Any::clear_type_url() {\n  type_url_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline const ::std::string& Any::type_url() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Any.type_url)\n  return type_url_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Any::set_type_url(const ::std::string& value) {\n  \n  type_url_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.Any.type_url)\n}\ninline void Any::set_type_url(const char* value) {\n  \n  type_url_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Any.type_url)\n}\ninline void Any::set_type_url(const char* value, size_t size) {\n  \n  type_url_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Any.type_url)\n}\ninline ::std::string* Any::mutable_type_url() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Any.type_url)\n  return type_url_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* Any::release_type_url() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Any.type_url)\n  \n  return type_url_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Any::set_allocated_type_url(::std::string* type_url) {\n  if (type_url != NULL) {\n    \n  } else {\n    \n  }\n  type_url_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), type_url);\n  // 
@@protoc_insertion_point(field_set_allocated:google.protobuf.Any.type_url)\n}\n\n// optional bytes value = 2;\ninline void Any::clear_value() {\n  value_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline const ::std::string& Any::value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Any.value)\n  return value_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Any::set_value(const ::std::string& value) {\n  \n  value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.Any.value)\n}\ninline void Any::set_value(const char* value) {\n  \n  value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Any.value)\n}\ninline void Any::set_value(const void* value, size_t size) {\n  \n  value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Any.value)\n}\ninline ::std::string* Any::mutable_value() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Any.value)\n  return value_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* Any::release_value() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Any.value)\n  \n  return value_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Any::set_allocated_value(::std::string* value) {\n  if (value != NULL) {\n    \n  } else {\n    \n  }\n  value_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Any.value)\n}\n\ninline const Any* Any::internal_default_instance() {\n 
 return &Any_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace protobuf\n}  // namespace google\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_google_2fprotobuf_2fany_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/any.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption go_package = \"github.com/golang/protobuf/ptypes/any\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"AnyProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n// `Any` contains an arbitrary serialized protocol buffer message along with a\n// URL that describes the type of the serialized message.\n//\n// Protobuf library provides support to pack/unpack Any values in the form\n// of utility functions or additional generated methods of the Any type.\n//\n// Example 1: Pack and unpack a message in C++.\n//\n//     Foo foo = ...;\n//     Any any;\n//     any.PackFrom(foo);\n//     ...\n//     if (any.UnpackTo(&foo)) {\n//       ...\n//     }\n//\n// Example 2: Pack and unpack a message in Java.\n//\n//     Foo foo = ...;\n//     Any any = Any.pack(foo);\n//     ...\n//     if (any.is(Foo.class)) {\n//       foo = any.unpack(Foo.class);\n//     }\n//\n//  Example 3: Pack and unpack a message in Python.\n//\n//     foo = Foo(...)\n//     any = Any()\n//     any.Pack(foo)\n//     ...\n//     if any.Is(Foo.DESCRIPTOR):\n//       any.Unpack(foo)\n//       ...\n//\n// The pack methods provided by protobuf library will by default use\n// 'type.googleapis.com/full.type.name' as the type URL and the unpack\n// methods only use the fully qualified type name 
after the last '/'\n// in the type URL, for example \"foo.bar.com/x/y.z\" will yield type\n// name \"y.z\".\n//\n//\n// JSON\n// ====\n// The JSON representation of an `Any` value uses the regular\n// representation of the deserialized, embedded message, with an\n// additional field `@type` which contains the type URL. Example:\n//\n//     package google.profile;\n//     message Person {\n//       string first_name = 1;\n//       string last_name = 2;\n//     }\n//\n//     {\n//       \"@type\": \"type.googleapis.com/google.profile.Person\",\n//       \"firstName\": <string>,\n//       \"lastName\": <string>\n//     }\n//\n// If the embedded message type is well-known and has a custom JSON\n// representation, that representation will be embedded adding a field\n// `value` which holds the custom JSON in addition to the `@type`\n// field. Example (for message [google.protobuf.Duration][]):\n//\n//     {\n//       \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n//       \"value\": \"1.212s\"\n//     }\n//\nmessage Any {\n  // A URL/resource name whose content describes the type of the\n  // serialized protocol buffer message.\n  //\n  // For URLs which use the scheme `http`, `https`, or no scheme, the\n  // following restrictions and interpretations apply:\n  //\n  // * If no scheme is provided, `https` is assumed.\n  // * The last segment of the URL's path must represent the fully\n  //   qualified name of the type (as in `path/google.protobuf.Duration`).\n  //   The name should be in a canonical form (e.g., leading \".\" is\n  //   not accepted).\n  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]\n  //   value in binary format, or produce an error.\n  // * Applications are allowed to cache lookup results based on the\n  //   URL, or have them precompiled into a binary to avoid any\n  //   lookup. Therefore, binary compatibility needs to be preserved\n  //   on changes to types. 
(Use versioned type names to manage\n  //   breaking changes.)\n  //\n  // Schemes other than `http`, `https` (or the empty scheme) might be\n  // used with implementation specific semantics.\n  //\n  string type_url = 1;\n\n  // Must be a valid serialized protocol buffer of the above specified type.\n  bytes value = 2;\n}\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/api.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: google/protobuf/api.proto\n\n#ifndef PROTOBUF_google_2fprotobuf_2fapi_2eproto__INCLUDED\n#define PROTOBUF_google_2fprotobuf_2fapi_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/unknown_field_set.h>\n#include <google/protobuf/source_context.pb.h>\n#include <google/protobuf/type.pb.h>\n// @@protoc_insertion_point(includes)\n\nnamespace google {\nnamespace protobuf {\n\n// Internal implementation detail -- do not call these.\nvoid LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fapi_2eproto();\nvoid LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fapi_2eproto();\nvoid protobuf_AssignDesc_google_2fprotobuf_2fapi_2eproto();\nvoid protobuf_ShutdownFile_google_2fprotobuf_2fapi_2eproto();\n\nclass Api;\nclass Method;\nclass Mixin;\n\n// ===================================================================\n\nclass LIBPROTOBUF_EXPORT Api : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Api) */ {\n public:\n  Api();\n  virtual ~Api();\n\n  Api(const Api& from);\n\n  inline Api& operator=(const Api& from) {\n    
CopyFrom(from);\n    return *this;\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Api& default_instance();\n\n  static const Api* internal_default_instance();\n\n  void Swap(Api* other);\n\n  // implements Message ----------------------------------------------\n\n  inline Api* New() const { return New(NULL); }\n\n  Api* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Api& from);\n  void MergeFrom(const Api& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Api* other);\n  void UnsafeMergeFrom(const Api& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void 
set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // repeated .google.protobuf.Method methods = 2;\n  int methods_size() const;\n  void clear_methods();\n  static const int kMethodsFieldNumber = 2;\n  const ::google::protobuf::Method& methods(int index) const;\n  ::google::protobuf::Method* mutable_methods(int index);\n  ::google::protobuf::Method* add_methods();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Method >*\n      mutable_methods();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Method >&\n      methods() const;\n\n  // repeated .google.protobuf.Option options = 3;\n  int options_size() const;\n  void clear_options();\n  static const int kOptionsFieldNumber = 3;\n  const ::google::protobuf::Option& options(int index) const;\n  ::google::protobuf::Option* mutable_options(int index);\n  ::google::protobuf::Option* add_options();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >*\n      mutable_options();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >&\n      options() const;\n\n  // optional string version = 4;\n  void clear_version();\n  static const int kVersionFieldNumber = 4;\n  const ::std::string& version() const;\n  void set_version(const ::std::string& value);\n  void set_version(const char* value);\n  void set_version(const char* value, size_t size);\n  ::std::string* mutable_version();\n  ::std::string* release_version();\n  void set_allocated_version(::std::string* version);\n\n  // optional .google.protobuf.SourceContext source_context = 5;\n  bool has_source_context() const;\n  void clear_source_context();\n  static const int kSourceContextFieldNumber = 5;\n  const ::google::protobuf::SourceContext& source_context() const;\n  ::google::protobuf::SourceContext* 
mutable_source_context();\n  ::google::protobuf::SourceContext* release_source_context();\n  void set_allocated_source_context(::google::protobuf::SourceContext* source_context);\n\n  // repeated .google.protobuf.Mixin mixins = 6;\n  int mixins_size() const;\n  void clear_mixins();\n  static const int kMixinsFieldNumber = 6;\n  const ::google::protobuf::Mixin& mixins(int index) const;\n  ::google::protobuf::Mixin* mutable_mixins(int index);\n  ::google::protobuf::Mixin* add_mixins();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Mixin >*\n      mutable_mixins();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Mixin >&\n      mixins() const;\n\n  // optional .google.protobuf.Syntax syntax = 7;\n  void clear_syntax();\n  static const int kSyntaxFieldNumber = 7;\n  ::google::protobuf::Syntax syntax() const;\n  void set_syntax(::google::protobuf::Syntax value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.Api)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Method > methods_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option > options_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Mixin > mixins_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::internal::ArenaStringPtr version_;\n  ::google::protobuf::SourceContext* source_context_;\n  int syntax_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fapi_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fapi_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fapi_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fapi_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Api> Api_default_instance_;\n\n// 
-------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT Method : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Method) */ {\n public:\n  Method();\n  virtual ~Method();\n\n  Method(const Method& from);\n\n  inline Method& operator=(const Method& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Method& default_instance();\n\n  static const Method* internal_default_instance();\n\n  void Swap(Method* other);\n\n  // implements Message ----------------------------------------------\n\n  inline Method* New() const { return New(NULL); }\n\n  Method* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Method& from);\n  void MergeFrom(const Method& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Method* other);\n  void UnsafeMergeFrom(const Method& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return 
_internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // optional string request_type_url = 2;\n  void clear_request_type_url();\n  static const int kRequestTypeUrlFieldNumber = 2;\n  const ::std::string& request_type_url() const;\n  void set_request_type_url(const ::std::string& value);\n  void set_request_type_url(const char* value);\n  void set_request_type_url(const char* value, size_t size);\n  ::std::string* mutable_request_type_url();\n  ::std::string* release_request_type_url();\n  void set_allocated_request_type_url(::std::string* request_type_url);\n\n  // optional bool request_streaming = 3;\n  void clear_request_streaming();\n  static const int kRequestStreamingFieldNumber = 3;\n  bool request_streaming() const;\n  void set_request_streaming(bool value);\n\n  // optional string response_type_url = 4;\n  void clear_response_type_url();\n  static const int kResponseTypeUrlFieldNumber = 4;\n  const ::std::string& response_type_url() const;\n  void set_response_type_url(const ::std::string& value);\n  void set_response_type_url(const char* value);\n  void set_response_type_url(const char* value, size_t size);\n  ::std::string* mutable_response_type_url();\n  ::std::string* release_response_type_url();\n  void set_allocated_response_type_url(::std::string* response_type_url);\n\n  // optional bool response_streaming = 5;\n  void clear_response_streaming();\n  static const int 
kResponseStreamingFieldNumber = 5;\n  bool response_streaming() const;\n  void set_response_streaming(bool value);\n\n  // repeated .google.protobuf.Option options = 6;\n  int options_size() const;\n  void clear_options();\n  static const int kOptionsFieldNumber = 6;\n  const ::google::protobuf::Option& options(int index) const;\n  ::google::protobuf::Option* mutable_options(int index);\n  ::google::protobuf::Option* add_options();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >*\n      mutable_options();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >&\n      options() const;\n\n  // optional .google.protobuf.Syntax syntax = 7;\n  void clear_syntax();\n  static const int kSyntaxFieldNumber = 7;\n  ::google::protobuf::Syntax syntax() const;\n  void set_syntax(::google::protobuf::Syntax value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.Method)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option > options_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::internal::ArenaStringPtr request_type_url_;\n  ::google::protobuf::internal::ArenaStringPtr response_type_url_;\n  bool request_streaming_;\n  bool response_streaming_;\n  int syntax_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fapi_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fapi_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fapi_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fapi_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Method> Method_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT Mixin : public ::google::protobuf::Message /* 
@@protoc_insertion_point(class_definition:google.protobuf.Mixin) */ {\n public:\n  Mixin();\n  virtual ~Mixin();\n\n  Mixin(const Mixin& from);\n\n  inline Mixin& operator=(const Mixin& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Mixin& default_instance();\n\n  static const Mixin* internal_default_instance();\n\n  void Swap(Mixin* other);\n\n  // implements Message ----------------------------------------------\n\n  inline Mixin* New() const { return New(NULL); }\n\n  Mixin* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Mixin& from);\n  void MergeFrom(const Mixin& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Mixin* other);\n  void UnsafeMergeFrom(const Mixin& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // 
accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // optional string root = 2;\n  void clear_root();\n  static const int kRootFieldNumber = 2;\n  const ::std::string& root() const;\n  void set_root(const ::std::string& value);\n  void set_root(const char* value);\n  void set_root(const char* value, size_t size);\n  ::std::string* mutable_root();\n  ::std::string* release_root();\n  void set_allocated_root(::std::string* root);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.Mixin)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::internal::ArenaStringPtr root_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fapi_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fapi_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fapi_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fapi_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Mixin> Mixin_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// Api\n\n// optional string name = 1;\ninline void Api::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline const ::std::string& Api::name() 
const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Api.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Api::set_name(const ::std::string& value) {\n  \n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.Api.name)\n}\ninline void Api::set_name(const char* value) {\n  \n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Api.name)\n}\ninline void Api::set_name(const char* value, size_t size) {\n  \n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Api.name)\n}\ninline ::std::string* Api::mutable_name() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Api.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* Api::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Api.name)\n  \n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Api::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    \n  } else {\n    \n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Api.name)\n}\n\n// repeated .google.protobuf.Method methods = 2;\ninline int Api::methods_size() const {\n  return methods_.size();\n}\ninline void Api::clear_methods() {\n  methods_.Clear();\n}\ninline const ::google::protobuf::Method& Api::methods(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Api.methods)\n  return 
methods_.Get(index);\n}\ninline ::google::protobuf::Method* Api::mutable_methods(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Api.methods)\n  return methods_.Mutable(index);\n}\ninline ::google::protobuf::Method* Api::add_methods() {\n  // @@protoc_insertion_point(field_add:google.protobuf.Api.methods)\n  return methods_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::Method >*\nApi::mutable_methods() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.Api.methods)\n  return &methods_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Method >&\nApi::methods() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.Api.methods)\n  return methods_;\n}\n\n// repeated .google.protobuf.Option options = 3;\ninline int Api::options_size() const {\n  return options_.size();\n}\ninline void Api::clear_options() {\n  options_.Clear();\n}\ninline const ::google::protobuf::Option& Api::options(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Api.options)\n  return options_.Get(index);\n}\ninline ::google::protobuf::Option* Api::mutable_options(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Api.options)\n  return options_.Mutable(index);\n}\ninline ::google::protobuf::Option* Api::add_options() {\n  // @@protoc_insertion_point(field_add:google.protobuf.Api.options)\n  return options_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >*\nApi::mutable_options() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.Api.options)\n  return &options_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >&\nApi::options() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.Api.options)\n  return options_;\n}\n\n// optional string version = 4;\ninline void Api::clear_version() {\n  
version_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline const ::std::string& Api::version() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Api.version)\n  return version_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Api::set_version(const ::std::string& value) {\n  \n  version_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.Api.version)\n}\ninline void Api::set_version(const char* value) {\n  \n  version_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Api.version)\n}\ninline void Api::set_version(const char* value, size_t size) {\n  \n  version_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Api.version)\n}\ninline ::std::string* Api::mutable_version() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Api.version)\n  return version_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* Api::release_version() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Api.version)\n  \n  return version_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Api::set_allocated_version(::std::string* version) {\n  if (version != NULL) {\n    \n  } else {\n    \n  }\n  version_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), version);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Api.version)\n}\n\n// optional .google.protobuf.SourceContext source_context = 5;\ninline bool Api::has_source_context() const {\n  return this != 
internal_default_instance() && source_context_ != NULL;\n}\ninline void Api::clear_source_context() {\n  if (GetArenaNoVirtual() == NULL && source_context_ != NULL) delete source_context_;\n  source_context_ = NULL;\n}\ninline const ::google::protobuf::SourceContext& Api::source_context() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Api.source_context)\n  return source_context_ != NULL ? *source_context_\n                         : *::google::protobuf::SourceContext::internal_default_instance();\n}\ninline ::google::protobuf::SourceContext* Api::mutable_source_context() {\n  \n  if (source_context_ == NULL) {\n    source_context_ = new ::google::protobuf::SourceContext;\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Api.source_context)\n  return source_context_;\n}\ninline ::google::protobuf::SourceContext* Api::release_source_context() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Api.source_context)\n  \n  ::google::protobuf::SourceContext* temp = source_context_;\n  source_context_ = NULL;\n  return temp;\n}\ninline void Api::set_allocated_source_context(::google::protobuf::SourceContext* source_context) {\n  delete source_context_;\n  source_context_ = source_context;\n  if (source_context) {\n    \n  } else {\n    \n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Api.source_context)\n}\n\n// repeated .google.protobuf.Mixin mixins = 6;\ninline int Api::mixins_size() const {\n  return mixins_.size();\n}\ninline void Api::clear_mixins() {\n  mixins_.Clear();\n}\ninline const ::google::protobuf::Mixin& Api::mixins(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Api.mixins)\n  return mixins_.Get(index);\n}\ninline ::google::protobuf::Mixin* Api::mutable_mixins(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Api.mixins)\n  return mixins_.Mutable(index);\n}\ninline ::google::protobuf::Mixin* Api::add_mixins() {\n  // 
@@protoc_insertion_point(field_add:google.protobuf.Api.mixins)\n  return mixins_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::Mixin >*\nApi::mutable_mixins() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.Api.mixins)\n  return &mixins_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Mixin >&\nApi::mixins() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.Api.mixins)\n  return mixins_;\n}\n\n// optional .google.protobuf.Syntax syntax = 7;\ninline void Api::clear_syntax() {\n  syntax_ = 0;\n}\ninline ::google::protobuf::Syntax Api::syntax() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Api.syntax)\n  return static_cast< ::google::protobuf::Syntax >(syntax_);\n}\ninline void Api::set_syntax(::google::protobuf::Syntax value) {\n  \n  syntax_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Api.syntax)\n}\n\ninline const Api* Api::internal_default_instance() {\n  return &Api_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// Method\n\n// optional string name = 1;\ninline void Method::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline const ::std::string& Method::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Method.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Method::set_name(const ::std::string& value) {\n  \n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.Method.name)\n}\ninline void Method::set_name(const char* value) {\n  \n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Method.name)\n}\ninline void 
Method::set_name(const char* value, size_t size) {\n  \n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Method.name)\n}\ninline ::std::string* Method::mutable_name() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Method.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* Method::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Method.name)\n  \n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Method::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    \n  } else {\n    \n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Method.name)\n}\n\n// optional string request_type_url = 2;\ninline void Method::clear_request_type_url() {\n  request_type_url_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline const ::std::string& Method::request_type_url() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Method.request_type_url)\n  return request_type_url_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Method::set_request_type_url(const ::std::string& value) {\n  \n  request_type_url_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.Method.request_type_url)\n}\ninline void Method::set_request_type_url(const char* value) {\n  \n  request_type_url_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // 
@@protoc_insertion_point(field_set_char:google.protobuf.Method.request_type_url)\n}\ninline void Method::set_request_type_url(const char* value, size_t size) {\n  \n  request_type_url_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Method.request_type_url)\n}\ninline ::std::string* Method::mutable_request_type_url() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Method.request_type_url)\n  return request_type_url_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* Method::release_request_type_url() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Method.request_type_url)\n  \n  return request_type_url_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Method::set_allocated_request_type_url(::std::string* request_type_url) {\n  if (request_type_url != NULL) {\n    \n  } else {\n    \n  }\n  request_type_url_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), request_type_url);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Method.request_type_url)\n}\n\n// optional bool request_streaming = 3;\ninline void Method::clear_request_streaming() {\n  request_streaming_ = false;\n}\ninline bool Method::request_streaming() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Method.request_streaming)\n  return request_streaming_;\n}\ninline void Method::set_request_streaming(bool value) {\n  \n  request_streaming_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Method.request_streaming)\n}\n\n// optional string response_type_url = 4;\ninline void Method::clear_response_type_url() {\n  response_type_url_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline const 
::std::string& Method::response_type_url() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Method.response_type_url)\n  return response_type_url_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Method::set_response_type_url(const ::std::string& value) {\n  \n  response_type_url_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.Method.response_type_url)\n}\ninline void Method::set_response_type_url(const char* value) {\n  \n  response_type_url_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Method.response_type_url)\n}\ninline void Method::set_response_type_url(const char* value, size_t size) {\n  \n  response_type_url_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Method.response_type_url)\n}\ninline ::std::string* Method::mutable_response_type_url() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Method.response_type_url)\n  return response_type_url_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* Method::release_response_type_url() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Method.response_type_url)\n  \n  return response_type_url_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Method::set_allocated_response_type_url(::std::string* response_type_url) {\n  if (response_type_url != NULL) {\n    \n  } else {\n    \n  }\n  response_type_url_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), response_type_url);\n  // 
@@protoc_insertion_point(field_set_allocated:google.protobuf.Method.response_type_url)\n}\n\n// optional bool response_streaming = 5;\ninline void Method::clear_response_streaming() {\n  response_streaming_ = false;\n}\ninline bool Method::response_streaming() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Method.response_streaming)\n  return response_streaming_;\n}\ninline void Method::set_response_streaming(bool value) {\n  \n  response_streaming_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Method.response_streaming)\n}\n\n// repeated .google.protobuf.Option options = 6;\ninline int Method::options_size() const {\n  return options_.size();\n}\ninline void Method::clear_options() {\n  options_.Clear();\n}\ninline const ::google::protobuf::Option& Method::options(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Method.options)\n  return options_.Get(index);\n}\ninline ::google::protobuf::Option* Method::mutable_options(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Method.options)\n  return options_.Mutable(index);\n}\ninline ::google::protobuf::Option* Method::add_options() {\n  // @@protoc_insertion_point(field_add:google.protobuf.Method.options)\n  return options_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >*\nMethod::mutable_options() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.Method.options)\n  return &options_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >&\nMethod::options() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.Method.options)\n  return options_;\n}\n\n// optional .google.protobuf.Syntax syntax = 7;\ninline void Method::clear_syntax() {\n  syntax_ = 0;\n}\ninline ::google::protobuf::Syntax Method::syntax() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Method.syntax)\n  return static_cast< ::google::protobuf::Syntax 
>(syntax_);\n}\ninline void Method::set_syntax(::google::protobuf::Syntax value) {\n  \n  syntax_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Method.syntax)\n}\n\ninline const Method* Method::internal_default_instance() {\n  return &Method_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// Mixin\n\n// optional string name = 1;\ninline void Mixin::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline const ::std::string& Mixin::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Mixin.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Mixin::set_name(const ::std::string& value) {\n  \n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.Mixin.name)\n}\ninline void Mixin::set_name(const char* value) {\n  \n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Mixin.name)\n}\ninline void Mixin::set_name(const char* value, size_t size) {\n  \n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Mixin.name)\n}\ninline ::std::string* Mixin::mutable_name() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Mixin.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* Mixin::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Mixin.name)\n  \n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void 
Mixin::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    \n  } else {\n    \n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Mixin.name)\n}\n\n// optional string root = 2;\ninline void Mixin::clear_root() {\n  root_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline const ::std::string& Mixin::root() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Mixin.root)\n  return root_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Mixin::set_root(const ::std::string& value) {\n  \n  root_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.Mixin.root)\n}\ninline void Mixin::set_root(const char* value) {\n  \n  root_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Mixin.root)\n}\ninline void Mixin::set_root(const char* value, size_t size) {\n  \n  root_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Mixin.root)\n}\ninline ::std::string* Mixin::mutable_root() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Mixin.root)\n  return root_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* Mixin::release_root() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Mixin.root)\n  \n  return root_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Mixin::set_allocated_root(::std::string* root) {\n  if (root != NULL) {\n    \n  } else {\n    \n  }\n  
root_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), root);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Mixin.root)\n}\n\ninline const Mixin* Mixin::internal_default_instance() {\n  return &Mixin_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace protobuf\n}  // namespace google\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_google_2fprotobuf_2fapi_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/api.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\nimport \"google/protobuf/source_context.proto\";\nimport \"google/protobuf/type.proto\";\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"ApiProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n// Api is a light-weight descriptor for a protocol buffer service.\nmessage Api {\n\n  // The fully qualified name of this api, including package name\n  // followed by the api's simple name.\n  string name = 1;\n\n  // The methods of this api, in unspecified order.\n  repeated Method methods = 2;\n\n  // Any metadata attached to the API.\n  repeated Option options = 3;\n\n  // A version string for this api. If specified, must have the form\n  // `major-version.minor-version`, as in `1.10`. If the minor version\n  // is omitted, it defaults to zero. If the entire version field is\n  // empty, the major version is derived from the package name, as\n  // outlined below. If the field is not empty, the version in the\n  // package name will be verified to be consistent with what is\n  // provided here.\n  //\n  // The versioning schema uses [semantic\n  // versioning](http://semver.org) where the major version number\n  // indicates a breaking change and the minor version an additive,\n  // non-breaking change. 
Both version numbers are signals to users\n  // what to expect from different versions, and should be carefully\n  // chosen based on the product plan.\n  //\n  // The major version is also reflected in the package name of the\n  // API, which must end in `v<major-version>`, as in\n  // `google.feature.v1`. For major versions 0 and 1, the suffix can\n  // be omitted. Zero major versions must only be used for\n  // experimental, none-GA apis.\n  //\n  //\n  string version = 4;\n\n  // Source context for the protocol buffer service represented by this\n  // message.\n  SourceContext source_context = 5;\n\n  // Included APIs. See [Mixin][].\n  repeated Mixin mixins = 6;\n\n  // The source syntax of the service.\n  Syntax syntax = 7;\n}\n\n// Method represents a method of an api.\nmessage Method {\n\n  // The simple name of this method.\n  string name = 1;\n\n  // A URL of the input message type.\n  string request_type_url = 2;\n\n  // If true, the request is streamed.\n  bool request_streaming = 3;\n\n  // The URL of the output message type.\n  string response_type_url = 4;\n\n  // If true, the response is streamed.\n  bool response_streaming = 5;\n\n  // Any metadata attached to the method.\n  repeated Option options = 6;\n\n  // The source syntax of this method.\n  Syntax syntax = 7;\n}\n\n// Declares an API to be included in this API. The including API must\n// redeclare all the methods from the included API, but documentation\n// and options are inherited as follows:\n//\n// - If after comment and whitespace stripping, the documentation\n//   string of the redeclared method is empty, it will be inherited\n//   from the original method.\n//\n// - Each annotation belonging to the service config (http,\n//   visibility) which is not set in the redeclared method will be\n//   inherited.\n//\n// - If an http annotation is inherited, the path pattern will be\n//   modified as follows. 
Any version prefix will be replaced by the\n//   version of the including API plus the [root][] path if specified.\n//\n// Example of a simple mixin:\n//\n//     package google.acl.v1;\n//     service AccessControl {\n//       // Get the underlying ACL object.\n//       rpc GetAcl(GetAclRequest) returns (Acl) {\n//         option (google.api.http).get = \"/v1/{resource=**}:getAcl\";\n//       }\n//     }\n//\n//     package google.storage.v2;\n//     service Storage {\n//       rpc GetAcl(GetAclRequest) returns (Acl);\n//\n//       // Get a data record.\n//       rpc GetData(GetDataRequest) returns (Data) {\n//         option (google.api.http).get = \"/v2/{resource=**}\";\n//       }\n//     }\n//\n// Example of a mixin configuration:\n//\n//     apis:\n//     - name: google.storage.v2.Storage\n//       mixins:\n//       - name: google.acl.v1.AccessControl\n//\n// The mixin construct implies that all methods in `AccessControl` are\n// also declared with same name and request/response types in\n// `Storage`. A documentation generator or annotation processor will\n// see the effective `Storage.GetAcl` method after inherting\n// documentation and annotations as follows:\n//\n//     service Storage {\n//       // Get the underlying ACL object.\n//       rpc GetAcl(GetAclRequest) returns (Acl) {\n//         option (google.api.http).get = \"/v2/{resource=**}:getAcl\";\n//       }\n//       ...\n//     }\n//\n// Note how the version in the path pattern changed from `v1` to `v2`.\n//\n// If the `root` field in the mixin is specified, it should be a\n// relative path under which inherited HTTP paths are placed. 
Example:\n//\n//     apis:\n//     - name: google.storage.v2.Storage\n//       mixins:\n//       - name: google.acl.v1.AccessControl\n//         root: acls\n//\n// This implies the following inherited HTTP annotation:\n//\n//     service Storage {\n//       // Get the underlying ACL object.\n//       rpc GetAcl(GetAclRequest) returns (Acl) {\n//         option (google.api.http).get = \"/v2/acls/{resource=**}:getAcl\";\n//       }\n//       ...\n//     }\nmessage Mixin {\n  // The fully qualified name of the API which is included.\n  string name = 1;\n\n  // If non-empty specifies a path under which inherited HTTP paths\n  // are rooted.\n  string root = 2;\n}\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/arena.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file defines an Arena allocator for better allocation performance.\n\n#ifndef GOOGLE_PROTOBUF_ARENA_H__\n#define GOOGLE_PROTOBUF_ARENA_H__\n\n#include <limits>\n#ifdef max\n#undef max  // Visual Studio defines this macro\n#endif\n#if __cplusplus >= 201103L\n#include <google/protobuf/stubs/type_traits.h>\n#endif\n#if defined(_MSC_VER) && !_HAS_EXCEPTIONS\n// Work around bugs in MSVC <typeinfo> header when _HAS_EXCEPTIONS=0.\n#include <exception>\n#include <typeinfo>\nnamespace std {\nusing type_info = ::type_info;\n}\n#else\n#include <typeinfo>\n#endif\n\n#include <google/protobuf/stubs/atomic_sequence_num.h>\n#include <google/protobuf/stubs/atomicops.h>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/logging.h>\n#include <google/protobuf/stubs/mutex.h>\n#include <google/protobuf/stubs/type_traits.h>\n\n\nnamespace google {\nnamespace protobuf {\n\nclass Arena;       // defined below\nclass Message;     // message.h\n\nnamespace internal {\nclass ArenaString; // arenastring.h\nclass LazyField;   // lazy_field.h\n\ntemplate<typename Type>\nclass GenericTypeHandler; // repeated_field.h\n\n// Templated cleanup methods.\ntemplate<typename T> void arena_destruct_object(void* object) {\n  reinterpret_cast<T*>(object)->~T();\n}\ntemplate<typename T> void arena_delete_object(void* object) {\n  delete reinterpret_cast<T*>(object);\n}\ninline void arena_free(void* object, size_t size) {\n#if 
defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)\n  ::operator delete(object, size);\n#else\n  ::operator delete(object);\n#endif\n}\n\n}  // namespace internal\n\n// ArenaOptions provides optional additional parameters to arena construction\n// that control its block-allocation behavior.\nstruct ArenaOptions {\n  // This defines the size of the first block requested from the system malloc.\n  // Subsequent block sizes will increase in a geometric series up to a maximum.\n  size_t start_block_size;\n\n  // This defines the maximum block size requested from system malloc (unless an\n  // individual arena allocation request occurs with a size larger than this\n  // maximum). Requested block sizes increase up to this value, then remain\n  // here.\n  size_t max_block_size;\n\n  // An initial block of memory for the arena to use, or NULL for none. If\n  // provided, the block must live at least as long as the arena itself. The\n  // creator of the Arena retains ownership of the block after the Arena is\n  // destroyed.\n  char* initial_block;\n\n  // The size of the initial block, if provided.\n  size_t initial_block_size;\n\n  // A function pointer to an alloc method that returns memory blocks of size\n  // requested. By default, it contains a ptr to the malloc function.\n  //\n  // NOTE: block_alloc and dealloc functions are expected to behave like\n  // malloc and free, including Asan poisoning.\n  void* (*block_alloc)(size_t);\n  // A function pointer to a dealloc method that takes ownership of the blocks\n  // from the arena. By default, it contains a ptr to a wrapper function that\n  // calls free.\n  void (*block_dealloc)(void*, size_t);\n\n  // Hooks for adding external functionality such as user-specific metrics\n  // collection, specific debugging abilities, etc.\n  // Init hook may return a pointer to a cookie to be stored in the arena.\n  // reset and destruction hooks will then be called with the same cookie\n  // pointer. 
This allows us to save an external object per arena instance and\n  // use it on the other hooks (Note: It is just as legal for init to return\n  // NULL and not use the cookie feature).\n  // on_arena_reset and on_arena_destruction also receive the space used in\n  // the arena just before the reset.\n  void* (*on_arena_init)(Arena* arena);\n  void (*on_arena_reset)(Arena* arena, void* cookie, uint64 space_used);\n  void (*on_arena_destruction)(Arena* arena, void* cookie, uint64 space_used);\n\n  // type_info is promised to be static - its lifetime extends to\n  // match program's lifetime (It is given by typeid operator).\n  // Note: typeid(void) will be passed as allocated_type every time we\n  // intentionally want to avoid monitoring an allocation. (i.e. internal\n  // allocations for managing the arena)\n  void (*on_arena_allocation)(const std::type_info* allocated_type,\n      uint64 alloc_size, void* cookie);\n\n  ArenaOptions()\n      : start_block_size(kDefaultStartBlockSize),\n        max_block_size(kDefaultMaxBlockSize),\n        initial_block(NULL),\n        initial_block_size(0),\n        block_alloc(&::operator new),\n        block_dealloc(&internal::arena_free),\n        on_arena_init(NULL),\n        on_arena_reset(NULL),\n        on_arena_destruction(NULL),\n        on_arena_allocation(NULL) {}\n\n private:\n  // Constants define default starting block size and max block size for\n  // arena allocator behavior -- see descriptions above.\n  static const size_t kDefaultStartBlockSize = 256;\n  static const size_t kDefaultMaxBlockSize   = 8192;\n};\n\n// Support for non-RTTI environments. (The metrics hooks API uses type\n// information.)\n#ifndef GOOGLE_PROTOBUF_NO_RTTI\n#define RTTI_TYPE_ID(type) (&typeid(type))\n#else\n#define RTTI_TYPE_ID(type) (NULL)\n#endif\n\n// Arena allocator. 
Arena allocation replaces ordinary (heap-based) allocation\n// with new/delete, and improves performance by aggregating allocations into\n// larger blocks and freeing allocations all at once. Protocol messages are\n// allocated on an arena by using Arena::CreateMessage<T>(Arena*), below, and\n// are automatically freed when the arena is destroyed.\n//\n// This is a thread-safe implementation: multiple threads may allocate from the\n// arena concurrently. Destruction is not thread-safe and the destructing\n// thread must synchronize with users of the arena first.\n//\n// An arena provides two allocation interfaces: CreateMessage<T>, which works\n// for arena-enabled proto2 message types as well as other types that satisfy\n// the appropriate protocol (described below), and Create<T>, which works for\n// any arbitrary type T. CreateMessage<T> is better when the type T supports it,\n// because this interface (i) passes the arena pointer to the created object so\n// that its sub-objects and internal allocations can use the arena too, and (ii)\n// elides the object's destructor call when possible. Create<T> does not place\n// any special requirements on the type T, and will invoke the object's\n// destructor when the arena is destroyed.\n//\n// The arena message allocation protocol, required by CreateMessage<T>, is as\n// follows:\n//\n// - The type T must have (at least) two constructors: a constructor with no\n//   arguments, called when a T is allocated on the heap; and a constructor with\n//   a google::protobuf::Arena* argument, called when a T is allocated on an arena. If the\n//   second constructor is called with a NULL arena pointer, it must be\n//   equivalent to invoking the first (no-argument) constructor.\n//\n// - The type T must have a particular type trait: a nested type\n//   |InternalArenaConstructable_|. This is usually a typedef to |void|. 
If no\n//   such type trait exists, then the instantiation CreateMessage<T> will fail\n//   to compile.\n//\n// - The type T *may* have the type trait |DestructorSkippable_|. If this type\n//   trait is present in the type, then its destructor will not be called if and\n//   only if it was passed a non-NULL arena pointer. If this type trait is not\n//   present on the type, then its destructor is always called when the\n//   containing arena is destroyed.\n//\n// - One- and two-user-argument forms of CreateMessage<T>() also exist that\n//   forward these constructor arguments to T's constructor: for example,\n//   CreateMessage<T>(Arena*, arg1, arg2) forwards to a constructor T(Arena*,\n//   arg1, arg2).\n//\n// This protocol is implemented by all arena-enabled proto2 message classes as\n// well as RepeatedPtrField.\n//\n// Do NOT subclass Arena. This class will be marked as final when C++11 is\n// enabled.\nclass LIBPROTOBUF_EXPORT Arena {\n public:\n  // Arena constructor taking custom options. See ArenaOptions below for\n  // descriptions of the options available.\n  explicit Arena(const ArenaOptions& options) : options_(options) {\n    Init();\n  }\n\n  // Default constructor with sensible default options, tuned for average\n  // use-cases.\n  Arena() {\n    Init();\n  }\n\n  // Destructor deletes all owned heap allocated objects, and destructs objects\n  // that have non-trivial destructors, except for proto2 message objects whose\n  // destructors can be skipped. Also, frees all blocks except the initial block\n  // if it was passed in.\n  ~Arena();\n\n  // API to create proto2 message objects on the arena. If the arena passed in\n  // is NULL, then a heap allocated object is returned. 
Type T must be a message\n  // defined in a .proto file with cc_enable_arenas set to true, otherwise a\n  // compilation error will occur.\n  //\n  // RepeatedField and RepeatedPtrField may also be instantiated directly on an\n  // arena with this method.\n  //\n  // This function also accepts any type T that satisfies the arena message\n  // allocation protocol, documented above.\n  template <typename T> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  static T* CreateMessage(::google::protobuf::Arena* arena) {\n    if (arena == NULL) {\n      return new T;\n    } else {\n      return arena->CreateMessageInternal<T>(static_cast<T*>(0));\n    }\n  }\n\n  // One-argument form of CreateMessage. This is useful for constructing objects\n  // that implement the arena message construction protocol described above but\n  // take additional constructor arguments.\n  template <typename T, typename Arg> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  static T* CreateMessage(::google::protobuf::Arena* arena, const Arg& arg) {\n    if (arena == NULL) {\n      return new T(NULL, arg);\n    } else {\n      return arena->CreateMessageInternal<T>(static_cast<T*>(0),\n                                             arg);\n    }\n  }\n\n  // Two-argument form of CreateMessage. This is useful for constructing objects\n  // that implement the arena message construction protocol described above but\n  // take additional constructor arguments.\n  template <typename T, typename Arg1, typename Arg2> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  static T* CreateMessage(::google::protobuf::Arena* arena,\n                          const Arg1& arg1,\n                          const Arg2& arg2) {\n    if (arena == NULL) {\n      return new T(NULL, arg1, arg2);\n    } else {\n      return arena->CreateMessageInternal<T>(static_cast<T*>(0),\n                                             arg1, arg2);\n    }\n  }\n\n  // API to create any objects on the arena. 
Note that only the object will\n  // be created on the arena; the underlying ptrs (in case of a proto2 message)\n  // will be still heap allocated. Proto messages should usually be allocated\n  // with CreateMessage<T>() instead.\n  //\n  // Note that even if T satisfies the arena message construction protocol\n  // (InternalArenaConstructable_ trait and optional DestructorSkippable_\n  // trait), as described above, this function does not follow the protocol;\n  // instead, it treats T as a black-box type, just as if it did not have these\n  // traits. Specifically, T's constructor arguments will always be only those\n  // passed to Create<T>() -- no additional arena pointer is implicitly added.\n  // Furthermore, the destructor will always be called at arena destruction time\n  // (unless the destructor is trivial). Hence, from T's point of view, it is as\n  // if the object were allocated on the heap (except that the underlying memory\n  // is obtained from the arena).\n  template <typename T> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  static T* Create(::google::protobuf::Arena* arena) {\n    if (arena == NULL) {\n      return new T();\n    } else {\n      return arena->CreateInternal<T>(google::protobuf::internal::has_trivial_destructor<T>::value);\n    }\n  }\n\n  // Version of the above with one constructor argument for the created object.\n  template <typename T, typename Arg> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  static T* Create(::google::protobuf::Arena* arena, const Arg& arg) {\n    if (arena == NULL) {\n      return new T(arg);\n    } else {\n      return arena->CreateInternal<T>(google::protobuf::internal::has_trivial_destructor<T>::value,\n                                      arg);\n    }\n  }\n\n  // Version of the above with two constructor arguments for the created object.\n  template <typename T, typename Arg1, typename Arg2> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  static T* Create(::google::protobuf::Arena* arena, const Arg1& arg1, const Arg2& arg2) {\n    if 
(arena == NULL) {\n      return new T(arg1, arg2);\n    } else {\n      return arena->CreateInternal<T>(google::protobuf::internal::has_trivial_destructor<T>::value,\n                                      arg1, arg2);\n    }\n  }\n\n  // Version of the above with three constructor arguments for the created\n  // object.\n  template <typename T, typename Arg1, typename Arg2, typename Arg3>\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE static T* Create(::google::protobuf::Arena* arena,\n                                           const Arg1& arg1, const Arg2& arg2,\n                                           const Arg3& arg3) {\n    if (arena == NULL) {\n      return new T(arg1, arg2, arg3);\n    } else {\n      return arena->CreateInternal<T>(google::protobuf::internal::has_trivial_destructor<T>::value,\n                                      arg1, arg2, arg3);\n    }\n  }\n\n  // Version of the above with four constructor arguments for the created\n  // object.\n  template <typename T, typename Arg1, typename Arg2, typename Arg3,\n            typename Arg4>\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE static T* Create(::google::protobuf::Arena* arena,\n                                           const Arg1& arg1, const Arg2& arg2,\n                                           const Arg3& arg3, const Arg4& arg4) {\n    if (arena == NULL) {\n      return new T(arg1, arg2, arg3, arg4);\n    } else {\n      return arena->CreateInternal<T>(google::protobuf::internal::has_trivial_destructor<T>::value,\n                                      arg1, arg2, arg3, arg4);\n    }\n  }\n\n  // Version of the above with five constructor arguments for the created\n  // object.\n  template <typename T, typename Arg1, typename Arg2, typename Arg3,\n            typename Arg4, typename Arg5>\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE static T* Create(::google::protobuf::Arena* arena,\n                                           const Arg1& arg1, const Arg2& arg2,\n                                           const Arg3& arg3, 
const Arg4& arg4,\n                                           const Arg5& arg5) {\n    if (arena == NULL) {\n      return new T(arg1, arg2, arg3, arg4, arg5);\n    } else {\n      return arena->CreateInternal<T>(google::protobuf::internal::has_trivial_destructor<T>::value,\n                                      arg1, arg2, arg3, arg4, arg5);\n    }\n  }\n\n  // Version of the above with six constructor arguments for the created\n  // object.\n  template <typename T, typename Arg1, typename Arg2, typename Arg3,\n            typename Arg4, typename Arg5, typename Arg6>\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE static T* Create(::google::protobuf::Arena* arena,\n                                           const Arg1& arg1, const Arg2& arg2,\n                                           const Arg3& arg3, const Arg4& arg4,\n                                           const Arg5& arg5, const Arg6& arg6) {\n    if (arena == NULL) {\n      return new T(arg1, arg2, arg3, arg4, arg5, arg6);\n    } else {\n      return arena->CreateInternal<T>(google::protobuf::internal::has_trivial_destructor<T>::value,\n                                      arg1, arg2, arg3, arg4, arg5, arg6);\n    }\n  }\n\n  // Version of the above with seven constructor arguments for the created\n  // object.\n  template <typename T, typename Arg1, typename Arg2, typename Arg3,\n            typename Arg4, typename Arg5, typename Arg6, typename Arg7>\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE static T* Create(::google::protobuf::Arena* arena,\n                                           const Arg1& arg1, const Arg2& arg2,\n                                           const Arg3& arg3, const Arg4& arg4,\n                                           const Arg5& arg5, const Arg6& arg6,\n                                           const Arg7& arg7) {\n    if (arena == NULL) {\n      return new T(arg1, arg2, arg3, arg4, arg5, arg6, arg7);\n    } else {\n      return 
arena->CreateInternal<T>(google::protobuf::internal::has_trivial_destructor<T>::value,\n                                      arg1, arg2, arg3, arg4, arg5, arg6, arg7);\n    }\n  }\n\n  // Version of the above with eight constructor arguments for the created\n  // object.\n  template <typename T, typename Arg1, typename Arg2, typename Arg3,\n            typename Arg4, typename Arg5, typename Arg6, typename Arg7,\n            typename Arg8>\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE static T* Create(::google::protobuf::Arena* arena,\n                                           const Arg1& arg1, const Arg2& arg2,\n                                           const Arg3& arg3, const Arg4& arg4,\n                                           const Arg5& arg5, const Arg6& arg6,\n                                           const Arg7& arg7, const Arg8& arg8) {\n    if (arena == NULL) {\n      return new T(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);\n    } else {\n      return arena->CreateInternal<T>(\n          google::protobuf::internal::has_trivial_destructor<T>::value,\n          arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);\n    }\n  }\n\n  // Create an array of object type T on the arena *without* invoking the\n  // constructor of T. 
If `arena` is null, then the return value should be freed\n  // with `delete[] x;` (or `::operator delete[](x);`).\n  // To ensure safe uses, this function checks at compile time\n  // (when compiled as C++11) that T is trivially default-constructible and\n  // trivially destructible.\n  template <typename T> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  static T* CreateArray(::google::protobuf::Arena* arena, size_t num_elements) {\n    GOOGLE_CHECK_LE(num_elements,\n             std::numeric_limits<size_t>::max() / sizeof(T))\n        << \"Requested size is too large to fit into size_t.\";\n    if (arena == NULL) {\n      return static_cast<T*>(::operator new[](num_elements * sizeof(T)));\n    } else {\n      return arena->CreateInternalRawArray<T>(num_elements);\n    }\n  }\n\n  // Returns the total space used by the arena, which is the sums of the sizes\n  // of the underlying blocks. The total space used may not include the new\n  // blocks that are allocated by this arena from other threads concurrently\n  // with the call to this method.\n  GOOGLE_ATTRIBUTE_NOINLINE uint64 SpaceAllocated() const;\n  // As above, but does not include any free space in underlying blocks.\n  GOOGLE_ATTRIBUTE_NOINLINE uint64 SpaceUsed() const;\n\n  // Combines SpaceAllocated and SpaceUsed. Returns a pair of\n  // <space_allocated, space_used>.\n  GOOGLE_ATTRIBUTE_NOINLINE std::pair<uint64, uint64> SpaceAllocatedAndUsed() const;\n\n  // Frees all storage allocated by this arena after calling destructors\n  // registered with OwnDestructor() and freeing objects registered with Own().\n  // Any objects allocated on this arena are unusable after this call. It also\n  // returns the total space used by the arena which is the sums of the sizes\n  // of the allocated blocks. 
This method is not thread-safe.\n  GOOGLE_ATTRIBUTE_NOINLINE uint64 Reset();\n\n  // Adds |object| to a list of heap-allocated objects to be freed with |delete|\n  // when the arena is destroyed or reset.\n  template <typename T> GOOGLE_ATTRIBUTE_NOINLINE\n  void Own(T* object) {\n    OwnInternal(object, google::protobuf::internal::is_convertible<T*, ::google::protobuf::Message*>());\n  }\n\n  // Adds |object| to a list of objects whose destructors will be manually\n  // called when the arena is destroyed or reset. This differs from Own() in\n  // that it does not free the underlying memory with |delete|; hence, it is\n  // normally only used for objects that are placement-newed into\n  // arena-allocated memory.\n  template <typename T> GOOGLE_ATTRIBUTE_NOINLINE\n  void OwnDestructor(T* object) {\n    if (object != NULL) {\n      AddListNode(object, &internal::arena_destruct_object<T>);\n    }\n  }\n\n  // Adds a custom member function on an object to the list of destructors that\n  // will be manually called when the arena is destroyed or reset. This differs\n  // from OwnDestructor() in that any member function may be specified, not only\n  // the class destructor.\n  GOOGLE_ATTRIBUTE_NOINLINE void OwnCustomDestructor(void* object,\n                                              void (*destruct)(void*)) {\n    AddListNode(object, destruct);\n  }\n\n  // Retrieves the arena associated with |value| if |value| is an arena-capable\n  // message, or NULL otherwise. 
This differs from value->GetArena() in that the\n  // latter is a virtual call, while this method is a templated call that\n  // resolves at compile-time.\n  template<typename T> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  static ::google::protobuf::Arena* GetArena(const T* value) {\n    return GetArenaInternal(value, static_cast<T*>(0));\n  }\n\n private:\n  struct InternalIsArenaConstructableHelper {\n    template<typename U>\n    static char ArenaConstructable(\n        const typename U::InternalArenaConstructable_*);\n    template<typename U>\n    static double ArenaConstructable(...);\n  };\n\n public:\n  // Helper typetrait that indicates support for arenas in a type T at compile\n  // time. This is public only to allow construction of higher-level templated\n  // utilities. is_arena_constructable<T>::value is true if the message type T\n  // has arena support enabled, and false otherwise.\n  //\n  // This is inside Arena because only Arena has the friend relationships\n  // necessary to see the underlying generated code traits.\n  template <typename T>\n  struct is_arena_constructable\n      : public google::protobuf::internal::integral_constant<\n            bool, sizeof(InternalIsArenaConstructableHelper::ArenaConstructable<\n                         const T>(static_cast<const T*>(0))) == sizeof(char)> {\n  };\n\n private:\n  // Blocks are variable length malloc-ed objects.  The following structure\n  // describes the common header for all blocks.\n  struct Block {\n    void* owner;   // &ThreadCache of thread that owns this block, or\n                   // &this->owner if not yet owned by a thread.\n    Block* next;   // Next block in arena (may have different owner)\n    // ((char*) &block) + pos is next available byte. 
It is always\n    // aligned at a multiple of 8 bytes.\n    size_t pos;\n    size_t size;  // total size of the block.\n    GOOGLE_ATTRIBUTE_ALWAYS_INLINE size_t avail() const { return size - pos; }\n    // data follows\n  };\n\n  template<typename Type> friend class ::google::protobuf::internal::GenericTypeHandler;\n  friend class MockArena;              // For unit-testing.\n  friend class internal::ArenaString;  // For AllocateAligned.\n  friend class internal::LazyField;    // For CreateMaybeMessage.\n\n  struct ThreadCache {\n    // The ThreadCache is considered valid as long as this matches the\n    // lifecycle_id of the arena being used.\n    int64 last_lifecycle_id_seen;\n    Block* last_block_used_;\n  };\n\n  static const size_t kHeaderSize = sizeof(Block);\n  static google::protobuf::internal::SequenceNumber lifecycle_id_generator_;\n#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)\n  // Android ndk does not support GOOGLE_THREAD_LOCAL keyword so we use a custom thread\n  // local storage class we implemented.\n  // iOS also does not support the GOOGLE_THREAD_LOCAL keyword.\n  static ThreadCache& thread_cache();\n#elif defined(PROTOBUF_USE_DLLS)\n  // Thread local variables cannot be exposed through DLL interface but we can\n  // wrap them in static functions.\n  static ThreadCache& thread_cache();\n#else\n  static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_;\n  static ThreadCache& thread_cache() { return thread_cache_; }\n#endif\n\n  // SFINAE for skipping addition to delete list for a message type when created\n  // with CreateMessage. This is mainly to skip proto2/proto1 message objects\n  // with cc_enable_arenas=true from being part of the delete list. 
Also, note,\n  // compiler will optimize out the branch in CreateInternal<T>.\n  template<typename T>\n  static inline bool SkipDeleteList(typename T::DestructorSkippable_*) {\n    return true;\n  }\n\n  // For message objects that don't have the DestructorSkippable_ trait, we\n  // always add to the delete list.\n  template<typename T>\n  static inline bool SkipDeleteList(...) {\n    return google::protobuf::internal::has_trivial_destructor<T>::value;\n  }\n\n private:\n  struct InternalIsDestructorSkippableHelper {\n    template<typename U>\n    static char DestructorSkippable(\n        const typename U::DestructorSkippable_*);\n    template<typename U>\n    static double DestructorSkippable(...);\n  };\n\n public:\n  // Helper typetrait that indicates whether the desctructor of type T should be\n  // called when arena is destroyed at compile time. This is only to allow\n  // construction of higher-level templated utilities.\n  // is_destructor_skippable<T>::value is true if the destructor of the message\n  // type T should not be called when arena is destroyed or false otherwise.\n  // This is inside Arena because only Arena has the friend relationships\n  // necessary to see the underlying generated code traits.\n  template<typename T>\n  struct is_destructor_skippable\n      : public google::protobuf::internal::integral_constant<\n            bool,\n            sizeof(InternalIsDestructorSkippableHelper::DestructorSkippable<\n                   const T>(static_cast<const T*>(0))) == sizeof(char) ||\n                google::protobuf::internal::has_trivial_destructor<T>::value> {};\n\n private:\n  // CreateMessage<T> requires that T supports arenas, but this private method\n  // works whether or not T supports arenas. These are not exposed to user code\n  // as it can cause confusing API usages, and end up having double free in\n  // user code. 
These are used only internally from LazyField and Repeated\n  // fields, since they are designed to work in all mode combinations.\n  template<typename Msg> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  static Msg* CreateMaybeMessage(\n      Arena* arena, typename Msg::InternalArenaConstructable_*) {\n    return CreateMessage<Msg>(arena);\n  }\n\n  template<typename T> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  static T* CreateMaybeMessage(Arena* arena, ...) {\n    return Create<T>(arena);\n  }\n\n  // Just allocate the required size for the given type assuming the\n  // type has a trivial constructor.\n  template<typename T> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  T* CreateInternalRawArray(size_t num_elements) {\n    GOOGLE_CHECK_LE(num_elements,\n             std::numeric_limits<size_t>::max() / sizeof(T))\n        << \"Requested size is too large to fit into size_t.\";\n    return static_cast<T*>(\n        AllocateAligned(RTTI_TYPE_ID(T), sizeof(T) * num_elements));\n  }\n\n  template <typename T> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  T* CreateInternal(bool skip_explicit_ownership) {\n    T* t = new (AllocateAligned(RTTI_TYPE_ID(T), sizeof(T))) T();\n    if (!skip_explicit_ownership) {\n      AddListNode(t, &internal::arena_destruct_object<T>);\n    }\n    return t;\n  }\n\n  template <typename T, typename Arg> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  T* CreateInternal(bool skip_explicit_ownership, const Arg& arg) {\n    T* t = new (AllocateAligned(RTTI_TYPE_ID(T), sizeof(T))) T(arg);\n    if (!skip_explicit_ownership) {\n      AddListNode(t, &internal::arena_destruct_object<T>);\n    }\n    return t;\n  }\n\n  template <typename T, typename Arg1, typename Arg2> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  T* CreateInternal(\n      bool skip_explicit_ownership, const Arg1& arg1, const Arg2& arg2) {\n    T* t = new (AllocateAligned(RTTI_TYPE_ID(T), sizeof(T))) T(arg1, arg2);\n    if (!skip_explicit_ownership) {\n      AddListNode(t, &internal::arena_destruct_object<T>);\n    }\n    return t;\n  }\n\n  template 
<typename T, typename Arg1, typename Arg2, typename Arg3>\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE T* CreateInternal(bool skip_explicit_ownership,\n                                            const Arg1& arg1,\n                                            const Arg2& arg2,\n                                            const Arg3& arg3) {\n    T* t = new (AllocateAligned(RTTI_TYPE_ID(T), sizeof(T)))\n        T(arg1, arg2, arg3);\n    if (!skip_explicit_ownership) {\n      AddListNode(t, &internal::arena_destruct_object<T>);\n    }\n    return t;\n  }\n\n  template <typename T, typename Arg1, typename Arg2, typename Arg3,\n            typename Arg4>\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE T* CreateInternal(bool skip_explicit_ownership,\n                                            const Arg1& arg1,\n                                            const Arg2& arg2,\n                                            const Arg3& arg3,\n                                            const Arg4& arg4) {\n    T* t = new (AllocateAligned(RTTI_TYPE_ID(T), sizeof(T)))\n        T(arg1, arg2, arg3, arg4);\n    if (!skip_explicit_ownership) {\n      AddListNode(t, &internal::arena_destruct_object<T>);\n    }\n    return t;\n  }\n\n  template <typename T, typename Arg1, typename Arg2, typename Arg3,\n            typename Arg4, typename Arg5>\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE T* CreateInternal(bool skip_explicit_ownership,\n                                            const Arg1& arg1,\n                                            const Arg2& arg2,\n                                            const Arg3& arg3,\n                                            const Arg4& arg4,\n                                            const Arg5& arg5) {\n    T* t = new (AllocateAligned(RTTI_TYPE_ID(T), sizeof(T)))\n        T(arg1, arg2, arg3, arg4, arg5);\n    if (!skip_explicit_ownership) {\n      AddListNode(t, &internal::arena_destruct_object<T>);\n    }\n    return t;\n  }\n\n  template <typename T, typename Arg1, typename 
Arg2, typename Arg3,\n            typename Arg4, typename Arg5, typename Arg6>\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE T* CreateInternal(bool skip_explicit_ownership,\n                                            const Arg1& arg1,\n                                            const Arg2& arg2,\n                                            const Arg3& arg3,\n                                            const Arg4& arg4,\n                                            const Arg5& arg5,\n                                            const Arg6& arg6) {\n    T* t = new (AllocateAligned(RTTI_TYPE_ID(T), sizeof(T)))\n        T(arg1, arg2, arg3, arg4, arg5, arg6);\n    if (!skip_explicit_ownership) {\n      AddListNode(t, &internal::arena_destruct_object<T>);\n    }\n    return t;\n  }\n\n  template <typename T, typename Arg1, typename Arg2, typename Arg3,\n            typename Arg4, typename Arg5, typename Arg6, typename Arg7>\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE T* CreateInternal(bool skip_explicit_ownership,\n                                            const Arg1& arg1,\n                                            const Arg2& arg2,\n                                            const Arg3& arg3,\n                                            const Arg4& arg4,\n                                            const Arg5& arg5,\n                                            const Arg6& arg6,\n                                            const Arg7& arg7) {\n    T* t = new (AllocateAligned(RTTI_TYPE_ID(T), sizeof(T)))\n        T(arg1, arg2, arg3, arg4, arg5, arg6, arg7);\n    if (!skip_explicit_ownership) {\n      AddListNode(t, &internal::arena_destruct_object<T>);\n    }\n    return t;\n  }\n\n  template <typename T, typename Arg1, typename Arg2, typename Arg3,\n            typename Arg4, typename Arg5, typename Arg6, typename Arg7,\n            typename Arg8>\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE T* CreateInternal(bool skip_explicit_ownership,\n                                            const Arg1& 
arg1,\n                                            const Arg2& arg2,\n                                            const Arg3& arg3,\n                                            const Arg4& arg4,\n                                            const Arg5& arg5,\n                                            const Arg6& arg6,\n                                            const Arg7& arg7,\n                                            const Arg8& arg8) {\n    T* t = new (AllocateAligned(RTTI_TYPE_ID(T), sizeof(T)))\n        T(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);\n    if (!skip_explicit_ownership) {\n      AddListNode(t, &internal::arena_destruct_object<T>);\n    }\n    return t;\n  }\n\n  template <typename T> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  T* CreateMessageInternal(typename T::InternalArenaConstructable_*) {\n    return CreateInternal<T, Arena*>(SkipDeleteList<T>(static_cast<T*>(0)),\n                                     this);\n  }\n\n  template <typename T, typename Arg> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  T* CreateMessageInternal(typename T::InternalArenaConstructable_*,\n                           const Arg& arg) {\n    return CreateInternal<T, Arena*>(SkipDeleteList<T>(static_cast<T*>(0)),\n                                     this, arg);\n  }\n\n  template <typename T, typename Arg1, typename Arg2> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  T* CreateMessageInternal(typename T::InternalArenaConstructable_*,\n                           const Arg1& arg1, const Arg2& arg2) {\n    return CreateInternal<T, Arena*>(SkipDeleteList<T>(static_cast<T*>(0)),\n                                     this, arg1, arg2);\n  }\n\n  // CreateInArenaStorage is used to implement map field. 
Without it,\n  // google::protobuf::Map need to call generated message's protected arena constructor,\n  // which needs to declare google::protobuf::Map as friend of generated message.\n  template <typename T>\n  static void CreateInArenaStorage(T* ptr, Arena* arena) {\n    CreateInArenaStorageInternal(ptr, arena,\n                                 typename is_arena_constructable<T>::type());\n    RegisterDestructorInternal(ptr, arena,\n                               typename is_destructor_skippable<T>::type());\n  }\n\n  template <typename T>\n  static void CreateInArenaStorageInternal(\n      T* ptr, Arena* arena, google::protobuf::internal::true_type) {\n    new (ptr) T(arena);\n  }\n  template <typename T>\n  static void CreateInArenaStorageInternal(\n      T* ptr, Arena* arena, google::protobuf::internal::false_type) {\n    new (ptr) T();\n  }\n\n  template <typename T>\n  static void RegisterDestructorInternal(\n      T* ptr, Arena* arena, google::protobuf::internal::true_type) {}\n  template <typename T>\n  static void RegisterDestructorInternal(\n      T* ptr, Arena* arena, google::protobuf::internal::false_type) {\n    arena->OwnDestructor(ptr);\n  }\n\n  // These implement Own(), which registers an object for deletion (destructor\n  // call and operator delete()). The second parameter has type 'true_type' if T\n  // is a subtype of ::google::protobuf::Message and 'false_type' otherwise. 
Collapsing\n  // all template instantiations to one for generic Message reduces code size,\n  // using the virtual destructor instead.\n  template<typename T> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  void OwnInternal(T* object, google::protobuf::internal::true_type) {\n    if (object != NULL) {\n      AddListNode(object, &internal::arena_delete_object< ::google::protobuf::Message >);\n    }\n  }\n  template<typename T> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  void OwnInternal(T* object, google::protobuf::internal::false_type) {\n    if (object != NULL) {\n      AddListNode(object, &internal::arena_delete_object<T>);\n    }\n  }\n\n  // Implementation for GetArena(). Only message objects with\n  // InternalArenaConstructable_ tags can be associated with an arena, and such\n  // objects must implement a GetArenaNoVirtual() method.\n  template<typename T> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  static ::google::protobuf::Arena* GetArenaInternal(\n      const T* value, typename T::InternalArenaConstructable_*) {\n    return value->GetArenaNoVirtual();\n  }\n\n  template<typename T> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  static ::google::protobuf::Arena* GetArenaInternal(const T* value, ...) 
{\n    return NULL;\n  }\n\n  // Allocate and also optionally call on_arena_allocation callback with the\n  // allocated type info when the hooks are in place in ArenaOptions and\n  // the cookie is not null.\n  void* AllocateAligned(const std::type_info* allocated, size_t n);\n\n  // Allocate an internal allocation, avoiding optional typed monitoring.\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE void* AllocateAligned(size_t n) {\n    return AllocateAligned(NULL, n);\n  }\n\n  void Init();\n\n  // Free all blocks and return the total space used which is the sums of sizes\n  // of the all the allocated blocks.\n  uint64 FreeBlocks();\n\n  // Add object pointer and cleanup function pointer to the list.\n  // TODO(rohananil, cfallin): We could pass in a sub-arena into this method\n  // to avoid polluting blocks of this arena with list nodes. This would help in\n  // mixed mode (where many protobufs have cc_enable_arenas=false), and is an\n  // alternative to a chunked linked-list, but with extra overhead of *next.\n  void AddListNode(void* elem, void (*cleanup)(void*));\n  // Delete or Destruct all objects owned by the arena.\n  void CleanupList();\n  uint64 ResetInternal();\n\n  inline void SetThreadCacheBlock(Block* block) {\n    thread_cache().last_block_used_ = block;\n    thread_cache().last_lifecycle_id_seen = lifecycle_id_;\n  }\n\n  int64 lifecycle_id_;  // Unique for each arena. 
Changes on Reset().\n\n  google::protobuf::internal::AtomicWord blocks_;  // Head of linked list of all allocated blocks\n  google::protobuf::internal::AtomicWord hint_;    // Fast thread-local block access\n\n  // Node contains the ptr of the object to be cleaned up and the associated\n  // cleanup function ptr.\n  struct Node {\n    void* elem;              // Pointer to the object to be cleaned up.\n    void (*cleanup)(void*);  // Function pointer to the destructor or deleter.\n    Node* next;              // Next node in the list.\n  };\n\n  google::protobuf::internal::AtomicWord cleanup_list_;  // Head of a linked list of nodes containing object\n                             // ptrs and cleanup methods.\n\n  bool owns_first_block_;    // Indicates that arena owns the first block\n  Mutex blocks_lock_;\n\n  void AddBlock(Block* b);\n  // Access must be synchronized, either by blocks_lock_ or by being called from\n  // Init()/Reset().\n  void AddBlockInternal(Block* b);\n  void* SlowAlloc(size_t n);\n  Block* FindBlock(void* me);\n  Block* NewBlock(void* me, Block* my_last_block, size_t n,\n                  size_t start_block_size, size_t max_block_size);\n  static void* AllocFromBlock(Block* b, size_t n);\n  template <typename Key, typename T>\n  friend class Map;\n\n  // The arena may save a cookie it receives from the external on_init hook\n  // and then use it when calling the on_reset and on_destruction hooks.\n  void* hooks_cookie_;\n\n  ArenaOptions options_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Arena);\n};\n\n// Defined above for supporting environments without RTTI.\n#undef RTTI_TYPE_ID\n\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_ARENA_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/arenastring.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_ARENASTRING_H__\n#define GOOGLE_PROTOBUF_ARENASTRING_H__\n\n#include <string>\n\n#include <google/protobuf/stubs/logging.h>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/fastmem.h>\n#include <google/protobuf/arena.h>\n#include <google/protobuf/generated_message_util.h>\n\n\n\n// This is the implementation of arena string fields written for the open-source\n// release. The ArenaStringPtr struct below is an internal implementation class\n// and *should not be used* by user code. 
It is used to collect string\n// operations together into one place and abstract away the underlying\n// string-field pointer representation, so that (for example) an alternate\n// implementation that knew more about ::std::string's internals could integrate more\n// closely with the arena allocator.\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\nstruct LIBPROTOBUF_EXPORT ArenaStringPtr {\n  inline void Set(const ::std::string* default_value,\n                  const ::std::string& value, ::google::protobuf::Arena* arena) {\n    if (ptr_ == default_value) {\n      CreateInstance(arena, &value);\n    } else {\n      *ptr_ = value;\n    }\n  }\n\n  // Basic accessors.\n  inline const ::std::string& Get(const ::std::string* /* default_value */) const {\n    return *ptr_;\n  }\n\n  inline ::std::string* Mutable(const ::std::string* default_value,\n                           ::google::protobuf::Arena* arena) {\n    if (ptr_ == default_value) {\n      CreateInstance(arena, default_value);\n    }\n    return ptr_;\n  }\n\n  // Release returns a ::std::string* instance that is heap-allocated and is not\n  // Own()'d by any arena. If the field was not set, it returns NULL. The caller\n  // retains ownership. Clears this field back to NULL state. Used to implement\n  // release_<field>() methods on generated classes.\n  inline ::std::string* Release(const ::std::string* default_value,\n                           ::google::protobuf::Arena* arena) {\n    if (ptr_ == default_value) {\n      return NULL;\n    }\n    ::std::string* released = NULL;\n    if (arena != NULL) {\n      // ptr_ is owned by the arena -- we need to return a copy.\n      released = new ::std::string(*ptr_);\n    } else {\n      released = ptr_;\n    }\n    ptr_ = const_cast< ::std::string* >(default_value);\n    return released;\n  }\n\n  // UnsafeArenaRelease returns a ::std::string*, but it may be arena-owned (i.e.\n  // have its destructor already registered) if arena != NULL. 
If the field was\n  // not set, this returns NULL. This method clears this field back to NULL\n  // state. Used to implement unsafe_arena_release_<field>() methods on\n  // generated classes.\n  inline ::std::string* UnsafeArenaRelease(const ::std::string* default_value,\n                                      ::google::protobuf::Arena* /* arena */) {\n    if (ptr_ == default_value) {\n      return NULL;\n    }\n    ::std::string* released = ptr_;\n    ptr_ = const_cast< ::std::string* >(default_value);\n    return released;\n  }\n\n  // Takes a string that is heap-allocated, and takes ownership. The string's\n  // destructor is registered with the arena. Used to implement\n  // set_allocated_<field> in generated classes.\n  inline void SetAllocated(const ::std::string* default_value,\n                           ::std::string* value, ::google::protobuf::Arena* arena) {\n    if (arena == NULL && ptr_ != default_value) {\n      Destroy(default_value, arena);\n    }\n    if (value != NULL) {\n      ptr_ = value;\n      if (arena != NULL) {\n        arena->Own(value);\n      }\n    } else {\n      ptr_ = const_cast< ::std::string* >(default_value);\n    }\n  }\n\n  // Takes a string that has lifetime equal to the arena's lifetime. The arena\n  // must be non-null. It is safe only to pass this method a value returned by\n  // UnsafeArenaRelease() on another field of a message in the same arena. Used\n  // to implement unsafe_arena_set_allocated_<field> in generated classes.\n  inline void UnsafeArenaSetAllocated(const ::std::string* default_value,\n                                      ::std::string* value,\n                                      ::google::protobuf::Arena* /* arena */) {\n    if (value != NULL) {\n      ptr_ = value;\n    } else {\n      ptr_ = const_cast< ::std::string* >(default_value);\n    }\n  }\n\n  // Swaps internal pointers. 
Arena-safety semantics: this is guarded by the\n  // logic in Swap()/UnsafeArenaSwap() at the message level, so this method is\n  // 'unsafe' if called directly.\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE void Swap(ArenaStringPtr* other) {\n    std::swap(ptr_, other->ptr_);\n  }\n\n  // Frees storage (if not on an arena) and sets field to default value.\n  inline void Destroy(const ::std::string* default_value,\n                      ::google::protobuf::Arena* arena) {\n    if (arena == NULL && ptr_ != default_value) {\n      delete ptr_;\n    }\n    ptr_ = const_cast< ::std::string* >(default_value);\n  }\n\n  // Clears content, but keeps allocated string if arena != NULL, to avoid the\n  // overhead of heap operations. After this returns, the content (as seen by\n  // the user) will always be the empty string. Assumes that |default_value|\n  // is an empty string.\n  inline void ClearToEmpty(const ::std::string* default_value,\n                           ::google::protobuf::Arena* /* arena */) {\n    if (ptr_ == default_value) {\n      // Already set to default (which is empty) -- do nothing.\n    } else {\n      ptr_->clear();\n    }\n  }\n\n  // Clears content, but keeps allocated string if arena != NULL, to avoid the\n  // overhead of heap operations. After this returns, the content (as seen by\n  // the user) will always be equal to |default_value|.\n  inline void ClearToDefault(const ::std::string* default_value,\n                             ::google::protobuf::Arena* /* arena */) {\n    if (ptr_ == default_value) {\n      // Already set to default -- do nothing.\n    } else {\n      // Have another allocated string -- rather than throwing this away and\n      // resetting ptr_ to the canonical default string instance, we just reuse\n      // this instance.\n      *ptr_ = *default_value;\n    }\n  }\n\n  // Called from generated code / reflection runtime only. 
Resets value to point\n  // to a default string pointer, with the semantics that this ArenaStringPtr\n  // does not own the pointed-to memory. Disregards initial value of ptr_ (so\n  // this is the *ONLY* safe method to call after construction or when\n  // reinitializing after becoming the active field in a oneof union).\n  inline void UnsafeSetDefault(const ::std::string* default_value) {\n    // Casting away 'const' is safe here: accessors ensure that ptr_ is only\n    // returned as a const if it is equal to default_value.\n    ptr_ = const_cast< ::std::string* >(default_value);\n  }\n\n  // The 'NoArena' variants of methods below assume arena == NULL and are\n  // optimized to provide very little overhead relative to a raw string pointer\n  // (while still being in-memory compatible with other code that assumes\n  // ArenaStringPtr). Note the invariant that a class instance that has only\n  // ever been mutated by NoArena methods must *only* be in the String state\n  // (i.e., tag bits are not used), *NEVER* ArenaString. 
This allows all\n  // tagged-pointer manipulations to be avoided.\n  inline void SetNoArena(const ::std::string* default_value,\n                         const ::std::string& value) {\n    if (ptr_ == default_value) {\n      CreateInstanceNoArena(&value);\n    } else {\n      *ptr_ = value;\n    }\n  }\n\n  void AssignWithDefault(const ::std::string* default_value, ArenaStringPtr value);\n\n  inline const ::std::string& GetNoArena(const ::std::string* /* default_value */) const {\n    return *ptr_;\n  }\n\n  inline ::std::string* MutableNoArena(const ::std::string* default_value) {\n    if (ptr_ == default_value) {\n      CreateInstanceNoArena(default_value);\n    }\n    return ptr_;\n  }\n\n  inline ::std::string* ReleaseNoArena(const ::std::string* default_value) {\n    if (ptr_ == default_value) {\n      return NULL;\n    } else {\n      ::std::string* released = ptr_;\n      ptr_ = const_cast< ::std::string* >(default_value);\n      return released;\n    }\n  }\n\n  inline void SetAllocatedNoArena(const ::std::string* default_value,\n                                  ::std::string* value) {\n    if (ptr_ != default_value) {\n      delete ptr_;\n    }\n    if (value != NULL) {\n      ptr_ = value;\n    } else {\n      ptr_ = const_cast< ::std::string* >(default_value);\n    }\n  }\n\n  inline void DestroyNoArena(const ::std::string* default_value) {\n    if (ptr_ != default_value) {\n      delete ptr_;\n    }\n    ptr_ = NULL;\n  }\n\n  inline void ClearToEmptyNoArena(const ::std::string* default_value) {\n    if (ptr_ == default_value) {\n      // Nothing: already equal to default (which is the empty string).\n    } else {\n      ptr_->clear();\n    }\n  }\n\n  inline void ClearToDefaultNoArena(const ::std::string* default_value) {\n    if (ptr_ == default_value) {\n      // Nothing: already set to default.\n    } else {\n      // Reuse existing allocated instance.\n      *ptr_ = *default_value;\n    }\n  }\n\n  // Internal accessor used only at parse time to 
provide direct access to the\n  // raw pointer from the shared parse routine (in the non-arenas case). The\n  // parse routine does the string allocation in order to save code size in the\n  // generated parsing code.\n  inline ::std::string** UnsafeRawStringPointer() {\n    return &ptr_;\n  }\n\n private:\n  ::std::string* ptr_;\n\n  GOOGLE_ATTRIBUTE_NOINLINE void CreateInstance(::google::protobuf::Arena* arena,\n                                         const ::std::string* initial_value) {\n    // Assumes ptr_ is not NULL.\n    if (initial_value != NULL) {\n      ptr_ = new ::std::string(*initial_value);\n    } else {\n      ptr_ = new ::std::string();\n    }\n    if (arena != NULL) {\n      arena->Own(ptr_);\n    }\n  }\n  GOOGLE_ATTRIBUTE_NOINLINE void CreateInstanceNoArena(const ::std::string* initial_value) {\n    if (initial_value != NULL) {\n      ptr_ = new ::std::string(*initial_value);\n    } else {\n      ptr_ = new ::std::string();\n    }\n  }\n};\n\n}  // namespace internal\n}  // namespace protobuf\n\n\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_ARENASTRING_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/code_generator.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Defines the abstract interface implemented by each of the language-specific\n// code generators.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_CODE_GENERATOR_H__\n#define GOOGLE_PROTOBUF_COMPILER_CODE_GENERATOR_H__\n\n#include <google/protobuf/stubs/common.h>\n#include <string>\n#include <vector>\n#include <utility>\n\nnamespace google {\nnamespace protobuf {\n\nnamespace io { class ZeroCopyOutputStream; }\nclass FileDescriptor;\n\nnamespace compiler {\n\n// Defined in this file.\nclass CodeGenerator;\nclass GeneratorContext;\n\n// The abstract interface to a class which generates code implementing a\n// particular proto file in a particular language.  A number of these may\n// be registered with CommandLineInterface to support various languages.\nclass LIBPROTOC_EXPORT CodeGenerator {\n public:\n  inline CodeGenerator() {}\n  virtual ~CodeGenerator();\n\n  // Generates code for the given proto file, generating one or more files in\n  // the given output directory.\n  //\n  // A parameter to be passed to the generator can be specified on the command\n  // line. This is intended to be used to pass generator specific parameters.\n  // It is empty if no parameter was given. 
ParseGeneratorParameter (below),\n  // can be used to accept multiple parameters within the single parameter\n  // command line flag.\n  //\n  // Returns true if successful.  Otherwise, sets *error to a description of\n  // the problem (e.g. \"invalid parameter\") and returns false.\n  virtual bool Generate(const FileDescriptor* file,\n                        const string& parameter,\n                        GeneratorContext* generator_context,\n                        string* error) const = 0;\n\n  // Generates code for all given proto files.\n  //\n  // WARNING: The canonical code generator design produces one or two output\n  // files per input .proto file, and we do not wish to encourage alternate\n  // designs.\n  //\n  // A parameter is given as passed on the command line, as in |Generate()|\n  // above.\n  //\n  // Returns true if successful.  Otherwise, sets *error to a description of\n  // the problem (e.g. \"invalid parameter\") and returns false.\n  virtual bool GenerateAll(const vector<const FileDescriptor*>& files,\n                           const string& parameter,\n                           GeneratorContext* generator_context,\n                           string* error) const;\n\n  // This is no longer used, but this class is part of the opensource protobuf\n  // library, so it has to remain to keep vtables the same for the current\n  // version of the library. When protobufs does a api breaking change, the\n  // method can be removed.\n  virtual bool HasGenerateAll() const { return true; }\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CodeGenerator);\n};\n\n// CodeGenerators generate one or more files in a given directory.  
This\n// abstract interface represents the directory to which the CodeGenerator is\n// to write and other information about the context in which the Generator\n// runs.\nclass LIBPROTOC_EXPORT GeneratorContext {\n public:\n  inline GeneratorContext() {}\n  virtual ~GeneratorContext();\n\n  // Opens the given file, truncating it if it exists, and returns a\n  // ZeroCopyOutputStream that writes to the file.  The caller takes ownership\n  // of the returned object.  This method never fails (a dummy stream will be\n  // returned instead).\n  //\n  // The filename given should be relative to the root of the source tree.\n  // E.g. the C++ generator, when generating code for \"foo/bar.proto\", will\n  // generate the files \"foo/bar.pb.h\" and \"foo/bar.pb.cc\"; note that\n  // \"foo/\" is included in these filenames.  The filename is not allowed to\n  // contain \".\" or \"..\" components.\n  virtual io::ZeroCopyOutputStream* Open(const string& filename) = 0;\n\n  // Similar to Open() but the output will be appended to the file if exists\n  virtual io::ZeroCopyOutputStream* OpenForAppend(const string& filename);\n\n  // Creates a ZeroCopyOutputStream which will insert code into the given file\n  // at the given insertion point.  See plugin.proto (plugin.pb.h) for more\n  // information on insertion points.  The default implementation\n  // assert-fails -- it exists only for backwards-compatibility.\n  //\n  // WARNING:  This feature is currently EXPERIMENTAL and is subject to change.\n  virtual io::ZeroCopyOutputStream* OpenForInsert(\n      const string& filename, const string& insertion_point);\n\n  // Returns a vector of FileDescriptors for all the files being compiled\n  // in this run.  
Useful for languages, such as Go, that treat files\n  // differently when compiled as a set rather than individually.\n  virtual void ListParsedFiles(vector<const FileDescriptor*>* output);\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(GeneratorContext);\n};\n\n// The type GeneratorContext was once called OutputDirectory. This typedef\n// provides backward compatibility.\ntypedef GeneratorContext OutputDirectory;\n\n// Several code generators treat the parameter argument as holding a\n// list of options separated by commas.  This helper function parses\n// a set of comma-delimited name/value pairs: e.g.,\n//   \"foo=bar,baz,qux=corge\"\n// parses to the pairs:\n//   (\"foo\", \"bar\"), (\"baz\", \"\"), (\"qux\", \"corge\")\nextern void ParseGeneratorParameter(const string&,\n            vector<pair<string, string> >*);\n\n}  // namespace compiler\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_COMPILER_CODE_GENERATOR_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/command_line_interface.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Implements the Protocol Compiler front-end such that it may be reused by\n// custom compilers written to support other languages.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_COMMAND_LINE_INTERFACE_H__\n#define GOOGLE_PROTOBUF_COMPILER_COMMAND_LINE_INTERFACE_H__\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/hash.h>\n#include <string>\n#include <vector>\n#include <map>\n#include <set>\n#include <utility>\n\nnamespace google {\nnamespace protobuf {\n\nclass Descriptor;            // descriptor.h\nclass DescriptorPool;        // descriptor.h\nclass FileDescriptor;        // descriptor.h\nclass FileDescriptorProto;   // descriptor.pb.h\ntemplate<typename T> class RepeatedPtrField;  // repeated_field.h\n\n}  // namespace protobuf\n}  // namespace google\n\nnamespace google {\nnamespace protobuf {\nnamespace compiler {\n\nclass CodeGenerator;        // code_generator.h\nclass GeneratorContext;      // code_generator.h\nclass DiskSourceTree;       // importer.h\n\n// This class implements the command-line interface to the protocol compiler.\n// It is designed to make it very easy to create a custom protocol compiler\n// supporting the languages of your choice.  
For example, if you wanted to\n// create a custom protocol compiler binary which includes both the regular\n// C++ support plus support for your own custom output \"Foo\", you would\n// write a class \"FooGenerator\" which implements the CodeGenerator interface,\n// then write a main() procedure like this:\n//\n//   int main(int argc, char* argv[]) {\n//     google::protobuf::compiler::CommandLineInterface cli;\n//\n//     // Support generation of C++ source and headers.\n//     google::protobuf::compiler::cpp::CppGenerator cpp_generator;\n//     cli.RegisterGenerator(\"--cpp_out\", &cpp_generator,\n//       \"Generate C++ source and header.\");\n//\n//     // Support generation of Foo code.\n//     FooGenerator foo_generator;\n//     cli.RegisterGenerator(\"--foo_out\", &foo_generator,\n//       \"Generate Foo file.\");\n//\n//     return cli.Run(argc, argv);\n//   }\n//\n// The compiler is invoked with syntax like:\n//   protoc --cpp_out=outdir --foo_out=outdir --proto_path=src src/foo.proto\n//\n// For a full description of the command-line syntax, invoke it with --help.\nclass LIBPROTOC_EXPORT CommandLineInterface {\n public:\n  CommandLineInterface();\n  ~CommandLineInterface();\n\n  // Register a code generator for a language.\n  //\n  // Parameters:\n  // * flag_name: The command-line flag used to specify an output file of\n  //   this type.  The name must start with a '-'.  If the name is longer\n  //   than one letter, it must start with two '-'s.\n  // * generator: The CodeGenerator which will be called to generate files\n  //   of this type.\n  // * help_text: Text describing this flag in the --help output.\n  //\n  // Some generators accept extra parameters.  
You can specify this parameter\n  // on the command-line by placing it before the output directory, separated\n  // by a colon:\n  //   protoc --foo_out=enable_bar:outdir\n  // The text before the colon is passed to CodeGenerator::Generate() as the\n  // \"parameter\".\n  void RegisterGenerator(const string& flag_name,\n                         CodeGenerator* generator,\n                         const string& help_text);\n\n  // Register a code generator for a language.\n  // Besides flag_name you can specify another option_flag_name that could be\n  // used to pass extra parameters to the registered code generator.\n  // Suppose you have registered a generator by calling:\n  //   command_line_interface.RegisterGenerator(\"--foo_out\", \"--foo_opt\", ...)\n  // Then you could invoke the compiler with a command like:\n  //   protoc --foo_out=enable_bar:outdir --foo_opt=enable_baz\n  // This will pass \"enable_bar,enable_baz\" as the parameter to the generator.\n  void RegisterGenerator(const string& flag_name,\n                         const string& option_flag_name,\n                         CodeGenerator* generator,\n                         const string& help_text);\n\n  // Enables \"plugins\".  In this mode, if a command-line flag ends with \"_out\"\n  // but does not match any registered generator, the compiler will attempt to\n  // find a \"plugin\" to implement the generator.  Plugins are just executables.\n  // They should live somewhere in the PATH.\n  //\n  // The compiler determines the executable name to search for by concatenating\n  // exe_name_prefix with the unrecognized flag name, removing \"_out\".  
So, for\n  // example, if exe_name_prefix is \"protoc-\" and you pass the flag --foo_out,\n  // the compiler will try to run the program \"protoc-foo\".\n  //\n  // The plugin program should implement the following usage:\n  //   plugin [--out=OUTDIR] [--parameter=PARAMETER] PROTO_FILES < DESCRIPTORS\n  // --out indicates the output directory (as passed to the --foo_out\n  // parameter); if omitted, the current directory should be used.  --parameter\n  // gives the generator parameter, if any was provided.  The PROTO_FILES list\n  // the .proto files which were given on the compiler command-line; these are\n  // the files for which the plugin is expected to generate output code.\n  // Finally, DESCRIPTORS is an encoded FileDescriptorSet (as defined in\n  // descriptor.proto).  This is piped to the plugin's stdin.  The set will\n  // include descriptors for all the files listed in PROTO_FILES as well as\n  // all files that they import.  The plugin MUST NOT attempt to read the\n  // PROTO_FILES directly -- it must use the FileDescriptorSet.\n  //\n  // The plugin should generate whatever files are necessary, as code generators\n  // normally do.  It should write the names of all files it generates to\n  // stdout.  The names should be relative to the output directory, NOT absolute\n  // names or relative to the current directory.  If any errors occur, error\n  // messages should be written to stderr.  If an error is fatal, the plugin\n  // should exit with a non-zero exit code.\n  void AllowPlugins(const string& exe_name_prefix);\n\n  // Run the Protocol Compiler with the given command-line parameters.\n  // Returns the error code which should be returned by main().\n  //\n  // It may not be safe to call Run() in a multi-threaded environment because\n  // it calls strerror().  
I'm not sure why you'd want to do this anyway.\n  int Run(int argc, const char* const argv[]);\n\n  // Call SetInputsAreCwdRelative(true) if the input files given on the command\n  // line should be interpreted relative to the proto import path specified\n  // using --proto_path or -I flags.  Otherwise, input file names will be\n  // interpreted relative to the current working directory (or as absolute\n  // paths if they start with '/'), though they must still reside inside\n  // a directory given by --proto_path or the compiler will fail.  The latter\n  // mode is generally more intuitive and easier to use, especially e.g. when\n  // defining implicit rules in Makefiles.\n  void SetInputsAreProtoPathRelative(bool enable) {\n    inputs_are_proto_path_relative_ = enable;\n  }\n\n  // Provides some text which will be printed when the --version flag is\n  // used.  The version of libprotoc will also be printed on the next line\n  // after this text.\n  void SetVersionInfo(const string& text) {\n    version_info_ = text;\n  }\n\n\n private:\n  // -----------------------------------------------------------------\n\n  class ErrorPrinter;\n  class GeneratorContextImpl;\n  class MemoryOutputStream;\n  typedef hash_map<string, GeneratorContextImpl*> GeneratorContextMap;\n\n  // Clear state from previous Run().\n  void Clear();\n\n  // Remaps each file in input_files_ so that it is relative to one of the\n  // directories in proto_path_.  Returns false if an error occurred.  
This\n  // is only used if inputs_are_proto_path_relative_ is false.\n  bool MakeInputsBeProtoPathRelative(\n    DiskSourceTree* source_tree);\n\n  // Return status for ParseArguments() and InterpretArgument().\n  enum ParseArgumentStatus {\n    PARSE_ARGUMENT_DONE_AND_CONTINUE,\n    PARSE_ARGUMENT_DONE_AND_EXIT,\n    PARSE_ARGUMENT_FAIL\n  };\n\n  // Parse all command-line arguments.\n  ParseArgumentStatus ParseArguments(int argc, const char* const argv[]);\n\n\n  // Parses a command-line argument into a name/value pair.  Returns\n  // true if the next argument in the argv should be used as the value,\n  // false otherwise.\n  //\n  // Examples:\n  //   \"-Isrc/protos\" ->\n  //     name = \"-I\", value = \"src/protos\"\n  //   \"--cpp_out=src/foo.pb2.cc\" ->\n  //     name = \"--cpp_out\", value = \"src/foo.pb2.cc\"\n  //   \"foo.proto\" ->\n  //     name = \"\", value = \"foo.proto\"\n  bool ParseArgument(const char* arg, string* name, string* value);\n\n  // Interprets arguments parsed with ParseArgument.\n  ParseArgumentStatus InterpretArgument(const string& name,\n                                        const string& value);\n\n  // Print the --help text to stderr.\n  void PrintHelpText();\n\n  // Generate the given output file from the given input.\n  struct OutputDirective;  // see below\n  bool GenerateOutput(const vector<const FileDescriptor*>& parsed_files,\n                      const OutputDirective& output_directive,\n                      GeneratorContext* generator_context);\n  bool GeneratePluginOutput(const vector<const FileDescriptor*>& parsed_files,\n                            const string& plugin_name,\n                            const string& parameter,\n                            GeneratorContext* generator_context,\n                            string* error);\n\n  // Implements --encode and --decode.\n  bool EncodeOrDecode(const DescriptorPool* pool);\n\n  // Implements the --descriptor_set_out option.\n  bool WriteDescriptorSet(const 
vector<const FileDescriptor*> parsed_files);\n\n  // Implements the --dependency_out option\n  bool GenerateDependencyManifestFile(\n      const vector<const FileDescriptor*>& parsed_files,\n      const GeneratorContextMap& output_directories,\n      DiskSourceTree* source_tree);\n\n  // Get all transitive dependencies of the given file (including the file\n  // itself), adding them to the given list of FileDescriptorProtos.  The\n  // protos will be ordered such that every file is listed before any file that\n  // depends on it, so that you can call DescriptorPool::BuildFile() on them\n  // in order.  Any files in *already_seen will not be added, and each file\n  // added will be inserted into *already_seen.  If include_source_code_info is\n  // true then include the source code information in the FileDescriptorProtos.\n  // If include_json_name is true, populate the json_name field of\n  // FieldDescriptorProto for all fields.\n  static void GetTransitiveDependencies(\n      const FileDescriptor* file,\n      bool include_json_name,\n      bool include_source_code_info,\n      set<const FileDescriptor*>* already_seen,\n      RepeatedPtrField<FileDescriptorProto>* output);\n\n  // Implements the --print_free_field_numbers. This function prints free field\n  // numbers into stdout for the message and it's nested message types in\n  // post-order, i.e. nested types first. Printed range are left-right\n  // inclusive, i.e. [a, b].\n  //\n  // Groups:\n  // For historical reasons, groups are considered to share the same\n  // field number space with the parent message, thus it will not print free\n  // field numbers for groups. 
The field numbers used in the groups are\n  // excluded in the free field numbers of the parent message.\n  //\n  // Extension Ranges:\n  // Extension ranges are considered ocuppied field numbers and they will not be\n  // listed as free numbers in the output.\n  void PrintFreeFieldNumbers(const Descriptor* descriptor);\n\n  // -----------------------------------------------------------------\n\n  // The name of the executable as invoked (i.e. argv[0]).\n  string executable_name_;\n\n  // Version info set with SetVersionInfo().\n  string version_info_;\n\n  // Registered generators.\n  struct GeneratorInfo {\n    string flag_name;\n    string option_flag_name;\n    CodeGenerator* generator;\n    string help_text;\n  };\n  typedef map<string, GeneratorInfo> GeneratorMap;\n  GeneratorMap generators_by_flag_name_;\n  GeneratorMap generators_by_option_name_;\n  // A map from generator names to the parameters specified using the option\n  // flag. For example, if the user invokes the compiler with:\n  //   protoc --foo_out=outputdir --foo_opt=enable_bar ...\n  // Then there will be an entry (\"--foo_out\", \"enable_bar\") in this map.\n  map<string, string> generator_parameters_;\n\n  // See AllowPlugins().  If this is empty, plugins aren't allowed.\n  string plugin_prefix_;\n\n  // Maps specific plugin names to files.  When executing a plugin, this map\n  // is searched first to find the plugin executable.  
If not found here, the\n  // PATH (or other OS-specific search strategy) is searched.\n  map<string, string> plugins_;\n\n  // Stuff parsed from command line.\n  enum Mode {\n    MODE_COMPILE,  // Normal mode:  parse .proto files and compile them.\n    MODE_ENCODE,   // --encode:  read text from stdin, write binary to stdout.\n    MODE_DECODE,   // --decode:  read binary from stdin, write text to stdout.\n    MODE_PRINT,    // Print mode: print info of the given .proto files and exit.\n  };\n\n  Mode mode_;\n\n  enum PrintMode {\n    PRINT_NONE,               // Not in MODE_PRINT\n    PRINT_FREE_FIELDS,        // --print_free_fields\n  };\n\n  PrintMode print_mode_;\n\n  enum ErrorFormat {\n    ERROR_FORMAT_GCC,   // GCC error output format (default).\n    ERROR_FORMAT_MSVS   // Visual Studio output (--error_format=msvs).\n  };\n\n  ErrorFormat error_format_;\n\n  vector<pair<string, string> > proto_path_;  // Search path for proto files.\n  vector<string> input_files_;                // Names of the input proto files.\n\n  // Names of proto files which are allowed to be imported. Used by build\n  // systems to enforce depend-on-what-you-import.\n  set<string> direct_dependencies_;\n  bool direct_dependencies_explicitly_set_;\n\n  // output_directives_ lists all the files we are supposed to output and what\n  // generator to use for each.\n  struct OutputDirective {\n    string name;                // E.g. \"--foo_out\"\n    CodeGenerator* generator;   // NULL for plugins\n    string parameter;\n    string output_location;\n  };\n  vector<OutputDirective> output_directives_;\n\n  // When using --encode or --decode, this names the type we are encoding or\n  // decoding.  (Empty string indicates --decode_raw.)\n  string codec_type_;\n\n  // If --descriptor_set_out was given, this is the filename to which the\n  // FileDescriptorSet should be written.  
Otherwise, empty.\n  string descriptor_set_name_;\n\n  // If --dependency_out was given, this is the path to the file where the\n  // dependency file will be written. Otherwise, empty.\n  string dependency_out_name_;\n\n  // True if --include_imports was given, meaning that we should\n  // write all transitive dependencies to the DescriptorSet.  Otherwise, only\n  // the .proto files listed on the command-line are added.\n  bool imports_in_descriptor_set_;\n\n  // True if --include_source_info was given, meaning that we should not strip\n  // SourceCodeInfo from the DescriptorSet.\n  bool source_info_in_descriptor_set_;\n\n  // Was the --disallow_services flag used?\n  bool disallow_services_;\n\n  // See SetInputsAreProtoPathRelative().\n  bool inputs_are_proto_path_relative_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CommandLineInterface);\n};\n\n}  // namespace compiler\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_COMPILER_COMMAND_LINE_INTERFACE_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/cpp/cpp_generator.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Generates C++ code for a given .proto file.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_CPP_GENERATOR_H__\n#define GOOGLE_PROTOBUF_COMPILER_CPP_GENERATOR_H__\n\n#include <string>\n#include <google/protobuf/compiler/code_generator.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace compiler {\nnamespace cpp {\n\n// CodeGenerator implementation which generates a C++ source file and\n// header.  If you create your own protocol compiler binary and you want\n// it to support C++ output, you can do so by registering an instance of this\n// CodeGenerator with the CommandLineInterface in your main() function.\nclass LIBPROTOC_EXPORT CppGenerator : public CodeGenerator {\n public:\n  CppGenerator();\n  ~CppGenerator();\n\n  // implements CodeGenerator ----------------------------------------\n  bool Generate(const FileDescriptor* file,\n                const string& parameter,\n                GeneratorContext* generator_context,\n                string* error) const;\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CppGenerator);\n};\n\n}  // namespace cpp\n}  // namespace compiler\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_COMPILER_CPP_GENERATOR_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/csharp/csharp_generator.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Generates C# code for a given .proto file.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_CSHARP_GENERATOR_H__\n#define GOOGLE_PROTOBUF_COMPILER_CSHARP_GENERATOR_H__\n\n#include <string>\n\n#include <google/protobuf/compiler/code_generator.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace compiler {\nnamespace csharp {\n\n// CodeGenerator implementation which generates a C# source file and\n// header.  If you create your own protocol compiler binary and you want\n// it to support C# output, you can do so by registering an instance of this\n// CodeGenerator with the CommandLineInterface in your main() function.\nclass LIBPROTOC_EXPORT Generator\n    : public google::protobuf::compiler::CodeGenerator {\n  virtual bool Generate(\n      const FileDescriptor* file,\n      const string& parameter,\n      GeneratorContext* generator_context,\n      string* error) const;\n};\n\n}  // namespace csharp\n}  // namespace compiler\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_COMPILER_CSHARP_GENERATOR_H__\n\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/csharp/csharp_names.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Provides a mechanism for mapping a descriptor to the\n// fully-qualified name of the corresponding C# class.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_CSHARP_NAMES_H__\n#define GOOGLE_PROTOBUF_COMPILER_CSHARP_NAMES_H__\n\n#include <string>\n\nnamespace google {\nnamespace protobuf {\n\nclass Descriptor;\nclass EnumDescriptor;\nclass FileDescriptor;\nclass ServiceDescriptor;\n\nnamespace compiler {\nnamespace csharp {\n\n// Requires:\n//   descriptor != NULL\n//\n// Returns:\n//   The namespace to use for given file descriptor.\nstring GetFileNamespace(const FileDescriptor* descriptor);\n\n// Requires:\n//   descriptor != NULL\n//\n// Returns:\n//   The fully-qualified C# class name.\nstring GetClassName(const Descriptor* descriptor);\n\n// Requires:\n//   descriptor != NULL\n//\n// Returns:\n//   The fully-qualified name of the C# class that provides\n//   access to the file descriptor. Proto compiler generates\n//   such class for each .proto file processed.\nstring GetReflectionClassName(const FileDescriptor* descriptor);\n\n// Generates output file name for given file descriptor. If generate_directories\n// is true, the output file will be put under directory corresponding to file's\n// namespace. base_namespace can be used to strip some of the top level\n// directories. E.g. 
for file with namespace \"Bar.Foo\" and base_namespace=\"Bar\",\n// the resulting file will be put under directory \"Foo\" (and not \"Bar/Foo\").\n//\n// Requires:\n//   descriptor != NULL\n//   error != NULL\n//\n//  Returns:\n//    The file name to use as output file for given file descriptor. In case\n//    of failure, this function will return empty string and error parameter\n//    will contain the error message.\nstring GetOutputFile(\n    const google::protobuf::FileDescriptor* descriptor,\n    const string file_extension,\n    const bool generate_directories,\n    const string base_namespace,\n    string* error);\n\n}  // namespace csharp\n}  // namespace compiler\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_COMPILER_CSHARP_NAMES_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/importer.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// This file is the public interface to the .proto file parser.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_IMPORTER_H__\n#define GOOGLE_PROTOBUF_COMPILER_IMPORTER_H__\n\n#include <string>\n#include <vector>\n#include <set>\n#include <utility>\n#include <google/protobuf/descriptor.h>\n#include <google/protobuf/descriptor_database.h>\n#include <google/protobuf/compiler/parser.h>\n\nnamespace google {\nnamespace protobuf {\n\nnamespace io { class ZeroCopyInputStream; }\n\nnamespace compiler {\n\n// Defined in this file.\nclass Importer;\nclass MultiFileErrorCollector;\nclass SourceTree;\nclass DiskSourceTree;\n\n// TODO(kenton):  Move all SourceTree stuff to a separate file?\n\n// An implementation of DescriptorDatabase which loads files from a SourceTree\n// and parses them.\n//\n// Note:  This class is not thread-safe since it maintains a table of source\n//   code locations for error reporting.  However, when a DescriptorPool wraps\n//   a DescriptorDatabase, it uses mutex locking to make sure only one method\n//   of the database is called at a time, even if the DescriptorPool is used\n//   from multiple threads.  Therefore, there is only a problem if you create\n//   multiple DescriptorPools wrapping the same SourceTreeDescriptorDatabase\n//   and use them from multiple threads.\n//\n// Note:  This class does not implement FindFileContainingSymbol() or\n//   FindFileContainingExtension(); these will always return false.\nclass LIBPROTOBUF_EXPORT SourceTreeDescriptorDatabase : public DescriptorDatabase {\n public:\n  SourceTreeDescriptorDatabase(SourceTree* source_tree);\n  ~SourceTreeDescriptorDatabase();\n\n  // Instructs the SourceTreeDescriptorDatabase to report any parse errors\n  // to the given MultiFileErrorCollector.  This should be called before\n  // parsing.  error_collector must remain valid until either this method\n  // is called again or the SourceTreeDescriptorDatabase is destroyed.\n  void RecordErrorsTo(MultiFileErrorCollector* error_collector) {\n    error_collector_ = error_collector;\n  }\n\n  // Gets a DescriptorPool::ErrorCollector which records errors to the\n  // MultiFileErrorCollector specified with RecordErrorsTo().  This collector\n  // has the ability to determine exact line and column numbers of errors\n  // from the information given to it by the DescriptorPool.\n  DescriptorPool::ErrorCollector* GetValidationErrorCollector() {\n    using_validation_error_collector_ = true;\n    return &validation_error_collector_;\n  }\n\n  // implements DescriptorDatabase -----------------------------------\n  bool FindFileByName(const string& filename, FileDescriptorProto* output);\n  bool FindFileContainingSymbol(const string& symbol_name,\n                                FileDescriptorProto* output);\n  bool FindFileContainingExtension(const string& containing_type,\n                                   int field_number,\n                                   FileDescriptorProto* output);\n\n private:\n  class SingleFileErrorCollector;\n\n  SourceTree* source_tree_;\n  MultiFileErrorCollector* error_collector_;\n\n  class LIBPROTOBUF_EXPORT ValidationErrorCollector : public DescriptorPool::ErrorCollector {\n   public:\n    ValidationErrorCollector(SourceTreeDescriptorDatabase* owner);\n    ~ValidationErrorCollector();\n\n    // implements ErrorCollector ---------------------------------------\n    void AddError(const string& filename,\n                  const string& element_name,\n                  const Message* descriptor,\n                  ErrorLocation location,\n                  const string& message);\n\n    virtual void AddWarning(const string& filename,\n                            const string& element_name,\n                            const Message* descriptor,\n                            ErrorLocation location,\n                            const string& message);\n\n   private:\n    SourceTreeDescriptorDatabase* owner_;\n  };\n  friend class ValidationErrorCollector;\n\n  bool using_validation_error_collector_;\n  SourceLocationTable source_locations_;\n  ValidationErrorCollector validation_error_collector_;\n};\n\n// Simple interface for parsing .proto files.  This wraps the process\n// of opening the file, parsing it with a Parser, recursively parsing all its\n// imports, and then cross-linking the results to produce a FileDescriptor.\n//\n// This is really just a thin wrapper around SourceTreeDescriptorDatabase.\n// You may find that SourceTreeDescriptorDatabase is more flexible.\n//\n// TODO(kenton):  I feel like this class is not well-named.\nclass LIBPROTOBUF_EXPORT Importer {\n public:\n  Importer(SourceTree* source_tree,\n           MultiFileErrorCollector* error_collector);\n  ~Importer();\n\n  // Import the given file and build a FileDescriptor representing it.  If\n  // the file is already in the DescriptorPool, the existing FileDescriptor\n  // will be returned.  The FileDescriptor is property of the DescriptorPool,\n  // and will remain valid until it is destroyed.  If any errors occur, they\n  // will be reported using the error collector and Import() will return NULL.\n  //\n  // A particular Importer object will only report errors for a particular\n  // file once.  All future attempts to import the same file will return NULL\n  // without reporting any errors.  The idea is that you might want to import\n  // a lot of files without seeing the same errors over and over again.  If\n  // you want to see errors for the same files repeatedly, you can use a\n  // separate Importer object to import each one (but use the same\n  // DescriptorPool so that they can be cross-linked).\n  const FileDescriptor* Import(const string& filename);\n\n  // The DescriptorPool in which all imported FileDescriptors and their\n  // contents are stored.\n  inline const DescriptorPool* pool() const {\n    return &pool_;\n  }\n\n  void AddUnusedImportTrackFile(const string& file_name);\n  void ClearUnusedImportTrackFiles();\n\n private:\n  SourceTreeDescriptorDatabase database_;\n  DescriptorPool pool_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Importer);\n};\n\n// If the importer encounters problems while trying to import the proto files,\n// it reports them to a MultiFileErrorCollector.\nclass LIBPROTOBUF_EXPORT MultiFileErrorCollector {\n public:\n  inline MultiFileErrorCollector() {}\n  virtual ~MultiFileErrorCollector();\n\n  // Line and column numbers are zero-based.  A line number of -1 indicates\n  // an error with the entire file (e.g. \"not found\").\n  virtual void AddError(const string& filename, int line, int column,\n                        const string& message) = 0;\n\n  virtual void AddWarning(const string& filename, int line, int column,\n                          const string& message) {}\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MultiFileErrorCollector);\n};\n\n// Abstract interface which represents a directory tree containing proto files.\n// Used by the default implementation of Importer to resolve import statements\n// Most users will probably want to use the DiskSourceTree implementation,\n// below.\nclass LIBPROTOBUF_EXPORT SourceTree {\n public:\n  inline SourceTree() {}\n  virtual ~SourceTree();\n\n  // Open the given file and return a stream that reads it, or NULL if not\n  // found.  The caller takes ownership of the returned object.  The filename\n  // must be a path relative to the root of the source tree and must not\n  // contain \".\" or \"..\" components.\n  virtual io::ZeroCopyInputStream* Open(const string& filename) = 0;\n\n  // If Open() returns NULL, calling this method immediately will return an\n  // description of the error.\n  // Subclasses should implement this method and return a meaningful value for\n  // better error reporting.\n  // TODO(xiaofeng): change this to a pure virtual function.\n  virtual string GetLastErrorMessage();\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(SourceTree);\n};\n\n// An implementation of SourceTree which loads files from locations on disk.\n// Multiple mappings can be set up to map locations in the DiskSourceTree to\n// locations in the physical filesystem.\nclass LIBPROTOBUF_EXPORT DiskSourceTree : public SourceTree {\n public:\n  DiskSourceTree();\n  ~DiskSourceTree();\n\n  // Map a path on disk to a location in the SourceTree.  The path may be\n  // either a file or a directory.  If it is a directory, the entire tree\n  // under it will be mapped to the given virtual location.  To map a directory\n  // to the root of the source tree, pass an empty string for virtual_path.\n  //\n  // If multiple mapped paths apply when opening a file, they will be searched\n  // in order.  For example, if you do:\n  //   MapPath(\"bar\", \"foo/bar\");\n  //   MapPath(\"\", \"baz\");\n  // and then you do:\n  //   Open(\"bar/qux\");\n  // the DiskSourceTree will first try to open foo/bar/qux, then baz/bar/qux,\n  // returning the first one that opens successfuly.\n  //\n  // disk_path may be an absolute path or relative to the current directory,\n  // just like a path you'd pass to open().\n  void MapPath(const string& virtual_path, const string& disk_path);\n\n  // Return type for DiskFileToVirtualFile().\n  enum DiskFileToVirtualFileResult {\n    SUCCESS,\n    SHADOWED,\n    CANNOT_OPEN,\n    NO_MAPPING\n  };\n\n  // Given a path to a file on disk, find a virtual path mapping to that\n  // file.  The first mapping created with MapPath() whose disk_path contains\n  // the filename is used.  However, that virtual path may not actually be\n  // usable to open the given file.  Possible return values are:\n  // * SUCCESS: The mapping was found.  *virtual_file is filled in so that\n  //   calling Open(*virtual_file) will open the file named by disk_file.\n  // * SHADOWED: A mapping was found, but using Open() to open this virtual\n  //   path will end up returning some different file.  This is because some\n  //   other mapping with a higher precedence also matches this virtual path\n  //   and maps it to a different file that exists on disk.  *virtual_file\n  //   is filled in as it would be in the SUCCESS case.  *shadowing_disk_file\n  //   is filled in with the disk path of the file which would be opened if\n  //   you were to call Open(*virtual_file).\n  // * CANNOT_OPEN: The mapping was found and was not shadowed, but the\n  //   file specified cannot be opened.  When this value is returned,\n  //   errno will indicate the reason the file cannot be opened.  *virtual_file\n  //   will be set to the virtual path as in the SUCCESS case, even though\n  //   it is not useful.\n  // * NO_MAPPING: Indicates that no mapping was found which contains this\n  //   file.\n  DiskFileToVirtualFileResult\n    DiskFileToVirtualFile(const string& disk_file,\n                          string* virtual_file,\n                          string* shadowing_disk_file);\n\n  // Given a virtual path, find the path to the file on disk.\n  // Return true and update disk_file with the on-disk path if the file exists.\n  // Return false and leave disk_file untouched if the file doesn't exist.\n  bool VirtualFileToDiskFile(const string& virtual_file, string* disk_file);\n\n  // implements SourceTree -------------------------------------------\n  virtual io::ZeroCopyInputStream* Open(const string& filename);\n\n  virtual string GetLastErrorMessage();\n\n private:\n  struct Mapping {\n    string virtual_path;\n    string disk_path;\n\n    inline Mapping(const string& virtual_path_param,\n                   const string& disk_path_param)\n      : virtual_path(virtual_path_param), disk_path(disk_path_param) {}\n  };\n  vector<Mapping> mappings_;\n  string last_error_message_;\n\n  // Like Open(), but returns the on-disk path in disk_file if disk_file is\n  // non-NULL and the file could be successfully opened.\n  io::ZeroCopyInputStream* OpenVirtualFile(const string& virtual_file,\n                                           string* disk_file);\n\n  // Like Open() but given the actual on-disk path.\n  io::ZeroCopyInputStream* OpenDiskFile(const string& filename);\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(DiskSourceTree);\n};\n\n}  // namespace compiler\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_COMPILER_IMPORTER_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/java/java_generator.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Generates Java code for a given .proto file.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_JAVA_GENERATOR_H__\n#define GOOGLE_PROTOBUF_COMPILER_JAVA_GENERATOR_H__\n\n#include <string>\n#include <google/protobuf/compiler/code_generator.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace compiler {\nnamespace java {\n\n// CodeGenerator implementation which generates Java code.  If you create your\n// own protocol compiler binary and you want it to support Java output, you\n// can do so by registering an instance of this CodeGenerator with the\n// CommandLineInterface in your main() function.\nclass LIBPROTOC_EXPORT JavaGenerator : public CodeGenerator {\n public:\n  JavaGenerator();\n  ~JavaGenerator();\n\n  // implements CodeGenerator ----------------------------------------\n  bool Generate(const FileDescriptor* file,\n                const string& parameter,\n                GeneratorContext* context,\n                string* error) const;\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(JavaGenerator);\n};\n\n}  // namespace java\n}  // namespace compiler\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_COMPILER_JAVA_GENERATOR_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/java/java_names.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Provides a mechanism for mapping a descriptor to the\n// fully-qualified name of the corresponding Java class.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_JAVA_NAMES_H__\n#define GOOGLE_PROTOBUF_COMPILER_JAVA_NAMES_H__\n\n#include <string>\n\nnamespace google {\nnamespace protobuf {\n\nclass Descriptor;\nclass EnumDescriptor;\nclass FileDescriptor;\nclass ServiceDescriptor;\n\nnamespace compiler {\nnamespace java {\n\n// Requires:\n//   descriptor != NULL\n//\n// Returns:\n//   The fully-qualified Java class name.\nstring ClassName(const Descriptor* descriptor);\n\n// Requires:\n//   descriptor != NULL\n//\n// Returns:\n//   The fully-qualified Java class name.\nstring ClassName(const EnumDescriptor* descriptor);\n\n// Requires:\n//   descriptor != NULL\n//\n// Returns:\n//   The fully-qualified Java class name.\nstring ClassName(const FileDescriptor* descriptor);\n\n// Requires:\n//   descriptor != NULL\n//\n// Returns:\n//   The fully-qualified Java class name.\nstring ClassName(const ServiceDescriptor* descriptor);\n\n}  // namespace java\n}  // namespace compiler\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_COMPILER_JAVA_NAMES_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/javanano/javanano_generator.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// http://code.google.com/p/protobuf/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Generates Java nano code for a given .proto file.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_JAVANANO_NANO_GENERATOR_H__\n#define GOOGLE_PROTOBUF_COMPILER_JAVANANO_NANO_GENERATOR_H__\n\n#include <string>\n#include <google/protobuf/compiler/code_generator.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace compiler {\nnamespace javanano {\n\n// CodeGenerator implementation which generates Java nano code.  If you create your\n// own protocol compiler binary and you want it to support Java output for the\n// nano runtime, you can do so by registering an instance of this CodeGenerator with\n// the CommandLineInterface in your main() function.\nclass LIBPROTOC_EXPORT JavaNanoGenerator : public CodeGenerator {\n public:\n  JavaNanoGenerator();\n  ~JavaNanoGenerator();\n\n  // implements CodeGenerator ----------------------------------------\n  bool Generate(const FileDescriptor* file,\n                const string& parameter,\n                GeneratorContext* output_directory,\n                string* error) const;\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(JavaNanoGenerator);\n};\n\n}  // namespace javanano\n}  // namespace compiler\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_COMPILER_JAVANANO_NANO_GENERATOR_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/js/js_generator.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Generates JavaScript code for a given .proto file.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_JS_GENERATOR_H__\n#define GOOGLE_PROTOBUF_COMPILER_JS_GENERATOR_H__\n\n#include <string>\n#include <set>\n\n#include <google/protobuf/stubs/logging.h>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/compiler/code_generator.h>\n\nnamespace google {\nnamespace protobuf {\n\nclass Descriptor;\nclass EnumDescriptor;\nclass FieldDescriptor;\nclass OneofDescriptor;\nclass FileDescriptor;\n\nnamespace io { class Printer; }\n\nnamespace compiler {\nnamespace js {\n\nstruct GeneratorOptions {\n  // Output path.\n  string output_dir;\n  // Namespace prefix.\n  string namespace_prefix;\n  // Enable binary-format support?\n  bool binary;\n  // What style of imports should be used.\n  enum ImportStyle {\n    kImportClosure,   // goog.require()\n    kImportCommonJs,  // require()\n    kImportBrowser,   // no import statements\n    kImportEs6,       // import { member } from ''\n  } import_style;\n\n  GeneratorOptions()\n      : output_dir(\".\"),\n        namespace_prefix(\"\"),\n        binary(false),\n        import_style(kImportClosure),\n        add_require_for_enums(false),\n        testonly(false),\n        library(\"\"),\n        error_on_name_conflict(false),\n        broken_proto3_semantics(false),\n        extension(\".js\"),\n        one_output_file_per_input_file(false) {}\n\n  bool ParseFromOptions(\n      const vector< 
pair< string, string > >& options,\n      string* error);\n\n  // Returns the file name extension to use for generated code.\n  string GetFileNameExtension() const {\n    return import_style == kImportClosure ? extension : \"_pb.js\";\n  }\n\n  enum OutputMode {\n    // Create an output file for each input .proto file.\n    kOneOutputFilePerInputFile,\n    // Create an output file for each type.\n    kOneOutputFilePerType,\n    // Put everything in a single file named by the library option.\n    kEverythingInOneFile,\n  };\n\n  // Indicates how to output the generated code based on the provided options.\n  OutputMode output_mode() const;\n\n  // The remaining options are only relevant when we are using kImportClosure.\n\n  // Add a `goog.requires()` call for each enum type used. If not set, a\n  // forward declaration with `goog.forwardDeclare` is produced instead.\n  bool add_require_for_enums;\n  // Set this as a test-only module via `goog.setTestOnly();`.\n  bool testonly;\n  // Create a library with name <name>_lib.js rather than a separate .js file\n  // per type?\n  string library;\n  // Error if there are two types that would generate the same output file?\n  bool error_on_name_conflict;\n  // Preserve the broken proto3 semantics from the old codegen? This amounts\n  // to using proto2 field presence semantics even for proto3 files. DO NOT\n  // USE except for migrating legacy code.\n  bool broken_proto3_semantics;\n  // The extension to use for output file names.\n  string extension;\n  // Create a separate output file for each input file?\n  bool one_output_file_per_input_file;\n};\n\n// CodeGenerator implementation which generates a JavaScript source file and\n// header.  
If you create your own protocol compiler binary and you want it to\n// support JavaScript output, you can do so by registering an instance of this\n// CodeGenerator with the CommandLineInterface in your main() function.\nclass LIBPROTOC_EXPORT Generator : public CodeGenerator {\n public:\n  Generator() {}\n  virtual ~Generator() {}\n\n  virtual bool Generate(const FileDescriptor* file,\n                        const string& parameter,\n                        GeneratorContext* context,\n                        string* error) const {\n    *error = \"Unimplemented Generate() method. Call GenerateAll() instead.\";\n    return false;\n  }\n\n  virtual bool HasGenerateAll() const { return true; }\n\n  virtual bool GenerateAll(const vector<const FileDescriptor*>& files,\n                           const string& parameter,\n                           GeneratorContext* context,\n                           string* error) const;\n\n private:\n  void GenerateHeader(const GeneratorOptions& options,\n                      io::Printer* printer) const;\n\n  // Generate goog.provides() calls.\n  void FindProvides(const GeneratorOptions& options,\n                    io::Printer* printer,\n                    const vector<const FileDescriptor*>& file,\n                    std::set<string>* provided) const;\n  void FindProvidesForFile(const GeneratorOptions& options,\n                           io::Printer* printer,\n                           const FileDescriptor* file,\n                           std::set<string>* provided) const;\n  void FindProvidesForMessage(const GeneratorOptions& options,\n                              io::Printer* printer,\n                              const Descriptor* desc,\n                              std::set<string>* provided) const;\n  void FindProvidesForEnum(const GeneratorOptions& options,\n                           io::Printer* printer,\n                           const EnumDescriptor* enumdesc,\n                           std::set<string>* 
provided) const;\n  // For extension fields at file scope.\n  void FindProvidesForFields(const GeneratorOptions& options,\n                             io::Printer* printer,\n                             const vector<const FieldDescriptor*>& fields,\n                             std::set<string>* provided) const;\n  // Print the goog.provides() found by the methods above.\n  void GenerateProvides(const GeneratorOptions& options,\n                        io::Printer* printer,\n                        std::set<string>* provided) const;\n\n  // Generate goog.setTestOnly() if indicated.\n  void GenerateTestOnly(const GeneratorOptions& options,\n                        io::Printer* printer) const;\n\n  // Generate goog.requires() calls.\n  void GenerateRequiresForLibrary(const GeneratorOptions& options,\n                                  io::Printer* printer,\n                                  const vector<const FileDescriptor*>& files,\n                                  std::set<string>* provided) const;\n  void GenerateRequiresForMessage(const GeneratorOptions& options,\n                        io::Printer* printer,\n                        const Descriptor* desc,\n                        std::set<string>* provided) const;\n  // For extension fields at file scope.\n  void GenerateRequiresForExtensions(\n      const GeneratorOptions& options, io::Printer* printer,\n      const vector<const FieldDescriptor*>& fields,\n      std::set<string>* provided) const;\n  void GenerateRequiresImpl(const GeneratorOptions& options,\n                            io::Printer* printer,\n                            std::set<string>* required,\n                            std::set<string>* forwards,\n                            std::set<string>* provided,\n                            bool require_jspb,\n                            bool require_extension) const;\n  void FindRequiresForMessage(const GeneratorOptions& options,\n                              const Descriptor* desc,\n          
                    std::set<string>* required,\n                              std::set<string>* forwards,\n                              bool* have_message) const;\n  void FindRequiresForField(const GeneratorOptions& options,\n                            const FieldDescriptor* field,\n                            std::set<string>* required,\n                            std::set<string>* forwards) const;\n  void FindRequiresForExtension(const GeneratorOptions& options,\n                                const FieldDescriptor* field,\n                                std::set<string>* required,\n                                std::set<string>* forwards) const;\n\n  void GenerateFile(const GeneratorOptions& options,\n                    io::Printer* printer,\n                    const FileDescriptor* file) const;\n\n  // Generate definitions for all message classes and enums in all files,\n  // processing the files in dependence order.\n  void GenerateFilesInDepOrder(const GeneratorOptions& options,\n                               io::Printer* printer,\n                               const vector<const FileDescriptor*>& file) const;\n  // Helper for above.\n  void GenerateFileAndDeps(const GeneratorOptions& options,\n                           io::Printer* printer,\n                           const FileDescriptor* root,\n                           std::set<const FileDescriptor*>* all_files,\n                           std::set<const FileDescriptor*>* generated) const;\n\n  // Generate definitions for all message classes and enums.\n  void GenerateClassesAndEnums(const GeneratorOptions& options,\n                               io::Printer* printer,\n                               const FileDescriptor* file) const;\n\n  void GenerateFieldValueExpression(io::Printer* printer,\n                                    const char* obj_reference,\n                                    const FieldDescriptor* field,\n                                    bool use_default) const;\n\n  // 
Generate definition for one class.\n  void GenerateClass(const GeneratorOptions& options,\n                     io::Printer* printer,\n                     const Descriptor* desc) const;\n  void GenerateClassConstructor(const GeneratorOptions& options,\n                                io::Printer* printer,\n                                const Descriptor* desc) const;\n  void GenerateClassFieldInfo(const GeneratorOptions& options,\n                              io::Printer* printer,\n                              const Descriptor* desc) const;\n  void GenerateClassXid(const GeneratorOptions& options,\n                        io::Printer* printer,\n                        const Descriptor* desc) const;\n  void GenerateOneofCaseDefinition(const GeneratorOptions& options,\n                                   io::Printer* printer,\n                                   const OneofDescriptor* oneof) const;\n  void GenerateClassToObject(const GeneratorOptions& options,\n                             io::Printer* printer,\n                             const Descriptor* desc) const;\n  void GenerateClassFieldToObject(const GeneratorOptions& options,\n                                  io::Printer* printer,\n                                  const FieldDescriptor* field) const;\n  void GenerateClassFromObject(const GeneratorOptions& options,\n                               io::Printer* printer,\n                               const Descriptor* desc) const;\n  void GenerateClassFieldFromObject(const GeneratorOptions& options,\n                                    io::Printer* printer,\n                                    const FieldDescriptor* field) const;\n  void GenerateClassClone(const GeneratorOptions& options,\n                          io::Printer* printer,\n                          const Descriptor* desc) const;\n  void GenerateClassRegistration(const GeneratorOptions& options,\n                                 io::Printer* printer,\n                                 const 
Descriptor* desc) const;\n  void GenerateClassFields(const GeneratorOptions& options,\n                           io::Printer* printer,\n                           const Descriptor* desc) const;\n  void GenerateClassField(const GeneratorOptions& options,\n                          io::Printer* printer,\n                          const FieldDescriptor* desc) const;\n  void GenerateClassExtensionFieldInfo(const GeneratorOptions& options,\n                                       io::Printer* printer,\n                                       const Descriptor* desc) const;\n  void GenerateClassDeserialize(const GeneratorOptions& options,\n                                io::Printer* printer,\n                                const Descriptor* desc) const;\n  void GenerateClassDeserializeBinary(const GeneratorOptions& options,\n                                      io::Printer* printer,\n                                      const Descriptor* desc) const;\n  void GenerateClassDeserializeBinaryField(const GeneratorOptions& options,\n                                           io::Printer* printer,\n                                           const FieldDescriptor* field) const;\n  void GenerateClassSerializeBinary(const GeneratorOptions& options,\n                                    io::Printer* printer,\n                                    const Descriptor* desc) const;\n  void GenerateClassSerializeBinaryField(const GeneratorOptions& options,\n                                         io::Printer* printer,\n                                         const FieldDescriptor* field) const;\n\n  // Generate definition for one enum.\n  void GenerateEnum(const GeneratorOptions& options,\n                    io::Printer* printer,\n                    const EnumDescriptor* enumdesc) const;\n\n  // Generate an extension definition.\n  void GenerateExtension(const GeneratorOptions& options,\n                         io::Printer* printer,\n                         const FieldDescriptor* 
field) const;\n\n  // Generate addFoo() method for repeated primitive fields.\n  void GenerateRepeatedPrimitiveHelperMethods(const GeneratorOptions& options,\n                                              io::Printer* printer,\n                                              const FieldDescriptor* field,\n                                              bool untyped) const;\n\n  // Generate addFoo() method for repeated message fields.\n  void GenerateRepeatedMessageHelperMethods(const GeneratorOptions& options,\n                                            io::Printer* printer,\n                                            const FieldDescriptor* field) const;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Generator);\n};\n\n}  // namespace js\n}  // namespace compiler\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_COMPILER_JS_GENERATOR_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/objectivec/objectivec_generator.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Generates ObjectiveC code for a given .proto file.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_OBJECTIVEC_GENERATOR_H__\n#define GOOGLE_PROTOBUF_COMPILER_OBJECTIVEC_GENERATOR_H__\n\n#include <string>\n#include <google/protobuf/compiler/code_generator.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace compiler {\nnamespace objectivec {\n\n// CodeGenerator implementation which generates a ObjectiveC source file and\n// header.  If you create your own protocol compiler binary and you want it to\n// support ObjectiveC output, you can do so by registering an instance of this\n// CodeGenerator with the CommandLineInterface in your main() function.\nclass LIBPROTOC_EXPORT ObjectiveCGenerator : public CodeGenerator {\n public:\n  ObjectiveCGenerator();\n  ~ObjectiveCGenerator();\n\n  // implements CodeGenerator ----------------------------------------\n  bool HasGenerateAll() const;\n  bool Generate(const FileDescriptor* file,\n                const string& parameter,\n                GeneratorContext* context,\n                string* error) const;\n  bool GenerateAll(const vector<const FileDescriptor*>& files,\n                   const string& parameter,\n                   GeneratorContext* context,\n                   string* error) const;\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ObjectiveCGenerator);\n};\n\n}  // namespace objectivec\n}  // namespace compiler\n}  // namespace protobuf\n}  // namespace google\n#endif  // 
GOOGLE_PROTOBUF_COMPILER_OBJECTIVEC_GENERATOR_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/objectivec/objectivec_helpers.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Helper functions for generating ObjectiveC code.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_OBJECTIVEC_HELPERS_H__\n#define GOOGLE_PROTOBUF_COMPILER_OBJECTIVEC_HELPERS_H__\n\n#include <string>\n#include <vector>\n\n#include <google/protobuf/descriptor.h>\n#include <google/protobuf/descriptor.pb.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace compiler {\nnamespace objectivec {\n\n// Generator options (see objectivec_generator.cc for a description of each):\nstruct Options {\n  Options();\n  string expected_prefixes_path;\n  string generate_for_named_framework;\n  string named_framework_to_proto_path_mappings_path;\n};\n\n// Escape C++ trigraphs by escaping question marks to \"\\?\".\nstring EscapeTrigraphs(const string& to_escape);\n\n// Strips \".proto\" or \".protodevel\" from the end of a filename.\nstring StripProto(const string& filename);\n\n// Remove white space from either end of a StringPiece.\nvoid StringPieceTrimWhitespace(StringPiece* input);\n\n// Returns true if the name requires a ns_returns_not_retained attribute applied\n// to it.\nbool IsRetainedName(const string& name);\n\n// Returns true if the name starts with \"init\" and will need to have special\n// handling under ARC.\nbool IsInitName(const string& name);\n\n// Gets the objc_class_prefix.\nstring FileClassPrefix(const FileDescriptor* file);\n\n// Gets the path of the file we're going to generate (sans the .pb.h\n// extension).  
The path will be dependent on the objectivec package\n// declared in the proto package.\nstring FilePath(const FileDescriptor* file);\n\n// Just like FilePath(), but without the directory part.\nstring FilePathBasename(const FileDescriptor* file);\n\n// Gets the name of the root class we'll generate in the file.  This class\n// is not meant for external consumption, but instead contains helpers that\n// the rest of the classes need\nstring FileClassName(const FileDescriptor* file);\n\n// These return the fully-qualified class name corresponding to the given\n// descriptor.\nstring ClassName(const Descriptor* descriptor);\nstring ClassName(const Descriptor* descriptor, string* out_suffix_added);\nstring EnumName(const EnumDescriptor* descriptor);\n\n// Returns the fully-qualified name of the enum value corresponding to the\n// descriptor.\nstring EnumValueName(const EnumValueDescriptor* descriptor);\n\n// Returns the name of the enum value corresponding to the descriptor.\nstring EnumValueShortName(const EnumValueDescriptor* descriptor);\n\n// Reverse what an enum does.\nstring UnCamelCaseEnumShortName(const string& name);\n\n// Returns the name to use for the extension (used as the method off the file's\n// Root class).\nstring ExtensionMethodName(const FieldDescriptor* descriptor);\n\n// Returns the transformed field name.\nstring FieldName(const FieldDescriptor* field);\nstring FieldNameCapitalized(const FieldDescriptor* field);\n\n// Returns the transformed oneof name.\nstring OneofEnumName(const OneofDescriptor* descriptor);\nstring OneofName(const OneofDescriptor* descriptor);\nstring OneofNameCapitalized(const OneofDescriptor* descriptor);\n\ninline bool HasFieldPresence(const FileDescriptor* file) {\n  return file->syntax() != FileDescriptor::SYNTAX_PROTO3;\n}\n\ninline bool HasPreservingUnknownEnumSemantics(const FileDescriptor* file) {\n  return file->syntax() == FileDescriptor::SYNTAX_PROTO3;\n}\n\ninline bool IsMapEntryMessage(const Descriptor* 
descriptor) {\n  return descriptor->options().map_entry();\n}\n\n// Reverse of the above.\nstring UnCamelCaseFieldName(const string& name, const FieldDescriptor* field);\n\nenum ObjectiveCType {\n  OBJECTIVECTYPE_INT32,\n  OBJECTIVECTYPE_UINT32,\n  OBJECTIVECTYPE_INT64,\n  OBJECTIVECTYPE_UINT64,\n  OBJECTIVECTYPE_FLOAT,\n  OBJECTIVECTYPE_DOUBLE,\n  OBJECTIVECTYPE_BOOLEAN,\n  OBJECTIVECTYPE_STRING,\n  OBJECTIVECTYPE_DATA,\n  OBJECTIVECTYPE_ENUM,\n  OBJECTIVECTYPE_MESSAGE\n};\n\nenum FlagType {\n  FLAGTYPE_DESCRIPTOR_INITIALIZATION,\n  FLAGTYPE_EXTENSION,\n  FLAGTYPE_FIELD\n};\n\ntemplate<class TDescriptor>\nstring GetOptionalDeprecatedAttribute(const TDescriptor* descriptor, bool preSpace = true, bool postNewline = false) {\n  if (descriptor->options().deprecated()) {\n    string result = \"DEPRECATED_ATTRIBUTE\";\n    if (preSpace) {\n      result.insert(0, \" \");\n    }\n    if (postNewline) {\n      result.append(\"\\n\");\n    }\n    return result;\n  } else {\n    return \"\";\n  }\n}\n\nstring GetCapitalizedType(const FieldDescriptor* field);\n\nObjectiveCType GetObjectiveCType(FieldDescriptor::Type field_type);\n\ninline ObjectiveCType GetObjectiveCType(const FieldDescriptor* field) {\n  return GetObjectiveCType(field->type());\n}\n\nbool IsPrimitiveType(const FieldDescriptor* field);\nbool IsReferenceType(const FieldDescriptor* field);\n\nstring GPBGenericValueFieldName(const FieldDescriptor* field);\nstring DefaultValue(const FieldDescriptor* field);\nbool HasNonZeroDefaultValue(const FieldDescriptor* field);\n\nstring BuildFlagsString(const FlagType type, const vector<string>& strings);\n\n// Builds HeaderDoc/appledoc style comments out of the comments in the .proto\n// file.\nstring BuildCommentsString(const SourceLocation& location,\n                           bool prefer_single_line);\n\n// The name the commonly used by the library when built as a framework.\n// This lines up to the name used in the CocoaPod.\nextern const char* const 
ProtobufLibraryFrameworkName;\n// Returns the CPP symbol name to use as the gate for framework style imports\n// for the given framework name to use.\nstring ProtobufFrameworkImportSymbol(const string& framework_name);\n\n// Checks if the file is one of the proto's bundled with the library.\nbool IsProtobufLibraryBundledProtoFile(const FileDescriptor* file);\n\n// Checks the prefix for the given files and outputs any warnings as needed. If\n// there are flat out errors, then out_error is filled in with the first error\n// and the result is false.\nbool ValidateObjCClassPrefixes(const vector<const FileDescriptor*>& files,\n                               const Options& generation_options,\n                               string* out_error);\n\n// Generate decode data needed for ObjC's GPBDecodeTextFormatName() to transform\n// the input into the expected output.\nclass LIBPROTOC_EXPORT TextFormatDecodeData {\n public:\n  TextFormatDecodeData();\n  ~TextFormatDecodeData();\n\n  void AddString(int32 key, const string& input_for_decode,\n                 const string& desired_output);\n  size_t num_entries() const { return entries_.size(); }\n  string Data() const;\n\n  static string DecodeDataForString(const string& input_for_decode,\n                                    const string& desired_output);\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(TextFormatDecodeData);\n\n  typedef std::pair<int32, string> DataEntry;\n  vector<DataEntry> entries_;\n};\n\n// Helper for parsing simple files.\nclass LIBPROTOC_EXPORT LineConsumer {\n public:\n  LineConsumer();\n  virtual ~LineConsumer();\n  virtual bool ConsumeLine(const StringPiece& line, string* out_error) = 0;\n};\n\nbool ParseSimpleFile(\n    const string& path, LineConsumer* line_consumer, string* out_error);\n\n\n// Helper class for parsing framework import mappings and generating\n// import statements.\nclass LIBPROTOC_EXPORT ImportWriter {\n public:\n  ImportWriter(const string& generate_for_named_framework,\n    
           const string& named_framework_to_proto_path_mappings_path);\n  ~ImportWriter();\n\n  void AddFile(const FileDescriptor* file, const string& header_extension);\n  void Print(io::Printer *printer) const;\n\n private:\n  class ProtoFrameworkCollector : public LineConsumer {\n   public:\n    ProtoFrameworkCollector(map<string, string>* inout_proto_file_to_framework_name)\n        : map_(inout_proto_file_to_framework_name) {}\n\n    virtual bool ConsumeLine(const StringPiece& line, string* out_error);\n\n   private:\n    map<string, string>* map_;\n  };\n\n  void ParseFrameworkMappings();\n\n  const string generate_for_named_framework_;\n  const string named_framework_to_proto_path_mappings_path_;\n  map<string, string> proto_file_to_framework_name_;\n  bool need_to_parse_mapping_file_;\n\n  vector<string> protobuf_framework_imports_;\n  vector<string> protobuf_non_framework_imports_;\n  vector<string> other_framework_imports_;\n  vector<string> other_imports_;\n};\n\n}  // namespace objectivec\n}  // namespace compiler\n}  // namespace protobuf\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_COMPILER_OBJECTIVEC_HELPERS_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/parser.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Implements parsing of .proto files to FileDescriptorProtos.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_PARSER_H__\n#define GOOGLE_PROTOBUF_COMPILER_PARSER_H__\n\n#include <map>\n#include <string>\n#include <utility>\n#include <google/protobuf/descriptor.h>\n#include <google/protobuf/descriptor.pb.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/io/tokenizer.h>\n\nnamespace google {\nnamespace protobuf { class Message; }\n\nnamespace protobuf {\nnamespace compiler {\n\n// Defined in this file.\nclass Parser;\nclass SourceLocationTable;\n\n// Implements parsing of protocol definitions (such as .proto files).\n//\n// Note that most users will be more interested in the Importer class.\n// Parser is a lower-level class which simply converts a single .proto file\n// to a FileDescriptorProto.  It does not resolve import directives or perform\n// many other kinds of validation needed to construct a complete\n// FileDescriptor.\nclass LIBPROTOBUF_EXPORT Parser {\n public:\n  Parser();\n  ~Parser();\n\n  // Parse the entire input and construct a FileDescriptorProto representing\n  // it.  
Returns true if no errors occurred, false otherwise.\n  bool Parse(io::Tokenizer* input, FileDescriptorProto* file);\n\n  // Optional features:\n\n  // DEPRECATED:  New code should use the SourceCodeInfo embedded in the\n  //   FileDescriptorProto.\n  //\n  // Requests that locations of certain definitions be recorded to the given\n  // SourceLocationTable while parsing.  This can be used to look up exact line\n  // and column numbers for errors reported by DescriptorPool during validation.\n  // Set to NULL (the default) to discard source location information.\n  void RecordSourceLocationsTo(SourceLocationTable* location_table) {\n    source_location_table_ = location_table;\n  }\n\n  // Requests that errors be recorded to the given ErrorCollector while\n  // parsing.  Set to NULL (the default) to discard error messages.\n  void RecordErrorsTo(io::ErrorCollector* error_collector) {\n    error_collector_ = error_collector;\n  }\n\n  // Returns the identifier used in the \"syntax = \" declaration, if one was\n  // seen during the last call to Parse(), or the empty string otherwise.\n  const string& GetSyntaxIdentifier() { return syntax_identifier_; }\n\n  // If set true, input files will be required to begin with a syntax\n  // identifier.  Otherwise, files may omit this.  If a syntax identifier\n  // is provided, it must be 'syntax = \"proto2\";' and must appear at the\n  // top of this file regardless of whether or not it was required.\n  void SetRequireSyntaxIdentifier(bool value) {\n    require_syntax_identifier_ = value;\n  }\n\n  // Call SetStopAfterSyntaxIdentifier(true) to tell the parser to stop\n  // parsing as soon as it has seen the syntax identifier, or lack thereof.\n  // This is useful for quickly identifying the syntax of the file without\n  // parsing the whole thing.  
If this is enabled, no error will be recorded\n  // if the syntax identifier is something other than \"proto2\" (since\n  // presumably the caller intends to deal with that), but other kinds of\n  // errors (e.g. parse errors) will still be reported.  When this is enabled,\n  // you may pass a NULL FileDescriptorProto to Parse().\n  void SetStopAfterSyntaxIdentifier(bool value) {\n    stop_after_syntax_identifier_ = value;\n  }\n\n private:\n  class LocationRecorder;\n\n  // =================================================================\n  // Error recovery helpers\n\n  // Consume the rest of the current statement.  This consumes tokens\n  // until it sees one of:\n  //   ';'  Consumes the token and returns.\n  //   '{'  Consumes the brace then calls SkipRestOfBlock().\n  //   '}'  Returns without consuming.\n  //   EOF  Returns (can't consume).\n  // The Parser often calls SkipStatement() after encountering a syntax\n  // error.  This allows it to go on parsing the following lines, allowing\n  // it to report more than just one error in the file.\n  void SkipStatement();\n\n  // Consume the rest of the current block, including nested blocks,\n  // ending after the closing '}' is encountered and consumed, or at EOF.\n  void SkipRestOfBlock();\n\n  // -----------------------------------------------------------------\n  // Single-token consuming helpers\n  //\n  // These make parsing code more readable.\n\n  // True if the current token is TYPE_END.\n  inline bool AtEnd();\n\n  // True if the next token matches the given text.\n  inline bool LookingAt(const char* text);\n  // True if the next token is of the given type.\n  inline bool LookingAtType(io::Tokenizer::TokenType token_type);\n\n  // If the next token exactly matches the text given, consume it and return\n  // true.  Otherwise, return false without logging an error.\n  bool TryConsume(const char* text);\n\n  // These attempt to read some kind of token from the input.  
If successful,\n  // they return true.  Otherwise they return false and add the given error\n  // to the error list.\n\n  // Consume a token with the exact text given.\n  bool Consume(const char* text, const char* error);\n  // Same as above, but automatically generates the error \"Expected \\\"text\\\".\",\n  // where \"text\" is the expected token text.\n  bool Consume(const char* text);\n  // Consume a token of type IDENTIFIER and store its text in \"output\".\n  bool ConsumeIdentifier(string* output, const char* error);\n  // Consume an integer and store its value in \"output\".\n  bool ConsumeInteger(int* output, const char* error);\n  // Consume a signed integer and store its value in \"output\".\n  bool ConsumeSignedInteger(int* output, const char* error);\n  // Consume a 64-bit integer and store its value in \"output\".  If the value\n  // is greater than max_value, an error will be reported.\n  bool ConsumeInteger64(uint64 max_value, uint64* output, const char* error);\n  // Consume a number and store its value in \"output\".  This will accept\n  // tokens of either INTEGER or FLOAT type.\n  bool ConsumeNumber(double* output, const char* error);\n  // Consume a string literal and store its (unescaped) value in \"output\".\n  bool ConsumeString(string* output, const char* error);\n\n  // Consume a token representing the end of the statement.  Comments between\n  // this token and the next will be harvested for documentation.  The given\n  // LocationRecorder should refer to the declaration that was just parsed;\n  // it will be populated with these comments.\n  //\n  // TODO(kenton):  The LocationRecorder is const because historically locations\n  //   have been passed around by const reference, for no particularly good\n  //   reason.  
We should probably go through and change them all to mutable\n  //   pointer to make this more intuitive.\n  bool TryConsumeEndOfDeclaration(\n      const char* text, const LocationRecorder* location);\n  bool TryConsumeEndOfDeclarationFinishScope(\n      const char* text, const LocationRecorder* location);\n\n  bool ConsumeEndOfDeclaration(\n      const char* text, const LocationRecorder* location);\n\n  // -----------------------------------------------------------------\n  // Error logging helpers\n\n  // Invokes error_collector_->AddError(), if error_collector_ is not NULL.\n  void AddError(int line, int column, const string& error);\n\n  // Invokes error_collector_->AddError() with the line and column number\n  // of the current token.\n  void AddError(const string& error);\n\n  // Records a location in the SourceCodeInfo.location table (see\n  // descriptor.proto).  We use RAII to ensure that the start and end locations\n  // are recorded -- the constructor records the start location and the\n  // destructor records the end location.  Since the parser is\n  // recursive-descent, this works out beautifully.\n  class LIBPROTOBUF_EXPORT LocationRecorder {\n   public:\n    // Construct the file's \"root\" location.\n    LocationRecorder(Parser* parser);\n\n    // Construct a location that represents a declaration nested within the\n    // given parent.  E.g. a field's location is nested within the location\n    // for a message type.  The parent's path will be copied, so you should\n    // call AddPath() only to add the path components leading from the parent\n    // to the child (as opposed to leading from the root to the child).\n    LocationRecorder(const LocationRecorder& parent);\n\n    // Convenience constructors that call AddPath() one or two times.\n    LocationRecorder(const LocationRecorder& parent, int path1);\n    LocationRecorder(const LocationRecorder& parent, int path1, int path2);\n\n    ~LocationRecorder();\n\n    // Add a path component.  
See SourceCodeInfo.Location.path in\n    // descriptor.proto.\n    void AddPath(int path_component);\n\n    // By default the location is considered to start at the current token at\n    // the time the LocationRecorder is created.  StartAt() sets the start\n    // location to the given token instead.\n    void StartAt(const io::Tokenizer::Token& token);\n\n    // Start at the same location as some other LocationRecorder.\n    void StartAt(const LocationRecorder& other);\n\n    // By default the location is considered to end at the previous token at\n    // the time the LocationRecorder is destroyed.  EndAt() sets the end\n    // location to the given token instead.\n    void EndAt(const io::Tokenizer::Token& token);\n\n    // Records the start point of this location to the SourceLocationTable that\n    // was passed to RecordSourceLocationsTo(), if any.  SourceLocationTable\n    // is an older way of keeping track of source locations which is still\n    // used in some places.\n    void RecordLegacyLocation(const Message* descriptor,\n        DescriptorPool::ErrorCollector::ErrorLocation location);\n\n    // Attaches leading and trailing comments to the location.  The two strings\n    // will be swapped into place, so after this is called *leading and\n    // *trailing will be empty.\n    //\n    // TODO(kenton):  See comment on TryConsumeEndOfDeclaration(), above, for\n    //   why this is const.\n    void AttachComments(string* leading, string* trailing,\n                        vector<string>* detached_comments) const;\n\n   private:\n    // Indexes of parent and current location in the parent\n    // SourceCodeInfo.location repeated field. 
For top-level elements,\n    // parent_index_ is -1.\n    Parser* parser_;\n    SourceCodeInfo::Location* location_;\n\n    void Init(const LocationRecorder& parent);\n  };\n\n  // =================================================================\n  // Parsers for various language constructs\n\n  // Parses the \"syntax = \\\"proto2\\\";\" line at the top of the file.  Returns\n  // false if it failed to parse or if the syntax identifier was not\n  // recognized.\n  bool ParseSyntaxIdentifier(const LocationRecorder& parent);\n\n  // These methods parse various individual bits of code.  They return\n  // false if they completely fail to parse the construct.  In this case,\n  // it is probably necessary to skip the rest of the statement to recover.\n  // However, if these methods return true, it does NOT mean that there\n  // were no errors; only that there were no *syntax* errors.  For instance,\n  // if a service method is defined using proper syntax but uses a primitive\n  // type as its input or output, ParseMethodField() still returns true\n  // and only reports the error by calling AddError().  
In practice, this\n  // makes logic much simpler for the caller.\n\n  // Parse a top-level message, enum, service, etc.\n  bool ParseTopLevelStatement(FileDescriptorProto* file,\n                              const LocationRecorder& root_location);\n\n  // Parse various high-level language constructs.\n  bool ParseMessageDefinition(DescriptorProto* message,\n                              const LocationRecorder& message_location,\n                              const FileDescriptorProto* containing_file);\n  bool ParseEnumDefinition(EnumDescriptorProto* enum_type,\n                           const LocationRecorder& enum_location,\n                           const FileDescriptorProto* containing_file);\n  bool ParseServiceDefinition(ServiceDescriptorProto* service,\n                              const LocationRecorder& service_location,\n                              const FileDescriptorProto* containing_file);\n  bool ParsePackage(FileDescriptorProto* file,\n                    const LocationRecorder& root_location,\n                    const FileDescriptorProto* containing_file);\n  bool ParseImport(RepeatedPtrField<string>* dependency,\n                   RepeatedField<int32>* public_dependency,\n                   RepeatedField<int32>* weak_dependency,\n                   const LocationRecorder& root_location,\n                   const FileDescriptorProto* containing_file);\n\n  // These methods parse the contents of a message, enum, or service type and\n  // add them to the given object.  
They consume the entire block including\n  // the beginning and ending brace.\n  bool ParseMessageBlock(DescriptorProto* message,\n                         const LocationRecorder& message_location,\n                         const FileDescriptorProto* containing_file);\n  bool ParseEnumBlock(EnumDescriptorProto* enum_type,\n                      const LocationRecorder& enum_location,\n                      const FileDescriptorProto* containing_file);\n  bool ParseServiceBlock(ServiceDescriptorProto* service,\n                         const LocationRecorder& service_location,\n                         const FileDescriptorProto* containing_file);\n\n  // Parse one statement within a message, enum, or service block, including\n  // final semicolon.\n  bool ParseMessageStatement(DescriptorProto* message,\n                             const LocationRecorder& message_location,\n                             const FileDescriptorProto* containing_file);\n  bool ParseEnumStatement(EnumDescriptorProto* message,\n                          const LocationRecorder& enum_location,\n                          const FileDescriptorProto* containing_file);\n  bool ParseServiceStatement(ServiceDescriptorProto* message,\n                             const LocationRecorder& service_location,\n                             const FileDescriptorProto* containing_file);\n\n  // Parse a field of a message.  If the field is a group, its type will be\n  // added to \"messages\".\n  //\n  // parent_location and location_field_number_for_nested_type are needed when\n  // parsing groups -- we need to generate a nested message type within the\n  // parent and record its location accordingly.  
Since the parent could be\n  // either a FileDescriptorProto or a DescriptorProto, we must pass in the\n  // correct field number to use.\n  bool ParseMessageField(FieldDescriptorProto* field,\n                         RepeatedPtrField<DescriptorProto>* messages,\n                         const LocationRecorder& parent_location,\n                         int location_field_number_for_nested_type,\n                         const LocationRecorder& field_location,\n                         const FileDescriptorProto* containing_file);\n\n  // Like ParseMessageField() but expects the label has already been filled in\n  // by the caller.\n  bool ParseMessageFieldNoLabel(FieldDescriptorProto* field,\n                                RepeatedPtrField<DescriptorProto>* messages,\n                                const LocationRecorder& parent_location,\n                                int location_field_number_for_nested_type,\n                                const LocationRecorder& field_location,\n                                const FileDescriptorProto* containing_file);\n\n  // Parse an \"extensions\" declaration.\n  bool ParseExtensions(DescriptorProto* message,\n                       const LocationRecorder& extensions_location,\n                       const FileDescriptorProto* containing_file);\n\n  // Parse a \"reserved\" declaration.\n  bool ParseReserved(DescriptorProto* message,\n                     const LocationRecorder& message_location);\n  bool ParseReservedNames(DescriptorProto* message,\n                          const LocationRecorder& parent_location);\n  bool ParseReservedNumbers(DescriptorProto* message,\n                            const LocationRecorder& parent_location);\n\n  // Parse an \"extend\" declaration.  
(See also comments for\n  // ParseMessageField().)\n  bool ParseExtend(RepeatedPtrField<FieldDescriptorProto>* extensions,\n                   RepeatedPtrField<DescriptorProto>* messages,\n                   const LocationRecorder& parent_location,\n                   int location_field_number_for_nested_type,\n                   const LocationRecorder& extend_location,\n                   const FileDescriptorProto* containing_file);\n\n  // Parse a \"oneof\" declaration.  The caller is responsible for setting\n  // oneof_decl->label() since it will have had to parse the label before it\n  // knew it was parsing a oneof.\n  bool ParseOneof(OneofDescriptorProto* oneof_decl,\n                  DescriptorProto* containing_type,\n                  int oneof_index,\n                  const LocationRecorder& oneof_location,\n                  const LocationRecorder& containing_type_location,\n                  const FileDescriptorProto* containing_file);\n\n  // Parse a single enum value within an enum block.\n  bool ParseEnumConstant(EnumValueDescriptorProto* enum_value,\n                         const LocationRecorder& enum_value_location,\n                         const FileDescriptorProto* containing_file);\n\n  // Parse enum constant options, i.e. 
the list in square brackets at the end\n  // of the enum constant value definition.\n  bool ParseEnumConstantOptions(EnumValueDescriptorProto* value,\n                                const LocationRecorder& enum_value_location,\n                                const FileDescriptorProto* containing_file);\n\n  // Parse a single method within a service definition.\n  bool ParseServiceMethod(MethodDescriptorProto* method,\n                          const LocationRecorder& method_location,\n                          const FileDescriptorProto* containing_file);\n\n\n  // Parse options of a single method or stream.\n  bool ParseMethodOptions(const LocationRecorder& parent_location,\n                          const FileDescriptorProto* containing_file,\n                          const int optionsFieldNumber,\n                          Message* mutable_options);\n\n  // Parse \"required\", \"optional\", or \"repeated\" and fill in \"label\"\n  // with the value. Returns true if such a label is consumed.\n  bool ParseLabel(FieldDescriptorProto::Label* label,\n                  const FileDescriptorProto* containing_file);\n\n  // Parse a type name and fill in \"type\" (if it is a primitive) or\n  // \"type_name\" (if it is not) with the type parsed.\n  bool ParseType(FieldDescriptorProto::Type* type,\n                 string* type_name);\n  // Parse a user-defined type and fill in \"type_name\" with the name.\n  // If a primitive type is named, it is treated as an error.\n  bool ParseUserDefinedType(string* type_name);\n\n  // Parses field options, i.e. the stuff in square brackets at the end\n  // of a field definition.  Also parses default value.\n  bool ParseFieldOptions(FieldDescriptorProto* field,\n                         const LocationRecorder& field_location,\n                         const FileDescriptorProto* containing_file);\n\n  // Parse the \"default\" option.  
This needs special handling because its\n  // type is the field's type.\n  bool ParseDefaultAssignment(FieldDescriptorProto* field,\n                              const LocationRecorder& field_location,\n                              const FileDescriptorProto* containing_file);\n\n  bool ParseJsonName(FieldDescriptorProto* field,\n                     const LocationRecorder& field_location,\n                     const FileDescriptorProto* containing_file);\n\n  enum OptionStyle {\n    OPTION_ASSIGNMENT,  // just \"name = value\"\n    OPTION_STATEMENT    // \"option name = value;\"\n  };\n\n  // Parse a single option name/value pair, e.g. \"ctype = CORD\".  The name\n  // identifies a field of the given Message, and the value of that field\n  // is set to the parsed value.\n  bool ParseOption(Message* options,\n                   const LocationRecorder& options_location,\n                   const FileDescriptorProto* containing_file,\n                   OptionStyle style);\n\n  // Parses a single part of a multipart option name. A multipart name consists\n  // of names separated by dots. Each name is either an identifier or a series\n  // of identifiers separated by dots and enclosed in parentheses. E.g.,\n  // \"foo.(bar.baz).qux\".\n  bool ParseOptionNamePart(UninterpretedOption* uninterpreted_option,\n                           const LocationRecorder& part_location,\n                           const FileDescriptorProto* containing_file);\n\n  // Parses a string surrounded by balanced braces.  
Strips off the outer\n  // braces and stores the enclosed string in *value.\n  // E.g.,\n  //     { foo }                     *value gets 'foo'\n  //     { foo { bar: box } }        *value gets 'foo { bar: box }'\n  //     {}                          *value gets ''\n  //\n  // REQUIRES: LookingAt(\"{\")\n  // When finished successfully, we are looking at the first token past\n  // the ending brace.\n  bool ParseUninterpretedBlock(string* value);\n\n  struct MapField {\n    // Whether the field is a map field.\n    bool is_map_field;\n    // The types of the key and value if they are primitive types.\n    FieldDescriptorProto::Type key_type;\n    FieldDescriptorProto::Type value_type;\n    // Or the type names string if the types are customized types.\n    string key_type_name;\n    string value_type_name;\n\n    MapField() : is_map_field(false) {}\n  };\n  // Desugar the map syntax to generate a nested map entry message.\n  void GenerateMapEntry(const MapField& map_field, FieldDescriptorProto* field,\n                        RepeatedPtrField<DescriptorProto>* messages);\n\n  // Whether fields without label default to optional fields.\n  bool DefaultToOptionalFields() const {\n    return syntax_identifier_ == \"proto3\";\n  }\n\n\n  bool ValidateEnum(const EnumDescriptorProto* proto);\n\n  // =================================================================\n\n  io::Tokenizer* input_;\n  io::ErrorCollector* error_collector_;\n  SourceCodeInfo* source_code_info_;\n  SourceLocationTable* source_location_table_;  // legacy\n  bool had_errors_;\n  bool require_syntax_identifier_;\n  bool stop_after_syntax_identifier_;\n  string syntax_identifier_;\n\n  // Leading doc comments for the next declaration.  These are not complete\n  // yet; use ConsumeEndOfDeclaration() to get the complete comments.\n  string upcoming_doc_comments_;\n\n  // Detached comments are not connected to any syntax entities. 
Elements in\n  // this vector are paragraphs of comments separated by empty lines. The\n  // detached comments will be put into the leading_detached_comments field for\n  // the next element (See SourceCodeInfo.Location in descriptor.proto), when\n  // ConsumeEndOfDeclaration() is called.\n  vector<string> upcoming_detached_comments_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Parser);\n};\n\n// A table mapping (descriptor, ErrorLocation) pairs -- as reported by\n// DescriptorPool when validating descriptors -- to line and column numbers\n// within the original source code.\n//\n// This is semi-obsolete:  FileDescriptorProto.source_code_info now contains\n// far more complete information about source locations.  However, as of this\n// writing you still need to use SourceLocationTable when integrating with\n// DescriptorPool.\nclass LIBPROTOBUF_EXPORT SourceLocationTable {\n public:\n  SourceLocationTable();\n  ~SourceLocationTable();\n\n  // Finds the precise location of the given error and fills in *line and\n  // *column with the line and column numbers.  If not found, sets *line to\n  // -1 and *column to 0 (since line = -1 is used to mean \"error has no exact\n  // location\" in the ErrorCollector interface).  Returns true if found, false\n  // otherwise.\n  bool Find(const Message* descriptor,\n            DescriptorPool::ErrorCollector::ErrorLocation location,\n            int* line, int* column) const;\n\n  // Adds a location to the table.\n  void Add(const Message* descriptor,\n           DescriptorPool::ErrorCollector::ErrorLocation location,\n           int line, int column);\n\n  // Clears the contents of the table.\n  void Clear();\n\n private:\n  typedef map<\n    pair<const Message*, DescriptorPool::ErrorCollector::ErrorLocation>,\n    pair<int, int> > LocationMap;\n  LocationMap location_map_;\n};\n\n}  // namespace compiler\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_COMPILER_PARSER_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/php/php_generator.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_PHP_GENERATOR_H__\n#define GOOGLE_PROTOBUF_COMPILER_PHP_GENERATOR_H__\n\n#include <google/protobuf/compiler/code_generator.h>\n\n#include <string>\n\nnamespace google {\nnamespace protobuf {\nnamespace compiler {\nnamespace php {\n\nclass LIBPROTOC_EXPORT Generator\n    : public google::protobuf::compiler::CodeGenerator {\n  virtual bool Generate(\n      const FileDescriptor* file,\n      const string& parameter,\n      GeneratorContext* generator_context,\n      string* error) const;\n};\n\n}  // namespace php\n}  // namespace compiler\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_COMPILER_PHP_GENERATOR_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/plugin.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//\n// Front-end for protoc code generator plugins written in C++.\n//\n// To implement a protoc plugin in C++, simply write an implementation of\n// CodeGenerator, then create a main() function like:\n//   int main(int argc, char* argv[]) {\n//     MyCodeGenerator generator;\n//     return google::protobuf::compiler::PluginMain(argc, argv, &generator);\n//   }\n// You must link your plugin against libprotobuf and libprotoc.\n//\n// The core part of PluginMain is to invoke the given CodeGenerator on a\n// CodeGeneratorRequest to generate a CodeGeneratorResponse. This part is\n// abstracted out and made into function GenerateCode so that it can be reused,\n// for example, to implement a variant of PluginMain that does some\n// preprocessing on the input CodeGeneratorRequest before feeding the request\n// to the given code generator.\n//\n// To get protoc to use the plugin, do one of the following:\n// * Place the plugin binary somewhere in the PATH and give it the name\n//   \"protoc-gen-NAME\" (replacing \"NAME\" with the name of your plugin).  
If you\n//   then invoke protoc with the parameter --NAME_out=OUT_DIR (again, replace\n//   \"NAME\" with your plugin's name), protoc will invoke your plugin to generate\n//   the output, which will be placed in OUT_DIR.\n// * Place the plugin binary anywhere, with any name, and pass the --plugin\n//   parameter to protoc to direct it to your plugin like so:\n//     protoc --plugin=protoc-gen-NAME=path/to/mybinary --NAME_out=OUT_DIR\n//   On Windows, make sure to include the .exe suffix:\n//     protoc --plugin=protoc-gen-NAME=path/to/mybinary.exe --NAME_out=OUT_DIR\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_PLUGIN_H__\n#define GOOGLE_PROTOBUF_COMPILER_PLUGIN_H__\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\nnamespace google {\nnamespace protobuf {\nnamespace compiler {\n\nclass CodeGenerator;    // code_generator.h\nclass CodeGeneratorRequest;\nclass CodeGeneratorResponse;\n\n// Implements main() for a protoc plugin exposing the given code generator.\nLIBPROTOC_EXPORT int PluginMain(int argc, char* argv[], const CodeGenerator* generator);\n\n// Generates code using the given code generator. Returns true if the code\n// generation is successful. If the code geneartion fails, error_msg may be\n// populated to describe the failure cause.\nbool GenerateCode(const CodeGeneratorRequest& request,\n    const CodeGenerator& generator, CodeGeneratorResponse* response,\n    string* error_msg);\n\n}  // namespace compiler\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_COMPILER_PLUGIN_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/plugin.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: google/protobuf/compiler/plugin.proto\n\n#ifndef PROTOBUF_google_2fprotobuf_2fcompiler_2fplugin_2eproto__INCLUDED\n#define PROTOBUF_google_2fprotobuf_2fcompiler_2fplugin_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/unknown_field_set.h>\n#include <google/protobuf/descriptor.pb.h>\n// @@protoc_insertion_point(includes)\n\nnamespace google {\nnamespace protobuf {\nnamespace compiler {\n\n// Internal implementation detail -- do not call these.\nvoid LIBPROTOC_EXPORT protobuf_AddDesc_google_2fprotobuf_2fcompiler_2fplugin_2eproto();\nvoid LIBPROTOC_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fcompiler_2fplugin_2eproto();\nvoid protobuf_AssignDesc_google_2fprotobuf_2fcompiler_2fplugin_2eproto();\nvoid protobuf_ShutdownFile_google_2fprotobuf_2fcompiler_2fplugin_2eproto();\n\nclass CodeGeneratorRequest;\nclass CodeGeneratorResponse;\nclass CodeGeneratorResponse_File;\n\n// ===================================================================\n\nclass LIBPROTOC_EXPORT CodeGeneratorRequest : public ::google::protobuf::Message /* 
@@protoc_insertion_point(class_definition:google.protobuf.compiler.CodeGeneratorRequest) */ {\n public:\n  CodeGeneratorRequest();\n  virtual ~CodeGeneratorRequest();\n\n  CodeGeneratorRequest(const CodeGeneratorRequest& from);\n\n  inline CodeGeneratorRequest& operator=(const CodeGeneratorRequest& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const CodeGeneratorRequest& default_instance();\n\n  static const CodeGeneratorRequest* internal_default_instance();\n\n  void Swap(CodeGeneratorRequest* other);\n\n  // implements Message ----------------------------------------------\n\n  inline CodeGeneratorRequest* New() const { return New(NULL); }\n\n  CodeGeneratorRequest* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const CodeGeneratorRequest& from);\n  void MergeFrom(const CodeGeneratorRequest& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void 
SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(CodeGeneratorRequest* other);\n  void UnsafeMergeFrom(const CodeGeneratorRequest& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // repeated string file_to_generate = 1;\n  int file_to_generate_size() const;\n  void clear_file_to_generate();\n  static const int kFileToGenerateFieldNumber = 1;\n  const ::std::string& file_to_generate(int index) const;\n  ::std::string* mutable_file_to_generate(int index);\n  void set_file_to_generate(int index, const ::std::string& value);\n  void set_file_to_generate(int index, const char* value);\n  void set_file_to_generate(int index, const char* value, size_t size);\n  ::std::string* add_file_to_generate();\n  void add_file_to_generate(const ::std::string& value);\n  void add_file_to_generate(const char* value);\n  void add_file_to_generate(const char* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& file_to_generate() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_file_to_generate();\n\n  // optional string parameter = 2;\n  bool has_parameter() const;\n  void clear_parameter();\n  static const int kParameterFieldNumber = 2;\n  const ::std::string& parameter() const;\n  void set_parameter(const ::std::string& value);\n  void set_parameter(const char* value);\n  void set_parameter(const char* value, size_t size);\n  ::std::string* mutable_parameter();\n  ::std::string* release_parameter();\n  void set_allocated_parameter(::std::string* parameter);\n\n  // repeated .google.protobuf.FileDescriptorProto 
proto_file = 15;\n  int proto_file_size() const;\n  void clear_proto_file();\n  static const int kProtoFileFieldNumber = 15;\n  const ::google::protobuf::FileDescriptorProto& proto_file(int index) const;\n  ::google::protobuf::FileDescriptorProto* mutable_proto_file(int index);\n  ::google::protobuf::FileDescriptorProto* add_proto_file();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::FileDescriptorProto >*\n      mutable_proto_file();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::FileDescriptorProto >&\n      proto_file() const;\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorRequest)\n private:\n  inline void set_has_parameter();\n  inline void clear_has_parameter();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> file_to_generate_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::FileDescriptorProto > proto_file_;\n  ::google::protobuf::internal::ArenaStringPtr parameter_;\n  friend void LIBPROTOC_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fcompiler_2fplugin_2eproto_impl();\n  friend void LIBPROTOC_EXPORT protobuf_AddDesc_google_2fprotobuf_2fcompiler_2fplugin_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fcompiler_2fplugin_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fcompiler_2fplugin_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<CodeGeneratorRequest> CodeGeneratorRequest_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOC_EXPORT CodeGeneratorResponse_File : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.compiler.CodeGeneratorResponse.File) */ {\n public:\n  
CodeGeneratorResponse_File();\n  virtual ~CodeGeneratorResponse_File();\n\n  CodeGeneratorResponse_File(const CodeGeneratorResponse_File& from);\n\n  inline CodeGeneratorResponse_File& operator=(const CodeGeneratorResponse_File& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const CodeGeneratorResponse_File& default_instance();\n\n  static const CodeGeneratorResponse_File* internal_default_instance();\n\n  void Swap(CodeGeneratorResponse_File* other);\n\n  // implements Message ----------------------------------------------\n\n  inline CodeGeneratorResponse_File* New() const { return New(NULL); }\n\n  CodeGeneratorResponse_File* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const CodeGeneratorResponse_File& from);\n  void MergeFrom(const CodeGeneratorResponse_File& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void 
SetCachedSize(int size) const;\n  void InternalSwap(CodeGeneratorResponse_File* other);\n  void UnsafeMergeFrom(const CodeGeneratorResponse_File& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // optional string insertion_point = 2;\n  bool has_insertion_point() const;\n  void clear_insertion_point();\n  static const int kInsertionPointFieldNumber = 2;\n  const ::std::string& insertion_point() const;\n  void set_insertion_point(const ::std::string& value);\n  void set_insertion_point(const char* value);\n  void set_insertion_point(const char* value, size_t size);\n  ::std::string* mutable_insertion_point();\n  ::std::string* release_insertion_point();\n  void set_allocated_insertion_point(::std::string* insertion_point);\n\n  // optional string content = 15;\n  bool has_content() const;\n  void clear_content();\n  static const int kContentFieldNumber = 15;\n  const ::std::string& content() const;\n  void set_content(const ::std::string& value);\n  void set_content(const char* value);\n  void set_content(const char* value, size_t size);\n  ::std::string* mutable_content();\n  ::std::string* release_content();\n  void set_allocated_content(::std::string* content);\n\n  // 
@@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse.File)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_insertion_point();\n  inline void clear_has_insertion_point();\n  inline void set_has_content();\n  inline void clear_has_content();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::internal::ArenaStringPtr insertion_point_;\n  ::google::protobuf::internal::ArenaStringPtr content_;\n  friend void LIBPROTOC_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fcompiler_2fplugin_2eproto_impl();\n  friend void LIBPROTOC_EXPORT protobuf_AddDesc_google_2fprotobuf_2fcompiler_2fplugin_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fcompiler_2fplugin_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fcompiler_2fplugin_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<CodeGeneratorResponse_File> CodeGeneratorResponse_File_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOC_EXPORT CodeGeneratorResponse : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.compiler.CodeGeneratorResponse) */ {\n public:\n  CodeGeneratorResponse();\n  virtual ~CodeGeneratorResponse();\n\n  CodeGeneratorResponse(const CodeGeneratorResponse& from);\n\n  inline CodeGeneratorResponse& operator=(const CodeGeneratorResponse& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return 
_internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const CodeGeneratorResponse& default_instance();\n\n  static const CodeGeneratorResponse* internal_default_instance();\n\n  void Swap(CodeGeneratorResponse* other);\n\n  // implements Message ----------------------------------------------\n\n  inline CodeGeneratorResponse* New() const { return New(NULL); }\n\n  CodeGeneratorResponse* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const CodeGeneratorResponse& from);\n  void MergeFrom(const CodeGeneratorResponse& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(CodeGeneratorResponse* other);\n  void UnsafeMergeFrom(const CodeGeneratorResponse& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  typedef CodeGeneratorResponse_File File;\n\n  // 
accessors -------------------------------------------------------\n\n  // optional string error = 1;\n  bool has_error() const;\n  void clear_error();\n  static const int kErrorFieldNumber = 1;\n  const ::std::string& error() const;\n  void set_error(const ::std::string& value);\n  void set_error(const char* value);\n  void set_error(const char* value, size_t size);\n  ::std::string* mutable_error();\n  ::std::string* release_error();\n  void set_allocated_error(::std::string* error);\n\n  // repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;\n  int file_size() const;\n  void clear_file();\n  static const int kFileFieldNumber = 15;\n  const ::google::protobuf::compiler::CodeGeneratorResponse_File& file(int index) const;\n  ::google::protobuf::compiler::CodeGeneratorResponse_File* mutable_file(int index);\n  ::google::protobuf::compiler::CodeGeneratorResponse_File* add_file();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::compiler::CodeGeneratorResponse_File >*\n      mutable_file();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::compiler::CodeGeneratorResponse_File >&\n      file() const;\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.compiler.CodeGeneratorResponse)\n private:\n  inline void set_has_error();\n  inline void clear_has_error();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::compiler::CodeGeneratorResponse_File > file_;\n  ::google::protobuf::internal::ArenaStringPtr error_;\n  friend void LIBPROTOC_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fcompiler_2fplugin_2eproto_impl();\n  friend void LIBPROTOC_EXPORT protobuf_AddDesc_google_2fprotobuf_2fcompiler_2fplugin_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fcompiler_2fplugin_2eproto();\n  friend void 
protobuf_ShutdownFile_google_2fprotobuf_2fcompiler_2fplugin_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<CodeGeneratorResponse> CodeGeneratorResponse_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// CodeGeneratorRequest\n\n// repeated string file_to_generate = 1;\ninline int CodeGeneratorRequest::file_to_generate_size() const {\n  return file_to_generate_.size();\n}\ninline void CodeGeneratorRequest::clear_file_to_generate() {\n  file_to_generate_.Clear();\n}\ninline const ::std::string& CodeGeneratorRequest::file_to_generate(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate)\n  return file_to_generate_.Get(index);\n}\ninline ::std::string* CodeGeneratorRequest::mutable_file_to_generate(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate)\n  return file_to_generate_.Mutable(index);\n}\ninline void CodeGeneratorRequest::set_file_to_generate(int index, const ::std::string& value) {\n  // @@protoc_insertion_point(field_set:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate)\n  file_to_generate_.Mutable(index)->assign(value);\n}\ninline void CodeGeneratorRequest::set_file_to_generate(int index, const char* value) {\n  file_to_generate_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate)\n}\ninline void CodeGeneratorRequest::set_file_to_generate(int index, const char* value, size_t size) {\n  file_to_generate_.Mutable(index)->assign(\n    reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate)\n}\ninline 
::std::string* CodeGeneratorRequest::add_file_to_generate() {\n  // @@protoc_insertion_point(field_add_mutable:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate)\n  return file_to_generate_.Add();\n}\ninline void CodeGeneratorRequest::add_file_to_generate(const ::std::string& value) {\n  file_to_generate_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate)\n}\ninline void CodeGeneratorRequest::add_file_to_generate(const char* value) {\n  file_to_generate_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add_char:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate)\n}\ninline void CodeGeneratorRequest::add_file_to_generate(const char* value, size_t size) {\n  file_to_generate_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate)\n}\ninline const ::google::protobuf::RepeatedPtrField< ::std::string>&\nCodeGeneratorRequest::file_to_generate() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate)\n  return file_to_generate_;\n}\ninline ::google::protobuf::RepeatedPtrField< ::std::string>*\nCodeGeneratorRequest::mutable_file_to_generate() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.compiler.CodeGeneratorRequest.file_to_generate)\n  return &file_to_generate_;\n}\n\n// optional string parameter = 2;\ninline bool CodeGeneratorRequest::has_parameter() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void CodeGeneratorRequest::set_has_parameter() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void CodeGeneratorRequest::clear_has_parameter() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void CodeGeneratorRequest::clear_parameter() {\n  parameter_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  
clear_has_parameter();\n}\ninline const ::std::string& CodeGeneratorRequest::parameter() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorRequest.parameter)\n  return parameter_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void CodeGeneratorRequest::set_parameter(const ::std::string& value) {\n  set_has_parameter();\n  parameter_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.compiler.CodeGeneratorRequest.parameter)\n}\ninline void CodeGeneratorRequest::set_parameter(const char* value) {\n  set_has_parameter();\n  parameter_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.compiler.CodeGeneratorRequest.parameter)\n}\ninline void CodeGeneratorRequest::set_parameter(const char* value, size_t size) {\n  set_has_parameter();\n  parameter_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.compiler.CodeGeneratorRequest.parameter)\n}\ninline ::std::string* CodeGeneratorRequest::mutable_parameter() {\n  set_has_parameter();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorRequest.parameter)\n  return parameter_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* CodeGeneratorRequest::release_parameter() {\n  // @@protoc_insertion_point(field_release:google.protobuf.compiler.CodeGeneratorRequest.parameter)\n  clear_has_parameter();\n  return parameter_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void CodeGeneratorRequest::set_allocated_parameter(::std::string* parameter) {\n  if (parameter != NULL) {\n    
set_has_parameter();\n  } else {\n    clear_has_parameter();\n  }\n  parameter_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), parameter);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.compiler.CodeGeneratorRequest.parameter)\n}\n\n// repeated .google.protobuf.FileDescriptorProto proto_file = 15;\ninline int CodeGeneratorRequest::proto_file_size() const {\n  return proto_file_.size();\n}\ninline void CodeGeneratorRequest::clear_proto_file() {\n  proto_file_.Clear();\n}\ninline const ::google::protobuf::FileDescriptorProto& CodeGeneratorRequest::proto_file(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorRequest.proto_file)\n  return proto_file_.Get(index);\n}\ninline ::google::protobuf::FileDescriptorProto* CodeGeneratorRequest::mutable_proto_file(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorRequest.proto_file)\n  return proto_file_.Mutable(index);\n}\ninline ::google::protobuf::FileDescriptorProto* CodeGeneratorRequest::add_proto_file() {\n  // @@protoc_insertion_point(field_add:google.protobuf.compiler.CodeGeneratorRequest.proto_file)\n  return proto_file_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::FileDescriptorProto >*\nCodeGeneratorRequest::mutable_proto_file() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.compiler.CodeGeneratorRequest.proto_file)\n  return &proto_file_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::FileDescriptorProto >&\nCodeGeneratorRequest::proto_file() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.compiler.CodeGeneratorRequest.proto_file)\n  return proto_file_;\n}\n\ninline const CodeGeneratorRequest* CodeGeneratorRequest::internal_default_instance() {\n  return &CodeGeneratorRequest_default_instance_.get();\n}\n// 
-------------------------------------------------------------------\n\n// CodeGeneratorResponse_File\n\n// optional string name = 1;\ninline bool CodeGeneratorResponse_File::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void CodeGeneratorResponse_File::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void CodeGeneratorResponse_File::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void CodeGeneratorResponse_File::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& CodeGeneratorResponse_File::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorResponse.File.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void CodeGeneratorResponse_File::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.compiler.CodeGeneratorResponse.File.name)\n}\ninline void CodeGeneratorResponse_File::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.compiler.CodeGeneratorResponse.File.name)\n}\ninline void CodeGeneratorResponse_File::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.compiler.CodeGeneratorResponse.File.name)\n}\ninline ::std::string* CodeGeneratorResponse_File::mutable_name() {\n  set_has_name();\n  // 
@@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorResponse.File.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* CodeGeneratorResponse_File::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.compiler.CodeGeneratorResponse.File.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void CodeGeneratorResponse_File::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.compiler.CodeGeneratorResponse.File.name)\n}\n\n// optional string insertion_point = 2;\ninline bool CodeGeneratorResponse_File::has_insertion_point() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void CodeGeneratorResponse_File::set_has_insertion_point() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void CodeGeneratorResponse_File::clear_has_insertion_point() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void CodeGeneratorResponse_File::clear_insertion_point() {\n  insertion_point_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_insertion_point();\n}\ninline const ::std::string& CodeGeneratorResponse_File::insertion_point() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point)\n  return insertion_point_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void CodeGeneratorResponse_File::set_insertion_point(const ::std::string& value) {\n  set_has_insertion_point();\n  insertion_point_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // 
@@protoc_insertion_point(field_set:google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point)\n}\ninline void CodeGeneratorResponse_File::set_insertion_point(const char* value) {\n  set_has_insertion_point();\n  insertion_point_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point)\n}\ninline void CodeGeneratorResponse_File::set_insertion_point(const char* value, size_t size) {\n  set_has_insertion_point();\n  insertion_point_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point)\n}\ninline ::std::string* CodeGeneratorResponse_File::mutable_insertion_point() {\n  set_has_insertion_point();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point)\n  return insertion_point_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* CodeGeneratorResponse_File::release_insertion_point() {\n  // @@protoc_insertion_point(field_release:google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point)\n  clear_has_insertion_point();\n  return insertion_point_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void CodeGeneratorResponse_File::set_allocated_insertion_point(::std::string* insertion_point) {\n  if (insertion_point != NULL) {\n    set_has_insertion_point();\n  } else {\n    clear_has_insertion_point();\n  }\n  insertion_point_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), insertion_point);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.compiler.CodeGeneratorResponse.File.insertion_point)\n}\n\n// 
optional string content = 15;\ninline bool CodeGeneratorResponse_File::has_content() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void CodeGeneratorResponse_File::set_has_content() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void CodeGeneratorResponse_File::clear_has_content() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void CodeGeneratorResponse_File::clear_content() {\n  content_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_content();\n}\ninline const ::std::string& CodeGeneratorResponse_File::content() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorResponse.File.content)\n  return content_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void CodeGeneratorResponse_File::set_content(const ::std::string& value) {\n  set_has_content();\n  content_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.compiler.CodeGeneratorResponse.File.content)\n}\ninline void CodeGeneratorResponse_File::set_content(const char* value) {\n  set_has_content();\n  content_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.compiler.CodeGeneratorResponse.File.content)\n}\ninline void CodeGeneratorResponse_File::set_content(const char* value, size_t size) {\n  set_has_content();\n  content_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.compiler.CodeGeneratorResponse.File.content)\n}\ninline ::std::string* CodeGeneratorResponse_File::mutable_content() {\n  set_has_content();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorResponse.File.content)\n  return 
content_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* CodeGeneratorResponse_File::release_content() {\n  // @@protoc_insertion_point(field_release:google.protobuf.compiler.CodeGeneratorResponse.File.content)\n  clear_has_content();\n  return content_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void CodeGeneratorResponse_File::set_allocated_content(::std::string* content) {\n  if (content != NULL) {\n    set_has_content();\n  } else {\n    clear_has_content();\n  }\n  content_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), content);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.compiler.CodeGeneratorResponse.File.content)\n}\n\ninline const CodeGeneratorResponse_File* CodeGeneratorResponse_File::internal_default_instance() {\n  return &CodeGeneratorResponse_File_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// CodeGeneratorResponse\n\n// optional string error = 1;\ninline bool CodeGeneratorResponse::has_error() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void CodeGeneratorResponse::set_has_error() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void CodeGeneratorResponse::clear_has_error() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void CodeGeneratorResponse::clear_error() {\n  error_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_error();\n}\ninline const ::std::string& CodeGeneratorResponse::error() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorResponse.error)\n  return error_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void CodeGeneratorResponse::set_error(const ::std::string& value) {\n  set_has_error();\n  error_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  
// @@protoc_insertion_point(field_set:google.protobuf.compiler.CodeGeneratorResponse.error)\n}\ninline void CodeGeneratorResponse::set_error(const char* value) {\n  set_has_error();\n  error_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.compiler.CodeGeneratorResponse.error)\n}\ninline void CodeGeneratorResponse::set_error(const char* value, size_t size) {\n  set_has_error();\n  error_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.compiler.CodeGeneratorResponse.error)\n}\ninline ::std::string* CodeGeneratorResponse::mutable_error() {\n  set_has_error();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorResponse.error)\n  return error_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* CodeGeneratorResponse::release_error() {\n  // @@protoc_insertion_point(field_release:google.protobuf.compiler.CodeGeneratorResponse.error)\n  clear_has_error();\n  return error_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void CodeGeneratorResponse::set_allocated_error(::std::string* error) {\n  if (error != NULL) {\n    set_has_error();\n  } else {\n    clear_has_error();\n  }\n  error_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), error);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.compiler.CodeGeneratorResponse.error)\n}\n\n// repeated .google.protobuf.compiler.CodeGeneratorResponse.File file = 15;\ninline int CodeGeneratorResponse::file_size() const {\n  return file_.size();\n}\ninline void CodeGeneratorResponse::clear_file() {\n  file_.Clear();\n}\ninline const ::google::protobuf::compiler::CodeGeneratorResponse_File& 
CodeGeneratorResponse::file(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.compiler.CodeGeneratorResponse.file)\n  return file_.Get(index);\n}\ninline ::google::protobuf::compiler::CodeGeneratorResponse_File* CodeGeneratorResponse::mutable_file(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.compiler.CodeGeneratorResponse.file)\n  return file_.Mutable(index);\n}\ninline ::google::protobuf::compiler::CodeGeneratorResponse_File* CodeGeneratorResponse::add_file() {\n  // @@protoc_insertion_point(field_add:google.protobuf.compiler.CodeGeneratorResponse.file)\n  return file_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::compiler::CodeGeneratorResponse_File >*\nCodeGeneratorResponse::mutable_file() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.compiler.CodeGeneratorResponse.file)\n  return &file_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::compiler::CodeGeneratorResponse_File >&\nCodeGeneratorResponse::file() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.compiler.CodeGeneratorResponse.file)\n  return file_;\n}\n\ninline const CodeGeneratorResponse* CodeGeneratorResponse::internal_default_instance() {\n  return &CodeGeneratorResponse_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace compiler\n}  // namespace protobuf\n}  // namespace google\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_google_2fprotobuf_2fcompiler_2fplugin_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/plugin.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//\n// WARNING:  The plugin interface is currently EXPERIMENTAL and is subject to\n//   change.\n//\n// protoc (aka the Protocol Compiler) can be extended via plugins.  
A plugin is\n// just a program that reads a CodeGeneratorRequest from stdin and writes a\n// CodeGeneratorResponse to stdout.\n//\n// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead\n// of dealing with the raw protocol defined here.\n//\n// A plugin executable needs only to be placed somewhere in the path.  The\n// plugin should be named \"protoc-gen-$NAME\", and will then be used when the\n// flag \"--${NAME}_out\" is passed to protoc.\n\nsyntax = \"proto2\";\npackage google.protobuf.compiler;\noption java_package = \"com.google.protobuf.compiler\";\noption java_outer_classname = \"PluginProtos\";\n\noption go_package = \"plugin_go\";\n\nimport \"google/protobuf/descriptor.proto\";\n\n// An encoded CodeGeneratorRequest is written to the plugin's stdin.\nmessage CodeGeneratorRequest {\n  // The .proto files that were explicitly listed on the command-line.  The\n  // code generator should generate code only for these files.  Each file's\n  // descriptor will be included in proto_file, below.\n  repeated string file_to_generate = 1;\n\n  // The generator parameter passed on the command-line.\n  optional string parameter = 2;\n\n  // FileDescriptorProtos for all files in files_to_generate and everything\n  // they import.  The files will appear in topological order, so each file\n  // appears before any file that imports it.\n  //\n  // protoc guarantees that all proto_files will be written after\n  // the fields above, even though this is not technically guaranteed by the\n  // protobuf wire format.  This theoretically could allow a plugin to stream\n  // in the FileDescriptorProtos and handle them one by one rather than read\n  // the entire set into memory at once.  
However, as of this writing, this\n  // is not similarly optimized on protoc's end -- it will store all fields in\n  // memory at once before sending them to the plugin.\n  repeated FileDescriptorProto proto_file = 15;\n}\n\n// The plugin writes an encoded CodeGeneratorResponse to stdout.\nmessage CodeGeneratorResponse {\n  // Error message.  If non-empty, code generation failed.  The plugin process\n  // should exit with status code zero even if it reports an error in this way.\n  //\n  // This should be used to indicate errors in .proto files which prevent the\n  // code generator from generating correct code.  Errors which indicate a\n  // problem in protoc itself -- such as the input CodeGeneratorRequest being\n  // unparseable -- should be reported by writing a message to stderr and\n  // exiting with a non-zero status code.\n  optional string error = 1;\n\n  // Represents a single generated file.\n  message File {\n    // The file name, relative to the output directory.  The name must not\n    // contain \".\" or \"..\" components and must be relative, not be absolute (so,\n    // the file cannot lie outside the output directory).  \"/\" must be used as\n    // the path separator, not \"\\\".\n    //\n    // If the name is omitted, the content will be appended to the previous\n    // file.  This allows the generator to break large files into small chunks,\n    // and allows the generated text to be streamed back to protoc so that large\n    // files need not reside completely in memory at one time.  Note that as of\n    // this writing protoc does not optimize for this -- it will read the entire\n    // CodeGeneratorResponse before writing files to disk.\n    optional string name = 1;\n\n    // If non-empty, indicates that the named file should already exist, and the\n    // content here is to be inserted into that file at a defined insertion\n    // point.  This feature allows a code generator to extend the output\n    // produced by another code generator.  
The original generator may provide\n    // insertion points by placing special annotations in the file that look\n    // like:\n    //   @@protoc_insertion_point(NAME)\n    // The annotation can have arbitrary text before and after it on the line,\n    // which allows it to be placed in a comment.  NAME should be replaced with\n    // an identifier naming the point -- this is what other generators will use\n    // as the insertion_point.  Code inserted at this point will be placed\n    // immediately above the line containing the insertion point (thus multiple\n    // insertions to the same point will come out in the order they were added).\n    // The double-@ is intended to make it unlikely that the generated code\n    // could contain things that look like insertion points by accident.\n    //\n    // For example, the C++ code generator places the following line in the\n    // .pb.h files that it generates:\n    //   // @@protoc_insertion_point(namespace_scope)\n    // This line appears within the scope of the file's package namespace, but\n    // outside of any particular class.  Another plugin can then specify the\n    // insertion_point \"namespace_scope\" to generate additional classes or\n    // other declarations that should be placed in this scope.\n    //\n    // Note that if the line containing the insertion point begins with\n    // whitespace, the same whitespace will be added to every line of the\n    // inserted text.  This is useful for languages like Python, where\n    // indentation matters.  
In these languages, the insertion point comment\n    // should be indented the same amount as any inserted code will need to be\n    // in order to work correctly in that context.\n    //\n    // The code generator that generates the initial file and the one which\n    // inserts into it must both run as part of a single invocation of protoc.\n    // Code generators are executed in the order in which they appear on the\n    // command line.\n    //\n    // If |insertion_point| is present, |name| must also be present.\n    optional string insertion_point = 2;\n\n    // The file contents.\n    optional string content = 15;\n  }\n  repeated File file = 15;\n}\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/python/python_generator.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: robinson@google.com (Will Robinson)\n//\n// Generates Python code for a given .proto file.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_PYTHON_GENERATOR_H__\n#define GOOGLE_PROTOBUF_COMPILER_PYTHON_GENERATOR_H__\n\n#include <string>\n\n#include <google/protobuf/compiler/code_generator.h>\n#include <google/protobuf/stubs/mutex.h>\n#include <google/protobuf/stubs/common.h>\n\nnamespace google {\nnamespace protobuf {\n\nclass Descriptor;\nclass EnumDescriptor;\nclass EnumValueDescriptor;\nclass FieldDescriptor;\nclass OneofDescriptor;\nclass ServiceDescriptor;\n\nnamespace io { class Printer; }\n\nnamespace compiler {\nnamespace python {\n\n// CodeGenerator implementation for generated Python protocol buffer classes.\n// If you create your own protocol compiler binary and you want it to support\n// Python output, you can do so by registering an instance of this\n// CodeGenerator with the CommandLineInterface in your main() function.\nclass LIBPROTOC_EXPORT Generator : public CodeGenerator {\n public:\n  Generator();\n  virtual ~Generator();\n\n  // CodeGenerator methods.\n  virtual bool Generate(const FileDescriptor* file,\n                        const string& parameter,\n                        GeneratorContext* generator_context,\n                        string* error) const;\n\n private:\n  void PrintImports() const;\n  void PrintFileDescriptor() const;\n  void PrintTopLevelEnums() const;\n  void PrintAllNestedEnumsInFile() 
const;\n  void PrintNestedEnums(const Descriptor& descriptor) const;\n  void PrintEnum(const EnumDescriptor& enum_descriptor) const;\n\n  void PrintTopLevelExtensions() const;\n\n  void PrintFieldDescriptor(\n      const FieldDescriptor& field, bool is_extension) const;\n  void PrintFieldDescriptorsInDescriptor(\n      const Descriptor& message_descriptor,\n      bool is_extension,\n      const string& list_variable_name,\n      int (Descriptor::*CountFn)() const,\n      const FieldDescriptor* (Descriptor::*GetterFn)(int) const) const;\n  void PrintFieldsInDescriptor(const Descriptor& message_descriptor) const;\n  void PrintExtensionsInDescriptor(const Descriptor& message_descriptor) const;\n  void PrintMessageDescriptors() const;\n  void PrintDescriptor(const Descriptor& message_descriptor) const;\n  void PrintNestedDescriptors(const Descriptor& containing_descriptor) const;\n\n  void PrintMessages() const;\n  void PrintMessage(const Descriptor& message_descriptor, const string& prefix,\n                    vector<string>* to_register) const;\n  void PrintNestedMessages(const Descriptor& containing_descriptor,\n                           const string& prefix,\n                           vector<string>* to_register) const;\n\n  void FixForeignFieldsInDescriptors() const;\n  void FixForeignFieldsInDescriptor(\n      const Descriptor& descriptor,\n      const Descriptor* containing_descriptor) const;\n  void FixForeignFieldsInField(const Descriptor* containing_type,\n                               const FieldDescriptor& field,\n                               const string& python_dict_name) const;\n  void AddMessageToFileDescriptor(const Descriptor& descriptor) const;\n  void AddEnumToFileDescriptor(const EnumDescriptor& descriptor) const;\n  void AddExtensionToFileDescriptor(const FieldDescriptor& descriptor) const;\n  string FieldReferencingExpression(const Descriptor* containing_type,\n                                    const FieldDescriptor& field,\n              
                      const string& python_dict_name) const;\n  template <typename DescriptorT>\n  void FixContainingTypeInDescriptor(\n      const DescriptorT& descriptor,\n      const Descriptor* containing_descriptor) const;\n\n  void FixForeignFieldsInExtensions() const;\n  void FixForeignFieldsInExtension(\n      const FieldDescriptor& extension_field) const;\n  void FixForeignFieldsInNestedExtensions(const Descriptor& descriptor) const;\n\n  void PrintServices() const;\n  void PrintServiceDescriptor(const ServiceDescriptor& descriptor) const;\n  void PrintServiceClass(const ServiceDescriptor& descriptor) const;\n  void PrintServiceStub(const ServiceDescriptor& descriptor) const;\n  void PrintDescriptorKeyAndModuleName(\n      const ServiceDescriptor& descriptor) const ;\n\n  void PrintEnumValueDescriptor(const EnumValueDescriptor& descriptor) const;\n  string OptionsValue(const string& class_name,\n                      const string& serialized_options) const;\n  bool GeneratingDescriptorProto() const;\n\n  template <typename DescriptorT>\n  string ModuleLevelDescriptorName(const DescriptorT& descriptor) const;\n  string ModuleLevelMessageName(const Descriptor& descriptor) const;\n  string ModuleLevelServiceDescriptorName(\n      const ServiceDescriptor& descriptor) const;\n\n  template <typename DescriptorT, typename DescriptorProtoT>\n  void PrintSerializedPbInterval(\n      const DescriptorT& descriptor, DescriptorProtoT& proto) const;\n\n  void FixAllDescriptorOptions() const;\n  void FixOptionsForField(const FieldDescriptor& field) const;\n  void FixOptionsForOneof(const OneofDescriptor& oneof) const;\n  void FixOptionsForEnum(const EnumDescriptor& descriptor) const;\n  void FixOptionsForMessage(const Descriptor& descriptor) const;\n\n  void CopyPublicDependenciesAliases(\n      const string& copy_from, const FileDescriptor* file) const;\n\n  // Very coarse-grained lock to ensure that Generate() is reentrant.\n  // Guards file_, printer_ and 
file_descriptor_serialized_.\n  mutable Mutex mutex_;\n  mutable const FileDescriptor* file_;  // Set in Generate().  Under mutex_.\n  mutable string file_descriptor_serialized_;\n  mutable io::Printer* printer_;  // Set in Generate().  Under mutex_.\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Generator);\n};\n\n}  // namespace python\n}  // namespace compiler\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_COMPILER_PYTHON_GENERATOR_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/compiler/ruby/ruby_generator.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Generates Ruby code for a given .proto file.\n\n#ifndef GOOGLE_PROTOBUF_COMPILER_RUBY_GENERATOR_H__\n#define GOOGLE_PROTOBUF_COMPILER_RUBY_GENERATOR_H__\n\n#include <string>\n\n#include <google/protobuf/compiler/code_generator.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace compiler {\nnamespace ruby {\n\n// CodeGenerator implementation for generated Ruby protocol buffer classes.\n// If you create your own protocol compiler binary and you want it to support\n// Ruby output, you can do so by registering an instance of this\n// CodeGenerator with the CommandLineInterface in your main() function.\nclass LIBPROTOC_EXPORT Generator\n    : public google::protobuf::compiler::CodeGenerator {\n  virtual bool Generate(\n      const FileDescriptor* file,\n      const string& parameter,\n      GeneratorContext* generator_context,\n      string* error) const;\n};\n\n}  // namespace ruby\n}  // namespace compiler\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_COMPILER_RUBY_GENERATOR_H__\n\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/descriptor.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// This file contains classes which describe a type of protocol message.\n// You can use a message's descriptor to learn at runtime what fields\n// it contains and what the types of those fields are.  The Message\n// interface also allows you to dynamically access and modify individual\n// fields by passing the FieldDescriptor of the field you are interested\n// in.\n//\n// Most users will not care about descriptors, because they will write\n// code specific to certain protocol types and will simply use the classes\n// generated by the protocol compiler directly.  
Advanced users who want\n// to operate on arbitrary types (not known at compile time) may want to\n// read descriptors in order to learn about the contents of a message.\n// A very small number of users will want to construct their own\n// Descriptors, either because they are implementing Message manually or\n// because they are writing something like the protocol compiler.\n//\n// For an example of how you might use descriptors, see the code example\n// at the top of message.h.\n\n#ifndef GOOGLE_PROTOBUF_DESCRIPTOR_H__\n#define GOOGLE_PROTOBUF_DESCRIPTOR_H__\n\n#include <memory>\n#ifndef _SHARED_PTR_H\n#include <google/protobuf/stubs/shared_ptr.h>\n#endif\n#include <set>\n#include <string>\n#include <vector>\n#include <google/protobuf/stubs/common.h>\n\n// TYPE_BOOL is defined in the MacOS's ConditionalMacros.h.\n#ifdef TYPE_BOOL\n#undef TYPE_BOOL\n#endif  // TYPE_BOOL\n\nnamespace google {\nnamespace protobuf {\n\n// Defined in this file.\nclass Descriptor;\nclass FieldDescriptor;\nclass OneofDescriptor;\nclass EnumDescriptor;\nclass EnumValueDescriptor;\nclass ServiceDescriptor;\nclass MethodDescriptor;\nclass FileDescriptor;\nclass DescriptorDatabase;\nclass DescriptorPool;\n\n// Defined in descriptor.proto\nclass DescriptorProto;\nclass FieldDescriptorProto;\nclass OneofDescriptorProto;\nclass EnumDescriptorProto;\nclass EnumValueDescriptorProto;\nclass ServiceDescriptorProto;\nclass MethodDescriptorProto;\nclass FileDescriptorProto;\nclass MessageOptions;\nclass FieldOptions;\nclass OneofOptions;\nclass EnumOptions;\nclass EnumValueOptions;\nclass ServiceOptions;\nclass MethodOptions;\nclass FileOptions;\nclass UninterpretedOption;\nclass SourceCodeInfo;\n\n// Defined in message.h\nclass Message;\n\n// Defined in descriptor.cc\nclass DescriptorBuilder;\nclass FileDescriptorTables;\n\n// Defined in unknown_field_set.h.\nclass UnknownField;\n\n// Defined in generated_message_reflection.h.\nnamespace internal {\nclass GeneratedMessageReflection;\n}  // namespace 
internal\n\n// Defined in command_line_interface.cc\nnamespace compiler {\nclass CommandLineInterface;\n}  // namespace compiler\n\nnamespace descriptor_unittest {\nclass DescriptorTest;\n}  // namespace descriptor_unittest\n\n// Defined in printer.h\nnamespace io {\nclass Printer;\n}  // namespace io\n\n// NB, all indices are zero-based.\nstruct SourceLocation {\n  int start_line;\n  int end_line;\n  int start_column;\n  int end_column;\n\n  // Doc comments found at the source location.\n  // See the comments in SourceCodeInfo.Location (descriptor.proto) for details.\n  string leading_comments;\n  string trailing_comments;\n  std::vector<string> leading_detached_comments;\n};\n\n// Options when generating machine-parsable output from a descriptor with\n// DebugString().\nstruct DebugStringOptions {\n  // include original user comments as recorded in SourceLocation entries. N.B.\n  // that this must be |false| by default: several other pieces of code (for\n  // example, the C++ code generation for fields in the proto compiler) rely on\n  // DebugString() output being unobstructed by user comments.\n  bool include_comments;\n  // If true, elide the braced body in the debug string.\n  bool elide_group_body;\n  bool elide_oneof_body;\n\n  DebugStringOptions()\n      : include_comments(false),\n        elide_group_body(false),\n        elide_oneof_body(false) {}\n};\n\n// Describes a type of protocol message, or a particular group within a\n// message.  To obtain the Descriptor for a given message object, call\n// Message::GetDescriptor().  Generated message classes also have a\n// static method called descriptor() which returns the type's descriptor.\n// Use DescriptorPool to construct your own descriptors.\nclass LIBPROTOBUF_EXPORT Descriptor {\n public:\n  // The name of the message type, not including its scope.\n  const string& name() const;\n\n  // The fully-qualified name of the message type, scope delimited by\n  // periods.  
For example, message type \"Foo\" which is declared in package\n  // \"bar\" has full name \"bar.Foo\".  If a type \"Baz\" is nested within\n  // Foo, Baz's full_name is \"bar.Foo.Baz\".  To get only the part that\n  // comes after the last '.', use name().\n  const string& full_name() const;\n\n  // Index of this descriptor within the file or containing type's message\n  // type array.\n  int index() const;\n\n  // The .proto file in which this message type was defined.  Never NULL.\n  const FileDescriptor* file() const;\n\n  // If this Descriptor describes a nested type, this returns the type\n  // in which it is nested.  Otherwise, returns NULL.\n  const Descriptor* containing_type() const;\n\n  // Get options for this message type.  These are specified in the .proto file\n  // by placing lines like \"option foo = 1234;\" in the message definition.\n  // Allowed options are defined by MessageOptions in\n  // google/protobuf/descriptor.proto, and any available extensions of that\n  // message.\n  const MessageOptions& options() const;\n\n  // Write the contents of this Descriptor into the given DescriptorProto.\n  // The target DescriptorProto must be clear before calling this; if it\n  // isn't, the result may be garbage.\n  void CopyTo(DescriptorProto* proto) const;\n\n  // Write the contents of this decriptor in a human-readable form. Output\n  // will be suitable for re-parsing.\n  string DebugString() const;\n\n  // Similar to DebugString(), but additionally takes options (e.g.,\n  // include original user comments in output).\n  string DebugStringWithOptions(const DebugStringOptions& options) const;\n\n  // Returns true if this is a placeholder for an unknown type. 
This will\n  // only be the case if this descriptor comes from a DescriptorPool\n  // with AllowUnknownDependencies() set.\n  bool is_placeholder() const;\n\n  // Field stuff -----------------------------------------------------\n\n  // The number of fields in this message type.\n  int field_count() const;\n  // Gets a field by index, where 0 <= index < field_count().\n  // These are returned in the order they were defined in the .proto file.\n  const FieldDescriptor* field(int index) const;\n\n  // Looks up a field by declared tag number.  Returns NULL if no such field\n  // exists.\n  const FieldDescriptor* FindFieldByNumber(int number) const;\n  // Looks up a field by name.  Returns NULL if no such field exists.\n  const FieldDescriptor* FindFieldByName(const string& name) const;\n\n  // Looks up a field by lowercased name (as returned by lowercase_name()).\n  // This lookup may be ambiguous if multiple field names differ only by case,\n  // in which case the field returned is chosen arbitrarily from the matches.\n  const FieldDescriptor* FindFieldByLowercaseName(\n      const string& lowercase_name) const;\n\n  // Looks up a field by camel-case name (as returned by camelcase_name()).\n  // This lookup may be ambiguous if multiple field names differ in a way that\n  // leads them to have identical camel-case names, in which case the field\n  // returned is chosen arbitrarily from the matches.\n  const FieldDescriptor* FindFieldByCamelcaseName(\n      const string& camelcase_name) const;\n\n  // The number of oneofs in this message type.\n  int oneof_decl_count() const;\n  // Get a oneof by index, where 0 <= index < oneof_decl_count().\n  // These are returned in the order they were defined in the .proto file.\n  const OneofDescriptor* oneof_decl(int index) const;\n\n  // Looks up a oneof by name.  
Returns NULL if no such oneof exists.\n  const OneofDescriptor* FindOneofByName(const string& name) const;\n\n  // Nested type stuff -----------------------------------------------\n\n  // The number of nested types in this message type.\n  int nested_type_count() const;\n  // Gets a nested type by index, where 0 <= index < nested_type_count().\n  // These are returned in the order they were defined in the .proto file.\n  const Descriptor* nested_type(int index) const;\n\n  // Looks up a nested type by name.  Returns NULL if no such nested type\n  // exists.\n  const Descriptor* FindNestedTypeByName(const string& name) const;\n\n  // Enum stuff ------------------------------------------------------\n\n  // The number of enum types in this message type.\n  int enum_type_count() const;\n  // Gets an enum type by index, where 0 <= index < enum_type_count().\n  // These are returned in the order they were defined in the .proto file.\n  const EnumDescriptor* enum_type(int index) const;\n\n  // Looks up an enum type by name.  Returns NULL if no such enum type exists.\n  const EnumDescriptor* FindEnumTypeByName(const string& name) const;\n\n  // Looks up an enum value by name, among all enum types in this message.\n  // Returns NULL if no such value exists.\n  const EnumValueDescriptor* FindEnumValueByName(const string& name) const;\n\n  // Extensions ------------------------------------------------------\n\n  // A range of field numbers which are designated for third-party\n  // extensions.\n  struct ExtensionRange {\n    int start;  // inclusive\n    int end;    // exclusive\n  };\n\n  // The number of extension ranges in this message type.\n  int extension_range_count() const;\n  // Gets an extension range by index, where 0 <= index <\n  // extension_range_count(). 
These are returned in the order they were defined\n  // in the .proto file.\n  const ExtensionRange* extension_range(int index) const;\n\n  // Returns true if the number is in one of the extension ranges.\n  bool IsExtensionNumber(int number) const;\n\n  // Returns NULL if no extension range contains the given number.\n  const ExtensionRange* FindExtensionRangeContainingNumber(int number) const;\n\n  // The number of extensions -- extending *other* messages -- that were\n  // defined nested within this message type's scope.\n  int extension_count() const;\n  // Get an extension by index, where 0 <= index < extension_count().\n  // These are returned in the order they were defined in the .proto file.\n  const FieldDescriptor* extension(int index) const;\n\n  // Looks up a named extension (which extends some *other* message type)\n  // defined within this message type's scope.\n  const FieldDescriptor* FindExtensionByName(const string& name) const;\n\n  // Similar to FindFieldByLowercaseName(), but finds extensions defined within\n  // this message type's scope.\n  const FieldDescriptor* FindExtensionByLowercaseName(const string& name) const;\n\n  // Similar to FindFieldByCamelcaseName(), but finds extensions defined within\n  // this message type's scope.\n  const FieldDescriptor* FindExtensionByCamelcaseName(const string& name) const;\n\n  // Reserved fields -------------------------------------------------\n\n  // A range of reserved field numbers.\n  struct ReservedRange {\n    int start;  // inclusive\n    int end;    // exclusive\n  };\n\n  // The number of reserved ranges in this message type.\n  int reserved_range_count() const;\n  // Gets an reserved range by index, where 0 <= index <\n  // reserved_range_count(). 
These are returned in the order they were defined\n  // in the .proto file.\n  const ReservedRange* reserved_range(int index) const;\n\n  // Returns true if the number is in one of the reserved ranges.\n  bool IsReservedNumber(int number) const;\n\n  // Returns NULL if no reserved range contains the given number.\n  const ReservedRange* FindReservedRangeContainingNumber(int number) const;\n\n  // The number of reserved field names in this message type.\n  int reserved_name_count() const;\n\n  // Gets a reserved name by index, where 0 <= index < reserved_name_count().\n  const string& reserved_name(int index) const;\n\n  // Returns true if the field name is reserved.\n  bool IsReservedName(const string& name) const;\n\n  // Source Location ---------------------------------------------------\n\n  // Updates |*out_location| to the source location of the complete\n  // extent of this message declaration.  Returns false and leaves\n  // |*out_location| unchanged iff location information was not available.\n  bool GetSourceLocation(SourceLocation* out_location) const;\n\n private:\n  typedef MessageOptions OptionsType;\n\n  // Allows tests to test CopyTo(proto, true).\n  friend class ::google::protobuf::descriptor_unittest::DescriptorTest;\n\n  // Allows access to GetLocationPath for annotations.\n  friend class ::google::protobuf::io::Printer;\n\n  // Fill the json_name field of FieldDescriptorProto.\n  void CopyJsonNameTo(DescriptorProto* proto) const;\n\n  // Internal version of DebugString; controls the level of indenting for\n  // correct depth. Takes |options| to control debug-string options, and\n  // |include_opening_clause| to indicate whether the \"message ... 
\" part of the\n  // clause has already been generated (this varies depending on context).\n  void DebugString(int depth, string *contents,\n                   const DebugStringOptions& options,\n                   bool include_opening_clause) const;\n\n  // Walks up the descriptor tree to generate the source location path\n  // to this descriptor from the file root.\n  void GetLocationPath(std::vector<int>* output) const;\n\n  const string* name_;\n  const string* full_name_;\n  const FileDescriptor* file_;\n  const Descriptor* containing_type_;\n  const MessageOptions* options_;\n\n  // True if this is a placeholder for an unknown type.\n  bool is_placeholder_;\n  // True if this is a placeholder and the type name wasn't fully-qualified.\n  bool is_unqualified_placeholder_;\n\n  int field_count_;\n  FieldDescriptor* fields_;\n  int oneof_decl_count_;\n  OneofDescriptor* oneof_decls_;\n  int nested_type_count_;\n  Descriptor* nested_types_;\n  int enum_type_count_;\n  EnumDescriptor* enum_types_;\n  int extension_range_count_;\n  ExtensionRange* extension_ranges_;\n  int extension_count_;\n  FieldDescriptor* extensions_;\n  int reserved_range_count_;\n  ReservedRange* reserved_ranges_;\n  int reserved_name_count_;\n  const string** reserved_names_;\n  // IMPORTANT:  If you add a new field, make sure to search for all instances\n  // of Allocate<Descriptor>() and AllocateArray<Descriptor>() in descriptor.cc\n  // and update them to initialize the field.\n\n  // Must be constructed using DescriptorPool.\n  Descriptor() {}\n  friend class DescriptorBuilder;\n  friend class EnumDescriptor;\n  friend class FieldDescriptor;\n  friend class OneofDescriptor;\n  friend class MethodDescriptor;\n  friend class FileDescriptor;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Descriptor);\n};\n\n// Describes a single field of a message.  
To get the descriptor for a given\n// field, first get the Descriptor for the message in which it is defined,\n// then call Descriptor::FindFieldByName().  To get a FieldDescriptor for\n// an extension, do one of the following:\n// - Get the Descriptor or FileDescriptor for its containing scope, then\n//   call Descriptor::FindExtensionByName() or\n//   FileDescriptor::FindExtensionByName().\n// - Given a DescriptorPool, call DescriptorPool::FindExtensionByNumber().\n// - Given a Reflection for a message object, call\n//   Reflection::FindKnownExtensionByName() or\n//   Reflection::FindKnownExtensionByNumber().\n// Use DescriptorPool to construct your own descriptors.\nclass LIBPROTOBUF_EXPORT FieldDescriptor {\n public:\n  // Identifies a field type.  0 is reserved for errors.  The order is weird\n  // for historical reasons.  Types 12 and up are new in proto2.\n  enum Type {\n    TYPE_DOUBLE         = 1,   // double, exactly eight bytes on the wire.\n    TYPE_FLOAT          = 2,   // float, exactly four bytes on the wire.\n    TYPE_INT64          = 3,   // int64, varint on the wire.  Negative numbers\n                               // take 10 bytes.  Use TYPE_SINT64 if negative\n                               // values are likely.\n    TYPE_UINT64         = 4,   // uint64, varint on the wire.\n    TYPE_INT32          = 5,   // int32, varint on the wire.  Negative numbers\n                               // take 10 bytes.  Use TYPE_SINT32 if negative\n                               // values are likely.\n    TYPE_FIXED64        = 6,   // uint64, exactly eight bytes on the wire.\n    TYPE_FIXED32        = 7,   // uint32, exactly four bytes on the wire.\n    TYPE_BOOL           = 8,   // bool, varint on the wire.\n    TYPE_STRING         = 9,   // UTF-8 text.\n    TYPE_GROUP          = 10,  // Tag-delimited message.  
Deprecated.\n    TYPE_MESSAGE        = 11,  // Length-delimited message.\n\n    TYPE_BYTES          = 12,  // Arbitrary byte array.\n    TYPE_UINT32         = 13,  // uint32, varint on the wire\n    TYPE_ENUM           = 14,  // Enum, varint on the wire\n    TYPE_SFIXED32       = 15,  // int32, exactly four bytes on the wire\n    TYPE_SFIXED64       = 16,  // int64, exactly eight bytes on the wire\n    TYPE_SINT32         = 17,  // int32, ZigZag-encoded varint on the wire\n    TYPE_SINT64         = 18,  // int64, ZigZag-encoded varint on the wire\n\n    MAX_TYPE            = 18,  // Constant useful for defining lookup tables\n                               // indexed by Type.\n  };\n\n  // Specifies the C++ data type used to represent the field.  There is a\n  // fixed mapping from Type to CppType where each Type maps to exactly one\n  // CppType.  0 is reserved for errors.\n  enum CppType {\n    CPPTYPE_INT32       = 1,     // TYPE_INT32, TYPE_SINT32, TYPE_SFIXED32\n    CPPTYPE_INT64       = 2,     // TYPE_INT64, TYPE_SINT64, TYPE_SFIXED64\n    CPPTYPE_UINT32      = 3,     // TYPE_UINT32, TYPE_FIXED32\n    CPPTYPE_UINT64      = 4,     // TYPE_UINT64, TYPE_FIXED64\n    CPPTYPE_DOUBLE      = 5,     // TYPE_DOUBLE\n    CPPTYPE_FLOAT       = 6,     // TYPE_FLOAT\n    CPPTYPE_BOOL        = 7,     // TYPE_BOOL\n    CPPTYPE_ENUM        = 8,     // TYPE_ENUM\n    CPPTYPE_STRING      = 9,     // TYPE_STRING, TYPE_BYTES\n    CPPTYPE_MESSAGE     = 10,    // TYPE_MESSAGE, TYPE_GROUP\n\n    MAX_CPPTYPE         = 10,    // Constant useful for defining lookup tables\n                                 // indexed by CppType.\n  };\n\n  // Identifies whether the field is optional, required, or repeated.  
0 is\n  // reserved for errors.\n  enum Label {\n    LABEL_OPTIONAL      = 1,    // optional\n    LABEL_REQUIRED      = 2,    // required\n    LABEL_REPEATED      = 3,    // repeated\n\n    MAX_LABEL           = 3,    // Constant useful for defining lookup tables\n                                // indexed by Label.\n  };\n\n  // Valid field numbers are positive integers up to kMaxNumber.\n  static const int kMaxNumber = (1 << 29) - 1;\n\n  // First field number reserved for the protocol buffer library implementation.\n  // Users may not declare fields that use reserved numbers.\n  static const int kFirstReservedNumber = 19000;\n  // Last field number reserved for the protocol buffer library implementation.\n  // Users may not declare fields that use reserved numbers.\n  static const int kLastReservedNumber  = 19999;\n\n  const string& name() const;        // Name of this field within the message.\n  const string& full_name() const;   // Fully-qualified name of the field.\n  const string& json_name() const;   // JSON name of this field.\n  const FileDescriptor* file() const;// File in which this field was defined.\n  bool is_extension() const;         // Is this an extension field?\n  int number() const;                // Declared tag number.\n\n  // Same as name() except converted to lower-case.  This (and especially the\n  // FindFieldByLowercaseName() method) can be useful when parsing formats\n  // which prefer to use lowercase naming style.  (Although, technically\n  // field names should be lowercased anyway according to the protobuf style\n  // guide, so this only makes a difference when dealing with old .proto files\n  // which do not follow the guide.)\n  const string& lowercase_name() const;\n\n  // Same as name() except converted to camel-case.  In this conversion, any\n  // time an underscore appears in the name, it is removed and the next\n  // letter is capitalized.  Furthermore, the first letter of the name is\n  // lower-cased.  
Examples:\n  //   FooBar -> fooBar\n  //   foo_bar -> fooBar\n  //   fooBar -> fooBar\n  // This (and especially the FindFieldByCamelcaseName() method) can be useful\n  // when parsing formats which prefer to use camel-case naming style.\n  const string& camelcase_name() const;\n\n  Type type() const;                  // Declared type of this field.\n  const char* type_name() const;      // Name of the declared type.\n  CppType cpp_type() const;           // C++ type of this field.\n  const char* cpp_type_name() const;  // Name of the C++ type.\n  Label label() const;                // optional/required/repeated\n\n  bool is_required() const;      // shorthand for label() == LABEL_REQUIRED\n  bool is_optional() const;      // shorthand for label() == LABEL_OPTIONAL\n  bool is_repeated() const;      // shorthand for label() == LABEL_REPEATED\n  bool is_packable() const;      // shorthand for is_repeated() &&\n                                 //               IsTypePackable(type())\n  bool is_packed() const;        // shorthand for is_packable() &&\n                                 //               options().packed()\n  bool is_map() const;           // shorthand for type() == TYPE_MESSAGE &&\n                                 // message_type()->options().map_entry()\n\n  // Index of this field within the message's field array, or the file or\n  // extension scope's extensions array.\n  int index() const;\n\n  // Does this field have an explicitly-declared default value?\n  bool has_default_value() const;\n\n  // Whether the user has specified the json_name field option in the .proto\n  // file.\n  bool has_json_name() const;\n\n  // Get the field default value if cpp_type() == CPPTYPE_INT32.  If no\n  // explicit default was defined, the default is 0.\n  int32 default_value_int32() const;\n  // Get the field default value if cpp_type() == CPPTYPE_INT64.  
If no\n  // explicit default was defined, the default is 0.\n  int64 default_value_int64() const;\n  // Get the field default value if cpp_type() == CPPTYPE_UINT32.  If no\n  // explicit default was defined, the default is 0.\n  uint32 default_value_uint32() const;\n  // Get the field default value if cpp_type() == CPPTYPE_UINT64.  If no\n  // explicit default was defined, the default is 0.\n  uint64 default_value_uint64() const;\n  // Get the field default value if cpp_type() == CPPTYPE_FLOAT.  If no\n  // explicit default was defined, the default is 0.0.\n  float default_value_float() const;\n  // Get the field default value if cpp_type() == CPPTYPE_DOUBLE.  If no\n  // explicit default was defined, the default is 0.0.\n  double default_value_double() const;\n  // Get the field default value if cpp_type() == CPPTYPE_BOOL.  If no\n  // explicit default was defined, the default is false.\n  bool default_value_bool() const;\n  // Get the field default value if cpp_type() == CPPTYPE_ENUM.  If no\n  // explicit default was defined, the default is the first value defined\n  // in the enum type (all enum types are required to have at least one value).\n  // This never returns NULL.\n  const EnumValueDescriptor* default_value_enum() const;\n  // Get the field default value if cpp_type() == CPPTYPE_STRING.  If no\n  // explicit default was defined, the default is the empty string.\n  const string& default_value_string() const;\n\n  // The Descriptor for the message of which this is a field.  For extensions,\n  // this is the extended type.  Never NULL.\n  const Descriptor* containing_type() const;\n\n  // If the field is a member of a oneof, this is the one, otherwise this is\n  // NULL.\n  const OneofDescriptor* containing_oneof() const;\n\n  // If the field is a member of a oneof, returns the index in that oneof.\n  int index_in_oneof() const;\n\n  // An extension may be declared within the scope of another message.  
If this\n  // field is an extension (is_extension() is true), then extension_scope()\n  // returns that message, or NULL if the extension was declared at global\n  // scope.  If this is not an extension, extension_scope() is undefined (may\n  // assert-fail).\n  const Descriptor* extension_scope() const;\n\n  // If type is TYPE_MESSAGE or TYPE_GROUP, returns a descriptor for the\n  // message or the group type.  Otherwise, returns null.\n  const Descriptor* message_type() const;\n  // If type is TYPE_ENUM, returns a descriptor for the enum.  Otherwise,\n  // returns null.\n  const EnumDescriptor* enum_type() const;\n\n  // Get the FieldOptions for this field.  This includes things listed in\n  // square brackets after the field definition.  E.g., the field:\n  //   optional string text = 1 [ctype=CORD];\n  // has the \"ctype\" option set.  Allowed options are defined by FieldOptions\n  // in google/protobuf/descriptor.proto, and any available extensions of that\n  // message.\n  const FieldOptions& options() const;\n\n  // See Descriptor::CopyTo().\n  void CopyTo(FieldDescriptorProto* proto) const;\n\n  // See Descriptor::DebugString().\n  string DebugString() const;\n\n  // See Descriptor::DebugStringWithOptions().\n  string DebugStringWithOptions(const DebugStringOptions& options) const;\n\n  // Helper method to get the CppType for a particular Type.\n  static CppType TypeToCppType(Type type);\n\n  // Helper method to get the name of a Type.\n  static const char* TypeName(Type type);\n\n  // Helper method to get the name of a CppType.\n  static const char* CppTypeName(CppType cpp_type);\n\n  // Return true iff [packed = true] is valid for fields of this type.\n  static inline bool IsTypePackable(Type field_type);\n\n  // Source Location ---------------------------------------------------\n\n  // Updates |*out_location| to the source location of the complete\n  // extent of this field declaration.  
Returns false and leaves\n  // |*out_location| unchanged iff location information was not available.\n  bool GetSourceLocation(SourceLocation* out_location) const;\n\n private:\n  typedef FieldOptions OptionsType;\n\n  // Allows access to GetLocationPath for annotations.\n  friend class ::google::protobuf::io::Printer;\n\n  // Fill the json_name field of FieldDescriptorProto.\n  void CopyJsonNameTo(FieldDescriptorProto* proto) const;\n\n  // See Descriptor::DebugString().\n  enum PrintLabelFlag { PRINT_LABEL, OMIT_LABEL };\n  void DebugString(int depth, PrintLabelFlag print_label_flag,\n                   string* contents, const DebugStringOptions& options) const;\n\n  // formats the default value appropriately and returns it as a string.\n  // Must have a default value to call this. If quote_string_type is true, then\n  // types of CPPTYPE_STRING whill be surrounded by quotes and CEscaped.\n  string DefaultValueAsString(bool quote_string_type) const;\n\n  // Helper function that returns the field type name for DebugString.\n  string FieldTypeNameDebugString() const;\n\n  // Walks up the descriptor tree to generate the source location path\n  // to this descriptor from the file root.\n  void GetLocationPath(std::vector<int>* output) const;\n\n  const string* name_;\n  const string* full_name_;\n  const string* lowercase_name_;\n  const string* camelcase_name_;\n  // Whether the user has specified the json_name field option in the .proto\n  // file.\n  bool has_json_name_;\n  // If has_json_name_ is true, it's the value specified by the user.\n  // Otherwise, it has the same value as camelcase_name_.\n  const string* json_name_;\n  const FileDescriptor* file_;\n  int number_;\n  Type type_;\n  Label label_;\n  bool is_extension_;\n  int index_in_oneof_;\n  const Descriptor* containing_type_;\n  const OneofDescriptor* containing_oneof_;\n  const Descriptor* extension_scope_;\n  const Descriptor* message_type_;\n  const EnumDescriptor* enum_type_;\n  const 
FieldOptions* options_;\n  // IMPORTANT:  If you add a new field, make sure to search for all instances\n  // of Allocate<FieldDescriptor>() and AllocateArray<FieldDescriptor>() in\n  // descriptor.cc and update them to initialize the field.\n\n  bool has_default_value_;\n  union {\n    int32  default_value_int32_;\n    int64  default_value_int64_;\n    uint32 default_value_uint32_;\n    uint64 default_value_uint64_;\n    float  default_value_float_;\n    double default_value_double_;\n    bool   default_value_bool_;\n\n    const EnumValueDescriptor* default_value_enum_;\n    const string* default_value_string_;\n  };\n\n  static const CppType kTypeToCppTypeMap[MAX_TYPE + 1];\n\n  static const char * const kTypeToName[MAX_TYPE + 1];\n\n  static const char * const kCppTypeToName[MAX_CPPTYPE + 1];\n\n  static const char * const kLabelToName[MAX_LABEL + 1];\n\n  // Must be constructed using DescriptorPool.\n  FieldDescriptor() {}\n  friend class DescriptorBuilder;\n  friend class FileDescriptor;\n  friend class Descriptor;\n  friend class OneofDescriptor;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(FieldDescriptor);\n};\n\n// Describes a oneof defined in a message type.\nclass LIBPROTOBUF_EXPORT OneofDescriptor {\n public:\n  const string& name() const;       // Name of this oneof.\n  const string& full_name() const;  // Fully-qualified name of the oneof.\n\n  // Index of this oneof within the message's oneof array.\n  int index() const;\n\n  // The Descriptor for the message containing this oneof.\n  const Descriptor* containing_type() const;\n\n  // The number of (non-extension) fields which are members of this oneof.\n  int field_count() const;\n  // Get a member of this oneof, in the order in which they were declared in the\n  // .proto file.  
Does not include extensions.\n  const FieldDescriptor* field(int index) const;\n\n  const OneofOptions& options() const;\n\n  // See Descriptor::CopyTo().\n  void CopyTo(OneofDescriptorProto* proto) const;\n\n  // See Descriptor::DebugString().\n  string DebugString() const;\n\n  // See Descriptor::DebugStringWithOptions().\n  string DebugStringWithOptions(const DebugStringOptions& options) const;\n\n  // Source Location ---------------------------------------------------\n\n  // Updates |*out_location| to the source location of the complete\n  // extent of this oneof declaration.  Returns false and leaves\n  // |*out_location| unchanged iff location information was not available.\n  bool GetSourceLocation(SourceLocation* out_location) const;\n\n private:\n  typedef OneofOptions OptionsType;\n\n  // Allows access to GetLocationPath for annotations.\n  friend class ::google::protobuf::io::Printer;\n\n  // See Descriptor::DebugString().\n  void DebugString(int depth, string* contents,\n                   const DebugStringOptions& options) const;\n\n  // Walks up the descriptor tree to generate the source location path\n  // to this descriptor from the file root.\n  void GetLocationPath(std::vector<int>* output) const;\n\n  const string* name_;\n  const string* full_name_;\n  const Descriptor* containing_type_;\n  bool is_extendable_;\n  int field_count_;\n  const FieldDescriptor** fields_;\n  const OneofOptions* options_;\n\n  // IMPORTANT:  If you add a new field, make sure to search for all instances\n  // of Allocate<OneofDescriptor>() and AllocateArray<OneofDescriptor>()\n  // in descriptor.cc and update them to initialize the field.\n\n  // Must be constructed using DescriptorPool.\n  OneofDescriptor() {}\n  friend class DescriptorBuilder;\n  friend class Descriptor;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(OneofDescriptor);\n};\n\n// Describes an enum type defined in a .proto file.  
To get the EnumDescriptor\n// for a generated enum type, call TypeName_descriptor().  Use DescriptorPool\n// to construct your own descriptors.\nclass LIBPROTOBUF_EXPORT EnumDescriptor {\n public:\n  // The name of this enum type in the containing scope.\n  const string& name() const;\n\n  // The fully-qualified name of the enum type, scope delimited by periods.\n  const string& full_name() const;\n\n  // Index of this enum within the file or containing message's enum array.\n  int index() const;\n\n  // The .proto file in which this enum type was defined.  Never NULL.\n  const FileDescriptor* file() const;\n\n  // The number of values for this EnumDescriptor.  Guaranteed to be greater\n  // than zero.\n  int value_count() const;\n  // Gets a value by index, where 0 <= index < value_count().\n  // These are returned in the order they were defined in the .proto file.\n  const EnumValueDescriptor* value(int index) const;\n\n  // Looks up a value by name.  Returns NULL if no such value exists.\n  const EnumValueDescriptor* FindValueByName(const string& name) const;\n  // Looks up a value by number.  Returns NULL if no such value exists.  If\n  // multiple values have this number, the first one defined is returned.\n  const EnumValueDescriptor* FindValueByNumber(int number) const;\n\n  // If this enum type is nested in a message type, this is that message type.\n  // Otherwise, NULL.\n  const Descriptor* containing_type() const;\n\n  // Get options for this enum type.  These are specified in the .proto file by\n  // placing lines like \"option foo = 1234;\" in the enum definition.  
Allowed\n  // options are defined by EnumOptions in google/protobuf/descriptor.proto,\n  // and any available extensions of that message.\n  const EnumOptions& options() const;\n\n  // See Descriptor::CopyTo().\n  void CopyTo(EnumDescriptorProto* proto) const;\n\n  // See Descriptor::DebugString().\n  string DebugString() const;\n\n  // See Descriptor::DebugStringWithOptions().\n  string DebugStringWithOptions(const DebugStringOptions& options) const;\n\n\n  // Returns true if this is a placeholder for an unknown enum. This will\n  // only be the case if this descriptor comes from a DescriptorPool\n  // with AllowUnknownDependencies() set.\n  bool is_placeholder() const;\n\n  // Source Location ---------------------------------------------------\n\n  // Updates |*out_location| to the source location of the complete\n  // extent of this enum declaration.  Returns false and leaves\n  // |*out_location| unchanged iff location information was not available.\n  bool GetSourceLocation(SourceLocation* out_location) const;\n\n private:\n  typedef EnumOptions OptionsType;\n\n  // Allows access to GetLocationPath for annotations.\n  friend class ::google::protobuf::io::Printer;\n\n  // Looks up a value by number.  If the value does not exist, dynamically\n  // creates a new EnumValueDescriptor for that value, assuming that it was\n  // unknown. 
If a new descriptor is created, this is done in a thread-safe way,\n  // and future calls will return the same value descriptor pointer.\n  //\n  // This is private but is used by GeneratedMessageReflection (which is\n  // friended below) to return a valid EnumValueDescriptor from GetEnum() when\n  // this feature is enabled.\n  const EnumValueDescriptor*\n      FindValueByNumberCreatingIfUnknown(int number) const;\n\n\n  // See Descriptor::DebugString().\n  void DebugString(int depth, string *contents,\n                   const DebugStringOptions& options) const;\n\n  // Walks up the descriptor tree to generate the source location path\n  // to this descriptor from the file root.\n  void GetLocationPath(std::vector<int>* output) const;\n\n  const string* name_;\n  const string* full_name_;\n  const FileDescriptor* file_;\n  const Descriptor* containing_type_;\n  const EnumOptions* options_;\n\n  // True if this is a placeholder for an unknown type.\n  bool is_placeholder_;\n  // True if this is a placeholder and the type name wasn't fully-qualified.\n  bool is_unqualified_placeholder_;\n\n  int value_count_;\n  EnumValueDescriptor* values_;\n  // IMPORTANT:  If you add a new field, make sure to search for all instances\n  // of Allocate<EnumDescriptor>() and AllocateArray<EnumDescriptor>() in\n  // descriptor.cc and update them to initialize the field.\n\n  // Must be constructed using DescriptorPool.\n  EnumDescriptor() {}\n  friend class DescriptorBuilder;\n  friend class Descriptor;\n  friend class FieldDescriptor;\n  friend class EnumValueDescriptor;\n  friend class FileDescriptor;\n  friend class internal::GeneratedMessageReflection;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(EnumDescriptor);\n};\n\n// Describes an individual enum constant of a particular type.  To get the\n// EnumValueDescriptor for a given enum value, first get the EnumDescriptor\n// for its type, then use EnumDescriptor::FindValueByName() or\n// EnumDescriptor::FindValueByNumber().  
Use DescriptorPool to construct\n// your own descriptors.\nclass LIBPROTOBUF_EXPORT EnumValueDescriptor {\n public:\n  const string& name() const;  // Name of this enum constant.\n  int index() const;           // Index within the enums's Descriptor.\n  int number() const;          // Numeric value of this enum constant.\n\n  // The full_name of an enum value is a sibling symbol of the enum type.\n  // e.g. the full name of FieldDescriptorProto::TYPE_INT32 is actually\n  // \"google.protobuf.FieldDescriptorProto.TYPE_INT32\", NOT\n  // \"google.protobuf.FieldDescriptorProto.Type.TYPE_INT32\".  This is to conform\n  // with C++ scoping rules for enums.\n  const string& full_name() const;\n\n  // The type of this value.  Never NULL.\n  const EnumDescriptor* type() const;\n\n  // Get options for this enum value.  These are specified in the .proto file\n  // by adding text like \"[foo = 1234]\" after an enum value definition.\n  // Allowed options are defined by EnumValueOptions in\n  // google/protobuf/descriptor.proto, and any available extensions of that\n  // message.\n  const EnumValueOptions& options() const;\n\n  // See Descriptor::CopyTo().\n  void CopyTo(EnumValueDescriptorProto* proto) const;\n\n  // See Descriptor::DebugString().\n  string DebugString() const;\n\n  // See Descriptor::DebugStringWithOptions().\n  string DebugStringWithOptions(const DebugStringOptions& options) const;\n\n\n  // Source Location ---------------------------------------------------\n\n  // Updates |*out_location| to the source location of the complete\n  // extent of this enum value declaration.  
Returns false and leaves\n  // |*out_location| unchanged iff location information was not available.\n  bool GetSourceLocation(SourceLocation* out_location) const;\n\n private:\n  typedef EnumValueOptions OptionsType;\n\n  // Allows access to GetLocationPath for annotations.\n  friend class ::google::protobuf::io::Printer;\n\n  // See Descriptor::DebugString().\n  void DebugString(int depth, string *contents,\n                   const DebugStringOptions& options) const;\n\n  // Walks up the descriptor tree to generate the source location path\n  // to this descriptor from the file root.\n  void GetLocationPath(std::vector<int>* output) const;\n\n  const string* name_;\n  const string* full_name_;\n  int number_;\n  const EnumDescriptor* type_;\n  const EnumValueOptions* options_;\n  // IMPORTANT:  If you add a new field, make sure to search for all instances\n  // of Allocate<EnumValueDescriptor>() and AllocateArray<EnumValueDescriptor>()\n  // in descriptor.cc and update them to initialize the field.\n\n  // Must be constructed using DescriptorPool.\n  EnumValueDescriptor() {}\n  friend class DescriptorBuilder;\n  friend class EnumDescriptor;\n  friend class FileDescriptorTables;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(EnumValueDescriptor);\n};\n\n// Describes an RPC service.  To get the ServiceDescriptor for a service,\n// call Service::GetDescriptor().  Generated service classes also have a\n// static method called descriptor() which returns the type's\n// ServiceDescriptor.  Use DescriptorPool to construct your own descriptors.\nclass LIBPROTOBUF_EXPORT ServiceDescriptor {\n public:\n  // The name of the service, not including its containing scope.\n  const string& name() const;\n  // The fully-qualified name of the service, scope delimited by periods.\n  const string& full_name() const;\n  // Index of this service within the file's services array.\n  int index() const;\n\n  // The .proto file in which this service was defined.  
Never NULL.\n  const FileDescriptor* file() const;\n\n  // Get options for this service type.  These are specified in the .proto file\n  // by placing lines like \"option foo = 1234;\" in the service definition.\n  // Allowed options are defined by ServiceOptions in\n  // google/protobuf/descriptor.proto, and any available extensions of that\n  // message.\n  const ServiceOptions& options() const;\n\n  // The number of methods this service defines.\n  int method_count() const;\n  // Gets a MethodDescriptor by index, where 0 <= index < method_count().\n  // These are returned in the order they were defined in the .proto file.\n  const MethodDescriptor* method(int index) const;\n\n  // Look up a MethodDescriptor by name.\n  const MethodDescriptor* FindMethodByName(const string& name) const;\n  // See Descriptor::CopyTo().\n  void CopyTo(ServiceDescriptorProto* proto) const;\n\n  // See Descriptor::DebugString().\n  string DebugString() const;\n\n  // See Descriptor::DebugStringWithOptions().\n  string DebugStringWithOptions(const DebugStringOptions& options) const;\n\n\n  // Source Location ---------------------------------------------------\n\n  // Updates |*out_location| to the source location of the complete\n  // extent of this service declaration.  
Returns false and leaves\n  // |*out_location| unchanged iff location information was not available.\n  bool GetSourceLocation(SourceLocation* out_location) const;\n\n private:\n  typedef ServiceOptions OptionsType;\n\n  // Allows access to GetLocationPath for annotations.\n  friend class ::google::protobuf::io::Printer;\n\n  // See Descriptor::DebugString().\n  void DebugString(string *contents, const DebugStringOptions& options) const;\n\n  // Walks up the descriptor tree to generate the source location path\n  // to this descriptor from the file root.\n  void GetLocationPath(std::vector<int>* output) const;\n\n  const string* name_;\n  const string* full_name_;\n  const FileDescriptor* file_;\n  const ServiceOptions* options_;\n  int method_count_;\n  MethodDescriptor* methods_;\n  // IMPORTANT:  If you add a new field, make sure to search for all instances\n  // of Allocate<ServiceDescriptor>() and AllocateArray<ServiceDescriptor>() in\n  // descriptor.cc and update them to initialize the field.\n\n  // Must be constructed using DescriptorPool.\n  ServiceDescriptor() {}\n  friend class DescriptorBuilder;\n  friend class FileDescriptor;\n  friend class MethodDescriptor;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ServiceDescriptor);\n};\n\n// Describes an individual service method.  To obtain a MethodDescriptor given\n// a service, first get its ServiceDescriptor, then call\n// ServiceDescriptor::FindMethodByName().  Use DescriptorPool to construct your\n// own descriptors.\nclass LIBPROTOBUF_EXPORT MethodDescriptor {\n public:\n  // Name of this method, not including containing scope.\n  const string& name() const;\n  // The fully-qualified name of the method, scope delimited by periods.\n  const string& full_name() const;\n  // Index within the service's Descriptor.\n  int index() const;\n\n  // Gets the service to which this method belongs.  
Never NULL.\n  const ServiceDescriptor* service() const;\n\n  // Gets the type of protocol message which this method accepts as input.\n  const Descriptor* input_type() const;\n  // Gets the type of protocol message which this message produces as output.\n  const Descriptor* output_type() const;\n\n  // Gets whether the client streams multiple requests.\n  bool client_streaming() const;\n  // Gets whether the server streams multiple responses.\n  bool server_streaming() const;\n\n  // Get options for this method.  These are specified in the .proto file by\n  // placing lines like \"option foo = 1234;\" in curly-braces after a method\n  // declaration.  Allowed options are defined by MethodOptions in\n  // google/protobuf/descriptor.proto, and any available extensions of that\n  // message.\n  const MethodOptions& options() const;\n\n  // See Descriptor::CopyTo().\n  void CopyTo(MethodDescriptorProto* proto) const;\n\n  // See Descriptor::DebugString().\n  string DebugString() const;\n\n  // See Descriptor::DebugStringWithOptions().\n  string DebugStringWithOptions(const DebugStringOptions& options) const;\n\n\n  // Source Location ---------------------------------------------------\n\n  // Updates |*out_location| to the source location of the complete\n  // extent of this method declaration.  
Returns false and leaves\n  // |*out_location| unchanged iff location information was not available.\n  bool GetSourceLocation(SourceLocation* out_location) const;\n\n private:\n  typedef MethodOptions OptionsType;\n\n  // Allows access to GetLocationPath for annotations.\n  friend class ::google::protobuf::io::Printer;\n\n  // See Descriptor::DebugString().\n  void DebugString(int depth, string *contents,\n                   const DebugStringOptions& options) const;\n\n  // Walks up the descriptor tree to generate the source location path\n  // to this descriptor from the file root.\n  void GetLocationPath(std::vector<int>* output) const;\n\n  const string* name_;\n  const string* full_name_;\n  const ServiceDescriptor* service_;\n  const Descriptor* input_type_;\n  const Descriptor* output_type_;\n  const MethodOptions* options_;\n  bool client_streaming_;\n  bool server_streaming_;\n  // IMPORTANT:  If you add a new field, make sure to search for all instances\n  // of Allocate<MethodDescriptor>() and AllocateArray<MethodDescriptor>() in\n  // descriptor.cc and update them to initialize the field.\n\n  // Must be constructed using DescriptorPool.\n  MethodDescriptor() {}\n  friend class DescriptorBuilder;\n  friend class ServiceDescriptor;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MethodDescriptor);\n};\n\n\n// Describes a whole .proto file.  To get the FileDescriptor for a compiled-in\n// file, get the descriptor for something defined in that file and call\n// descriptor->file().  Use DescriptorPool to construct your own descriptors.\nclass LIBPROTOBUF_EXPORT FileDescriptor {\n public:\n  // The filename, relative to the source tree.\n  // e.g. \"google/protobuf/descriptor.proto\"\n  const string& name() const;\n\n  // The package, e.g. \"google.protobuf.compiler\".\n  const string& package() const;\n\n  // The DescriptorPool in which this FileDescriptor and all its contents were\n  // allocated.  
Never NULL.\n  const DescriptorPool* pool() const;\n\n  // The number of files imported by this one.\n  int dependency_count() const;\n  // Gets an imported file by index, where 0 <= index < dependency_count().\n  // These are returned in the order they were defined in the .proto file.\n  const FileDescriptor* dependency(int index) const;\n\n  // The number of files public imported by this one.\n  // The public dependency list is a subset of the dependency list.\n  int public_dependency_count() const;\n  // Gets a public imported file by index, where 0 <= index <\n  // public_dependency_count().\n  // These are returned in the order they were defined in the .proto file.\n  const FileDescriptor* public_dependency(int index) const;\n\n  // The number of files that are imported for weak fields.\n  // The weak dependency list is a subset of the dependency list.\n  int weak_dependency_count() const;\n  // Gets a weak imported file by index, where 0 <= index <\n  // weak_dependency_count().\n  // These are returned in the order they were defined in the .proto file.\n  const FileDescriptor* weak_dependency(int index) const;\n\n  // Number of top-level message types defined in this file.  (This does not\n  // include nested types.)\n  int message_type_count() const;\n  // Gets a top-level message type, where 0 <= index < message_type_count().\n  // These are returned in the order they were defined in the .proto file.\n  const Descriptor* message_type(int index) const;\n\n  // Number of top-level enum types defined in this file.  
(This does not\n  // include nested types.)\n  int enum_type_count() const;\n  // Gets a top-level enum type, where 0 <= index < enum_type_count().\n  // These are returned in the order they were defined in the .proto file.\n  const EnumDescriptor* enum_type(int index) const;\n\n  // Number of services defined in this file.\n  int service_count() const;\n  // Gets a service, where 0 <= index < service_count().\n  // These are returned in the order they were defined in the .proto file.\n  const ServiceDescriptor* service(int index) const;\n\n  // Number of extensions defined at file scope.  (This does not include\n  // extensions nested within message types.)\n  int extension_count() const;\n  // Gets an extension's descriptor, where 0 <= index < extension_count().\n  // These are returned in the order they were defined in the .proto file.\n  const FieldDescriptor* extension(int index) const;\n\n  // Get options for this file.  These are specified in the .proto file by\n  // placing lines like \"option foo = 1234;\" at the top level, outside of any\n  // other definitions.  Allowed options are defined by FileOptions in\n  // google/protobuf/descriptor.proto, and any available extensions of that\n  // message.\n  const FileOptions& options() const;\n\n  // Syntax of this file.\n  enum Syntax {\n    SYNTAX_UNKNOWN = 0,\n    SYNTAX_PROTO2  = 2,\n    SYNTAX_PROTO3  = 3,\n  };\n  Syntax syntax() const;\n  static const char* SyntaxName(Syntax syntax);\n\n  // Find a top-level message type by name.  Returns NULL if not found.\n  const Descriptor* FindMessageTypeByName(const string& name) const;\n  // Find a top-level enum type by name.  Returns NULL if not found.\n  const EnumDescriptor* FindEnumTypeByName(const string& name) const;\n  // Find an enum value defined in any top-level enum by name.  Returns NULL if\n  // not found.\n  const EnumValueDescriptor* FindEnumValueByName(const string& name) const;\n  // Find a service definition by name.  
Returns NULL if not found.\n  const ServiceDescriptor* FindServiceByName(const string& name) const;\n  // Find a top-level extension definition by name.  Returns NULL if not found.\n  const FieldDescriptor* FindExtensionByName(const string& name) const;\n  // Similar to FindExtensionByName(), but searches by lowercased-name.  See\n  // Descriptor::FindFieldByLowercaseName().\n  const FieldDescriptor* FindExtensionByLowercaseName(const string& name) const;\n  // Similar to FindExtensionByName(), but searches by camelcased-name.  See\n  // Descriptor::FindFieldByCamelcaseName().\n  const FieldDescriptor* FindExtensionByCamelcaseName(const string& name) const;\n\n  // See Descriptor::CopyTo().\n  // Notes:\n  // - This method does NOT copy source code information since it is relatively\n  //   large and rarely needed.  See CopySourceCodeInfoTo() below.\n  void CopyTo(FileDescriptorProto* proto) const;\n  // Write the source code information of this FileDescriptor into the given\n  // FileDescriptorProto.  See CopyTo() above.\n  void CopySourceCodeInfoTo(FileDescriptorProto* proto) const;\n  // Fill the json_name field of FieldDescriptorProto for all fields. Can only\n  // be called after CopyTo().\n  void CopyJsonNameTo(FileDescriptorProto* proto) const;\n\n  // See Descriptor::DebugString().\n  string DebugString() const;\n\n  // See Descriptor::DebugStringWithOptions().\n  string DebugStringWithOptions(const DebugStringOptions& options) const;\n\n  // Returns true if this is a placeholder for an unknown file. 
This will\n  // only be the case if this descriptor comes from a DescriptorPool\n  // with AllowUnknownDependencies() set.\n  bool is_placeholder() const;\n\n  // Updates |*out_location| to the source location of the complete extent of\n  // this file declaration (namely, the empty path).\n  bool GetSourceLocation(SourceLocation* out_location) const;\n\n  // Updates |*out_location| to the source location of the complete\n  // extent of the declaration or declaration-part denoted by |path|.\n  // Returns false and leaves |*out_location| unchanged iff location\n  // information was not available.  (See SourceCodeInfo for\n  // description of path encoding.)\n  bool GetSourceLocation(const std::vector<int>& path,\n                         SourceLocation* out_location) const;\n\n private:\n  typedef FileOptions OptionsType;\n\n  const string* name_;\n  const string* package_;\n  const DescriptorPool* pool_;\n  int dependency_count_;\n  const FileDescriptor** dependencies_;\n  int public_dependency_count_;\n  int* public_dependencies_;\n  int weak_dependency_count_;\n  int* weak_dependencies_;\n  int message_type_count_;\n  Descriptor* message_types_;\n  int enum_type_count_;\n  EnumDescriptor* enum_types_;\n  int service_count_;\n  ServiceDescriptor* services_;\n  int extension_count_;\n  Syntax syntax_;\n  bool is_placeholder_;\n  FieldDescriptor* extensions_;\n  const FileOptions* options_;\n\n  const FileDescriptorTables* tables_;\n  const SourceCodeInfo* source_code_info_;\n  // IMPORTANT:  If you add a new field, make sure to search for all instances\n  // of Allocate<FileDescriptor>() and AllocateArray<FileDescriptor>() in\n  // descriptor.cc and update them to initialize the field.\n\n  FileDescriptor() {}\n  friend class DescriptorBuilder;\n  friend class Descriptor;\n  friend class FieldDescriptor;\n  friend class OneofDescriptor;\n  friend class EnumDescriptor;\n  friend class EnumValueDescriptor;\n  friend class MethodDescriptor;\n  friend class 
ServiceDescriptor;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(FileDescriptor);\n};\n\n// ===================================================================\n\n// Used to construct descriptors.\n//\n// Normally you won't want to build your own descriptors.  Message classes\n// constructed by the protocol compiler will provide them for you.  However,\n// if you are implementing Message on your own, or if you are writing a\n// program which can operate on totally arbitrary types and needs to load\n// them from some sort of database, you might need to.\n//\n// Since Descriptors are composed of a whole lot of cross-linked bits of\n// data that would be a pain to put together manually, the\n// DescriptorPool class is provided to make the process easier.  It can\n// take a FileDescriptorProto (defined in descriptor.proto), validate it,\n// and convert it to a set of nicely cross-linked Descriptors.\n//\n// DescriptorPool also helps with memory management.  Descriptors are\n// composed of many objects containing static data and pointers to each\n// other.  In all likelihood, when it comes time to delete this data,\n// you'll want to delete it all at once.  In fact, it is not uncommon to\n// have a whole pool of descriptors all cross-linked with each other which\n// you wish to delete all at once.  This class represents such a pool, and\n// handles the memory management for you.\n//\n// You can also search for descriptors within a DescriptorPool by name, and\n// extensions by number.\nclass LIBPROTOBUF_EXPORT DescriptorPool {\n public:\n  // Create a normal, empty DescriptorPool.\n  DescriptorPool();\n\n  // Constructs a DescriptorPool that, when it can't find something among the\n  // descriptors already in the pool, looks for it in the given\n  // DescriptorDatabase.\n  // Notes:\n  // - If a DescriptorPool is constructed this way, its BuildFile*() methods\n  //   must not be called (they will assert-fail).  
The only way to populate\n  //   the pool with descriptors is to call the Find*By*() methods.\n  // - The Find*By*() methods may block the calling thread if the\n  //   DescriptorDatabase blocks.  This in turn means that parsing messages\n  //   may block if they need to look up extensions.\n  // - The Find*By*() methods will use mutexes for thread-safety, thus making\n  //   them slower even when they don't have to fall back to the database.\n  //   In fact, even the Find*By*() methods of descriptor objects owned by\n  //   this pool will be slower, since they will have to obtain locks too.\n  // - An ErrorCollector may optionally be given to collect validation errors\n  //   in files loaded from the database.  If not given, errors will be printed\n  //   to GOOGLE_LOG(ERROR).  Remember that files are built on-demand, so this\n  //   ErrorCollector may be called from any thread that calls one of the\n  //   Find*By*() methods.\n  // - The DescriptorDatabase must not be mutated during the lifetime of\n  //   the DescriptorPool. Even if the client takes care to avoid data races,\n  //   changes to the content of the DescriptorDatabase may not be reflected\n  //   in subsequent lookups in the DescriptorPool.\n  class ErrorCollector;\n  explicit DescriptorPool(DescriptorDatabase* fallback_database,\n                          ErrorCollector* error_collector = NULL);\n\n  ~DescriptorPool();\n\n  // Get a pointer to the generated pool.  Generated protocol message classes\n  // which are compiled into the binary will allocate their descriptors in\n  // this pool.  Do not add your own descriptors to this pool.\n  static const DescriptorPool* generated_pool();\n\n\n  // Find a FileDescriptor in the pool by file name.  
Returns NULL if not\n  // found.\n  const FileDescriptor* FindFileByName(const string& name) const;\n\n  // Find the FileDescriptor in the pool which defines the given symbol.\n  // If any of the Find*ByName() methods below would succeed, then this is\n  // equivalent to calling that method and calling the result's file() method.\n  // Otherwise this returns NULL.\n  const FileDescriptor* FindFileContainingSymbol(\n      const string& symbol_name) const;\n\n  // Looking up descriptors ------------------------------------------\n  // These find descriptors by fully-qualified name.  These will find both\n  // top-level descriptors and nested descriptors.  They return NULL if not\n  // found.\n\n  const Descriptor* FindMessageTypeByName(const string& name) const;\n  const FieldDescriptor* FindFieldByName(const string& name) const;\n  const FieldDescriptor* FindExtensionByName(const string& name) const;\n  const OneofDescriptor* FindOneofByName(const string& name) const;\n  const EnumDescriptor* FindEnumTypeByName(const string& name) const;\n  const EnumValueDescriptor* FindEnumValueByName(const string& name) const;\n  const ServiceDescriptor* FindServiceByName(const string& name) const;\n  const MethodDescriptor* FindMethodByName(const string& name) const;\n\n  // Finds an extension of the given type by number.  The extendee must be\n  // a member of this DescriptorPool or one of its underlays.\n  const FieldDescriptor* FindExtensionByNumber(const Descriptor* extendee,\n                                               int number) const;\n\n  // Finds extensions of extendee. The extensions will be appended to\n  // out in an undefined order. 
Only extensions defined directly in\n  // this DescriptorPool or one of its underlays are guaranteed to be\n  // found: extensions defined in the fallback database might not be found\n  // depending on the database implementation.\n  void FindAllExtensions(const Descriptor* extendee,\n                         std::vector<const FieldDescriptor*>* out) const;\n\n  // Building descriptors --------------------------------------------\n\n  // When converting a FileDescriptorProto to a FileDescriptor, various\n  // errors might be detected in the input.  The caller may handle these\n  // programmatically by implementing an ErrorCollector.\n  class LIBPROTOBUF_EXPORT ErrorCollector {\n   public:\n    inline ErrorCollector() {}\n    virtual ~ErrorCollector();\n\n    // These constants specify what exact part of the construct is broken.\n    // This is useful e.g. for mapping the error back to an exact location\n    // in a .proto file.\n    enum ErrorLocation {\n      NAME,              // the symbol name, or the package name for files\n      NUMBER,            // field or extension range number\n      TYPE,              // field type\n      EXTENDEE,          // field extendee\n      DEFAULT_VALUE,     // field default value\n      INPUT_TYPE,        // method input type\n      OUTPUT_TYPE,       // method output type\n      OPTION_NAME,       // name in assignment\n      OPTION_VALUE,      // value in option assignment\n      OTHER              // some other problem\n    };\n\n    // Reports an error in the FileDescriptorProto. 
Use this function if the\n    // problem occurred should interrupt building the FileDescriptorProto.\n    virtual void AddError(\n      const string& filename,      // File name in which the error occurred.\n      const string& element_name,  // Full name of the erroneous element.\n      const Message* descriptor,   // Descriptor of the erroneous element.\n      ErrorLocation location,      // One of the location constants, above.\n      const string& message        // Human-readable error message.\n      ) = 0;\n\n    // Reports a warning in the FileDescriptorProto. Use this function if the\n    // problem occurred should NOT interrupt building the FileDescriptorProto.\n    virtual void AddWarning(\n      const string& /*filename*/,      // File name in which the error occurred.\n      const string& /*element_name*/,  // Full name of the erroneous element.\n      const Message* /*descriptor*/,   // Descriptor of the erroneous element.\n      ErrorLocation /*location*/,      // One of the location constants, above.\n      const string& /*message*/        // Human-readable error message.\n      ) {}\n\n   private:\n    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ErrorCollector);\n  };\n\n  // Convert the FileDescriptorProto to real descriptors and place them in\n  // this DescriptorPool.  All dependencies of the file must already be in\n  // the pool.  Returns the resulting FileDescriptor, or NULL if there were\n  // problems with the input (e.g. the message was invalid, or dependencies\n  // were missing).  
Details about the errors are written to GOOGLE_LOG(ERROR).\n  const FileDescriptor* BuildFile(const FileDescriptorProto& proto);\n\n  // Same as BuildFile() except errors are sent to the given ErrorCollector.\n  const FileDescriptor* BuildFileCollectingErrors(\n    const FileDescriptorProto& proto,\n    ErrorCollector* error_collector);\n\n  // By default, it is an error if a FileDescriptorProto contains references\n  // to types or other files that are not found in the DescriptorPool (or its\n  // backing DescriptorDatabase, if any).  If you call\n  // AllowUnknownDependencies(), however, then unknown types and files\n  // will be replaced by placeholder descriptors (which can be identified by\n  // the is_placeholder() method).  This can allow you to\n  // perform some useful operations with a .proto file even if you do not\n  // have access to other .proto files on which it depends.  However, some\n  // heuristics must be used to fill in the gaps in information, and these\n  // can lead to descriptors which are inaccurate.  For example, the\n  // DescriptorPool may be forced to guess whether an unknown type is a message\n  // or an enum, as well as what package it resides in.  Furthermore,\n  // placeholder types will not be discoverable via FindMessageTypeByName()\n  // and similar methods, which could confuse some descriptor-based algorithms.\n  // Generally, the results of this option should be handled with extreme care.\n  void AllowUnknownDependencies() { allow_unknown_ = true; }\n\n  // By default, weak imports are allowed to be missing, in which case we will\n  // use a placeholder for the dependency and convert the field to be an Empty\n  // message field. 
If you call EnforceWeakDependencies(true), however, the\n  // DescriptorPool will report a import not found error.\n  void EnforceWeakDependencies(bool enforce) { enforce_weak_ = enforce; }\n\n  // Internal stuff --------------------------------------------------\n  // These methods MUST NOT be called from outside the proto2 library.\n  // These methods may contain hidden pitfalls and may be removed in a\n  // future library version.\n\n  // Create a DescriptorPool which is overlaid on top of some other pool.\n  // If you search for a descriptor in the overlay and it is not found, the\n  // underlay will be searched as a backup.  If the underlay has its own\n  // underlay, that will be searched next, and so on.  This also means that\n  // files built in the overlay will be cross-linked with the underlay's\n  // descriptors if necessary.  The underlay remains property of the caller;\n  // it must remain valid for the lifetime of the newly-constructed pool.\n  //\n  // Example:  Say you want to parse a .proto file at runtime in order to use\n  // its type with a DynamicMessage.  Say this .proto file has dependencies,\n  // but you know that all the dependencies will be things that are already\n  // compiled into the binary.  For ease of use, you'd like to load the types\n  // right out of generated_pool() rather than have to parse redundant copies\n  // of all these .protos and runtime.  But, you don't want to add the parsed\n  // types directly into generated_pool(): this is not allowed, and would be\n  // bad design anyway.  So, instead, you could use generated_pool() as an\n  // underlay for a new DescriptorPool in which you add only the new file.\n  //\n  // WARNING:  Use of underlays can lead to many subtle gotchas.  Instead,\n  //   try to formulate what you want to do in terms of DescriptorDatabases.\n  explicit DescriptorPool(const DescriptorPool* underlay);\n\n  // Called by generated classes at init time to add their descriptors to\n  // generated_pool.  
Do NOT call this in your own code!  filename must be a\n  // permanent string (e.g. a string literal).\n  static void InternalAddGeneratedFile(\n      const void* encoded_file_descriptor, int size);\n\n\n  // For internal use only:  Gets a non-const pointer to the generated pool.\n  // This is called at static-initialization time only, so thread-safety is\n  // not a concern.  If both an underlay and a fallback database are present,\n  // the underlay takes precedence.\n  static DescriptorPool* internal_generated_pool();\n\n  // For internal use only:  Changes the behavior of BuildFile() such that it\n  // allows the file to make reference to message types declared in other files\n  // which it did not officially declare as dependencies.\n  void InternalDontEnforceDependencies();\n\n  // For internal use only.\n  void internal_set_underlay(const DescriptorPool* underlay) {\n    underlay_ = underlay;\n  }\n\n  // For internal (unit test) use only:  Returns true if a FileDescriptor has\n  // been constructed for the given file, false otherwise.  Useful for testing\n  // lazy descriptor initialization behavior.\n  bool InternalIsFileLoaded(const string& filename) const;\n\n\n  // Add a file to unused_import_track_files_. DescriptorBuilder will log\n  // warnings for those files if there is any unused import.\n  void AddUnusedImportTrackFile(const string& file_name);\n  void ClearUnusedImportTrackFiles();\n\n private:\n  friend class Descriptor;\n  friend class FieldDescriptor;\n  friend class EnumDescriptor;\n  friend class ServiceDescriptor;\n  friend class FileDescriptor;\n  friend class DescriptorBuilder;\n  friend class FileDescriptorTables;\n\n  // Return true if the given name is a sub-symbol of any non-package\n  // descriptor that already exists in the descriptor pool.  
(The full\n  // definition of such types is already known.)\n  bool IsSubSymbolOfBuiltType(const string& name) const;\n\n  // Tries to find something in the fallback database and link in the\n  // corresponding proto file.  Returns true if successful, in which case\n  // the caller should search for the thing again.  These are declared\n  // const because they are called by (semantically) const methods.\n  bool TryFindFileInFallbackDatabase(const string& name) const;\n  bool TryFindSymbolInFallbackDatabase(const string& name) const;\n  bool TryFindExtensionInFallbackDatabase(const Descriptor* containing_type,\n                                          int field_number) const;\n\n  // Like BuildFile() but called internally when the file has been loaded from\n  // fallback_database_.  Declared const because it is called by (semantically)\n  // const methods.\n  const FileDescriptor* BuildFileFromDatabase(\n    const FileDescriptorProto& proto) const;\n\n  // If fallback_database_ is NULL, this is NULL.  
Otherwise, this is a mutex\n  // which must be locked while accessing tables_.\n  Mutex* mutex_;\n\n  // See constructor.\n  DescriptorDatabase* fallback_database_;\n  ErrorCollector* default_error_collector_;\n  const DescriptorPool* underlay_;\n\n  // This class contains a lot of hash maps with complicated types that\n  // we'd like to keep out of the header.\n  class Tables;\n  google::protobuf::scoped_ptr<Tables> tables_;\n\n  bool enforce_dependencies_;\n  bool allow_unknown_;\n  bool enforce_weak_;\n  std::set<string> unused_import_track_files_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(DescriptorPool);\n};\n\n// inline methods ====================================================\n\n// These macros makes this repetitive code more readable.\n#define PROTOBUF_DEFINE_ACCESSOR(CLASS, FIELD, TYPE) \\\n  inline TYPE CLASS::FIELD() const { return FIELD##_; }\n\n// Strings fields are stored as pointers but returned as const references.\n#define PROTOBUF_DEFINE_STRING_ACCESSOR(CLASS, FIELD) \\\n  inline const string& CLASS::FIELD() const { return *FIELD##_; }\n\n// Arrays take an index parameter, obviously.\n#define PROTOBUF_DEFINE_ARRAY_ACCESSOR(CLASS, FIELD, TYPE) \\\n  inline TYPE CLASS::FIELD(int index) const { return FIELD##s_ + index; }\n\n#define PROTOBUF_DEFINE_OPTIONS_ACCESSOR(CLASS, TYPE) \\\n  inline const TYPE& CLASS::options() const { return *options_; }\n\nPROTOBUF_DEFINE_STRING_ACCESSOR(Descriptor, name)\nPROTOBUF_DEFINE_STRING_ACCESSOR(Descriptor, full_name)\nPROTOBUF_DEFINE_ACCESSOR(Descriptor, file, const FileDescriptor*)\nPROTOBUF_DEFINE_ACCESSOR(Descriptor, containing_type, const Descriptor*)\n\nPROTOBUF_DEFINE_ACCESSOR(Descriptor, field_count, int)\nPROTOBUF_DEFINE_ACCESSOR(Descriptor, oneof_decl_count, int)\nPROTOBUF_DEFINE_ACCESSOR(Descriptor, nested_type_count, int)\nPROTOBUF_DEFINE_ACCESSOR(Descriptor, enum_type_count, int)\n\nPROTOBUF_DEFINE_ARRAY_ACCESSOR(Descriptor, field, const FieldDescriptor*)\nPROTOBUF_DEFINE_ARRAY_ACCESSOR(Descriptor, 
oneof_decl, const OneofDescriptor*)\nPROTOBUF_DEFINE_ARRAY_ACCESSOR(Descriptor, nested_type, const Descriptor*)\nPROTOBUF_DEFINE_ARRAY_ACCESSOR(Descriptor, enum_type, const EnumDescriptor*)\n\nPROTOBUF_DEFINE_ACCESSOR(Descriptor, extension_range_count, int)\nPROTOBUF_DEFINE_ACCESSOR(Descriptor, extension_count, int)\nPROTOBUF_DEFINE_ARRAY_ACCESSOR(Descriptor, extension_range,\n                               const Descriptor::ExtensionRange*)\nPROTOBUF_DEFINE_ARRAY_ACCESSOR(Descriptor, extension,\n                               const FieldDescriptor*)\n\nPROTOBUF_DEFINE_ACCESSOR(Descriptor, reserved_range_count, int)\nPROTOBUF_DEFINE_ARRAY_ACCESSOR(Descriptor, reserved_range,\n                               const Descriptor::ReservedRange*)\nPROTOBUF_DEFINE_ACCESSOR(Descriptor, reserved_name_count, int)\n\nPROTOBUF_DEFINE_OPTIONS_ACCESSOR(Descriptor, MessageOptions)\nPROTOBUF_DEFINE_ACCESSOR(Descriptor, is_placeholder, bool)\n\nPROTOBUF_DEFINE_STRING_ACCESSOR(FieldDescriptor, name)\nPROTOBUF_DEFINE_STRING_ACCESSOR(FieldDescriptor, full_name)\nPROTOBUF_DEFINE_STRING_ACCESSOR(FieldDescriptor, json_name)\nPROTOBUF_DEFINE_STRING_ACCESSOR(FieldDescriptor, lowercase_name)\nPROTOBUF_DEFINE_STRING_ACCESSOR(FieldDescriptor, camelcase_name)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, file, const FileDescriptor*)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, number, int)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, is_extension, bool)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, type, FieldDescriptor::Type)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, label, FieldDescriptor::Label)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, containing_type, const Descriptor*)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, containing_oneof,\n                         const OneofDescriptor*)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, index_in_oneof, int)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, extension_scope, const Descriptor*)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, message_type, const 
Descriptor*)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, enum_type, const EnumDescriptor*)\nPROTOBUF_DEFINE_OPTIONS_ACCESSOR(FieldDescriptor, FieldOptions)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, has_default_value, bool)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, has_json_name, bool)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_int32 , int32 )\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_int64 , int64 )\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_uint32, uint32)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_uint64, uint64)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_float , float )\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_double, double)\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_bool  , bool  )\nPROTOBUF_DEFINE_ACCESSOR(FieldDescriptor, default_value_enum,\n                         const EnumValueDescriptor*)\nPROTOBUF_DEFINE_STRING_ACCESSOR(FieldDescriptor, default_value_string)\n\nPROTOBUF_DEFINE_STRING_ACCESSOR(OneofDescriptor, name)\nPROTOBUF_DEFINE_STRING_ACCESSOR(OneofDescriptor, full_name)\nPROTOBUF_DEFINE_ACCESSOR(OneofDescriptor, containing_type, const Descriptor*)\nPROTOBUF_DEFINE_ACCESSOR(OneofDescriptor, field_count, int)\nPROTOBUF_DEFINE_OPTIONS_ACCESSOR(OneofDescriptor, OneofOptions)\n\nPROTOBUF_DEFINE_STRING_ACCESSOR(EnumDescriptor, name)\nPROTOBUF_DEFINE_STRING_ACCESSOR(EnumDescriptor, full_name)\nPROTOBUF_DEFINE_ACCESSOR(EnumDescriptor, file, const FileDescriptor*)\nPROTOBUF_DEFINE_ACCESSOR(EnumDescriptor, containing_type, const Descriptor*)\nPROTOBUF_DEFINE_ACCESSOR(EnumDescriptor, value_count, int)\nPROTOBUF_DEFINE_ARRAY_ACCESSOR(EnumDescriptor, value,\n                               const EnumValueDescriptor*)\nPROTOBUF_DEFINE_OPTIONS_ACCESSOR(EnumDescriptor, EnumOptions)\nPROTOBUF_DEFINE_ACCESSOR(EnumDescriptor, is_placeholder, bool)\n\nPROTOBUF_DEFINE_STRING_ACCESSOR(EnumValueDescriptor, name)\nPROTOBUF_DEFINE_STRING_ACCESSOR(EnumValueDescriptor, 
full_name)\nPROTOBUF_DEFINE_ACCESSOR(EnumValueDescriptor, number, int)\nPROTOBUF_DEFINE_ACCESSOR(EnumValueDescriptor, type, const EnumDescriptor*)\nPROTOBUF_DEFINE_OPTIONS_ACCESSOR(EnumValueDescriptor, EnumValueOptions)\n\nPROTOBUF_DEFINE_STRING_ACCESSOR(ServiceDescriptor, name)\nPROTOBUF_DEFINE_STRING_ACCESSOR(ServiceDescriptor, full_name)\nPROTOBUF_DEFINE_ACCESSOR(ServiceDescriptor, file, const FileDescriptor*)\nPROTOBUF_DEFINE_ACCESSOR(ServiceDescriptor, method_count, int)\nPROTOBUF_DEFINE_ARRAY_ACCESSOR(ServiceDescriptor, method,\n                               const MethodDescriptor*)\nPROTOBUF_DEFINE_OPTIONS_ACCESSOR(ServiceDescriptor, ServiceOptions)\n\nPROTOBUF_DEFINE_STRING_ACCESSOR(MethodDescriptor, name)\nPROTOBUF_DEFINE_STRING_ACCESSOR(MethodDescriptor, full_name)\nPROTOBUF_DEFINE_ACCESSOR(MethodDescriptor, service, const ServiceDescriptor*)\nPROTOBUF_DEFINE_ACCESSOR(MethodDescriptor, input_type, const Descriptor*)\nPROTOBUF_DEFINE_ACCESSOR(MethodDescriptor, output_type, const Descriptor*)\nPROTOBUF_DEFINE_OPTIONS_ACCESSOR(MethodDescriptor, MethodOptions)\nPROTOBUF_DEFINE_ACCESSOR(MethodDescriptor, client_streaming, bool)\nPROTOBUF_DEFINE_ACCESSOR(MethodDescriptor, server_streaming, bool)\n\nPROTOBUF_DEFINE_STRING_ACCESSOR(FileDescriptor, name)\nPROTOBUF_DEFINE_STRING_ACCESSOR(FileDescriptor, package)\nPROTOBUF_DEFINE_ACCESSOR(FileDescriptor, pool, const DescriptorPool*)\nPROTOBUF_DEFINE_ACCESSOR(FileDescriptor, dependency_count, int)\nPROTOBUF_DEFINE_ACCESSOR(FileDescriptor, public_dependency_count, int)\nPROTOBUF_DEFINE_ACCESSOR(FileDescriptor, weak_dependency_count, int)\nPROTOBUF_DEFINE_ACCESSOR(FileDescriptor, message_type_count, int)\nPROTOBUF_DEFINE_ACCESSOR(FileDescriptor, enum_type_count, int)\nPROTOBUF_DEFINE_ACCESSOR(FileDescriptor, service_count, int)\nPROTOBUF_DEFINE_ACCESSOR(FileDescriptor, extension_count, int)\nPROTOBUF_DEFINE_OPTIONS_ACCESSOR(FileDescriptor, FileOptions)\nPROTOBUF_DEFINE_ACCESSOR(FileDescriptor, is_placeholder, 
bool)\n\nPROTOBUF_DEFINE_ARRAY_ACCESSOR(FileDescriptor, message_type, const Descriptor*)\nPROTOBUF_DEFINE_ARRAY_ACCESSOR(FileDescriptor, enum_type, const EnumDescriptor*)\nPROTOBUF_DEFINE_ARRAY_ACCESSOR(FileDescriptor, service,\n                               const ServiceDescriptor*)\nPROTOBUF_DEFINE_ARRAY_ACCESSOR(FileDescriptor, extension,\n                               const FieldDescriptor*)\n\n#undef PROTOBUF_DEFINE_ACCESSOR\n#undef PROTOBUF_DEFINE_STRING_ACCESSOR\n#undef PROTOBUF_DEFINE_ARRAY_ACCESSOR\n\n// A few accessors differ from the macros...\n\ninline bool Descriptor::IsExtensionNumber(int number) const {\n  return FindExtensionRangeContainingNumber(number) != NULL;\n}\n\ninline bool Descriptor::IsReservedNumber(int number) const {\n  return FindReservedRangeContainingNumber(number) != NULL;\n}\n\ninline bool Descriptor::IsReservedName(const string& name) const {\n  for (int i = 0; i < reserved_name_count(); i++) {\n    if (name == reserved_name(i)) {\n      return true;\n    }\n  }\n  return false;\n}\n\n// Can't use PROTOBUF_DEFINE_ARRAY_ACCESSOR because reserved_names_ is actually\n// an array of pointers rather than the usual array of objects.\ninline const string& Descriptor::reserved_name(int index) const {\n  return *reserved_names_[index];\n}\n\ninline bool FieldDescriptor::is_required() const {\n  return label() == LABEL_REQUIRED;\n}\n\ninline bool FieldDescriptor::is_optional() const {\n  return label() == LABEL_OPTIONAL;\n}\n\ninline bool FieldDescriptor::is_repeated() const {\n  return label() == LABEL_REPEATED;\n}\n\ninline bool FieldDescriptor::is_packable() const {\n  return is_repeated() && IsTypePackable(type());\n}\n\n// To save space, index() is computed by looking at the descriptor's position\n// in the parent's array of children.\ninline int FieldDescriptor::index() const {\n  if (!is_extension_) {\n    return static_cast<int>(this - containing_type_->fields_);\n  } else if (extension_scope_ != NULL) {\n    return 
static_cast<int>(this - extension_scope_->extensions_);\n  } else {\n    return static_cast<int>(this - file_->extensions_);\n  }\n}\n\ninline int Descriptor::index() const {\n  if (containing_type_ == NULL) {\n    return static_cast<int>(this - file_->message_types_);\n  } else {\n    return static_cast<int>(this - containing_type_->nested_types_);\n  }\n}\n\ninline int OneofDescriptor::index() const {\n  return static_cast<int>(this - containing_type_->oneof_decls_);\n}\n\ninline int EnumDescriptor::index() const {\n  if (containing_type_ == NULL) {\n    return static_cast<int>(this - file_->enum_types_);\n  } else {\n    return static_cast<int>(this - containing_type_->enum_types_);\n  }\n}\n\ninline int EnumValueDescriptor::index() const {\n  return static_cast<int>(this - type_->values_);\n}\n\ninline int ServiceDescriptor::index() const {\n  return static_cast<int>(this - file_->services_);\n}\n\ninline int MethodDescriptor::index() const {\n  return static_cast<int>(this - service_->methods_);\n}\n\ninline const char* FieldDescriptor::type_name() const {\n  return kTypeToName[type_];\n}\n\ninline FieldDescriptor::CppType FieldDescriptor::cpp_type() const {\n  return kTypeToCppTypeMap[type_];\n}\n\ninline const char* FieldDescriptor::cpp_type_name() const {\n  return kCppTypeToName[kTypeToCppTypeMap[type_]];\n}\n\ninline FieldDescriptor::CppType FieldDescriptor::TypeToCppType(Type type) {\n  return kTypeToCppTypeMap[type];\n}\n\ninline const char* FieldDescriptor::TypeName(Type type) {\n  return kTypeToName[type];\n}\n\ninline const char* FieldDescriptor::CppTypeName(CppType cpp_type) {\n  return kCppTypeToName[cpp_type];\n}\n\ninline bool FieldDescriptor::IsTypePackable(Type field_type) {\n  return (field_type != FieldDescriptor::TYPE_STRING &&\n          field_type != FieldDescriptor::TYPE_GROUP &&\n          field_type != FieldDescriptor::TYPE_MESSAGE &&\n          field_type != FieldDescriptor::TYPE_BYTES);\n}\n\ninline const FileDescriptor* 
FileDescriptor::dependency(int index) const {\n  return dependencies_[index];\n}\n\ninline const FileDescriptor* FileDescriptor::public_dependency(\n    int index) const {\n  return dependencies_[public_dependencies_[index]];\n}\n\ninline const FileDescriptor* FileDescriptor::weak_dependency(\n    int index) const {\n  return dependencies_[weak_dependencies_[index]];\n}\n\ninline FileDescriptor::Syntax FileDescriptor::syntax() const {\n  return syntax_;\n}\n\n// Can't use PROTOBUF_DEFINE_ARRAY_ACCESSOR because fields_ is actually an array\n// of pointers rather than the usual array of objects.\ninline const FieldDescriptor* OneofDescriptor::field(int index) const {\n  return fields_[index];\n}\n\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_DESCRIPTOR_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/descriptor.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: google/protobuf/descriptor.proto\n\n#ifndef PROTOBUF_google_2fprotobuf_2fdescriptor_2eproto__INCLUDED\n#define PROTOBUF_google_2fprotobuf_2fdescriptor_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/generated_enum_reflection.h>\n#include <google/protobuf/unknown_field_set.h>\n// @@protoc_insertion_point(includes)\n\nnamespace google {\nnamespace protobuf {\n\n// Internal implementation detail -- do not call these.\nvoid LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto();\nvoid LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto();\nvoid protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\nvoid protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\nclass DescriptorProto;\nclass DescriptorProto_ExtensionRange;\nclass DescriptorProto_ReservedRange;\nclass EnumDescriptorProto;\nclass EnumOptions;\nclass EnumValueDescriptorProto;\nclass EnumValueOptions;\nclass FieldDescriptorProto;\nclass FieldOptions;\nclass FileDescriptorProto;\nclass FileDescriptorSet;\nclass FileOptions;\nclass 
GeneratedCodeInfo;\nclass GeneratedCodeInfo_Annotation;\nclass MessageOptions;\nclass MethodDescriptorProto;\nclass MethodOptions;\nclass OneofDescriptorProto;\nclass OneofOptions;\nclass ServiceDescriptorProto;\nclass ServiceOptions;\nclass SourceCodeInfo;\nclass SourceCodeInfo_Location;\nclass UninterpretedOption;\nclass UninterpretedOption_NamePart;\n\nenum FieldDescriptorProto_Type {\n  FieldDescriptorProto_Type_TYPE_DOUBLE = 1,\n  FieldDescriptorProto_Type_TYPE_FLOAT = 2,\n  FieldDescriptorProto_Type_TYPE_INT64 = 3,\n  FieldDescriptorProto_Type_TYPE_UINT64 = 4,\n  FieldDescriptorProto_Type_TYPE_INT32 = 5,\n  FieldDescriptorProto_Type_TYPE_FIXED64 = 6,\n  FieldDescriptorProto_Type_TYPE_FIXED32 = 7,\n  FieldDescriptorProto_Type_TYPE_BOOL = 8,\n  FieldDescriptorProto_Type_TYPE_STRING = 9,\n  FieldDescriptorProto_Type_TYPE_GROUP = 10,\n  FieldDescriptorProto_Type_TYPE_MESSAGE = 11,\n  FieldDescriptorProto_Type_TYPE_BYTES = 12,\n  FieldDescriptorProto_Type_TYPE_UINT32 = 13,\n  FieldDescriptorProto_Type_TYPE_ENUM = 14,\n  FieldDescriptorProto_Type_TYPE_SFIXED32 = 15,\n  FieldDescriptorProto_Type_TYPE_SFIXED64 = 16,\n  FieldDescriptorProto_Type_TYPE_SINT32 = 17,\n  FieldDescriptorProto_Type_TYPE_SINT64 = 18\n};\nLIBPROTOBUF_EXPORT bool FieldDescriptorProto_Type_IsValid(int value);\nconst FieldDescriptorProto_Type FieldDescriptorProto_Type_Type_MIN = FieldDescriptorProto_Type_TYPE_DOUBLE;\nconst FieldDescriptorProto_Type FieldDescriptorProto_Type_Type_MAX = FieldDescriptorProto_Type_TYPE_SINT64;\nconst int FieldDescriptorProto_Type_Type_ARRAYSIZE = FieldDescriptorProto_Type_Type_MAX + 1;\n\nLIBPROTOBUF_EXPORT const ::google::protobuf::EnumDescriptor* FieldDescriptorProto_Type_descriptor();\ninline const ::std::string& FieldDescriptorProto_Type_Name(FieldDescriptorProto_Type value) {\n  return ::google::protobuf::internal::NameOfEnum(\n    FieldDescriptorProto_Type_descriptor(), value);\n}\ninline bool FieldDescriptorProto_Type_Parse(\n    const ::std::string& name, 
FieldDescriptorProto_Type* value) {\n  return ::google::protobuf::internal::ParseNamedEnum<FieldDescriptorProto_Type>(\n    FieldDescriptorProto_Type_descriptor(), name, value);\n}\nenum FieldDescriptorProto_Label {\n  FieldDescriptorProto_Label_LABEL_OPTIONAL = 1,\n  FieldDescriptorProto_Label_LABEL_REQUIRED = 2,\n  FieldDescriptorProto_Label_LABEL_REPEATED = 3\n};\nLIBPROTOBUF_EXPORT bool FieldDescriptorProto_Label_IsValid(int value);\nconst FieldDescriptorProto_Label FieldDescriptorProto_Label_Label_MIN = FieldDescriptorProto_Label_LABEL_OPTIONAL;\nconst FieldDescriptorProto_Label FieldDescriptorProto_Label_Label_MAX = FieldDescriptorProto_Label_LABEL_REPEATED;\nconst int FieldDescriptorProto_Label_Label_ARRAYSIZE = FieldDescriptorProto_Label_Label_MAX + 1;\n\nLIBPROTOBUF_EXPORT const ::google::protobuf::EnumDescriptor* FieldDescriptorProto_Label_descriptor();\ninline const ::std::string& FieldDescriptorProto_Label_Name(FieldDescriptorProto_Label value) {\n  return ::google::protobuf::internal::NameOfEnum(\n    FieldDescriptorProto_Label_descriptor(), value);\n}\ninline bool FieldDescriptorProto_Label_Parse(\n    const ::std::string& name, FieldDescriptorProto_Label* value) {\n  return ::google::protobuf::internal::ParseNamedEnum<FieldDescriptorProto_Label>(\n    FieldDescriptorProto_Label_descriptor(), name, value);\n}\nenum FileOptions_OptimizeMode {\n  FileOptions_OptimizeMode_SPEED = 1,\n  FileOptions_OptimizeMode_CODE_SIZE = 2,\n  FileOptions_OptimizeMode_LITE_RUNTIME = 3\n};\nLIBPROTOBUF_EXPORT bool FileOptions_OptimizeMode_IsValid(int value);\nconst FileOptions_OptimizeMode FileOptions_OptimizeMode_OptimizeMode_MIN = FileOptions_OptimizeMode_SPEED;\nconst FileOptions_OptimizeMode FileOptions_OptimizeMode_OptimizeMode_MAX = FileOptions_OptimizeMode_LITE_RUNTIME;\nconst int FileOptions_OptimizeMode_OptimizeMode_ARRAYSIZE = FileOptions_OptimizeMode_OptimizeMode_MAX + 1;\n\nLIBPROTOBUF_EXPORT const ::google::protobuf::EnumDescriptor* 
FileOptions_OptimizeMode_descriptor();\ninline const ::std::string& FileOptions_OptimizeMode_Name(FileOptions_OptimizeMode value) {\n  return ::google::protobuf::internal::NameOfEnum(\n    FileOptions_OptimizeMode_descriptor(), value);\n}\ninline bool FileOptions_OptimizeMode_Parse(\n    const ::std::string& name, FileOptions_OptimizeMode* value) {\n  return ::google::protobuf::internal::ParseNamedEnum<FileOptions_OptimizeMode>(\n    FileOptions_OptimizeMode_descriptor(), name, value);\n}\nenum FieldOptions_CType {\n  FieldOptions_CType_STRING = 0,\n  FieldOptions_CType_CORD = 1,\n  FieldOptions_CType_STRING_PIECE = 2\n};\nLIBPROTOBUF_EXPORT bool FieldOptions_CType_IsValid(int value);\nconst FieldOptions_CType FieldOptions_CType_CType_MIN = FieldOptions_CType_STRING;\nconst FieldOptions_CType FieldOptions_CType_CType_MAX = FieldOptions_CType_STRING_PIECE;\nconst int FieldOptions_CType_CType_ARRAYSIZE = FieldOptions_CType_CType_MAX + 1;\n\nLIBPROTOBUF_EXPORT const ::google::protobuf::EnumDescriptor* FieldOptions_CType_descriptor();\ninline const ::std::string& FieldOptions_CType_Name(FieldOptions_CType value) {\n  return ::google::protobuf::internal::NameOfEnum(\n    FieldOptions_CType_descriptor(), value);\n}\ninline bool FieldOptions_CType_Parse(\n    const ::std::string& name, FieldOptions_CType* value) {\n  return ::google::protobuf::internal::ParseNamedEnum<FieldOptions_CType>(\n    FieldOptions_CType_descriptor(), name, value);\n}\nenum FieldOptions_JSType {\n  FieldOptions_JSType_JS_NORMAL = 0,\n  FieldOptions_JSType_JS_STRING = 1,\n  FieldOptions_JSType_JS_NUMBER = 2\n};\nLIBPROTOBUF_EXPORT bool FieldOptions_JSType_IsValid(int value);\nconst FieldOptions_JSType FieldOptions_JSType_JSType_MIN = FieldOptions_JSType_JS_NORMAL;\nconst FieldOptions_JSType FieldOptions_JSType_JSType_MAX = FieldOptions_JSType_JS_NUMBER;\nconst int FieldOptions_JSType_JSType_ARRAYSIZE = FieldOptions_JSType_JSType_MAX + 1;\n\nLIBPROTOBUF_EXPORT const 
::google::protobuf::EnumDescriptor* FieldOptions_JSType_descriptor();\ninline const ::std::string& FieldOptions_JSType_Name(FieldOptions_JSType value) {\n  return ::google::protobuf::internal::NameOfEnum(\n    FieldOptions_JSType_descriptor(), value);\n}\ninline bool FieldOptions_JSType_Parse(\n    const ::std::string& name, FieldOptions_JSType* value) {\n  return ::google::protobuf::internal::ParseNamedEnum<FieldOptions_JSType>(\n    FieldOptions_JSType_descriptor(), name, value);\n}\n// ===================================================================\n\nclass LIBPROTOBUF_EXPORT FileDescriptorSet : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.FileDescriptorSet) */ {\n public:\n  FileDescriptorSet();\n  virtual ~FileDescriptorSet();\n\n  FileDescriptorSet(const FileDescriptorSet& from);\n\n  inline FileDescriptorSet& operator=(const FileDescriptorSet& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const FileDescriptorSet& default_instance();\n\n  static const FileDescriptorSet* internal_default_instance();\n\n  void Swap(FileDescriptorSet* other);\n\n  // implements Message ----------------------------------------------\n\n  inline FileDescriptorSet* New() const { return New(NULL); }\n\n  FileDescriptorSet* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const FileDescriptorSet& from);\n  void MergeFrom(const FileDescriptorSet& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool 
MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(FileDescriptorSet* other);\n  void UnsafeMergeFrom(const FileDescriptorSet& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // repeated .google.protobuf.FileDescriptorProto file = 1;\n  int file_size() const;\n  void clear_file();\n  static const int kFileFieldNumber = 1;\n  const ::google::protobuf::FileDescriptorProto& file(int index) const;\n  ::google::protobuf::FileDescriptorProto* mutable_file(int index);\n  ::google::protobuf::FileDescriptorProto* add_file();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::FileDescriptorProto >*\n      mutable_file();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::FileDescriptorProto >&\n      file() const;\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.FileDescriptorSet)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable 
int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::FileDescriptorProto > file_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<FileDescriptorSet> FileDescriptorSet_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT FileDescriptorProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.FileDescriptorProto) */ {\n public:\n  FileDescriptorProto();\n  virtual ~FileDescriptorProto();\n\n  FileDescriptorProto(const FileDescriptorProto& from);\n\n  inline FileDescriptorProto& operator=(const FileDescriptorProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const FileDescriptorProto& default_instance();\n\n  static const FileDescriptorProto* internal_default_instance();\n\n  void Swap(FileDescriptorProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline FileDescriptorProto* New() const { return New(NULL); }\n\n  FileDescriptorProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const 
FileDescriptorProto& from);\n  void MergeFrom(const FileDescriptorProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(FileDescriptorProto* other);\n  void UnsafeMergeFrom(const FileDescriptorProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // optional string package = 2;\n  bool has_package() const;\n  void clear_package();\n  static const int kPackageFieldNumber = 2;\n  const ::std::string& package() const;\n  void set_package(const ::std::string& 
value);\n  void set_package(const char* value);\n  void set_package(const char* value, size_t size);\n  ::std::string* mutable_package();\n  ::std::string* release_package();\n  void set_allocated_package(::std::string* package);\n\n  // repeated string dependency = 3;\n  int dependency_size() const;\n  void clear_dependency();\n  static const int kDependencyFieldNumber = 3;\n  const ::std::string& dependency(int index) const;\n  ::std::string* mutable_dependency(int index);\n  void set_dependency(int index, const ::std::string& value);\n  void set_dependency(int index, const char* value);\n  void set_dependency(int index, const char* value, size_t size);\n  ::std::string* add_dependency();\n  void add_dependency(const ::std::string& value);\n  void add_dependency(const char* value);\n  void add_dependency(const char* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& dependency() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_dependency();\n\n  // repeated int32 public_dependency = 10;\n  int public_dependency_size() const;\n  void clear_public_dependency();\n  static const int kPublicDependencyFieldNumber = 10;\n  ::google::protobuf::int32 public_dependency(int index) const;\n  void set_public_dependency(int index, ::google::protobuf::int32 value);\n  void add_public_dependency(::google::protobuf::int32 value);\n  const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\n      public_dependency() const;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\n      mutable_public_dependency();\n\n  // repeated int32 weak_dependency = 11;\n  int weak_dependency_size() const;\n  void clear_weak_dependency();\n  static const int kWeakDependencyFieldNumber = 11;\n  ::google::protobuf::int32 weak_dependency(int index) const;\n  void set_weak_dependency(int index, ::google::protobuf::int32 value);\n  void add_weak_dependency(::google::protobuf::int32 value);\n  const 
::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\n      weak_dependency() const;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\n      mutable_weak_dependency();\n\n  // repeated .google.protobuf.DescriptorProto message_type = 4;\n  int message_type_size() const;\n  void clear_message_type();\n  static const int kMessageTypeFieldNumber = 4;\n  const ::google::protobuf::DescriptorProto& message_type(int index) const;\n  ::google::protobuf::DescriptorProto* mutable_message_type(int index);\n  ::google::protobuf::DescriptorProto* add_message_type();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto >*\n      mutable_message_type();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto >&\n      message_type() const;\n\n  // repeated .google.protobuf.EnumDescriptorProto enum_type = 5;\n  int enum_type_size() const;\n  void clear_enum_type();\n  static const int kEnumTypeFieldNumber = 5;\n  const ::google::protobuf::EnumDescriptorProto& enum_type(int index) const;\n  ::google::protobuf::EnumDescriptorProto* mutable_enum_type(int index);\n  ::google::protobuf::EnumDescriptorProto* add_enum_type();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumDescriptorProto >*\n      mutable_enum_type();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumDescriptorProto >&\n      enum_type() const;\n\n  // repeated .google.protobuf.ServiceDescriptorProto service = 6;\n  int service_size() const;\n  void clear_service();\n  static const int kServiceFieldNumber = 6;\n  const ::google::protobuf::ServiceDescriptorProto& service(int index) const;\n  ::google::protobuf::ServiceDescriptorProto* mutable_service(int index);\n  ::google::protobuf::ServiceDescriptorProto* add_service();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::ServiceDescriptorProto >*\n      mutable_service();\n  const ::google::protobuf::RepeatedPtrField< 
::google::protobuf::ServiceDescriptorProto >&\n      service() const;\n\n  // repeated .google.protobuf.FieldDescriptorProto extension = 7;\n  int extension_size() const;\n  void clear_extension();\n  static const int kExtensionFieldNumber = 7;\n  const ::google::protobuf::FieldDescriptorProto& extension(int index) const;\n  ::google::protobuf::FieldDescriptorProto* mutable_extension(int index);\n  ::google::protobuf::FieldDescriptorProto* add_extension();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto >*\n      mutable_extension();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto >&\n      extension() const;\n\n  // optional .google.protobuf.FileOptions options = 8;\n  bool has_options() const;\n  void clear_options();\n  static const int kOptionsFieldNumber = 8;\n  const ::google::protobuf::FileOptions& options() const;\n  ::google::protobuf::FileOptions* mutable_options();\n  ::google::protobuf::FileOptions* release_options();\n  void set_allocated_options(::google::protobuf::FileOptions* options);\n\n  // optional .google.protobuf.SourceCodeInfo source_code_info = 9;\n  bool has_source_code_info() const;\n  void clear_source_code_info();\n  static const int kSourceCodeInfoFieldNumber = 9;\n  const ::google::protobuf::SourceCodeInfo& source_code_info() const;\n  ::google::protobuf::SourceCodeInfo* mutable_source_code_info();\n  ::google::protobuf::SourceCodeInfo* release_source_code_info();\n  void set_allocated_source_code_info(::google::protobuf::SourceCodeInfo* source_code_info);\n\n  // optional string syntax = 12;\n  bool has_syntax() const;\n  void clear_syntax();\n  static const int kSyntaxFieldNumber = 12;\n  const ::std::string& syntax() const;\n  void set_syntax(const ::std::string& value);\n  void set_syntax(const char* value);\n  void set_syntax(const char* value, size_t size);\n  ::std::string* mutable_syntax();\n  ::std::string* release_syntax();\n  void 
set_allocated_syntax(::std::string* syntax);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.FileDescriptorProto)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_package();\n  inline void clear_has_package();\n  inline void set_has_options();\n  inline void clear_has_options();\n  inline void set_has_source_code_info();\n  inline void clear_has_source_code_info();\n  inline void set_has_syntax();\n  inline void clear_has_syntax();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> dependency_;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 > public_dependency_;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 > weak_dependency_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto > message_type_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumDescriptorProto > enum_type_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::ServiceDescriptorProto > service_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto > extension_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::internal::ArenaStringPtr package_;\n  ::google::protobuf::internal::ArenaStringPtr syntax_;\n  ::google::protobuf::FileOptions* options_;\n  ::google::protobuf::SourceCodeInfo* source_code_info_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern 
::google::protobuf::internal::ExplicitlyConstructed<FileDescriptorProto> FileDescriptorProto_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT DescriptorProto_ExtensionRange : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.DescriptorProto.ExtensionRange) */ {\n public:\n  DescriptorProto_ExtensionRange();\n  virtual ~DescriptorProto_ExtensionRange();\n\n  DescriptorProto_ExtensionRange(const DescriptorProto_ExtensionRange& from);\n\n  inline DescriptorProto_ExtensionRange& operator=(const DescriptorProto_ExtensionRange& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const DescriptorProto_ExtensionRange& default_instance();\n\n  static const DescriptorProto_ExtensionRange* internal_default_instance();\n\n  void Swap(DescriptorProto_ExtensionRange* other);\n\n  // implements Message ----------------------------------------------\n\n  inline DescriptorProto_ExtensionRange* New() const { return New(NULL); }\n\n  DescriptorProto_ExtensionRange* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const DescriptorProto_ExtensionRange& from);\n  void MergeFrom(const DescriptorProto_ExtensionRange& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(DescriptorProto_ExtensionRange* other);\n  void UnsafeMergeFrom(const DescriptorProto_ExtensionRange& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional int32 start = 1;\n  bool has_start() const;\n  void clear_start();\n  static const int kStartFieldNumber = 1;\n  ::google::protobuf::int32 start() const;\n  void set_start(::google::protobuf::int32 value);\n\n  // optional int32 end = 2;\n  bool has_end() const;\n  void clear_end();\n  static const int kEndFieldNumber = 2;\n  ::google::protobuf::int32 end() const;\n  void set_end(::google::protobuf::int32 value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.DescriptorProto.ExtensionRange)\n private:\n  inline void set_has_start();\n  inline void clear_has_start();\n  inline void set_has_end();\n  inline void clear_has_end();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::int32 start_;\n  ::google::protobuf::int32 end_;\n  friend void LIBPROTOBUF_EXPORT 
protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<DescriptorProto_ExtensionRange> DescriptorProto_ExtensionRange_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT DescriptorProto_ReservedRange : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.DescriptorProto.ReservedRange) */ {\n public:\n  DescriptorProto_ReservedRange();\n  virtual ~DescriptorProto_ReservedRange();\n\n  DescriptorProto_ReservedRange(const DescriptorProto_ReservedRange& from);\n\n  inline DescriptorProto_ReservedRange& operator=(const DescriptorProto_ReservedRange& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const DescriptorProto_ReservedRange& default_instance();\n\n  static const DescriptorProto_ReservedRange* internal_default_instance();\n\n  void Swap(DescriptorProto_ReservedRange* other);\n\n  // implements Message ----------------------------------------------\n\n  inline DescriptorProto_ReservedRange* New() const { return New(NULL); }\n\n  DescriptorProto_ReservedRange* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const 
DescriptorProto_ReservedRange& from);\n  void MergeFrom(const DescriptorProto_ReservedRange& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(DescriptorProto_ReservedRange* other);\n  void UnsafeMergeFrom(const DescriptorProto_ReservedRange& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional int32 start = 1;\n  bool has_start() const;\n  void clear_start();\n  static const int kStartFieldNumber = 1;\n  ::google::protobuf::int32 start() const;\n  void set_start(::google::protobuf::int32 value);\n\n  // optional int32 end = 2;\n  bool has_end() const;\n  void clear_end();\n  static const int kEndFieldNumber = 2;\n  ::google::protobuf::int32 end() const;\n  void set_end(::google::protobuf::int32 value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.DescriptorProto.ReservedRange)\n private:\n  inline void set_has_start();\n  inline void 
clear_has_start();\n  inline void set_has_end();\n  inline void clear_has_end();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::int32 start_;\n  ::google::protobuf::int32 end_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<DescriptorProto_ReservedRange> DescriptorProto_ReservedRange_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT DescriptorProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.DescriptorProto) */ {\n public:\n  DescriptorProto();\n  virtual ~DescriptorProto();\n\n  DescriptorProto(const DescriptorProto& from);\n\n  inline DescriptorProto& operator=(const DescriptorProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const DescriptorProto& default_instance();\n\n  static const DescriptorProto* internal_default_instance();\n\n  void Swap(DescriptorProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline DescriptorProto* New() const { return New(NULL); }\n\n  DescriptorProto* 
New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const DescriptorProto& from);\n  void MergeFrom(const DescriptorProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(DescriptorProto* other);\n  void UnsafeMergeFrom(const DescriptorProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  typedef DescriptorProto_ExtensionRange ExtensionRange;\n  typedef DescriptorProto_ReservedRange ReservedRange;\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* 
release_name();\n  void set_allocated_name(::std::string* name);\n\n  // repeated .google.protobuf.FieldDescriptorProto field = 2;\n  int field_size() const;\n  void clear_field();\n  static const int kFieldFieldNumber = 2;\n  const ::google::protobuf::FieldDescriptorProto& field(int index) const;\n  ::google::protobuf::FieldDescriptorProto* mutable_field(int index);\n  ::google::protobuf::FieldDescriptorProto* add_field();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto >*\n      mutable_field();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto >&\n      field() const;\n\n  // repeated .google.protobuf.FieldDescriptorProto extension = 6;\n  int extension_size() const;\n  void clear_extension();\n  static const int kExtensionFieldNumber = 6;\n  const ::google::protobuf::FieldDescriptorProto& extension(int index) const;\n  ::google::protobuf::FieldDescriptorProto* mutable_extension(int index);\n  ::google::protobuf::FieldDescriptorProto* add_extension();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto >*\n      mutable_extension();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto >&\n      extension() const;\n\n  // repeated .google.protobuf.DescriptorProto nested_type = 3;\n  int nested_type_size() const;\n  void clear_nested_type();\n  static const int kNestedTypeFieldNumber = 3;\n  const ::google::protobuf::DescriptorProto& nested_type(int index) const;\n  ::google::protobuf::DescriptorProto* mutable_nested_type(int index);\n  ::google::protobuf::DescriptorProto* add_nested_type();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto >*\n      mutable_nested_type();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto >&\n      nested_type() const;\n\n  // repeated .google.protobuf.EnumDescriptorProto enum_type = 4;\n  int enum_type_size() const;\n  void 
clear_enum_type();\n  static const int kEnumTypeFieldNumber = 4;\n  const ::google::protobuf::EnumDescriptorProto& enum_type(int index) const;\n  ::google::protobuf::EnumDescriptorProto* mutable_enum_type(int index);\n  ::google::protobuf::EnumDescriptorProto* add_enum_type();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumDescriptorProto >*\n      mutable_enum_type();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumDescriptorProto >&\n      enum_type() const;\n\n  // repeated .google.protobuf.DescriptorProto.ExtensionRange extension_range = 5;\n  int extension_range_size() const;\n  void clear_extension_range();\n  static const int kExtensionRangeFieldNumber = 5;\n  const ::google::protobuf::DescriptorProto_ExtensionRange& extension_range(int index) const;\n  ::google::protobuf::DescriptorProto_ExtensionRange* mutable_extension_range(int index);\n  ::google::protobuf::DescriptorProto_ExtensionRange* add_extension_range();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto_ExtensionRange >*\n      mutable_extension_range();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto_ExtensionRange >&\n      extension_range() const;\n\n  // repeated .google.protobuf.OneofDescriptorProto oneof_decl = 8;\n  int oneof_decl_size() const;\n  void clear_oneof_decl();\n  static const int kOneofDeclFieldNumber = 8;\n  const ::google::protobuf::OneofDescriptorProto& oneof_decl(int index) const;\n  ::google::protobuf::OneofDescriptorProto* mutable_oneof_decl(int index);\n  ::google::protobuf::OneofDescriptorProto* add_oneof_decl();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::OneofDescriptorProto >*\n      mutable_oneof_decl();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::OneofDescriptorProto >&\n      oneof_decl() const;\n\n  // optional .google.protobuf.MessageOptions options = 7;\n  bool has_options() const;\n  void clear_options();\n  static 
const int kOptionsFieldNumber = 7;\n  const ::google::protobuf::MessageOptions& options() const;\n  ::google::protobuf::MessageOptions* mutable_options();\n  ::google::protobuf::MessageOptions* release_options();\n  void set_allocated_options(::google::protobuf::MessageOptions* options);\n\n  // repeated .google.protobuf.DescriptorProto.ReservedRange reserved_range = 9;\n  int reserved_range_size() const;\n  void clear_reserved_range();\n  static const int kReservedRangeFieldNumber = 9;\n  const ::google::protobuf::DescriptorProto_ReservedRange& reserved_range(int index) const;\n  ::google::protobuf::DescriptorProto_ReservedRange* mutable_reserved_range(int index);\n  ::google::protobuf::DescriptorProto_ReservedRange* add_reserved_range();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto_ReservedRange >*\n      mutable_reserved_range();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto_ReservedRange >&\n      reserved_range() const;\n\n  // repeated string reserved_name = 10;\n  int reserved_name_size() const;\n  void clear_reserved_name();\n  static const int kReservedNameFieldNumber = 10;\n  const ::std::string& reserved_name(int index) const;\n  ::std::string* mutable_reserved_name(int index);\n  void set_reserved_name(int index, const ::std::string& value);\n  void set_reserved_name(int index, const char* value);\n  void set_reserved_name(int index, const char* value, size_t size);\n  ::std::string* add_reserved_name();\n  void add_reserved_name(const ::std::string& value);\n  void add_reserved_name(const char* value);\n  void add_reserved_name(const char* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& reserved_name() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_reserved_name();\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.DescriptorProto)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline 
void set_has_options();\n  inline void clear_has_options();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto > field_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto > extension_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto > nested_type_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumDescriptorProto > enum_type_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto_ExtensionRange > extension_range_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::OneofDescriptorProto > oneof_decl_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto_ReservedRange > reserved_range_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> reserved_name_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::MessageOptions* options_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<DescriptorProto> DescriptorProto_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT FieldDescriptorProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.FieldDescriptorProto) */ {\n public:\n  FieldDescriptorProto();\n  virtual ~FieldDescriptorProto();\n\n  FieldDescriptorProto(const FieldDescriptorProto& from);\n\n  
inline FieldDescriptorProto& operator=(const FieldDescriptorProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const FieldDescriptorProto& default_instance();\n\n  static const FieldDescriptorProto* internal_default_instance();\n\n  void Swap(FieldDescriptorProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline FieldDescriptorProto* New() const { return New(NULL); }\n\n  FieldDescriptorProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const FieldDescriptorProto& from);\n  void MergeFrom(const FieldDescriptorProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(FieldDescriptorProto* other);\n  void UnsafeMergeFrom(const FieldDescriptorProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n 
   return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  typedef FieldDescriptorProto_Type Type;\n  static const Type TYPE_DOUBLE =\n    FieldDescriptorProto_Type_TYPE_DOUBLE;\n  static const Type TYPE_FLOAT =\n    FieldDescriptorProto_Type_TYPE_FLOAT;\n  static const Type TYPE_INT64 =\n    FieldDescriptorProto_Type_TYPE_INT64;\n  static const Type TYPE_UINT64 =\n    FieldDescriptorProto_Type_TYPE_UINT64;\n  static const Type TYPE_INT32 =\n    FieldDescriptorProto_Type_TYPE_INT32;\n  static const Type TYPE_FIXED64 =\n    FieldDescriptorProto_Type_TYPE_FIXED64;\n  static const Type TYPE_FIXED32 =\n    FieldDescriptorProto_Type_TYPE_FIXED32;\n  static const Type TYPE_BOOL =\n    FieldDescriptorProto_Type_TYPE_BOOL;\n  static const Type TYPE_STRING =\n    FieldDescriptorProto_Type_TYPE_STRING;\n  static const Type TYPE_GROUP =\n    FieldDescriptorProto_Type_TYPE_GROUP;\n  static const Type TYPE_MESSAGE =\n    FieldDescriptorProto_Type_TYPE_MESSAGE;\n  static const Type TYPE_BYTES =\n    FieldDescriptorProto_Type_TYPE_BYTES;\n  static const Type TYPE_UINT32 =\n    FieldDescriptorProto_Type_TYPE_UINT32;\n  static const Type TYPE_ENUM =\n    FieldDescriptorProto_Type_TYPE_ENUM;\n  static const Type TYPE_SFIXED32 =\n    FieldDescriptorProto_Type_TYPE_SFIXED32;\n  static const Type TYPE_SFIXED64 =\n    FieldDescriptorProto_Type_TYPE_SFIXED64;\n  static const Type TYPE_SINT32 =\n    FieldDescriptorProto_Type_TYPE_SINT32;\n  static const Type TYPE_SINT64 =\n    FieldDescriptorProto_Type_TYPE_SINT64;\n  static inline bool Type_IsValid(int value) {\n    return FieldDescriptorProto_Type_IsValid(value);\n  }\n  static const Type Type_MIN =\n    FieldDescriptorProto_Type_Type_MIN;\n  static const Type Type_MAX =\n    FieldDescriptorProto_Type_Type_MAX;\n  
static const int Type_ARRAYSIZE =\n    FieldDescriptorProto_Type_Type_ARRAYSIZE;\n  static inline const ::google::protobuf::EnumDescriptor*\n  Type_descriptor() {\n    return FieldDescriptorProto_Type_descriptor();\n  }\n  static inline const ::std::string& Type_Name(Type value) {\n    return FieldDescriptorProto_Type_Name(value);\n  }\n  static inline bool Type_Parse(const ::std::string& name,\n      Type* value) {\n    return FieldDescriptorProto_Type_Parse(name, value);\n  }\n\n  typedef FieldDescriptorProto_Label Label;\n  static const Label LABEL_OPTIONAL =\n    FieldDescriptorProto_Label_LABEL_OPTIONAL;\n  static const Label LABEL_REQUIRED =\n    FieldDescriptorProto_Label_LABEL_REQUIRED;\n  static const Label LABEL_REPEATED =\n    FieldDescriptorProto_Label_LABEL_REPEATED;\n  static inline bool Label_IsValid(int value) {\n    return FieldDescriptorProto_Label_IsValid(value);\n  }\n  static const Label Label_MIN =\n    FieldDescriptorProto_Label_Label_MIN;\n  static const Label Label_MAX =\n    FieldDescriptorProto_Label_Label_MAX;\n  static const int Label_ARRAYSIZE =\n    FieldDescriptorProto_Label_Label_ARRAYSIZE;\n  static inline const ::google::protobuf::EnumDescriptor*\n  Label_descriptor() {\n    return FieldDescriptorProto_Label_descriptor();\n  }\n  static inline const ::std::string& Label_Name(Label value) {\n    return FieldDescriptorProto_Label_Name(value);\n  }\n  static inline bool Label_Parse(const ::std::string& name,\n      Label* value) {\n    return FieldDescriptorProto_Label_Parse(name, value);\n  }\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void 
set_allocated_name(::std::string* name);\n\n  // optional int32 number = 3;\n  bool has_number() const;\n  void clear_number();\n  static const int kNumberFieldNumber = 3;\n  ::google::protobuf::int32 number() const;\n  void set_number(::google::protobuf::int32 value);\n\n  // optional .google.protobuf.FieldDescriptorProto.Label label = 4;\n  bool has_label() const;\n  void clear_label();\n  static const int kLabelFieldNumber = 4;\n  ::google::protobuf::FieldDescriptorProto_Label label() const;\n  void set_label(::google::protobuf::FieldDescriptorProto_Label value);\n\n  // optional .google.protobuf.FieldDescriptorProto.Type type = 5;\n  bool has_type() const;\n  void clear_type();\n  static const int kTypeFieldNumber = 5;\n  ::google::protobuf::FieldDescriptorProto_Type type() const;\n  void set_type(::google::protobuf::FieldDescriptorProto_Type value);\n\n  // optional string type_name = 6;\n  bool has_type_name() const;\n  void clear_type_name();\n  static const int kTypeNameFieldNumber = 6;\n  const ::std::string& type_name() const;\n  void set_type_name(const ::std::string& value);\n  void set_type_name(const char* value);\n  void set_type_name(const char* value, size_t size);\n  ::std::string* mutable_type_name();\n  ::std::string* release_type_name();\n  void set_allocated_type_name(::std::string* type_name);\n\n  // optional string extendee = 2;\n  bool has_extendee() const;\n  void clear_extendee();\n  static const int kExtendeeFieldNumber = 2;\n  const ::std::string& extendee() const;\n  void set_extendee(const ::std::string& value);\n  void set_extendee(const char* value);\n  void set_extendee(const char* value, size_t size);\n  ::std::string* mutable_extendee();\n  ::std::string* release_extendee();\n  void set_allocated_extendee(::std::string* extendee);\n\n  // optional string default_value = 7;\n  bool has_default_value() const;\n  void clear_default_value();\n  static const int kDefaultValueFieldNumber = 7;\n  const ::std::string& default_value() 
const;\n  void set_default_value(const ::std::string& value);\n  void set_default_value(const char* value);\n  void set_default_value(const char* value, size_t size);\n  ::std::string* mutable_default_value();\n  ::std::string* release_default_value();\n  void set_allocated_default_value(::std::string* default_value);\n\n  // optional int32 oneof_index = 9;\n  bool has_oneof_index() const;\n  void clear_oneof_index();\n  static const int kOneofIndexFieldNumber = 9;\n  ::google::protobuf::int32 oneof_index() const;\n  void set_oneof_index(::google::protobuf::int32 value);\n\n  // optional string json_name = 10;\n  bool has_json_name() const;\n  void clear_json_name();\n  static const int kJsonNameFieldNumber = 10;\n  const ::std::string& json_name() const;\n  void set_json_name(const ::std::string& value);\n  void set_json_name(const char* value);\n  void set_json_name(const char* value, size_t size);\n  ::std::string* mutable_json_name();\n  ::std::string* release_json_name();\n  void set_allocated_json_name(::std::string* json_name);\n\n  // optional .google.protobuf.FieldOptions options = 8;\n  bool has_options() const;\n  void clear_options();\n  static const int kOptionsFieldNumber = 8;\n  const ::google::protobuf::FieldOptions& options() const;\n  ::google::protobuf::FieldOptions* mutable_options();\n  ::google::protobuf::FieldOptions* release_options();\n  void set_allocated_options(::google::protobuf::FieldOptions* options);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.FieldDescriptorProto)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_number();\n  inline void clear_has_number();\n  inline void set_has_label();\n  inline void clear_has_label();\n  inline void set_has_type();\n  inline void clear_has_type();\n  inline void set_has_type_name();\n  inline void clear_has_type_name();\n  inline void set_has_extendee();\n  inline void clear_has_extendee();\n  inline void set_has_default_value();\n 
 inline void clear_has_default_value();\n  inline void set_has_oneof_index();\n  inline void clear_has_oneof_index();\n  inline void set_has_json_name();\n  inline void clear_has_json_name();\n  inline void set_has_options();\n  inline void clear_has_options();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::internal::ArenaStringPtr type_name_;\n  ::google::protobuf::internal::ArenaStringPtr extendee_;\n  ::google::protobuf::internal::ArenaStringPtr default_value_;\n  ::google::protobuf::internal::ArenaStringPtr json_name_;\n  ::google::protobuf::FieldOptions* options_;\n  ::google::protobuf::int32 number_;\n  ::google::protobuf::int32 oneof_index_;\n  int label_;\n  int type_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<FieldDescriptorProto> FieldDescriptorProto_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT OneofDescriptorProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.OneofDescriptorProto) */ {\n public:\n  OneofDescriptorProto();\n  virtual ~OneofDescriptorProto();\n\n  OneofDescriptorProto(const OneofDescriptorProto& from);\n\n  inline OneofDescriptorProto& operator=(const OneofDescriptorProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    
return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const OneofDescriptorProto& default_instance();\n\n  static const OneofDescriptorProto* internal_default_instance();\n\n  void Swap(OneofDescriptorProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline OneofDescriptorProto* New() const { return New(NULL); }\n\n  OneofDescriptorProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const OneofDescriptorProto& from);\n  void MergeFrom(const OneofDescriptorProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(OneofDescriptorProto* other);\n  void UnsafeMergeFrom(const OneofDescriptorProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() 
const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // optional .google.protobuf.OneofOptions options = 2;\n  bool has_options() const;\n  void clear_options();\n  static const int kOptionsFieldNumber = 2;\n  const ::google::protobuf::OneofOptions& options() const;\n  ::google::protobuf::OneofOptions* mutable_options();\n  ::google::protobuf::OneofOptions* release_options();\n  void set_allocated_options(::google::protobuf::OneofOptions* options);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.OneofDescriptorProto)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_options();\n  inline void clear_has_options();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::OneofOptions* options_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<OneofDescriptorProto> OneofDescriptorProto_default_instance_;\n\n// 
-------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT EnumDescriptorProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.EnumDescriptorProto) */ {\n public:\n  EnumDescriptorProto();\n  virtual ~EnumDescriptorProto();\n\n  EnumDescriptorProto(const EnumDescriptorProto& from);\n\n  inline EnumDescriptorProto& operator=(const EnumDescriptorProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const EnumDescriptorProto& default_instance();\n\n  static const EnumDescriptorProto* internal_default_instance();\n\n  void Swap(EnumDescriptorProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline EnumDescriptorProto* New() const { return New(NULL); }\n\n  EnumDescriptorProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const EnumDescriptorProto& from);\n  void MergeFrom(const EnumDescriptorProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return 
InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(EnumDescriptorProto* other);\n  void UnsafeMergeFrom(const EnumDescriptorProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // repeated .google.protobuf.EnumValueDescriptorProto value = 2;\n  int value_size() const;\n  void clear_value();\n  static const int kValueFieldNumber = 2;\n  const ::google::protobuf::EnumValueDescriptorProto& value(int index) const;\n  ::google::protobuf::EnumValueDescriptorProto* mutable_value(int index);\n  ::google::protobuf::EnumValueDescriptorProto* add_value();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumValueDescriptorProto >*\n      mutable_value();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumValueDescriptorProto >&\n      value() const;\n\n  // optional .google.protobuf.EnumOptions options = 3;\n  bool has_options() const;\n  void clear_options();\n  static const int kOptionsFieldNumber = 3;\n  const ::google::protobuf::EnumOptions& options() const;\n  
::google::protobuf::EnumOptions* mutable_options();\n  ::google::protobuf::EnumOptions* release_options();\n  void set_allocated_options(::google::protobuf::EnumOptions* options);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.EnumDescriptorProto)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_options();\n  inline void clear_has_options();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumValueDescriptorProto > value_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::EnumOptions* options_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<EnumDescriptorProto> EnumDescriptorProto_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT EnumValueDescriptorProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.EnumValueDescriptorProto) */ {\n public:\n  EnumValueDescriptorProto();\n  virtual ~EnumValueDescriptorProto();\n\n  EnumValueDescriptorProto(const EnumValueDescriptorProto& from);\n\n  inline EnumValueDescriptorProto& operator=(const EnumValueDescriptorProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline 
::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const EnumValueDescriptorProto& default_instance();\n\n  static const EnumValueDescriptorProto* internal_default_instance();\n\n  void Swap(EnumValueDescriptorProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline EnumValueDescriptorProto* New() const { return New(NULL); }\n\n  EnumValueDescriptorProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const EnumValueDescriptorProto& from);\n  void MergeFrom(const EnumValueDescriptorProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(EnumValueDescriptorProto* other);\n  void UnsafeMergeFrom(const EnumValueDescriptorProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types 
----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // optional int32 number = 2;\n  bool has_number() const;\n  void clear_number();\n  static const int kNumberFieldNumber = 2;\n  ::google::protobuf::int32 number() const;\n  void set_number(::google::protobuf::int32 value);\n\n  // optional .google.protobuf.EnumValueOptions options = 3;\n  bool has_options() const;\n  void clear_options();\n  static const int kOptionsFieldNumber = 3;\n  const ::google::protobuf::EnumValueOptions& options() const;\n  ::google::protobuf::EnumValueOptions* mutable_options();\n  ::google::protobuf::EnumValueOptions* release_options();\n  void set_allocated_options(::google::protobuf::EnumValueOptions* options);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.EnumValueDescriptorProto)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_number();\n  inline void clear_has_number();\n  inline void set_has_options();\n  inline void clear_has_options();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::EnumValueOptions* options_;\n  ::google::protobuf::int32 number_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT 
protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<EnumValueDescriptorProto> EnumValueDescriptorProto_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT ServiceDescriptorProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.ServiceDescriptorProto) */ {\n public:\n  ServiceDescriptorProto();\n  virtual ~ServiceDescriptorProto();\n\n  ServiceDescriptorProto(const ServiceDescriptorProto& from);\n\n  inline ServiceDescriptorProto& operator=(const ServiceDescriptorProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const ServiceDescriptorProto& default_instance();\n\n  static const ServiceDescriptorProto* internal_default_instance();\n\n  void Swap(ServiceDescriptorProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline ServiceDescriptorProto* New() const { return New(NULL); }\n\n  ServiceDescriptorProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const ServiceDescriptorProto& from);\n  void MergeFrom(const ServiceDescriptorProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      
::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(ServiceDescriptorProto* other);\n  void UnsafeMergeFrom(const ServiceDescriptorProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // repeated .google.protobuf.MethodDescriptorProto method = 2;\n  int method_size() const;\n  void clear_method();\n  static const int kMethodFieldNumber = 2;\n  const ::google::protobuf::MethodDescriptorProto& method(int index) const;\n  ::google::protobuf::MethodDescriptorProto* mutable_method(int index);\n  ::google::protobuf::MethodDescriptorProto* add_method();\n  ::google::protobuf::RepeatedPtrField< 
::google::protobuf::MethodDescriptorProto >*\n      mutable_method();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::MethodDescriptorProto >&\n      method() const;\n\n  // optional .google.protobuf.ServiceOptions options = 3;\n  bool has_options() const;\n  void clear_options();\n  static const int kOptionsFieldNumber = 3;\n  const ::google::protobuf::ServiceOptions& options() const;\n  ::google::protobuf::ServiceOptions* mutable_options();\n  ::google::protobuf::ServiceOptions* release_options();\n  void set_allocated_options(::google::protobuf::ServiceOptions* options);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.ServiceDescriptorProto)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_options();\n  inline void clear_has_options();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::MethodDescriptorProto > method_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::ServiceOptions* options_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<ServiceDescriptorProto> ServiceDescriptorProto_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT MethodDescriptorProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.MethodDescriptorProto) */ {\n public:\n  
MethodDescriptorProto();\n  virtual ~MethodDescriptorProto();\n\n  MethodDescriptorProto(const MethodDescriptorProto& from);\n\n  inline MethodDescriptorProto& operator=(const MethodDescriptorProto& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const MethodDescriptorProto& default_instance();\n\n  static const MethodDescriptorProto* internal_default_instance();\n\n  void Swap(MethodDescriptorProto* other);\n\n  // implements Message ----------------------------------------------\n\n  inline MethodDescriptorProto* New() const { return New(NULL); }\n\n  MethodDescriptorProto* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const MethodDescriptorProto& from);\n  void MergeFrom(const MethodDescriptorProto& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(MethodDescriptorProto* 
other);\n  void UnsafeMergeFrom(const MethodDescriptorProto& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  bool has_name() const;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n\n  // optional string input_type = 2;\n  bool has_input_type() const;\n  void clear_input_type();\n  static const int kInputTypeFieldNumber = 2;\n  const ::std::string& input_type() const;\n  void set_input_type(const ::std::string& value);\n  void set_input_type(const char* value);\n  void set_input_type(const char* value, size_t size);\n  ::std::string* mutable_input_type();\n  ::std::string* release_input_type();\n  void set_allocated_input_type(::std::string* input_type);\n\n  // optional string output_type = 3;\n  bool has_output_type() const;\n  void clear_output_type();\n  static const int kOutputTypeFieldNumber = 3;\n  const ::std::string& output_type() const;\n  void set_output_type(const ::std::string& value);\n  void set_output_type(const char* value);\n  void set_output_type(const char* value, size_t size);\n  ::std::string* mutable_output_type();\n  ::std::string* release_output_type();\n  void set_allocated_output_type(::std::string* output_type);\n\n  // optional .google.protobuf.MethodOptions options = 4;\n  bool has_options() const;\n  void 
clear_options();\n  static const int kOptionsFieldNumber = 4;\n  const ::google::protobuf::MethodOptions& options() const;\n  ::google::protobuf::MethodOptions* mutable_options();\n  ::google::protobuf::MethodOptions* release_options();\n  void set_allocated_options(::google::protobuf::MethodOptions* options);\n\n  // optional bool client_streaming = 5 [default = false];\n  bool has_client_streaming() const;\n  void clear_client_streaming();\n  static const int kClientStreamingFieldNumber = 5;\n  bool client_streaming() const;\n  void set_client_streaming(bool value);\n\n  // optional bool server_streaming = 6 [default = false];\n  bool has_server_streaming() const;\n  void clear_server_streaming();\n  static const int kServerStreamingFieldNumber = 6;\n  bool server_streaming() const;\n  void set_server_streaming(bool value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.MethodDescriptorProto)\n private:\n  inline void set_has_name();\n  inline void clear_has_name();\n  inline void set_has_input_type();\n  inline void clear_has_input_type();\n  inline void set_has_output_type();\n  inline void clear_has_output_type();\n  inline void set_has_options();\n  inline void clear_has_options();\n  inline void set_has_client_streaming();\n  inline void clear_has_client_streaming();\n  inline void set_has_server_streaming();\n  inline void clear_has_server_streaming();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::internal::ArenaStringPtr input_type_;\n  ::google::protobuf::internal::ArenaStringPtr output_type_;\n  ::google::protobuf::MethodOptions* options_;\n  bool client_streaming_;\n  bool server_streaming_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT 
protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<MethodDescriptorProto> MethodDescriptorProto_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT FileOptions : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.FileOptions) */ {\n public:\n  FileOptions();\n  virtual ~FileOptions();\n\n  FileOptions(const FileOptions& from);\n\n  inline FileOptions& operator=(const FileOptions& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const FileOptions& default_instance();\n\n  static const FileOptions* internal_default_instance();\n\n  void Swap(FileOptions* other);\n\n  // implements Message ----------------------------------------------\n\n  inline FileOptions* New() const { return New(NULL); }\n\n  FileOptions* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const FileOptions& from);\n  void MergeFrom(const FileOptions& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  
::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(FileOptions* other);\n  void UnsafeMergeFrom(const FileOptions& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  typedef FileOptions_OptimizeMode OptimizeMode;\n  static const OptimizeMode SPEED =\n    FileOptions_OptimizeMode_SPEED;\n  static const OptimizeMode CODE_SIZE =\n    FileOptions_OptimizeMode_CODE_SIZE;\n  static const OptimizeMode LITE_RUNTIME =\n    FileOptions_OptimizeMode_LITE_RUNTIME;\n  static inline bool OptimizeMode_IsValid(int value) {\n    return FileOptions_OptimizeMode_IsValid(value);\n  }\n  static const OptimizeMode OptimizeMode_MIN =\n    FileOptions_OptimizeMode_OptimizeMode_MIN;\n  static const OptimizeMode OptimizeMode_MAX =\n    FileOptions_OptimizeMode_OptimizeMode_MAX;\n  static const int OptimizeMode_ARRAYSIZE =\n    FileOptions_OptimizeMode_OptimizeMode_ARRAYSIZE;\n  static inline const ::google::protobuf::EnumDescriptor*\n  OptimizeMode_descriptor() {\n    return FileOptions_OptimizeMode_descriptor();\n  }\n  static inline const ::std::string& OptimizeMode_Name(OptimizeMode value) {\n    return FileOptions_OptimizeMode_Name(value);\n  }\n  static inline bool OptimizeMode_Parse(const ::std::string& name,\n      OptimizeMode* value) {\n    
return FileOptions_OptimizeMode_Parse(name, value);\n  }\n\n  // accessors -------------------------------------------------------\n\n  // optional string java_package = 1;\n  bool has_java_package() const;\n  void clear_java_package();\n  static const int kJavaPackageFieldNumber = 1;\n  const ::std::string& java_package() const;\n  void set_java_package(const ::std::string& value);\n  void set_java_package(const char* value);\n  void set_java_package(const char* value, size_t size);\n  ::std::string* mutable_java_package();\n  ::std::string* release_java_package();\n  void set_allocated_java_package(::std::string* java_package);\n\n  // optional string java_outer_classname = 8;\n  bool has_java_outer_classname() const;\n  void clear_java_outer_classname();\n  static const int kJavaOuterClassnameFieldNumber = 8;\n  const ::std::string& java_outer_classname() const;\n  void set_java_outer_classname(const ::std::string& value);\n  void set_java_outer_classname(const char* value);\n  void set_java_outer_classname(const char* value, size_t size);\n  ::std::string* mutable_java_outer_classname();\n  ::std::string* release_java_outer_classname();\n  void set_allocated_java_outer_classname(::std::string* java_outer_classname);\n\n  // optional bool java_multiple_files = 10 [default = false];\n  bool has_java_multiple_files() const;\n  void clear_java_multiple_files();\n  static const int kJavaMultipleFilesFieldNumber = 10;\n  bool java_multiple_files() const;\n  void set_java_multiple_files(bool value);\n\n  // optional bool java_generate_equals_and_hash = 20 [deprecated = true];\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR bool has_java_generate_equals_and_hash() const;\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR void clear_java_generate_equals_and_hash();\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR static const int kJavaGenerateEqualsAndHashFieldNumber = 20;\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR bool java_generate_equals_and_hash() const;\n  GOOGLE_PROTOBUF_DEPRECATED_ATTR void 
set_java_generate_equals_and_hash(bool value);\n\n  // optional bool java_string_check_utf8 = 27 [default = false];\n  bool has_java_string_check_utf8() const;\n  void clear_java_string_check_utf8();\n  static const int kJavaStringCheckUtf8FieldNumber = 27;\n  bool java_string_check_utf8() const;\n  void set_java_string_check_utf8(bool value);\n\n  // optional .google.protobuf.FileOptions.OptimizeMode optimize_for = 9 [default = SPEED];\n  bool has_optimize_for() const;\n  void clear_optimize_for();\n  static const int kOptimizeForFieldNumber = 9;\n  ::google::protobuf::FileOptions_OptimizeMode optimize_for() const;\n  void set_optimize_for(::google::protobuf::FileOptions_OptimizeMode value);\n\n  // optional string go_package = 11;\n  bool has_go_package() const;\n  void clear_go_package();\n  static const int kGoPackageFieldNumber = 11;\n  const ::std::string& go_package() const;\n  void set_go_package(const ::std::string& value);\n  void set_go_package(const char* value);\n  void set_go_package(const char* value, size_t size);\n  ::std::string* mutable_go_package();\n  ::std::string* release_go_package();\n  void set_allocated_go_package(::std::string* go_package);\n\n  // optional bool cc_generic_services = 16 [default = false];\n  bool has_cc_generic_services() const;\n  void clear_cc_generic_services();\n  static const int kCcGenericServicesFieldNumber = 16;\n  bool cc_generic_services() const;\n  void set_cc_generic_services(bool value);\n\n  // optional bool java_generic_services = 17 [default = false];\n  bool has_java_generic_services() const;\n  void clear_java_generic_services();\n  static const int kJavaGenericServicesFieldNumber = 17;\n  bool java_generic_services() const;\n  void set_java_generic_services(bool value);\n\n  // optional bool py_generic_services = 18 [default = false];\n  bool has_py_generic_services() const;\n  void clear_py_generic_services();\n  static const int kPyGenericServicesFieldNumber = 18;\n  bool py_generic_services() 
const;\n  void set_py_generic_services(bool value);\n\n  // optional bool deprecated = 23 [default = false];\n  bool has_deprecated() const;\n  void clear_deprecated();\n  static const int kDeprecatedFieldNumber = 23;\n  bool deprecated() const;\n  void set_deprecated(bool value);\n\n  // optional bool cc_enable_arenas = 31 [default = false];\n  bool has_cc_enable_arenas() const;\n  void clear_cc_enable_arenas();\n  static const int kCcEnableArenasFieldNumber = 31;\n  bool cc_enable_arenas() const;\n  void set_cc_enable_arenas(bool value);\n\n  // optional string objc_class_prefix = 36;\n  bool has_objc_class_prefix() const;\n  void clear_objc_class_prefix();\n  static const int kObjcClassPrefixFieldNumber = 36;\n  const ::std::string& objc_class_prefix() const;\n  void set_objc_class_prefix(const ::std::string& value);\n  void set_objc_class_prefix(const char* value);\n  void set_objc_class_prefix(const char* value, size_t size);\n  ::std::string* mutable_objc_class_prefix();\n  ::std::string* release_objc_class_prefix();\n  void set_allocated_objc_class_prefix(::std::string* objc_class_prefix);\n\n  // optional string csharp_namespace = 37;\n  bool has_csharp_namespace() const;\n  void clear_csharp_namespace();\n  static const int kCsharpNamespaceFieldNumber = 37;\n  const ::std::string& csharp_namespace() const;\n  void set_csharp_namespace(const ::std::string& value);\n  void set_csharp_namespace(const char* value);\n  void set_csharp_namespace(const char* value, size_t size);\n  ::std::string* mutable_csharp_namespace();\n  ::std::string* release_csharp_namespace();\n  void set_allocated_csharp_namespace(::std::string* csharp_namespace);\n\n  // repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\n  int uninterpreted_option_size() const;\n  void clear_uninterpreted_option();\n  static const int kUninterpretedOptionFieldNumber = 999;\n  const ::google::protobuf::UninterpretedOption& uninterpreted_option(int index) const;\n  
::google::protobuf::UninterpretedOption* mutable_uninterpreted_option(int index);\n  ::google::protobuf::UninterpretedOption* add_uninterpreted_option();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\n      mutable_uninterpreted_option();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\n      uninterpreted_option() const;\n\n  GOOGLE_PROTOBUF_EXTENSION_ACCESSORS(FileOptions)\n  // @@protoc_insertion_point(class_scope:google.protobuf.FileOptions)\n private:\n  inline void set_has_java_package();\n  inline void clear_has_java_package();\n  inline void set_has_java_outer_classname();\n  inline void clear_has_java_outer_classname();\n  inline void set_has_java_multiple_files();\n  inline void clear_has_java_multiple_files();\n  inline void set_has_java_generate_equals_and_hash();\n  inline void clear_has_java_generate_equals_and_hash();\n  inline void set_has_java_string_check_utf8();\n  inline void clear_has_java_string_check_utf8();\n  inline void set_has_optimize_for();\n  inline void clear_has_optimize_for();\n  inline void set_has_go_package();\n  inline void clear_has_go_package();\n  inline void set_has_cc_generic_services();\n  inline void clear_has_cc_generic_services();\n  inline void set_has_java_generic_services();\n  inline void clear_has_java_generic_services();\n  inline void set_has_py_generic_services();\n  inline void clear_has_py_generic_services();\n  inline void set_has_deprecated();\n  inline void clear_has_deprecated();\n  inline void set_has_cc_enable_arenas();\n  inline void clear_has_cc_enable_arenas();\n  inline void set_has_objc_class_prefix();\n  inline void clear_has_objc_class_prefix();\n  inline void set_has_csharp_namespace();\n  inline void clear_has_csharp_namespace();\n\n  ::google::protobuf::internal::ExtensionSet _extensions_;\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> 
_has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption > uninterpreted_option_;\n  ::google::protobuf::internal::ArenaStringPtr java_package_;\n  ::google::protobuf::internal::ArenaStringPtr java_outer_classname_;\n  ::google::protobuf::internal::ArenaStringPtr go_package_;\n  ::google::protobuf::internal::ArenaStringPtr objc_class_prefix_;\n  ::google::protobuf::internal::ArenaStringPtr csharp_namespace_;\n  bool java_multiple_files_;\n  bool java_generate_equals_and_hash_;\n  bool java_string_check_utf8_;\n  bool cc_generic_services_;\n  bool java_generic_services_;\n  bool py_generic_services_;\n  bool deprecated_;\n  bool cc_enable_arenas_;\n  int optimize_for_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<FileOptions> FileOptions_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT MessageOptions : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.MessageOptions) */ {\n public:\n  MessageOptions();\n  virtual ~MessageOptions();\n\n  MessageOptions(const MessageOptions& from);\n\n  inline MessageOptions& operator=(const MessageOptions& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const 
::google::protobuf::Descriptor* descriptor();\n  static const MessageOptions& default_instance();\n\n  static const MessageOptions* internal_default_instance();\n\n  void Swap(MessageOptions* other);\n\n  // implements Message ----------------------------------------------\n\n  inline MessageOptions* New() const { return New(NULL); }\n\n  MessageOptions* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const MessageOptions& from);\n  void MergeFrom(const MessageOptions& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(MessageOptions* other);\n  void UnsafeMergeFrom(const MessageOptions& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional bool message_set_wire_format = 1 [default = false];\n  bool has_message_set_wire_format() 
const;\n  void clear_message_set_wire_format();\n  static const int kMessageSetWireFormatFieldNumber = 1;\n  bool message_set_wire_format() const;\n  void set_message_set_wire_format(bool value);\n\n  // optional bool no_standard_descriptor_accessor = 2 [default = false];\n  bool has_no_standard_descriptor_accessor() const;\n  void clear_no_standard_descriptor_accessor();\n  static const int kNoStandardDescriptorAccessorFieldNumber = 2;\n  bool no_standard_descriptor_accessor() const;\n  void set_no_standard_descriptor_accessor(bool value);\n\n  // optional bool deprecated = 3 [default = false];\n  bool has_deprecated() const;\n  void clear_deprecated();\n  static const int kDeprecatedFieldNumber = 3;\n  bool deprecated() const;\n  void set_deprecated(bool value);\n\n  // optional bool map_entry = 7;\n  bool has_map_entry() const;\n  void clear_map_entry();\n  static const int kMapEntryFieldNumber = 7;\n  bool map_entry() const;\n  void set_map_entry(bool value);\n\n  // repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\n  int uninterpreted_option_size() const;\n  void clear_uninterpreted_option();\n  static const int kUninterpretedOptionFieldNumber = 999;\n  const ::google::protobuf::UninterpretedOption& uninterpreted_option(int index) const;\n  ::google::protobuf::UninterpretedOption* mutable_uninterpreted_option(int index);\n  ::google::protobuf::UninterpretedOption* add_uninterpreted_option();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\n      mutable_uninterpreted_option();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\n      uninterpreted_option() const;\n\n  GOOGLE_PROTOBUF_EXTENSION_ACCESSORS(MessageOptions)\n  // @@protoc_insertion_point(class_scope:google.protobuf.MessageOptions)\n private:\n  inline void set_has_message_set_wire_format();\n  inline void clear_has_message_set_wire_format();\n  inline void set_has_no_standard_descriptor_accessor();\n 
 inline void clear_has_no_standard_descriptor_accessor();\n  inline void set_has_deprecated();\n  inline void clear_has_deprecated();\n  inline void set_has_map_entry();\n  inline void clear_has_map_entry();\n\n  ::google::protobuf::internal::ExtensionSet _extensions_;\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption > uninterpreted_option_;\n  bool message_set_wire_format_;\n  bool no_standard_descriptor_accessor_;\n  bool deprecated_;\n  bool map_entry_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<MessageOptions> MessageOptions_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT FieldOptions : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.FieldOptions) */ {\n public:\n  FieldOptions();\n  virtual ~FieldOptions();\n\n  FieldOptions(const FieldOptions& from);\n\n  inline FieldOptions& operator=(const FieldOptions& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const FieldOptions& 
default_instance();\n\n  static const FieldOptions* internal_default_instance();\n\n  void Swap(FieldOptions* other);\n\n  // implements Message ----------------------------------------------\n\n  inline FieldOptions* New() const { return New(NULL); }\n\n  FieldOptions* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const FieldOptions& from);\n  void MergeFrom(const FieldOptions& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(FieldOptions* other);\n  void UnsafeMergeFrom(const FieldOptions& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  typedef FieldOptions_CType CType;\n  static const CType STRING =\n    FieldOptions_CType_STRING;\n  static const CType CORD =\n    FieldOptions_CType_CORD;\n  static const CType STRING_PIECE =\n    FieldOptions_CType_STRING_PIECE;\n  static inline bool CType_IsValid(int 
value) {\n    return FieldOptions_CType_IsValid(value);\n  }\n  static const CType CType_MIN =\n    FieldOptions_CType_CType_MIN;\n  static const CType CType_MAX =\n    FieldOptions_CType_CType_MAX;\n  static const int CType_ARRAYSIZE =\n    FieldOptions_CType_CType_ARRAYSIZE;\n  static inline const ::google::protobuf::EnumDescriptor*\n  CType_descriptor() {\n    return FieldOptions_CType_descriptor();\n  }\n  static inline const ::std::string& CType_Name(CType value) {\n    return FieldOptions_CType_Name(value);\n  }\n  static inline bool CType_Parse(const ::std::string& name,\n      CType* value) {\n    return FieldOptions_CType_Parse(name, value);\n  }\n\n  typedef FieldOptions_JSType JSType;\n  static const JSType JS_NORMAL =\n    FieldOptions_JSType_JS_NORMAL;\n  static const JSType JS_STRING =\n    FieldOptions_JSType_JS_STRING;\n  static const JSType JS_NUMBER =\n    FieldOptions_JSType_JS_NUMBER;\n  static inline bool JSType_IsValid(int value) {\n    return FieldOptions_JSType_IsValid(value);\n  }\n  static const JSType JSType_MIN =\n    FieldOptions_JSType_JSType_MIN;\n  static const JSType JSType_MAX =\n    FieldOptions_JSType_JSType_MAX;\n  static const int JSType_ARRAYSIZE =\n    FieldOptions_JSType_JSType_ARRAYSIZE;\n  static inline const ::google::protobuf::EnumDescriptor*\n  JSType_descriptor() {\n    return FieldOptions_JSType_descriptor();\n  }\n  static inline const ::std::string& JSType_Name(JSType value) {\n    return FieldOptions_JSType_Name(value);\n  }\n  static inline bool JSType_Parse(const ::std::string& name,\n      JSType* value) {\n    return FieldOptions_JSType_Parse(name, value);\n  }\n\n  // accessors -------------------------------------------------------\n\n  // optional .google.protobuf.FieldOptions.CType ctype = 1 [default = STRING];\n  bool has_ctype() const;\n  void clear_ctype();\n  static const int kCtypeFieldNumber = 1;\n  ::google::protobuf::FieldOptions_CType ctype() const;\n  void 
set_ctype(::google::protobuf::FieldOptions_CType value);\n\n  // optional bool packed = 2;\n  bool has_packed() const;\n  void clear_packed();\n  static const int kPackedFieldNumber = 2;\n  bool packed() const;\n  void set_packed(bool value);\n\n  // optional .google.protobuf.FieldOptions.JSType jstype = 6 [default = JS_NORMAL];\n  bool has_jstype() const;\n  void clear_jstype();\n  static const int kJstypeFieldNumber = 6;\n  ::google::protobuf::FieldOptions_JSType jstype() const;\n  void set_jstype(::google::protobuf::FieldOptions_JSType value);\n\n  // optional bool lazy = 5 [default = false];\n  bool has_lazy() const;\n  void clear_lazy();\n  static const int kLazyFieldNumber = 5;\n  bool lazy() const;\n  void set_lazy(bool value);\n\n  // optional bool deprecated = 3 [default = false];\n  bool has_deprecated() const;\n  void clear_deprecated();\n  static const int kDeprecatedFieldNumber = 3;\n  bool deprecated() const;\n  void set_deprecated(bool value);\n\n  // optional bool weak = 10 [default = false];\n  bool has_weak() const;\n  void clear_weak();\n  static const int kWeakFieldNumber = 10;\n  bool weak() const;\n  void set_weak(bool value);\n\n  // repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\n  int uninterpreted_option_size() const;\n  void clear_uninterpreted_option();\n  static const int kUninterpretedOptionFieldNumber = 999;\n  const ::google::protobuf::UninterpretedOption& uninterpreted_option(int index) const;\n  ::google::protobuf::UninterpretedOption* mutable_uninterpreted_option(int index);\n  ::google::protobuf::UninterpretedOption* add_uninterpreted_option();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\n      mutable_uninterpreted_option();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\n      uninterpreted_option() const;\n\n  GOOGLE_PROTOBUF_EXTENSION_ACCESSORS(FieldOptions)\n  // 
@@protoc_insertion_point(class_scope:google.protobuf.FieldOptions)\n private:\n  inline void set_has_ctype();\n  inline void clear_has_ctype();\n  inline void set_has_packed();\n  inline void clear_has_packed();\n  inline void set_has_jstype();\n  inline void clear_has_jstype();\n  inline void set_has_lazy();\n  inline void clear_has_lazy();\n  inline void set_has_deprecated();\n  inline void clear_has_deprecated();\n  inline void set_has_weak();\n  inline void clear_has_weak();\n\n  ::google::protobuf::internal::ExtensionSet _extensions_;\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption > uninterpreted_option_;\n  int ctype_;\n  int jstype_;\n  bool packed_;\n  bool lazy_;\n  bool deprecated_;\n  bool weak_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<FieldOptions> FieldOptions_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT OneofOptions : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.OneofOptions) */ {\n public:\n  OneofOptions();\n  virtual ~OneofOptions();\n\n  OneofOptions(const OneofOptions& from);\n\n  inline OneofOptions& operator=(const OneofOptions& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  
}\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const OneofOptions& default_instance();\n\n  static const OneofOptions* internal_default_instance();\n\n  void Swap(OneofOptions* other);\n\n  // implements Message ----------------------------------------------\n\n  inline OneofOptions* New() const { return New(NULL); }\n\n  OneofOptions* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const OneofOptions& from);\n  void MergeFrom(const OneofOptions& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(OneofOptions* other);\n  void UnsafeMergeFrom(const OneofOptions& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors 
-------------------------------------------------------\n\n  // repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\n  int uninterpreted_option_size() const;\n  void clear_uninterpreted_option();\n  static const int kUninterpretedOptionFieldNumber = 999;\n  const ::google::protobuf::UninterpretedOption& uninterpreted_option(int index) const;\n  ::google::protobuf::UninterpretedOption* mutable_uninterpreted_option(int index);\n  ::google::protobuf::UninterpretedOption* add_uninterpreted_option();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\n      mutable_uninterpreted_option();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\n      uninterpreted_option() const;\n\n  GOOGLE_PROTOBUF_EXTENSION_ACCESSORS(OneofOptions)\n  // @@protoc_insertion_point(class_scope:google.protobuf.OneofOptions)\n private:\n\n  ::google::protobuf::internal::ExtensionSet _extensions_;\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption > uninterpreted_option_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<OneofOptions> OneofOptions_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT EnumOptions : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.EnumOptions) */ {\n public:\n  
EnumOptions();\n  virtual ~EnumOptions();\n\n  EnumOptions(const EnumOptions& from);\n\n  inline EnumOptions& operator=(const EnumOptions& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const EnumOptions& default_instance();\n\n  static const EnumOptions* internal_default_instance();\n\n  void Swap(EnumOptions* other);\n\n  // implements Message ----------------------------------------------\n\n  inline EnumOptions* New() const { return New(NULL); }\n\n  EnumOptions* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const EnumOptions& from);\n  void MergeFrom(const EnumOptions& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(EnumOptions* other);\n  void UnsafeMergeFrom(const EnumOptions& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    
return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional bool allow_alias = 2;\n  bool has_allow_alias() const;\n  void clear_allow_alias();\n  static const int kAllowAliasFieldNumber = 2;\n  bool allow_alias() const;\n  void set_allow_alias(bool value);\n\n  // optional bool deprecated = 3 [default = false];\n  bool has_deprecated() const;\n  void clear_deprecated();\n  static const int kDeprecatedFieldNumber = 3;\n  bool deprecated() const;\n  void set_deprecated(bool value);\n\n  // repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\n  int uninterpreted_option_size() const;\n  void clear_uninterpreted_option();\n  static const int kUninterpretedOptionFieldNumber = 999;\n  const ::google::protobuf::UninterpretedOption& uninterpreted_option(int index) const;\n  ::google::protobuf::UninterpretedOption* mutable_uninterpreted_option(int index);\n  ::google::protobuf::UninterpretedOption* add_uninterpreted_option();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\n      mutable_uninterpreted_option();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\n      uninterpreted_option() const;\n\n  GOOGLE_PROTOBUF_EXTENSION_ACCESSORS(EnumOptions)\n  // @@protoc_insertion_point(class_scope:google.protobuf.EnumOptions)\n private:\n  inline void set_has_allow_alias();\n  inline void clear_has_allow_alias();\n  inline void set_has_deprecated();\n  inline void clear_has_deprecated();\n\n  ::google::protobuf::internal::ExtensionSet _extensions_;\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> 
_has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption > uninterpreted_option_;\n  bool allow_alias_;\n  bool deprecated_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<EnumOptions> EnumOptions_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT EnumValueOptions : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.EnumValueOptions) */ {\n public:\n  EnumValueOptions();\n  virtual ~EnumValueOptions();\n\n  EnumValueOptions(const EnumValueOptions& from);\n\n  inline EnumValueOptions& operator=(const EnumValueOptions& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const EnumValueOptions& default_instance();\n\n  static const EnumValueOptions* internal_default_instance();\n\n  void Swap(EnumValueOptions* other);\n\n  // implements Message ----------------------------------------------\n\n  inline EnumValueOptions* New() const { return New(NULL); }\n\n  EnumValueOptions* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void 
CopyFrom(const EnumValueOptions& from);\n  void MergeFrom(const EnumValueOptions& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(EnumValueOptions* other);\n  void UnsafeMergeFrom(const EnumValueOptions& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional bool deprecated = 1 [default = false];\n  bool has_deprecated() const;\n  void clear_deprecated();\n  static const int kDeprecatedFieldNumber = 1;\n  bool deprecated() const;\n  void set_deprecated(bool value);\n\n  // repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\n  int uninterpreted_option_size() const;\n  void clear_uninterpreted_option();\n  static const int kUninterpretedOptionFieldNumber = 999;\n  const ::google::protobuf::UninterpretedOption& uninterpreted_option(int index) const;\n  ::google::protobuf::UninterpretedOption* mutable_uninterpreted_option(int index);\n  
::google::protobuf::UninterpretedOption* add_uninterpreted_option();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\n      mutable_uninterpreted_option();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\n      uninterpreted_option() const;\n\n  GOOGLE_PROTOBUF_EXTENSION_ACCESSORS(EnumValueOptions)\n  // @@protoc_insertion_point(class_scope:google.protobuf.EnumValueOptions)\n private:\n  inline void set_has_deprecated();\n  inline void clear_has_deprecated();\n\n  ::google::protobuf::internal::ExtensionSet _extensions_;\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption > uninterpreted_option_;\n  bool deprecated_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<EnumValueOptions> EnumValueOptions_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT ServiceOptions : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.ServiceOptions) */ {\n public:\n  ServiceOptions();\n  virtual ~ServiceOptions();\n\n  ServiceOptions(const ServiceOptions& from);\n\n  inline ServiceOptions& operator=(const ServiceOptions& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n 
 }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const ServiceOptions& default_instance();\n\n  static const ServiceOptions* internal_default_instance();\n\n  void Swap(ServiceOptions* other);\n\n  // implements Message ----------------------------------------------\n\n  inline ServiceOptions* New() const { return New(NULL); }\n\n  ServiceOptions* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const ServiceOptions& from);\n  void MergeFrom(const ServiceOptions& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(ServiceOptions* other);\n  void UnsafeMergeFrom(const ServiceOptions& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors 
-------------------------------------------------------\n\n  // optional bool deprecated = 33 [default = false];\n  bool has_deprecated() const;\n  void clear_deprecated();\n  static const int kDeprecatedFieldNumber = 33;\n  bool deprecated() const;\n  void set_deprecated(bool value);\n\n  // repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\n  int uninterpreted_option_size() const;\n  void clear_uninterpreted_option();\n  static const int kUninterpretedOptionFieldNumber = 999;\n  const ::google::protobuf::UninterpretedOption& uninterpreted_option(int index) const;\n  ::google::protobuf::UninterpretedOption* mutable_uninterpreted_option(int index);\n  ::google::protobuf::UninterpretedOption* add_uninterpreted_option();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\n      mutable_uninterpreted_option();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\n      uninterpreted_option() const;\n\n  GOOGLE_PROTOBUF_EXTENSION_ACCESSORS(ServiceOptions)\n  // @@protoc_insertion_point(class_scope:google.protobuf.ServiceOptions)\n private:\n  inline void set_has_deprecated();\n  inline void clear_has_deprecated();\n\n  ::google::protobuf::internal::ExtensionSet _extensions_;\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption > uninterpreted_option_;\n  bool deprecated_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern 
::google::protobuf::internal::ExplicitlyConstructed<ServiceOptions> ServiceOptions_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT MethodOptions : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.MethodOptions) */ {\n public:\n  MethodOptions();\n  virtual ~MethodOptions();\n\n  MethodOptions(const MethodOptions& from);\n\n  inline MethodOptions& operator=(const MethodOptions& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const MethodOptions& default_instance();\n\n  static const MethodOptions* internal_default_instance();\n\n  void Swap(MethodOptions* other);\n\n  // implements Message ----------------------------------------------\n\n  inline MethodOptions* New() const { return New(NULL); }\n\n  MethodOptions* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const MethodOptions& from);\n  void MergeFrom(const MethodOptions& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return 
InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(MethodOptions* other);\n  void UnsafeMergeFrom(const MethodOptions& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional bool deprecated = 33 [default = false];\n  bool has_deprecated() const;\n  void clear_deprecated();\n  static const int kDeprecatedFieldNumber = 33;\n  bool deprecated() const;\n  void set_deprecated(bool value);\n\n  // repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\n  int uninterpreted_option_size() const;\n  void clear_uninterpreted_option();\n  static const int kUninterpretedOptionFieldNumber = 999;\n  const ::google::protobuf::UninterpretedOption& uninterpreted_option(int index) const;\n  ::google::protobuf::UninterpretedOption* mutable_uninterpreted_option(int index);\n  ::google::protobuf::UninterpretedOption* add_uninterpreted_option();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\n      mutable_uninterpreted_option();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\n      uninterpreted_option() const;\n\n  GOOGLE_PROTOBUF_EXTENSION_ACCESSORS(MethodOptions)\n  // @@protoc_insertion_point(class_scope:google.protobuf.MethodOptions)\n private:\n  inline void set_has_deprecated();\n  inline void clear_has_deprecated();\n\n  ::google::protobuf::internal::ExtensionSet _extensions_;\n\n  
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption > uninterpreted_option_;\n  bool deprecated_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<MethodOptions> MethodOptions_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT UninterpretedOption_NamePart : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.UninterpretedOption.NamePart) */ {\n public:\n  UninterpretedOption_NamePart();\n  virtual ~UninterpretedOption_NamePart();\n\n  UninterpretedOption_NamePart(const UninterpretedOption_NamePart& from);\n\n  inline UninterpretedOption_NamePart& operator=(const UninterpretedOption_NamePart& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const UninterpretedOption_NamePart& default_instance();\n\n  static const UninterpretedOption_NamePart* internal_default_instance();\n\n  void Swap(UninterpretedOption_NamePart* other);\n\n  // implements Message ----------------------------------------------\n\n  inline 
UninterpretedOption_NamePart* New() const { return New(NULL); }\n\n  UninterpretedOption_NamePart* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const UninterpretedOption_NamePart& from);\n  void MergeFrom(const UninterpretedOption_NamePart& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(UninterpretedOption_NamePart* other);\n  void UnsafeMergeFrom(const UninterpretedOption_NamePart& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // required string name_part = 1;\n  bool has_name_part() const;\n  void clear_name_part();\n  static const int kNamePartFieldNumber = 1;\n  const ::std::string& name_part() const;\n  void set_name_part(const ::std::string& value);\n  void set_name_part(const char* value);\n  void set_name_part(const char* 
value, size_t size);\n  ::std::string* mutable_name_part();\n  ::std::string* release_name_part();\n  void set_allocated_name_part(::std::string* name_part);\n\n  // required bool is_extension = 2;\n  bool has_is_extension() const;\n  void clear_is_extension();\n  static const int kIsExtensionFieldNumber = 2;\n  bool is_extension() const;\n  void set_is_extension(bool value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.UninterpretedOption.NamePart)\n private:\n  inline void set_has_name_part();\n  inline void clear_has_name_part();\n  inline void set_has_is_extension();\n  inline void clear_has_is_extension();\n\n  // helper for ByteSizeLong()\n  size_t RequiredFieldsByteSizeFallback() const;\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::internal::ArenaStringPtr name_part_;\n  bool is_extension_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<UninterpretedOption_NamePart> UninterpretedOption_NamePart_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT UninterpretedOption : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.UninterpretedOption) */ {\n public:\n  UninterpretedOption();\n  virtual ~UninterpretedOption();\n\n  UninterpretedOption(const UninterpretedOption& from);\n\n  inline UninterpretedOption& operator=(const UninterpretedOption& from) {\n    CopyFrom(from);\n    return *this;\n 
 }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const UninterpretedOption& default_instance();\n\n  static const UninterpretedOption* internal_default_instance();\n\n  void Swap(UninterpretedOption* other);\n\n  // implements Message ----------------------------------------------\n\n  inline UninterpretedOption* New() const { return New(NULL); }\n\n  UninterpretedOption* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const UninterpretedOption& from);\n  void MergeFrom(const UninterpretedOption& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(UninterpretedOption* other);\n  void UnsafeMergeFrom(const UninterpretedOption& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return 
_internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  typedef UninterpretedOption_NamePart NamePart;\n\n  // accessors -------------------------------------------------------\n\n  // repeated .google.protobuf.UninterpretedOption.NamePart name = 2;\n  int name_size() const;\n  void clear_name();\n  static const int kNameFieldNumber = 2;\n  const ::google::protobuf::UninterpretedOption_NamePart& name(int index) const;\n  ::google::protobuf::UninterpretedOption_NamePart* mutable_name(int index);\n  ::google::protobuf::UninterpretedOption_NamePart* add_name();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption_NamePart >*\n      mutable_name();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption_NamePart >&\n      name() const;\n\n  // optional string identifier_value = 3;\n  bool has_identifier_value() const;\n  void clear_identifier_value();\n  static const int kIdentifierValueFieldNumber = 3;\n  const ::std::string& identifier_value() const;\n  void set_identifier_value(const ::std::string& value);\n  void set_identifier_value(const char* value);\n  void set_identifier_value(const char* value, size_t size);\n  ::std::string* mutable_identifier_value();\n  ::std::string* release_identifier_value();\n  void set_allocated_identifier_value(::std::string* identifier_value);\n\n  // optional uint64 positive_int_value = 4;\n  bool has_positive_int_value() const;\n  void clear_positive_int_value();\n  static const int kPositiveIntValueFieldNumber = 4;\n  ::google::protobuf::uint64 positive_int_value() const;\n  void set_positive_int_value(::google::protobuf::uint64 value);\n\n  // optional int64 negative_int_value = 5;\n  bool has_negative_int_value() const;\n  void clear_negative_int_value();\n  static const int kNegativeIntValueFieldNumber = 5;\n  ::google::protobuf::int64 
negative_int_value() const;\n  void set_negative_int_value(::google::protobuf::int64 value);\n\n  // optional double double_value = 6;\n  bool has_double_value() const;\n  void clear_double_value();\n  static const int kDoubleValueFieldNumber = 6;\n  double double_value() const;\n  void set_double_value(double value);\n\n  // optional bytes string_value = 7;\n  bool has_string_value() const;\n  void clear_string_value();\n  static const int kStringValueFieldNumber = 7;\n  const ::std::string& string_value() const;\n  void set_string_value(const ::std::string& value);\n  void set_string_value(const char* value);\n  void set_string_value(const void* value, size_t size);\n  ::std::string* mutable_string_value();\n  ::std::string* release_string_value();\n  void set_allocated_string_value(::std::string* string_value);\n\n  // optional string aggregate_value = 8;\n  bool has_aggregate_value() const;\n  void clear_aggregate_value();\n  static const int kAggregateValueFieldNumber = 8;\n  const ::std::string& aggregate_value() const;\n  void set_aggregate_value(const ::std::string& value);\n  void set_aggregate_value(const char* value);\n  void set_aggregate_value(const char* value, size_t size);\n  ::std::string* mutable_aggregate_value();\n  ::std::string* release_aggregate_value();\n  void set_allocated_aggregate_value(::std::string* aggregate_value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.UninterpretedOption)\n private:\n  inline void set_has_identifier_value();\n  inline void clear_has_identifier_value();\n  inline void set_has_positive_int_value();\n  inline void clear_has_positive_int_value();\n  inline void set_has_negative_int_value();\n  inline void clear_has_negative_int_value();\n  inline void set_has_double_value();\n  inline void clear_has_double_value();\n  inline void set_has_string_value();\n  inline void clear_has_string_value();\n  inline void set_has_aggregate_value();\n  inline void clear_has_aggregate_value();\n\n  
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption_NamePart > name_;\n  ::google::protobuf::internal::ArenaStringPtr identifier_value_;\n  ::google::protobuf::internal::ArenaStringPtr string_value_;\n  ::google::protobuf::internal::ArenaStringPtr aggregate_value_;\n  ::google::protobuf::uint64 positive_int_value_;\n  ::google::protobuf::int64 negative_int_value_;\n  double double_value_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<UninterpretedOption> UninterpretedOption_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT SourceCodeInfo_Location : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.SourceCodeInfo.Location) */ {\n public:\n  SourceCodeInfo_Location();\n  virtual ~SourceCodeInfo_Location();\n\n  SourceCodeInfo_Location(const SourceCodeInfo_Location& from);\n\n  inline SourceCodeInfo_Location& operator=(const SourceCodeInfo_Location& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const 
SourceCodeInfo_Location& default_instance();\n\n  static const SourceCodeInfo_Location* internal_default_instance();\n\n  void Swap(SourceCodeInfo_Location* other);\n\n  // implements Message ----------------------------------------------\n\n  inline SourceCodeInfo_Location* New() const { return New(NULL); }\n\n  SourceCodeInfo_Location* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const SourceCodeInfo_Location& from);\n  void MergeFrom(const SourceCodeInfo_Location& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(SourceCodeInfo_Location* other);\n  void UnsafeMergeFrom(const SourceCodeInfo_Location& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // repeated int32 path = 1 [packed = true];\n  int path_size() const;\n  void 
clear_path();\n  static const int kPathFieldNumber = 1;\n  ::google::protobuf::int32 path(int index) const;\n  void set_path(int index, ::google::protobuf::int32 value);\n  void add_path(::google::protobuf::int32 value);\n  const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\n      path() const;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\n      mutable_path();\n\n  // repeated int32 span = 2 [packed = true];\n  int span_size() const;\n  void clear_span();\n  static const int kSpanFieldNumber = 2;\n  ::google::protobuf::int32 span(int index) const;\n  void set_span(int index, ::google::protobuf::int32 value);\n  void add_span(::google::protobuf::int32 value);\n  const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\n      span() const;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\n      mutable_span();\n\n  // optional string leading_comments = 3;\n  bool has_leading_comments() const;\n  void clear_leading_comments();\n  static const int kLeadingCommentsFieldNumber = 3;\n  const ::std::string& leading_comments() const;\n  void set_leading_comments(const ::std::string& value);\n  void set_leading_comments(const char* value);\n  void set_leading_comments(const char* value, size_t size);\n  ::std::string* mutable_leading_comments();\n  ::std::string* release_leading_comments();\n  void set_allocated_leading_comments(::std::string* leading_comments);\n\n  // optional string trailing_comments = 4;\n  bool has_trailing_comments() const;\n  void clear_trailing_comments();\n  static const int kTrailingCommentsFieldNumber = 4;\n  const ::std::string& trailing_comments() const;\n  void set_trailing_comments(const ::std::string& value);\n  void set_trailing_comments(const char* value);\n  void set_trailing_comments(const char* value, size_t size);\n  ::std::string* mutable_trailing_comments();\n  ::std::string* release_trailing_comments();\n  void set_allocated_trailing_comments(::std::string* 
trailing_comments);\n\n  // repeated string leading_detached_comments = 6;\n  int leading_detached_comments_size() const;\n  void clear_leading_detached_comments();\n  static const int kLeadingDetachedCommentsFieldNumber = 6;\n  const ::std::string& leading_detached_comments(int index) const;\n  ::std::string* mutable_leading_detached_comments(int index);\n  void set_leading_detached_comments(int index, const ::std::string& value);\n  void set_leading_detached_comments(int index, const char* value);\n  void set_leading_detached_comments(int index, const char* value, size_t size);\n  ::std::string* add_leading_detached_comments();\n  void add_leading_detached_comments(const ::std::string& value);\n  void add_leading_detached_comments(const char* value);\n  void add_leading_detached_comments(const char* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& leading_detached_comments() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_leading_detached_comments();\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.SourceCodeInfo.Location)\n private:\n  inline void set_has_leading_comments();\n  inline void clear_has_leading_comments();\n  inline void set_has_trailing_comments();\n  inline void clear_has_trailing_comments();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 > path_;\n  mutable int _path_cached_byte_size_;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 > span_;\n  mutable int _span_cached_byte_size_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> leading_detached_comments_;\n  ::google::protobuf::internal::ArenaStringPtr leading_comments_;\n  ::google::protobuf::internal::ArenaStringPtr trailing_comments_;\n  friend void LIBPROTOBUF_EXPORT 
protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<SourceCodeInfo_Location> SourceCodeInfo_Location_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT SourceCodeInfo : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.SourceCodeInfo) */ {\n public:\n  SourceCodeInfo();\n  virtual ~SourceCodeInfo();\n\n  SourceCodeInfo(const SourceCodeInfo& from);\n\n  inline SourceCodeInfo& operator=(const SourceCodeInfo& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const SourceCodeInfo& default_instance();\n\n  static const SourceCodeInfo* internal_default_instance();\n\n  void Swap(SourceCodeInfo* other);\n\n  // implements Message ----------------------------------------------\n\n  inline SourceCodeInfo* New() const { return New(NULL); }\n\n  SourceCodeInfo* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const SourceCodeInfo& from);\n  void MergeFrom(const SourceCodeInfo& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      
::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(SourceCodeInfo* other);\n  void UnsafeMergeFrom(const SourceCodeInfo& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  typedef SourceCodeInfo_Location Location;\n\n  // accessors -------------------------------------------------------\n\n  // repeated .google.protobuf.SourceCodeInfo.Location location = 1;\n  int location_size() const;\n  void clear_location();\n  static const int kLocationFieldNumber = 1;\n  const ::google::protobuf::SourceCodeInfo_Location& location(int index) const;\n  ::google::protobuf::SourceCodeInfo_Location* mutable_location(int index);\n  ::google::protobuf::SourceCodeInfo_Location* add_location();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::SourceCodeInfo_Location >*\n      mutable_location();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::SourceCodeInfo_Location >&\n      location() const;\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.SourceCodeInfo)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  
::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::SourceCodeInfo_Location > location_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<SourceCodeInfo> SourceCodeInfo_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT GeneratedCodeInfo_Annotation : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.GeneratedCodeInfo.Annotation) */ {\n public:\n  GeneratedCodeInfo_Annotation();\n  virtual ~GeneratedCodeInfo_Annotation();\n\n  GeneratedCodeInfo_Annotation(const GeneratedCodeInfo_Annotation& from);\n\n  inline GeneratedCodeInfo_Annotation& operator=(const GeneratedCodeInfo_Annotation& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const GeneratedCodeInfo_Annotation& default_instance();\n\n  static const GeneratedCodeInfo_Annotation* internal_default_instance();\n\n  void Swap(GeneratedCodeInfo_Annotation* other);\n\n  // implements Message ----------------------------------------------\n\n  inline GeneratedCodeInfo_Annotation* New() const { return New(NULL); }\n\n  GeneratedCodeInfo_Annotation* 
New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const GeneratedCodeInfo_Annotation& from);\n  void MergeFrom(const GeneratedCodeInfo_Annotation& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(GeneratedCodeInfo_Annotation* other);\n  void UnsafeMergeFrom(const GeneratedCodeInfo_Annotation& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // repeated int32 path = 1 [packed = true];\n  int path_size() const;\n  void clear_path();\n  static const int kPathFieldNumber = 1;\n  ::google::protobuf::int32 path(int index) const;\n  void set_path(int index, ::google::protobuf::int32 value);\n  void add_path(::google::protobuf::int32 value);\n  const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\n      path() const;\n  
::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\n      mutable_path();\n\n  // optional string source_file = 2;\n  bool has_source_file() const;\n  void clear_source_file();\n  static const int kSourceFileFieldNumber = 2;\n  const ::std::string& source_file() const;\n  void set_source_file(const ::std::string& value);\n  void set_source_file(const char* value);\n  void set_source_file(const char* value, size_t size);\n  ::std::string* mutable_source_file();\n  ::std::string* release_source_file();\n  void set_allocated_source_file(::std::string* source_file);\n\n  // optional int32 begin = 3;\n  bool has_begin() const;\n  void clear_begin();\n  static const int kBeginFieldNumber = 3;\n  ::google::protobuf::int32 begin() const;\n  void set_begin(::google::protobuf::int32 value);\n\n  // optional int32 end = 4;\n  bool has_end() const;\n  void clear_end();\n  static const int kEndFieldNumber = 4;\n  ::google::protobuf::int32 end() const;\n  void set_end(::google::protobuf::int32 value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.GeneratedCodeInfo.Annotation)\n private:\n  inline void set_has_source_file();\n  inline void clear_has_source_file();\n  inline void set_has_begin();\n  inline void clear_has_begin();\n  inline void set_has_end();\n  inline void clear_has_end();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int _cached_size_;\n  ::google::protobuf::RepeatedField< ::google::protobuf::int32 > path_;\n  mutable int _path_cached_byte_size_;\n  ::google::protobuf::internal::ArenaStringPtr source_file_;\n  ::google::protobuf::int32 begin_;\n  ::google::protobuf::int32 end_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void 
protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<GeneratedCodeInfo_Annotation> GeneratedCodeInfo_Annotation_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT GeneratedCodeInfo : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.GeneratedCodeInfo) */ {\n public:\n  GeneratedCodeInfo();\n  virtual ~GeneratedCodeInfo();\n\n  GeneratedCodeInfo(const GeneratedCodeInfo& from);\n\n  inline GeneratedCodeInfo& operator=(const GeneratedCodeInfo& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {\n    return _internal_metadata_.unknown_fields();\n  }\n\n  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {\n    return _internal_metadata_.mutable_unknown_fields();\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const GeneratedCodeInfo& default_instance();\n\n  static const GeneratedCodeInfo* internal_default_instance();\n\n  void Swap(GeneratedCodeInfo* other);\n\n  // implements Message ----------------------------------------------\n\n  inline GeneratedCodeInfo* New() const { return New(NULL); }\n\n  GeneratedCodeInfo* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const GeneratedCodeInfo& from);\n  void MergeFrom(const GeneratedCodeInfo& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) 
const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(GeneratedCodeInfo* other);\n  void UnsafeMergeFrom(const GeneratedCodeInfo& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  typedef GeneratedCodeInfo_Annotation Annotation;\n\n  // accessors -------------------------------------------------------\n\n  // repeated .google.protobuf.GeneratedCodeInfo.Annotation annotation = 1;\n  int annotation_size() const;\n  void clear_annotation();\n  static const int kAnnotationFieldNumber = 1;\n  const ::google::protobuf::GeneratedCodeInfo_Annotation& annotation(int index) const;\n  ::google::protobuf::GeneratedCodeInfo_Annotation* mutable_annotation(int index);\n  ::google::protobuf::GeneratedCodeInfo_Annotation* add_annotation();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::GeneratedCodeInfo_Annotation >*\n      mutable_annotation();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::GeneratedCodeInfo_Annotation >&\n      annotation() const;\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.GeneratedCodeInfo)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::HasBits<1> _has_bits_;\n  mutable int 
_cached_size_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::GeneratedCodeInfo_Annotation > annotation_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fdescriptor_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fdescriptor_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fdescriptor_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<GeneratedCodeInfo> GeneratedCodeInfo_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// FileDescriptorSet\n\n// repeated .google.protobuf.FileDescriptorProto file = 1;\ninline int FileDescriptorSet::file_size() const {\n  return file_.size();\n}\ninline void FileDescriptorSet::clear_file() {\n  file_.Clear();\n}\ninline const ::google::protobuf::FileDescriptorProto& FileDescriptorSet::file(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileDescriptorSet.file)\n  return file_.Get(index);\n}\ninline ::google::protobuf::FileDescriptorProto* FileDescriptorSet::mutable_file(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileDescriptorSet.file)\n  return file_.Mutable(index);\n}\ninline ::google::protobuf::FileDescriptorProto* FileDescriptorSet::add_file() {\n  // @@protoc_insertion_point(field_add:google.protobuf.FileDescriptorSet.file)\n  return file_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::FileDescriptorProto >*\nFileDescriptorSet::mutable_file() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.FileDescriptorSet.file)\n  return &file_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::FileDescriptorProto 
>&\nFileDescriptorSet::file() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.FileDescriptorSet.file)\n  return file_;\n}\n\ninline const FileDescriptorSet* FileDescriptorSet::internal_default_instance() {\n  return &FileDescriptorSet_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// FileDescriptorProto\n\n// optional string name = 1;\ninline bool FileDescriptorProto::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void FileDescriptorProto::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void FileDescriptorProto::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void FileDescriptorProto::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& FileDescriptorProto::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileDescriptorProto.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FileDescriptorProto::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FileDescriptorProto.name)\n}\ninline void FileDescriptorProto::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FileDescriptorProto.name)\n}\ninline void FileDescriptorProto::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FileDescriptorProto.name)\n}\ninline 
::std::string* FileDescriptorProto::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileDescriptorProto.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* FileDescriptorProto::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FileDescriptorProto.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FileDescriptorProto::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.FileDescriptorProto.name)\n}\n\n// optional string package = 2;\ninline bool FileDescriptorProto::has_package() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void FileDescriptorProto::set_has_package() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void FileDescriptorProto::clear_has_package() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void FileDescriptorProto::clear_package() {\n  package_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_package();\n}\ninline const ::std::string& FileDescriptorProto::package() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileDescriptorProto.package)\n  return package_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FileDescriptorProto::set_package(const ::std::string& value) {\n  set_has_package();\n  package_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FileDescriptorProto.package)\n}\ninline void FileDescriptorProto::set_package(const char* value) {\n  set_has_package();\n  
package_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FileDescriptorProto.package)\n}\ninline void FileDescriptorProto::set_package(const char* value, size_t size) {\n  set_has_package();\n  package_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FileDescriptorProto.package)\n}\ninline ::std::string* FileDescriptorProto::mutable_package() {\n  set_has_package();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileDescriptorProto.package)\n  return package_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* FileDescriptorProto::release_package() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FileDescriptorProto.package)\n  clear_has_package();\n  return package_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FileDescriptorProto::set_allocated_package(::std::string* package) {\n  if (package != NULL) {\n    set_has_package();\n  } else {\n    clear_has_package();\n  }\n  package_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), package);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.FileDescriptorProto.package)\n}\n\n// repeated string dependency = 3;\ninline int FileDescriptorProto::dependency_size() const {\n  return dependency_.size();\n}\ninline void FileDescriptorProto::clear_dependency() {\n  dependency_.Clear();\n}\ninline const ::std::string& FileDescriptorProto::dependency(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileDescriptorProto.dependency)\n  return dependency_.Get(index);\n}\ninline ::std::string* FileDescriptorProto::mutable_dependency(int index) {\n  // 
@@protoc_insertion_point(field_mutable:google.protobuf.FileDescriptorProto.dependency)\n  return dependency_.Mutable(index);\n}\ninline void FileDescriptorProto::set_dependency(int index, const ::std::string& value) {\n  // @@protoc_insertion_point(field_set:google.protobuf.FileDescriptorProto.dependency)\n  dependency_.Mutable(index)->assign(value);\n}\ninline void FileDescriptorProto::set_dependency(int index, const char* value) {\n  dependency_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FileDescriptorProto.dependency)\n}\ninline void FileDescriptorProto::set_dependency(int index, const char* value, size_t size) {\n  dependency_.Mutable(index)->assign(\n    reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FileDescriptorProto.dependency)\n}\ninline ::std::string* FileDescriptorProto::add_dependency() {\n  // @@protoc_insertion_point(field_add_mutable:google.protobuf.FileDescriptorProto.dependency)\n  return dependency_.Add();\n}\ninline void FileDescriptorProto::add_dependency(const ::std::string& value) {\n  dependency_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:google.protobuf.FileDescriptorProto.dependency)\n}\ninline void FileDescriptorProto::add_dependency(const char* value) {\n  dependency_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add_char:google.protobuf.FileDescriptorProto.dependency)\n}\ninline void FileDescriptorProto::add_dependency(const char* value, size_t size) {\n  dependency_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:google.protobuf.FileDescriptorProto.dependency)\n}\ninline const ::google::protobuf::RepeatedPtrField< ::std::string>&\nFileDescriptorProto::dependency() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.FileDescriptorProto.dependency)\n  return dependency_;\n}\ninline ::google::protobuf::RepeatedPtrField< 
::std::string>*\nFileDescriptorProto::mutable_dependency() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.FileDescriptorProto.dependency)\n  return &dependency_;\n}\n\n// repeated int32 public_dependency = 10;\ninline int FileDescriptorProto::public_dependency_size() const {\n  return public_dependency_.size();\n}\ninline void FileDescriptorProto::clear_public_dependency() {\n  public_dependency_.Clear();\n}\ninline ::google::protobuf::int32 FileDescriptorProto::public_dependency(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileDescriptorProto.public_dependency)\n  return public_dependency_.Get(index);\n}\ninline void FileDescriptorProto::set_public_dependency(int index, ::google::protobuf::int32 value) {\n  public_dependency_.Set(index, value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FileDescriptorProto.public_dependency)\n}\ninline void FileDescriptorProto::add_public_dependency(::google::protobuf::int32 value) {\n  public_dependency_.Add(value);\n  // @@protoc_insertion_point(field_add:google.protobuf.FileDescriptorProto.public_dependency)\n}\ninline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\nFileDescriptorProto::public_dependency() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.FileDescriptorProto.public_dependency)\n  return public_dependency_;\n}\ninline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\nFileDescriptorProto::mutable_public_dependency() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.FileDescriptorProto.public_dependency)\n  return &public_dependency_;\n}\n\n// repeated int32 weak_dependency = 11;\ninline int FileDescriptorProto::weak_dependency_size() const {\n  return weak_dependency_.size();\n}\ninline void FileDescriptorProto::clear_weak_dependency() {\n  weak_dependency_.Clear();\n}\ninline ::google::protobuf::int32 FileDescriptorProto::weak_dependency(int index) const {\n  // 
@@protoc_insertion_point(field_get:google.protobuf.FileDescriptorProto.weak_dependency)\n  return weak_dependency_.Get(index);\n}\ninline void FileDescriptorProto::set_weak_dependency(int index, ::google::protobuf::int32 value) {\n  weak_dependency_.Set(index, value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FileDescriptorProto.weak_dependency)\n}\ninline void FileDescriptorProto::add_weak_dependency(::google::protobuf::int32 value) {\n  weak_dependency_.Add(value);\n  // @@protoc_insertion_point(field_add:google.protobuf.FileDescriptorProto.weak_dependency)\n}\ninline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\nFileDescriptorProto::weak_dependency() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.FileDescriptorProto.weak_dependency)\n  return weak_dependency_;\n}\ninline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\nFileDescriptorProto::mutable_weak_dependency() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.FileDescriptorProto.weak_dependency)\n  return &weak_dependency_;\n}\n\n// repeated .google.protobuf.DescriptorProto message_type = 4;\ninline int FileDescriptorProto::message_type_size() const {\n  return message_type_.size();\n}\ninline void FileDescriptorProto::clear_message_type() {\n  message_type_.Clear();\n}\ninline const ::google::protobuf::DescriptorProto& FileDescriptorProto::message_type(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileDescriptorProto.message_type)\n  return message_type_.Get(index);\n}\ninline ::google::protobuf::DescriptorProto* FileDescriptorProto::mutable_message_type(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileDescriptorProto.message_type)\n  return message_type_.Mutable(index);\n}\ninline ::google::protobuf::DescriptorProto* FileDescriptorProto::add_message_type() {\n  // @@protoc_insertion_point(field_add:google.protobuf.FileDescriptorProto.message_type)\n  
return message_type_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto >*\nFileDescriptorProto::mutable_message_type() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.FileDescriptorProto.message_type)\n  return &message_type_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto >&\nFileDescriptorProto::message_type() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.FileDescriptorProto.message_type)\n  return message_type_;\n}\n\n// repeated .google.protobuf.EnumDescriptorProto enum_type = 5;\ninline int FileDescriptorProto::enum_type_size() const {\n  return enum_type_.size();\n}\ninline void FileDescriptorProto::clear_enum_type() {\n  enum_type_.Clear();\n}\ninline const ::google::protobuf::EnumDescriptorProto& FileDescriptorProto::enum_type(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileDescriptorProto.enum_type)\n  return enum_type_.Get(index);\n}\ninline ::google::protobuf::EnumDescriptorProto* FileDescriptorProto::mutable_enum_type(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileDescriptorProto.enum_type)\n  return enum_type_.Mutable(index);\n}\ninline ::google::protobuf::EnumDescriptorProto* FileDescriptorProto::add_enum_type() {\n  // @@protoc_insertion_point(field_add:google.protobuf.FileDescriptorProto.enum_type)\n  return enum_type_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumDescriptorProto >*\nFileDescriptorProto::mutable_enum_type() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.FileDescriptorProto.enum_type)\n  return &enum_type_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumDescriptorProto >&\nFileDescriptorProto::enum_type() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.FileDescriptorProto.enum_type)\n  return enum_type_;\n}\n\n// repeated 
.google.protobuf.ServiceDescriptorProto service = 6;\ninline int FileDescriptorProto::service_size() const {\n  return service_.size();\n}\ninline void FileDescriptorProto::clear_service() {\n  service_.Clear();\n}\ninline const ::google::protobuf::ServiceDescriptorProto& FileDescriptorProto::service(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileDescriptorProto.service)\n  return service_.Get(index);\n}\ninline ::google::protobuf::ServiceDescriptorProto* FileDescriptorProto::mutable_service(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileDescriptorProto.service)\n  return service_.Mutable(index);\n}\ninline ::google::protobuf::ServiceDescriptorProto* FileDescriptorProto::add_service() {\n  // @@protoc_insertion_point(field_add:google.protobuf.FileDescriptorProto.service)\n  return service_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::ServiceDescriptorProto >*\nFileDescriptorProto::mutable_service() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.FileDescriptorProto.service)\n  return &service_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::ServiceDescriptorProto >&\nFileDescriptorProto::service() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.FileDescriptorProto.service)\n  return service_;\n}\n\n// repeated .google.protobuf.FieldDescriptorProto extension = 7;\ninline int FileDescriptorProto::extension_size() const {\n  return extension_.size();\n}\ninline void FileDescriptorProto::clear_extension() {\n  extension_.Clear();\n}\ninline const ::google::protobuf::FieldDescriptorProto& FileDescriptorProto::extension(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileDescriptorProto.extension)\n  return extension_.Get(index);\n}\ninline ::google::protobuf::FieldDescriptorProto* FileDescriptorProto::mutable_extension(int index) {\n  // 
@@protoc_insertion_point(field_mutable:google.protobuf.FileDescriptorProto.extension)\n  return extension_.Mutable(index);\n}\ninline ::google::protobuf::FieldDescriptorProto* FileDescriptorProto::add_extension() {\n  // @@protoc_insertion_point(field_add:google.protobuf.FileDescriptorProto.extension)\n  return extension_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto >*\nFileDescriptorProto::mutable_extension() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.FileDescriptorProto.extension)\n  return &extension_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto >&\nFileDescriptorProto::extension() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.FileDescriptorProto.extension)\n  return extension_;\n}\n\n// optional .google.protobuf.FileOptions options = 8;\ninline bool FileDescriptorProto::has_options() const {\n  return (_has_bits_[0] & 0x00000200u) != 0;\n}\ninline void FileDescriptorProto::set_has_options() {\n  _has_bits_[0] |= 0x00000200u;\n}\ninline void FileDescriptorProto::clear_has_options() {\n  _has_bits_[0] &= ~0x00000200u;\n}\ninline void FileDescriptorProto::clear_options() {\n  if (options_ != NULL) options_->::google::protobuf::FileOptions::Clear();\n  clear_has_options();\n}\ninline const ::google::protobuf::FileOptions& FileDescriptorProto::options() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileDescriptorProto.options)\n  return options_ != NULL ? 
*options_\n                         : *::google::protobuf::FileOptions::internal_default_instance();\n}\ninline ::google::protobuf::FileOptions* FileDescriptorProto::mutable_options() {\n  set_has_options();\n  if (options_ == NULL) {\n    options_ = new ::google::protobuf::FileOptions;\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileDescriptorProto.options)\n  return options_;\n}\ninline ::google::protobuf::FileOptions* FileDescriptorProto::release_options() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FileDescriptorProto.options)\n  clear_has_options();\n  ::google::protobuf::FileOptions* temp = options_;\n  options_ = NULL;\n  return temp;\n}\ninline void FileDescriptorProto::set_allocated_options(::google::protobuf::FileOptions* options) {\n  delete options_;\n  options_ = options;\n  if (options) {\n    set_has_options();\n  } else {\n    clear_has_options();\n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.FileDescriptorProto.options)\n}\n\n// optional .google.protobuf.SourceCodeInfo source_code_info = 9;\ninline bool FileDescriptorProto::has_source_code_info() const {\n  return (_has_bits_[0] & 0x00000400u) != 0;\n}\ninline void FileDescriptorProto::set_has_source_code_info() {\n  _has_bits_[0] |= 0x00000400u;\n}\ninline void FileDescriptorProto::clear_has_source_code_info() {\n  _has_bits_[0] &= ~0x00000400u;\n}\ninline void FileDescriptorProto::clear_source_code_info() {\n  if (source_code_info_ != NULL) source_code_info_->::google::protobuf::SourceCodeInfo::Clear();\n  clear_has_source_code_info();\n}\ninline const ::google::protobuf::SourceCodeInfo& FileDescriptorProto::source_code_info() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileDescriptorProto.source_code_info)\n  return source_code_info_ != NULL ? 
*source_code_info_\n                         : *::google::protobuf::SourceCodeInfo::internal_default_instance();\n}\ninline ::google::protobuf::SourceCodeInfo* FileDescriptorProto::mutable_source_code_info() {\n  set_has_source_code_info();\n  if (source_code_info_ == NULL) {\n    source_code_info_ = new ::google::protobuf::SourceCodeInfo;\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileDescriptorProto.source_code_info)\n  return source_code_info_;\n}\ninline ::google::protobuf::SourceCodeInfo* FileDescriptorProto::release_source_code_info() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FileDescriptorProto.source_code_info)\n  clear_has_source_code_info();\n  ::google::protobuf::SourceCodeInfo* temp = source_code_info_;\n  source_code_info_ = NULL;\n  return temp;\n}\ninline void FileDescriptorProto::set_allocated_source_code_info(::google::protobuf::SourceCodeInfo* source_code_info) {\n  delete source_code_info_;\n  source_code_info_ = source_code_info;\n  if (source_code_info) {\n    set_has_source_code_info();\n  } else {\n    clear_has_source_code_info();\n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.FileDescriptorProto.source_code_info)\n}\n\n// optional string syntax = 12;\ninline bool FileDescriptorProto::has_syntax() const {\n  return (_has_bits_[0] & 0x00000800u) != 0;\n}\ninline void FileDescriptorProto::set_has_syntax() {\n  _has_bits_[0] |= 0x00000800u;\n}\ninline void FileDescriptorProto::clear_has_syntax() {\n  _has_bits_[0] &= ~0x00000800u;\n}\ninline void FileDescriptorProto::clear_syntax() {\n  syntax_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_syntax();\n}\ninline const ::std::string& FileDescriptorProto::syntax() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileDescriptorProto.syntax)\n  return syntax_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void 
FileDescriptorProto::set_syntax(const ::std::string& value) {\n  set_has_syntax();\n  syntax_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FileDescriptorProto.syntax)\n}\ninline void FileDescriptorProto::set_syntax(const char* value) {\n  set_has_syntax();\n  syntax_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FileDescriptorProto.syntax)\n}\ninline void FileDescriptorProto::set_syntax(const char* value, size_t size) {\n  set_has_syntax();\n  syntax_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FileDescriptorProto.syntax)\n}\ninline ::std::string* FileDescriptorProto::mutable_syntax() {\n  set_has_syntax();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileDescriptorProto.syntax)\n  return syntax_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* FileDescriptorProto::release_syntax() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FileDescriptorProto.syntax)\n  clear_has_syntax();\n  return syntax_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FileDescriptorProto::set_allocated_syntax(::std::string* syntax) {\n  if (syntax != NULL) {\n    set_has_syntax();\n  } else {\n    clear_has_syntax();\n  }\n  syntax_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), syntax);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.FileDescriptorProto.syntax)\n}\n\ninline const FileDescriptorProto* FileDescriptorProto::internal_default_instance() {\n  return &FileDescriptorProto_default_instance_.get();\n}\n// 
-------------------------------------------------------------------\n\n// DescriptorProto_ExtensionRange\n\n// optional int32 start = 1;\ninline bool DescriptorProto_ExtensionRange::has_start() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void DescriptorProto_ExtensionRange::set_has_start() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void DescriptorProto_ExtensionRange::clear_has_start() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void DescriptorProto_ExtensionRange::clear_start() {\n  start_ = 0;\n  clear_has_start();\n}\ninline ::google::protobuf::int32 DescriptorProto_ExtensionRange::start() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DescriptorProto.ExtensionRange.start)\n  return start_;\n}\ninline void DescriptorProto_ExtensionRange::set_start(::google::protobuf::int32 value) {\n  set_has_start();\n  start_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.DescriptorProto.ExtensionRange.start)\n}\n\n// optional int32 end = 2;\ninline bool DescriptorProto_ExtensionRange::has_end() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void DescriptorProto_ExtensionRange::set_has_end() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void DescriptorProto_ExtensionRange::clear_has_end() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void DescriptorProto_ExtensionRange::clear_end() {\n  end_ = 0;\n  clear_has_end();\n}\ninline ::google::protobuf::int32 DescriptorProto_ExtensionRange::end() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DescriptorProto.ExtensionRange.end)\n  return end_;\n}\ninline void DescriptorProto_ExtensionRange::set_end(::google::protobuf::int32 value) {\n  set_has_end();\n  end_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.DescriptorProto.ExtensionRange.end)\n}\n\ninline const DescriptorProto_ExtensionRange* DescriptorProto_ExtensionRange::internal_default_instance() {\n  return 
&DescriptorProto_ExtensionRange_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// DescriptorProto_ReservedRange\n\n// optional int32 start = 1;\ninline bool DescriptorProto_ReservedRange::has_start() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void DescriptorProto_ReservedRange::set_has_start() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void DescriptorProto_ReservedRange::clear_has_start() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void DescriptorProto_ReservedRange::clear_start() {\n  start_ = 0;\n  clear_has_start();\n}\ninline ::google::protobuf::int32 DescriptorProto_ReservedRange::start() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DescriptorProto.ReservedRange.start)\n  return start_;\n}\ninline void DescriptorProto_ReservedRange::set_start(::google::protobuf::int32 value) {\n  set_has_start();\n  start_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.DescriptorProto.ReservedRange.start)\n}\n\n// optional int32 end = 2;\ninline bool DescriptorProto_ReservedRange::has_end() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void DescriptorProto_ReservedRange::set_has_end() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void DescriptorProto_ReservedRange::clear_has_end() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void DescriptorProto_ReservedRange::clear_end() {\n  end_ = 0;\n  clear_has_end();\n}\ninline ::google::protobuf::int32 DescriptorProto_ReservedRange::end() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DescriptorProto.ReservedRange.end)\n  return end_;\n}\ninline void DescriptorProto_ReservedRange::set_end(::google::protobuf::int32 value) {\n  set_has_end();\n  end_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.DescriptorProto.ReservedRange.end)\n}\n\ninline const DescriptorProto_ReservedRange* DescriptorProto_ReservedRange::internal_default_instance() {\n  return 
&DescriptorProto_ReservedRange_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// DescriptorProto\n\n// optional string name = 1;\ninline bool DescriptorProto::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void DescriptorProto::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void DescriptorProto::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void DescriptorProto::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& DescriptorProto::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DescriptorProto.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void DescriptorProto::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.DescriptorProto.name)\n}\ninline void DescriptorProto::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.DescriptorProto.name)\n}\ninline void DescriptorProto::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.DescriptorProto.name)\n}\ninline ::std::string* DescriptorProto::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.DescriptorProto.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* 
DescriptorProto::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.DescriptorProto.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void DescriptorProto::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.DescriptorProto.name)\n}\n\n// repeated .google.protobuf.FieldDescriptorProto field = 2;\ninline int DescriptorProto::field_size() const {\n  return field_.size();\n}\ninline void DescriptorProto::clear_field() {\n  field_.Clear();\n}\ninline const ::google::protobuf::FieldDescriptorProto& DescriptorProto::field(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DescriptorProto.field)\n  return field_.Get(index);\n}\ninline ::google::protobuf::FieldDescriptorProto* DescriptorProto::mutable_field(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.DescriptorProto.field)\n  return field_.Mutable(index);\n}\ninline ::google::protobuf::FieldDescriptorProto* DescriptorProto::add_field() {\n  // @@protoc_insertion_point(field_add:google.protobuf.DescriptorProto.field)\n  return field_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto >*\nDescriptorProto::mutable_field() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.DescriptorProto.field)\n  return &field_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto >&\nDescriptorProto::field() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.DescriptorProto.field)\n  return field_;\n}\n\n// repeated .google.protobuf.FieldDescriptorProto extension = 6;\ninline int DescriptorProto::extension_size() const {\n  
return extension_.size();\n}\ninline void DescriptorProto::clear_extension() {\n  extension_.Clear();\n}\ninline const ::google::protobuf::FieldDescriptorProto& DescriptorProto::extension(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DescriptorProto.extension)\n  return extension_.Get(index);\n}\ninline ::google::protobuf::FieldDescriptorProto* DescriptorProto::mutable_extension(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.DescriptorProto.extension)\n  return extension_.Mutable(index);\n}\ninline ::google::protobuf::FieldDescriptorProto* DescriptorProto::add_extension() {\n  // @@protoc_insertion_point(field_add:google.protobuf.DescriptorProto.extension)\n  return extension_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto >*\nDescriptorProto::mutable_extension() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.DescriptorProto.extension)\n  return &extension_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::FieldDescriptorProto >&\nDescriptorProto::extension() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.DescriptorProto.extension)\n  return extension_;\n}\n\n// repeated .google.protobuf.DescriptorProto nested_type = 3;\ninline int DescriptorProto::nested_type_size() const {\n  return nested_type_.size();\n}\ninline void DescriptorProto::clear_nested_type() {\n  nested_type_.Clear();\n}\ninline const ::google::protobuf::DescriptorProto& DescriptorProto::nested_type(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DescriptorProto.nested_type)\n  return nested_type_.Get(index);\n}\ninline ::google::protobuf::DescriptorProto* DescriptorProto::mutable_nested_type(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.DescriptorProto.nested_type)\n  return nested_type_.Mutable(index);\n}\ninline ::google::protobuf::DescriptorProto* 
DescriptorProto::add_nested_type() {\n  // @@protoc_insertion_point(field_add:google.protobuf.DescriptorProto.nested_type)\n  return nested_type_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto >*\nDescriptorProto::mutable_nested_type() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.DescriptorProto.nested_type)\n  return &nested_type_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto >&\nDescriptorProto::nested_type() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.DescriptorProto.nested_type)\n  return nested_type_;\n}\n\n// repeated .google.protobuf.EnumDescriptorProto enum_type = 4;\ninline int DescriptorProto::enum_type_size() const {\n  return enum_type_.size();\n}\ninline void DescriptorProto::clear_enum_type() {\n  enum_type_.Clear();\n}\ninline const ::google::protobuf::EnumDescriptorProto& DescriptorProto::enum_type(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DescriptorProto.enum_type)\n  return enum_type_.Get(index);\n}\ninline ::google::protobuf::EnumDescriptorProto* DescriptorProto::mutable_enum_type(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.DescriptorProto.enum_type)\n  return enum_type_.Mutable(index);\n}\ninline ::google::protobuf::EnumDescriptorProto* DescriptorProto::add_enum_type() {\n  // @@protoc_insertion_point(field_add:google.protobuf.DescriptorProto.enum_type)\n  return enum_type_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumDescriptorProto >*\nDescriptorProto::mutable_enum_type() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.DescriptorProto.enum_type)\n  return &enum_type_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumDescriptorProto >&\nDescriptorProto::enum_type() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.DescriptorProto.enum_type)\n  
return enum_type_;\n}\n\n// repeated .google.protobuf.DescriptorProto.ExtensionRange extension_range = 5;\ninline int DescriptorProto::extension_range_size() const {\n  return extension_range_.size();\n}\ninline void DescriptorProto::clear_extension_range() {\n  extension_range_.Clear();\n}\ninline const ::google::protobuf::DescriptorProto_ExtensionRange& DescriptorProto::extension_range(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DescriptorProto.extension_range)\n  return extension_range_.Get(index);\n}\ninline ::google::protobuf::DescriptorProto_ExtensionRange* DescriptorProto::mutable_extension_range(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.DescriptorProto.extension_range)\n  return extension_range_.Mutable(index);\n}\ninline ::google::protobuf::DescriptorProto_ExtensionRange* DescriptorProto::add_extension_range() {\n  // @@protoc_insertion_point(field_add:google.protobuf.DescriptorProto.extension_range)\n  return extension_range_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto_ExtensionRange >*\nDescriptorProto::mutable_extension_range() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.DescriptorProto.extension_range)\n  return &extension_range_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto_ExtensionRange >&\nDescriptorProto::extension_range() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.DescriptorProto.extension_range)\n  return extension_range_;\n}\n\n// repeated .google.protobuf.OneofDescriptorProto oneof_decl = 8;\ninline int DescriptorProto::oneof_decl_size() const {\n  return oneof_decl_.size();\n}\ninline void DescriptorProto::clear_oneof_decl() {\n  oneof_decl_.Clear();\n}\ninline const ::google::protobuf::OneofDescriptorProto& DescriptorProto::oneof_decl(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DescriptorProto.oneof_decl)\n  
return oneof_decl_.Get(index);\n}\ninline ::google::protobuf::OneofDescriptorProto* DescriptorProto::mutable_oneof_decl(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.DescriptorProto.oneof_decl)\n  return oneof_decl_.Mutable(index);\n}\ninline ::google::protobuf::OneofDescriptorProto* DescriptorProto::add_oneof_decl() {\n  // @@protoc_insertion_point(field_add:google.protobuf.DescriptorProto.oneof_decl)\n  return oneof_decl_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::OneofDescriptorProto >*\nDescriptorProto::mutable_oneof_decl() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.DescriptorProto.oneof_decl)\n  return &oneof_decl_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::OneofDescriptorProto >&\nDescriptorProto::oneof_decl() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.DescriptorProto.oneof_decl)\n  return oneof_decl_;\n}\n\n// optional .google.protobuf.MessageOptions options = 7;\ninline bool DescriptorProto::has_options() const {\n  return (_has_bits_[0] & 0x00000080u) != 0;\n}\ninline void DescriptorProto::set_has_options() {\n  _has_bits_[0] |= 0x00000080u;\n}\ninline void DescriptorProto::clear_has_options() {\n  _has_bits_[0] &= ~0x00000080u;\n}\ninline void DescriptorProto::clear_options() {\n  if (options_ != NULL) options_->::google::protobuf::MessageOptions::Clear();\n  clear_has_options();\n}\ninline const ::google::protobuf::MessageOptions& DescriptorProto::options() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DescriptorProto.options)\n  return options_ != NULL ? 
*options_\n                         : *::google::protobuf::MessageOptions::internal_default_instance();\n}\ninline ::google::protobuf::MessageOptions* DescriptorProto::mutable_options() {\n  set_has_options();\n  if (options_ == NULL) {\n    options_ = new ::google::protobuf::MessageOptions;\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.DescriptorProto.options)\n  return options_;\n}\ninline ::google::protobuf::MessageOptions* DescriptorProto::release_options() {\n  // @@protoc_insertion_point(field_release:google.protobuf.DescriptorProto.options)\n  clear_has_options();\n  ::google::protobuf::MessageOptions* temp = options_;\n  options_ = NULL;\n  return temp;\n}\ninline void DescriptorProto::set_allocated_options(::google::protobuf::MessageOptions* options) {\n  delete options_;\n  options_ = options;\n  if (options) {\n    set_has_options();\n  } else {\n    clear_has_options();\n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.DescriptorProto.options)\n}\n\n// repeated .google.protobuf.DescriptorProto.ReservedRange reserved_range = 9;\ninline int DescriptorProto::reserved_range_size() const {\n  return reserved_range_.size();\n}\ninline void DescriptorProto::clear_reserved_range() {\n  reserved_range_.Clear();\n}\ninline const ::google::protobuf::DescriptorProto_ReservedRange& DescriptorProto::reserved_range(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DescriptorProto.reserved_range)\n  return reserved_range_.Get(index);\n}\ninline ::google::protobuf::DescriptorProto_ReservedRange* DescriptorProto::mutable_reserved_range(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.DescriptorProto.reserved_range)\n  return reserved_range_.Mutable(index);\n}\ninline ::google::protobuf::DescriptorProto_ReservedRange* DescriptorProto::add_reserved_range() {\n  // @@protoc_insertion_point(field_add:google.protobuf.DescriptorProto.reserved_range)\n  return 
reserved_range_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto_ReservedRange >*\nDescriptorProto::mutable_reserved_range() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.DescriptorProto.reserved_range)\n  return &reserved_range_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::DescriptorProto_ReservedRange >&\nDescriptorProto::reserved_range() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.DescriptorProto.reserved_range)\n  return reserved_range_;\n}\n\n// repeated string reserved_name = 10;\ninline int DescriptorProto::reserved_name_size() const {\n  return reserved_name_.size();\n}\ninline void DescriptorProto::clear_reserved_name() {\n  reserved_name_.Clear();\n}\ninline const ::std::string& DescriptorProto::reserved_name(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DescriptorProto.reserved_name)\n  return reserved_name_.Get(index);\n}\ninline ::std::string* DescriptorProto::mutable_reserved_name(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.DescriptorProto.reserved_name)\n  return reserved_name_.Mutable(index);\n}\ninline void DescriptorProto::set_reserved_name(int index, const ::std::string& value) {\n  // @@protoc_insertion_point(field_set:google.protobuf.DescriptorProto.reserved_name)\n  reserved_name_.Mutable(index)->assign(value);\n}\ninline void DescriptorProto::set_reserved_name(int index, const char* value) {\n  reserved_name_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:google.protobuf.DescriptorProto.reserved_name)\n}\ninline void DescriptorProto::set_reserved_name(int index, const char* value, size_t size) {\n  reserved_name_.Mutable(index)->assign(\n    reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.DescriptorProto.reserved_name)\n}\ninline ::std::string* 
DescriptorProto::add_reserved_name() {\n  // @@protoc_insertion_point(field_add_mutable:google.protobuf.DescriptorProto.reserved_name)\n  return reserved_name_.Add();\n}\ninline void DescriptorProto::add_reserved_name(const ::std::string& value) {\n  reserved_name_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:google.protobuf.DescriptorProto.reserved_name)\n}\ninline void DescriptorProto::add_reserved_name(const char* value) {\n  reserved_name_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add_char:google.protobuf.DescriptorProto.reserved_name)\n}\ninline void DescriptorProto::add_reserved_name(const char* value, size_t size) {\n  reserved_name_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:google.protobuf.DescriptorProto.reserved_name)\n}\ninline const ::google::protobuf::RepeatedPtrField< ::std::string>&\nDescriptorProto::reserved_name() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.DescriptorProto.reserved_name)\n  return reserved_name_;\n}\ninline ::google::protobuf::RepeatedPtrField< ::std::string>*\nDescriptorProto::mutable_reserved_name() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.DescriptorProto.reserved_name)\n  return &reserved_name_;\n}\n\ninline const DescriptorProto* DescriptorProto::internal_default_instance() {\n  return &DescriptorProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// FieldDescriptorProto\n\n// optional string name = 1;\ninline bool FieldDescriptorProto::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void FieldDescriptorProto::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void FieldDescriptorProto::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void FieldDescriptorProto::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  
clear_has_name();\n}\ninline const ::std::string& FieldDescriptorProto::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldDescriptorProto.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FieldDescriptorProto::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FieldDescriptorProto.name)\n}\ninline void FieldDescriptorProto::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FieldDescriptorProto.name)\n}\ninline void FieldDescriptorProto::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FieldDescriptorProto.name)\n}\ninline ::std::string* FieldDescriptorProto::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FieldDescriptorProto.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* FieldDescriptorProto::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FieldDescriptorProto.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FieldDescriptorProto::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // 
@@protoc_insertion_point(field_set_allocated:google.protobuf.FieldDescriptorProto.name)\n}\n\n// optional int32 number = 3;\ninline bool FieldDescriptorProto::has_number() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void FieldDescriptorProto::set_has_number() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void FieldDescriptorProto::clear_has_number() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void FieldDescriptorProto::clear_number() {\n  number_ = 0;\n  clear_has_number();\n}\ninline ::google::protobuf::int32 FieldDescriptorProto::number() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldDescriptorProto.number)\n  return number_;\n}\ninline void FieldDescriptorProto::set_number(::google::protobuf::int32 value) {\n  set_has_number();\n  number_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FieldDescriptorProto.number)\n}\n\n// optional .google.protobuf.FieldDescriptorProto.Label label = 4;\ninline bool FieldDescriptorProto::has_label() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void FieldDescriptorProto::set_has_label() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void FieldDescriptorProto::clear_has_label() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void FieldDescriptorProto::clear_label() {\n  label_ = 1;\n  clear_has_label();\n}\ninline ::google::protobuf::FieldDescriptorProto_Label FieldDescriptorProto::label() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldDescriptorProto.label)\n  return static_cast< ::google::protobuf::FieldDescriptorProto_Label >(label_);\n}\ninline void FieldDescriptorProto::set_label(::google::protobuf::FieldDescriptorProto_Label value) {\n  assert(::google::protobuf::FieldDescriptorProto_Label_IsValid(value));\n  set_has_label();\n  label_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FieldDescriptorProto.label)\n}\n\n// optional .google.protobuf.FieldDescriptorProto.Type type = 5;\ninline bool 
FieldDescriptorProto::has_type() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void FieldDescriptorProto::set_has_type() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void FieldDescriptorProto::clear_has_type() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void FieldDescriptorProto::clear_type() {\n  type_ = 1;\n  clear_has_type();\n}\ninline ::google::protobuf::FieldDescriptorProto_Type FieldDescriptorProto::type() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldDescriptorProto.type)\n  return static_cast< ::google::protobuf::FieldDescriptorProto_Type >(type_);\n}\ninline void FieldDescriptorProto::set_type(::google::protobuf::FieldDescriptorProto_Type value) {\n  assert(::google::protobuf::FieldDescriptorProto_Type_IsValid(value));\n  set_has_type();\n  type_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FieldDescriptorProto.type)\n}\n\n// optional string type_name = 6;\ninline bool FieldDescriptorProto::has_type_name() const {\n  return (_has_bits_[0] & 0x00000010u) != 0;\n}\ninline void FieldDescriptorProto::set_has_type_name() {\n  _has_bits_[0] |= 0x00000010u;\n}\ninline void FieldDescriptorProto::clear_has_type_name() {\n  _has_bits_[0] &= ~0x00000010u;\n}\ninline void FieldDescriptorProto::clear_type_name() {\n  type_name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_type_name();\n}\ninline const ::std::string& FieldDescriptorProto::type_name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldDescriptorProto.type_name)\n  return type_name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FieldDescriptorProto::set_type_name(const ::std::string& value) {\n  set_has_type_name();\n  type_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FieldDescriptorProto.type_name)\n}\ninline void 
FieldDescriptorProto::set_type_name(const char* value) {\n  set_has_type_name();\n  type_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FieldDescriptorProto.type_name)\n}\ninline void FieldDescriptorProto::set_type_name(const char* value, size_t size) {\n  set_has_type_name();\n  type_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FieldDescriptorProto.type_name)\n}\ninline ::std::string* FieldDescriptorProto::mutable_type_name() {\n  set_has_type_name();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FieldDescriptorProto.type_name)\n  return type_name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* FieldDescriptorProto::release_type_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FieldDescriptorProto.type_name)\n  clear_has_type_name();\n  return type_name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FieldDescriptorProto::set_allocated_type_name(::std::string* type_name) {\n  if (type_name != NULL) {\n    set_has_type_name();\n  } else {\n    clear_has_type_name();\n  }\n  type_name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), type_name);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.FieldDescriptorProto.type_name)\n}\n\n// optional string extendee = 2;\ninline bool FieldDescriptorProto::has_extendee() const {\n  return (_has_bits_[0] & 0x00000020u) != 0;\n}\ninline void FieldDescriptorProto::set_has_extendee() {\n  _has_bits_[0] |= 0x00000020u;\n}\ninline void FieldDescriptorProto::clear_has_extendee() {\n  _has_bits_[0] &= ~0x00000020u;\n}\ninline void 
FieldDescriptorProto::clear_extendee() {\n  extendee_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_extendee();\n}\ninline const ::std::string& FieldDescriptorProto::extendee() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldDescriptorProto.extendee)\n  return extendee_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FieldDescriptorProto::set_extendee(const ::std::string& value) {\n  set_has_extendee();\n  extendee_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FieldDescriptorProto.extendee)\n}\ninline void FieldDescriptorProto::set_extendee(const char* value) {\n  set_has_extendee();\n  extendee_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FieldDescriptorProto.extendee)\n}\ninline void FieldDescriptorProto::set_extendee(const char* value, size_t size) {\n  set_has_extendee();\n  extendee_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FieldDescriptorProto.extendee)\n}\ninline ::std::string* FieldDescriptorProto::mutable_extendee() {\n  set_has_extendee();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FieldDescriptorProto.extendee)\n  return extendee_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* FieldDescriptorProto::release_extendee() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FieldDescriptorProto.extendee)\n  clear_has_extendee();\n  return extendee_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FieldDescriptorProto::set_allocated_extendee(::std::string* 
extendee) {\n  if (extendee != NULL) {\n    set_has_extendee();\n  } else {\n    clear_has_extendee();\n  }\n  extendee_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), extendee);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.FieldDescriptorProto.extendee)\n}\n\n// optional string default_value = 7;\ninline bool FieldDescriptorProto::has_default_value() const {\n  return (_has_bits_[0] & 0x00000040u) != 0;\n}\ninline void FieldDescriptorProto::set_has_default_value() {\n  _has_bits_[0] |= 0x00000040u;\n}\ninline void FieldDescriptorProto::clear_has_default_value() {\n  _has_bits_[0] &= ~0x00000040u;\n}\ninline void FieldDescriptorProto::clear_default_value() {\n  default_value_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_default_value();\n}\ninline const ::std::string& FieldDescriptorProto::default_value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldDescriptorProto.default_value)\n  return default_value_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FieldDescriptorProto::set_default_value(const ::std::string& value) {\n  set_has_default_value();\n  default_value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FieldDescriptorProto.default_value)\n}\ninline void FieldDescriptorProto::set_default_value(const char* value) {\n  set_has_default_value();\n  default_value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FieldDescriptorProto.default_value)\n}\ninline void FieldDescriptorProto::set_default_value(const char* value, size_t size) {\n  set_has_default_value();\n  default_value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      
::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FieldDescriptorProto.default_value)\n}\ninline ::std::string* FieldDescriptorProto::mutable_default_value() {\n  set_has_default_value();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FieldDescriptorProto.default_value)\n  return default_value_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* FieldDescriptorProto::release_default_value() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FieldDescriptorProto.default_value)\n  clear_has_default_value();\n  return default_value_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FieldDescriptorProto::set_allocated_default_value(::std::string* default_value) {\n  if (default_value != NULL) {\n    set_has_default_value();\n  } else {\n    clear_has_default_value();\n  }\n  default_value_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), default_value);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.FieldDescriptorProto.default_value)\n}\n\n// optional int32 oneof_index = 9;\ninline bool FieldDescriptorProto::has_oneof_index() const {\n  return (_has_bits_[0] & 0x00000080u) != 0;\n}\ninline void FieldDescriptorProto::set_has_oneof_index() {\n  _has_bits_[0] |= 0x00000080u;\n}\ninline void FieldDescriptorProto::clear_has_oneof_index() {\n  _has_bits_[0] &= ~0x00000080u;\n}\ninline void FieldDescriptorProto::clear_oneof_index() {\n  oneof_index_ = 0;\n  clear_has_oneof_index();\n}\ninline ::google::protobuf::int32 FieldDescriptorProto::oneof_index() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldDescriptorProto.oneof_index)\n  return oneof_index_;\n}\ninline void FieldDescriptorProto::set_oneof_index(::google::protobuf::int32 value) {\n  set_has_oneof_index();\n  oneof_index_ = value;\n  // 
@@protoc_insertion_point(field_set:google.protobuf.FieldDescriptorProto.oneof_index)\n}\n\n// optional string json_name = 10;\ninline bool FieldDescriptorProto::has_json_name() const {\n  return (_has_bits_[0] & 0x00000100u) != 0;\n}\ninline void FieldDescriptorProto::set_has_json_name() {\n  _has_bits_[0] |= 0x00000100u;\n}\ninline void FieldDescriptorProto::clear_has_json_name() {\n  _has_bits_[0] &= ~0x00000100u;\n}\ninline void FieldDescriptorProto::clear_json_name() {\n  json_name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_json_name();\n}\ninline const ::std::string& FieldDescriptorProto::json_name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldDescriptorProto.json_name)\n  return json_name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FieldDescriptorProto::set_json_name(const ::std::string& value) {\n  set_has_json_name();\n  json_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FieldDescriptorProto.json_name)\n}\ninline void FieldDescriptorProto::set_json_name(const char* value) {\n  set_has_json_name();\n  json_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FieldDescriptorProto.json_name)\n}\ninline void FieldDescriptorProto::set_json_name(const char* value, size_t size) {\n  set_has_json_name();\n  json_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FieldDescriptorProto.json_name)\n}\ninline ::std::string* FieldDescriptorProto::mutable_json_name() {\n  set_has_json_name();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FieldDescriptorProto.json_name)\n 
 return json_name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* FieldDescriptorProto::release_json_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FieldDescriptorProto.json_name)\n  clear_has_json_name();\n  return json_name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FieldDescriptorProto::set_allocated_json_name(::std::string* json_name) {\n  if (json_name != NULL) {\n    set_has_json_name();\n  } else {\n    clear_has_json_name();\n  }\n  json_name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), json_name);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.FieldDescriptorProto.json_name)\n}\n\n// optional .google.protobuf.FieldOptions options = 8;\ninline bool FieldDescriptorProto::has_options() const {\n  return (_has_bits_[0] & 0x00000200u) != 0;\n}\ninline void FieldDescriptorProto::set_has_options() {\n  _has_bits_[0] |= 0x00000200u;\n}\ninline void FieldDescriptorProto::clear_has_options() {\n  _has_bits_[0] &= ~0x00000200u;\n}\ninline void FieldDescriptorProto::clear_options() {\n  if (options_ != NULL) options_->::google::protobuf::FieldOptions::Clear();\n  clear_has_options();\n}\ninline const ::google::protobuf::FieldOptions& FieldDescriptorProto::options() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldDescriptorProto.options)\n  return options_ != NULL ? 
*options_\n                         : *::google::protobuf::FieldOptions::internal_default_instance();\n}\ninline ::google::protobuf::FieldOptions* FieldDescriptorProto::mutable_options() {\n  set_has_options();\n  if (options_ == NULL) {\n    options_ = new ::google::protobuf::FieldOptions;\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FieldDescriptorProto.options)\n  return options_;\n}\ninline ::google::protobuf::FieldOptions* FieldDescriptorProto::release_options() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FieldDescriptorProto.options)\n  clear_has_options();\n  ::google::protobuf::FieldOptions* temp = options_;\n  options_ = NULL;\n  return temp;\n}\ninline void FieldDescriptorProto::set_allocated_options(::google::protobuf::FieldOptions* options) {\n  delete options_;\n  options_ = options;\n  if (options) {\n    set_has_options();\n  } else {\n    clear_has_options();\n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.FieldDescriptorProto.options)\n}\n\ninline const FieldDescriptorProto* FieldDescriptorProto::internal_default_instance() {\n  return &FieldDescriptorProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// OneofDescriptorProto\n\n// optional string name = 1;\ninline bool OneofDescriptorProto::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void OneofDescriptorProto::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void OneofDescriptorProto::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void OneofDescriptorProto::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& OneofDescriptorProto::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.OneofDescriptorProto.name)\n  return 
name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void OneofDescriptorProto::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.OneofDescriptorProto.name)\n}\ninline void OneofDescriptorProto::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.OneofDescriptorProto.name)\n}\ninline void OneofDescriptorProto::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.OneofDescriptorProto.name)\n}\ninline ::std::string* OneofDescriptorProto::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.OneofDescriptorProto.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* OneofDescriptorProto::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.OneofDescriptorProto.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void OneofDescriptorProto::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.OneofDescriptorProto.name)\n}\n\n// optional .google.protobuf.OneofOptions options = 2;\ninline bool OneofDescriptorProto::has_options() const {\n  return 
(_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void OneofDescriptorProto::set_has_options() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void OneofDescriptorProto::clear_has_options() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void OneofDescriptorProto::clear_options() {\n  if (options_ != NULL) options_->::google::protobuf::OneofOptions::Clear();\n  clear_has_options();\n}\ninline const ::google::protobuf::OneofOptions& OneofDescriptorProto::options() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.OneofDescriptorProto.options)\n  return options_ != NULL ? *options_\n                         : *::google::protobuf::OneofOptions::internal_default_instance();\n}\ninline ::google::protobuf::OneofOptions* OneofDescriptorProto::mutable_options() {\n  set_has_options();\n  if (options_ == NULL) {\n    options_ = new ::google::protobuf::OneofOptions;\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.OneofDescriptorProto.options)\n  return options_;\n}\ninline ::google::protobuf::OneofOptions* OneofDescriptorProto::release_options() {\n  // @@protoc_insertion_point(field_release:google.protobuf.OneofDescriptorProto.options)\n  clear_has_options();\n  ::google::protobuf::OneofOptions* temp = options_;\n  options_ = NULL;\n  return temp;\n}\ninline void OneofDescriptorProto::set_allocated_options(::google::protobuf::OneofOptions* options) {\n  delete options_;\n  options_ = options;\n  if (options) {\n    set_has_options();\n  } else {\n    clear_has_options();\n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.OneofDescriptorProto.options)\n}\n\ninline const OneofDescriptorProto* OneofDescriptorProto::internal_default_instance() {\n  return &OneofDescriptorProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// EnumDescriptorProto\n\n// optional string name = 1;\ninline bool EnumDescriptorProto::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 
0;\n}\ninline void EnumDescriptorProto::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void EnumDescriptorProto::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void EnumDescriptorProto::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& EnumDescriptorProto::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.EnumDescriptorProto.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void EnumDescriptorProto::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.EnumDescriptorProto.name)\n}\ninline void EnumDescriptorProto::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.EnumDescriptorProto.name)\n}\ninline void EnumDescriptorProto::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.EnumDescriptorProto.name)\n}\ninline ::std::string* EnumDescriptorProto::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.EnumDescriptorProto.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* EnumDescriptorProto::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.EnumDescriptorProto.name)\n  clear_has_name();\n  return 
name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void EnumDescriptorProto::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.EnumDescriptorProto.name)\n}\n\n// repeated .google.protobuf.EnumValueDescriptorProto value = 2;\ninline int EnumDescriptorProto::value_size() const {\n  return value_.size();\n}\ninline void EnumDescriptorProto::clear_value() {\n  value_.Clear();\n}\ninline const ::google::protobuf::EnumValueDescriptorProto& EnumDescriptorProto::value(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.EnumDescriptorProto.value)\n  return value_.Get(index);\n}\ninline ::google::protobuf::EnumValueDescriptorProto* EnumDescriptorProto::mutable_value(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.EnumDescriptorProto.value)\n  return value_.Mutable(index);\n}\ninline ::google::protobuf::EnumValueDescriptorProto* EnumDescriptorProto::add_value() {\n  // @@protoc_insertion_point(field_add:google.protobuf.EnumDescriptorProto.value)\n  return value_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumValueDescriptorProto >*\nEnumDescriptorProto::mutable_value() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.EnumDescriptorProto.value)\n  return &value_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumValueDescriptorProto >&\nEnumDescriptorProto::value() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.EnumDescriptorProto.value)\n  return value_;\n}\n\n// optional .google.protobuf.EnumOptions options = 3;\ninline bool EnumDescriptorProto::has_options() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void 
EnumDescriptorProto::set_has_options() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void EnumDescriptorProto::clear_has_options() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void EnumDescriptorProto::clear_options() {\n  if (options_ != NULL) options_->::google::protobuf::EnumOptions::Clear();\n  clear_has_options();\n}\ninline const ::google::protobuf::EnumOptions& EnumDescriptorProto::options() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.EnumDescriptorProto.options)\n  return options_ != NULL ? *options_\n                         : *::google::protobuf::EnumOptions::internal_default_instance();\n}\ninline ::google::protobuf::EnumOptions* EnumDescriptorProto::mutable_options() {\n  set_has_options();\n  if (options_ == NULL) {\n    options_ = new ::google::protobuf::EnumOptions;\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.EnumDescriptorProto.options)\n  return options_;\n}\ninline ::google::protobuf::EnumOptions* EnumDescriptorProto::release_options() {\n  // @@protoc_insertion_point(field_release:google.protobuf.EnumDescriptorProto.options)\n  clear_has_options();\n  ::google::protobuf::EnumOptions* temp = options_;\n  options_ = NULL;\n  return temp;\n}\ninline void EnumDescriptorProto::set_allocated_options(::google::protobuf::EnumOptions* options) {\n  delete options_;\n  options_ = options;\n  if (options) {\n    set_has_options();\n  } else {\n    clear_has_options();\n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.EnumDescriptorProto.options)\n}\n\ninline const EnumDescriptorProto* EnumDescriptorProto::internal_default_instance() {\n  return &EnumDescriptorProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// EnumValueDescriptorProto\n\n// optional string name = 1;\ninline bool EnumValueDescriptorProto::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void EnumValueDescriptorProto::set_has_name() {\n  
_has_bits_[0] |= 0x00000001u;\n}\ninline void EnumValueDescriptorProto::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void EnumValueDescriptorProto::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& EnumValueDescriptorProto::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.EnumValueDescriptorProto.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void EnumValueDescriptorProto::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.EnumValueDescriptorProto.name)\n}\ninline void EnumValueDescriptorProto::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.EnumValueDescriptorProto.name)\n}\ninline void EnumValueDescriptorProto::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.EnumValueDescriptorProto.name)\n}\ninline ::std::string* EnumValueDescriptorProto::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.EnumValueDescriptorProto.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* EnumValueDescriptorProto::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.EnumValueDescriptorProto.name)\n  clear_has_name();\n  return 
name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void EnumValueDescriptorProto::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.EnumValueDescriptorProto.name)\n}\n\n// optional int32 number = 2;\ninline bool EnumValueDescriptorProto::has_number() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void EnumValueDescriptorProto::set_has_number() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void EnumValueDescriptorProto::clear_has_number() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void EnumValueDescriptorProto::clear_number() {\n  number_ = 0;\n  clear_has_number();\n}\ninline ::google::protobuf::int32 EnumValueDescriptorProto::number() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.EnumValueDescriptorProto.number)\n  return number_;\n}\ninline void EnumValueDescriptorProto::set_number(::google::protobuf::int32 value) {\n  set_has_number();\n  number_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.EnumValueDescriptorProto.number)\n}\n\n// optional .google.protobuf.EnumValueOptions options = 3;\ninline bool EnumValueDescriptorProto::has_options() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void EnumValueDescriptorProto::set_has_options() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void EnumValueDescriptorProto::clear_has_options() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void EnumValueDescriptorProto::clear_options() {\n  if (options_ != NULL) options_->::google::protobuf::EnumValueOptions::Clear();\n  clear_has_options();\n}\ninline const ::google::protobuf::EnumValueOptions& EnumValueDescriptorProto::options() const {\n  // 
@@protoc_insertion_point(field_get:google.protobuf.EnumValueDescriptorProto.options)\n  return options_ != NULL ? *options_\n                         : *::google::protobuf::EnumValueOptions::internal_default_instance();\n}\ninline ::google::protobuf::EnumValueOptions* EnumValueDescriptorProto::mutable_options() {\n  set_has_options();\n  if (options_ == NULL) {\n    options_ = new ::google::protobuf::EnumValueOptions;\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.EnumValueDescriptorProto.options)\n  return options_;\n}\ninline ::google::protobuf::EnumValueOptions* EnumValueDescriptorProto::release_options() {\n  // @@protoc_insertion_point(field_release:google.protobuf.EnumValueDescriptorProto.options)\n  clear_has_options();\n  ::google::protobuf::EnumValueOptions* temp = options_;\n  options_ = NULL;\n  return temp;\n}\ninline void EnumValueDescriptorProto::set_allocated_options(::google::protobuf::EnumValueOptions* options) {\n  delete options_;\n  options_ = options;\n  if (options) {\n    set_has_options();\n  } else {\n    clear_has_options();\n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.EnumValueDescriptorProto.options)\n}\n\ninline const EnumValueDescriptorProto* EnumValueDescriptorProto::internal_default_instance() {\n  return &EnumValueDescriptorProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// ServiceDescriptorProto\n\n// optional string name = 1;\ninline bool ServiceDescriptorProto::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void ServiceDescriptorProto::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void ServiceDescriptorProto::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void ServiceDescriptorProto::clear_name() {\n  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& 
ServiceDescriptorProto::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.ServiceDescriptorProto.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ServiceDescriptorProto::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.ServiceDescriptorProto.name)\n}\ninline void ServiceDescriptorProto::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.ServiceDescriptorProto.name)\n}\ninline void ServiceDescriptorProto::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.ServiceDescriptorProto.name)\n}\ninline ::std::string* ServiceDescriptorProto::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.ServiceDescriptorProto.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* ServiceDescriptorProto::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.ServiceDescriptorProto.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void ServiceDescriptorProto::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // 
@@protoc_insertion_point(field_set_allocated:google.protobuf.ServiceDescriptorProto.name)\n}\n\n// repeated .google.protobuf.MethodDescriptorProto method = 2;\ninline int ServiceDescriptorProto::method_size() const {\n  return method_.size();\n}\ninline void ServiceDescriptorProto::clear_method() {\n  method_.Clear();\n}\ninline const ::google::protobuf::MethodDescriptorProto& ServiceDescriptorProto::method(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.ServiceDescriptorProto.method)\n  return method_.Get(index);\n}\ninline ::google::protobuf::MethodDescriptorProto* ServiceDescriptorProto::mutable_method(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.ServiceDescriptorProto.method)\n  return method_.Mutable(index);\n}\ninline ::google::protobuf::MethodDescriptorProto* ServiceDescriptorProto::add_method() {\n  // @@protoc_insertion_point(field_add:google.protobuf.ServiceDescriptorProto.method)\n  return method_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::MethodDescriptorProto >*\nServiceDescriptorProto::mutable_method() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.ServiceDescriptorProto.method)\n  return &method_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::MethodDescriptorProto >&\nServiceDescriptorProto::method() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.ServiceDescriptorProto.method)\n  return method_;\n}\n\n// optional .google.protobuf.ServiceOptions options = 3;\ninline bool ServiceDescriptorProto::has_options() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void ServiceDescriptorProto::set_has_options() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void ServiceDescriptorProto::clear_has_options() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void ServiceDescriptorProto::clear_options() {\n  if (options_ != NULL) options_->::google::protobuf::ServiceOptions::Clear();\n  
clear_has_options();\n}\ninline const ::google::protobuf::ServiceOptions& ServiceDescriptorProto::options() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.ServiceDescriptorProto.options)\n  return options_ != NULL ? *options_\n                         : *::google::protobuf::ServiceOptions::internal_default_instance();\n}\ninline ::google::protobuf::ServiceOptions* ServiceDescriptorProto::mutable_options() {\n  set_has_options();\n  if (options_ == NULL) {\n    options_ = new ::google::protobuf::ServiceOptions;\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.ServiceDescriptorProto.options)\n  return options_;\n}\ninline ::google::protobuf::ServiceOptions* ServiceDescriptorProto::release_options() {\n  // @@protoc_insertion_point(field_release:google.protobuf.ServiceDescriptorProto.options)\n  clear_has_options();\n  ::google::protobuf::ServiceOptions* temp = options_;\n  options_ = NULL;\n  return temp;\n}\ninline void ServiceDescriptorProto::set_allocated_options(::google::protobuf::ServiceOptions* options) {\n  delete options_;\n  options_ = options;\n  if (options) {\n    set_has_options();\n  } else {\n    clear_has_options();\n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.ServiceDescriptorProto.options)\n}\n\ninline const ServiceDescriptorProto* ServiceDescriptorProto::internal_default_instance() {\n  return &ServiceDescriptorProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// MethodDescriptorProto\n\n// optional string name = 1;\ninline bool MethodDescriptorProto::has_name() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void MethodDescriptorProto::set_has_name() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void MethodDescriptorProto::clear_has_name() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void MethodDescriptorProto::clear_name() {\n  
name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name();\n}\ninline const ::std::string& MethodDescriptorProto::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.MethodDescriptorProto.name)\n  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void MethodDescriptorProto::set_name(const ::std::string& value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.MethodDescriptorProto.name)\n}\ninline void MethodDescriptorProto::set_name(const char* value) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.MethodDescriptorProto.name)\n}\ninline void MethodDescriptorProto::set_name(const char* value, size_t size) {\n  set_has_name();\n  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.MethodDescriptorProto.name)\n}\ninline ::std::string* MethodDescriptorProto::mutable_name() {\n  set_has_name();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.MethodDescriptorProto.name)\n  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* MethodDescriptorProto::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.MethodDescriptorProto.name)\n  clear_has_name();\n  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void MethodDescriptorProto::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    set_has_name();\n  } else {\n    clear_has_name();\n  }\n  
name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.MethodDescriptorProto.name)\n}\n\n// optional string input_type = 2;\ninline bool MethodDescriptorProto::has_input_type() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void MethodDescriptorProto::set_has_input_type() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void MethodDescriptorProto::clear_has_input_type() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void MethodDescriptorProto::clear_input_type() {\n  input_type_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_input_type();\n}\ninline const ::std::string& MethodDescriptorProto::input_type() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.MethodDescriptorProto.input_type)\n  return input_type_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void MethodDescriptorProto::set_input_type(const ::std::string& value) {\n  set_has_input_type();\n  input_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.MethodDescriptorProto.input_type)\n}\ninline void MethodDescriptorProto::set_input_type(const char* value) {\n  set_has_input_type();\n  input_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.MethodDescriptorProto.input_type)\n}\ninline void MethodDescriptorProto::set_input_type(const char* value, size_t size) {\n  set_has_input_type();\n  input_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.MethodDescriptorProto.input_type)\n}\ninline ::std::string* 
MethodDescriptorProto::mutable_input_type() {\n  set_has_input_type();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.MethodDescriptorProto.input_type)\n  return input_type_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* MethodDescriptorProto::release_input_type() {\n  // @@protoc_insertion_point(field_release:google.protobuf.MethodDescriptorProto.input_type)\n  clear_has_input_type();\n  return input_type_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void MethodDescriptorProto::set_allocated_input_type(::std::string* input_type) {\n  if (input_type != NULL) {\n    set_has_input_type();\n  } else {\n    clear_has_input_type();\n  }\n  input_type_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), input_type);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.MethodDescriptorProto.input_type)\n}\n\n// optional string output_type = 3;\ninline bool MethodDescriptorProto::has_output_type() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void MethodDescriptorProto::set_has_output_type() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void MethodDescriptorProto::clear_has_output_type() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void MethodDescriptorProto::clear_output_type() {\n  output_type_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_output_type();\n}\ninline const ::std::string& MethodDescriptorProto::output_type() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.MethodDescriptorProto.output_type)\n  return output_type_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void MethodDescriptorProto::set_output_type(const ::std::string& value) {\n  set_has_output_type();\n  output_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // 
@@protoc_insertion_point(field_set:google.protobuf.MethodDescriptorProto.output_type)\n}\ninline void MethodDescriptorProto::set_output_type(const char* value) {\n  set_has_output_type();\n  output_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.MethodDescriptorProto.output_type)\n}\ninline void MethodDescriptorProto::set_output_type(const char* value, size_t size) {\n  set_has_output_type();\n  output_type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.MethodDescriptorProto.output_type)\n}\ninline ::std::string* MethodDescriptorProto::mutable_output_type() {\n  set_has_output_type();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.MethodDescriptorProto.output_type)\n  return output_type_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* MethodDescriptorProto::release_output_type() {\n  // @@protoc_insertion_point(field_release:google.protobuf.MethodDescriptorProto.output_type)\n  clear_has_output_type();\n  return output_type_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void MethodDescriptorProto::set_allocated_output_type(::std::string* output_type) {\n  if (output_type != NULL) {\n    set_has_output_type();\n  } else {\n    clear_has_output_type();\n  }\n  output_type_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), output_type);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.MethodDescriptorProto.output_type)\n}\n\n// optional .google.protobuf.MethodOptions options = 4;\ninline bool MethodDescriptorProto::has_options() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void MethodDescriptorProto::set_has_options() 
{\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void MethodDescriptorProto::clear_has_options() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void MethodDescriptorProto::clear_options() {\n  if (options_ != NULL) options_->::google::protobuf::MethodOptions::Clear();\n  clear_has_options();\n}\ninline const ::google::protobuf::MethodOptions& MethodDescriptorProto::options() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.MethodDescriptorProto.options)\n  return options_ != NULL ? *options_\n                         : *::google::protobuf::MethodOptions::internal_default_instance();\n}\ninline ::google::protobuf::MethodOptions* MethodDescriptorProto::mutable_options() {\n  set_has_options();\n  if (options_ == NULL) {\n    options_ = new ::google::protobuf::MethodOptions;\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.MethodDescriptorProto.options)\n  return options_;\n}\ninline ::google::protobuf::MethodOptions* MethodDescriptorProto::release_options() {\n  // @@protoc_insertion_point(field_release:google.protobuf.MethodDescriptorProto.options)\n  clear_has_options();\n  ::google::protobuf::MethodOptions* temp = options_;\n  options_ = NULL;\n  return temp;\n}\ninline void MethodDescriptorProto::set_allocated_options(::google::protobuf::MethodOptions* options) {\n  delete options_;\n  options_ = options;\n  if (options) {\n    set_has_options();\n  } else {\n    clear_has_options();\n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.MethodDescriptorProto.options)\n}\n\n// optional bool client_streaming = 5 [default = false];\ninline bool MethodDescriptorProto::has_client_streaming() const {\n  return (_has_bits_[0] & 0x00000010u) != 0;\n}\ninline void MethodDescriptorProto::set_has_client_streaming() {\n  _has_bits_[0] |= 0x00000010u;\n}\ninline void MethodDescriptorProto::clear_has_client_streaming() {\n  _has_bits_[0] &= ~0x00000010u;\n}\ninline void MethodDescriptorProto::clear_client_streaming() {\n  
client_streaming_ = false;\n  clear_has_client_streaming();\n}\ninline bool MethodDescriptorProto::client_streaming() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.MethodDescriptorProto.client_streaming)\n  return client_streaming_;\n}\ninline void MethodDescriptorProto::set_client_streaming(bool value) {\n  set_has_client_streaming();\n  client_streaming_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.MethodDescriptorProto.client_streaming)\n}\n\n// optional bool server_streaming = 6 [default = false];\ninline bool MethodDescriptorProto::has_server_streaming() const {\n  return (_has_bits_[0] & 0x00000020u) != 0;\n}\ninline void MethodDescriptorProto::set_has_server_streaming() {\n  _has_bits_[0] |= 0x00000020u;\n}\ninline void MethodDescriptorProto::clear_has_server_streaming() {\n  _has_bits_[0] &= ~0x00000020u;\n}\ninline void MethodDescriptorProto::clear_server_streaming() {\n  server_streaming_ = false;\n  clear_has_server_streaming();\n}\ninline bool MethodDescriptorProto::server_streaming() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.MethodDescriptorProto.server_streaming)\n  return server_streaming_;\n}\ninline void MethodDescriptorProto::set_server_streaming(bool value) {\n  set_has_server_streaming();\n  server_streaming_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.MethodDescriptorProto.server_streaming)\n}\n\ninline const MethodDescriptorProto* MethodDescriptorProto::internal_default_instance() {\n  return &MethodDescriptorProto_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// FileOptions\n\n// optional string java_package = 1;\ninline bool FileOptions::has_java_package() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void FileOptions::set_has_java_package() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void FileOptions::clear_has_java_package() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void 
FileOptions::clear_java_package() {\n  java_package_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_java_package();\n}\ninline const ::std::string& FileOptions::java_package() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileOptions.java_package)\n  return java_package_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FileOptions::set_java_package(const ::std::string& value) {\n  set_has_java_package();\n  java_package_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FileOptions.java_package)\n}\ninline void FileOptions::set_java_package(const char* value) {\n  set_has_java_package();\n  java_package_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FileOptions.java_package)\n}\ninline void FileOptions::set_java_package(const char* value, size_t size) {\n  set_has_java_package();\n  java_package_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FileOptions.java_package)\n}\ninline ::std::string* FileOptions::mutable_java_package() {\n  set_has_java_package();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileOptions.java_package)\n  return java_package_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* FileOptions::release_java_package() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FileOptions.java_package)\n  clear_has_java_package();\n  return java_package_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FileOptions::set_allocated_java_package(::std::string* java_package) {\n  
if (java_package != NULL) {\n    set_has_java_package();\n  } else {\n    clear_has_java_package();\n  }\n  java_package_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), java_package);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.FileOptions.java_package)\n}\n\n// optional string java_outer_classname = 8;\ninline bool FileOptions::has_java_outer_classname() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void FileOptions::set_has_java_outer_classname() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void FileOptions::clear_has_java_outer_classname() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void FileOptions::clear_java_outer_classname() {\n  java_outer_classname_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_java_outer_classname();\n}\ninline const ::std::string& FileOptions::java_outer_classname() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileOptions.java_outer_classname)\n  return java_outer_classname_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FileOptions::set_java_outer_classname(const ::std::string& value) {\n  set_has_java_outer_classname();\n  java_outer_classname_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FileOptions.java_outer_classname)\n}\ninline void FileOptions::set_java_outer_classname(const char* value) {\n  set_has_java_outer_classname();\n  java_outer_classname_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FileOptions.java_outer_classname)\n}\ninline void FileOptions::set_java_outer_classname(const char* value, size_t size) {\n  set_has_java_outer_classname();\n  
java_outer_classname_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FileOptions.java_outer_classname)\n}\ninline ::std::string* FileOptions::mutable_java_outer_classname() {\n  set_has_java_outer_classname();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileOptions.java_outer_classname)\n  return java_outer_classname_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* FileOptions::release_java_outer_classname() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FileOptions.java_outer_classname)\n  clear_has_java_outer_classname();\n  return java_outer_classname_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FileOptions::set_allocated_java_outer_classname(::std::string* java_outer_classname) {\n  if (java_outer_classname != NULL) {\n    set_has_java_outer_classname();\n  } else {\n    clear_has_java_outer_classname();\n  }\n  java_outer_classname_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), java_outer_classname);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.FileOptions.java_outer_classname)\n}\n\n// optional bool java_multiple_files = 10 [default = false];\ninline bool FileOptions::has_java_multiple_files() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void FileOptions::set_has_java_multiple_files() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void FileOptions::clear_has_java_multiple_files() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void FileOptions::clear_java_multiple_files() {\n  java_multiple_files_ = false;\n  clear_has_java_multiple_files();\n}\ninline bool FileOptions::java_multiple_files() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileOptions.java_multiple_files)\n  
return java_multiple_files_;\n}\ninline void FileOptions::set_java_multiple_files(bool value) {\n  set_has_java_multiple_files();\n  java_multiple_files_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FileOptions.java_multiple_files)\n}\n\n// optional bool java_generate_equals_and_hash = 20 [deprecated = true];\ninline bool FileOptions::has_java_generate_equals_and_hash() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void FileOptions::set_has_java_generate_equals_and_hash() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void FileOptions::clear_has_java_generate_equals_and_hash() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void FileOptions::clear_java_generate_equals_and_hash() {\n  java_generate_equals_and_hash_ = false;\n  clear_has_java_generate_equals_and_hash();\n}\ninline bool FileOptions::java_generate_equals_and_hash() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileOptions.java_generate_equals_and_hash)\n  return java_generate_equals_and_hash_;\n}\ninline void FileOptions::set_java_generate_equals_and_hash(bool value) {\n  set_has_java_generate_equals_and_hash();\n  java_generate_equals_and_hash_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FileOptions.java_generate_equals_and_hash)\n}\n\n// optional bool java_string_check_utf8 = 27 [default = false];\ninline bool FileOptions::has_java_string_check_utf8() const {\n  return (_has_bits_[0] & 0x00000010u) != 0;\n}\ninline void FileOptions::set_has_java_string_check_utf8() {\n  _has_bits_[0] |= 0x00000010u;\n}\ninline void FileOptions::clear_has_java_string_check_utf8() {\n  _has_bits_[0] &= ~0x00000010u;\n}\ninline void FileOptions::clear_java_string_check_utf8() {\n  java_string_check_utf8_ = false;\n  clear_has_java_string_check_utf8();\n}\ninline bool FileOptions::java_string_check_utf8() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileOptions.java_string_check_utf8)\n  return 
java_string_check_utf8_;\n}\ninline void FileOptions::set_java_string_check_utf8(bool value) {\n  set_has_java_string_check_utf8();\n  java_string_check_utf8_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FileOptions.java_string_check_utf8)\n}\n\n// optional .google.protobuf.FileOptions.OptimizeMode optimize_for = 9 [default = SPEED];\ninline bool FileOptions::has_optimize_for() const {\n  return (_has_bits_[0] & 0x00000020u) != 0;\n}\ninline void FileOptions::set_has_optimize_for() {\n  _has_bits_[0] |= 0x00000020u;\n}\ninline void FileOptions::clear_has_optimize_for() {\n  _has_bits_[0] &= ~0x00000020u;\n}\ninline void FileOptions::clear_optimize_for() {\n  optimize_for_ = 1;\n  clear_has_optimize_for();\n}\ninline ::google::protobuf::FileOptions_OptimizeMode FileOptions::optimize_for() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileOptions.optimize_for)\n  return static_cast< ::google::protobuf::FileOptions_OptimizeMode >(optimize_for_);\n}\ninline void FileOptions::set_optimize_for(::google::protobuf::FileOptions_OptimizeMode value) {\n  assert(::google::protobuf::FileOptions_OptimizeMode_IsValid(value));\n  set_has_optimize_for();\n  optimize_for_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FileOptions.optimize_for)\n}\n\n// optional string go_package = 11;\ninline bool FileOptions::has_go_package() const {\n  return (_has_bits_[0] & 0x00000040u) != 0;\n}\ninline void FileOptions::set_has_go_package() {\n  _has_bits_[0] |= 0x00000040u;\n}\ninline void FileOptions::clear_has_go_package() {\n  _has_bits_[0] &= ~0x00000040u;\n}\ninline void FileOptions::clear_go_package() {\n  go_package_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_go_package();\n}\ninline const ::std::string& FileOptions::go_package() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileOptions.go_package)\n  return 
go_package_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FileOptions::set_go_package(const ::std::string& value) {\n  set_has_go_package();\n  go_package_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FileOptions.go_package)\n}\ninline void FileOptions::set_go_package(const char* value) {\n  set_has_go_package();\n  go_package_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FileOptions.go_package)\n}\ninline void FileOptions::set_go_package(const char* value, size_t size) {\n  set_has_go_package();\n  go_package_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FileOptions.go_package)\n}\ninline ::std::string* FileOptions::mutable_go_package() {\n  set_has_go_package();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileOptions.go_package)\n  return go_package_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* FileOptions::release_go_package() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FileOptions.go_package)\n  clear_has_go_package();\n  return go_package_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FileOptions::set_allocated_go_package(::std::string* go_package) {\n  if (go_package != NULL) {\n    set_has_go_package();\n  } else {\n    clear_has_go_package();\n  }\n  go_package_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), go_package);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.FileOptions.go_package)\n}\n\n// optional bool cc_generic_services = 16 [default = 
false];\ninline bool FileOptions::has_cc_generic_services() const {\n  return (_has_bits_[0] & 0x00000080u) != 0;\n}\ninline void FileOptions::set_has_cc_generic_services() {\n  _has_bits_[0] |= 0x00000080u;\n}\ninline void FileOptions::clear_has_cc_generic_services() {\n  _has_bits_[0] &= ~0x00000080u;\n}\ninline void FileOptions::clear_cc_generic_services() {\n  cc_generic_services_ = false;\n  clear_has_cc_generic_services();\n}\ninline bool FileOptions::cc_generic_services() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileOptions.cc_generic_services)\n  return cc_generic_services_;\n}\ninline void FileOptions::set_cc_generic_services(bool value) {\n  set_has_cc_generic_services();\n  cc_generic_services_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FileOptions.cc_generic_services)\n}\n\n// optional bool java_generic_services = 17 [default = false];\ninline bool FileOptions::has_java_generic_services() const {\n  return (_has_bits_[0] & 0x00000100u) != 0;\n}\ninline void FileOptions::set_has_java_generic_services() {\n  _has_bits_[0] |= 0x00000100u;\n}\ninline void FileOptions::clear_has_java_generic_services() {\n  _has_bits_[0] &= ~0x00000100u;\n}\ninline void FileOptions::clear_java_generic_services() {\n  java_generic_services_ = false;\n  clear_has_java_generic_services();\n}\ninline bool FileOptions::java_generic_services() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileOptions.java_generic_services)\n  return java_generic_services_;\n}\ninline void FileOptions::set_java_generic_services(bool value) {\n  set_has_java_generic_services();\n  java_generic_services_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FileOptions.java_generic_services)\n}\n\n// optional bool py_generic_services = 18 [default = false];\ninline bool FileOptions::has_py_generic_services() const {\n  return (_has_bits_[0] & 0x00000200u) != 0;\n}\ninline void FileOptions::set_has_py_generic_services() {\n  
_has_bits_[0] |= 0x00000200u;\n}\ninline void FileOptions::clear_has_py_generic_services() {\n  _has_bits_[0] &= ~0x00000200u;\n}\ninline void FileOptions::clear_py_generic_services() {\n  py_generic_services_ = false;\n  clear_has_py_generic_services();\n}\ninline bool FileOptions::py_generic_services() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileOptions.py_generic_services)\n  return py_generic_services_;\n}\ninline void FileOptions::set_py_generic_services(bool value) {\n  set_has_py_generic_services();\n  py_generic_services_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FileOptions.py_generic_services)\n}\n\n// optional bool deprecated = 23 [default = false];\ninline bool FileOptions::has_deprecated() const {\n  return (_has_bits_[0] & 0x00000400u) != 0;\n}\ninline void FileOptions::set_has_deprecated() {\n  _has_bits_[0] |= 0x00000400u;\n}\ninline void FileOptions::clear_has_deprecated() {\n  _has_bits_[0] &= ~0x00000400u;\n}\ninline void FileOptions::clear_deprecated() {\n  deprecated_ = false;\n  clear_has_deprecated();\n}\ninline bool FileOptions::deprecated() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileOptions.deprecated)\n  return deprecated_;\n}\ninline void FileOptions::set_deprecated(bool value) {\n  set_has_deprecated();\n  deprecated_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FileOptions.deprecated)\n}\n\n// optional bool cc_enable_arenas = 31 [default = false];\ninline bool FileOptions::has_cc_enable_arenas() const {\n  return (_has_bits_[0] & 0x00000800u) != 0;\n}\ninline void FileOptions::set_has_cc_enable_arenas() {\n  _has_bits_[0] |= 0x00000800u;\n}\ninline void FileOptions::clear_has_cc_enable_arenas() {\n  _has_bits_[0] &= ~0x00000800u;\n}\ninline void FileOptions::clear_cc_enable_arenas() {\n  cc_enable_arenas_ = false;\n  clear_has_cc_enable_arenas();\n}\ninline bool FileOptions::cc_enable_arenas() const {\n  // 
@@protoc_insertion_point(field_get:google.protobuf.FileOptions.cc_enable_arenas)\n  return cc_enable_arenas_;\n}\ninline void FileOptions::set_cc_enable_arenas(bool value) {\n  set_has_cc_enable_arenas();\n  cc_enable_arenas_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FileOptions.cc_enable_arenas)\n}\n\n// optional string objc_class_prefix = 36;\ninline bool FileOptions::has_objc_class_prefix() const {\n  return (_has_bits_[0] & 0x00001000u) != 0;\n}\ninline void FileOptions::set_has_objc_class_prefix() {\n  _has_bits_[0] |= 0x00001000u;\n}\ninline void FileOptions::clear_has_objc_class_prefix() {\n  _has_bits_[0] &= ~0x00001000u;\n}\ninline void FileOptions::clear_objc_class_prefix() {\n  objc_class_prefix_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_objc_class_prefix();\n}\ninline const ::std::string& FileOptions::objc_class_prefix() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileOptions.objc_class_prefix)\n  return objc_class_prefix_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FileOptions::set_objc_class_prefix(const ::std::string& value) {\n  set_has_objc_class_prefix();\n  objc_class_prefix_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FileOptions.objc_class_prefix)\n}\ninline void FileOptions::set_objc_class_prefix(const char* value) {\n  set_has_objc_class_prefix();\n  objc_class_prefix_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FileOptions.objc_class_prefix)\n}\ninline void FileOptions::set_objc_class_prefix(const char* value, size_t size) {\n  set_has_objc_class_prefix();\n  objc_class_prefix_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const 
char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FileOptions.objc_class_prefix)\n}\ninline ::std::string* FileOptions::mutable_objc_class_prefix() {\n  set_has_objc_class_prefix();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileOptions.objc_class_prefix)\n  return objc_class_prefix_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* FileOptions::release_objc_class_prefix() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FileOptions.objc_class_prefix)\n  clear_has_objc_class_prefix();\n  return objc_class_prefix_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FileOptions::set_allocated_objc_class_prefix(::std::string* objc_class_prefix) {\n  if (objc_class_prefix != NULL) {\n    set_has_objc_class_prefix();\n  } else {\n    clear_has_objc_class_prefix();\n  }\n  objc_class_prefix_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), objc_class_prefix);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.FileOptions.objc_class_prefix)\n}\n\n// optional string csharp_namespace = 37;\ninline bool FileOptions::has_csharp_namespace() const {\n  return (_has_bits_[0] & 0x00002000u) != 0;\n}\ninline void FileOptions::set_has_csharp_namespace() {\n  _has_bits_[0] |= 0x00002000u;\n}\ninline void FileOptions::clear_has_csharp_namespace() {\n  _has_bits_[0] &= ~0x00002000u;\n}\ninline void FileOptions::clear_csharp_namespace() {\n  csharp_namespace_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_csharp_namespace();\n}\ninline const ::std::string& FileOptions::csharp_namespace() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileOptions.csharp_namespace)\n  return csharp_namespace_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void 
FileOptions::set_csharp_namespace(const ::std::string& value) {\n  set_has_csharp_namespace();\n  csharp_namespace_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.FileOptions.csharp_namespace)\n}\ninline void FileOptions::set_csharp_namespace(const char* value) {\n  set_has_csharp_namespace();\n  csharp_namespace_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FileOptions.csharp_namespace)\n}\ninline void FileOptions::set_csharp_namespace(const char* value, size_t size) {\n  set_has_csharp_namespace();\n  csharp_namespace_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FileOptions.csharp_namespace)\n}\ninline ::std::string* FileOptions::mutable_csharp_namespace() {\n  set_has_csharp_namespace();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileOptions.csharp_namespace)\n  return csharp_namespace_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* FileOptions::release_csharp_namespace() {\n  // @@protoc_insertion_point(field_release:google.protobuf.FileOptions.csharp_namespace)\n  clear_has_csharp_namespace();\n  return csharp_namespace_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void FileOptions::set_allocated_csharp_namespace(::std::string* csharp_namespace) {\n  if (csharp_namespace != NULL) {\n    set_has_csharp_namespace();\n  } else {\n    clear_has_csharp_namespace();\n  }\n  csharp_namespace_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), csharp_namespace);\n  // 
@@protoc_insertion_point(field_set_allocated:google.protobuf.FileOptions.csharp_namespace)\n}\n\n// repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\ninline int FileOptions::uninterpreted_option_size() const {\n  return uninterpreted_option_.size();\n}\ninline void FileOptions::clear_uninterpreted_option() {\n  uninterpreted_option_.Clear();\n}\ninline const ::google::protobuf::UninterpretedOption& FileOptions::uninterpreted_option(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FileOptions.uninterpreted_option)\n  return uninterpreted_option_.Get(index);\n}\ninline ::google::protobuf::UninterpretedOption* FileOptions::mutable_uninterpreted_option(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FileOptions.uninterpreted_option)\n  return uninterpreted_option_.Mutable(index);\n}\ninline ::google::protobuf::UninterpretedOption* FileOptions::add_uninterpreted_option() {\n  // @@protoc_insertion_point(field_add:google.protobuf.FileOptions.uninterpreted_option)\n  return uninterpreted_option_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\nFileOptions::mutable_uninterpreted_option() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.FileOptions.uninterpreted_option)\n  return &uninterpreted_option_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\nFileOptions::uninterpreted_option() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.FileOptions.uninterpreted_option)\n  return uninterpreted_option_;\n}\n\ninline const FileOptions* FileOptions::internal_default_instance() {\n  return &FileOptions_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// MessageOptions\n\n// optional bool message_set_wire_format = 1 [default = false];\ninline bool MessageOptions::has_message_set_wire_format() const {\n  return 
(_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void MessageOptions::set_has_message_set_wire_format() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void MessageOptions::clear_has_message_set_wire_format() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void MessageOptions::clear_message_set_wire_format() {\n  message_set_wire_format_ = false;\n  clear_has_message_set_wire_format();\n}\ninline bool MessageOptions::message_set_wire_format() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.MessageOptions.message_set_wire_format)\n  return message_set_wire_format_;\n}\ninline void MessageOptions::set_message_set_wire_format(bool value) {\n  set_has_message_set_wire_format();\n  message_set_wire_format_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.MessageOptions.message_set_wire_format)\n}\n\n// optional bool no_standard_descriptor_accessor = 2 [default = false];\ninline bool MessageOptions::has_no_standard_descriptor_accessor() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void MessageOptions::set_has_no_standard_descriptor_accessor() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void MessageOptions::clear_has_no_standard_descriptor_accessor() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void MessageOptions::clear_no_standard_descriptor_accessor() {\n  no_standard_descriptor_accessor_ = false;\n  clear_has_no_standard_descriptor_accessor();\n}\ninline bool MessageOptions::no_standard_descriptor_accessor() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.MessageOptions.no_standard_descriptor_accessor)\n  return no_standard_descriptor_accessor_;\n}\ninline void MessageOptions::set_no_standard_descriptor_accessor(bool value) {\n  set_has_no_standard_descriptor_accessor();\n  no_standard_descriptor_accessor_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.MessageOptions.no_standard_descriptor_accessor)\n}\n\n// optional bool deprecated = 3 [default = false];\ninline bool 
MessageOptions::has_deprecated() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void MessageOptions::set_has_deprecated() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void MessageOptions::clear_has_deprecated() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void MessageOptions::clear_deprecated() {\n  deprecated_ = false;\n  clear_has_deprecated();\n}\ninline bool MessageOptions::deprecated() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.MessageOptions.deprecated)\n  return deprecated_;\n}\ninline void MessageOptions::set_deprecated(bool value) {\n  set_has_deprecated();\n  deprecated_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.MessageOptions.deprecated)\n}\n\n// optional bool map_entry = 7;\ninline bool MessageOptions::has_map_entry() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void MessageOptions::set_has_map_entry() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void MessageOptions::clear_has_map_entry() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void MessageOptions::clear_map_entry() {\n  map_entry_ = false;\n  clear_has_map_entry();\n}\ninline bool MessageOptions::map_entry() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.MessageOptions.map_entry)\n  return map_entry_;\n}\ninline void MessageOptions::set_map_entry(bool value) {\n  set_has_map_entry();\n  map_entry_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.MessageOptions.map_entry)\n}\n\n// repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\ninline int MessageOptions::uninterpreted_option_size() const {\n  return uninterpreted_option_.size();\n}\ninline void MessageOptions::clear_uninterpreted_option() {\n  uninterpreted_option_.Clear();\n}\ninline const ::google::protobuf::UninterpretedOption& MessageOptions::uninterpreted_option(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.MessageOptions.uninterpreted_option)\n  return 
uninterpreted_option_.Get(index);\n}\ninline ::google::protobuf::UninterpretedOption* MessageOptions::mutable_uninterpreted_option(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.MessageOptions.uninterpreted_option)\n  return uninterpreted_option_.Mutable(index);\n}\ninline ::google::protobuf::UninterpretedOption* MessageOptions::add_uninterpreted_option() {\n  // @@protoc_insertion_point(field_add:google.protobuf.MessageOptions.uninterpreted_option)\n  return uninterpreted_option_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\nMessageOptions::mutable_uninterpreted_option() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.MessageOptions.uninterpreted_option)\n  return &uninterpreted_option_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\nMessageOptions::uninterpreted_option() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.MessageOptions.uninterpreted_option)\n  return uninterpreted_option_;\n}\n\ninline const MessageOptions* MessageOptions::internal_default_instance() {\n  return &MessageOptions_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// FieldOptions\n\n// optional .google.protobuf.FieldOptions.CType ctype = 1 [default = STRING];\ninline bool FieldOptions::has_ctype() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void FieldOptions::set_has_ctype() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void FieldOptions::clear_has_ctype() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void FieldOptions::clear_ctype() {\n  ctype_ = 0;\n  clear_has_ctype();\n}\ninline ::google::protobuf::FieldOptions_CType FieldOptions::ctype() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldOptions.ctype)\n  return static_cast< ::google::protobuf::FieldOptions_CType >(ctype_);\n}\ninline void 
FieldOptions::set_ctype(::google::protobuf::FieldOptions_CType value) {\n  assert(::google::protobuf::FieldOptions_CType_IsValid(value));\n  set_has_ctype();\n  ctype_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FieldOptions.ctype)\n}\n\n// optional bool packed = 2;\ninline bool FieldOptions::has_packed() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void FieldOptions::set_has_packed() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void FieldOptions::clear_has_packed() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void FieldOptions::clear_packed() {\n  packed_ = false;\n  clear_has_packed();\n}\ninline bool FieldOptions::packed() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldOptions.packed)\n  return packed_;\n}\ninline void FieldOptions::set_packed(bool value) {\n  set_has_packed();\n  packed_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FieldOptions.packed)\n}\n\n// optional .google.protobuf.FieldOptions.JSType jstype = 6 [default = JS_NORMAL];\ninline bool FieldOptions::has_jstype() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void FieldOptions::set_has_jstype() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void FieldOptions::clear_has_jstype() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void FieldOptions::clear_jstype() {\n  jstype_ = 0;\n  clear_has_jstype();\n}\ninline ::google::protobuf::FieldOptions_JSType FieldOptions::jstype() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldOptions.jstype)\n  return static_cast< ::google::protobuf::FieldOptions_JSType >(jstype_);\n}\ninline void FieldOptions::set_jstype(::google::protobuf::FieldOptions_JSType value) {\n  assert(::google::protobuf::FieldOptions_JSType_IsValid(value));\n  set_has_jstype();\n  jstype_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FieldOptions.jstype)\n}\n\n// optional bool lazy = 5 [default = false];\ninline bool FieldOptions::has_lazy() const 
{\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void FieldOptions::set_has_lazy() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void FieldOptions::clear_has_lazy() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void FieldOptions::clear_lazy() {\n  lazy_ = false;\n  clear_has_lazy();\n}\ninline bool FieldOptions::lazy() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldOptions.lazy)\n  return lazy_;\n}\ninline void FieldOptions::set_lazy(bool value) {\n  set_has_lazy();\n  lazy_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FieldOptions.lazy)\n}\n\n// optional bool deprecated = 3 [default = false];\ninline bool FieldOptions::has_deprecated() const {\n  return (_has_bits_[0] & 0x00000010u) != 0;\n}\ninline void FieldOptions::set_has_deprecated() {\n  _has_bits_[0] |= 0x00000010u;\n}\ninline void FieldOptions::clear_has_deprecated() {\n  _has_bits_[0] &= ~0x00000010u;\n}\ninline void FieldOptions::clear_deprecated() {\n  deprecated_ = false;\n  clear_has_deprecated();\n}\ninline bool FieldOptions::deprecated() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldOptions.deprecated)\n  return deprecated_;\n}\ninline void FieldOptions::set_deprecated(bool value) {\n  set_has_deprecated();\n  deprecated_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FieldOptions.deprecated)\n}\n\n// optional bool weak = 10 [default = false];\ninline bool FieldOptions::has_weak() const {\n  return (_has_bits_[0] & 0x00000020u) != 0;\n}\ninline void FieldOptions::set_has_weak() {\n  _has_bits_[0] |= 0x00000020u;\n}\ninline void FieldOptions::clear_has_weak() {\n  _has_bits_[0] &= ~0x00000020u;\n}\ninline void FieldOptions::clear_weak() {\n  weak_ = false;\n  clear_has_weak();\n}\ninline bool FieldOptions::weak() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldOptions.weak)\n  return weak_;\n}\ninline void FieldOptions::set_weak(bool value) {\n  set_has_weak();\n  weak_ = value;\n  
// @@protoc_insertion_point(field_set:google.protobuf.FieldOptions.weak)\n}\n\n// repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\ninline int FieldOptions::uninterpreted_option_size() const {\n  return uninterpreted_option_.size();\n}\ninline void FieldOptions::clear_uninterpreted_option() {\n  uninterpreted_option_.Clear();\n}\ninline const ::google::protobuf::UninterpretedOption& FieldOptions::uninterpreted_option(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldOptions.uninterpreted_option)\n  return uninterpreted_option_.Get(index);\n}\ninline ::google::protobuf::UninterpretedOption* FieldOptions::mutable_uninterpreted_option(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.FieldOptions.uninterpreted_option)\n  return uninterpreted_option_.Mutable(index);\n}\ninline ::google::protobuf::UninterpretedOption* FieldOptions::add_uninterpreted_option() {\n  // @@protoc_insertion_point(field_add:google.protobuf.FieldOptions.uninterpreted_option)\n  return uninterpreted_option_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\nFieldOptions::mutable_uninterpreted_option() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.FieldOptions.uninterpreted_option)\n  return &uninterpreted_option_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\nFieldOptions::uninterpreted_option() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.FieldOptions.uninterpreted_option)\n  return uninterpreted_option_;\n}\n\ninline const FieldOptions* FieldOptions::internal_default_instance() {\n  return &FieldOptions_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// OneofOptions\n\n// repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\ninline int OneofOptions::uninterpreted_option_size() const {\n  return 
uninterpreted_option_.size();\n}\ninline void OneofOptions::clear_uninterpreted_option() {\n  uninterpreted_option_.Clear();\n}\ninline const ::google::protobuf::UninterpretedOption& OneofOptions::uninterpreted_option(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.OneofOptions.uninterpreted_option)\n  return uninterpreted_option_.Get(index);\n}\ninline ::google::protobuf::UninterpretedOption* OneofOptions::mutable_uninterpreted_option(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.OneofOptions.uninterpreted_option)\n  return uninterpreted_option_.Mutable(index);\n}\ninline ::google::protobuf::UninterpretedOption* OneofOptions::add_uninterpreted_option() {\n  // @@protoc_insertion_point(field_add:google.protobuf.OneofOptions.uninterpreted_option)\n  return uninterpreted_option_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\nOneofOptions::mutable_uninterpreted_option() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.OneofOptions.uninterpreted_option)\n  return &uninterpreted_option_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\nOneofOptions::uninterpreted_option() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.OneofOptions.uninterpreted_option)\n  return uninterpreted_option_;\n}\n\ninline const OneofOptions* OneofOptions::internal_default_instance() {\n  return &OneofOptions_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// EnumOptions\n\n// optional bool allow_alias = 2;\ninline bool EnumOptions::has_allow_alias() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void EnumOptions::set_has_allow_alias() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void EnumOptions::clear_has_allow_alias() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void EnumOptions::clear_allow_alias() {\n  allow_alias_ = 
false;\n  clear_has_allow_alias();\n}\ninline bool EnumOptions::allow_alias() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.EnumOptions.allow_alias)\n  return allow_alias_;\n}\ninline void EnumOptions::set_allow_alias(bool value) {\n  set_has_allow_alias();\n  allow_alias_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.EnumOptions.allow_alias)\n}\n\n// optional bool deprecated = 3 [default = false];\ninline bool EnumOptions::has_deprecated() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void EnumOptions::set_has_deprecated() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void EnumOptions::clear_has_deprecated() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void EnumOptions::clear_deprecated() {\n  deprecated_ = false;\n  clear_has_deprecated();\n}\ninline bool EnumOptions::deprecated() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.EnumOptions.deprecated)\n  return deprecated_;\n}\ninline void EnumOptions::set_deprecated(bool value) {\n  set_has_deprecated();\n  deprecated_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.EnumOptions.deprecated)\n}\n\n// repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\ninline int EnumOptions::uninterpreted_option_size() const {\n  return uninterpreted_option_.size();\n}\ninline void EnumOptions::clear_uninterpreted_option() {\n  uninterpreted_option_.Clear();\n}\ninline const ::google::protobuf::UninterpretedOption& EnumOptions::uninterpreted_option(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.EnumOptions.uninterpreted_option)\n  return uninterpreted_option_.Get(index);\n}\ninline ::google::protobuf::UninterpretedOption* EnumOptions::mutable_uninterpreted_option(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.EnumOptions.uninterpreted_option)\n  return uninterpreted_option_.Mutable(index);\n}\ninline ::google::protobuf::UninterpretedOption* 
EnumOptions::add_uninterpreted_option() {\n  // @@protoc_insertion_point(field_add:google.protobuf.EnumOptions.uninterpreted_option)\n  return uninterpreted_option_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\nEnumOptions::mutable_uninterpreted_option() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.EnumOptions.uninterpreted_option)\n  return &uninterpreted_option_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\nEnumOptions::uninterpreted_option() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.EnumOptions.uninterpreted_option)\n  return uninterpreted_option_;\n}\n\ninline const EnumOptions* EnumOptions::internal_default_instance() {\n  return &EnumOptions_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// EnumValueOptions\n\n// optional bool deprecated = 1 [default = false];\ninline bool EnumValueOptions::has_deprecated() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void EnumValueOptions::set_has_deprecated() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void EnumValueOptions::clear_has_deprecated() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void EnumValueOptions::clear_deprecated() {\n  deprecated_ = false;\n  clear_has_deprecated();\n}\ninline bool EnumValueOptions::deprecated() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.EnumValueOptions.deprecated)\n  return deprecated_;\n}\ninline void EnumValueOptions::set_deprecated(bool value) {\n  set_has_deprecated();\n  deprecated_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.EnumValueOptions.deprecated)\n}\n\n// repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\ninline int EnumValueOptions::uninterpreted_option_size() const {\n  return uninterpreted_option_.size();\n}\ninline void EnumValueOptions::clear_uninterpreted_option() {\n  
uninterpreted_option_.Clear();\n}\ninline const ::google::protobuf::UninterpretedOption& EnumValueOptions::uninterpreted_option(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.EnumValueOptions.uninterpreted_option)\n  return uninterpreted_option_.Get(index);\n}\ninline ::google::protobuf::UninterpretedOption* EnumValueOptions::mutable_uninterpreted_option(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.EnumValueOptions.uninterpreted_option)\n  return uninterpreted_option_.Mutable(index);\n}\ninline ::google::protobuf::UninterpretedOption* EnumValueOptions::add_uninterpreted_option() {\n  // @@protoc_insertion_point(field_add:google.protobuf.EnumValueOptions.uninterpreted_option)\n  return uninterpreted_option_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\nEnumValueOptions::mutable_uninterpreted_option() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.EnumValueOptions.uninterpreted_option)\n  return &uninterpreted_option_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\nEnumValueOptions::uninterpreted_option() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.EnumValueOptions.uninterpreted_option)\n  return uninterpreted_option_;\n}\n\ninline const EnumValueOptions* EnumValueOptions::internal_default_instance() {\n  return &EnumValueOptions_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// ServiceOptions\n\n// optional bool deprecated = 33 [default = false];\ninline bool ServiceOptions::has_deprecated() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void ServiceOptions::set_has_deprecated() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void ServiceOptions::clear_has_deprecated() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void ServiceOptions::clear_deprecated() {\n  deprecated_ = false;\n  
clear_has_deprecated();\n}\ninline bool ServiceOptions::deprecated() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.ServiceOptions.deprecated)\n  return deprecated_;\n}\ninline void ServiceOptions::set_deprecated(bool value) {\n  set_has_deprecated();\n  deprecated_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.ServiceOptions.deprecated)\n}\n\n// repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\ninline int ServiceOptions::uninterpreted_option_size() const {\n  return uninterpreted_option_.size();\n}\ninline void ServiceOptions::clear_uninterpreted_option() {\n  uninterpreted_option_.Clear();\n}\ninline const ::google::protobuf::UninterpretedOption& ServiceOptions::uninterpreted_option(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.ServiceOptions.uninterpreted_option)\n  return uninterpreted_option_.Get(index);\n}\ninline ::google::protobuf::UninterpretedOption* ServiceOptions::mutable_uninterpreted_option(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.ServiceOptions.uninterpreted_option)\n  return uninterpreted_option_.Mutable(index);\n}\ninline ::google::protobuf::UninterpretedOption* ServiceOptions::add_uninterpreted_option() {\n  // @@protoc_insertion_point(field_add:google.protobuf.ServiceOptions.uninterpreted_option)\n  return uninterpreted_option_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\nServiceOptions::mutable_uninterpreted_option() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.ServiceOptions.uninterpreted_option)\n  return &uninterpreted_option_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\nServiceOptions::uninterpreted_option() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.ServiceOptions.uninterpreted_option)\n  return uninterpreted_option_;\n}\n\ninline const ServiceOptions* 
ServiceOptions::internal_default_instance() {\n  return &ServiceOptions_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// MethodOptions\n\n// optional bool deprecated = 33 [default = false];\ninline bool MethodOptions::has_deprecated() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void MethodOptions::set_has_deprecated() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void MethodOptions::clear_has_deprecated() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void MethodOptions::clear_deprecated() {\n  deprecated_ = false;\n  clear_has_deprecated();\n}\ninline bool MethodOptions::deprecated() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.MethodOptions.deprecated)\n  return deprecated_;\n}\ninline void MethodOptions::set_deprecated(bool value) {\n  set_has_deprecated();\n  deprecated_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.MethodOptions.deprecated)\n}\n\n// repeated .google.protobuf.UninterpretedOption uninterpreted_option = 999;\ninline int MethodOptions::uninterpreted_option_size() const {\n  return uninterpreted_option_.size();\n}\ninline void MethodOptions::clear_uninterpreted_option() {\n  uninterpreted_option_.Clear();\n}\ninline const ::google::protobuf::UninterpretedOption& MethodOptions::uninterpreted_option(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.MethodOptions.uninterpreted_option)\n  return uninterpreted_option_.Get(index);\n}\ninline ::google::protobuf::UninterpretedOption* MethodOptions::mutable_uninterpreted_option(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.MethodOptions.uninterpreted_option)\n  return uninterpreted_option_.Mutable(index);\n}\ninline ::google::protobuf::UninterpretedOption* MethodOptions::add_uninterpreted_option() {\n  // @@protoc_insertion_point(field_add:google.protobuf.MethodOptions.uninterpreted_option)\n  return 
uninterpreted_option_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >*\nMethodOptions::mutable_uninterpreted_option() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.MethodOptions.uninterpreted_option)\n  return &uninterpreted_option_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption >&\nMethodOptions::uninterpreted_option() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.MethodOptions.uninterpreted_option)\n  return uninterpreted_option_;\n}\n\ninline const MethodOptions* MethodOptions::internal_default_instance() {\n  return &MethodOptions_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// UninterpretedOption_NamePart\n\n// required string name_part = 1;\ninline bool UninterpretedOption_NamePart::has_name_part() const {\n  return (_has_bits_[0] & 0x00000001u) != 0;\n}\ninline void UninterpretedOption_NamePart::set_has_name_part() {\n  _has_bits_[0] |= 0x00000001u;\n}\ninline void UninterpretedOption_NamePart::clear_has_name_part() {\n  _has_bits_[0] &= ~0x00000001u;\n}\ninline void UninterpretedOption_NamePart::clear_name_part() {\n  name_part_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_name_part();\n}\ninline const ::std::string& UninterpretedOption_NamePart::name_part() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.UninterpretedOption.NamePart.name_part)\n  return name_part_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void UninterpretedOption_NamePart::set_name_part(const ::std::string& value) {\n  set_has_name_part();\n  name_part_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.UninterpretedOption.NamePart.name_part)\n}\ninline void 
UninterpretedOption_NamePart::set_name_part(const char* value) {\n  set_has_name_part();\n  name_part_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.UninterpretedOption.NamePart.name_part)\n}\ninline void UninterpretedOption_NamePart::set_name_part(const char* value, size_t size) {\n  set_has_name_part();\n  name_part_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.UninterpretedOption.NamePart.name_part)\n}\ninline ::std::string* UninterpretedOption_NamePart::mutable_name_part() {\n  set_has_name_part();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.UninterpretedOption.NamePart.name_part)\n  return name_part_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* UninterpretedOption_NamePart::release_name_part() {\n  // @@protoc_insertion_point(field_release:google.protobuf.UninterpretedOption.NamePart.name_part)\n  clear_has_name_part();\n  return name_part_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void UninterpretedOption_NamePart::set_allocated_name_part(::std::string* name_part) {\n  if (name_part != NULL) {\n    set_has_name_part();\n  } else {\n    clear_has_name_part();\n  }\n  name_part_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name_part);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.UninterpretedOption.NamePart.name_part)\n}\n\n// required bool is_extension = 2;\ninline bool UninterpretedOption_NamePart::has_is_extension() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void UninterpretedOption_NamePart::set_has_is_extension() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void 
UninterpretedOption_NamePart::clear_has_is_extension() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void UninterpretedOption_NamePart::clear_is_extension() {\n  is_extension_ = false;\n  clear_has_is_extension();\n}\ninline bool UninterpretedOption_NamePart::is_extension() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.UninterpretedOption.NamePart.is_extension)\n  return is_extension_;\n}\ninline void UninterpretedOption_NamePart::set_is_extension(bool value) {\n  set_has_is_extension();\n  is_extension_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.UninterpretedOption.NamePart.is_extension)\n}\n\ninline const UninterpretedOption_NamePart* UninterpretedOption_NamePart::internal_default_instance() {\n  return &UninterpretedOption_NamePart_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// UninterpretedOption\n\n// repeated .google.protobuf.UninterpretedOption.NamePart name = 2;\ninline int UninterpretedOption::name_size() const {\n  return name_.size();\n}\ninline void UninterpretedOption::clear_name() {\n  name_.Clear();\n}\ninline const ::google::protobuf::UninterpretedOption_NamePart& UninterpretedOption::name(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.UninterpretedOption.name)\n  return name_.Get(index);\n}\ninline ::google::protobuf::UninterpretedOption_NamePart* UninterpretedOption::mutable_name(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.UninterpretedOption.name)\n  return name_.Mutable(index);\n}\ninline ::google::protobuf::UninterpretedOption_NamePart* UninterpretedOption::add_name() {\n  // @@protoc_insertion_point(field_add:google.protobuf.UninterpretedOption.name)\n  return name_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption_NamePart >*\nUninterpretedOption::mutable_name() {\n  // 
@@protoc_insertion_point(field_mutable_list:google.protobuf.UninterpretedOption.name)\n  return &name_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::UninterpretedOption_NamePart >&\nUninterpretedOption::name() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.UninterpretedOption.name)\n  return name_;\n}\n\n// optional string identifier_value = 3;\ninline bool UninterpretedOption::has_identifier_value() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void UninterpretedOption::set_has_identifier_value() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void UninterpretedOption::clear_has_identifier_value() {\n  _has_bits_[0] &= ~0x00000002u;\n}\ninline void UninterpretedOption::clear_identifier_value() {\n  identifier_value_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_identifier_value();\n}\ninline const ::std::string& UninterpretedOption::identifier_value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.UninterpretedOption.identifier_value)\n  return identifier_value_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void UninterpretedOption::set_identifier_value(const ::std::string& value) {\n  set_has_identifier_value();\n  identifier_value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.UninterpretedOption.identifier_value)\n}\ninline void UninterpretedOption::set_identifier_value(const char* value) {\n  set_has_identifier_value();\n  identifier_value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.UninterpretedOption.identifier_value)\n}\ninline void UninterpretedOption::set_identifier_value(const char* value, size_t size) {\n  set_has_identifier_value();\n  
identifier_value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.UninterpretedOption.identifier_value)\n}\ninline ::std::string* UninterpretedOption::mutable_identifier_value() {\n  set_has_identifier_value();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.UninterpretedOption.identifier_value)\n  return identifier_value_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* UninterpretedOption::release_identifier_value() {\n  // @@protoc_insertion_point(field_release:google.protobuf.UninterpretedOption.identifier_value)\n  clear_has_identifier_value();\n  return identifier_value_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void UninterpretedOption::set_allocated_identifier_value(::std::string* identifier_value) {\n  if (identifier_value != NULL) {\n    set_has_identifier_value();\n  } else {\n    clear_has_identifier_value();\n  }\n  identifier_value_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), identifier_value);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.UninterpretedOption.identifier_value)\n}\n\n// optional uint64 positive_int_value = 4;\ninline bool UninterpretedOption::has_positive_int_value() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void UninterpretedOption::set_has_positive_int_value() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void UninterpretedOption::clear_has_positive_int_value() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void UninterpretedOption::clear_positive_int_value() {\n  positive_int_value_ = GOOGLE_ULONGLONG(0);\n  clear_has_positive_int_value();\n}\ninline ::google::protobuf::uint64 UninterpretedOption::positive_int_value() const {\n  // 
@@protoc_insertion_point(field_get:google.protobuf.UninterpretedOption.positive_int_value)\n  return positive_int_value_;\n}\ninline void UninterpretedOption::set_positive_int_value(::google::protobuf::uint64 value) {\n  set_has_positive_int_value();\n  positive_int_value_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.UninterpretedOption.positive_int_value)\n}\n\n// optional int64 negative_int_value = 5;\ninline bool UninterpretedOption::has_negative_int_value() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void UninterpretedOption::set_has_negative_int_value() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void UninterpretedOption::clear_has_negative_int_value() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void UninterpretedOption::clear_negative_int_value() {\n  negative_int_value_ = GOOGLE_LONGLONG(0);\n  clear_has_negative_int_value();\n}\ninline ::google::protobuf::int64 UninterpretedOption::negative_int_value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.UninterpretedOption.negative_int_value)\n  return negative_int_value_;\n}\ninline void UninterpretedOption::set_negative_int_value(::google::protobuf::int64 value) {\n  set_has_negative_int_value();\n  negative_int_value_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.UninterpretedOption.negative_int_value)\n}\n\n// optional double double_value = 6;\ninline bool UninterpretedOption::has_double_value() const {\n  return (_has_bits_[0] & 0x00000010u) != 0;\n}\ninline void UninterpretedOption::set_has_double_value() {\n  _has_bits_[0] |= 0x00000010u;\n}\ninline void UninterpretedOption::clear_has_double_value() {\n  _has_bits_[0] &= ~0x00000010u;\n}\ninline void UninterpretedOption::clear_double_value() {\n  double_value_ = 0;\n  clear_has_double_value();\n}\ninline double UninterpretedOption::double_value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.UninterpretedOption.double_value)\n  return 
double_value_;\n}\ninline void UninterpretedOption::set_double_value(double value) {\n  set_has_double_value();\n  double_value_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.UninterpretedOption.double_value)\n}\n\n// optional bytes string_value = 7;\ninline bool UninterpretedOption::has_string_value() const {\n  return (_has_bits_[0] & 0x00000020u) != 0;\n}\ninline void UninterpretedOption::set_has_string_value() {\n  _has_bits_[0] |= 0x00000020u;\n}\ninline void UninterpretedOption::clear_has_string_value() {\n  _has_bits_[0] &= ~0x00000020u;\n}\ninline void UninterpretedOption::clear_string_value() {\n  string_value_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_string_value();\n}\ninline const ::std::string& UninterpretedOption::string_value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.UninterpretedOption.string_value)\n  return string_value_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void UninterpretedOption::set_string_value(const ::std::string& value) {\n  set_has_string_value();\n  string_value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.UninterpretedOption.string_value)\n}\ninline void UninterpretedOption::set_string_value(const char* value) {\n  set_has_string_value();\n  string_value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.UninterpretedOption.string_value)\n}\ninline void UninterpretedOption::set_string_value(const void* value, size_t size) {\n  set_has_string_value();\n  string_value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // 
@@protoc_insertion_point(field_set_pointer:google.protobuf.UninterpretedOption.string_value)\n}\ninline ::std::string* UninterpretedOption::mutable_string_value() {\n  set_has_string_value();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.UninterpretedOption.string_value)\n  return string_value_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* UninterpretedOption::release_string_value() {\n  // @@protoc_insertion_point(field_release:google.protobuf.UninterpretedOption.string_value)\n  clear_has_string_value();\n  return string_value_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void UninterpretedOption::set_allocated_string_value(::std::string* string_value) {\n  if (string_value != NULL) {\n    set_has_string_value();\n  } else {\n    clear_has_string_value();\n  }\n  string_value_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), string_value);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.UninterpretedOption.string_value)\n}\n\n// optional string aggregate_value = 8;\ninline bool UninterpretedOption::has_aggregate_value() const {\n  return (_has_bits_[0] & 0x00000040u) != 0;\n}\ninline void UninterpretedOption::set_has_aggregate_value() {\n  _has_bits_[0] |= 0x00000040u;\n}\ninline void UninterpretedOption::clear_has_aggregate_value() {\n  _has_bits_[0] &= ~0x00000040u;\n}\ninline void UninterpretedOption::clear_aggregate_value() {\n  aggregate_value_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_aggregate_value();\n}\ninline const ::std::string& UninterpretedOption::aggregate_value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.UninterpretedOption.aggregate_value)\n  return aggregate_value_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void UninterpretedOption::set_aggregate_value(const 
::std::string& value) {\n  set_has_aggregate_value();\n  aggregate_value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.UninterpretedOption.aggregate_value)\n}\ninline void UninterpretedOption::set_aggregate_value(const char* value) {\n  set_has_aggregate_value();\n  aggregate_value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.UninterpretedOption.aggregate_value)\n}\ninline void UninterpretedOption::set_aggregate_value(const char* value, size_t size) {\n  set_has_aggregate_value();\n  aggregate_value_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.UninterpretedOption.aggregate_value)\n}\ninline ::std::string* UninterpretedOption::mutable_aggregate_value() {\n  set_has_aggregate_value();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.UninterpretedOption.aggregate_value)\n  return aggregate_value_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* UninterpretedOption::release_aggregate_value() {\n  // @@protoc_insertion_point(field_release:google.protobuf.UninterpretedOption.aggregate_value)\n  clear_has_aggregate_value();\n  return aggregate_value_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void UninterpretedOption::set_allocated_aggregate_value(::std::string* aggregate_value) {\n  if (aggregate_value != NULL) {\n    set_has_aggregate_value();\n  } else {\n    clear_has_aggregate_value();\n  }\n  aggregate_value_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), aggregate_value);\n  // 
@@protoc_insertion_point(field_set_allocated:google.protobuf.UninterpretedOption.aggregate_value)\n}\n\ninline const UninterpretedOption* UninterpretedOption::internal_default_instance() {\n  return &UninterpretedOption_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// SourceCodeInfo_Location\n\n// repeated int32 path = 1 [packed = true];\ninline int SourceCodeInfo_Location::path_size() const {\n  return path_.size();\n}\ninline void SourceCodeInfo_Location::clear_path() {\n  path_.Clear();\n}\ninline ::google::protobuf::int32 SourceCodeInfo_Location::path(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.SourceCodeInfo.Location.path)\n  return path_.Get(index);\n}\ninline void SourceCodeInfo_Location::set_path(int index, ::google::protobuf::int32 value) {\n  path_.Set(index, value);\n  // @@protoc_insertion_point(field_set:google.protobuf.SourceCodeInfo.Location.path)\n}\ninline void SourceCodeInfo_Location::add_path(::google::protobuf::int32 value) {\n  path_.Add(value);\n  // @@protoc_insertion_point(field_add:google.protobuf.SourceCodeInfo.Location.path)\n}\ninline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\nSourceCodeInfo_Location::path() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.SourceCodeInfo.Location.path)\n  return path_;\n}\ninline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\nSourceCodeInfo_Location::mutable_path() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.SourceCodeInfo.Location.path)\n  return &path_;\n}\n\n// repeated int32 span = 2 [packed = true];\ninline int SourceCodeInfo_Location::span_size() const {\n  return span_.size();\n}\ninline void SourceCodeInfo_Location::clear_span() {\n  span_.Clear();\n}\ninline ::google::protobuf::int32 SourceCodeInfo_Location::span(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.SourceCodeInfo.Location.span)\n  
return span_.Get(index);\n}\ninline void SourceCodeInfo_Location::set_span(int index, ::google::protobuf::int32 value) {\n  span_.Set(index, value);\n  // @@protoc_insertion_point(field_set:google.protobuf.SourceCodeInfo.Location.span)\n}\ninline void SourceCodeInfo_Location::add_span(::google::protobuf::int32 value) {\n  span_.Add(value);\n  // @@protoc_insertion_point(field_add:google.protobuf.SourceCodeInfo.Location.span)\n}\ninline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\nSourceCodeInfo_Location::span() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.SourceCodeInfo.Location.span)\n  return span_;\n}\ninline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\nSourceCodeInfo_Location::mutable_span() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.SourceCodeInfo.Location.span)\n  return &span_;\n}\n\n// optional string leading_comments = 3;\ninline bool SourceCodeInfo_Location::has_leading_comments() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void SourceCodeInfo_Location::set_has_leading_comments() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void SourceCodeInfo_Location::clear_has_leading_comments() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void SourceCodeInfo_Location::clear_leading_comments() {\n  leading_comments_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_leading_comments();\n}\ninline const ::std::string& SourceCodeInfo_Location::leading_comments() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.SourceCodeInfo.Location.leading_comments)\n  return leading_comments_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void SourceCodeInfo_Location::set_leading_comments(const ::std::string& value) {\n  set_has_leading_comments();\n  leading_comments_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // 
@@protoc_insertion_point(field_set:google.protobuf.SourceCodeInfo.Location.leading_comments)\n}\ninline void SourceCodeInfo_Location::set_leading_comments(const char* value) {\n  set_has_leading_comments();\n  leading_comments_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.SourceCodeInfo.Location.leading_comments)\n}\ninline void SourceCodeInfo_Location::set_leading_comments(const char* value, size_t size) {\n  set_has_leading_comments();\n  leading_comments_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.SourceCodeInfo.Location.leading_comments)\n}\ninline ::std::string* SourceCodeInfo_Location::mutable_leading_comments() {\n  set_has_leading_comments();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.SourceCodeInfo.Location.leading_comments)\n  return leading_comments_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* SourceCodeInfo_Location::release_leading_comments() {\n  // @@protoc_insertion_point(field_release:google.protobuf.SourceCodeInfo.Location.leading_comments)\n  clear_has_leading_comments();\n  return leading_comments_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void SourceCodeInfo_Location::set_allocated_leading_comments(::std::string* leading_comments) {\n  if (leading_comments != NULL) {\n    set_has_leading_comments();\n  } else {\n    clear_has_leading_comments();\n  }\n  leading_comments_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), leading_comments);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.SourceCodeInfo.Location.leading_comments)\n}\n\n// optional string trailing_comments = 4;\ninline bool 
SourceCodeInfo_Location::has_trailing_comments() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void SourceCodeInfo_Location::set_has_trailing_comments() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void SourceCodeInfo_Location::clear_has_trailing_comments() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void SourceCodeInfo_Location::clear_trailing_comments() {\n  trailing_comments_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_trailing_comments();\n}\ninline const ::std::string& SourceCodeInfo_Location::trailing_comments() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.SourceCodeInfo.Location.trailing_comments)\n  return trailing_comments_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void SourceCodeInfo_Location::set_trailing_comments(const ::std::string& value) {\n  set_has_trailing_comments();\n  trailing_comments_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.SourceCodeInfo.Location.trailing_comments)\n}\ninline void SourceCodeInfo_Location::set_trailing_comments(const char* value) {\n  set_has_trailing_comments();\n  trailing_comments_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.SourceCodeInfo.Location.trailing_comments)\n}\ninline void SourceCodeInfo_Location::set_trailing_comments(const char* value, size_t size) {\n  set_has_trailing_comments();\n  trailing_comments_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.SourceCodeInfo.Location.trailing_comments)\n}\ninline ::std::string* SourceCodeInfo_Location::mutable_trailing_comments() {\n  set_has_trailing_comments();\n  // 
@@protoc_insertion_point(field_mutable:google.protobuf.SourceCodeInfo.Location.trailing_comments)\n  return trailing_comments_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* SourceCodeInfo_Location::release_trailing_comments() {\n  // @@protoc_insertion_point(field_release:google.protobuf.SourceCodeInfo.Location.trailing_comments)\n  clear_has_trailing_comments();\n  return trailing_comments_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void SourceCodeInfo_Location::set_allocated_trailing_comments(::std::string* trailing_comments) {\n  if (trailing_comments != NULL) {\n    set_has_trailing_comments();\n  } else {\n    clear_has_trailing_comments();\n  }\n  trailing_comments_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), trailing_comments);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.SourceCodeInfo.Location.trailing_comments)\n}\n\n// repeated string leading_detached_comments = 6;\ninline int SourceCodeInfo_Location::leading_detached_comments_size() const {\n  return leading_detached_comments_.size();\n}\ninline void SourceCodeInfo_Location::clear_leading_detached_comments() {\n  leading_detached_comments_.Clear();\n}\ninline const ::std::string& SourceCodeInfo_Location::leading_detached_comments(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.SourceCodeInfo.Location.leading_detached_comments)\n  return leading_detached_comments_.Get(index);\n}\ninline ::std::string* SourceCodeInfo_Location::mutable_leading_detached_comments(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.SourceCodeInfo.Location.leading_detached_comments)\n  return leading_detached_comments_.Mutable(index);\n}\ninline void SourceCodeInfo_Location::set_leading_detached_comments(int index, const ::std::string& value) {\n  // 
@@protoc_insertion_point(field_set:google.protobuf.SourceCodeInfo.Location.leading_detached_comments)\n  leading_detached_comments_.Mutable(index)->assign(value);\n}\ninline void SourceCodeInfo_Location::set_leading_detached_comments(int index, const char* value) {\n  leading_detached_comments_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:google.protobuf.SourceCodeInfo.Location.leading_detached_comments)\n}\ninline void SourceCodeInfo_Location::set_leading_detached_comments(int index, const char* value, size_t size) {\n  leading_detached_comments_.Mutable(index)->assign(\n    reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.SourceCodeInfo.Location.leading_detached_comments)\n}\ninline ::std::string* SourceCodeInfo_Location::add_leading_detached_comments() {\n  // @@protoc_insertion_point(field_add_mutable:google.protobuf.SourceCodeInfo.Location.leading_detached_comments)\n  return leading_detached_comments_.Add();\n}\ninline void SourceCodeInfo_Location::add_leading_detached_comments(const ::std::string& value) {\n  leading_detached_comments_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:google.protobuf.SourceCodeInfo.Location.leading_detached_comments)\n}\ninline void SourceCodeInfo_Location::add_leading_detached_comments(const char* value) {\n  leading_detached_comments_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add_char:google.protobuf.SourceCodeInfo.Location.leading_detached_comments)\n}\ninline void SourceCodeInfo_Location::add_leading_detached_comments(const char* value, size_t size) {\n  leading_detached_comments_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:google.protobuf.SourceCodeInfo.Location.leading_detached_comments)\n}\ninline const ::google::protobuf::RepeatedPtrField< ::std::string>&\nSourceCodeInfo_Location::leading_detached_comments() const {\n  // 
@@protoc_insertion_point(field_list:google.protobuf.SourceCodeInfo.Location.leading_detached_comments)\n  return leading_detached_comments_;\n}\ninline ::google::protobuf::RepeatedPtrField< ::std::string>*\nSourceCodeInfo_Location::mutable_leading_detached_comments() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.SourceCodeInfo.Location.leading_detached_comments)\n  return &leading_detached_comments_;\n}\n\ninline const SourceCodeInfo_Location* SourceCodeInfo_Location::internal_default_instance() {\n  return &SourceCodeInfo_Location_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// SourceCodeInfo\n\n// repeated .google.protobuf.SourceCodeInfo.Location location = 1;\ninline int SourceCodeInfo::location_size() const {\n  return location_.size();\n}\ninline void SourceCodeInfo::clear_location() {\n  location_.Clear();\n}\ninline const ::google::protobuf::SourceCodeInfo_Location& SourceCodeInfo::location(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.SourceCodeInfo.location)\n  return location_.Get(index);\n}\ninline ::google::protobuf::SourceCodeInfo_Location* SourceCodeInfo::mutable_location(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.SourceCodeInfo.location)\n  return location_.Mutable(index);\n}\ninline ::google::protobuf::SourceCodeInfo_Location* SourceCodeInfo::add_location() {\n  // @@protoc_insertion_point(field_add:google.protobuf.SourceCodeInfo.location)\n  return location_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::SourceCodeInfo_Location >*\nSourceCodeInfo::mutable_location() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.SourceCodeInfo.location)\n  return &location_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::SourceCodeInfo_Location >&\nSourceCodeInfo::location() const {\n  // 
@@protoc_insertion_point(field_list:google.protobuf.SourceCodeInfo.location)\n  return location_;\n}\n\ninline const SourceCodeInfo* SourceCodeInfo::internal_default_instance() {\n  return &SourceCodeInfo_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// GeneratedCodeInfo_Annotation\n\n// repeated int32 path = 1 [packed = true];\ninline int GeneratedCodeInfo_Annotation::path_size() const {\n  return path_.size();\n}\ninline void GeneratedCodeInfo_Annotation::clear_path() {\n  path_.Clear();\n}\ninline ::google::protobuf::int32 GeneratedCodeInfo_Annotation::path(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.GeneratedCodeInfo.Annotation.path)\n  return path_.Get(index);\n}\ninline void GeneratedCodeInfo_Annotation::set_path(int index, ::google::protobuf::int32 value) {\n  path_.Set(index, value);\n  // @@protoc_insertion_point(field_set:google.protobuf.GeneratedCodeInfo.Annotation.path)\n}\ninline void GeneratedCodeInfo_Annotation::add_path(::google::protobuf::int32 value) {\n  path_.Add(value);\n  // @@protoc_insertion_point(field_add:google.protobuf.GeneratedCodeInfo.Annotation.path)\n}\ninline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&\nGeneratedCodeInfo_Annotation::path() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.GeneratedCodeInfo.Annotation.path)\n  return path_;\n}\ninline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*\nGeneratedCodeInfo_Annotation::mutable_path() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.GeneratedCodeInfo.Annotation.path)\n  return &path_;\n}\n\n// optional string source_file = 2;\ninline bool GeneratedCodeInfo_Annotation::has_source_file() const {\n  return (_has_bits_[0] & 0x00000002u) != 0;\n}\ninline void GeneratedCodeInfo_Annotation::set_has_source_file() {\n  _has_bits_[0] |= 0x00000002u;\n}\ninline void GeneratedCodeInfo_Annotation::clear_has_source_file() {\n 
 _has_bits_[0] &= ~0x00000002u;\n}\ninline void GeneratedCodeInfo_Annotation::clear_source_file() {\n  source_file_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  clear_has_source_file();\n}\ninline const ::std::string& GeneratedCodeInfo_Annotation::source_file() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.GeneratedCodeInfo.Annotation.source_file)\n  return source_file_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void GeneratedCodeInfo_Annotation::set_source_file(const ::std::string& value) {\n  set_has_source_file();\n  source_file_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.GeneratedCodeInfo.Annotation.source_file)\n}\ninline void GeneratedCodeInfo_Annotation::set_source_file(const char* value) {\n  set_has_source_file();\n  source_file_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.GeneratedCodeInfo.Annotation.source_file)\n}\ninline void GeneratedCodeInfo_Annotation::set_source_file(const char* value, size_t size) {\n  set_has_source_file();\n  source_file_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.GeneratedCodeInfo.Annotation.source_file)\n}\ninline ::std::string* GeneratedCodeInfo_Annotation::mutable_source_file() {\n  set_has_source_file();\n  // @@protoc_insertion_point(field_mutable:google.protobuf.GeneratedCodeInfo.Annotation.source_file)\n  return source_file_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* GeneratedCodeInfo_Annotation::release_source_file() {\n  // 
@@protoc_insertion_point(field_release:google.protobuf.GeneratedCodeInfo.Annotation.source_file)\n  clear_has_source_file();\n  return source_file_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void GeneratedCodeInfo_Annotation::set_allocated_source_file(::std::string* source_file) {\n  if (source_file != NULL) {\n    set_has_source_file();\n  } else {\n    clear_has_source_file();\n  }\n  source_file_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), source_file);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.GeneratedCodeInfo.Annotation.source_file)\n}\n\n// optional int32 begin = 3;\ninline bool GeneratedCodeInfo_Annotation::has_begin() const {\n  return (_has_bits_[0] & 0x00000004u) != 0;\n}\ninline void GeneratedCodeInfo_Annotation::set_has_begin() {\n  _has_bits_[0] |= 0x00000004u;\n}\ninline void GeneratedCodeInfo_Annotation::clear_has_begin() {\n  _has_bits_[0] &= ~0x00000004u;\n}\ninline void GeneratedCodeInfo_Annotation::clear_begin() {\n  begin_ = 0;\n  clear_has_begin();\n}\ninline ::google::protobuf::int32 GeneratedCodeInfo_Annotation::begin() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.GeneratedCodeInfo.Annotation.begin)\n  return begin_;\n}\ninline void GeneratedCodeInfo_Annotation::set_begin(::google::protobuf::int32 value) {\n  set_has_begin();\n  begin_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.GeneratedCodeInfo.Annotation.begin)\n}\n\n// optional int32 end = 4;\ninline bool GeneratedCodeInfo_Annotation::has_end() const {\n  return (_has_bits_[0] & 0x00000008u) != 0;\n}\ninline void GeneratedCodeInfo_Annotation::set_has_end() {\n  _has_bits_[0] |= 0x00000008u;\n}\ninline void GeneratedCodeInfo_Annotation::clear_has_end() {\n  _has_bits_[0] &= ~0x00000008u;\n}\ninline void GeneratedCodeInfo_Annotation::clear_end() {\n  end_ = 0;\n  clear_has_end();\n}\ninline ::google::protobuf::int32 
GeneratedCodeInfo_Annotation::end() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.GeneratedCodeInfo.Annotation.end)\n  return end_;\n}\ninline void GeneratedCodeInfo_Annotation::set_end(::google::protobuf::int32 value) {\n  set_has_end();\n  end_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.GeneratedCodeInfo.Annotation.end)\n}\n\ninline const GeneratedCodeInfo_Annotation* GeneratedCodeInfo_Annotation::internal_default_instance() {\n  return &GeneratedCodeInfo_Annotation_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// GeneratedCodeInfo\n\n// repeated .google.protobuf.GeneratedCodeInfo.Annotation annotation = 1;\ninline int GeneratedCodeInfo::annotation_size() const {\n  return annotation_.size();\n}\ninline void GeneratedCodeInfo::clear_annotation() {\n  annotation_.Clear();\n}\ninline const ::google::protobuf::GeneratedCodeInfo_Annotation& GeneratedCodeInfo::annotation(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.GeneratedCodeInfo.annotation)\n  return annotation_.Get(index);\n}\ninline ::google::protobuf::GeneratedCodeInfo_Annotation* GeneratedCodeInfo::mutable_annotation(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.GeneratedCodeInfo.annotation)\n  return annotation_.Mutable(index);\n}\ninline ::google::protobuf::GeneratedCodeInfo_Annotation* GeneratedCodeInfo::add_annotation() {\n  // @@protoc_insertion_point(field_add:google.protobuf.GeneratedCodeInfo.annotation)\n  return annotation_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::GeneratedCodeInfo_Annotation >*\nGeneratedCodeInfo::mutable_annotation() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.GeneratedCodeInfo.annotation)\n  return &annotation_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::GeneratedCodeInfo_Annotation >&\nGeneratedCodeInfo::annotation() const {\n  // 
@@protoc_insertion_point(field_list:google.protobuf.GeneratedCodeInfo.annotation)\n  return annotation_;\n}\n\ninline const GeneratedCodeInfo* GeneratedCodeInfo::internal_default_instance() {\n  return &GeneratedCodeInfo_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// 
-------------------------------------------------------------------\n\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace protobuf\n}  // namespace google\n\n#ifndef SWIG\nnamespace google {\nnamespace protobuf {\n\ntemplate <> struct is_proto_enum< ::google::protobuf::FieldDescriptorProto_Type> : ::google::protobuf::internal::true_type {};\ntemplate <>\ninline const EnumDescriptor* GetEnumDescriptor< ::google::protobuf::FieldDescriptorProto_Type>() {\n  return ::google::protobuf::FieldDescriptorProto_Type_descriptor();\n}\ntemplate <> struct is_proto_enum< ::google::protobuf::FieldDescriptorProto_Label> : ::google::protobuf::internal::true_type {};\ntemplate <>\ninline const EnumDescriptor* GetEnumDescriptor< ::google::protobuf::FieldDescriptorProto_Label>() {\n  return ::google::protobuf::FieldDescriptorProto_Label_descriptor();\n}\ntemplate <> struct is_proto_enum< ::google::protobuf::FileOptions_OptimizeMode> : ::google::protobuf::internal::true_type {};\ntemplate <>\ninline const EnumDescriptor* GetEnumDescriptor< ::google::protobuf::FileOptions_OptimizeMode>() {\n  return ::google::protobuf::FileOptions_OptimizeMode_descriptor();\n}\ntemplate <> struct is_proto_enum< ::google::protobuf::FieldOptions_CType> : ::google::protobuf::internal::true_type {};\ntemplate <>\ninline const EnumDescriptor* GetEnumDescriptor< ::google::protobuf::FieldOptions_CType>() {\n  return ::google::protobuf::FieldOptions_CType_descriptor();\n}\ntemplate <> struct is_proto_enum< ::google::protobuf::FieldOptions_JSType> : ::google::protobuf::internal::true_type {};\ntemplate <>\ninline const EnumDescriptor* GetEnumDescriptor< ::google::protobuf::FieldOptions_JSType>() {\n  return ::google::protobuf::FieldOptions_JSType_descriptor();\n}\n\n}  // namespace protobuf\n}  // namespace google\n#endif  // SWIG\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_google_2fprotobuf_2fdescriptor_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/descriptor.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// The messages in this file describe the definitions found in .proto files.\n// A valid .proto file can be translated directly to a FileDescriptorProto\n// without any other information (e.g. without reading its imports).\n\n\nsyntax = \"proto2\";\n\npackage google.protobuf;\noption go_package = \"descriptor\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"DescriptorProtos\";\noption csharp_namespace = \"Google.Protobuf.Reflection\";\noption objc_class_prefix = \"GPB\";\n\n// descriptor.proto must be optimized for speed because reflection-based\n// algorithms don't work during bootstrapping.\noption optimize_for = SPEED;\n\n// The protocol compiler can output a FileDescriptorSet containing the .proto\n// files it parses.\nmessage FileDescriptorSet {\n  repeated FileDescriptorProto file = 1;\n}\n\n// Describes a complete .proto file.\nmessage FileDescriptorProto {\n  optional string name = 1;       // file name, relative to root of source tree\n  optional string package = 2;    // e.g. 
\"foo\", \"foo.bar\", etc.\n\n  // Names of files imported by this file.\n  repeated string dependency = 3;\n  // Indexes of the public imported files in the dependency list above.\n  repeated int32 public_dependency = 10;\n  // Indexes of the weak imported files in the dependency list.\n  // For Google-internal migration only. Do not use.\n  repeated int32 weak_dependency = 11;\n\n  // All top-level definitions in this file.\n  repeated DescriptorProto message_type = 4;\n  repeated EnumDescriptorProto enum_type = 5;\n  repeated ServiceDescriptorProto service = 6;\n  repeated FieldDescriptorProto extension = 7;\n\n  optional FileOptions options = 8;\n\n  // This field contains optional information about the original source code.\n  // You may safely remove this entire field without harming runtime\n  // functionality of the descriptors -- the information is needed only by\n  // development tools.\n  optional SourceCodeInfo source_code_info = 9;\n\n  // The syntax of the proto file.\n  // The supported values are \"proto2\" and \"proto3\".\n  optional string syntax = 12;\n}\n\n// Describes a message type.\nmessage DescriptorProto {\n  optional string name = 1;\n\n  repeated FieldDescriptorProto field = 2;\n  repeated FieldDescriptorProto extension = 6;\n\n  repeated DescriptorProto nested_type = 3;\n  repeated EnumDescriptorProto enum_type = 4;\n\n  message ExtensionRange {\n    optional int32 start = 1;\n    optional int32 end = 2;\n  }\n  repeated ExtensionRange extension_range = 5;\n\n  repeated OneofDescriptorProto oneof_decl = 8;\n\n  optional MessageOptions options = 7;\n\n  // Range of reserved tag numbers. Reserved tag numbers may not be used by\n  // fields or extension ranges in the same message. 
Reserved ranges may\n  // not overlap.\n  message ReservedRange {\n    optional int32 start = 1; // Inclusive.\n    optional int32 end = 2;   // Exclusive.\n  }\n  repeated ReservedRange reserved_range = 9;\n  // Reserved field names, which may not be used by fields in the same message.\n  // A given name may only be reserved once.\n  repeated string reserved_name = 10;\n}\n\n// Describes a field within a message.\nmessage FieldDescriptorProto {\n  enum Type {\n    // 0 is reserved for errors.\n    // Order is weird for historical reasons.\n    TYPE_DOUBLE         = 1;\n    TYPE_FLOAT          = 2;\n    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if\n    // negative values are likely.\n    TYPE_INT64          = 3;\n    TYPE_UINT64         = 4;\n    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if\n    // negative values are likely.\n    TYPE_INT32          = 5;\n    TYPE_FIXED64        = 6;\n    TYPE_FIXED32        = 7;\n    TYPE_BOOL           = 8;\n    TYPE_STRING         = 9;\n    TYPE_GROUP          = 10;  // Tag-delimited aggregate.\n    TYPE_MESSAGE        = 11;  // Length-delimited aggregate.\n\n    // New in version 2.\n    TYPE_BYTES          = 12;\n    TYPE_UINT32         = 13;\n    TYPE_ENUM           = 14;\n    TYPE_SFIXED32       = 15;\n    TYPE_SFIXED64       = 16;\n    TYPE_SINT32         = 17;  // Uses ZigZag encoding.\n    TYPE_SINT64         = 18;  // Uses ZigZag encoding.\n  };\n\n  enum Label {\n    // 0 is reserved for errors\n    LABEL_OPTIONAL      = 1;\n    LABEL_REQUIRED      = 2;\n    LABEL_REPEATED      = 3;\n    // TODO(sanjay): Should we add LABEL_MAP?\n  };\n\n  optional string name = 1;\n  optional int32 number = 3;\n  optional Label label = 4;\n\n  // If type_name is set, this need not be set.  
If both this and type_name\n  // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.\n  optional Type type = 5;\n\n  // For message and enum types, this is the name of the type.  If the name\n  // starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping\n  // rules are used to find the type (i.e. first the nested types within this\n  // message are searched, then within the parent, on up to the root\n  // namespace).\n  optional string type_name = 6;\n\n  // For extensions, this is the name of the type being extended.  It is\n  // resolved in the same manner as type_name.\n  optional string extendee = 2;\n\n  // For numeric types, contains the original text representation of the value.\n  // For booleans, \"true\" or \"false\".\n  // For strings, contains the default text contents (not escaped in any way).\n  // For bytes, contains the C escaped value.  All bytes >= 128 are escaped.\n  // TODO(kenton):  Base-64 encode?\n  optional string default_value = 7;\n\n  // If set, gives the index of a oneof in the containing type's oneof_decl\n  // list.  This field is a member of that oneof.\n  optional int32 oneof_index = 9;\n\n  // JSON name of this field. The value is set by protocol compiler. If the\n  // user has set a \"json_name\" option on this field, that option's value\n  // will be used. 
Otherwise, it's deduced from the field's name by converting\n  // it to camelCase.\n  optional string json_name = 10;\n\n  optional FieldOptions options = 8;\n}\n\n// Describes a oneof.\nmessage OneofDescriptorProto {\n  optional string name = 1;\n  optional OneofOptions options = 2;\n}\n\n// Describes an enum type.\nmessage EnumDescriptorProto {\n  optional string name = 1;\n\n  repeated EnumValueDescriptorProto value = 2;\n\n  optional EnumOptions options = 3;\n}\n\n// Describes a value within an enum.\nmessage EnumValueDescriptorProto {\n  optional string name = 1;\n  optional int32 number = 2;\n\n  optional EnumValueOptions options = 3;\n}\n\n// Describes a service.\nmessage ServiceDescriptorProto {\n  optional string name = 1;\n  repeated MethodDescriptorProto method = 2;\n\n  optional ServiceOptions options = 3;\n}\n\n// Describes a method of a service.\nmessage MethodDescriptorProto {\n  optional string name = 1;\n\n  // Input and output type names.  These are resolved in the same way as\n  // FieldDescriptorProto.type_name, but must refer to a message type.\n  optional string input_type = 2;\n  optional string output_type = 3;\n\n  optional MethodOptions options = 4;\n\n  // Identifies if client streams multiple client messages\n  optional bool client_streaming = 5 [default=false];\n  // Identifies if server streams multiple server messages\n  optional bool server_streaming = 6 [default=false];\n}\n\n\n// ===================================================================\n// Options\n\n// Each of the definitions above may have \"options\" attached.  These are\n// just annotations which may cause code to be generated slightly differently\n// or may contain hints for code that manipulates protocol messages.\n//\n// Clients may define custom options as extensions of the *Options messages.\n// These extensions may not yet be known at parsing time, so the parser cannot\n// store the values in them.  
Instead it stores them in a field in the *Options\n// message called uninterpreted_option. This field must have the same name\n// across all *Options messages. We then use this field to populate the\n// extensions when we build a descriptor, at which point all protos have been\n// parsed and so all extensions are known.\n//\n// Extension numbers for custom options may be chosen as follows:\n// * For options which will only be used within a single application or\n//   organization, or for experimental options, use field numbers 50000\n//   through 99999.  It is up to you to ensure that you do not use the\n//   same number for multiple options.\n// * For options which will be published and used publicly by multiple\n//   independent entities, e-mail protobuf-global-extension-registry@google.com\n//   to reserve extension numbers. Simply provide your project name (e.g.\n//   Objective-C plugin) and your project website (if available) -- there's no\n//   need to explain how you intend to use them. Usually you only need one\n//   extension number. You can declare multiple options with only one extension\n//   number by putting them in a sub-message. See the Custom Options section of\n//   the docs for examples:\n//   https://developers.google.com/protocol-buffers/docs/proto#options\n//   If this turns out to be popular, a web service will be set up\n//   to automatically assign option numbers.\n\n\nmessage FileOptions {\n\n  // Sets the Java package where classes generated from this .proto will be\n  // placed.  By default, the proto package is used, but this is often\n  // inappropriate because proto packages do not normally start with backwards\n  // domain names.\n  optional string java_package = 1;\n\n\n  // If set, all the classes from the .proto file are wrapped in a single\n  // outer class with the given name.  
This applies to both Proto1\n  // (equivalent to the old \"--one_java_file\" option) and Proto2 (where\n  // a .proto always translates to a single class, but you may want to\n  // explicitly choose the class name).\n  optional string java_outer_classname = 8;\n\n  // If set true, then the Java code generator will generate a separate .java\n  // file for each top-level message, enum, and service defined in the .proto\n  // file.  Thus, these types will *not* be nested inside the outer class\n  // named by java_outer_classname.  However, the outer class will still be\n  // generated to contain the file's getDescriptor() method as well as any\n  // top-level extensions defined in the file.\n  optional bool java_multiple_files = 10 [default=false];\n\n  // This option does nothing.\n  optional bool java_generate_equals_and_hash = 20 [deprecated=true];\n\n  // If set true, then the Java2 code generator will generate code that\n  // throws an exception whenever an attempt is made to assign a non-UTF-8\n  // byte sequence to a string field.\n  // Message reflection will do the same.\n  // However, an extension field still accepts non-UTF-8 byte sequences.\n  // This option has no effect on when used with the lite runtime.\n  optional bool java_string_check_utf8 = 27 [default=false];\n\n\n  // Generated classes can be optimized for speed or code size.\n  enum OptimizeMode {\n    SPEED = 1;        // Generate complete code for parsing, serialization,\n                      // etc.\n    CODE_SIZE = 2;    // Use ReflectionOps to implement these methods.\n    LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.\n  }\n  optional OptimizeMode optimize_for = 9 [default=SPEED];\n\n  // Sets the Go package where structs generated from this .proto will be\n  // placed. 
If omitted, the Go package will be derived from the following:\n  //   - The basename of the package import path, if provided.\n  //   - Otherwise, the package statement in the .proto file, if present.\n  //   - Otherwise, the basename of the .proto file, without extension.\n  optional string go_package = 11;\n\n\n\n  // Should generic services be generated in each language?  \"Generic\" services\n  // are not specific to any particular RPC system.  They are generated by the\n  // main code generators in each language (without additional plugins).\n  // Generic services were the only kind of service generation supported by\n  // early versions of google.protobuf.\n  //\n  // Generic services are now considered deprecated in favor of using plugins\n  // that generate code specific to your particular RPC system.  Therefore,\n  // these default to false.  Old code which depends on generic services should\n  // explicitly set them to true.\n  optional bool cc_generic_services = 16 [default=false];\n  optional bool java_generic_services = 17 [default=false];\n  optional bool py_generic_services = 18 [default=false];\n\n  // Is this file deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for everything in the file, or it will be completely ignored; in the very\n  // least, this is a formalization for deprecating files.\n  optional bool deprecated = 23 [default=false];\n\n  // Enables the use of arenas for the proto messages in this file. This applies\n  // only to generated classes for C++.\n  optional bool cc_enable_arenas = 31 [default=false];\n\n\n  // Sets the objective c class prefix which is prepended to all objective c\n  // generated classes from this .proto. There is no default.\n  optional string objc_class_prefix = 36;\n\n  // Namespace for generated classes; defaults to the package.\n  optional string csharp_namespace = 37;\n\n  // The parser stores options it doesn't recognize here. 
See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n\n  reserved 38;\n}\n\nmessage MessageOptions {\n  // Set true to use the old proto1 MessageSet wire format for extensions.\n  // This is provided for backwards-compatibility with the MessageSet wire\n  // format.  You should not use this for any other reason:  It's less\n  // efficient, has fewer features, and is more complicated.\n  //\n  // The message must be defined exactly as follows:\n  //   message Foo {\n  //     option message_set_wire_format = true;\n  //     extensions 4 to max;\n  //   }\n  // Note that the message cannot have any defined fields; MessageSets only\n  // have extensions.\n  //\n  // All extensions of your type must be singular messages; e.g. they cannot\n  // be int32s, enums, or repeated messages.\n  //\n  // Because this is an option, the above two restrictions are not enforced by\n  // the protocol compiler.\n  optional bool message_set_wire_format = 1 [default=false];\n\n  // Disables the generation of the standard \"descriptor()\" accessor, which can\n  // conflict with a field of the same name.  
This is meant to make migration\n  // from proto1 easier; new code should avoid fields named \"descriptor\".\n  optional bool no_standard_descriptor_accessor = 2 [default=false];\n\n  // Is this message deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the message, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating messages.\n  optional bool deprecated = 3 [default=false];\n\n  // Whether the message is an automatically generated map entry type for the\n  // maps field.\n  //\n  // For maps fields:\n  //     map<KeyType, ValueType> map_field = 1;\n  // The parsed descriptor looks like:\n  //     message MapFieldEntry {\n  //         option map_entry = true;\n  //         optional KeyType key = 1;\n  //         optional ValueType value = 2;\n  //     }\n  //     repeated MapFieldEntry map_field = 1;\n  //\n  // Implementations may choose not to generate the map_entry=true message, but\n  // use a native map in the target language to hold the keys and values.\n  // The reflection APIs in such implementions still need to work as\n  // if the field is a repeated message field.\n  //\n  // NOTE: Do not set the option in .proto files. Always use the maps syntax\n  // instead. The option should only be implicitly set by the proto compiler\n  // parser.\n  optional bool map_entry = 7;\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n\n  reserved 8;  // javalite_serializable\n}\n\nmessage FieldOptions {\n  // The ctype option instructs the C++ code generator to use a different\n  // representation of the field than it normally would.  See the specific\n  // options below.  
This option is not yet implemented in the open source\n  // release -- sorry, we'll try to include it in a future version!\n  optional CType ctype = 1 [default = STRING];\n  enum CType {\n    // Default mode.\n    STRING = 0;\n\n    CORD = 1;\n\n    STRING_PIECE = 2;\n  }\n  // The packed option can be enabled for repeated primitive fields to enable\n  // a more efficient representation on the wire. Rather than repeatedly\n  // writing the tag and type for each element, the entire array is encoded as\n  // a single length-delimited blob. In proto3, only explicit setting it to\n  // false will avoid using packed encoding.\n  optional bool packed = 2;\n\n  // The jstype option determines the JavaScript type used for values of the\n  // field.  The option is permitted only for 64 bit integral and fixed types\n  // (int64, uint64, sint64, fixed64, sfixed64).  By default these types are\n  // represented as JavaScript strings.  This avoids loss of precision that can\n  // happen when a large value is converted to a floating point JavaScript\n  // numbers.  Specifying JS_NUMBER for the jstype causes the generated\n  // JavaScript code to use the JavaScript \"number\" type instead of strings.\n  // This option is an enum to permit additional types to be added,\n  // e.g. goog.math.Integer.\n  optional JSType jstype = 6 [default = JS_NORMAL];\n  enum JSType {\n    // Use the default type.\n    JS_NORMAL = 0;\n\n    // Use JavaScript strings.\n    JS_STRING = 1;\n\n    // Use JavaScript numbers.\n    JS_NUMBER = 2;\n  }\n\n  // Should this field be parsed lazily?  Lazy applies only to message-type\n  // fields.  It means that when the outer message is initially parsed, the\n  // inner message's contents will not be parsed but instead stored in encoded\n  // form.  The inner message will actually be parsed when it is first accessed.\n  //\n  // This is only a hint.  
Implementations are free to choose whether to use\n  // eager or lazy parsing regardless of the value of this option.  However,\n  // setting this option true suggests that the protocol author believes that\n  // using lazy parsing on this field is worth the additional bookkeeping\n  // overhead typically needed to implement it.\n  //\n  // This option does not affect the public interface of any generated code;\n  // all method signatures remain the same.  Furthermore, thread-safety of the\n  // interface is not affected by this option; const methods remain safe to\n  // call from multiple threads concurrently, while non-const methods continue\n  // to require exclusive access.\n  //\n  //\n  // Note that implementations may choose not to check required fields within\n  // a lazy sub-message.  That is, calling IsInitialized() on the outer message\n  // may return true even if the inner message has missing required fields.\n  // This is necessary because otherwise the inner message would have to be\n  // parsed in order to perform the check, defeating the purpose of lazy\n  // parsing.  An implementation which chooses not to check required fields\n  // must be consistent about it.  That is, for any particular sub-message, the\n  // implementation must either *always* check its required fields, or *never*\n  // check its required fields, regardless of whether or not the message has\n  // been parsed.\n  optional bool lazy = 5 [default=false];\n\n  // Is this field deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for accessors, or it will be completely ignored; in the very least, this\n  // is a formalization for deprecating fields.\n  optional bool deprecated = 3 [default=false];\n\n  // For Google-internal migration only. Do not use.\n  optional bool weak = 10 [default=false];\n\n\n  // The parser stores options it doesn't recognize here. 
See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n\n  reserved 4;  // removed jtype\n}\n\nmessage OneofOptions {\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage EnumOptions {\n\n  // Set this option to true to allow mapping different tag names to the same\n  // value.\n  optional bool allow_alias = 2;\n\n  // Is this enum deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the enum, or it will be completely ignored; in the very least, this\n  // is a formalization for deprecating enums.\n  optional bool deprecated = 3 [default=false];\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage EnumValueOptions {\n  // Is this enum value deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the enum value, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating enum values.\n  optional bool deprecated = 1 [default=false];\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage ServiceOptions {\n\n  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC\n  //   framework.  
We apologize for hoarding these numbers to ourselves, but\n  //   we were already using them long before we decided to release Protocol\n  //   Buffers.\n\n  // Is this service deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the service, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating services.\n  optional bool deprecated = 33 [default=false];\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage MethodOptions {\n\n  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC\n  //   framework.  We apologize for hoarding these numbers to ourselves, but\n  //   we were already using them long before we decided to release Protocol\n  //   Buffers.\n\n  // Is this method deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the method, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating methods.\n  optional bool deprecated = 33 [default=false];\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\n\n// A message representing a option the parser does not recognize. This only\n// appears in options protos created by the compiler::Parser class.\n// DescriptorPool resolves these when building Descriptor objects. Therefore,\n// options protos in descriptor objects (e.g. returned by Descriptor::options(),\n// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions\n// in them.\nmessage UninterpretedOption {\n  // The name of the uninterpreted option.  
Each string represents a segment in\n  // a dot-separated name.  is_extension is true iff a segment represents an\n  // extension (denoted with parentheses in options specs in .proto files).\n  // E.g.,{ [\"foo\", false], [\"bar.baz\", true], [\"qux\", false] } represents\n  // \"foo.(bar.baz).qux\".\n  message NamePart {\n    required string name_part = 1;\n    required bool is_extension = 2;\n  }\n  repeated NamePart name = 2;\n\n  // The value of the uninterpreted option, in whatever type the tokenizer\n  // identified it as during parsing. Exactly one of these should be set.\n  optional string identifier_value = 3;\n  optional uint64 positive_int_value = 4;\n  optional int64 negative_int_value = 5;\n  optional double double_value = 6;\n  optional bytes string_value = 7;\n  optional string aggregate_value = 8;\n}\n\n// ===================================================================\n// Optional source code info\n\n// Encapsulates information about the original source file from which a\n// FileDescriptorProto was generated.\nmessage SourceCodeInfo {\n  // A Location identifies a piece of source code in a .proto file which\n  // corresponds to a particular definition.  
This information is intended\n  // to be useful to IDEs, code indexers, documentation generators, and similar\n  // tools.\n  //\n  // For example, say we have a file like:\n  //   message Foo {\n  //     optional string foo = 1;\n  //   }\n  // Let's look at just the field definition:\n  //   optional string foo = 1;\n  //   ^       ^^     ^^  ^  ^^^\n  //   a       bc     de  f  ghi\n  // We have the following locations:\n  //   span   path               represents\n  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.\n  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).\n  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).\n  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).\n  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).\n  //\n  // Notes:\n  // - A location may refer to a repeated field itself (i.e. not to any\n  //   particular index within it).  This is used whenever a set of elements are\n  //   logically enclosed in a single code segment.  For example, an entire\n  //   extend block (possibly containing multiple extension definitions) will\n  //   have an outer location whose path refers to the \"extensions\" repeated\n  //   field without an index.\n  // - Multiple locations may have the same path.  This happens when a single\n  //   logical declaration is spread out across multiple places.  The most\n  //   obvious example is the \"extend\" block again -- there may be multiple\n  //   extend blocks in the same scope, each of which will have the same path.\n  // - A location's span is not always a subset of its parent's span.  For\n  //   example, the \"extendee\" of an extension declaration appears at the\n  //   beginning of the \"extend\" block and is shared by all extensions within\n  //   the block.\n  // - Just because a location's span is a subset of some other location's span\n  //   does not mean that it is a descendent.  For example, a \"group\" defines\n  //   both a type and a field in a single declaration.  
Thus, the locations\n  //   corresponding to the type and field and their components will overlap.\n  // - Code which tries to interpret locations should probably be designed to\n  //   ignore those that it doesn't understand, as more types of locations could\n  //   be recorded in the future.\n  repeated Location location = 1;\n  message Location {\n    // Identifies which part of the FileDescriptorProto was defined at this\n    // location.\n    //\n    // Each element is a field number or an index.  They form a path from\n    // the root FileDescriptorProto to the place where the definition.  For\n    // example, this path:\n    //   [ 4, 3, 2, 7, 1 ]\n    // refers to:\n    //   file.message_type(3)  // 4, 3\n    //       .field(7)         // 2, 7\n    //       .name()           // 1\n    // This is because FileDescriptorProto.message_type has field number 4:\n    //   repeated DescriptorProto message_type = 4;\n    // and DescriptorProto.field has field number 2:\n    //   repeated FieldDescriptorProto field = 2;\n    // and FieldDescriptorProto.name has field number 1:\n    //   optional string name = 1;\n    //\n    // Thus, the above path gives the location of a field name.  If we removed\n    // the last element:\n    //   [ 4, 3, 2, 7 ]\n    // this path refers to the whole field declaration (from the beginning\n    // of the label to the terminating semicolon).\n    repeated int32 path = 1 [packed=true];\n\n    // Always has exactly three or four elements: start line, start column,\n    // end line (optional, otherwise assumed same as start line), end column.\n    // These are packed into a single field for efficiency.  
Note that line\n    // and column numbers are zero-based -- typically you will want to add\n    // 1 to each before displaying to a user.\n    repeated int32 span = 2 [packed=true];\n\n    // If this SourceCodeInfo represents a complete declaration, these are any\n    // comments appearing before and after the declaration which appear to be\n    // attached to the declaration.\n    //\n    // A series of line comments appearing on consecutive lines, with no other\n    // tokens appearing on those lines, will be treated as a single comment.\n    //\n    // leading_detached_comments will keep paragraphs of comments that appear\n    // before (but not connected to) the current element. Each paragraph,\n    // separated by empty lines, will be one comment element in the repeated\n    // field.\n    //\n    // Only the comment content is provided; comment markers (e.g. //) are\n    // stripped out.  For block comments, leading whitespace and an asterisk\n    // will be stripped from the beginning of each line other than the first.\n    // Newlines are included in the output.\n    //\n    // Examples:\n    //\n    //   optional int32 foo = 1;  // Comment attached to foo.\n    //   // Comment attached to bar.\n    //   optional int32 bar = 2;\n    //\n    //   optional string baz = 3;\n    //   // Comment attached to baz.\n    //   // Another line attached to baz.\n    //\n    //   // Comment attached to qux.\n    //   //\n    //   // Another line attached to qux.\n    //   optional double qux = 4;\n    //\n    //   // Detached comment for corge. This is not leading or trailing comments\n    //   // to qux or corge because there are blank lines separating it from\n    //   // both.\n    //\n    //   // Detached comment for corge paragraph 2.\n    //\n    //   optional string corge = 5;\n    //   /* Block comment attached\n    //    * to corge.  Leading asterisks\n    //    * will be removed. */\n    //   /* Block comment attached to\n    //    * grault. 
*/\n    //   optional int32 grault = 6;\n    //\n    //   // ignored detached comments.\n    optional string leading_comments = 3;\n    optional string trailing_comments = 4;\n    repeated string leading_detached_comments = 6;\n  }\n}\n\n// Describes the relationship between generated code and its original source\n// file. A GeneratedCodeInfo message is associated with only one generated\n// source file, but may contain references to different source .proto files.\nmessage GeneratedCodeInfo {\n  // An Annotation connects some span of text in generated code to an element\n  // of its generating .proto file.\n  repeated Annotation annotation = 1;\n  message Annotation {\n    // Identifies the element in the original source .proto file. This field\n    // is formatted the same as SourceCodeInfo.Location.path.\n    repeated int32 path = 1 [packed=true];\n\n    // Identifies the filesystem path to the original source .proto.\n    optional string source_file = 2;\n\n    // Identifies the starting offset in bytes in the generated code\n    // that relates to the identified object.\n    optional int32 begin = 3;\n\n    // Identifies the ending offset in bytes in the generated code that\n    // relates to the identified offset. The end offset should be one past\n    // the last relevant byte (so the length of the text = end - begin).\n    optional int32 end = 4;\n  }\n}\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/descriptor_database.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Interface for manipulating databases of descriptors.\n\n#ifndef GOOGLE_PROTOBUF_DESCRIPTOR_DATABASE_H__\n#define GOOGLE_PROTOBUF_DESCRIPTOR_DATABASE_H__\n\n#include <map>\n#include <string>\n#include <utility>\n#include <vector>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/descriptor.h>\n\nnamespace google {\nnamespace protobuf {\n\n// Defined in this file.\nclass DescriptorDatabase;\nclass SimpleDescriptorDatabase;\nclass EncodedDescriptorDatabase;\nclass DescriptorPoolDatabase;\nclass MergedDescriptorDatabase;\n\n// Abstract interface for a database of descriptors.\n//\n// This is useful if you want to create a DescriptorPool which loads\n// descriptors on-demand from some sort of large database.  If the database\n// is large, it may be inefficient to enumerate every .proto file inside it\n// calling DescriptorPool::BuildFile() for each one.  Instead, a DescriptorPool\n// can be created which wraps a DescriptorDatabase and only builds particular\n// descriptors when they are needed.\nclass LIBPROTOBUF_EXPORT DescriptorDatabase {\n public:\n  inline DescriptorDatabase() {}\n  virtual ~DescriptorDatabase();\n\n  // Find a file by file name.  
Fills in in *output and returns true if found.\n  // Otherwise, returns false, leaving the contents of *output undefined.\n  virtual bool FindFileByName(const string& filename,\n                              FileDescriptorProto* output) = 0;\n\n  // Find the file that declares the given fully-qualified symbol name.\n  // If found, fills in *output and returns true, otherwise returns false\n  // and leaves *output undefined.\n  virtual bool FindFileContainingSymbol(const string& symbol_name,\n                                        FileDescriptorProto* output) = 0;\n\n  // Find the file which defines an extension extending the given message type\n  // with the given field number.  If found, fills in *output and returns true,\n  // otherwise returns false and leaves *output undefined.  containing_type\n  // must be a fully-qualified type name.\n  virtual bool FindFileContainingExtension(const string& containing_type,\n                                           int field_number,\n                                           FileDescriptorProto* output) = 0;\n\n  // Finds the tag numbers used by all known extensions of\n  // extendee_type, and appends them to output in an undefined\n  // order. This method is best-effort: it's not guaranteed that the\n  // database will find all extensions, and it's not guaranteed that\n  // FindFileContainingExtension will return true on all of the found\n  // numbers. Returns true if the search was successful, otherwise\n  // returns false and leaves output unchanged.\n  //\n  // This method has a default implementation that always returns\n  // false.\n  virtual bool FindAllExtensionNumbers(const string& /* extendee_type */,\n                                       vector<int>* /* output */) {\n    return false;\n  }\n\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(DescriptorDatabase);\n};\n\n// A DescriptorDatabase into which you can insert files manually.\n//\n// FindFileContainingSymbol() is fully-implemented.  
When you add a file, its\n// symbols will be indexed for this purpose.  Note that the implementation\n// may return false positives, but only if it isn't possible for the symbol\n// to be defined in any other file.  In particular, if a file defines a symbol\n// \"Foo\", then searching for \"Foo.[anything]\" will match that file.  This way,\n// the database does not need to aggressively index all children of a symbol.\n//\n// FindFileContainingExtension() is mostly-implemented.  It works if and only\n// if the original FieldDescriptorProto defining the extension has a\n// fully-qualified type name in its \"extendee\" field (i.e. starts with a '.').\n// If the extendee is a relative name, SimpleDescriptorDatabase will not\n// attempt to resolve the type, so it will not know what type the extension is\n// extending.  Therefore, calling FindFileContainingExtension() with the\n// extension's containing type will never actually find that extension.  Note\n// that this is an unlikely problem, as all FileDescriptorProtos created by the\n// protocol compiler (as well as ones created by calling\n// FileDescriptor::CopyTo()) will always use fully-qualified names for all\n// types.  You only need to worry if you are constructing FileDescriptorProtos\n// yourself, or are calling compiler::Parser directly.\nclass LIBPROTOBUF_EXPORT SimpleDescriptorDatabase : public DescriptorDatabase {\n public:\n  SimpleDescriptorDatabase();\n  ~SimpleDescriptorDatabase();\n\n  // Adds the FileDescriptorProto to the database, making a copy.  The object\n  // can be deleted after Add() returns.  
Returns false if the file conflicted\n  // with a file already in the database, in which case an error will have\n  // been written to GOOGLE_LOG(ERROR).\n  bool Add(const FileDescriptorProto& file);\n\n  // Adds the FileDescriptorProto to the database and takes ownership of it.\n  bool AddAndOwn(const FileDescriptorProto* file);\n\n  // implements DescriptorDatabase -----------------------------------\n  bool FindFileByName(const string& filename,\n                      FileDescriptorProto* output);\n  bool FindFileContainingSymbol(const string& symbol_name,\n                                FileDescriptorProto* output);\n  bool FindFileContainingExtension(const string& containing_type,\n                                   int field_number,\n                                   FileDescriptorProto* output);\n  bool FindAllExtensionNumbers(const string& extendee_type,\n                               vector<int>* output);\n\n private:\n  // So that it can use DescriptorIndex.\n  friend class EncodedDescriptorDatabase;\n\n  // An index mapping file names, symbol names, and extension numbers to\n  // some sort of values.\n  template <typename Value>\n  class DescriptorIndex {\n   public:\n    // Helpers to recursively add particular descriptors and all their contents\n    // to the index.\n    bool AddFile(const FileDescriptorProto& file,\n                 Value value);\n    bool AddSymbol(const string& name, Value value);\n    bool AddNestedExtensions(const DescriptorProto& message_type,\n                             Value value);\n    bool AddExtension(const FieldDescriptorProto& field,\n                      Value value);\n\n    Value FindFile(const string& filename);\n    Value FindSymbol(const string& name);\n    Value FindExtension(const string& containing_type, int field_number);\n    bool FindAllExtensionNumbers(const string& containing_type,\n                                 vector<int>* output);\n\n   private:\n    map<string, Value> by_name_;\n    map<string, 
Value> by_symbol_;\n    map<pair<string, int>, Value> by_extension_;\n\n    // Invariant:  The by_symbol_ map does not contain any symbols which are\n    // prefixes of other symbols in the map.  For example, \"foo.bar\" is a\n    // prefix of \"foo.bar.baz\" (but is not a prefix of \"foo.barbaz\").\n    //\n    // This invariant is important because it means that given a symbol name,\n    // we can find a key in the map which is a prefix of the symbol in O(lg n)\n    // time, and we know that there is at most one such key.\n    //\n    // The prefix lookup algorithm works like so:\n    // 1) Find the last key in the map which is less than or equal to the\n    //    search key.\n    // 2) If the found key is a prefix of the search key, then return it.\n    //    Otherwise, there is no match.\n    //\n    // I am sure this algorithm has been described elsewhere, but since I\n    // wasn't able to find it quickly I will instead prove that it works\n    // myself.  The key to the algorithm is that if a match exists, step (1)\n    // will find it.  Proof:\n    // 1) Define the \"search key\" to be the key we are looking for, the \"found\n    //    key\" to be the key found in step (1), and the \"match key\" to be the\n    //    key which actually matches the search key (i.e. 
the key we're trying\n    //    to find).\n    // 2) The found key must be less than or equal to the search key by\n    //    definition.\n    // 3) The match key must also be less than or equal to the search key\n    //    (because it is a prefix).\n    // 4) The match key cannot be greater than the found key, because if it\n    //    were, then step (1) of the algorithm would have returned the match\n    //    key instead (since it finds the *greatest* key which is less than or\n    //    equal to the search key).\n    // 5) Therefore, the found key must be between the match key and the search\n    //    key, inclusive.\n    // 6) Since the search key must be a sub-symbol of the match key, if it is\n    //    not equal to the match key, then search_key[match_key.size()] must\n    //    be '.'.\n    // 7) Since '.' sorts before any other character that is valid in a symbol\n    //    name, then if the found key is not equal to the match key, then\n    //    found_key[match_key.size()] must also be '.', because any other value\n    //    would make it sort after the search key.\n    // 8) Therefore, if the found key is not equal to the match key, then the\n    //    found key must be a sub-symbol of the match key.  However, this would\n    //    contradict our map invariant which says that no symbol in the map is\n    //    a sub-symbol of any other.\n    // 9) Therefore, the found key must match the match key.\n    //\n    // The above proof assumes the match key exists.  
In the case that the\n    // match key does not exist, then step (1) will return some other symbol.\n    // That symbol cannot be a super-symbol of the search key since if it were,\n    // then it would be a match, and we're assuming the match key doesn't exist.\n    // Therefore, step 2 will correctly return no match.\n\n    // Find the last entry in the by_symbol_ map whose key is less than or\n    // equal to the given name.\n    typename map<string, Value>::iterator FindLastLessOrEqual(\n        const string& name);\n\n    // True if either the arguments are equal or super_symbol identifies a\n    // parent symbol of sub_symbol (e.g. \"foo.bar\" is a parent of\n    // \"foo.bar.baz\", but not a parent of \"foo.barbaz\").\n    bool IsSubSymbol(const string& sub_symbol, const string& super_symbol);\n\n    // Returns true if and only if all characters in the name are alphanumerics,\n    // underscores, or periods.\n    bool ValidateSymbolName(const string& name);\n  };\n\n\n  DescriptorIndex<const FileDescriptorProto*> index_;\n  vector<const FileDescriptorProto*> files_to_delete_;\n\n  // If file is non-NULL, copy it into *output and return true, otherwise\n  // return false.\n  bool MaybeCopy(const FileDescriptorProto* file,\n                 FileDescriptorProto* output);\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(SimpleDescriptorDatabase);\n};\n\n// Very similar to SimpleDescriptorDatabase, but stores all the descriptors\n// as raw bytes and generally tries to use as little memory as possible.\n//\n// The same caveats regarding FindFileContainingExtension() apply as with\n// SimpleDescriptorDatabase.\nclass LIBPROTOBUF_EXPORT EncodedDescriptorDatabase : public DescriptorDatabase {\n public:\n  EncodedDescriptorDatabase();\n  ~EncodedDescriptorDatabase();\n\n  // Adds the FileDescriptorProto to the database.  The descriptor is provided\n  // in encoded form.  
The database does not make a copy of the bytes, nor\n  // does it take ownership; it's up to the caller to make sure the bytes\n  // remain valid for the life of the database.  Returns false and logs an error\n  // if the bytes are not a valid FileDescriptorProto or if the file conflicted\n  // with a file already in the database.\n  bool Add(const void* encoded_file_descriptor, int size);\n\n  // Like Add(), but makes a copy of the data, so that the caller does not\n  // need to keep it around.\n  bool AddCopy(const void* encoded_file_descriptor, int size);\n\n  // Like FindFileContainingSymbol but returns only the name of the file.\n  bool FindNameOfFileContainingSymbol(const string& symbol_name,\n                                      string* output);\n\n  // implements DescriptorDatabase -----------------------------------\n  bool FindFileByName(const string& filename,\n                      FileDescriptorProto* output);\n  bool FindFileContainingSymbol(const string& symbol_name,\n                                FileDescriptorProto* output);\n  bool FindFileContainingExtension(const string& containing_type,\n                                   int field_number,\n                                   FileDescriptorProto* output);\n  bool FindAllExtensionNumbers(const string& extendee_type,\n                               vector<int>* output);\n\n private:\n  SimpleDescriptorDatabase::DescriptorIndex<pair<const void*, int> > index_;\n  vector<void*> files_to_delete_;\n\n  // If encoded_file.first is non-NULL, parse the data into *output and return\n  // true, otherwise return false.\n  bool MaybeParse(pair<const void*, int> encoded_file,\n                  FileDescriptorProto* output);\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(EncodedDescriptorDatabase);\n};\n\n// A DescriptorDatabase that fetches files from a given pool.\nclass LIBPROTOBUF_EXPORT DescriptorPoolDatabase : public DescriptorDatabase {\n public:\n  explicit DescriptorPoolDatabase(const DescriptorPool& 
pool);\n  ~DescriptorPoolDatabase();\n\n  // implements DescriptorDatabase -----------------------------------\n  bool FindFileByName(const string& filename,\n                      FileDescriptorProto* output);\n  bool FindFileContainingSymbol(const string& symbol_name,\n                                FileDescriptorProto* output);\n  bool FindFileContainingExtension(const string& containing_type,\n                                   int field_number,\n                                   FileDescriptorProto* output);\n  bool FindAllExtensionNumbers(const string& extendee_type,\n                               vector<int>* output);\n\n private:\n  const DescriptorPool& pool_;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(DescriptorPoolDatabase);\n};\n\n// A DescriptorDatabase that wraps two or more others.  It first searches the\n// first database and, if that fails, tries the second, and so on.\nclass LIBPROTOBUF_EXPORT MergedDescriptorDatabase : public DescriptorDatabase {\n public:\n  // Merge just two databases.  The sources remain property of the caller.\n  MergedDescriptorDatabase(DescriptorDatabase* source1,\n                           DescriptorDatabase* source2);\n  // Merge more than two databases.  
The sources remain property of the caller.\n  // The vector may be deleted after the constructor returns but the\n  // DescriptorDatabases need to stick around.\n  explicit MergedDescriptorDatabase(const vector<DescriptorDatabase*>& sources);\n  ~MergedDescriptorDatabase();\n\n  // implements DescriptorDatabase -----------------------------------\n  bool FindFileByName(const string& filename,\n                      FileDescriptorProto* output);\n  bool FindFileContainingSymbol(const string& symbol_name,\n                                FileDescriptorProto* output);\n  bool FindFileContainingExtension(const string& containing_type,\n                                   int field_number,\n                                   FileDescriptorProto* output);\n  // Merges the results of calling all databases. Returns true iff any\n  // of the databases returned true.\n  bool FindAllExtensionNumbers(const string& extendee_type,\n                               vector<int>* output);\n\n\n private:\n  vector<DescriptorDatabase*> sources_;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MergedDescriptorDatabase);\n};\n\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_DESCRIPTOR_DATABASE_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/duration.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: google/protobuf/duration.proto\n\n#ifndef PROTOBUF_google_2fprotobuf_2fduration_2eproto__INCLUDED\n#define PROTOBUF_google_2fprotobuf_2fduration_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/unknown_field_set.h>\n// @@protoc_insertion_point(includes)\n\nnamespace google {\nnamespace protobuf {\n\n// Internal implementation detail -- do not call these.\nvoid LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fduration_2eproto();\nvoid LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fduration_2eproto();\nvoid protobuf_AssignDesc_google_2fprotobuf_2fduration_2eproto();\nvoid protobuf_ShutdownFile_google_2fprotobuf_2fduration_2eproto();\n\nclass Duration;\n\n// ===================================================================\n\nclass LIBPROTOBUF_EXPORT Duration : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Duration) */ {\n public:\n  Duration();\n  virtual ~Duration();\n\n  Duration(const Duration& from);\n\n  inline Duration& operator=(const Duration& from) {\n    CopyFrom(from);\n    return 
*this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Duration& default_instance();\n\n  static const Duration* internal_default_instance();\n\n  void UnsafeArenaSwap(Duration* other);\n  void Swap(Duration* other);\n\n  // implements Message ----------------------------------------------\n\n  inline Duration* New() const { return New(NULL); }\n\n  Duration* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Duration& from);\n  void MergeFrom(const Duration& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Duration* other);\n  void UnsafeMergeFrom(const Duration& from);\n  protected:\n  explicit Duration(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    
return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional int64 seconds = 1;\n  void clear_seconds();\n  static const int kSecondsFieldNumber = 1;\n  ::google::protobuf::int64 seconds() const;\n  void set_seconds(::google::protobuf::int64 value);\n\n  // optional int32 nanos = 2;\n  void clear_nanos();\n  static const int kNanosFieldNumber = 2;\n  ::google::protobuf::int32 nanos() const;\n  void set_nanos(::google::protobuf::int32 value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.Duration)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  ::google::protobuf::int64 seconds_;\n  ::google::protobuf::int32 nanos_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fduration_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fduration_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fduration_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fduration_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Duration> Duration_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// Duration\n\n// optional int64 seconds = 1;\ninline void Duration::clear_seconds() {\n  seconds_ = GOOGLE_LONGLONG(0);\n}\ninline ::google::protobuf::int64 Duration::seconds() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Duration.seconds)\n 
 return seconds_;\n}\ninline void Duration::set_seconds(::google::protobuf::int64 value) {\n  \n  seconds_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Duration.seconds)\n}\n\n// optional int32 nanos = 2;\ninline void Duration::clear_nanos() {\n  nanos_ = 0;\n}\ninline ::google::protobuf::int32 Duration::nanos() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Duration.nanos)\n  return nanos_;\n}\ninline void Duration::set_nanos(::google::protobuf::int32 value) {\n  \n  nanos_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Duration.nanos)\n}\n\ninline const Duration* Duration::internal_default_instance() {\n  return &Duration_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace protobuf\n}  // namespace google\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_google_2fprotobuf_2fduration_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/duration.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption cc_enable_arenas = true;\noption go_package = \"github.com/golang/protobuf/ptypes/duration\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"DurationProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n// A Duration represents a signed, fixed-length span of time represented\n// as a count of seconds and fractions of seconds at nanosecond\n// resolution. It is independent of any calendar and concepts like \"day\"\n// or \"month\". It is related to Timestamp in that the difference between\n// two Timestamp values is a Duration and it can be added or subtracted\n// from a Timestamp. 
Range is approximately +-10,000 years.\n//\n// Example 1: Compute Duration from two Timestamps in pseudo code.\n//\n//     Timestamp start = ...;\n//     Timestamp end = ...;\n//     Duration duration = ...;\n//\n//     duration.seconds = end.seconds - start.seconds;\n//     duration.nanos = end.nanos - start.nanos;\n//\n//     if (duration.seconds < 0 && duration.nanos > 0) {\n//       duration.seconds += 1;\n//       duration.nanos -= 1000000000;\n//     } else if (duration.seconds > 0 && duration.nanos < 0) {\n//       duration.seconds -= 1;\n//       duration.nanos += 1000000000;\n//     }\n//\n// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.\n//\n//     Timestamp start = ...;\n//     Duration duration = ...;\n//     Timestamp end = ...;\n//\n//     end.seconds = start.seconds + duration.seconds;\n//     end.nanos = start.nanos + duration.nanos;\n//\n//     if (end.nanos < 0) {\n//       end.seconds -= 1;\n//       end.nanos += 1000000000;\n//     } else if (end.nanos >= 1000000000) {\n//       end.seconds += 1;\n//       end.nanos -= 1000000000;\n//     }\n//\n// Example 3: Compute Duration from datetime.timedelta in Python.\n//\n//     td = datetime.timedelta(days=3, minutes=10)\n//     duration = Duration()\n//     duration.FromTimedelta(td)\n//\n//\nmessage Duration {\n\n  // Signed seconds of the span of time. Must be from -315,576,000,000\n  // to +315,576,000,000 inclusive.\n  int64 seconds = 1;\n\n  // Signed fractions of a second at nanosecond resolution of the span\n  // of time. Durations less than one second are represented with a 0\n  // `seconds` field and a positive or negative `nanos` field. For durations\n  // of one second or more, a non-zero value for the `nanos` field must be\n  // of the same sign as the `seconds` field. Must be from -999,999,999\n  // to +999,999,999 inclusive.\n  int32 nanos = 2;\n}\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/dynamic_message.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Defines an implementation of Message which can emulate types which are not\n// known at compile-time.\n\n#ifndef GOOGLE_PROTOBUF_DYNAMIC_MESSAGE_H__\n#define GOOGLE_PROTOBUF_DYNAMIC_MESSAGE_H__\n\n#include <memory>\n#ifndef _SHARED_PTR_H\n#include <google/protobuf/stubs/shared_ptr.h>\n#endif\n\n#include <google/protobuf/message.h>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/mutex.h>\n\nnamespace google {\nnamespace protobuf {\n\n// Defined in other files.\nclass Descriptor;        // descriptor.h\nclass DescriptorPool;    // descriptor.h\n\n// Constructs implementations of Message which can emulate types which are not\n// known at compile-time.\n//\n// Sometimes you want to be able to manipulate protocol types that you don't\n// know about at compile time.  It would be nice to be able to construct\n// a Message object which implements the message type given by any arbitrary\n// Descriptor.  DynamicMessage provides this.\n//\n// As it turns out, a DynamicMessage needs to construct extra\n// information about its type in order to operate.  Most of this information\n// can be shared between all DynamicMessages of the same type.  
But, caching\n// this information in some sort of global map would be a bad idea, since\n// the cached information for a particular descriptor could outlive the\n// descriptor itself.  To avoid this problem, DynamicMessageFactory\n// encapsulates this \"cache\".  All DynamicMessages of the same type created\n// from the same factory will share the same support data.  Any Descriptors\n// used with a particular factory must outlive the factory.\nclass LIBPROTOBUF_EXPORT DynamicMessageFactory : public MessageFactory {\n public:\n  // Construct a DynamicMessageFactory that will search for extensions in\n  // the DescriptorPool in which the extendee is defined.\n  DynamicMessageFactory();\n\n  // Construct a DynamicMessageFactory that will search for extensions in\n  // the given DescriptorPool.\n  //\n  // DEPRECATED:  Use CodedInputStream::SetExtensionRegistry() to tell the\n  //   parser to look for extensions in an alternate pool.  However, note that\n  //   this is almost never what you want to do.  Almost all users should use\n  //   the zero-arg constructor.\n  DynamicMessageFactory(const DescriptorPool* pool);\n\n  ~DynamicMessageFactory();\n\n  // Call this to tell the DynamicMessageFactory that if it is given a\n  // Descriptor d for which:\n  //   d->file()->pool() == DescriptorPool::generated_pool(),\n  // then it should delegate to MessageFactory::generated_factory() instead\n  // of constructing a dynamic implementation of the message.  In theory there\n  // is no down side to doing this, so it may become the default in the future.\n  void SetDelegateToGeneratedFactory(bool enable) {\n    delegate_to_generated_factory_ = enable;\n  }\n\n  // implements MessageFactory ---------------------------------------\n\n  // Given a Descriptor, constructs the default (prototype) Message of that\n  // type.  
You can then call that message's New() method to construct a\n  // mutable message of that type.\n  //\n  // Calling this method twice with the same Descriptor returns the same\n  // object.  The returned object remains property of the factory and will\n  // be destroyed when the factory is destroyed.  Also, any objects created\n  // by calling the prototype's New() method share some data with the\n  // prototype, so these must be destroyed before the DynamicMessageFactory\n  // is destroyed.\n  //\n  // The given descriptor must outlive the returned message, and hence must\n  // outlive the DynamicMessageFactory.\n  //\n  // The method is thread-safe.\n  const Message* GetPrototype(const Descriptor* type);\n\n private:\n  const DescriptorPool* pool_;\n  bool delegate_to_generated_factory_;\n\n  // This struct just contains a hash_map.  We can't #include <google/protobuf/stubs/hash.h> from\n  // this header due to hacks needed for hash_map portability in the open source\n  // release.  Namely, stubs/hash.h, which defines hash_map portably, is not a\n  // public header (for good reason), but dynamic_message.h is, and public\n  // headers may only #include other public headers.\n  struct PrototypeMap;\n  google::protobuf::scoped_ptr<PrototypeMap> prototypes_;\n  mutable Mutex prototypes_mutex_;\n\n  friend class DynamicMessage;\n  const Message* GetPrototypeNoLock(const Descriptor* type);\n\n  // Construct default oneof instance for reflection usage if oneof\n  // is defined.\n  static void ConstructDefaultOneofInstance(const Descriptor* type,\n                                            const int offsets[],\n                                            void* default_oneof_instance);\n  // Delete default oneof instance. 
Called by ~DynamicMessageFactory.\n  static void DeleteDefaultOneofInstance(const Descriptor* type,\n                                         const int offsets[],\n                                         void* default_oneof_instance);\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(DynamicMessageFactory);\n};\n\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_DYNAMIC_MESSAGE_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/empty.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: google/protobuf/empty.proto\n\n#ifndef PROTOBUF_google_2fprotobuf_2fempty_2eproto__INCLUDED\n#define PROTOBUF_google_2fprotobuf_2fempty_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/unknown_field_set.h>\n// @@protoc_insertion_point(includes)\n\nnamespace google {\nnamespace protobuf {\n\n// Internal implementation detail -- do not call these.\nvoid LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fempty_2eproto();\nvoid LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fempty_2eproto();\nvoid protobuf_AssignDesc_google_2fprotobuf_2fempty_2eproto();\nvoid protobuf_ShutdownFile_google_2fprotobuf_2fempty_2eproto();\n\nclass Empty;\n\n// ===================================================================\n\nclass LIBPROTOBUF_EXPORT Empty : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Empty) */ {\n public:\n  Empty();\n  virtual ~Empty();\n\n  Empty(const Empty& from);\n\n  inline Empty& operator=(const Empty& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* 
GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Empty& default_instance();\n\n  static const Empty* internal_default_instance();\n\n  void UnsafeArenaSwap(Empty* other);\n  void Swap(Empty* other);\n\n  // implements Message ----------------------------------------------\n\n  inline Empty* New() const { return New(NULL); }\n\n  Empty* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Empty& from);\n  void MergeFrom(const Empty& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Empty* other);\n  void UnsafeMergeFrom(const Empty& from);\n  protected:\n  explicit Empty(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  
::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.Empty)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fempty_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fempty_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fempty_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fempty_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Empty> Empty_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// Empty\n\ninline const Empty* Empty::internal_default_instance() {\n  return &Empty_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace protobuf\n}  // namespace google\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_google_2fprotobuf_2fempty_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/empty.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption go_package = \"github.com/golang/protobuf/ptypes/empty\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"EmptyProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\noption cc_enable_arenas = true;\n\n// A generic empty message that you can re-use to avoid defining duplicated\n// empty messages in your APIs. A typical example is to use it as the request\n// or the response type of an API method. For instance:\n//\n//     service Foo {\n//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n//     }\n//\n// The JSON representation for `Empty` is empty JSON object `{}`.\nmessage Empty {}\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/extension_set.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// This header is logically internal, but is made public because it is used\n// from protocol-compiler-generated code, which may reside in other components.\n\n#ifndef GOOGLE_PROTOBUF_EXTENSION_SET_H__\n#define GOOGLE_PROTOBUF_EXTENSION_SET_H__\n\n#include <vector>\n#include <map>\n#include <utility>\n#include <string>\n\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/logging.h>\n#include <google/protobuf/stubs/once.h>\n\n#include <google/protobuf/repeated_field.h>\n\nnamespace google {\n\nnamespace protobuf {\n  class Arena;\n  class Descriptor;                                    // descriptor.h\n  class FieldDescriptor;                               // descriptor.h\n  class DescriptorPool;                                // descriptor.h\n  class MessageLite;                                   // message_lite.h\n  class Message;                                       // message.h\n  class MessageFactory;                                // message.h\n  class UnknownFieldSet;                               // unknown_field_set.h\n  namespace io {\n    class CodedInputStream;                              // coded_stream.h\n    class CodedOutputStream;                             // coded_stream.h\n  }\n  namespace internal {\n    class FieldSkipper;                   
               // wire_format_lite.h\n  }\n}\n\nnamespace protobuf {\nnamespace internal {\n\n// Used to store values of type WireFormatLite::FieldType without having to\n// #include wire_format_lite.h.  Also, ensures that we use only one byte to\n// store these values, which is important to keep the layout of\n// ExtensionSet::Extension small.\ntypedef uint8 FieldType;\n\n// A function which, given an integer value, returns true if the number\n// matches one of the defined values for the corresponding enum type.  This\n// is used with RegisterEnumExtension, below.\ntypedef bool EnumValidityFunc(int number);\n\n// Version of the above which takes an argument.  This is needed to deal with\n// extensions that are not compiled in.\ntypedef bool EnumValidityFuncWithArg(const void* arg, int number);\n\n// Information about a registered extension.\nstruct ExtensionInfo {\n  inline ExtensionInfo() {}\n  inline ExtensionInfo(FieldType type_param, bool isrepeated, bool ispacked)\n      : type(type_param), is_repeated(isrepeated), is_packed(ispacked),\n        descriptor(NULL) {}\n\n  FieldType type;\n  bool is_repeated;\n  bool is_packed;\n\n  struct EnumValidityCheck {\n    EnumValidityFuncWithArg* func;\n    const void* arg;\n  };\n\n  union {\n    EnumValidityCheck enum_validity_check;\n    const MessageLite* message_prototype;\n  };\n\n  // The descriptor for this extension, if one exists and is known.  May be\n  // NULL.  Must not be NULL if the descriptor for the extension does not\n  // live in the same pool as the descriptor for the containing type.\n  const FieldDescriptor* descriptor;\n};\n\n// Abstract interface for an object which looks up extension definitions.  
Used\n// when parsing.\nclass LIBPROTOBUF_EXPORT ExtensionFinder {\n public:\n  virtual ~ExtensionFinder();\n\n  // Find the extension with the given containing type and number.\n  virtual bool Find(int number, ExtensionInfo* output) = 0;\n};\n\n// Implementation of ExtensionFinder which finds extensions defined in .proto\n// files which have been compiled into the binary.\nclass LIBPROTOBUF_EXPORT GeneratedExtensionFinder : public ExtensionFinder {\n public:\n  GeneratedExtensionFinder(const MessageLite* containing_type)\n      : containing_type_(containing_type) {}\n  virtual ~GeneratedExtensionFinder() {}\n\n  // Returns true and fills in *output if found, otherwise returns false.\n  virtual bool Find(int number, ExtensionInfo* output);\n\n private:\n  const MessageLite* containing_type_;\n};\n\n// A FieldSkipper used for parsing MessageSet.\nclass MessageSetFieldSkipper;\n\n// Note:  extension_set_heavy.cc defines DescriptorPoolExtensionFinder for\n// finding extensions from a DescriptorPool.\n\n// This is an internal helper class intended for use within the protocol buffer\n// library and generated classes.  Clients should not use it directly.  Instead,\n// use the generated accessors such as GetExtension() of the class being\n// extended.\n//\n// This class manages extensions for a protocol message object.  The\n// message's HasExtension(), GetExtension(), MutableExtension(), and\n// ClearExtension() methods are just thin wrappers around the embedded\n// ExtensionSet.  When parsing, if a tag number is encountered which is\n// inside one of the message type's extension ranges, the tag is passed\n// off to the ExtensionSet for parsing.  Etc.\nclass LIBPROTOBUF_EXPORT ExtensionSet {\n public:\n  ExtensionSet();\n  explicit ExtensionSet(::google::protobuf::Arena* arena);\n  ~ExtensionSet();\n\n  // These are called at startup by protocol-compiler-generated code to\n  // register known extensions.  
The registrations are used by ParseField()\n  // to look up extensions for parsed field numbers.  Note that dynamic parsing\n  // does not use ParseField(); only protocol-compiler-generated parsing\n  // methods do.\n  static void RegisterExtension(const MessageLite* containing_type,\n                                int number, FieldType type,\n                                bool is_repeated, bool is_packed);\n  static void RegisterEnumExtension(const MessageLite* containing_type,\n                                    int number, FieldType type,\n                                    bool is_repeated, bool is_packed,\n                                    EnumValidityFunc* is_valid);\n  static void RegisterMessageExtension(const MessageLite* containing_type,\n                                       int number, FieldType type,\n                                       bool is_repeated, bool is_packed,\n                                       const MessageLite* prototype);\n\n  // =================================================================\n\n  // Add all fields which are currently present to the given vector.  This\n  // is useful to implement Reflection::ListFields().\n  void AppendToList(const Descriptor* containing_type,\n                    const DescriptorPool* pool,\n                    std::vector<const FieldDescriptor*>* output) const;\n\n  // =================================================================\n  // Accessors\n  //\n  // Generated message classes include type-safe templated wrappers around\n  // these methods.  Generally you should use those rather than call these\n  // directly, unless you are doing low-level memory management.\n  //\n  // When calling any of these accessors, the extension number requested\n  // MUST exist in the DescriptorPool provided to the constructor.  Otherwise,\n  // the method will fail an assert.  
Normally, though, you would not call\n  // these directly; you would either call the generated accessors of your\n  // message class (e.g. GetExtension()) or you would call the accessors\n  // of the reflection interface.  In both cases, it is impossible to\n  // trigger this assert failure:  the generated accessors only accept\n  // linked-in extension types as parameters, while the Reflection interface\n  // requires you to provide the FieldDescriptor describing the extension.\n  //\n  // When calling any of these accessors, a protocol-compiler-generated\n  // implementation of the extension corresponding to the number MUST\n  // be linked in, and the FieldDescriptor used to refer to it MUST be\n  // the one generated by that linked-in code.  Otherwise, the method will\n  // die on an assert failure.  The message objects returned by the message\n  // accessors are guaranteed to be of the correct linked-in type.\n  //\n  // These methods pretty much match Reflection except that:\n  // - They're not virtual.\n  // - They identify fields by number rather than FieldDescriptors.\n  // - They identify enum values using integers rather than descriptors.\n  // - Strings provide Mutable() in addition to Set() accessors.\n\n  bool Has(int number) const;\n  int ExtensionSize(int number) const;   // Size of a repeated extension.\n  int NumExtensions() const;  // The number of extensions\n  FieldType ExtensionType(int number) const;\n  void ClearExtension(int number);\n\n  // singular fields -------------------------------------------------\n\n  int32  GetInt32 (int number, int32  default_value) const;\n  int64  GetInt64 (int number, int64  default_value) const;\n  uint32 GetUInt32(int number, uint32 default_value) const;\n  uint64 GetUInt64(int number, uint64 default_value) const;\n  float  GetFloat (int number, float  default_value) const;\n  double GetDouble(int number, double default_value) const;\n  bool   GetBool  (int number, bool   default_value) const;\n  int    
GetEnum  (int number, int    default_value) const;\n  const string & GetString (int number, const string&  default_value) const;\n  const MessageLite& GetMessage(int number,\n                                const MessageLite& default_value) const;\n  const MessageLite& GetMessage(int number, const Descriptor* message_type,\n                                MessageFactory* factory) const;\n\n  // |descriptor| may be NULL so long as it is known that the descriptor for\n  // the extension lives in the same pool as the descriptor for the containing\n  // type.\n#define desc const FieldDescriptor* descriptor  // avoid line wrapping\n  void SetInt32 (int number, FieldType type, int32  value, desc);\n  void SetInt64 (int number, FieldType type, int64  value, desc);\n  void SetUInt32(int number, FieldType type, uint32 value, desc);\n  void SetUInt64(int number, FieldType type, uint64 value, desc);\n  void SetFloat (int number, FieldType type, float  value, desc);\n  void SetDouble(int number, FieldType type, double value, desc);\n  void SetBool  (int number, FieldType type, bool   value, desc);\n  void SetEnum  (int number, FieldType type, int    value, desc);\n  void SetString(int number, FieldType type, const string& value, desc);\n  string * MutableString (int number, FieldType type, desc);\n  MessageLite* MutableMessage(int number, FieldType type,\n                              const MessageLite& prototype, desc);\n  MessageLite* MutableMessage(const FieldDescriptor* descriptor,\n                              MessageFactory* factory);\n  // Adds the given message to the ExtensionSet, taking ownership of the\n  // message object. 
Existing message with the same number will be deleted.\n  // If \"message\" is NULL, this is equivalent to \"ClearExtension(number)\".\n  void SetAllocatedMessage(int number, FieldType type,\n                           const FieldDescriptor* descriptor,\n                           MessageLite* message);\n  void UnsafeArenaSetAllocatedMessage(int number, FieldType type,\n                                      const FieldDescriptor* descriptor,\n                                      MessageLite* message);\n  MessageLite* ReleaseMessage(int number, const MessageLite& prototype);\n  MessageLite* UnsafeArenaReleaseMessage(\n      int number, const MessageLite& prototype);\n\n  MessageLite* ReleaseMessage(const FieldDescriptor* descriptor,\n                              MessageFactory* factory);\n  MessageLite* UnsafeArenaReleaseMessage(const FieldDescriptor* descriptor,\n                                         MessageFactory* factory);\n#undef desc\n  ::google::protobuf::Arena* GetArenaNoVirtual() const { return arena_; }\n\n  // repeated fields -------------------------------------------------\n\n  // Fetches a RepeatedField extension by number; returns |default_value|\n  // if no such extension exists. User should not touch this directly; it is\n  // used by the GetRepeatedExtension() method.\n  const void* GetRawRepeatedField(int number, const void* default_value) const;\n  // Fetches a mutable version of a RepeatedField extension by number,\n  // instantiating one if none exists. Similar to above, user should not use\n  // this directly; it underlies MutableRepeatedExtension().\n  void* MutableRawRepeatedField(int number, FieldType field_type,\n                                bool packed, const FieldDescriptor* desc);\n\n  // This is an overload of MutableRawRepeatedField to maintain compatibility\n  // with old code using a previous API. 
This version of\n  // MutableRawRepeatedField() will GOOGLE_CHECK-fail on a missing extension.\n  // (E.g.: borg/clients/internal/proto1/proto2_reflection.cc.)\n  void* MutableRawRepeatedField(int number);\n\n  int32  GetRepeatedInt32 (int number, int index) const;\n  int64  GetRepeatedInt64 (int number, int index) const;\n  uint32 GetRepeatedUInt32(int number, int index) const;\n  uint64 GetRepeatedUInt64(int number, int index) const;\n  float  GetRepeatedFloat (int number, int index) const;\n  double GetRepeatedDouble(int number, int index) const;\n  bool   GetRepeatedBool  (int number, int index) const;\n  int    GetRepeatedEnum  (int number, int index) const;\n  const string & GetRepeatedString (int number, int index) const;\n  const MessageLite& GetRepeatedMessage(int number, int index) const;\n\n  void SetRepeatedInt32 (int number, int index, int32  value);\n  void SetRepeatedInt64 (int number, int index, int64  value);\n  void SetRepeatedUInt32(int number, int index, uint32 value);\n  void SetRepeatedUInt64(int number, int index, uint64 value);\n  void SetRepeatedFloat (int number, int index, float  value);\n  void SetRepeatedDouble(int number, int index, double value);\n  void SetRepeatedBool  (int number, int index, bool   value);\n  void SetRepeatedEnum  (int number, int index, int    value);\n  void SetRepeatedString(int number, int index, const string& value);\n  string * MutableRepeatedString (int number, int index);\n  MessageLite* MutableRepeatedMessage(int number, int index);\n\n#define desc const FieldDescriptor* descriptor  // avoid line wrapping\n  void AddInt32 (int number, FieldType type, bool packed, int32  value, desc);\n  void AddInt64 (int number, FieldType type, bool packed, int64  value, desc);\n  void AddUInt32(int number, FieldType type, bool packed, uint32 value, desc);\n  void AddUInt64(int number, FieldType type, bool packed, uint64 value, desc);\n  void AddFloat (int number, FieldType type, bool packed, float  value, desc);\n  void 
AddDouble(int number, FieldType type, bool packed, double value, desc);\n  void AddBool  (int number, FieldType type, bool packed, bool   value, desc);\n  void AddEnum  (int number, FieldType type, bool packed, int    value, desc);\n  void AddString(int number, FieldType type, const string& value, desc);\n  string * AddString (int number, FieldType type, desc);\n  MessageLite* AddMessage(int number, FieldType type,\n                          const MessageLite& prototype, desc);\n  MessageLite* AddMessage(const FieldDescriptor* descriptor,\n                          MessageFactory* factory);\n  void AddAllocatedMessage(const FieldDescriptor* descriptor,\n                           MessageLite* new_entry);\n#undef desc\n\n  void RemoveLast(int number);\n  MessageLite* ReleaseLast(int number);\n  void SwapElements(int number, int index1, int index2);\n\n  // -----------------------------------------------------------------\n  // TODO(kenton):  Hardcore memory management accessors\n\n  // =================================================================\n  // convenience methods for implementing methods of Message\n  //\n  // These could all be implemented in terms of the other methods of this\n  // class, but providing them here helps keep the generated code size down.\n\n  void Clear();\n  void MergeFrom(const ExtensionSet& other);\n  void Swap(ExtensionSet* other);\n  void SwapExtension(ExtensionSet* other, int number);\n  bool IsInitialized() const;\n\n  // Parses a single extension from the input. The input should start out\n  // positioned immediately after the tag.\n  bool ParseField(uint32 tag, io::CodedInputStream* input,\n                  ExtensionFinder* extension_finder,\n                  FieldSkipper* field_skipper);\n\n  // Specific versions for lite or full messages (constructs the appropriate\n  // FieldSkipper automatically).  
|containing_type| is the default\n  // instance for the containing message; it is used only to look up the\n  // extension by number.  See RegisterExtension(), above.  Unlike the other\n  // methods of ExtensionSet, this only works for generated message types --\n  // it looks up extensions registered using RegisterExtension().\n  bool ParseField(uint32 tag, io::CodedInputStream* input,\n                  const MessageLite* containing_type);\n  bool ParseField(uint32 tag, io::CodedInputStream* input,\n                  const Message* containing_type,\n                  UnknownFieldSet* unknown_fields);\n  bool ParseField(uint32 tag, io::CodedInputStream* input,\n                  const MessageLite* containing_type,\n                  io::CodedOutputStream* unknown_fields);\n\n  // Parse an entire message in MessageSet format.  Such messages have no\n  // fields, only extensions.\n  bool ParseMessageSet(io::CodedInputStream* input,\n                       ExtensionFinder* extension_finder,\n                       MessageSetFieldSkipper* field_skipper);\n\n  // Specific versions for lite or full messages (constructs the appropriate\n  // FieldSkipper automatically).\n  bool ParseMessageSet(io::CodedInputStream* input,\n                       const MessageLite* containing_type);\n  bool ParseMessageSet(io::CodedInputStream* input,\n                       const Message* containing_type,\n                       UnknownFieldSet* unknown_fields);\n\n  // Write all extension fields with field numbers in the range\n  //   [start_field_number, end_field_number)\n  // to the output stream, using the cached sizes computed when ByteSize() was\n  // last called.  
Note that the range bounds are inclusive-exclusive.\n  void SerializeWithCachedSizes(int start_field_number,\n                                int end_field_number,\n                                io::CodedOutputStream* output) const;\n\n  // Same as SerializeWithCachedSizes, but without any bounds checking.\n  // The caller must ensure that target has sufficient capacity for the\n  // serialized extensions.\n  //\n  // Returns a pointer past the last written byte.\n  uint8* InternalSerializeWithCachedSizesToArray(int start_field_number,\n                                                 int end_field_number,\n                                                 bool deterministic,\n                                                 uint8* target) const;\n\n  // Like above but serializes in MessageSet format.\n  void SerializeMessageSetWithCachedSizes(io::CodedOutputStream* output) const;\n  uint8* InternalSerializeMessageSetWithCachedSizesToArray(bool deterministic,\n                                                           uint8* target) const;\n\n  // For backward-compatibility, versions of two of the above methods that\n  // are never forced to serialize deterministically.\n  uint8* SerializeWithCachedSizesToArray(int start_field_number,\n                                         int end_field_number,\n                                         uint8* target) const;\n  uint8* SerializeMessageSetWithCachedSizesToArray(uint8* target) const;\n\n  // Returns the total serialized size of all the extensions.\n  size_t ByteSize() const;\n\n  // Like ByteSize() but uses MessageSet format.\n  size_t MessageSetByteSize() const;\n\n  // Returns (an estimate of) the total number of bytes used for storing the\n  // extensions in memory, excluding sizeof(*this).  If the ExtensionSet is\n  // for a lite message (and thus possibly contains lite messages), the results\n  // are undefined (might work, might crash, might corrupt data, might not even\n  // be linked in).  
It's up to the protocol compiler to avoid calling this on\n  // such ExtensionSets (easy enough since lite messages don't implement\n  // SpaceUsed()).\n  int SpaceUsedExcludingSelf() const;\n\n private:\n\n  // Interface of a lazily parsed singular message extension.\n  class LIBPROTOBUF_EXPORT LazyMessageExtension {\n   public:\n    LazyMessageExtension() {}\n    virtual ~LazyMessageExtension() {}\n\n    virtual LazyMessageExtension* New(::google::protobuf::Arena* arena) const = 0;\n    virtual const MessageLite& GetMessage(\n        const MessageLite& prototype) const = 0;\n    virtual MessageLite* MutableMessage(const MessageLite& prototype) = 0;\n    virtual void SetAllocatedMessage(MessageLite *message) = 0;\n    virtual void UnsafeArenaSetAllocatedMessage(MessageLite *message) = 0;\n    virtual MessageLite* ReleaseMessage(const MessageLite& prototype) = 0;\n    virtual MessageLite* UnsafeArenaReleaseMessage(\n        const MessageLite& prototype) = 0;\n\n    virtual bool IsInitialized() const = 0;\n    virtual int ByteSize() const = 0;\n    virtual int SpaceUsed() const = 0;\n\n    virtual void MergeFrom(const LazyMessageExtension& other) = 0;\n    virtual void Clear() = 0;\n\n    virtual bool ReadMessage(const MessageLite& prototype,\n                             io::CodedInputStream* input) = 0;\n    virtual void WriteMessage(int number,\n                              io::CodedOutputStream* output) const = 0;\n    virtual uint8* WriteMessageToArray(int number, uint8* target) const = 0;\n    virtual uint8* InternalWriteMessageToArray(int number, bool,\n                                               uint8* target) const {\n      // TODO(gpike): make this pure virtual. 
This is a placeholder because we\n      // need to update third_party/upb, for example.\n      return WriteMessageToArray(number, target);\n    }\n\n   private:\n    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(LazyMessageExtension);\n  };\n  struct Extension {\n    // The order of these fields packs Extension into 24 bytes when using 8\n    // byte alignment. Consider this when adding or removing fields here.\n    union {\n      int32                 int32_value;\n      int64                 int64_value;\n      uint32                uint32_value;\n      uint64                uint64_value;\n      float                 float_value;\n      double                double_value;\n      bool                  bool_value;\n      int                   enum_value;\n      string*               string_value;\n      MessageLite*          message_value;\n      LazyMessageExtension* lazymessage_value;\n\n      RepeatedField   <int32      >* repeated_int32_value;\n      RepeatedField   <int64      >* repeated_int64_value;\n      RepeatedField   <uint32     >* repeated_uint32_value;\n      RepeatedField   <uint64     >* repeated_uint64_value;\n      RepeatedField   <float      >* repeated_float_value;\n      RepeatedField   <double     >* repeated_double_value;\n      RepeatedField   <bool       >* repeated_bool_value;\n      RepeatedField   <int        >* repeated_enum_value;\n      RepeatedPtrField<string     >* repeated_string_value;\n      RepeatedPtrField<MessageLite>* repeated_message_value;\n    };\n\n    FieldType type;\n    bool is_repeated;\n\n    // For singular types, indicates if the extension is \"cleared\".  This\n    // happens when an extension is set and then later cleared by the caller.\n    // We want to keep the Extension object around for reuse, so instead of\n    // removing it from the map, we just set is_cleared = true.  
This has no\n    // meaning for repeated types; for those, the size of the RepeatedField\n    // simply becomes zero when cleared.\n    bool is_cleared : 4;\n\n    // For singular message types, indicates whether lazy parsing is enabled\n    // for this extension. This field is only valid when type == TYPE_MESSAGE\n    // and !is_repeated because we only support lazy parsing for singular\n    // message types currently. If is_lazy = true, the extension is stored in\n    // lazymessage_value. Otherwise, the extension will be message_value.\n    bool is_lazy : 4;\n\n    // For repeated types, this indicates if the [packed=true] option is set.\n    bool is_packed;\n\n    // For packed fields, the size of the packed data is recorded here when\n    // ByteSize() is called then used during serialization.\n    // TODO(kenton):  Use atomic<int> when C++ supports it.\n    mutable int cached_size;\n\n    // The descriptor for this extension, if one exists and is known.  May be\n    // NULL.  Must not be NULL if the descriptor for the extension does not\n    // live in the same pool as the descriptor for the containing type.\n    const FieldDescriptor* descriptor;\n\n    // Some helper methods for operations on a single Extension.\n    void SerializeFieldWithCachedSizes(\n        int number,\n        io::CodedOutputStream* output) const;\n    uint8* InternalSerializeFieldWithCachedSizesToArray(\n        int number,\n        bool deterministic,\n        uint8* target) const;\n    void SerializeMessageSetItemWithCachedSizes(\n        int number,\n        io::CodedOutputStream* output) const;\n    uint8* InternalSerializeMessageSetItemWithCachedSizesToArray(\n        int number,\n        bool deterministic,\n        uint8* target) const;\n    size_t ByteSize(int number) const;\n    size_t MessageSetItemByteSize(int number) const;\n    void Clear();\n    int GetSize() const;\n    void Free();\n    int SpaceUsedExcludingSelf() const;\n  };\n  typedef std::map<int, Extension> 
ExtensionMap;\n\n\n  // Merges existing Extension from other_extension\n  void InternalExtensionMergeFrom(int number, const Extension& other_extension);\n\n  // Returns true and fills field_number and extension if extension is found.\n  // Note to support packed repeated field compatibility, it also fills whether\n  // the tag on wire is packed, which can be different from\n  // extension->is_packed (whether packed=true is specified).\n  bool FindExtensionInfoFromTag(uint32 tag, ExtensionFinder* extension_finder,\n                                int* field_number, ExtensionInfo* extension,\n                                bool* was_packed_on_wire);\n\n  // Returns true and fills extension if extension is found.\n  // Note to support packed repeated field compatibility, it also fills whether\n  // the tag on wire is packed, which can be different from\n  // extension->is_packed (whether packed=true is specified).\n  bool FindExtensionInfoFromFieldNumber(int wire_type, int field_number,\n                                        ExtensionFinder* extension_finder,\n                                        ExtensionInfo* extension,\n                                        bool* was_packed_on_wire);\n\n  // Parses a single extension from the input. The input should start out\n  // positioned immediately after the wire tag. 
This method is called in\n  // ParseField() after field number and was_packed_on_wire is extracted from\n  // the wire tag and ExtensionInfo is found by the field number.\n  bool ParseFieldWithExtensionInfo(int field_number,\n                                   bool was_packed_on_wire,\n                                   const ExtensionInfo& extension,\n                                   io::CodedInputStream* input,\n                                   FieldSkipper* field_skipper);\n\n  // Like ParseField(), but this method may parse singular message extensions\n  // lazily depending on the value of FLAGS_eagerly_parse_message_sets.\n  bool ParseFieldMaybeLazily(int wire_type, int field_number,\n                             io::CodedInputStream* input,\n                             ExtensionFinder* extension_finder,\n                             MessageSetFieldSkipper* field_skipper);\n\n  // Gets the extension with the given number, creating it if it does not\n  // already exist.  Returns true if the extension did not already exist.\n  bool MaybeNewExtension(int number, const FieldDescriptor* descriptor,\n                         Extension** result);\n\n  // Gets the repeated extension for the given descriptor, creating it if\n  // it does not exist.\n  Extension* MaybeNewRepeatedExtension(const FieldDescriptor* descriptor);\n\n  // Parse a single MessageSet item -- called just after the item group start\n  // tag has been read.\n  bool ParseMessageSetItem(io::CodedInputStream* input,\n                           ExtensionFinder* extension_finder,\n                           MessageSetFieldSkipper* field_skipper);\n\n  // Hack:  RepeatedPtrFieldBase declares ExtensionSet as a friend.  This\n  //   friendship should automatically extend to ExtensionSet::Extension, but\n  //   unfortunately some older compilers (e.g. GCC 3.4.4) do not implement this\n  //   correctly.  
So, we must provide helpers for calling methods of that\n  //   class.\n\n  // Defined in extension_set_heavy.cc.\n  static inline int RepeatedMessage_SpaceUsedExcludingSelf(\n      RepeatedPtrFieldBase* field);\n\n  // The Extension struct is small enough to be passed by value, so we use it\n  // directly as the value type in the map rather than use pointers.  We use\n  // a map rather than hash_map here because we expect most ExtensionSets will\n  // only contain a small number of extensions whereas hash_map is optimized\n  // for 100 elements or more.  Also, we want AppendToList() to order fields\n  // by field number.\n  ExtensionMap extensions_;\n  ::google::protobuf::Arena* arena_;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ExtensionSet);\n};\n\n// These are just for convenience...\ninline void ExtensionSet::SetString(int number, FieldType type,\n                                    const string& value,\n                                    const FieldDescriptor* descriptor) {\n  MutableString(number, type, descriptor)->assign(value);\n}\ninline void ExtensionSet::SetRepeatedString(int number, int index,\n                                            const string& value) {\n  MutableRepeatedString(number, index)->assign(value);\n}\ninline void ExtensionSet::AddString(int number, FieldType type,\n                                    const string& value,\n                                    const FieldDescriptor* descriptor) {\n  AddString(number, type, descriptor)->assign(value);\n}\n\n// ===================================================================\n// Glue for generated extension accessors\n\n// -------------------------------------------------------------------\n// Template magic\n\n// First we have a set of classes representing \"type traits\" for different\n// field types.  A type traits class knows how to implement basic accessors\n// for extensions of a particular type given an ExtensionSet.  
The signature\n// for a type traits class looks like this:\n//\n//   class TypeTraits {\n//    public:\n//     typedef ? ConstType;\n//     typedef ? MutableType;\n//     // TypeTraits for singular fields and repeated fields will define the\n//     // symbol \"Singular\" or \"Repeated\" respectively. These two symbols will\n//     // be used in extension accessors to distinguish between singular\n//     // extensions and repeated extensions. If the TypeTraits for the passed\n//     // in extension doesn't have the expected symbol defined, it means the\n//     // user is passing a repeated extension to a singular accessor, or the\n//     // opposite. In that case the C++ compiler will generate an error\n//     // message \"no matching member function\" to inform the user.\n//     typedef ? Singular\n//     typedef ? Repeated\n//\n//     static inline ConstType Get(int number, const ExtensionSet& set);\n//     static inline void Set(int number, ConstType value, ExtensionSet* set);\n//     static inline MutableType Mutable(int number, ExtensionSet* set);\n//\n//     // Variants for repeated fields.\n//     static inline ConstType Get(int number, const ExtensionSet& set,\n//                                 int index);\n//     static inline void Set(int number, int index,\n//                            ConstType value, ExtensionSet* set);\n//     static inline MutableType Mutable(int number, int index,\n//                                       ExtensionSet* set);\n//     static inline void Add(int number, ConstType value, ExtensionSet* set);\n//     static inline MutableType Add(int number, ExtensionSet* set);\n//   };\n//\n// Not all of these methods make sense for all field types.  For example, the\n// \"Mutable\" methods only make sense for strings and messages, and the\n// repeated methods only make sense for repeated types.  So, each type\n// traits class implements only the set of methods from this signature that it\n// actually supports.  
This will cause a compiler error if the user tries to\n// access an extension using a method that doesn't make sense for its type.\n// For example, if \"foo\" is an extension of type \"optional int32\", then if you\n// try to write code like:\n//   my_message.MutableExtension(foo)\n// you will get a compile error because PrimitiveTypeTraits<int32> does not\n// have a \"Mutable()\" method.\n\n// -------------------------------------------------------------------\n// PrimitiveTypeTraits\n\n// Since the ExtensionSet has different methods for each primitive type,\n// we must explicitly define the methods of the type traits class for each\n// known type.\ntemplate <typename Type>\nclass PrimitiveTypeTraits {\n public:\n  typedef Type ConstType;\n  typedef Type MutableType;\n  typedef PrimitiveTypeTraits<Type> Singular;\n\n  static inline ConstType Get(int number, const ExtensionSet& set,\n                              ConstType default_value);\n  static inline void Set(int number, FieldType field_type,\n                         ConstType value, ExtensionSet* set);\n};\n\ntemplate <typename Type>\nclass RepeatedPrimitiveTypeTraits {\n public:\n  typedef Type ConstType;\n  typedef Type MutableType;\n  typedef RepeatedPrimitiveTypeTraits<Type> Repeated;\n\n  typedef RepeatedField<Type> RepeatedFieldType;\n\n  static inline Type Get(int number, const ExtensionSet& set, int index);\n  static inline void Set(int number, int index, Type value, ExtensionSet* set);\n  static inline void Add(int number, FieldType field_type,\n                         bool is_packed, Type value, ExtensionSet* set);\n\n  static inline const RepeatedField<ConstType>&\n      GetRepeated(int number, const ExtensionSet& set);\n  static inline RepeatedField<Type>*\n      MutableRepeated(int number, FieldType field_type,\n                      bool is_packed, ExtensionSet* set);\n\n  static const RepeatedFieldType* GetDefaultRepeatedField();\n};\n\nLIBPROTOBUF_EXPORT extern ProtobufOnceType 
repeated_primitive_generic_type_traits_once_init_;\n\nclass LIBPROTOBUF_EXPORT RepeatedPrimitiveGenericTypeTraits {\n private:\n  template<typename Type> friend class RepeatedPrimitiveTypeTraits;\n  static void InitializeDefaultRepeatedFields();\n  static void DestroyDefaultRepeatedFields();\n  static const RepeatedField<int32>* default_repeated_field_int32_;\n  static const RepeatedField<int64>* default_repeated_field_int64_;\n  static const RepeatedField<uint32>* default_repeated_field_uint32_;\n  static const RepeatedField<uint64>* default_repeated_field_uint64_;\n  static const RepeatedField<double>* default_repeated_field_double_;\n  static const RepeatedField<float>* default_repeated_field_float_;\n  static const RepeatedField<bool>* default_repeated_field_bool_;\n};\n\n#define PROTOBUF_DEFINE_PRIMITIVE_TYPE(TYPE, METHOD)                       \\\ntemplate<> inline TYPE PrimitiveTypeTraits<TYPE>::Get(                     \\\n    int number, const ExtensionSet& set, TYPE default_value) {             \\\n  return set.Get##METHOD(number, default_value);                           \\\n}                                                                          \\\ntemplate<> inline void PrimitiveTypeTraits<TYPE>::Set(                     \\\n    int number, FieldType field_type, TYPE value, ExtensionSet* set) {     \\\n  set->Set##METHOD(number, field_type, value, NULL);                       \\\n}                                                                          \\\n                                                                           \\\ntemplate<> inline TYPE RepeatedPrimitiveTypeTraits<TYPE>::Get(             \\\n    int number, const ExtensionSet& set, int index) {                      \\\n  return set.GetRepeated##METHOD(number, index);                           \\\n}                                                                          \\\ntemplate<> inline void RepeatedPrimitiveTypeTraits<TYPE>::Set(             \\\n    int number, int index, 
TYPE value, ExtensionSet* set) {                \\\n  set->SetRepeated##METHOD(number, index, value);                          \\\n}                                                                          \\\ntemplate<> inline void RepeatedPrimitiveTypeTraits<TYPE>::Add(             \\\n    int number, FieldType field_type, bool is_packed,                      \\\n    TYPE value, ExtensionSet* set) {                                       \\\n  set->Add##METHOD(number, field_type, is_packed, value, NULL);            \\\n}                                                                          \\\ntemplate<> inline const RepeatedField<TYPE>*                               \\\n    RepeatedPrimitiveTypeTraits<TYPE>::GetDefaultRepeatedField() {         \\\n  ::google::protobuf::GoogleOnceInit(                                                          \\\n      &repeated_primitive_generic_type_traits_once_init_,                  \\\n      &RepeatedPrimitiveGenericTypeTraits::InitializeDefaultRepeatedFields); \\\n  return RepeatedPrimitiveGenericTypeTraits::                              \\\n      default_repeated_field_##TYPE##_;                                    \\\n}                                                                          \\\ntemplate<> inline const RepeatedField<TYPE>&                               \\\n    RepeatedPrimitiveTypeTraits<TYPE>::GetRepeated(int number,             \\\n                                               const ExtensionSet& set) {  \\\n  return *reinterpret_cast<const RepeatedField<TYPE>*>(                    \\\n                            set.GetRawRepeatedField(                       \\\n                                number, GetDefaultRepeatedField()));       \\\n}                                                                          \\\ntemplate<> inline RepeatedField<TYPE>*                                     \\\n    RepeatedPrimitiveTypeTraits<TYPE>::MutableRepeated(int number,         \\\n                              
                     FieldType field_type,   \\\n                                                   bool is_packed,         \\\n                                                   ExtensionSet* set) {    \\\n  return reinterpret_cast<RepeatedField<TYPE>*>(                           \\\n      set->MutableRawRepeatedField(number, field_type, is_packed, NULL));  \\\n}\n\nPROTOBUF_DEFINE_PRIMITIVE_TYPE( int32,  Int32)\nPROTOBUF_DEFINE_PRIMITIVE_TYPE( int64,  Int64)\nPROTOBUF_DEFINE_PRIMITIVE_TYPE(uint32, UInt32)\nPROTOBUF_DEFINE_PRIMITIVE_TYPE(uint64, UInt64)\nPROTOBUF_DEFINE_PRIMITIVE_TYPE( float,  Float)\nPROTOBUF_DEFINE_PRIMITIVE_TYPE(double, Double)\nPROTOBUF_DEFINE_PRIMITIVE_TYPE(  bool,   Bool)\n\n#undef PROTOBUF_DEFINE_PRIMITIVE_TYPE\n\n// -------------------------------------------------------------------\n// StringTypeTraits\n\n// Strings support both Set() and Mutable().\nclass LIBPROTOBUF_EXPORT StringTypeTraits {\n public:\n  typedef const string& ConstType;\n  typedef string* MutableType;\n  typedef StringTypeTraits Singular;\n\n  static inline const string& Get(int number, const ExtensionSet& set,\n                                  ConstType default_value) {\n    return set.GetString(number, default_value);\n  }\n  static inline void Set(int number, FieldType field_type,\n                         const string& value, ExtensionSet* set) {\n    set->SetString(number, field_type, value, NULL);\n  }\n  static inline string* Mutable(int number, FieldType field_type,\n                                ExtensionSet* set) {\n    return set->MutableString(number, field_type, NULL);\n  }\n};\n\nLIBPROTOBUF_EXPORT extern ProtobufOnceType repeated_string_type_traits_once_init_;\n\nclass LIBPROTOBUF_EXPORT RepeatedStringTypeTraits {\n public:\n  typedef const string& ConstType;\n  typedef string* MutableType;\n  typedef RepeatedStringTypeTraits Repeated;\n\n  typedef RepeatedPtrField<string> RepeatedFieldType;\n\n  static inline const string& Get(int number, const 
ExtensionSet& set,\n                                  int index) {\n    return set.GetRepeatedString(number, index);\n  }\n  static inline void Set(int number, int index,\n                         const string& value, ExtensionSet* set) {\n    set->SetRepeatedString(number, index, value);\n  }\n  static inline string* Mutable(int number, int index, ExtensionSet* set) {\n    return set->MutableRepeatedString(number, index);\n  }\n  static inline void Add(int number, FieldType field_type,\n                         bool /*is_packed*/, const string& value,\n                         ExtensionSet* set) {\n    set->AddString(number, field_type, value, NULL);\n  }\n  static inline string* Add(int number, FieldType field_type,\n                            ExtensionSet* set) {\n    return set->AddString(number, field_type, NULL);\n  }\n  static inline const RepeatedPtrField<string>&\n      GetRepeated(int number, const ExtensionSet& set) {\n    return *reinterpret_cast<const RepeatedPtrField<string>*>(\n        set.GetRawRepeatedField(number, GetDefaultRepeatedField()));\n  }\n\n  static inline RepeatedPtrField<string>*\n      MutableRepeated(int number, FieldType field_type,\n                      bool is_packed, ExtensionSet* set) {\n    return reinterpret_cast<RepeatedPtrField<string>*>(\n        set->MutableRawRepeatedField(number, field_type,\n                                     is_packed, NULL));\n  }\n\n  static const RepeatedFieldType* GetDefaultRepeatedField() {\n    ::google::protobuf::GoogleOnceInit(&repeated_string_type_traits_once_init_,\n                   &InitializeDefaultRepeatedFields);\n    return default_repeated_field_;\n  }\n\n private:\n  static void InitializeDefaultRepeatedFields();\n  static void DestroyDefaultRepeatedFields();\n  static const RepeatedFieldType *default_repeated_field_;\n};\n\n// -------------------------------------------------------------------\n// EnumTypeTraits\n\n// ExtensionSet represents enums using integers internally, so 
we have to\n// static_cast around.\ntemplate <typename Type, bool IsValid(int)>\nclass EnumTypeTraits {\n public:\n  typedef Type ConstType;\n  typedef Type MutableType;\n  typedef EnumTypeTraits<Type, IsValid> Singular;\n\n  static inline ConstType Get(int number, const ExtensionSet& set,\n                              ConstType default_value) {\n    return static_cast<Type>(set.GetEnum(number, default_value));\n  }\n  static inline void Set(int number, FieldType field_type,\n                         ConstType value, ExtensionSet* set) {\n    GOOGLE_DCHECK(IsValid(value));\n    set->SetEnum(number, field_type, value, NULL);\n  }\n};\n\ntemplate <typename Type, bool IsValid(int)>\nclass RepeatedEnumTypeTraits {\n public:\n  typedef Type ConstType;\n  typedef Type MutableType;\n  typedef RepeatedEnumTypeTraits<Type, IsValid> Repeated;\n\n  typedef RepeatedField<Type> RepeatedFieldType;\n\n  static inline ConstType Get(int number, const ExtensionSet& set, int index) {\n    return static_cast<Type>(set.GetRepeatedEnum(number, index));\n  }\n  static inline void Set(int number, int index,\n                         ConstType value, ExtensionSet* set) {\n    GOOGLE_DCHECK(IsValid(value));\n    set->SetRepeatedEnum(number, index, value);\n  }\n  static inline void Add(int number, FieldType field_type,\n                         bool is_packed, ConstType value, ExtensionSet* set) {\n    GOOGLE_DCHECK(IsValid(value));\n    set->AddEnum(number, field_type, is_packed, value, NULL);\n  }\n  static inline const RepeatedField<Type>& GetRepeated(int number,\n                                                       const ExtensionSet&\n                                                       set) {\n    // Hack: the `Extension` struct stores a RepeatedField<int> for enums.\n    // RepeatedField<int> cannot implicitly convert to RepeatedField<EnumType>\n    // so we need to do some casting magic. 
See message.h for similar\n    // contortions for non-extension fields.\n    return *reinterpret_cast<const RepeatedField<Type>*>(\n        set.GetRawRepeatedField(number, GetDefaultRepeatedField()));\n  }\n\n  static inline RepeatedField<Type>* MutableRepeated(int number,\n                                                     FieldType field_type,\n                                                     bool is_packed,\n                                                     ExtensionSet* set) {\n    return reinterpret_cast<RepeatedField<Type>*>(\n        set->MutableRawRepeatedField(number, field_type, is_packed, NULL));\n  }\n\n  static const RepeatedFieldType* GetDefaultRepeatedField() {\n    // Hack: as noted above, repeated enum fields are internally stored as a\n    // RepeatedField<int>. We need to be able to instantiate global static\n    // objects to return as default (empty) repeated fields on non-existent\n    // extensions. We would not be able to know a-priori all of the enum types\n    // (values of |Type|) to instantiate all of these, so we just re-use int32's\n    // default repeated field object.\n    return reinterpret_cast<const RepeatedField<Type>*>(\n        RepeatedPrimitiveTypeTraits<int32>::GetDefaultRepeatedField());\n  }\n};\n\n// -------------------------------------------------------------------\n// MessageTypeTraits\n\n// ExtensionSet guarantees that when manipulating extensions with message\n// types, the implementation used will be the compiled-in class representing\n// that type.  
So, we can static_cast down to the exact type we expect.\ntemplate <typename Type>\nclass MessageTypeTraits {\n public:\n  typedef const Type& ConstType;\n  typedef Type* MutableType;\n  typedef MessageTypeTraits<Type> Singular;\n\n  static inline ConstType Get(int number, const ExtensionSet& set,\n                              ConstType default_value) {\n    return static_cast<const Type&>(\n        set.GetMessage(number, default_value));\n  }\n  static inline MutableType Mutable(int number, FieldType field_type,\n                                    ExtensionSet* set) {\n    return static_cast<Type*>(\n      set->MutableMessage(number, field_type, Type::default_instance(), NULL));\n  }\n  static inline void SetAllocated(int number, FieldType field_type,\n                                  MutableType message, ExtensionSet* set) {\n    set->SetAllocatedMessage(number, field_type, NULL, message);\n  }\n  static inline void UnsafeArenaSetAllocated(int number, FieldType field_type,\n                                             MutableType message,\n                                             ExtensionSet* set) {\n    set->UnsafeArenaSetAllocatedMessage(number, field_type, NULL, message);\n  }\n  static inline MutableType Release(int number, FieldType /* field_type */,\n                                    ExtensionSet* set) {\n    return static_cast<Type*>(set->ReleaseMessage(\n        number, Type::default_instance()));\n  }\n  static inline MutableType UnsafeArenaRelease(int number,\n                                               FieldType /* field_type */,\n                                               ExtensionSet* set) {\n    return static_cast<Type*>(set->UnsafeArenaReleaseMessage(\n        number, Type::default_instance()));\n  }\n};\n\n// forward declaration\nclass RepeatedMessageGenericTypeTraits;\n\ntemplate <typename Type>\nclass RepeatedMessageTypeTraits {\n public:\n  typedef const Type& ConstType;\n  typedef Type* MutableType;\n  typedef 
RepeatedMessageTypeTraits<Type> Repeated;\n\n  typedef RepeatedPtrField<Type> RepeatedFieldType;\n\n  static inline ConstType Get(int number, const ExtensionSet& set, int index) {\n    return static_cast<const Type&>(set.GetRepeatedMessage(number, index));\n  }\n  static inline MutableType Mutable(int number, int index, ExtensionSet* set) {\n    return static_cast<Type*>(set->MutableRepeatedMessage(number, index));\n  }\n  static inline MutableType Add(int number, FieldType field_type,\n                                ExtensionSet* set) {\n    return static_cast<Type*>(\n        set->AddMessage(number, field_type, Type::default_instance(), NULL));\n  }\n  static inline const RepeatedPtrField<Type>& GetRepeated(int number,\n                                                          const ExtensionSet&\n                                                          set) {\n    // See notes above in RepeatedEnumTypeTraits::GetRepeated(): same\n    // casting hack applies here, because a RepeatedPtrField<MessageLite>\n    // cannot naturally become a RepeatedPtrType<Type> even though Type is\n    // presumably a message. 
google::protobuf::Message goes through similar contortions\n    // with a reinterpret_cast<>.\n    return *reinterpret_cast<const RepeatedPtrField<Type>*>(\n        set.GetRawRepeatedField(number, GetDefaultRepeatedField()));\n  }\n  static inline RepeatedPtrField<Type>* MutableRepeated(int number,\n                                                        FieldType field_type,\n                                                        bool is_packed,\n                                                        ExtensionSet* set) {\n    return reinterpret_cast<RepeatedPtrField<Type>*>(\n        set->MutableRawRepeatedField(number, field_type, is_packed, NULL));\n  }\n\n  static const RepeatedFieldType* GetDefaultRepeatedField();\n};\n\nLIBPROTOBUF_EXPORT extern ProtobufOnceType repeated_message_generic_type_traits_once_init_;\n\n// This class exists only to hold a generic default empty repeated field for all\n// message-type repeated field extensions.\nclass LIBPROTOBUF_EXPORT RepeatedMessageGenericTypeTraits {\n public:\n  typedef RepeatedPtrField< ::google::protobuf::MessageLite*> RepeatedFieldType;\n private:\n  template<typename Type> friend class RepeatedMessageTypeTraits;\n  static void InitializeDefaultRepeatedFields();\n  static void DestroyDefaultRepeatedFields();\n  static const RepeatedFieldType* default_repeated_field_;\n};\n\ntemplate<typename Type> inline\n    const typename RepeatedMessageTypeTraits<Type>::RepeatedFieldType*\n    RepeatedMessageTypeTraits<Type>::GetDefaultRepeatedField() {\n  ::google::protobuf::GoogleOnceInit(\n      &repeated_message_generic_type_traits_once_init_,\n      &RepeatedMessageGenericTypeTraits::InitializeDefaultRepeatedFields);\n  return reinterpret_cast<const RepeatedFieldType*>(\n      RepeatedMessageGenericTypeTraits::default_repeated_field_);\n}\n\n// -------------------------------------------------------------------\n// ExtensionIdentifier\n\n// This is the type of actual extension objects.  E.g. 
if you have:\n//   extends Foo with optional int32 bar = 1234;\n// then \"bar\" will be defined in C++ as:\n//   ExtensionIdentifier<Foo, PrimitiveTypeTraits<int32>, 1, false> bar(1234);\n//\n// Note that we could, in theory, supply the field number as a template\n// parameter, and thus make an instance of ExtensionIdentifier have no\n// actual contents.  However, if we did that, then using at extension\n// identifier would not necessarily cause the compiler to output any sort\n// of reference to any simple defined in the extension's .pb.o file.  Some\n// linkers will actually drop object files that are not explicitly referenced,\n// but that would be bad because it would cause this extension to not be\n// registered at static initialization, and therefore using it would crash.\n\ntemplate <typename ExtendeeType, typename TypeTraitsType,\n          FieldType field_type, bool is_packed>\nclass ExtensionIdentifier {\n public:\n  typedef TypeTraitsType TypeTraits;\n  typedef ExtendeeType Extendee;\n\n  ExtensionIdentifier(int number, typename TypeTraits::ConstType default_value)\n      : number_(number), default_value_(default_value) {}\n  inline int number() const { return number_; }\n  typename TypeTraits::ConstType default_value() const {\n    return default_value_;\n  }\n\n private:\n  const int number_;\n  typename TypeTraits::ConstType default_value_;\n};\n\n// -------------------------------------------------------------------\n// Generated accessors\n\n// This macro should be expanded in the context of a generated type which\n// has extensions.\n//\n// We use \"_proto_TypeTraits\" as a type name below because \"TypeTraits\"\n// causes problems if the class has a nested message or enum type with that\n// name and \"_TypeTraits\" is technically reserved for the C++ library since\n// it starts with an underscore followed by a capital letter.\n//\n// For similar reason, we use \"_field_type\" and \"_is_packed\" as parameter names\n// below, so that \"field_type\" 
and \"is_packed\" can be used as field names.\n#define GOOGLE_PROTOBUF_EXTENSION_ACCESSORS(CLASSNAME)                        \\\n  /* Has, Size, Clear */                                                      \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline bool HasExtension(                                                   \\\n      const ::google::protobuf::internal::ExtensionIdentifier<                          \\\n        CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) const {   \\\n    return _extensions_.Has(id.number());                                     \\\n  }                                                                           \\\n                                                                              \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline void ClearExtension(                                                 \\\n      const ::google::protobuf::internal::ExtensionIdentifier<                          \\\n        CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) {         \\\n    _extensions_.ClearExtension(id.number());                                 \\\n  }                                                                           \\\n                                                                              \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline int 
ExtensionSize(                                                   \\\n      const ::google::protobuf::internal::ExtensionIdentifier<                          \\\n        CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) const {   \\\n    return _extensions_.ExtensionSize(id.number());                           \\\n  }                                                                           \\\n                                                                              \\\n  /* Singular accessors */                                                    \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline typename _proto_TypeTraits::Singular::ConstType GetExtension(        \\\n      const ::google::protobuf::internal::ExtensionIdentifier<                          \\\n        CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) const {   \\\n    return _proto_TypeTraits::Get(id.number(), _extensions_,                  \\\n                                  id.default_value());                        \\\n  }                                                                           \\\n                                                                              \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline typename _proto_TypeTraits::Singular::MutableType MutableExtension(  \\\n      const ::google::protobuf::internal::ExtensionIdentifier<                          \\\n        CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) {         \\\n    return _proto_TypeTraits::Mutable(id.number(), _field_type,              
 \\\n                                      &_extensions_);                         \\\n  }                                                                           \\\n                                                                              \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline void SetExtension(                                                   \\\n      const ::google::protobuf::internal::ExtensionIdentifier<                          \\\n        CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id,           \\\n      typename _proto_TypeTraits::Singular::ConstType value) {                \\\n    _proto_TypeTraits::Set(id.number(), _field_type, value, &_extensions_);   \\\n  }                                                                           \\\n                                                                              \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline void SetAllocatedExtension(                                          \\\n      const ::google::protobuf::internal::ExtensionIdentifier<                          \\\n        CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id,           \\\n      typename _proto_TypeTraits::Singular::MutableType value) {              \\\n    _proto_TypeTraits::SetAllocated(id.number(), _field_type,                 \\\n                                    value, &_extensions_);                    \\\n  }                                                                           \\\n  template <typename _proto_TypeTraits,                              
         \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline void UnsafeArenaSetAllocatedExtension(                               \\\n      const ::google::protobuf::internal::ExtensionIdentifier<                          \\\n        CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id,           \\\n      typename _proto_TypeTraits::Singular::MutableType value) {              \\\n    _proto_TypeTraits::UnsafeArenaSetAllocated(id.number(), _field_type,      \\\n                                               value, &_extensions_);         \\\n  }                                                                           \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline typename _proto_TypeTraits::Singular::MutableType ReleaseExtension(  \\\n      const ::google::protobuf::internal::ExtensionIdentifier<                          \\\n        CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) {         \\\n    return _proto_TypeTraits::Release(id.number(), _field_type,               \\\n                                      &_extensions_);                         \\\n  }                                                                           \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline typename _proto_TypeTraits::Singular::MutableType                    \\\n      UnsafeArenaReleaseExtension(                                            \\\n          const 
::google::protobuf::internal::ExtensionIdentifier<                      \\\n            CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) {     \\\n    return _proto_TypeTraits::UnsafeArenaRelease(id.number(), _field_type,    \\\n                                                 &_extensions_);              \\\n  }                                                                           \\\n                                                                              \\\n  /* Repeated accessors */                                                    \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline typename _proto_TypeTraits::Repeated::ConstType GetExtension(        \\\n      const ::google::protobuf::internal::ExtensionIdentifier<                          \\\n        CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id,           \\\n      int index) const {                                                      \\\n    return _proto_TypeTraits::Get(id.number(), _extensions_, index);          \\\n  }                                                                           \\\n                                                                              \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline typename _proto_TypeTraits::Repeated::MutableType MutableExtension(  \\\n      const ::google::protobuf::internal::ExtensionIdentifier<                          \\\n        CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id,           \\\n      int index) {                                                            
\\\n    return _proto_TypeTraits::Mutable(id.number(), index, &_extensions_);     \\\n  }                                                                           \\\n                                                                              \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline void SetExtension(                                                   \\\n      const ::google::protobuf::internal::ExtensionIdentifier<                          \\\n        CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id,           \\\n      int index, typename _proto_TypeTraits::Repeated::ConstType value) {     \\\n    _proto_TypeTraits::Set(id.number(), index, value, &_extensions_);         \\\n  }                                                                           \\\n                                                                              \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline typename _proto_TypeTraits::Repeated::MutableType AddExtension(      \\\n      const ::google::protobuf::internal::ExtensionIdentifier<                          \\\n        CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id) {         \\\n    return _proto_TypeTraits::Add(id.number(), _field_type, &_extensions_);   \\\n  }                                                                           \\\n                                                                              \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,      
                  \\\n            bool _is_packed>                                                  \\\n  inline void AddExtension(                                                   \\\n      const ::google::protobuf::internal::ExtensionIdentifier<                          \\\n        CLASSNAME, _proto_TypeTraits, _field_type, _is_packed>& id,           \\\n      typename _proto_TypeTraits::Repeated::ConstType value) {                \\\n    _proto_TypeTraits::Add(id.number(), _field_type, _is_packed,              \\\n                           value, &_extensions_);                             \\\n  }                                                                           \\\n                                                                              \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline const typename _proto_TypeTraits::Repeated::RepeatedFieldType&       \\\n      GetRepeatedExtension(                                                   \\\n          const ::google::protobuf::internal::ExtensionIdentifier<                      \\\n            CLASSNAME, _proto_TypeTraits, _field_type,                        \\\n            _is_packed>& id) const {                                          \\\n    return _proto_TypeTraits::GetRepeated(id.number(), _extensions_);         \\\n  }                                                                           \\\n                                                                              \\\n  template <typename _proto_TypeTraits,                                       \\\n            ::google::protobuf::internal::FieldType _field_type,                        \\\n            bool _is_packed>                                                  \\\n  inline typename 
_proto_TypeTraits::Repeated::RepeatedFieldType*             \\\n      MutableRepeatedExtension(                                               \\\n          const ::google::protobuf::internal::ExtensionIdentifier<                      \\\n              CLASSNAME, _proto_TypeTraits, _field_type,                      \\\n              _is_packed>& id) {                                              \\\n    return _proto_TypeTraits::MutableRepeated(id.number(), _field_type,       \\\n                                              _is_packed, &_extensions_);     \\\n  }\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_EXTENSION_SET_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/field_mask.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: google/protobuf/field_mask.proto\n\n#ifndef PROTOBUF_google_2fprotobuf_2ffield_5fmask_2eproto__INCLUDED\n#define PROTOBUF_google_2fprotobuf_2ffield_5fmask_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/unknown_field_set.h>\n// @@protoc_insertion_point(includes)\n\nnamespace google {\nnamespace protobuf {\n\n// Internal implementation detail -- do not call these.\nvoid LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2ffield_5fmask_2eproto();\nvoid LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2ffield_5fmask_2eproto();\nvoid protobuf_AssignDesc_google_2fprotobuf_2ffield_5fmask_2eproto();\nvoid protobuf_ShutdownFile_google_2fprotobuf_2ffield_5fmask_2eproto();\n\nclass FieldMask;\n\n// ===================================================================\n\nclass LIBPROTOBUF_EXPORT FieldMask : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.FieldMask) */ {\n public:\n  FieldMask();\n  virtual ~FieldMask();\n\n  FieldMask(const FieldMask& from);\n\n  inline FieldMask& operator=(const FieldMask& from) {\n    
CopyFrom(from);\n    return *this;\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const FieldMask& default_instance();\n\n  static const FieldMask* internal_default_instance();\n\n  void Swap(FieldMask* other);\n\n  // implements Message ----------------------------------------------\n\n  inline FieldMask* New() const { return New(NULL); }\n\n  FieldMask* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const FieldMask& from);\n  void MergeFrom(const FieldMask& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(FieldMask* other);\n  void UnsafeMergeFrom(const FieldMask& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // repeated string paths = 1;\n  int paths_size() const;\n  void clear_paths();\n  static const 
int kPathsFieldNumber = 1;\n  const ::std::string& paths(int index) const;\n  ::std::string* mutable_paths(int index);\n  void set_paths(int index, const ::std::string& value);\n  void set_paths(int index, const char* value);\n  void set_paths(int index, const char* value, size_t size);\n  ::std::string* add_paths();\n  void add_paths(const ::std::string& value);\n  void add_paths(const char* value);\n  void add_paths(const char* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& paths() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_paths();\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.FieldMask)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> paths_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2ffield_5fmask_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2ffield_5fmask_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2ffield_5fmask_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2ffield_5fmask_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<FieldMask> FieldMask_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// FieldMask\n\n// repeated string paths = 1;\ninline int FieldMask::paths_size() const {\n  return paths_.size();\n}\ninline void FieldMask::clear_paths() {\n  paths_.Clear();\n}\ninline const ::std::string& FieldMask::paths(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FieldMask.paths)\n  return paths_.Get(index);\n}\ninline ::std::string* FieldMask::mutable_paths(int index) {\n  // 
@@protoc_insertion_point(field_mutable:google.protobuf.FieldMask.paths)\n  return paths_.Mutable(index);\n}\ninline void FieldMask::set_paths(int index, const ::std::string& value) {\n  // @@protoc_insertion_point(field_set:google.protobuf.FieldMask.paths)\n  paths_.Mutable(index)->assign(value);\n}\ninline void FieldMask::set_paths(int index, const char* value) {\n  paths_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:google.protobuf.FieldMask.paths)\n}\ninline void FieldMask::set_paths(int index, const char* value, size_t size) {\n  paths_.Mutable(index)->assign(\n    reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.FieldMask.paths)\n}\ninline ::std::string* FieldMask::add_paths() {\n  // @@protoc_insertion_point(field_add_mutable:google.protobuf.FieldMask.paths)\n  return paths_.Add();\n}\ninline void FieldMask::add_paths(const ::std::string& value) {\n  paths_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:google.protobuf.FieldMask.paths)\n}\ninline void FieldMask::add_paths(const char* value) {\n  paths_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add_char:google.protobuf.FieldMask.paths)\n}\ninline void FieldMask::add_paths(const char* value, size_t size) {\n  paths_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:google.protobuf.FieldMask.paths)\n}\ninline const ::google::protobuf::RepeatedPtrField< ::std::string>&\nFieldMask::paths() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.FieldMask.paths)\n  return paths_;\n}\ninline ::google::protobuf::RepeatedPtrField< ::std::string>*\nFieldMask::mutable_paths() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.FieldMask.paths)\n  return &paths_;\n}\n\ninline const FieldMask* FieldMask::internal_default_instance() {\n  return &FieldMask_default_instance_.get();\n}\n#endif  // 
!PROTOBUF_INLINE_NOT_IN_HEADERS\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace protobuf\n}  // namespace google\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_google_2fprotobuf_2ffield_5fmask_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/field_mask.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"FieldMaskProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n// `FieldMask` represents a set of symbolic field paths, for example:\n//\n//     paths: \"f.a\"\n//     paths: \"f.b.d\"\n//\n// Here `f` represents a field in some root message, `a` and `b`\n// fields in the message found in `f`, and `d` a field found in the\n// message in `f.b`.\n//\n// Field masks are used to specify a subset of fields that should be\n// returned by a get operation or modified by an update operation.\n// Field masks also have a custom JSON encoding (see below).\n//\n// # Field Masks in Projections\n//\n// When used in the context of a projection, a response message or\n// sub-message is filtered by the API to only contain those fields as\n// specified in the mask. 
For example, if the mask in the previous\n// example is applied to a response message as follows:\n//\n//     f {\n//       a : 22\n//       b {\n//         d : 1\n//         x : 2\n//       }\n//       y : 13\n//     }\n//     z: 8\n//\n// The result will not contain specific values for fields x,y and z\n// (their value will be set to the default, and omitted in proto text\n// output):\n//\n//\n//     f {\n//       a : 22\n//       b {\n//         d : 1\n//       }\n//     }\n//\n// A repeated field is not allowed except at the last position of a\n// paths string.\n//\n// If a FieldMask object is not present in a get operation, the\n// operation applies to all fields (as if a FieldMask of all fields\n// had been specified).\n//\n// Note that a field mask does not necessarily apply to the\n// top-level response message. In case of a REST get operation, the\n// field mask applies directly to the response, but in case of a REST\n// list operation, the mask instead applies to each individual message\n// in the returned resource list. In case of a REST custom method,\n// other definitions may be used. Where the mask applies will be\n// clearly documented together with its declaration in the API.  In\n// any case, the effect on the returned resource/resources is required\n// behavior for APIs.\n//\n// # Field Masks in Update Operations\n//\n// A field mask in update operations specifies which fields of the\n// targeted resource are going to be updated. The API is required\n// to only change the values of the fields as specified in the mask\n// and leave the others untouched. 
If a resource is passed in to\n// describe the updated values, the API ignores the values of all\n// fields not covered by the mask.\n//\n// If a repeated field is specified for an update operation, the existing\n// repeated values in the target resource will be overwritten by the new values.\n// Note that a repeated field is only allowed in the last position of a `paths`\n// string.\n//\n// If a sub-message is specified in the last position of the field mask for an\n// update operation, then the existing sub-message in the target resource is\n// overwritten. Given the target message:\n//\n//     f {\n//       b {\n//         d : 1\n//         x : 2\n//       }\n//       c : 1\n//     }\n//\n// And an update message:\n//\n//     f {\n//       b {\n//         d : 10\n//       }\n//     }\n//\n// then if the field mask is:\n//\n//  paths: \"f.b\"\n//\n// then the result will be:\n//\n//     f {\n//       b {\n//         d : 10\n//       }\n//       c : 1\n//     }\n//\n// However, if the update mask was:\n//\n//  paths: \"f.b.d\"\n//\n// then the result would be:\n//\n//     f {\n//       b {\n//         d : 10\n//         x : 2\n//       }\n//       c : 1\n//     }\n//\n// In order to reset a field's value to the default, the field must\n// be in the mask and set to the default value in the provided resource.\n// Hence, in order to reset all fields of a resource, provide a default\n// instance of the resource and set all fields in the mask, or do\n// not provide a mask as described below.\n//\n// If a field mask is not present on update, the operation applies to\n// all fields (as if a field mask of all fields has been specified).\n// Note that in the presence of schema evolution, this may mean that\n// fields the client does not know and has therefore not filled into\n// the request will be reset to their default. 
If this is unwanted\n// behavior, a specific service may require a client to always specify\n// a field mask, producing an error if not.\n//\n// As with get operations, the location of the resource which\n// describes the updated values in the request message depends on the\n// operation kind. In any case, the effect of the field mask is\n// required to be honored by the API.\n//\n// ## Considerations for HTTP REST\n//\n// The HTTP kind of an update operation which uses a field mask must\n// be set to PATCH instead of PUT in order to satisfy HTTP semantics\n// (PUT must only be used for full updates).\n//\n// # JSON Encoding of Field Masks\n//\n// In JSON, a field mask is encoded as a single string where paths are\n// separated by a comma. Fields name in each path are converted\n// to/from lower-camel naming conventions.\n//\n// As an example, consider the following message declarations:\n//\n//     message Profile {\n//       User user = 1;\n//       Photo photo = 2;\n//     }\n//     message User {\n//       string display_name = 1;\n//       string address = 2;\n//     }\n//\n// In proto a field mask for `Profile` may look as such:\n//\n//     mask {\n//       paths: \"user.display_name\"\n//       paths: \"photo\"\n//     }\n//\n// In JSON, the same mask is represented as below:\n//\n//     {\n//       mask: \"user.displayName,photo\"\n//     }\n//\n// # Field Masks and Oneof Fields\n//\n// Field masks treat fields in oneofs just as regular fields. 
Consider the\n// following message:\n//\n//     message SampleMessage {\n//       oneof test_oneof {\n//         string name = 4;\n//         SubMessage sub_message = 9;\n//       }\n//     }\n//\n// The field mask can be:\n//\n//     mask {\n//       paths: \"name\"\n//     }\n//\n// Or:\n//\n//     mask {\n//       paths: \"sub_message\"\n//     }\n//\n// Note that oneof type names (\"test_oneof\" in this case) cannot be used in\n// paths.\nmessage FieldMask {\n  // The set of field mask paths.\n  repeated string paths = 1;\n}\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/generated_enum_reflection.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: jasonh@google.com (Jason Hsueh)\n//\n// This header is logically internal, but is made public because it is used\n// from protocol-compiler-generated code, which may reside in other components.\n// It provides reflection support for generated enums, and is included in\n// generated .pb.h files and should have minimal dependencies. The methods are\n// implemented in generated_message_reflection.cc.\n\n#ifndef GOOGLE_PROTOBUF_GENERATED_ENUM_REFLECTION_H__\n#define GOOGLE_PROTOBUF_GENERATED_ENUM_REFLECTION_H__\n\n#include <string>\n\n#include <google/protobuf/stubs/template_util.h>\n#include <google/protobuf/generated_enum_util.h>\n\nnamespace google {\nnamespace protobuf {\n  class EnumDescriptor;\n}  // namespace protobuf\n\nnamespace protobuf {\n\n// Returns the EnumDescriptor for enum type E, which must be a\n// proto-declared enum type.  
Code generated by the protocol compiler\n// will include specializations of this template for each enum type declared.\ntemplate <typename E>\nconst EnumDescriptor* GetEnumDescriptor();\n\nnamespace internal {\n\n// Helper for EnumType_Parse functions: try to parse the string 'name' as an\n// enum name of the given type, returning true and filling in value on success,\n// or returning false and leaving value unchanged on failure.\nLIBPROTOBUF_EXPORT bool ParseNamedEnum(const EnumDescriptor* descriptor,\n                    const string& name,\n                    int* value);\n\ntemplate<typename EnumType>\nbool ParseNamedEnum(const EnumDescriptor* descriptor,\n                    const string& name,\n                    EnumType* value) {\n  int tmp;\n  if (!ParseNamedEnum(descriptor, name, &tmp)) return false;\n  *value = static_cast<EnumType>(tmp);\n  return true;\n}\n\n// Just a wrapper around printing the name of a value. The main point of this\n// function is not to be inlined, so that you can do this without including\n// descriptor.h.\nLIBPROTOBUF_EXPORT const string& NameOfEnum(const EnumDescriptor* descriptor, int value);\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_GENERATED_ENUM_REFLECTION_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/generated_enum_util.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_GENERATED_ENUM_UTIL_H__\n#define GOOGLE_PROTOBUF_GENERATED_ENUM_UTIL_H__\n\n#include <google/protobuf/stubs/template_util.h>\n\nnamespace google {\nnamespace protobuf {\n\n// This type trait can be used to cause templates to only match proto2 enum\n// types.\ntemplate <typename T> struct is_proto_enum : ::google::protobuf::internal::false_type {};\n\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_GENERATED_ENUM_UTIL_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/generated_message_reflection.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// This header is logically internal, but is made public because it is used\n// from protocol-compiler-generated code, which may reside in other components.\n\n#ifndef GOOGLE_PROTOBUF_GENERATED_MESSAGE_REFLECTION_H__\n#define GOOGLE_PROTOBUF_GENERATED_MESSAGE_REFLECTION_H__\n\n#include <string>\n#include <vector>\n#include <google/protobuf/stubs/casts.h>\n#include <google/protobuf/stubs/common.h>\n// TODO(jasonh): Remove this once the compiler change to directly include this\n// is released to components.\n#include <google/protobuf/generated_enum_reflection.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/unknown_field_set.h>\n\n\nnamespace google {\nnamespace upb {\nnamespace google_opensource {\nclass GMR_Handlers;\n}  // namespace google_opensource\n}  // namespace upb\n\nnamespace protobuf {\nclass DescriptorPool;\nclass MapKey;\nclass MapValueRef;\n}\n\nnamespace protobuf {\nnamespace internal {\nclass DefaultEmptyOneof;\n\n// Defined in this file.\nclass GeneratedMessageReflection;\n\n// Defined in other files.\nclass ExtensionSet;             // extension_set.h\n\n// THIS CLASS IS NOT INTENDED FOR DIRECT USE.  It is intended for use\n// by generated code.  
This class is just a big hack that reduces code\n// size.\n//\n// A GeneratedMessageReflection is an implementation of Reflection\n// which expects all fields to be backed by simple variables located in\n// memory.  The locations are given using a base pointer and a set of\n// offsets.\n//\n// It is required that the user represents fields of each type in a standard\n// way, so that GeneratedMessageReflection can cast the void* pointer to\n// the appropriate type.  For primitive fields and string fields, each field\n// should be represented using the obvious C++ primitive type.  Enums and\n// Messages are different:\n//  - Singular Message fields are stored as a pointer to a Message.  These\n//    should start out NULL, except for in the default instance where they\n//    should start out pointing to other default instances.\n//  - Enum fields are stored as an int.  This int must always contain\n//    a valid value, such that EnumDescriptor::FindValueByNumber() would\n//    not return NULL.\n//  - Repeated fields are stored as RepeatedFields or RepeatedPtrFields\n//    of whatever type the individual field would be.  Strings and\n//    Messages use RepeatedPtrFields while everything else uses\n//    RepeatedFields.\nclass LIBPROTOBUF_EXPORT GeneratedMessageReflection : public Reflection {\n public:\n  // Constructs a GeneratedMessageReflection.\n  // Parameters:\n  //   descriptor:    The descriptor for the message type being implemented.\n  //   default_instance:  The default instance of the message.  This is only\n  //                  used to obtain pointers to default instances of embedded\n  //                  messages, which GetMessage() will return if the particular\n  //                  sub-message has not been initialized yet.  
(Thus, all\n  //                  embedded message fields *must* have non-NULL pointers\n  //                  in the default instance.)\n  //   offsets:       An array of ints giving the byte offsets, relative to\n  //                  the start of the message object, of each field.  These can\n  //                  be computed at compile time using the\n  //                  GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET() macro, defined\n  //                  below.\n  //   has_bits_offset:  Offset in the message of an array of uint32s of size\n  //                  descriptor->field_count()/32, rounded up.  This is a\n  //                  bitfield where each bit indicates whether or not the\n  //                  corresponding field of the message has been initialized.\n  //                  The bit for field index i is obtained by the expression:\n  //                    has_bits[i / 32] & (1 << (i % 32))\n  //   unknown_fields_offset:  Offset in the message of the UnknownFieldSet for\n  //                  the message.\n  //   extensions_offset:  Offset in the message of the ExtensionSet for the\n  //                  message, or -1 if the message type has no extension\n  //                  ranges.\n  //   pool:          DescriptorPool to search for extension definitions.  
Only\n  //                  used by FindKnownExtensionByName() and\n  //                  FindKnownExtensionByNumber().\n  //   factory:       MessageFactory to use to construct extension messages.\n  //   object_size:   The size of a message object of this type, as measured\n  //                  by sizeof().\n  GeneratedMessageReflection(const Descriptor* descriptor,\n                             const Message* default_instance,\n                             const int offsets[], int has_bits_offset,\n                             int unknown_fields_offset, int extensions_offset,\n                             const DescriptorPool* pool,\n                             MessageFactory* factory, int object_size,\n                             int arena_offset);\n\n  // Similar with the construction above. Call this construction if the\n  // message has oneof definition.\n  // Parameters:\n  //   offsets:       An array of ints giving the byte offsets.\n  //                  For each oneof field, the offset is relative to the\n  //                  default_oneof_instance. These can be computed at compile\n  //                  time using the\n  //                  PROTO2_GENERATED_DEFAULT_ONEOF_FIELD_OFFSET() macro.\n  //                  For each none oneof field, the offset is related to\n  //                  the start of the message object.  These can be computed\n  //                  at compile time using the\n  //                  GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET() macro.\n  //                  Besides offsets for all fields, this array also contains\n  //                  offsets for oneof unions. The offset of the i-th oneof\n  //                  union is offsets[descriptor->field_count() + i].\n  //   default_oneof_instance: The default instance of the oneofs. It is a\n  //                  struct holding the default value of all oneof fields\n  //                  for this message. 
It is only used to obtain pointers\n  //                  to default instances of oneof fields, which Get\n  //                  methods will return if the field is not set.\n  //   oneof_case_offset:  Offset in the message of an array of uint32s of\n  //                  size descriptor->oneof_decl_count().  Each uint32\n  //                  indicates what field is set for each oneof.\n  //   other parameters are the same with the construction above.\n  GeneratedMessageReflection(const Descriptor* descriptor,\n                             const Message* default_instance,\n                             const int offsets[], int has_bits_offset,\n                             int unknown_fields_offset, int extensions_offset,\n                             const void* default_oneof_instance,\n                             int oneof_case_offset, const DescriptorPool* pool,\n                             MessageFactory* factory, int object_size,\n                             int arena_offset);\n  ~GeneratedMessageReflection();\n\n  // Shorter-to-call helpers for the above two constructions that work if the\n  // pool and factory are the usual, namely, DescriptorPool::generated_pool()\n  // and MessageFactory::generated_factory().\n\n  static GeneratedMessageReflection* NewGeneratedMessageReflection(\n      const Descriptor* descriptor,\n      const Message* default_instance,\n      const int offsets[],\n      int has_bits_offset,\n      int unknown_fields_offset,\n      int extensions_offset,\n      const void* default_oneof_instance,\n      int oneof_case_offset,\n      int object_size,\n      int arena_offset,\n      int is_default_instance_offset = -1);\n\n  static GeneratedMessageReflection* NewGeneratedMessageReflection(\n      const Descriptor* descriptor,\n      const Message* default_instance,\n      const int offsets[],\n      int has_bits_offset,\n      int unknown_fields_offset,\n      int extensions_offset,\n      int object_size,\n      int arena_offset,\n      
int is_default_instance_offset = -1);\n\n  // implements Reflection -------------------------------------------\n\n  const UnknownFieldSet& GetUnknownFields(const Message& message) const;\n  UnknownFieldSet* MutableUnknownFields(Message* message) const;\n\n  int SpaceUsed(const Message& message) const;\n\n  bool HasField(const Message& message, const FieldDescriptor* field) const;\n  int FieldSize(const Message& message, const FieldDescriptor* field) const;\n  void ClearField(Message* message, const FieldDescriptor* field) const;\n  bool HasOneof(const Message& message,\n                const OneofDescriptor* oneof_descriptor) const;\n  void ClearOneof(Message* message, const OneofDescriptor* field) const;\n  void RemoveLast(Message* message, const FieldDescriptor* field) const;\n  Message* ReleaseLast(Message* message, const FieldDescriptor* field) const;\n  void Swap(Message* message1, Message* message2) const;\n  void SwapFields(Message* message1, Message* message2,\n                  const std::vector<const FieldDescriptor*>& fields) const;\n  void SwapElements(Message* message, const FieldDescriptor* field,\n                    int index1, int index2) const;\n  void ListFields(const Message& message,\n                  std::vector<const FieldDescriptor*>* output) const;\n\n  int32  GetInt32 (const Message& message,\n                   const FieldDescriptor* field) const;\n  int64  GetInt64 (const Message& message,\n                   const FieldDescriptor* field) const;\n  uint32 GetUInt32(const Message& message,\n                   const FieldDescriptor* field) const;\n  uint64 GetUInt64(const Message& message,\n                   const FieldDescriptor* field) const;\n  float  GetFloat (const Message& message,\n                   const FieldDescriptor* field) const;\n  double GetDouble(const Message& message,\n                   const FieldDescriptor* field) const;\n  bool   GetBool  (const Message& message,\n                   const FieldDescriptor* field) 
const;\n  string GetString(const Message& message,\n                   const FieldDescriptor* field) const;\n  const string& GetStringReference(const Message& message,\n                                   const FieldDescriptor* field,\n                                   string* scratch) const;\n  const EnumValueDescriptor* GetEnum(const Message& message,\n                                     const FieldDescriptor* field) const;\n  int GetEnumValue(const Message& message,\n                   const FieldDescriptor* field) const;\n  const Message& GetMessage(const Message& message,\n                            const FieldDescriptor* field,\n                            MessageFactory* factory = NULL) const;\n\n  const FieldDescriptor* GetOneofFieldDescriptor(\n      const Message& message,\n      const OneofDescriptor* oneof_descriptor) const;\n\n private:\n  bool ContainsMapKey(const Message& message,\n                      const FieldDescriptor* field,\n                      const MapKey& key) const;\n  bool InsertOrLookupMapValue(Message* message,\n                              const FieldDescriptor* field,\n                              const MapKey& key,\n                              MapValueRef* val) const;\n  bool DeleteMapValue(Message* message,\n                      const FieldDescriptor* field,\n                      const MapKey& key) const;\n  MapIterator MapBegin(\n      Message* message,\n      const FieldDescriptor* field) const;\n  MapIterator MapEnd(\n      Message* message,\n      const FieldDescriptor* field) const;\n  int MapSize(const Message& message, const FieldDescriptor* field) const;\n\n public:\n  void SetInt32 (Message* message,\n                 const FieldDescriptor* field, int32  value) const;\n  void SetInt64 (Message* message,\n                 const FieldDescriptor* field, int64  value) const;\n  void SetUInt32(Message* message,\n                 const FieldDescriptor* field, uint32 value) const;\n  void SetUInt64(Message* message,\n  
               const FieldDescriptor* field, uint64 value) const;\n  void SetFloat (Message* message,\n                 const FieldDescriptor* field, float  value) const;\n  void SetDouble(Message* message,\n                 const FieldDescriptor* field, double value) const;\n  void SetBool  (Message* message,\n                 const FieldDescriptor* field, bool   value) const;\n  void SetString(Message* message,\n                 const FieldDescriptor* field,\n                 const string& value) const;\n  void SetEnum  (Message* message, const FieldDescriptor* field,\n                 const EnumValueDescriptor* value) const;\n  void SetEnumValue(Message* message, const FieldDescriptor* field,\n                    int value) const;\n  Message* MutableMessage(Message* message, const FieldDescriptor* field,\n                          MessageFactory* factory = NULL) const;\n  void SetAllocatedMessage(Message* message,\n                           Message* sub_message,\n                           const FieldDescriptor* field) const;\n  Message* ReleaseMessage(Message* message, const FieldDescriptor* field,\n                          MessageFactory* factory = NULL) const;\n\n  int32  GetRepeatedInt32 (const Message& message,\n                           const FieldDescriptor* field, int index) const;\n  int64  GetRepeatedInt64 (const Message& message,\n                           const FieldDescriptor* field, int index) const;\n  uint32 GetRepeatedUInt32(const Message& message,\n                           const FieldDescriptor* field, int index) const;\n  uint64 GetRepeatedUInt64(const Message& message,\n                           const FieldDescriptor* field, int index) const;\n  float  GetRepeatedFloat (const Message& message,\n                           const FieldDescriptor* field, int index) const;\n  double GetRepeatedDouble(const Message& message,\n                           const FieldDescriptor* field, int index) const;\n  bool   GetRepeatedBool  (const Message& 
message,\n                           const FieldDescriptor* field, int index) const;\n  string GetRepeatedString(const Message& message,\n                           const FieldDescriptor* field, int index) const;\n  const string& GetRepeatedStringReference(const Message& message,\n                                           const FieldDescriptor* field,\n                                           int index, string* scratch) const;\n  const EnumValueDescriptor* GetRepeatedEnum(const Message& message,\n                                             const FieldDescriptor* field,\n                                             int index) const;\n  int GetRepeatedEnumValue(const Message& message,\n                           const FieldDescriptor* field,\n                           int index) const;\n  const Message& GetRepeatedMessage(const Message& message,\n                                    const FieldDescriptor* field,\n                                    int index) const;\n\n  // Set the value of a field.\n  void SetRepeatedInt32 (Message* message,\n                         const FieldDescriptor* field, int index, int32  value) const;\n  void SetRepeatedInt64 (Message* message,\n                         const FieldDescriptor* field, int index, int64  value) const;\n  void SetRepeatedUInt32(Message* message,\n                         const FieldDescriptor* field, int index, uint32 value) const;\n  void SetRepeatedUInt64(Message* message,\n                         const FieldDescriptor* field, int index, uint64 value) const;\n  void SetRepeatedFloat (Message* message,\n                         const FieldDescriptor* field, int index, float  value) const;\n  void SetRepeatedDouble(Message* message,\n                         const FieldDescriptor* field, int index, double value) const;\n  void SetRepeatedBool  (Message* message,\n                         const FieldDescriptor* field, int index, bool   value) const;\n  void SetRepeatedString(Message* message,\n              
           const FieldDescriptor* field, int index,\n                         const string& value) const;\n  void SetRepeatedEnum(Message* message, const FieldDescriptor* field,\n                       int index, const EnumValueDescriptor* value) const;\n  void SetRepeatedEnumValue(Message* message, const FieldDescriptor* field,\n                            int index, int value) const;\n  // Get a mutable pointer to a field with a message type.\n  Message* MutableRepeatedMessage(Message* message,\n                                  const FieldDescriptor* field,\n                                  int index) const;\n\n  void AddInt32 (Message* message,\n                 const FieldDescriptor* field, int32  value) const;\n  void AddInt64 (Message* message,\n                 const FieldDescriptor* field, int64  value) const;\n  void AddUInt32(Message* message,\n                 const FieldDescriptor* field, uint32 value) const;\n  void AddUInt64(Message* message,\n                 const FieldDescriptor* field, uint64 value) const;\n  void AddFloat (Message* message,\n                 const FieldDescriptor* field, float  value) const;\n  void AddDouble(Message* message,\n                 const FieldDescriptor* field, double value) const;\n  void AddBool  (Message* message,\n                 const FieldDescriptor* field, bool   value) const;\n  void AddString(Message* message,\n                 const FieldDescriptor* field, const string& value) const;\n  void AddEnum(Message* message,\n               const FieldDescriptor* field,\n               const EnumValueDescriptor* value) const;\n  void AddEnumValue(Message* message,\n                    const FieldDescriptor* field,\n                    int value) const;\n  Message* AddMessage(Message* message, const FieldDescriptor* field,\n                      MessageFactory* factory = NULL) const;\n  void AddAllocatedMessage(\n      Message* message, const FieldDescriptor* field,\n      Message* new_entry) const;\n\n  const 
FieldDescriptor* FindKnownExtensionByName(const string& name) const;\n  const FieldDescriptor* FindKnownExtensionByNumber(int number) const;\n\n  bool SupportsUnknownEnumValues() const;\n\n  // This value for arena_offset_ indicates that there is no arena pointer in\n  // this message (e.g., old generated code).\n  static const int kNoArenaPointer = -1;\n\n  // This value for unknown_field_offset_ indicates that there is no\n  // UnknownFieldSet in this message, and that instead, we are using the\n  // Zero-Overhead Arena Pointer trick. When this is the case, arena_offset_\n  // actually indexes to an InternalMetadataWithArena instance, which can return\n  // either an arena pointer or an UnknownFieldSet or both. It is never the case\n  // that unknown_field_offset_ == kUnknownFieldSetInMetadata && arena_offset_\n  // == kNoArenaPointer.\n  static const int kUnknownFieldSetInMetadata = -1;\n\n protected:\n  void* MutableRawRepeatedField(\n      Message* message, const FieldDescriptor* field, FieldDescriptor::CppType,\n      int ctype, const Descriptor* desc) const;\n\n  const void* GetRawRepeatedField(\n      const Message& message, const FieldDescriptor* field,\n      FieldDescriptor::CppType, int ctype,\n      const Descriptor* desc) const;\n\n  virtual MessageFactory* GetMessageFactory() const;\n\n  virtual void* RepeatedFieldData(\n      Message* message, const FieldDescriptor* field,\n      FieldDescriptor::CppType cpp_type,\n      const Descriptor* message_type) const;\n\n private:\n  friend class GeneratedMessage;\n\n  // To parse directly into a proto2 generated class, the class GMR_Handlers\n  // needs access to member offsets and hasbits.\n  friend class upb::google_opensource::GMR_Handlers;\n\n  const Descriptor* descriptor_;\n  const Message* default_instance_;\n  const void* default_oneof_instance_;\n  const int* offsets_;\n\n  int has_bits_offset_;\n  int oneof_case_offset_;\n  int unknown_fields_offset_;\n  int extensions_offset_;\n  int 
arena_offset_;\n  int object_size_;\n\n  const DescriptorPool* descriptor_pool_;\n  MessageFactory* message_factory_;\n\n  template <typename Type>\n  inline const Type& GetRaw(const Message& message,\n                            const FieldDescriptor* field) const;\n  template <typename Type>\n  inline Type* MutableRaw(Message* message,\n                          const FieldDescriptor* field) const;\n  template <typename Type>\n  inline const Type& DefaultRaw(const FieldDescriptor* field) const;\n  template <typename Type>\n  inline const Type& DefaultOneofRaw(const FieldDescriptor* field) const;\n\n  inline const uint32* GetHasBits(const Message& message) const;\n  inline uint32* MutableHasBits(Message* message) const;\n  inline uint32 GetOneofCase(\n      const Message& message,\n      const OneofDescriptor* oneof_descriptor) const;\n  inline uint32* MutableOneofCase(\n      Message* message,\n      const OneofDescriptor* oneof_descriptor) const;\n  inline const ExtensionSet& GetExtensionSet(const Message& message) const;\n  inline ExtensionSet* MutableExtensionSet(Message* message) const;\n  inline Arena* GetArena(Message* message) const;\n  inline const internal::InternalMetadataWithArena&\n      GetInternalMetadataWithArena(const Message& message) const;\n  inline internal::InternalMetadataWithArena*\n      MutableInternalMetadataWithArena(Message* message) const;\n\n  inline bool GetIsDefaultInstance(const Message& message) const;\n\n  inline bool HasBit(const Message& message,\n                     const FieldDescriptor* field) const;\n  inline void SetBit(Message* message,\n                     const FieldDescriptor* field) const;\n  inline void ClearBit(Message* message,\n                       const FieldDescriptor* field) const;\n  inline void SwapBit(Message* message1,\n                      Message* message2,\n                      const FieldDescriptor* field) const;\n\n  // This function only swaps the field. 
Should swap corresponding has_bit\n  // before or after using this function.\n  void SwapField(Message* message1,\n                 Message* message2,\n                 const FieldDescriptor* field) const;\n\n  void SwapOneofField(Message* message1,\n                      Message* message2,\n                      const OneofDescriptor* oneof_descriptor) const;\n\n  inline bool HasOneofField(const Message& message,\n                            const FieldDescriptor* field) const;\n  inline void SetOneofCase(Message* message,\n                           const FieldDescriptor* field) const;\n  inline void ClearOneofField(Message* message,\n                              const FieldDescriptor* field) const;\n\n  template <typename Type>\n  inline const Type& GetField(const Message& message,\n                              const FieldDescriptor* field) const;\n  template <typename Type>\n  inline void SetField(Message* message,\n                       const FieldDescriptor* field, const Type& value) const;\n  template <typename Type>\n  inline Type* MutableField(Message* message,\n                            const FieldDescriptor* field) const;\n  template <typename Type>\n  inline const Type& GetRepeatedField(const Message& message,\n                                      const FieldDescriptor* field,\n                                      int index) const;\n  template <typename Type>\n  inline const Type& GetRepeatedPtrField(const Message& message,\n                                         const FieldDescriptor* field,\n                                         int index) const;\n  template <typename Type>\n  inline void SetRepeatedField(Message* message,\n                               const FieldDescriptor* field, int index,\n                               Type value) const;\n  template <typename Type>\n  inline Type* MutableRepeatedField(Message* message,\n                                    const FieldDescriptor* field,\n                                    int index) 
const;\n  template <typename Type>\n  inline void AddField(Message* message,\n                       const FieldDescriptor* field, const Type& value) const;\n  template <typename Type>\n  inline Type* AddField(Message* message,\n                        const FieldDescriptor* field) const;\n\n  int GetExtensionNumberOrDie(const Descriptor* type) const;\n\n  // Internal versions of EnumValue API perform no checking. Called after checks\n  // by public methods.\n  void SetEnumValueInternal(Message* message,\n                            const FieldDescriptor* field,\n                            int value) const;\n  void SetRepeatedEnumValueInternal(Message* message,\n                                    const FieldDescriptor* field,\n                                    int index,\n                                    int value) const;\n  void AddEnumValueInternal(Message* message,\n                            const FieldDescriptor* field,\n                            int value) const;\n\n\n  Message* UnsafeArenaReleaseMessage(Message* message,\n                                     const FieldDescriptor* field,\n                                     MessageFactory* factory = NULL) const;\n\n  void UnsafeArenaSetAllocatedMessage(Message* message,\n                                      Message* sub_message,\n                                      const FieldDescriptor* field) const;\n\n  internal::MapFieldBase* MapData(\n      Message* message, const FieldDescriptor* field) const;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(GeneratedMessageReflection);\n};\n\n// Returns the offset of the given field within the given aggregate type.\n// This is equivalent to the ANSI C offsetof() macro.  However, according\n// to the C++ standard, offsetof() only works on POD types, and GCC\n// enforces this requirement with a warning.  
In practice, this rule is\n// unnecessarily strict; there is probably no compiler or platform on\n// which the offsets of the direct fields of a class are non-constant.\n// Fields inherited from superclasses *can* have non-constant offsets,\n// but that's not what this macro will be used for.\n#if defined(__clang__)\n// For Clang we use __builtin_offsetof() and suppress the warning,\n// to avoid Control Flow Integrity and UBSan vptr sanitizers from\n// crashing while trying to validate the invalid reinterpet_casts.\n#define GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TYPE, FIELD)    \\\n  _Pragma(\"clang diagnostic push\")                            \\\n  _Pragma(\"clang diagnostic ignored \\\"-Winvalid-offsetof\\\"\")  \\\n  __builtin_offsetof(TYPE, FIELD)                             \\\n  _Pragma(\"clang diagnostic pop\")\n#else\n// Note that we calculate relative to the pointer value 16 here since if we\n// just use zero, GCC complains about dereferencing a NULL pointer.  We\n// choose 16 rather than some other number just in case the compiler would\n// be confused by an unaligned pointer.\n#define GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(TYPE, FIELD)    \\\n  static_cast<int>(                                           \\\n      reinterpret_cast<const char*>(                          \\\n          &reinterpret_cast<const TYPE*>(16)->FIELD) -        \\\n      reinterpret_cast<const char*>(16))\n#endif\n\n#define PROTO2_GENERATED_DEFAULT_ONEOF_FIELD_OFFSET(ONEOF, FIELD)     \\\n  static_cast<int>(                                                   \\\n      reinterpret_cast<const char*>(&(ONEOF->FIELD))                  \\\n      - reinterpret_cast<const char*>(ONEOF))\n\n// There are some places in proto2 where dynamic_cast would be useful as an\n// optimization.  
For example, take Message::MergeFrom(const Message& other).\n// For a given generated message FooMessage, we generate these two methods:\n//   void MergeFrom(const FooMessage& other);\n//   void MergeFrom(const Message& other);\n// The former method can be implemented directly in terms of FooMessage's\n// inline accessors, but the latter method must work with the reflection\n// interface.  However, if the parameter to the latter method is actually of\n// type FooMessage, then we'd like to be able to just call the other method\n// as an optimization.  So, we use dynamic_cast to check this.\n//\n// That said, dynamic_cast requires RTTI, which many people like to disable\n// for performance and code size reasons.  When RTTI is not available, we\n// still need to produce correct results.  So, in this case we have to fall\n// back to using reflection, which is what we would have done anyway if the\n// objects were not of the exact same class.\n//\n// dynamic_cast_if_available() implements this logic.  If RTTI is\n// enabled, it does a dynamic_cast.  If RTTI is disabled, it just returns\n// NULL.\n//\n// If you need to compile without RTTI, simply #define GOOGLE_PROTOBUF_NO_RTTI.\n// On MSVC, this should be detected automatically.\ntemplate<typename To, typename From>\ninline To dynamic_cast_if_available(From from) {\n#if defined(GOOGLE_PROTOBUF_NO_RTTI) || (defined(_MSC_VER)&&!defined(_CPPRTTI))\n  return NULL;\n#else\n  return dynamic_cast<To>(from);\n#endif\n}\n\n// Tries to downcast this message to a generated message type.\n// Returns NULL if this class is not an instance of T.\n//\n// This is like dynamic_cast_if_available, except it works even when\n// dynamic_cast is not available by using Reflection.  
However it only works\n// with Message objects.\n//\n// TODO(haberman): can we remove dynamic_cast_if_available in favor of this?\ntemplate <typename T>\nT* DynamicCastToGenerated(const Message* from) {\n  // Compile-time assert that T is a generated type that has a\n  // default_instance() accessor, but avoid actually calling it.\n  const T&(*get_default_instance)() = &T::default_instance;\n  (void)get_default_instance;\n\n  // Compile-time assert that T is a subclass of google::protobuf::Message.\n  const Message* unused = static_cast<T*>(NULL);\n  (void)unused;\n\n#if defined(GOOGLE_PROTOBUF_NO_RTTI) || \\\n  (defined(_MSC_VER) && !defined(_CPPRTTI))\n  bool ok = &T::default_instance() ==\n            from->GetReflection()->GetMessageFactory()->GetPrototype(\n                from->GetDescriptor());\n  return ok ? down_cast<T*>(from) : NULL;\n#else\n  return dynamic_cast<T*>(from);\n#endif\n}\n\ntemplate <typename T>\nT* DynamicCastToGenerated(Message* from) {\n  const Message* message_const = from;\n  return const_cast<T*>(DynamicCastToGenerated<const T>(message_const));\n}\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_GENERATED_MESSAGE_REFLECTION_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/generated_message_util.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// This file contains miscellaneous helper code used by generated code --\n// including lite types -- but which should not be used directly by users.\n\n#ifndef GOOGLE_PROTOBUF_GENERATED_MESSAGE_UTIL_H__\n#define GOOGLE_PROTOBUF_GENERATED_MESSAGE_UTIL_H__\n\n#include <assert.h>\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/once.h>\n#include <google/protobuf/has_bits.h>\n\nnamespace google {\n\nnamespace protobuf {\n\nclass Arena;\nnamespace io { class CodedInputStream; }\n\nnamespace internal {\n\n\n// Annotation for the compiler to emit a deprecation message if a field marked\n// with option 'deprecated=true' is used in the code, or for other things in\n// generated code which are deprecated.\n//\n// For internal use in the pb.cc files, deprecation warnings are suppressed\n// there.\n#undef DEPRECATED_PROTOBUF_FIELD\n#define PROTOBUF_DEPRECATED\n\n#define GOOGLE_PROTOBUF_DEPRECATED_ATTR\n\n\n// Constants for special floating point values.\nLIBPROTOBUF_EXPORT double Infinity();\nLIBPROTOBUF_EXPORT double NaN();\n\n// This type is used to define a global variable, without it's constructor\n// and destructor run on start and end of the program lifetime. This circumvents\n// the initial construction order fiasco, while keeping the address of the\n// empty string a compile time constant.\ntemplate <typename T>\nclass ExplicitlyConstructed {\n public:\n  void DefaultConstruct() {\n    new (&union_) T();\n    init_ = true;\n  }\n\n  bool IsInitialized() { return init_; }\n  void Shutdown() {\n    if (init_) {\n      init_ = false;\n      get_mutable()->~T();\n    }\n  }\n\n  const T& get() const { return reinterpret_cast<const T&>(union_); }\n  T* get_mutable() { return reinterpret_cast<T*>(&union_); }\n\n private:\n  // Prefer c++14 aligned_storage, but for compatibility this will do.\n  union AlignedUnion {\n    char space[sizeof(T)];\n    int64 align_to_int64;\n    void* align_to_ptr;\n  } union_;\n  bool init_;  // false by linker\n};\n\n// TODO(jieluo): Change to template. We have tried to use template,\n// but it causes net/rpc/python:rpcutil_test fail (the empty string will\n// init twice). It may related to swig. Change to template after we\n// found the solution.\n\n// Default empty string object. Don't use this directly. Instead, call\n// GetEmptyString() to get the reference.\nextern ExplicitlyConstructed< ::std::string> fixed_address_empty_string;\nLIBPROTOBUF_EXPORT extern ProtobufOnceType empty_string_once_init_;\nLIBPROTOBUF_EXPORT void InitEmptyString();\n\n\nLIBPROTOBUF_EXPORT inline const ::std::string& GetEmptyStringAlreadyInited() {\n  return fixed_address_empty_string.get();\n}\n\nLIBPROTOBUF_EXPORT inline const ::std::string& GetEmptyString() {\n  ::google::protobuf::GoogleOnceInit(&empty_string_once_init_, &InitEmptyString);\n  return GetEmptyStringAlreadyInited();\n}\n\nLIBPROTOBUF_EXPORT int StringSpaceUsedExcludingSelf(const string& str);\n\n\n// True if IsInitialized() is true for all elements of t.  Type is expected\n// to be a RepeatedPtrField<some message type>.  It's useful to have this\n// helper here to keep the protobuf compiler from ever having to emit loops in\n// IsInitialized() methods.  We want the C++ compiler to inline this or not\n// as it sees fit.\ntemplate <class Type> bool AllAreInitialized(const Type& t) {\n  for (int i = t.size(); --i >= 0; ) {\n    if (!t.Get(i).IsInitialized()) return false;\n  }\n  return true;\n}\n\n// Helper function to crash on merge failure.\n// Moved out of generated code to reduce binary size.\nLIBPROTOBUF_EXPORT void MergeFromFail(const char* file, int line) GOOGLE_ATTRIBUTE_NORETURN;\n\n// We compute sizes as size_t but cache them as int.  This function converts a\n// computed size to a cached size.  Since we don't proceed with serialization if\n// the total size was > INT_MAX, it is not important what this function returns\n// for inputs > INT_MAX.\ninline int ToCachedSize(size_t size) {\n  return static_cast<int>(size);\n}\n\n// We mainly calculate sizes in terms of size_t, but some functions that compute\n// sizes return \"int\".  These int sizes are expected to always be positive.\n// This function is more efficient than casting an int to size_t directly on\n// 64-bit platforms because it avoids making the compiler emit a sign extending\n// instruction, which we don't want and don't want to pay for.\ninline size_t FromIntSize(int size) {\n  // Convert to unsigned before widening so sign extension is not necessary.\n  return static_cast<unsigned int>(size);\n}\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_GENERATED_MESSAGE_UTIL_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/has_bits.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_HAS_BITS_H__\n#define GOOGLE_PROTOBUF_HAS_BITS_H__\n\n#include <google/protobuf/stubs/common.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\ntemplate<size_t doublewords>\nclass HasBits {\n public:\n  HasBits() GOOGLE_ATTRIBUTE_ALWAYS_INLINE { Clear(); }\n\n  void Clear() GOOGLE_ATTRIBUTE_ALWAYS_INLINE {\n    memset(has_bits_, 0, sizeof(has_bits_));\n  }\n\n  ::google::protobuf::uint32& operator[](int index) GOOGLE_ATTRIBUTE_ALWAYS_INLINE {\n    return has_bits_[index];\n  }\n\n  const ::google::protobuf::uint32& operator[](int index) const GOOGLE_ATTRIBUTE_ALWAYS_INLINE {\n    return has_bits_[index];\n  }\n\n  bool operator==(const HasBits<doublewords>& rhs) const {\n    return memcmp(has_bits_, rhs.has_bits_, sizeof(has_bits_)) == 0;\n  }\n\n  bool operator!=(const HasBits<doublewords>& rhs) const {\n    return !(*this == rhs);\n  }\n private:\n  ::google::protobuf::uint32 has_bits_[doublewords];\n};\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_HAS_BITS_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/io/coded_stream.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// This file contains the CodedInputStream and CodedOutputStream classes,\n// which wrap a ZeroCopyInputStream or ZeroCopyOutputStream, respectively,\n// and allow you to read or write individual pieces of data in various\n// formats.  In particular, these implement the varint encoding for\n// integers, a simple variable-length encoding in which smaller numbers\n// take fewer bytes.\n//\n// Typically these classes will only be used internally by the protocol\n// buffer library in order to encode and decode protocol buffers.  Clients\n// of the library only need to know about this class if they wish to write\n// custom message parsing or serialization procedures.\n//\n// CodedOutputStream example:\n//   // Write some data to \"myfile\".  First we write a 4-byte \"magic number\"\n//   // to identify the file type, then write a length-delimited string.  
The\n//   // string is composed of a varint giving the length followed by the raw\n//   // bytes.\n//   int fd = open(\"myfile\", O_CREAT | O_WRONLY);\n//   ZeroCopyOutputStream* raw_output = new FileOutputStream(fd);\n//   CodedOutputStream* coded_output = new CodedOutputStream(raw_output);\n//\n//   int magic_number = 1234;\n//   char text[] = \"Hello world!\";\n//   coded_output->WriteLittleEndian32(magic_number);\n//   coded_output->WriteVarint32(strlen(text));\n//   coded_output->WriteRaw(text, strlen(text));\n//\n//   delete coded_output;\n//   delete raw_output;\n//   close(fd);\n//\n// CodedInputStream example:\n//   // Read a file created by the above code.\n//   int fd = open(\"myfile\", O_RDONLY);\n//   ZeroCopyInputStream* raw_input = new FileInputStream(fd);\n//   CodedInputStream coded_input = new CodedInputStream(raw_input);\n//\n//   coded_input->ReadLittleEndian32(&magic_number);\n//   if (magic_number != 1234) {\n//     cerr << \"File not in expected format.\" << endl;\n//     return;\n//   }\n//\n//   uint32 size;\n//   coded_input->ReadVarint32(&size);\n//\n//   char* text = new char[size + 1];\n//   coded_input->ReadRaw(buffer, size);\n//   text[size] = '\\0';\n//\n//   delete coded_input;\n//   delete raw_input;\n//   close(fd);\n//\n//   cout << \"Text is: \" << text << endl;\n//   delete [] text;\n//\n// For those who are interested, varint encoding is defined as follows:\n//\n// The encoding operates on unsigned integers of up to 64 bits in length.\n// Each byte of the encoded value has the format:\n// * bits 0-6: Seven bits of the number being encoded.\n// * bit 7: Zero if this is the last byte in the encoding (in which\n//   case all remaining bits of the number are zero) or 1 if\n//   more bytes follow.\n// The first byte contains the least-significant 7 bits of the number, the\n// second byte (if present) contains the next-least-significant 7 bits,\n// and so on.  
So, the binary number 1011000101011 would be encoded in two\n// bytes as \"10101011 00101100\".\n//\n// In theory, varint could be used to encode integers of any length.\n// However, for practicality we set a limit at 64 bits.  The maximum encoded\n// length of a number is thus 10 bytes.\n\n#ifndef GOOGLE_PROTOBUF_IO_CODED_STREAM_H__\n#define GOOGLE_PROTOBUF_IO_CODED_STREAM_H__\n\n#include <assert.h>\n#include <string>\n#include <utility>\n#ifdef _MSC_VER\n  // Assuming windows is always little-endian.\n  #if !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)\n    #define PROTOBUF_LITTLE_ENDIAN 1\n  #endif\n  #if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)\n    // If MSVC has \"/RTCc\" set, it will complain about truncating casts at\n    // runtime.  This file contains some intentional truncating casts.\n    #pragma runtime_checks(\"c\", off)\n  #endif\n#else\n  #include <sys/param.h>   // __BYTE_ORDER\n  #if ((defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \\\n         (defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN)) && \\\n      !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)\n    #define PROTOBUF_LITTLE_ENDIAN 1\n  #endif\n#endif\n#include <google/protobuf/stubs/common.h>\n\nnamespace google {\n\nnamespace protobuf {\n\nclass DescriptorPool;\nclass MessageFactory;\n\nnamespace io {\n\n// Defined in this file.\nclass CodedInputStream;\nclass CodedOutputStream;\n\n// Defined in other files.\nclass ZeroCopyInputStream;           // zero_copy_stream.h\nclass ZeroCopyOutputStream;          // zero_copy_stream.h\n\n// Class which reads and decodes binary data which is composed of varint-\n// encoded integers and fixed-width pieces.  Wraps a ZeroCopyInputStream.\n// Most users will not need to deal with CodedInputStream.\n//\n// Most methods of CodedInputStream that return a bool return false if an\n// underlying I/O error occurs or if the data is malformed.  
Once such a\n// failure occurs, the CodedInputStream is broken and is no longer useful.\nclass LIBPROTOBUF_EXPORT CodedInputStream {\n public:\n  // Create a CodedInputStream that reads from the given ZeroCopyInputStream.\n  explicit CodedInputStream(ZeroCopyInputStream* input);\n\n  // Create a CodedInputStream that reads from the given flat array.  This is\n  // faster than using an ArrayInputStream.  PushLimit(size) is implied by\n  // this constructor.\n  explicit CodedInputStream(const uint8* buffer, int size);\n\n  // Destroy the CodedInputStream and position the underlying\n  // ZeroCopyInputStream at the first unread byte.  If an error occurred while\n  // reading (causing a method to return false), then the exact position of\n  // the input stream may be anywhere between the last value that was read\n  // successfully and the stream's byte limit.\n  ~CodedInputStream();\n\n  // Return true if this CodedInputStream reads from a flat array instead of\n  // a ZeroCopyInputStream.\n  inline bool IsFlat() const;\n\n  // Skips a number of bytes.  Returns false if an underlying read error\n  // occurs.\n  bool Skip(int count);\n\n  // Sets *data to point directly at the unread part of the CodedInputStream's\n  // underlying buffer, and *size to the size of that buffer, but does not\n  // advance the stream's current position.  This will always either produce\n  // a non-empty buffer or return false.  
If the caller consumes any of\n  // this data, it should then call Skip() to skip over the consumed bytes.\n  // This may be useful for implementing external fast parsing routines for\n  // types of data not covered by the CodedInputStream interface.\n  bool GetDirectBufferPointer(const void** data, int* size);\n\n  // Like GetDirectBufferPointer, but this method is inlined, and does not\n  // attempt to Refresh() if the buffer is currently empty.\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE void GetDirectBufferPointerInline(const void** data,\n                                                            int* size);\n\n  // Read raw bytes, copying them into the given buffer.\n  bool ReadRaw(void* buffer, int size);\n\n  // Like the above, with inlined optimizations. This should only be used\n  // by the protobuf implementation.\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE bool InternalReadRawInline(void* buffer, int size);\n\n  // Like ReadRaw, but reads into a string.\n  //\n  // Implementation Note:  ReadString() grows the string gradually as it\n  // reads in the data, rather than allocating the entire requested size\n  // upfront.  This prevents denial-of-service attacks in which a client\n  // could claim that a string is going to be MAX_INT bytes long in order to\n  // crash the server because it can't allocate this much space at once.\n  bool ReadString(string* buffer, int size);\n  // Like the above, with inlined optimizations. This should only be used\n  // by the protobuf implementation.\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE bool InternalReadStringInline(string* buffer,\n                                                        int size);\n\n\n  // Read a 32-bit little-endian integer.\n  bool ReadLittleEndian32(uint32* value);\n  // Read a 64-bit little-endian integer.\n  bool ReadLittleEndian64(uint64* value);\n\n  // These methods read from an externally provided buffer. 
The caller is\n  // responsible for ensuring that the buffer has sufficient space.\n  // Read a 32-bit little-endian integer.\n  static const uint8* ReadLittleEndian32FromArray(const uint8* buffer,\n                                                   uint32* value);\n  // Read a 64-bit little-endian integer.\n  static const uint8* ReadLittleEndian64FromArray(const uint8* buffer,\n                                                   uint64* value);\n\n  // Read an unsigned integer with Varint encoding, truncating to 32 bits.\n  // Reading a 32-bit value is equivalent to reading a 64-bit one and casting\n  // it to uint32, but may be more efficient.\n  bool ReadVarint32(uint32* value);\n  // Read an unsigned integer with Varint encoding.\n  bool ReadVarint64(uint64* value);\n\n  // Reads a varint off the wire into an \"int\". This should be used for reading\n  // sizes off the wire (sizes of strings, submessages, bytes fields, etc).\n  //\n  // The value from the wire is interpreted as unsigned.  If its value exceeds\n  // the representable value of an integer on this platform, instead of\n  // truncating we return false. Truncating (as performed by ReadVarint32()\n  // above) is an acceptable approach for fields representing an integer, but\n  // when we are parsing a size from the wire, truncating the value would result\n  // in us misparsing the payload.\n  bool ReadVarintSizeAsInt(int* value);\n\n  // Read a tag.  This calls ReadVarint32() and returns the result, or returns\n  // zero (which is not a valid tag) if ReadVarint32() fails.  Also, it updates\n  // the last tag value, which can be checked with LastTagWas().\n  // Always inline because this is only called in one place per parse loop\n  // but it is called for every iteration of said loop, so it should be fast.\n  // GCC doesn't want to inline this by default.\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE uint32 ReadTag();\n\n  // This usually a faster alternative to ReadTag() when cutoff is a manifest\n  // constant.  
It does particularly well for cutoff >= 127.  The first part\n  // of the return value is the tag that was read, though it can also be 0 in\n  // the cases where ReadTag() would return 0.  If the second part is true\n  // then the tag is known to be in [0, cutoff].  If not, the tag either is\n  // above cutoff or is 0.  (There's intentional wiggle room when tag is 0,\n  // because that can arise in several ways, and for best performance we want\n  // to avoid an extra \"is tag == 0?\" check here.)\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE std::pair<uint32, bool> ReadTagWithCutoff(\n      uint32 cutoff);\n\n  // Usually returns true if calling ReadVarint32() now would produce the given\n  // value.  Will always return false if ReadVarint32() would not return the\n  // given value.  If ExpectTag() returns true, it also advances past\n  // the varint.  For best performance, use a compile-time constant as the\n  // parameter.\n  // Always inline because this collapses to a small number of instructions\n  // when given a constant parameter, but GCC doesn't want to inline by default.\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE bool ExpectTag(uint32 expected);\n\n  // Like above, except this reads from the specified buffer. The caller is\n  // responsible for ensuring that the buffer is large enough to read a varint\n  // of the expected size. For best performance, use a compile-time constant as\n  // the expected tag parameter.\n  //\n  // Returns a pointer beyond the expected tag if it was found, or NULL if it\n  // was not.\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE static const uint8* ExpectTagFromArray(\n      const uint8* buffer,\n      uint32 expected);\n\n  // Usually returns true if no more bytes can be read.  Always returns false\n  // if more bytes can be read.  
If ExpectAtEnd() returns true, a subsequent\n  // call to LastTagWas() will act as if ReadTag() had been called and returned\n  // zero, and ConsumedEntireMessage() will return true.\n  bool ExpectAtEnd();\n\n  // If the last call to ReadTag() or ReadTagWithCutoff() returned the\n  // given value, returns true.  Otherwise, returns false;\n  //\n  // This is needed because parsers for some types of embedded messages\n  // (with field type TYPE_GROUP) don't actually know that they've reached the\n  // end of a message until they see an ENDGROUP tag, which was actually part\n  // of the enclosing message.  The enclosing message would like to check that\n  // tag to make sure it had the right number, so it calls LastTagWas() on\n  // return from the embedded parser to check.\n  bool LastTagWas(uint32 expected);\n\n  // When parsing message (but NOT a group), this method must be called\n  // immediately after MergeFromCodedStream() returns (if it returns true)\n  // to further verify that the message ended in a legitimate way.  For\n  // example, this verifies that parsing did not end on an end-group tag.\n  // It also checks for some cases where, due to optimizations,\n  // MergeFromCodedStream() can incorrectly return true.\n  bool ConsumedEntireMessage();\n\n  // Limits ----------------------------------------------------------\n  // Limits are used when parsing length-delimited embedded messages.\n  // After the message's length is read, PushLimit() is used to prevent\n  // the CodedInputStream from reading beyond that length.  Once the\n  // embedded message has been parsed, PopLimit() is called to undo the\n  // limit.\n\n  // Opaque type used with PushLimit() and PopLimit().  Do not modify\n  // values of this type yourself.  The only reason that this isn't a\n  // struct with private internals is for efficiency.\n  typedef int Limit;\n\n  // Places a limit on the number of bytes that the stream may read,\n  // starting from the current position.  
Once the stream hits this limit,\n  // it will act like the end of the input has been reached until PopLimit()\n  // is called.\n  //\n  // As the names imply, the stream conceptually has a stack of limits.  The\n  // shortest limit on the stack is always enforced, even if it is not the\n  // top limit.\n  //\n  // The value returned by PushLimit() is opaque to the caller, and must\n  // be passed unchanged to the corresponding call to PopLimit().\n  Limit PushLimit(int byte_limit);\n\n  // Pops the last limit pushed by PushLimit().  The input must be the value\n  // returned by that call to PushLimit().\n  void PopLimit(Limit limit);\n\n  // Returns the number of bytes left until the nearest limit on the\n  // stack is hit, or -1 if no limits are in place.\n  int BytesUntilLimit() const;\n\n  // Returns current position relative to the beginning of the input stream.\n  int CurrentPosition() const;\n\n  // Total Bytes Limit -----------------------------------------------\n  // To prevent malicious users from sending excessively large messages\n  // and causing integer overflows or memory exhaustion, CodedInputStream\n  // imposes a hard limit on the total number of bytes it will read.\n\n  // Sets the maximum number of bytes that this CodedInputStream will read\n  // before refusing to continue.  To prevent integer overflows in the\n  // protocol buffers implementation, as well as to prevent servers from\n  // allocating enormous amounts of memory to hold parsed messages, the\n  // maximum message length should be limited to the shortest length that\n  // will not harm usability.  The theoretical shortest message that could\n  // cause integer overflows is 512MB.  The default limit is 64MB.  Apps\n  // should set shorter limits if possible.  If warning_threshold is not -1,\n  // a warning will be printed to stderr after warning_threshold bytes are\n  // read.  
For backwards compatibility all negative values get squashed to -1,\n  // as other negative values might have special internal meanings.\n  // An error will always be printed to stderr if the limit is reached.\n  //\n  // This is unrelated to PushLimit()/PopLimit().\n  //\n  // Hint:  If you are reading this because your program is printing a\n  //   warning about dangerously large protocol messages, you may be\n  //   confused about what to do next.  The best option is to change your\n  //   design such that excessively large messages are not necessary.\n  //   For example, try to design file formats to consist of many small\n  //   messages rather than a single large one.  If this is infeasible,\n  //   you will need to increase the limit.  Chances are, though, that\n  //   your code never constructs a CodedInputStream on which the limit\n  //   can be set.  You probably parse messages by calling things like\n  //   Message::ParseFromString().  In this case, you will need to change\n  //   your code to instead construct some sort of ZeroCopyInputStream\n  //   (e.g. an ArrayInputStream), construct a CodedInputStream around\n  //   that, then call Message::ParseFromCodedStream() instead.  Then\n  //   you can adjust the limit.  Yes, it's more work, but you're doing\n  //   something unusual.\n  void SetTotalBytesLimit(int total_bytes_limit, int warning_threshold);\n\n  // The Total Bytes Limit minus the Current Position, or -1 if there\n  // is no Total Bytes Limit.\n  int BytesUntilTotalBytesLimit() const;\n\n  // Recursion Limit -------------------------------------------------\n  // To prevent corrupt or malicious messages from causing stack overflows,\n  // we must keep track of the depth of recursion when parsing embedded\n  // messages and groups.  CodedInputStream keeps track of this because it\n  // is the only object that is passed down the stack during parsing.\n\n  // Sets the maximum recursion depth.  
The default is 100.\n  void SetRecursionLimit(int limit);\n\n\n  // Increments the current recursion depth.  Returns true if the depth is\n  // under the limit, false if it has gone over.\n  bool IncrementRecursionDepth();\n\n  // Decrements the recursion depth if possible.\n  void DecrementRecursionDepth();\n\n  // Decrements the recursion depth blindly.  This is faster than\n  // DecrementRecursionDepth().  It should be used only if all previous\n  // increments to recursion depth were successful.\n  void UnsafeDecrementRecursionDepth();\n\n  // Shorthand for make_pair(PushLimit(byte_limit), --recursion_budget_).\n  // Using this can reduce code size and complexity in some cases.  The caller\n  // is expected to check that the second part of the result is non-negative (to\n  // bail out if the depth of recursion is too high) and, if all is well, to\n  // later pass the first part of the result to PopLimit() or similar.\n  std::pair<CodedInputStream::Limit, int> IncrementRecursionDepthAndPushLimit(\n      int byte_limit);\n\n  // Shorthand for PushLimit(ReadVarint32(&length) ? 
length : 0).\n  Limit ReadLengthAndPushLimit();\n\n  // Helper that is equivalent to: {\n  //  bool result = ConsumedEntireMessage();\n  //  PopLimit(limit);\n  //  UnsafeDecrementRecursionDepth();\n  //  return result; }\n  // Using this can reduce code size and complexity in some cases.\n  // Do not use unless the current recursion depth is greater than zero.\n  bool DecrementRecursionDepthAndPopLimit(Limit limit);\n\n  // Helper that is equivalent to: {\n  //  bool result = ConsumedEntireMessage();\n  //  PopLimit(limit);\n  //  return result; }\n  // Using this can reduce code size and complexity in some cases.\n  bool CheckEntireMessageConsumedAndPopLimit(Limit limit);\n\n  // Extension Registry ----------------------------------------------\n  // ADVANCED USAGE:  99.9% of people can ignore this section.\n  //\n  // By default, when parsing extensions, the parser looks for extension\n  // definitions in the pool which owns the outer message's Descriptor.\n  // However, you may call SetExtensionRegistry() to provide an alternative\n  // pool instead.  This makes it possible, for example, to parse a message\n  // using a generated class, but represent some extensions using\n  // DynamicMessage.\n\n  // Set the pool used to look up extensions.  Most users do not need to call\n  // this as the correct pool will be chosen automatically.\n  //\n  // WARNING:  It is very easy to misuse this.  Carefully read the requirements\n  //   below.  Do not use this unless you are sure you need it.  Almost no one\n  //   does.\n  //\n  // Let's say you are parsing a message into message object m, and you want\n  // to take advantage of SetExtensionRegistry().  You must follow these\n  // requirements:\n  //\n  // The given DescriptorPool must contain m->GetDescriptor().  It is not\n  // sufficient for it to simply contain a descriptor that has the same name\n  // and content -- it must be the *exact object*.  
In other words:\n  //   assert(pool->FindMessageTypeByName(m->GetDescriptor()->full_name()) ==\n  //          m->GetDescriptor());\n  // There are two ways to satisfy this requirement:\n  // 1) Use m->GetDescriptor()->pool() as the pool.  This is generally useless\n  //    because this is the pool that would be used anyway if you didn't call\n  //    SetExtensionRegistry() at all.\n  // 2) Use a DescriptorPool which has m->GetDescriptor()->pool() as an\n  //    \"underlay\".  Read the documentation for DescriptorPool for more\n  //    information about underlays.\n  //\n  // You must also provide a MessageFactory.  This factory will be used to\n  // construct Message objects representing extensions.  The factory's\n  // GetPrototype() MUST return non-NULL for any Descriptor which can be found\n  // through the provided pool.\n  //\n  // If the provided factory might return instances of protocol-compiler-\n  // generated (i.e. compiled-in) types, or if the outer message object m is\n  // a generated type, then the given factory MUST have this property:  If\n  // GetPrototype() is given a Descriptor which resides in\n  // DescriptorPool::generated_pool(), the factory MUST return the same\n  // prototype which MessageFactory::generated_factory() would return.  That\n  // is, given a descriptor for a generated type, the factory must return an\n  // instance of the generated class (NOT DynamicMessage).  
However, when\n  // given a descriptor for a type that is NOT in generated_pool, the factory\n  // is free to return any implementation.\n  //\n  // The reason for this requirement is that generated sub-objects may be\n  // accessed via the standard (non-reflection) extension accessor methods,\n  // and these methods will down-cast the object to the generated class type.\n  // If the object is not actually of that type, the results would be undefined.\n  // On the other hand, if an extension is not compiled in, then there is no\n  // way the code could end up accessing it via the standard accessors -- the\n  // only way to access the extension is via reflection.  When using reflection,\n  // DynamicMessage and generated messages are indistinguishable, so it's fine\n  // if these objects are represented using DynamicMessage.\n  //\n  // Using DynamicMessageFactory on which you have called\n  // SetDelegateToGeneratedFactory(true) should be sufficient to satisfy the\n  // above requirement.\n  //\n  // If either pool or factory is NULL, both must be NULL.\n  //\n  // Note that this feature is ignored when parsing \"lite\" messages as they do\n  // not have descriptors.\n  void SetExtensionRegistry(const DescriptorPool* pool,\n                            MessageFactory* factory);\n\n  // Get the DescriptorPool set via SetExtensionRegistry(), or NULL if no pool\n  // has been provided.\n  const DescriptorPool* GetExtensionPool();\n\n  // Get the MessageFactory set via SetExtensionRegistry(), or NULL if no\n  // factory has been provided.\n  MessageFactory* GetExtensionFactory();\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CodedInputStream);\n\n  const uint8* buffer_;\n  const uint8* buffer_end_;     // pointer to the end of the buffer.\n  ZeroCopyInputStream* input_;\n  int total_bytes_read_;  // total bytes read from input_, including\n                          // the current buffer\n\n  // If total_bytes_read_ surpasses INT_MAX, we record the extra bytes here\n 
 // so that we can BackUp() on destruction.\n  int overflow_bytes_;\n\n  // LastTagWas() stuff.\n  uint32 last_tag_;         // result of last ReadTag() or ReadTagWithCutoff().\n\n  // This is set true by ReadTag{Fallback/Slow}() if it is called when exactly\n  // at EOF, or by ExpectAtEnd() when it returns true.  This happens when we\n  // reach the end of a message and attempt to read another tag.\n  bool legitimate_message_end_;\n\n  // See EnableAliasing().\n  bool aliasing_enabled_;\n\n  // Limits\n  Limit current_limit_;   // if position = -1, no limit is applied\n\n  // For simplicity, if the current buffer crosses a limit (either a normal\n  // limit created by PushLimit() or the total bytes limit), buffer_size_\n  // only tracks the number of bytes before that limit.  This field\n  // contains the number of bytes after it.  Note that this implies that if\n  // buffer_size_ == 0 and buffer_size_after_limit_ > 0, we know we've\n  // hit a limit.  However, if both are zero, it doesn't necessarily mean\n  // we aren't at a limit -- the buffer may have ended exactly at the limit.\n  int buffer_size_after_limit_;\n\n  // Maximum number of bytes to read, period.  This is unrelated to\n  // current_limit_.  Set using SetTotalBytesLimit().\n  int total_bytes_limit_;\n\n  // If positive/0: Limit for bytes read after which a warning due to size\n  // should be logged.\n  // If -1: Printing of warning disabled. Can be set by client.\n  // If -2: Internal: Limit has been reached, print full size when destructing.\n  int total_bytes_warning_threshold_;\n\n  // Current recursion budget, controlled by IncrementRecursionDepth() and\n  // similar.  
Starts at recursion_limit_ and goes down: if this reaches\n  // -1 we are over budget.\n  int recursion_budget_;\n  // Recursion depth limit, set by SetRecursionLimit().\n  int recursion_limit_;\n\n  // See SetExtensionRegistry().\n  const DescriptorPool* extension_pool_;\n  MessageFactory* extension_factory_;\n\n  // Private member functions.\n\n  // Advance the buffer by a given number of bytes.\n  void Advance(int amount);\n\n  // Back up input_ to the current buffer position.\n  void BackUpInputToCurrentPosition();\n\n  // Recomputes the value of buffer_size_after_limit_.  Must be called after\n  // current_limit_ or total_bytes_limit_ changes.\n  void RecomputeBufferLimits();\n\n  // Writes an error message saying that we hit total_bytes_limit_.\n  void PrintTotalBytesLimitError();\n\n  // Called when the buffer runs out to request more data.  Implies an\n  // Advance(BufferSize()).\n  bool Refresh();\n\n  // When parsing varints, we optimize for the common case of small values, and\n  // then optimize for the case when the varint fits within the current buffer\n  // piece. The Fallback method is used when we can't use the one-byte\n  // optimization. The Slow method is yet another fallback when the buffer is\n  // not large enough. Making the slow path out-of-line speeds up the common\n  // case by 10-15%. The slow path is fairly uncommon: it only triggers when a\n  // message crosses multiple buffers.  Note: ReadVarint32Fallback() and\n  // ReadVarint64Fallback() are called frequently and generally not inlined, so\n  // they have been optimized to avoid \"out\" parameters.  The former returns -1\n  // if it fails and the uint32 it read otherwise.  
The latter has a bool\n  // indicating success or failure as part of its return type.\n  int64 ReadVarint32Fallback(uint32 first_byte_or_zero);\n  int ReadVarintSizeAsIntFallback();\n  std::pair<uint64, bool> ReadVarint64Fallback();\n  bool ReadVarint32Slow(uint32* value);\n  bool ReadVarint64Slow(uint64* value);\n  int ReadVarintSizeAsIntSlow();\n  bool ReadLittleEndian32Fallback(uint32* value);\n  bool ReadLittleEndian64Fallback(uint64* value);\n  // Fallback/slow methods for reading tags. These do not update last_tag_,\n  // but will set legitimate_message_end_ if we are at the end of the input\n  // stream.\n  uint32 ReadTagFallback(uint32 first_byte_or_zero);\n  uint32 ReadTagSlow();\n  bool ReadStringFallback(string* buffer, int size);\n\n  // Return the size of the buffer.\n  int BufferSize() const;\n\n  static const int kDefaultTotalBytesLimit = 64 << 20;  // 64MB\n\n  static const int kDefaultTotalBytesWarningThreshold = 32 << 20;  // 32MB\n\n  static int default_recursion_limit_;  // 100 by default.\n};\n\n// Class which encodes and writes binary data which is composed of varint-\n// encoded integers and fixed-width pieces.  Wraps a ZeroCopyOutputStream.\n// Most users will not need to deal with CodedOutputStream.\n//\n// Most methods of CodedOutputStream which return a bool return false if an\n// underlying I/O error occurs.  Once such a failure occurs, the\n// CodedOutputStream is broken and is no longer useful. The Write* methods do\n// not return the stream status, but will invalidate the stream if an error\n// occurs. The client can probe HadError() to determine the status.\n//\n// Note that every method of CodedOutputStream which writes some data has\n// a corresponding static \"ToArray\" version. 
These versions write directly\n// to the provided buffer, returning a pointer past the last written byte.\n// They require that the buffer has sufficient capacity for the encoded data.\n// This allows an optimization where we check if an output stream has enough\n// space for an entire message before we start writing and, if there is, we\n// call only the ToArray methods to avoid doing bound checks for each\n// individual value.\n// i.e., in the example above:\n//\n//   CodedOutputStream coded_output = new CodedOutputStream(raw_output);\n//   int magic_number = 1234;\n//   char text[] = \"Hello world!\";\n//\n//   int coded_size = sizeof(magic_number) +\n//                    CodedOutputStream::VarintSize32(strlen(text)) +\n//                    strlen(text);\n//\n//   uint8* buffer =\n//       coded_output->GetDirectBufferForNBytesAndAdvance(coded_size);\n//   if (buffer != NULL) {\n//     // The output stream has enough space in the buffer: write directly to\n//     // the array.\n//     buffer = CodedOutputStream::WriteLittleEndian32ToArray(magic_number,\n//                                                            buffer);\n//     buffer = CodedOutputStream::WriteVarint32ToArray(strlen(text), buffer);\n//     buffer = CodedOutputStream::WriteRawToArray(text, strlen(text), buffer);\n//   } else {\n//     // Make bound-checked writes, which will ask the underlying stream for\n//     // more space as needed.\n//     coded_output->WriteLittleEndian32(magic_number);\n//     coded_output->WriteVarint32(strlen(text));\n//     coded_output->WriteRaw(text, strlen(text));\n//   }\n//\n//   delete coded_output;\nclass LIBPROTOBUF_EXPORT CodedOutputStream {\n public:\n  // Create an CodedOutputStream that writes to the given ZeroCopyOutputStream.\n  explicit CodedOutputStream(ZeroCopyOutputStream* output);\n  CodedOutputStream(ZeroCopyOutputStream* output, bool do_eager_refresh);\n\n  // Destroy the CodedOutputStream and position the underlying\n  // ZeroCopyOutputStream 
immediately after the last byte written.\n  ~CodedOutputStream();\n\n  // Trims any unused space in the underlying buffer so that its size matches\n  // the number of bytes written by this stream. The underlying buffer will\n  // automatically be trimmed when this stream is destroyed; this call is only\n  // necessary if the underlying buffer is accessed *before* the stream is\n  // destroyed.\n  void Trim();\n\n  // Skips a number of bytes, leaving the bytes unmodified in the underlying\n  // buffer.  Returns false if an underlying write error occurs.  This is\n  // mainly useful with GetDirectBufferPointer().\n  bool Skip(int count);\n\n  // Sets *data to point directly at the unwritten part of the\n  // CodedOutputStream's underlying buffer, and *size to the size of that\n  // buffer, but does not advance the stream's current position.  This will\n  // always either produce a non-empty buffer or return false.  If the caller\n  // writes any data to this buffer, it should then call Skip() to skip over\n  // the consumed bytes.  This may be useful for implementing external fast\n  // serialization routines for types of data not covered by the\n  // CodedOutputStream interface.\n  bool GetDirectBufferPointer(void** data, int* size);\n\n  // If there are at least \"size\" bytes available in the current buffer,\n  // returns a pointer directly into the buffer and advances over these bytes.\n  // The caller may then write directly into this buffer (e.g. using the\n  // *ToArray static methods) rather than go through CodedOutputStream.  If\n  // there are not enough bytes available, returns NULL.  
The return pointer is\n  // invalidated as soon as any other non-const method of CodedOutputStream\n  // is called.\n  inline uint8* GetDirectBufferForNBytesAndAdvance(int size);\n\n  // Write raw bytes, copying them from the given buffer.\n  void WriteRaw(const void* buffer, int size);\n  // Like WriteRaw()  but will try to write aliased data if aliasing is\n  // turned on.\n  void WriteRawMaybeAliased(const void* data, int size);\n  // Like WriteRaw()  but writing directly to the target array.\n  // This is _not_ inlined, as the compiler often optimizes memcpy into inline\n  // copy loops. Since this gets called by every field with string or bytes\n  // type, inlining may lead to a significant amount of code bloat, with only a\n  // minor performance gain.\n  static uint8* WriteRawToArray(const void* buffer, int size, uint8* target);\n\n  // Equivalent to WriteRaw(str.data(), str.size()).\n  void WriteString(const string& str);\n  // Like WriteString()  but writing directly to the target array.\n  static uint8* WriteStringToArray(const string& str, uint8* target);\n  // Write the varint-encoded size of str followed by str.\n  static uint8* WriteStringWithSizeToArray(const string& str, uint8* target);\n\n\n  // Instructs the CodedOutputStream to allow the underlying\n  // ZeroCopyOutputStream to hold pointers to the original structure instead of\n  // copying, if it supports it (i.e. output->AllowsAliasing() is true).  If the\n  // underlying stream does not support aliasing, then enabling it has no\n  // affect.  
For now, this only affects the behavior of\n  // WriteRawMaybeAliased().\n  //\n  // NOTE: It is caller's responsibility to ensure that the chunk of memory\n  // remains live until all of the data has been consumed from the stream.\n  void EnableAliasing(bool enabled);\n\n  // Write a 32-bit little-endian integer.\n  void WriteLittleEndian32(uint32 value);\n  // Like WriteLittleEndian32()  but writing directly to the target array.\n  static uint8* WriteLittleEndian32ToArray(uint32 value, uint8* target);\n  // Write a 64-bit little-endian integer.\n  void WriteLittleEndian64(uint64 value);\n  // Like WriteLittleEndian64()  but writing directly to the target array.\n  static uint8* WriteLittleEndian64ToArray(uint64 value, uint8* target);\n\n  // Write an unsigned integer with Varint encoding.  Writing a 32-bit value\n  // is equivalent to casting it to uint64 and writing it as a 64-bit value,\n  // but may be more efficient.\n  void WriteVarint32(uint32 value);\n  // Like WriteVarint32()  but writing directly to the target array.\n  static uint8* WriteVarint32ToArray(uint32 value, uint8* target);\n  // Write an unsigned integer with Varint encoding.\n  void WriteVarint64(uint64 value);\n  // Like WriteVarint64()  but writing directly to the target array.\n  static uint8* WriteVarint64ToArray(uint64 value, uint8* target);\n\n  // Equivalent to WriteVarint32() except when the value is negative,\n  // in which case it must be sign-extended to a full 10 bytes.\n  void WriteVarint32SignExtended(int32 value);\n  // Like WriteVarint32SignExtended()  but writing directly to the target array.\n  static uint8* WriteVarint32SignExtendedToArray(int32 value, uint8* target);\n\n  // This is identical to WriteVarint32(), but optimized for writing tags.\n  // In particular, if the input is a compile-time constant, this method\n  // compiles down to a couple instructions.\n  // Always inline because otherwise the aformentioned optimization can't work,\n  // but GCC by default doesn't 
want to inline this.\n  void WriteTag(uint32 value);\n  // Like WriteTag()  but writing directly to the target array.\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE static uint8* WriteTagToArray(uint32 value,\n                                                        uint8* target);\n\n  // Returns the number of bytes needed to encode the given value as a varint.\n  static size_t VarintSize32(uint32 value);\n  // Returns the number of bytes needed to encode the given value as a varint.\n  static size_t VarintSize64(uint64 value);\n\n  // If negative, 10 bytes.  Otheriwse, same as VarintSize32().\n  static size_t VarintSize32SignExtended(int32 value);\n\n  // Compile-time equivalent of VarintSize32().\n  template <uint32 Value>\n  struct StaticVarintSize32 {\n    static const size_t value =\n        (Value < (1 << 7))\n            ? 1\n            : (Value < (1 << 14))\n                ? 2\n                : (Value < (1 << 21))\n                    ? 3\n                    : (Value < (1 << 28))\n                        ? 4\n                        : 5;\n  };\n\n  // Returns the total number of bytes written since this object was created.\n  inline int ByteCount() const;\n\n  // Returns true if there was an underlying I/O error since this object was\n  // created.\n  bool HadError() const { return had_error_; }\n\n  // Deterministic serialization, if requested, guarantees that for a given\n  // binary, equal messages will always be serialized to the same bytes. This\n  // implies:\n  //   . repeated serialization of a message will return the same bytes\n  //   . different processes of the same binary (which may be executing on\n  //     different machines) will serialize equal messages to the same bytes.\n  //\n  // Note the deterministic serialization is NOT canonical across languages; it\n  // is also unstable across different builds with schema changes due to unknown\n  // fields. 
Users who need canonical serialization, e.g., persistent storage in\n  // a canonical form, fingerprinting, etc., should define their own\n  // canonicalization specification and implement the serializer using\n  // reflection APIs rather than relying on this API.\n  //\n  // If determinisitc serialization is requested, the serializer will\n  // sort map entries by keys in lexicographical order or numerical order.\n  // (This is an implementation detail and may subject to change.)\n  //\n  // There are two ways to determine whether serialization should be\n  // deterministic for this CodedOutputStream.  If SetSerializationDeterministic\n  // has not yet been called, then the default comes from the global default,\n  // which is false, until SetDefaultSerializationDeterministic has been called.\n  // Otherwise, SetSerializationDeterministic has been called, and the last\n  // value passed to it is all that matters.\n  void SetSerializationDeterministic(bool value) {\n    serialization_deterministic_is_overridden_ = true;\n    serialization_deterministic_override_ = value;\n  }\n  // See above.  Also, note that users of this CodedOutputStream may need to\n  // call IsSerializationDeterminstic() to serialize in the intended way.  
This\n  // CodedOutputStream cannot enforce a desire for deterministic serialization\n  // by itself.\n  bool IsSerializationDeterminstic() const {\n    return serialization_deterministic_is_overridden_ ?\n        serialization_deterministic_override_ :\n        default_serialization_deterministic_;\n  }\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CodedOutputStream);\n\n  ZeroCopyOutputStream* output_;\n  uint8* buffer_;\n  int buffer_size_;\n  int total_bytes_;  // Sum of sizes of all buffers seen so far.\n  bool had_error_;   // Whether an error occurred during output.\n  bool aliasing_enabled_;  // See EnableAliasing().\n  // See SetSerializationDeterministic() regarding these three fields.\n  bool serialization_deterministic_is_overridden_;\n  bool serialization_deterministic_override_;\n  static bool default_serialization_deterministic_;\n\n  // Advance the buffer by a given number of bytes.\n  void Advance(int amount);\n\n  // Called when the buffer runs out to request more data.  Implies an\n  // Advance(buffer_size_).\n  bool Refresh();\n\n  // Like WriteRaw() but may avoid copying if the underlying\n  // ZeroCopyOutputStream supports it.\n  void WriteAliasedRaw(const void* buffer, int size);\n\n  // If this write might cross the end of the buffer, we compose the bytes first\n  // then use WriteRaw().\n  void WriteVarint32SlowPath(uint32 value);\n\n  // Always-inlined versions of WriteVarint* functions so that code can be\n  // reused, while still controlling size. For instance, WriteVarint32ToArray()\n  // should not directly call this: since it is inlined itself, doing so\n  // would greatly increase the size of generated code. Instead, it should call\n  // WriteVarint32FallbackToArray.  
Meanwhile, WriteVarint32() is already\n  // out-of-line, so it should just invoke this directly to avoid any extra\n  // function call overhead.\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE static uint8* WriteVarint64ToArrayInline(\n      uint64 value, uint8* target);\n\n  static size_t VarintSize32Fallback(uint32 value);\n\n  // See above.  Other projects may use \"friend\" to allow them to call this.\n  static void SetDefaultSerializationDeterministic() {\n    default_serialization_deterministic_ = true;\n  }\n};\n\n// inline methods ====================================================\n// The vast majority of varints are only one byte.  These inline\n// methods optimize for that case.\n\ninline bool CodedInputStream::ReadVarint32(uint32* value) {\n  uint32 v = 0;\n  if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_)) {\n    v = *buffer_;\n    if (v < 0x80) {\n      *value = v;\n      Advance(1);\n      return true;\n    }\n  }\n  int64 result = ReadVarint32Fallback(v);\n  *value = static_cast<uint32>(result);\n  return result >= 0;\n}\n\ninline bool CodedInputStream::ReadVarint64(uint64* value) {\n  if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_) && *buffer_ < 0x80) {\n    *value = *buffer_;\n    Advance(1);\n    return true;\n  }\n  std::pair<uint64, bool> p = ReadVarint64Fallback();\n  *value = p.first;\n  return p.second;\n}\n\ninline bool CodedInputStream::ReadVarintSizeAsInt(int* value) {\n  if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_)) {\n    int v = *buffer_;\n    if (v < 0x80) {\n      *value = v;\n      Advance(1);\n      return true;\n    }\n  }\n  *value = ReadVarintSizeAsIntFallback();\n  return *value >= 0;\n}\n\n// static\ninline const uint8* CodedInputStream::ReadLittleEndian32FromArray(\n    const uint8* buffer,\n    uint32* value) {\n#if defined(PROTOBUF_LITTLE_ENDIAN)\n  memcpy(value, buffer, sizeof(*value));\n  return buffer + sizeof(*value);\n#else\n  *value = (static_cast<uint32>(buffer[0])      ) |\n           (static_cast<uint32>(buffer[1]) <<  8) |\n   
        (static_cast<uint32>(buffer[2]) << 16) |\n           (static_cast<uint32>(buffer[3]) << 24);\n  return buffer + sizeof(*value);\n#endif\n}\n// static\ninline const uint8* CodedInputStream::ReadLittleEndian64FromArray(\n    const uint8* buffer,\n    uint64* value) {\n#if defined(PROTOBUF_LITTLE_ENDIAN)\n  memcpy(value, buffer, sizeof(*value));\n  return buffer + sizeof(*value);\n#else\n  uint32 part0 = (static_cast<uint32>(buffer[0])      ) |\n                 (static_cast<uint32>(buffer[1]) <<  8) |\n                 (static_cast<uint32>(buffer[2]) << 16) |\n                 (static_cast<uint32>(buffer[3]) << 24);\n  uint32 part1 = (static_cast<uint32>(buffer[4])      ) |\n                 (static_cast<uint32>(buffer[5]) <<  8) |\n                 (static_cast<uint32>(buffer[6]) << 16) |\n                 (static_cast<uint32>(buffer[7]) << 24);\n  *value = static_cast<uint64>(part0) |\n          (static_cast<uint64>(part1) << 32);\n  return buffer + sizeof(*value);\n#endif\n}\n\ninline bool CodedInputStream::ReadLittleEndian32(uint32* value) {\n#if defined(PROTOBUF_LITTLE_ENDIAN)\n  if (GOOGLE_PREDICT_TRUE(BufferSize() >= static_cast<int>(sizeof(*value)))) {\n    memcpy(value, buffer_, sizeof(*value));\n    Advance(sizeof(*value));\n    return true;\n  } else {\n    return ReadLittleEndian32Fallback(value);\n  }\n#else\n  return ReadLittleEndian32Fallback(value);\n#endif\n}\n\ninline bool CodedInputStream::ReadLittleEndian64(uint64* value) {\n#if defined(PROTOBUF_LITTLE_ENDIAN)\n  if (GOOGLE_PREDICT_TRUE(BufferSize() >= static_cast<int>(sizeof(*value)))) {\n    memcpy(value, buffer_, sizeof(*value));\n    Advance(sizeof(*value));\n    return true;\n  } else {\n    return ReadLittleEndian64Fallback(value);\n  }\n#else\n  return ReadLittleEndian64Fallback(value);\n#endif\n}\n\ninline uint32 CodedInputStream::ReadTag() {\n  uint32 v = 0;\n  if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_)) {\n    v = *buffer_;\n    if (v < 0x80) {\n      last_tag_ = v;\n      
Advance(1);\n      return v;\n    }\n  }\n  last_tag_ = ReadTagFallback(v);\n  return last_tag_;\n}\n\ninline std::pair<uint32, bool> CodedInputStream::ReadTagWithCutoff(\n    uint32 cutoff) {\n  // In performance-sensitive code we can expect cutoff to be a compile-time\n  // constant, and things like \"cutoff >= kMax1ByteVarint\" to be evaluated at\n  // compile time.\n  uint32 first_byte_or_zero = 0;\n  if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_)) {\n    // Hot case: buffer_ non_empty, buffer_[0] in [1, 128).\n    // TODO(gpike): Is it worth rearranging this? E.g., if the number of fields\n    // is large enough then is it better to check for the two-byte case first?\n    first_byte_or_zero = buffer_[0];\n    if (static_cast<int8>(buffer_[0]) > 0) {\n      const uint32 kMax1ByteVarint = 0x7f;\n      uint32 tag = last_tag_ = buffer_[0];\n      Advance(1);\n      return std::make_pair(tag, cutoff >= kMax1ByteVarint || tag <= cutoff);\n    }\n    // Other hot case: cutoff >= 0x80, buffer_ has at least two bytes available,\n    // and tag is two bytes.  The latter is tested by bitwise-and-not of the\n    // first byte and the second byte.\n    if (cutoff >= 0x80 &&\n        GOOGLE_PREDICT_TRUE(buffer_ + 1 < buffer_end_) &&\n        GOOGLE_PREDICT_TRUE((buffer_[0] & ~buffer_[1]) >= 0x80)) {\n      const uint32 kMax2ByteVarint = (0x7f << 7) + 0x7f;\n      uint32 tag = last_tag_ = (1u << 7) * buffer_[1] + (buffer_[0] - 0x80);\n      Advance(2);\n      // It might make sense to test for tag == 0 now, but it is so rare that\n      // that we don't bother.  A varint-encoded 0 should be one byte unless\n      // the encoder lost its mind.  The second part of the return value of\n      // this function is allowed to be either true or false if the tag is 0,\n      // so we don't have to check for tag == 0.  
We may need to check whether\n      // it exceeds cutoff.\n      bool at_or_below_cutoff = cutoff >= kMax2ByteVarint || tag <= cutoff;\n      return std::make_pair(tag, at_or_below_cutoff);\n    }\n  }\n  // Slow path\n  last_tag_ = ReadTagFallback(first_byte_or_zero);\n  return std::make_pair(last_tag_, static_cast<uint32>(last_tag_ - 1) < cutoff);\n}\n\ninline bool CodedInputStream::LastTagWas(uint32 expected) {\n  return last_tag_ == expected;\n}\n\ninline bool CodedInputStream::ConsumedEntireMessage() {\n  return legitimate_message_end_;\n}\n\ninline bool CodedInputStream::ExpectTag(uint32 expected) {\n  if (expected < (1 << 7)) {\n    if (GOOGLE_PREDICT_TRUE(buffer_ < buffer_end_) && buffer_[0] == expected) {\n      Advance(1);\n      return true;\n    } else {\n      return false;\n    }\n  } else if (expected < (1 << 14)) {\n    if (GOOGLE_PREDICT_TRUE(BufferSize() >= 2) &&\n        buffer_[0] == static_cast<uint8>(expected | 0x80) &&\n        buffer_[1] == static_cast<uint8>(expected >> 7)) {\n      Advance(2);\n      return true;\n    } else {\n      return false;\n    }\n  } else {\n    // Don't bother optimizing for larger values.\n    return false;\n  }\n}\n\ninline const uint8* CodedInputStream::ExpectTagFromArray(\n    const uint8* buffer, uint32 expected) {\n  if (expected < (1 << 7)) {\n    if (buffer[0] == expected) {\n      return buffer + 1;\n    }\n  } else if (expected < (1 << 14)) {\n    if (buffer[0] == static_cast<uint8>(expected | 0x80) &&\n        buffer[1] == static_cast<uint8>(expected >> 7)) {\n      return buffer + 2;\n    }\n  }\n  return NULL;\n}\n\ninline void CodedInputStream::GetDirectBufferPointerInline(const void** data,\n                                                           int* size) {\n  *data = buffer_;\n  *size = static_cast<int>(buffer_end_ - buffer_);\n}\n\ninline bool CodedInputStream::ExpectAtEnd() {\n  // If we are at a limit we know no more bytes can be read.  
Otherwise, it's\n  // hard to say without calling Refresh(), and we'd rather not do that.\n\n  if (buffer_ == buffer_end_ &&\n      ((buffer_size_after_limit_ != 0) ||\n       (total_bytes_read_ == current_limit_))) {\n    last_tag_ = 0;                   // Pretend we called ReadTag()...\n    legitimate_message_end_ = true;  // ... and it hit EOF.\n    return true;\n  } else {\n    return false;\n  }\n}\n\ninline int CodedInputStream::CurrentPosition() const {\n  return total_bytes_read_ - (BufferSize() + buffer_size_after_limit_);\n}\n\ninline uint8* CodedOutputStream::GetDirectBufferForNBytesAndAdvance(int size) {\n  if (buffer_size_ < size) {\n    return NULL;\n  } else {\n    uint8* result = buffer_;\n    Advance(size);\n    return result;\n  }\n}\n\ninline uint8* CodedOutputStream::WriteVarint32ToArray(uint32 value,\n                                                      uint8* target) {\n  while (value >= 0x80) {\n    *target = static_cast<uint8>(value | 0x80);\n    value >>= 7;\n    ++target;\n  }\n  *target = static_cast<uint8>(value);\n  return target + 1;\n}\n\ninline void CodedOutputStream::WriteVarint32SignExtended(int32 value) {\n  if (value < 0) {\n    WriteVarint64(static_cast<uint64>(value));\n  } else {\n    WriteVarint32(static_cast<uint32>(value));\n  }\n}\n\ninline uint8* CodedOutputStream::WriteVarint32SignExtendedToArray(\n    int32 value, uint8* target) {\n  if (value < 0) {\n    return WriteVarint64ToArray(static_cast<uint64>(value), target);\n  } else {\n    return WriteVarint32ToArray(static_cast<uint32>(value), target);\n  }\n}\n\ninline uint8* CodedOutputStream::WriteLittleEndian32ToArray(uint32 value,\n                                                            uint8* target) {\n#if defined(PROTOBUF_LITTLE_ENDIAN)\n  memcpy(target, &value, sizeof(value));\n#else\n  target[0] = static_cast<uint8>(value);\n  target[1] = static_cast<uint8>(value >>  8);\n  target[2] = static_cast<uint8>(value >> 16);\n  target[3] = static_cast<uint8>(value 
>> 24);\n#endif\n  return target + sizeof(value);\n}\n\ninline uint8* CodedOutputStream::WriteLittleEndian64ToArray(uint64 value,\n                                                            uint8* target) {\n#if defined(PROTOBUF_LITTLE_ENDIAN)\n  memcpy(target, &value, sizeof(value));\n#else\n  uint32 part0 = static_cast<uint32>(value);\n  uint32 part1 = static_cast<uint32>(value >> 32);\n\n  target[0] = static_cast<uint8>(part0);\n  target[1] = static_cast<uint8>(part0 >>  8);\n  target[2] = static_cast<uint8>(part0 >> 16);\n  target[3] = static_cast<uint8>(part0 >> 24);\n  target[4] = static_cast<uint8>(part1);\n  target[5] = static_cast<uint8>(part1 >>  8);\n  target[6] = static_cast<uint8>(part1 >> 16);\n  target[7] = static_cast<uint8>(part1 >> 24);\n#endif\n  return target + sizeof(value);\n}\n\ninline void CodedOutputStream::WriteVarint32(uint32 value) {\n  if (buffer_size_ >= 5) {\n    // Fast path:  We have enough bytes left in the buffer to guarantee that\n    // this write won't cross the end, so we can skip the checks.\n    uint8* target = buffer_;\n    uint8* end = WriteVarint32ToArray(value, target);\n    int size = static_cast<int>(end - target);\n    Advance(size);\n  } else {\n    WriteVarint32SlowPath(value);\n  }\n}\n\ninline void CodedOutputStream::WriteTag(uint32 value) {\n  WriteVarint32(value);\n}\n\ninline uint8* CodedOutputStream::WriteTagToArray(\n    uint32 value, uint8* target) {\n  return WriteVarint32ToArray(value, target);\n}\n\ninline size_t CodedOutputStream::VarintSize32(uint32 value) {\n  if (value < (1 << 7)) {\n    return 1;\n  } else  {\n    return VarintSize32Fallback(value);\n  }\n}\n\ninline size_t CodedOutputStream::VarintSize32SignExtended(int32 value) {\n  if (value < 0) {\n    return 10;     // TODO(kenton):  Make this a symbolic constant.\n  } else {\n    return VarintSize32(static_cast<uint32>(value));\n  }\n}\n\ninline void CodedOutputStream::WriteString(const string& str) {\n  WriteRaw(str.data(), 
static_cast<int>(str.size()));\n}\n\ninline void CodedOutputStream::WriteRawMaybeAliased(\n    const void* data, int size) {\n  if (aliasing_enabled_) {\n    WriteAliasedRaw(data, size);\n  } else {\n    WriteRaw(data, size);\n  }\n}\n\ninline uint8* CodedOutputStream::WriteStringToArray(\n    const string& str, uint8* target) {\n  return WriteRawToArray(str.data(), static_cast<int>(str.size()), target);\n}\n\ninline int CodedOutputStream::ByteCount() const {\n  return total_bytes_ - buffer_size_;\n}\n\ninline void CodedInputStream::Advance(int amount) {\n  buffer_ += amount;\n}\n\ninline void CodedOutputStream::Advance(int amount) {\n  buffer_ += amount;\n  buffer_size_ -= amount;\n}\n\ninline void CodedInputStream::SetRecursionLimit(int limit) {\n  recursion_budget_ += limit - recursion_limit_;\n  recursion_limit_ = limit;\n}\n\ninline bool CodedInputStream::IncrementRecursionDepth() {\n  --recursion_budget_;\n  return recursion_budget_ >= 0;\n}\n\ninline void CodedInputStream::DecrementRecursionDepth() {\n  if (recursion_budget_ < recursion_limit_) ++recursion_budget_;\n}\n\ninline void CodedInputStream::UnsafeDecrementRecursionDepth() {\n  assert(recursion_budget_ < recursion_limit_);\n  ++recursion_budget_;\n}\n\ninline void CodedInputStream::SetExtensionRegistry(const DescriptorPool* pool,\n                                                   MessageFactory* factory) {\n  extension_pool_ = pool;\n  extension_factory_ = factory;\n}\n\ninline const DescriptorPool* CodedInputStream::GetExtensionPool() {\n  return extension_pool_;\n}\n\ninline MessageFactory* CodedInputStream::GetExtensionFactory() {\n  return extension_factory_;\n}\n\ninline int CodedInputStream::BufferSize() const {\n  return static_cast<int>(buffer_end_ - buffer_);\n}\n\ninline CodedInputStream::CodedInputStream(ZeroCopyInputStream* input)\n  : buffer_(NULL),\n    buffer_end_(NULL),\n    input_(input),\n    total_bytes_read_(0),\n    overflow_bytes_(0),\n    last_tag_(0),\n    
legitimate_message_end_(false),\n    aliasing_enabled_(false),\n    current_limit_(kint32max),\n    buffer_size_after_limit_(0),\n    total_bytes_limit_(kDefaultTotalBytesLimit),\n    total_bytes_warning_threshold_(kDefaultTotalBytesWarningThreshold),\n    recursion_budget_(default_recursion_limit_),\n    recursion_limit_(default_recursion_limit_),\n    extension_pool_(NULL),\n    extension_factory_(NULL) {\n  // Eagerly Refresh() so buffer space is immediately available.\n  Refresh();\n}\n\ninline CodedInputStream::CodedInputStream(const uint8* buffer, int size)\n  : buffer_(buffer),\n    buffer_end_(buffer + size),\n    input_(NULL),\n    total_bytes_read_(size),\n    overflow_bytes_(0),\n    last_tag_(0),\n    legitimate_message_end_(false),\n    aliasing_enabled_(false),\n    current_limit_(size),\n    buffer_size_after_limit_(0),\n    total_bytes_limit_(kDefaultTotalBytesLimit),\n    total_bytes_warning_threshold_(kDefaultTotalBytesWarningThreshold),\n    recursion_budget_(default_recursion_limit_),\n    recursion_limit_(default_recursion_limit_),\n    extension_pool_(NULL),\n    extension_factory_(NULL) {\n  // Note that setting current_limit_ == size is important to prevent some\n  // code paths from trying to access input_ and segfaulting.\n}\n\ninline bool CodedInputStream::IsFlat() const {\n  return input_ == NULL;\n}\n\n}  // namespace io\n}  // namespace protobuf\n\n\n#if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)\n  #pragma runtime_checks(\"c\", restore)\n#endif  // _MSC_VER && !defined(__INTEL_COMPILER)\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_IO_CODED_STREAM_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/io/gzip_stream.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: brianolson@google.com (Brian Olson)\n//\n// This file contains the definition for classes GzipInputStream and\n// GzipOutputStream.\n//\n// GzipInputStream decompresses data from an underlying\n// ZeroCopyInputStream and provides the decompressed data as a\n// ZeroCopyInputStream.\n//\n// GzipOutputStream is an ZeroCopyOutputStream that compresses data to\n// an underlying ZeroCopyOutputStream.\n\n#ifndef GOOGLE_PROTOBUF_IO_GZIP_STREAM_H__\n#define GOOGLE_PROTOBUF_IO_GZIP_STREAM_H__\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/io/zero_copy_stream.h>\n#include <zlib.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace io {\n\n// A ZeroCopyInputStream that reads compressed data through zlib\nclass LIBPROTOBUF_EXPORT GzipInputStream : public ZeroCopyInputStream {\n public:\n  // Format key for constructor\n  enum Format {\n    // zlib will autodetect gzip header or deflate stream\n    AUTO = 0,\n\n    // GZIP streams have some extra header data for file attributes.\n    GZIP = 1,\n\n    // Simpler zlib stream format.\n    ZLIB = 2,\n  };\n\n  // buffer_size and format may be -1 for default of 64kB and GZIP format\n  explicit GzipInputStream(\n      ZeroCopyInputStream* sub_stream,\n      Format format = AUTO,\n      int buffer_size = -1);\n  virtual ~GzipInputStream();\n\n  // Return last error message or NULL if no error.\n  inline const char* ZlibErrorMessage() const {\n    return 
zcontext_.msg;\n  }\n  inline int ZlibErrorCode() const {\n    return zerror_;\n  }\n\n  // implements ZeroCopyInputStream ----------------------------------\n  bool Next(const void** data, int* size);\n  void BackUp(int count);\n  bool Skip(int count);\n  int64 ByteCount() const;\n\n private:\n  Format format_;\n\n  ZeroCopyInputStream* sub_stream_;\n\n  z_stream zcontext_;\n  int zerror_;\n\n  void* output_buffer_;\n  void* output_position_;\n  size_t output_buffer_length_;\n  int64 byte_count_;\n\n  int Inflate(int flush);\n  void DoNextOutput(const void** data, int* size);\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(GzipInputStream);\n};\n\n\nclass LIBPROTOBUF_EXPORT GzipOutputStream : public ZeroCopyOutputStream {\n public:\n  // Format key for constructor\n  enum Format {\n    // GZIP streams have some extra header data for file attributes.\n    GZIP = 1,\n\n    // Simpler zlib stream format.\n    ZLIB = 2,\n  };\n\n  struct Options {\n    // Defaults to GZIP.\n    Format format;\n\n    // What size buffer to use internally.  Defaults to 64kB.\n    int buffer_size;\n\n    // A number between 0 and 9, where 0 is no compression and 9 is best\n    // compression.  Defaults to Z_DEFAULT_COMPRESSION (see zlib.h).\n    int compression_level;\n\n    // Defaults to Z_DEFAULT_STRATEGY.  Can also be set to Z_FILTERED,\n    // Z_HUFFMAN_ONLY, or Z_RLE.  
See the documentation for deflateInit2 in\n    // zlib.h for definitions of these constants.\n    int compression_strategy;\n\n    Options();  // Initializes with default values.\n  };\n\n  // Create a GzipOutputStream with default options.\n  explicit GzipOutputStream(ZeroCopyOutputStream* sub_stream);\n\n  // Create a GzipOutputStream with the given options.\n  GzipOutputStream(\n      ZeroCopyOutputStream* sub_stream,\n      const Options& options);\n\n  virtual ~GzipOutputStream();\n\n  // Return last error message or NULL if no error.\n  inline const char* ZlibErrorMessage() const {\n    return zcontext_.msg;\n  }\n  inline int ZlibErrorCode() const {\n    return zerror_;\n  }\n\n  // Flushes data written so far to zipped data in the underlying stream.\n  // It is the caller's responsibility to flush the underlying stream if\n  // necessary.\n  // Compression may be less efficient stopping and starting around flushes.\n  // Returns true if no error.\n  //\n  // Please ensure that block size is > 6. 
Here is an excerpt from the zlib\n  // doc that explains why:\n  //\n  // In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that avail_out\n  // is greater than six to avoid repeated flush markers due to\n  // avail_out == 0 on return.\n  bool Flush();\n\n  // Writes out all data and closes the gzip stream.\n  // It is the caller's responsibility to close the underlying stream if\n  // necessary.\n  // Returns true if no error.\n  bool Close();\n\n  // implements ZeroCopyOutputStream ---------------------------------\n  bool Next(void** data, int* size);\n  void BackUp(int count);\n  int64 ByteCount() const;\n\n private:\n  ZeroCopyOutputStream* sub_stream_;\n  // Result from calling Next() on sub_stream_\n  void* sub_data_;\n  int sub_data_size_;\n\n  z_stream zcontext_;\n  int zerror_;\n  void* input_buffer_;\n  size_t input_buffer_length_;\n\n  // Shared constructor code.\n  void Init(ZeroCopyOutputStream* sub_stream, const Options& options);\n\n  // Do some compression.\n  // Takes zlib flush mode.\n  // Returns zlib error code.\n  int Deflate(int flush);\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(GzipOutputStream);\n};\n\n}  // namespace io\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_IO_GZIP_STREAM_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/io/printer.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Utility class for writing text to a ZeroCopyOutputStream.\n\n#ifndef GOOGLE_PROTOBUF_IO_PRINTER_H__\n#define GOOGLE_PROTOBUF_IO_PRINTER_H__\n\n#include <string>\n#include <map>\n#include <vector>\n#include <google/protobuf/stubs/common.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace io {\n\nclass ZeroCopyOutputStream;     // zero_copy_stream.h\n\n// Records annotations about a Printer's output.\nclass LIBPROTOBUF_EXPORT AnnotationCollector {\n public:\n  // Records that the bytes in file_path beginning with begin_offset and ending\n  // before end_offset are associated with the SourceCodeInfo-style path.\n  virtual void AddAnnotation(size_t begin_offset, size_t end_offset,\n                             const string& file_path,\n                             const vector<int>& path) = 0;\n\n  virtual ~AnnotationCollector() {}\n};\n\n// Records annotations about a Printer's output to the given protocol buffer,\n// assuming that the buffer has an ::Annotation message exposing path,\n// source_file, begin and end fields.\ntemplate <typename AnnotationProto>\nclass AnnotationProtoCollector : public AnnotationCollector {\n public:\n  // annotation_proto is the protocol buffer to which new Annotations should be\n  // added. 
It is not owned by the AnnotationProtoCollector.\n  explicit AnnotationProtoCollector(AnnotationProto* annotation_proto)\n      : annotation_proto_(annotation_proto) {}\n\n  // Override for AnnotationCollector::AddAnnotation.\n  virtual void AddAnnotation(size_t begin_offset, size_t end_offset,\n                             const string& file_path, const vector<int>& path) {\n    typename AnnotationProto::Annotation* annotation =\n        annotation_proto_->add_annotation();\n    for (int i = 0; i < path.size(); ++i) {\n      annotation->add_path(path[i]);\n    }\n    annotation->set_source_file(file_path);\n    annotation->set_begin(begin_offset);\n    annotation->set_end(end_offset);\n  }\n\n private:\n  // The protocol buffer to which new annotations should be added.\n  AnnotationProto* const annotation_proto_;\n};\n\n// This simple utility class assists in code generation.  It basically\n// allows the caller to define a set of variables and then output some\n// text with variable substitutions.  Example usage:\n//\n//   Printer printer(output, '$');\n//   map<string, string> vars;\n//   vars[\"name\"] = \"Bob\";\n//   printer.Print(vars, \"My name is $name$.\");\n//\n// The above writes \"My name is Bob.\" to the output stream.\n//\n// Printer aggressively enforces correct usage, crashing (with assert failures)\n// in the case of undefined variables in debug builds. This helps greatly in\n// debugging code which uses it.\n//\n// If a Printer is constructed with an AnnotationCollector, it will provide it\n// with annotations that connect the Printer's output to paths that can identify\n// various descriptors.  
In the above example, if person_ is a descriptor that\n// identifies Bob, we can associate the output string \"My name is Bob.\" with\n// a source path pointing to that descriptor with:\n//\n//   printer.Annotate(\"name\", person_);\n//\n// The AnnotationCollector will be sent an annotation linking the output range\n// covering \"Bob\" to the logical path provided by person_.  Tools may use\n// this association to (for example) link \"Bob\" in the output back to the\n// source file that defined the person_ descriptor identifying Bob.\n//\n// Annotate can only examine variables substituted during the last call to\n// Print.  It is invalid to refer to a variable that was used multiple times\n// in a single Print call.\n//\n// In full generality, one may specify a range of output text using a beginning\n// substitution variable and an ending variable.  The resulting annotation will\n// span from the first character of the substituted value for the beginning\n// variable to the last character of the substituted value for the ending\n// variable.  For example, the Annotate call above is equivalent to this one:\n//\n//   printer.Annotate(\"name\", \"name\", person_);\n//\n// This is useful if multiple variables combine to form a single span of output\n// that should be annotated with the same source path.  For example:\n//\n//   Printer printer(output, '$');\n//   map<string, string> vars;\n//   vars[\"first\"] = \"Alice\";\n//   vars[\"last\"] = \"Smith\";\n//   printer.Print(vars, \"My name is $first$ $last$.\");\n//   printer.Annotate(\"first\", \"last\", person_);\n//\n// This code would associate the span covering \"Alice Smith\" in the output with\n// the person_ descriptor.\n//\n// Note that the beginning variable must come before (or overlap with, in the\n// case of zero-sized substitution values) the ending variable.\n//\n// It is also sometimes useful to use variables with zero-sized values as\n// markers.  
This avoids issues with multiple references to the same variable\n// and also allows annotation ranges to span literal text from the Print\n// templates:\n//\n//   Printer printer(output, '$');\n//   map<string, string> vars;\n//   vars[\"foo\"] = \"bar\";\n//   vars[\"function\"] = \"call\";\n//   vars[\"mark\"] = \"\";\n//   printer.Print(vars, \"$function$($foo$,$foo$)$mark$\");\n//   printer.Annotate(\"function\", \"rmark\", call_);\n//\n// This code associates the span covering \"call(bar,bar)\" in the output with the\n// call_ descriptor.\n\nclass LIBPROTOBUF_EXPORT Printer {\n public:\n  // Create a printer that writes text to the given output stream.  Use the\n  // given character as the delimiter for variables.\n  Printer(ZeroCopyOutputStream* output, char variable_delimiter);\n\n  // Create a printer that writes text to the given output stream.  Use the\n  // given character as the delimiter for variables.  If annotation_collector\n  // is not null, Printer will provide it with annotations about code written\n  // to the stream.  annotation_collector is not owned by Printer.\n  Printer(ZeroCopyOutputStream* output, char variable_delimiter,\n          AnnotationCollector* annotation_collector);\n\n  ~Printer();\n\n  // Link a subsitution variable emitted by the last call to Print to the object\n  // described by descriptor.\n  template <typename SomeDescriptor>\n  void Annotate(const char* varname, const SomeDescriptor* descriptor) {\n    Annotate(varname, varname, descriptor);\n  }\n\n  // Link the output range defined by the substitution variables as emitted by\n  // the last call to Print to the object described by descriptor. 
The range\n  // begins at begin_varname's value and ends after the last character of the\n  // value substituted for end_varname.\n  template <typename SomeDescriptor>\n  void Annotate(const char* begin_varname, const char* end_varname,\n                const SomeDescriptor* descriptor) {\n    if (annotation_collector_ == NULL) {\n      // Annotations aren't turned on for this Printer, so don't pay the cost\n      // of building the location path.\n      return;\n    }\n    vector<int> path;\n    descriptor->GetLocationPath(&path);\n    Annotate(begin_varname, end_varname, descriptor->file()->name(), path);\n  }\n\n  // Link a subsitution variable emitted by the last call to Print to the file\n  // with path file_name.\n  void Annotate(const char* varname, const string& file_name) {\n    Annotate(varname, varname, file_name);\n  }\n\n  // Link the output range defined by the substitution variables as emitted by\n  // the last call to Print to the file with path file_name. The range begins\n  // at begin_varname's value and ends after the last character of the value\n  // substituted for end_varname.\n  void Annotate(const char* begin_varname, const char* end_varname,\n                const string& file_name) {\n    if (annotation_collector_ == NULL) {\n      // Annotations aren't turned on for this Printer.\n      return;\n    }\n    vector<int> empty_path;\n    Annotate(begin_varname, end_varname, file_name, empty_path);\n  }\n\n  // Print some text after applying variable substitutions.  If a particular\n  // variable in the text is not defined, this will crash.  Variables to be\n  // substituted are identified by their names surrounded by delimiter\n  // characters (as given to the constructor).  
The variable bindings are\n  // defined by the given map.\n  void Print(const map<string, string>& variables, const char* text);\n\n  // Like the first Print(), except the substitutions are given as parameters.\n  void Print(const char* text);\n  // Like the first Print(), except the substitutions are given as parameters.\n  void Print(const char* text, const char* variable, const string& value);\n  // Like the first Print(), except the substitutions are given as parameters.\n  void Print(const char* text, const char* variable1, const string& value1,\n                               const char* variable2, const string& value2);\n  // Like the first Print(), except the substitutions are given as parameters.\n  void Print(const char* text, const char* variable1, const string& value1,\n                               const char* variable2, const string& value2,\n                               const char* variable3, const string& value3);\n  // Like the first Print(), except the substitutions are given as parameters.\n  void Print(const char* text, const char* variable1, const string& value1,\n                               const char* variable2, const string& value2,\n                               const char* variable3, const string& value3,\n                               const char* variable4, const string& value4);\n  // Like the first Print(), except the substitutions are given as parameters.\n  void Print(const char* text, const char* variable1, const string& value1,\n                               const char* variable2, const string& value2,\n                               const char* variable3, const string& value3,\n                               const char* variable4, const string& value4,\n                               const char* variable5, const string& value5);\n  // Like the first Print(), except the substitutions are given as parameters.\n  void Print(const char* text, const char* variable1, const string& value1,\n                               const 
char* variable2, const string& value2,\n                               const char* variable3, const string& value3,\n                               const char* variable4, const string& value4,\n                               const char* variable5, const string& value5,\n                               const char* variable6, const string& value6);\n  // Like the first Print(), except the substitutions are given as parameters.\n  void Print(const char* text, const char* variable1, const string& value1,\n                               const char* variable2, const string& value2,\n                               const char* variable3, const string& value3,\n                               const char* variable4, const string& value4,\n                               const char* variable5, const string& value5,\n                               const char* variable6, const string& value6,\n                               const char* variable7, const string& value7);\n  // Like the first Print(), except the substitutions are given as parameters.\n  void Print(const char* text, const char* variable1, const string& value1,\n                               const char* variable2, const string& value2,\n                               const char* variable3, const string& value3,\n                               const char* variable4, const string& value4,\n                               const char* variable5, const string& value5,\n                               const char* variable6, const string& value6,\n                               const char* variable7, const string& value7,\n                               const char* variable8, const string& value8);\n\n  // Indent text by two spaces.  After calling Indent(), two spaces will be\n  // inserted at the beginning of each line of text.  
Indent() may be called\n  // multiple times to produce deeper indents.\n  void Indent();\n\n  // Reduces the current indent level by two spaces, or crashes if the indent\n  // level is zero.\n  void Outdent();\n\n  // Write a string to the output buffer.\n  // This method does not look for newlines to add indentation.\n  void PrintRaw(const string& data);\n\n  // Write a zero-delimited string to output buffer.\n  // This method does not look for newlines to add indentation.\n  void PrintRaw(const char* data);\n\n  // Write some bytes to the output buffer.\n  // This method does not look for newlines to add indentation.\n  void WriteRaw(const char* data, int size);\n\n  // True if any write to the underlying stream failed.  (We don't just\n  // crash in this case because this is an I/O failure, not a programming\n  // error.)\n  bool failed() const { return failed_; }\n\n private:\n  // Link the output range defined by the substitution variables as emitted by\n  // the last call to Print to the object found at the SourceCodeInfo-style path\n  // in a file with path file_path. The range begins at the start of\n  // begin_varname's value and ends after the last character of the value\n  // substituted for end_varname. Note that begin_varname and end_varname\n  // may refer to the same variable.\n  void Annotate(const char* begin_varname, const char* end_varname,\n                const string& file_path, const vector<int>& path);\n\n  const char variable_delimiter_;\n\n  ZeroCopyOutputStream* const output_;\n  char* buffer_;\n  int buffer_size_;\n  // The current position, in bytes, in the output stream.  This is equivalent\n  // to the total number of bytes that have been written so far.  
This value is\n  // used to calculate annotation ranges in the substitutions_ map below.\n  size_t offset_;\n\n  string indent_;\n  bool at_start_of_line_;\n  bool failed_;\n\n  // A map from variable name to [start, end) offsets in the output buffer.\n  // These refer to the offsets used for a variable after the last call to\n  // Print.  If a variable was used more than once, the entry used in\n  // this map is set to a negative-length span.  For singly-used variables, the\n  // start offset is the beginning of the substitution; the end offset is the\n  // last byte of the substitution plus one (such that (end - start) is the\n  // length of the substituted string).\n  map<string, pair<size_t, size_t> > substitutions_;\n\n  // Returns true and sets range to the substitution range in the output for\n  // varname if varname was used once in the last call to Print. If varname\n  // was not used, or if it was used multiple times, returns false (and\n  // fails a debug assertion).\n  bool GetSubstitutionRange(const char* varname, pair<size_t, size_t>* range);\n\n  // If non-null, annotation_collector_ is used to store annotations about\n  // generated code.\n  AnnotationCollector* const annotation_collector_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Printer);\n};\n\n}  // namespace io\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_IO_PRINTER_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/io/strtod.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// A locale-independent version of strtod(), used to parse floating\n// point default values in .proto files, where the decimal separator\n// is always a dot.\n\n#ifndef GOOGLE_PROTOBUF_IO_STRTOD_H__\n#define GOOGLE_PROTOBUF_IO_STRTOD_H__\n\nnamespace google {\nnamespace protobuf {\nnamespace io {\n\n// A locale-independent version of the standard strtod(), which always\n// uses a dot as the decimal separator.\ndouble NoLocaleStrtod(const char* str, char** endptr);\n\n// Casts a double value to a float value. If the value is outside of the\n// representable range of float, it will be converted to positive or negative\n// infinity.\nfloat SafeDoubleToFloat(double value);\n\n}  // namespace io\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_IO_STRTOD_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/io/tokenizer.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Class for parsing tokenized text from a ZeroCopyInputStream.\n\n#ifndef GOOGLE_PROTOBUF_IO_TOKENIZER_H__\n#define GOOGLE_PROTOBUF_IO_TOKENIZER_H__\n\n#include <string>\n#include <vector>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/logging.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace io {\n\nclass ZeroCopyInputStream;     // zero_copy_stream.h\n\n// Defined in this file.\nclass ErrorCollector;\nclass Tokenizer;\n\n// By \"column number\", the proto compiler refers to a count of the number\n// of bytes before a given byte, except that a tab character advances to\n// the next multiple of 8 bytes.  Note in particular that column numbers\n// are zero-based, while many user interfaces use one-based column numbers.\ntypedef int ColumnNumber;\n\n// Abstract interface for an object which collects the errors that occur\n// during parsing.  A typical implementation might simply print the errors\n// to stdout.\nclass LIBPROTOBUF_EXPORT ErrorCollector {\n public:\n  inline ErrorCollector() {}\n  virtual ~ErrorCollector();\n\n  // Indicates that there was an error in the input at the given line and\n  // column numbers.  
The numbers are zero-based, so you may want to add\n  // 1 to each before printing them.\n  virtual void AddError(int line, ColumnNumber column,\n                        const string& message) = 0;\n\n  // Indicates that there was a warning in the input at the given line and\n  // column numbers.  The numbers are zero-based, so you may want to add\n  // 1 to each before printing them.\n  virtual void AddWarning(int line, ColumnNumber column,\n                          const string& message) { }\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ErrorCollector);\n};\n\n// This class converts a stream of raw text into a stream of tokens for\n// the protocol definition parser to parse.  The tokens recognized are\n// similar to those that make up the C language; see the TokenType enum for\n// precise descriptions.  Whitespace and comments are skipped.  By default,\n// C- and C++-style comments are recognized, but other styles can be used by\n// calling set_comment_style().\nclass LIBPROTOBUF_EXPORT Tokenizer {\n public:\n  // Construct a Tokenizer that reads and tokenizes text from the given\n  // input stream and writes errors to the given error_collector.\n  // The caller keeps ownership of input and error_collector.\n  Tokenizer(ZeroCopyInputStream* input, ErrorCollector* error_collector);\n  ~Tokenizer();\n\n  enum TokenType {\n    TYPE_START,       // Next() has not yet been called.\n    TYPE_END,         // End of input reached.  \"text\" is empty.\n\n    TYPE_IDENTIFIER,  // A sequence of letters, digits, and underscores, not\n                      // starting with a digit.  It is an error for a number\n                      // to be followed by an identifier with no space in\n                      // between.\n    TYPE_INTEGER,     // A sequence of digits representing an integer.  
Normally\n                      // the digits are decimal, but a prefix of \"0x\" indicates\n                      // a hex number and a leading zero indicates octal, just\n                      // like with C numeric literals.  A leading negative sign\n                      // is NOT included in the token; it's up to the parser to\n                      // interpret the unary minus operator on its own.\n    TYPE_FLOAT,       // A floating point literal, with a fractional part and/or\n                      // an exponent.  Always in decimal.  Again, never\n                      // negative.\n    TYPE_STRING,      // A quoted sequence of escaped characters.  Either single\n                      // or double quotes can be used, but they must match.\n                      // A string literal cannot cross a line break.\n    TYPE_SYMBOL,      // Any other printable character, like '!' or '+'.\n                      // Symbols are always a single character, so \"!+$%\" is\n                      // four tokens.\n  };\n\n  // Structure representing a token read from the token stream.\n  struct Token {\n    TokenType type;\n    string text;       // The exact text of the token as it appeared in\n                       // the input.  e.g. tokens of TYPE_STRING will still\n                       // be escaped and in quotes.\n\n    // \"line\" and \"column\" specify the position of the first character of\n    // the token within the input stream.  They are zero-based.\n    int line;\n    ColumnNumber column;\n    ColumnNumber end_column;\n  };\n\n  // Get the current token.  This is updated when Next() is called.  Before\n  // the first call to Next(), current() has type TYPE_START and no contents.\n  const Token& current();\n\n  // Return the previous token -- i.e. what current() returned before the\n  // previous call to Next().\n  const Token& previous();\n\n  // Advance to the next token.  
Returns false if the end of the input is\n  // reached.\n  bool Next();\n\n  // Like Next(), but also collects comments which appear between the previous\n  // and next tokens.\n  //\n  // Comments which appear to be attached to the previous token are stored\n  // in *prev_tailing_comments.  Comments which appear to be attached to the\n  // next token are stored in *next_leading_comments.  Comments appearing in\n  // between which do not appear to be attached to either will be added to\n  // detached_comments.  Any of these parameters can be NULL to simply discard\n  // the comments.\n  //\n  // A series of line comments appearing on consecutive lines, with no other\n  // tokens appearing on those lines, will be treated as a single comment.\n  //\n  // Only the comment content is returned; comment markers (e.g. //) are\n  // stripped out.  For block comments, leading whitespace and an asterisk will\n  // be stripped from the beginning of each line other than the first.  Newlines\n  // are included in the output.\n  //\n  // Examples:\n  //\n  //   optional int32 foo = 1;  // Comment attached to foo.\n  //   // Comment attached to bar.\n  //   optional int32 bar = 2;\n  //\n  //   optional string baz = 3;\n  //   // Comment attached to baz.\n  //   // Another line attached to baz.\n  //\n  //   // Comment attached to qux.\n  //   //\n  //   // Another line attached to qux.\n  //   optional double qux = 4;\n  //\n  //   // Detached comment.  This is not attached to qux or corge\n  //   // because there are blank lines separating it from both.\n  //\n  //   optional string corge = 5;\n  //   /* Block comment attached\n  //    * to corge.  Leading asterisks\n  //    * will be removed. */\n  //   /* Block comment attached to\n  //    * grault. 
*/\n  //   optional int32 grault = 6;\n  bool NextWithComments(string* prev_trailing_comments,\n                        vector<string>* detached_comments,\n                        string* next_leading_comments);\n\n  // Parse helpers ---------------------------------------------------\n\n  // Parses a TYPE_FLOAT token.  This never fails, so long as the text actually\n  // comes from a TYPE_FLOAT token parsed by Tokenizer.  If it doesn't, the\n  // result is undefined (possibly an assert failure).\n  static double ParseFloat(const string& text);\n\n  // Parses a TYPE_STRING token.  This never fails, so long as the text actually\n  // comes from a TYPE_STRING token parsed by Tokenizer.  If it doesn't, the\n  // result is undefined (possibly an assert failure).\n  static void ParseString(const string& text, string* output);\n\n  // Identical to ParseString, but appends to output.\n  static void ParseStringAppend(const string& text, string* output);\n\n  // Parses a TYPE_INTEGER token.  Returns false if the result would be\n  // greater than max_value.  Otherwise, returns true and sets *output to the\n  // result.  If the text is not from a Token of type TYPE_INTEGER originally\n  // parsed by a Tokenizer, the result is undefined (possibly an assert\n  // failure).\n  static bool ParseInteger(const string& text, uint64 max_value,\n                           uint64* output);\n\n  // Options ---------------------------------------------------------\n\n  // Set true to allow floats to be suffixed with the letter 'f'.  Tokens\n  // which would otherwise be integers but which have the 'f' suffix will be\n  // forced to be interpreted as floats.  
For all other purposes, the 'f' is\n  // ignored.\n  void set_allow_f_after_float(bool value) { allow_f_after_float_ = value; }\n\n  // Valid values for set_comment_style().\n  enum CommentStyle {\n    // Line comments begin with \"//\", block comments are delimited by \"/*\" and\n    // \"*/\".\n    CPP_COMMENT_STYLE,\n    // Line comments begin with \"#\".  No way to write block comments.\n    SH_COMMENT_STYLE\n  };\n\n  // Sets the comment style.\n  void set_comment_style(CommentStyle style) { comment_style_ = style; }\n\n  // Whether to require whitespace between a number and a field name.\n  // Default is true. Do not use this; for Google-internal cleanup only.\n  void set_require_space_after_number(bool require) {\n    require_space_after_number_ = require;\n  }\n\n  // Whether to allow string literals to span multiple lines. Default is false.\n  // Do not use this; for Google-internal cleanup only.\n  void set_allow_multiline_strings(bool allow) {\n    allow_multiline_strings_ = allow;\n  }\n\n  // External helper: validate an identifier.\n  static bool IsIdentifier(const string& text);\n\n  // -----------------------------------------------------------------\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Tokenizer);\n\n  Token current_;           // Returned by current().\n  Token previous_;          // Returned by previous().\n\n  ZeroCopyInputStream* input_;\n  ErrorCollector* error_collector_;\n\n  char current_char_;       // == buffer_[buffer_pos_], updated by NextChar().\n  const char* buffer_;      // Current buffer returned from input_.\n  int buffer_size_;         // Size of buffer_.\n  int buffer_pos_;          // Current position within the buffer.\n  bool read_error_;         // Did we previously encounter a read error?\n\n  // Line and column number of current_char_ within the whole input stream.\n  int line_;\n  ColumnNumber column_;\n\n  // String to which text should be appended as we advance through it.\n  // Call RecordTo(&str) to start 
recording and StopRecording() to stop.\n  // E.g. StartToken() calls RecordTo(&current_.text).  record_start_ is the\n  // position within the current buffer where recording started.\n  string* record_target_;\n  int record_start_;\n\n  // Options.\n  bool allow_f_after_float_;\n  CommentStyle comment_style_;\n  bool require_space_after_number_;\n  bool allow_multiline_strings_;\n\n  // Since we count columns we need to interpret tabs somehow.  We'll take\n  // the standard 8-character definition for lack of any way to do better.\n  // This must match the documentation of ColumnNumber.\n  static const int kTabWidth = 8;\n\n  // -----------------------------------------------------------------\n  // Helper methods.\n\n  // Consume this character and advance to the next one.\n  void NextChar();\n\n  // Read a new buffer from the input.\n  void Refresh();\n\n  inline void RecordTo(string* target);\n  inline void StopRecording();\n\n  // Called when the current character is the first character of a new\n  // token (not including whitespace or comments).\n  inline void StartToken();\n  // Called when the current character is the first character after the\n  // end of the last token.  After this returns, current_.text will\n  // contain all text consumed since StartToken() was called.\n  inline void EndToken();\n\n  // Convenience method to add an error at the current line and column.\n  void AddError(const string& message) {\n    error_collector_->AddError(line_, column_, message);\n  }\n\n  // -----------------------------------------------------------------\n  // The following four methods are used to consume tokens of specific\n  // types.  
They are actually used to consume all characters *after*\n  // the first, since the calling function consumes the first character\n  // in order to decide what kind of token is being read.\n\n  // Read and consume a string, ending when the given delimiter is\n  // consumed.\n  void ConsumeString(char delimiter);\n\n  // Read and consume a number, returning TYPE_FLOAT or TYPE_INTEGER\n  // depending on what was read.  This needs to know if the first\n  // character was a zero in order to correctly recognize hex and octal\n  // numbers.\n  // It also needs to know if the first character was a . to parse floating\n  // point correctly.\n  TokenType ConsumeNumber(bool started_with_zero, bool started_with_dot);\n\n  // Consume the rest of a line.\n  void ConsumeLineComment(string* content);\n  // Consume until \"*/\".\n  void ConsumeBlockComment(string* content);\n\n  enum NextCommentStatus {\n    // Started a line comment.\n    LINE_COMMENT,\n\n    // Started a block comment.\n    BLOCK_COMMENT,\n\n    // Consumed a slash, then realized it wasn't a comment.  current_ has\n    // been filled in with a slash token.  The caller should return it.\n    SLASH_NOT_COMMENT,\n\n    // We do not appear to be starting a comment here.\n    NO_COMMENT\n  };\n\n  // If we're at the start of a new comment, consume it and return what kind\n  // of comment it is.\n  NextCommentStatus TryConsumeCommentStart();\n\n  // -----------------------------------------------------------------\n  // These helper methods make the parsing code more readable.  
The\n  // \"character classes\" referred to are defined at the top of the .cc file.\n  // Basically it is a C++ class with one method:\n  //   static bool InClass(char c);\n  // The method returns true if c is a member of this \"class\", like \"Letter\"\n  // or \"Digit\".\n\n  // Returns true if the current character is of the given character\n  // class, but does not consume anything.\n  template<typename CharacterClass>\n  inline bool LookingAt();\n\n  // If the current character is in the given class, consume it and return\n  // true.  Otherwise return false.\n  // e.g. TryConsumeOne<Letter>()\n  template<typename CharacterClass>\n  inline bool TryConsumeOne();\n\n  // Like above, but try to consume the specific character indicated.\n  inline bool TryConsume(char c);\n\n  // Consume zero or more of the given character class.\n  template<typename CharacterClass>\n  inline void ConsumeZeroOrMore();\n\n  // Consume one or more of the given character class or log the given\n  // error message.\n  // e.g. ConsumeOneOrMore<Digit>(\"Expected digits.\");\n  template<typename CharacterClass>\n  inline void ConsumeOneOrMore(const char* error);\n};\n\n// inline methods ====================================================\ninline const Tokenizer::Token& Tokenizer::current() {\n  return current_;\n}\n\ninline const Tokenizer::Token& Tokenizer::previous() {\n  return previous_;\n}\n\ninline void Tokenizer::ParseString(const string& text, string* output) {\n  output->clear();\n  ParseStringAppend(text, output);\n}\n\n}  // namespace io\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_IO_TOKENIZER_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/io/zero_copy_stream.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// This file contains the ZeroCopyInputStream and ZeroCopyOutputStream\n// interfaces, which represent abstract I/O streams to and from which\n// protocol buffers can be read and written.  For a few simple\n// implementations of these interfaces, see zero_copy_stream_impl.h.\n//\n// These interfaces are different from classic I/O streams in that they\n// try to minimize the amount of data copying that needs to be done.\n// To accomplish this, responsibility for allocating buffers is moved to\n// the stream object, rather than being the responsibility of the caller.\n// So, the stream can return a buffer which actually points directly into\n// the final data structure where the bytes are to be stored, and the caller\n// can interact directly with that buffer, eliminating an intermediate copy\n// operation.\n//\n// As an example, consider the common case in which you are reading bytes\n// from an array that is already in memory (or perhaps an mmap()ed file).\n// With classic I/O streams, you would do something like:\n//   char buffer[BUFFER_SIZE];\n//   input->Read(buffer, BUFFER_SIZE);\n//   DoSomething(buffer, BUFFER_SIZE);\n// Then, the stream basically just calls memcpy() to copy the data from\n// the array into your buffer.  
With a ZeroCopyInputStream, you would do\n// this instead:\n//   const void* buffer;\n//   int size;\n//   input->Next(&buffer, &size);\n//   DoSomething(buffer, size);\n// Here, no copy is performed.  The input stream returns a pointer directly\n// into the backing array, and the caller ends up reading directly from it.\n//\n// If you want to be able to read the old-fashion way, you can create\n// a CodedInputStream or CodedOutputStream wrapping these objects and use\n// their ReadRaw()/WriteRaw() methods.  These will, of course, add a copy\n// step, but Coded*Stream will handle buffering so at least it will be\n// reasonably efficient.\n//\n// ZeroCopyInputStream example:\n//   // Read in a file and print its contents to stdout.\n//   int fd = open(\"myfile\", O_RDONLY);\n//   ZeroCopyInputStream* input = new FileInputStream(fd);\n//\n//   const void* buffer;\n//   int size;\n//   while (input->Next(&buffer, &size)) {\n//     cout.write(buffer, size);\n//   }\n//\n//   delete input;\n//   close(fd);\n//\n// ZeroCopyOutputStream example:\n//   // Copy the contents of \"infile\" to \"outfile\", using plain read() for\n//   // \"infile\" but a ZeroCopyOutputStream for \"outfile\".\n//   int infd = open(\"infile\", O_RDONLY);\n//   int outfd = open(\"outfile\", O_WRONLY);\n//   ZeroCopyOutputStream* output = new FileOutputStream(outfd);\n//\n//   void* buffer;\n//   int size;\n//   while (output->Next(&buffer, &size)) {\n//     int bytes = read(infd, buffer, size);\n//     if (bytes < size) {\n//       // Reached EOF.\n//       output->BackUp(size - bytes);\n//       break;\n//     }\n//   }\n//\n//   delete output;\n//   close(infd);\n//   close(outfd);\n\n#ifndef GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_H__\n#define GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_H__\n\n#include <string>\n#include <google/protobuf/stubs/common.h>\n\nnamespace google {\n\nnamespace protobuf {\nnamespace io {\n\n// Defined in this file.\nclass ZeroCopyInputStream;\nclass ZeroCopyOutputStream;\n\n// 
Abstract interface similar to an input stream but designed to minimize\n// copying.\nclass LIBPROTOBUF_EXPORT ZeroCopyInputStream {\n public:\n  inline ZeroCopyInputStream() {}\n  virtual ~ZeroCopyInputStream();\n\n  // Obtains a chunk of data from the stream.\n  //\n  // Preconditions:\n  // * \"size\" and \"data\" are not NULL.\n  //\n  // Postconditions:\n  // * If the returned value is false, there is no more data to return or\n  //   an error occurred.  All errors are permanent.\n  // * Otherwise, \"size\" points to the actual number of bytes read and \"data\"\n  //   points to a pointer to a buffer containing these bytes.\n  // * Ownership of this buffer remains with the stream, and the buffer\n  //   remains valid only until some other method of the stream is called\n  //   or the stream is destroyed.\n  // * It is legal for the returned buffer to have zero size, as long\n  //   as repeatedly calling Next() eventually yields a buffer with non-zero\n  //   size.\n  virtual bool Next(const void** data, int* size) = 0;\n\n  // Backs up a number of bytes, so that the next call to Next() returns\n  // data again that was already returned by the last call to Next().  This\n  // is useful when writing procedures that are only supposed to read up\n  // to a certain point in the input, then return.  If Next() returns a\n  // buffer that goes beyond what you wanted to read, you can use BackUp()\n  // to return to the point where you intended to finish.\n  //\n  // Preconditions:\n  // * The last method called must have been Next().\n  // * count must be less than or equal to the size of the last buffer\n  //   returned by Next().\n  //\n  // Postconditions:\n  // * The last \"count\" bytes of the last buffer returned by Next() will be\n  //   pushed back into the stream.  Subsequent calls to Next() will return\n  //   the same data again before producing new data.\n  virtual void BackUp(int count) = 0;\n\n  // Skips a number of bytes.  
Returns false if the end of the stream is\n  // reached or some input error occurred.  In the end-of-stream case, the\n  // stream is advanced to the end of the stream (so ByteCount() will return\n  // the total size of the stream).\n  virtual bool Skip(int count) = 0;\n\n  // Returns the total number of bytes read since this object was created.\n  virtual int64 ByteCount() const = 0;\n\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ZeroCopyInputStream);\n};\n\n// Abstract interface similar to an output stream but designed to minimize\n// copying.\nclass LIBPROTOBUF_EXPORT ZeroCopyOutputStream {\n public:\n  inline ZeroCopyOutputStream() {}\n  virtual ~ZeroCopyOutputStream();\n\n  // Obtains a buffer into which data can be written.  Any data written\n  // into this buffer will eventually (maybe instantly, maybe later on)\n  // be written to the output.\n  //\n  // Preconditions:\n  // * \"size\" and \"data\" are not NULL.\n  //\n  // Postconditions:\n  // * If the returned value is false, an error occurred.  All errors are\n  //   permanent.\n  // * Otherwise, \"size\" points to the actual number of bytes in the buffer\n  //   and \"data\" points to the buffer.\n  // * Ownership of this buffer remains with the stream, and the buffer\n  //   remains valid only until some other method of the stream is called\n  //   or the stream is destroyed.\n  // * Any data which the caller stores in this buffer will eventually be\n  //   written to the output (unless BackUp() is called).\n  // * It is legal for the returned buffer to have zero size, as long\n  //   as repeatedly calling Next() eventually yields a buffer with non-zero\n  //   size.\n  virtual bool Next(void** data, int* size) = 0;\n\n  // Backs up a number of bytes, so that the end of the last buffer returned\n  // by Next() is not actually written.  This is needed when you finish\n  // writing all the data you want to write, but the last buffer was bigger\n  // than you needed.  
You don't want to write a bunch of garbage after the\n  // end of your data, so you use BackUp() to back up.\n  //\n  // Preconditions:\n  // * The last method called must have been Next().\n  // * count must be less than or equal to the size of the last buffer\n  //   returned by Next().\n  // * The caller must not have written anything to the last \"count\" bytes\n  //   of that buffer.\n  //\n  // Postconditions:\n  // * The last \"count\" bytes of the last buffer returned by Next() will be\n  //   ignored.\n  virtual void BackUp(int count) = 0;\n\n  // Returns the total number of bytes written since this object was created.\n  virtual int64 ByteCount() const = 0;\n\n  // Write a given chunk of data to the output.  Some output streams may\n  // implement this in a way that avoids copying. Check AllowsAliasing() before\n  // calling WriteAliasedRaw(). It will GOOGLE_CHECK fail if WriteAliasedRaw() is\n  // called on a stream that does not allow aliasing.\n  //\n  // NOTE: It is caller's responsibility to ensure that the chunk of memory\n  // remains live until all of the data has been consumed from the stream.\n  virtual bool WriteAliasedRaw(const void* data, int size);\n  virtual bool AllowsAliasing() const { return false; }\n\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ZeroCopyOutputStream);\n};\n\n}  // namespace io\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/io/zero_copy_stream_impl.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// This file contains common implementations of the interfaces defined in\n// zero_copy_stream.h which are only included in the full (non-lite)\n// protobuf library.  These implementations include Unix file descriptors\n// and C++ iostreams.  See also:  zero_copy_stream_impl_lite.h\n\n#ifndef GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_IMPL_H__\n#define GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_IMPL_H__\n\n#include <string>\n#include <iosfwd>\n#include <google/protobuf/io/zero_copy_stream.h>\n#include <google/protobuf/io/zero_copy_stream_impl_lite.h>\n#include <google/protobuf/stubs/common.h>\n\n\nnamespace google {\nnamespace protobuf {\nnamespace io {\n\n\n// ===================================================================\n\n// A ZeroCopyInputStream which reads from a file descriptor.\n//\n// FileInputStream is preferred over using an ifstream with IstreamInputStream.\n// The latter will introduce an extra layer of buffering, harming performance.\n// Also, it's conceivable that FileInputStream could someday be enhanced\n// to use zero-copy file descriptors on OSs which support them.\nclass LIBPROTOBUF_EXPORT FileInputStream : public ZeroCopyInputStream {\n public:\n  // Creates a stream that reads from the given Unix file descriptor.\n  // If a block_size is given, it specifies the number of bytes 
that\n  // should be read and returned with each call to Next().  Otherwise,\n  // a reasonable default is used.\n  explicit FileInputStream(int file_descriptor, int block_size = -1);\n  ~FileInputStream();\n\n  // Flushes any buffers and closes the underlying file.  Returns false if\n  // an error occurs during the process; use GetErrno() to examine the error.\n  // Even if an error occurs, the file descriptor is closed when this returns.\n  bool Close();\n\n  // By default, the file descriptor is not closed when the stream is\n  // destroyed.  Call SetCloseOnDelete(true) to change that.  WARNING:\n  // This leaves no way for the caller to detect if close() fails.  If\n  // detecting close() errors is important to you, you should arrange\n  // to close the descriptor yourself.\n  void SetCloseOnDelete(bool value) { copying_input_.SetCloseOnDelete(value); }\n\n  // If an I/O error has occurred on this file descriptor, this is the\n  // errno from that error.  Otherwise, this is zero.  Once an error\n  // occurs, the stream is broken and all subsequent operations will\n  // fail.\n  int GetErrno() { return copying_input_.GetErrno(); }\n\n  // implements ZeroCopyInputStream ----------------------------------\n  bool Next(const void** data, int* size);\n  void BackUp(int count);\n  bool Skip(int count);\n  int64 ByteCount() const;\n\n private:\n  class LIBPROTOBUF_EXPORT CopyingFileInputStream : public CopyingInputStream {\n   public:\n    CopyingFileInputStream(int file_descriptor);\n    ~CopyingFileInputStream();\n\n    bool Close();\n    void SetCloseOnDelete(bool value) { close_on_delete_ = value; }\n    int GetErrno() { return errno_; }\n\n    // implements CopyingInputStream ---------------------------------\n    int Read(void* buffer, int size);\n    int Skip(int count);\n\n   private:\n    // The file descriptor.\n    const int file_;\n    bool close_on_delete_;\n    bool is_closed_;\n\n    // The errno of the I/O error, if one has occurred.  
Otherwise, zero.\n    int errno_;\n\n    // Did we try to seek once and fail?  If so, we assume this file descriptor\n    // doesn't support seeking and won't try again.\n    bool previous_seek_failed_;\n\n    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CopyingFileInputStream);\n  };\n\n  CopyingFileInputStream copying_input_;\n  CopyingInputStreamAdaptor impl_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(FileInputStream);\n};\n\n// ===================================================================\n\n// A ZeroCopyOutputStream which writes to a file descriptor.\n//\n// FileOutputStream is preferred over using an ofstream with\n// OstreamOutputStream.  The latter will introduce an extra layer of buffering,\n// harming performance.  Also, it's conceivable that FileOutputStream could\n// someday be enhanced to use zero-copy file descriptors on OSs which\n// support them.\nclass LIBPROTOBUF_EXPORT FileOutputStream : public ZeroCopyOutputStream {\n public:\n  // Creates a stream that writes to the given Unix file descriptor.\n  // If a block_size is given, it specifies the size of the buffers\n  // that should be returned by Next().  Otherwise, a reasonable default\n  // is used.\n  explicit FileOutputStream(int file_descriptor, int block_size = -1);\n  ~FileOutputStream();\n\n  // Flushes any buffers and closes the underlying file.  Returns false if\n  // an error occurs during the process; use GetErrno() to examine the error.\n  // Even if an error occurs, the file descriptor is closed when this returns.\n  bool Close();\n\n  // Flushes FileOutputStream's buffers but does not close the\n  // underlying file. No special measures are taken to ensure that\n  // underlying operating system file object is synchronized to disk.\n  bool Flush();\n\n  // By default, the file descriptor is not closed when the stream is\n  // destroyed.  Call SetCloseOnDelete(true) to change that.  WARNING:\n  // This leaves no way for the caller to detect if close() fails.  
If\n  // detecting close() errors is important to you, you should arrange\n  // to close the descriptor yourself.\n  void SetCloseOnDelete(bool value) { copying_output_.SetCloseOnDelete(value); }\n\n  // If an I/O error has occurred on this file descriptor, this is the\n  // errno from that error.  Otherwise, this is zero.  Once an error\n  // occurs, the stream is broken and all subsequent operations will\n  // fail.\n  int GetErrno() { return copying_output_.GetErrno(); }\n\n  // implements ZeroCopyOutputStream ---------------------------------\n  bool Next(void** data, int* size);\n  void BackUp(int count);\n  int64 ByteCount() const;\n\n private:\n  class LIBPROTOBUF_EXPORT CopyingFileOutputStream : public CopyingOutputStream {\n   public:\n    CopyingFileOutputStream(int file_descriptor);\n    ~CopyingFileOutputStream();\n\n    bool Close();\n    void SetCloseOnDelete(bool value) { close_on_delete_ = value; }\n    int GetErrno() { return errno_; }\n\n    // implements CopyingOutputStream --------------------------------\n    bool Write(const void* buffer, int size);\n\n   private:\n    // The file descriptor.\n    const int file_;\n    bool close_on_delete_;\n    bool is_closed_;\n\n    // The errno of the I/O error, if one has occurred.  
Otherwise, zero.\n    int errno_;\n\n    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CopyingFileOutputStream);\n  };\n\n  CopyingFileOutputStream copying_output_;\n  CopyingOutputStreamAdaptor impl_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(FileOutputStream);\n};\n\n// ===================================================================\n\n// A ZeroCopyInputStream which reads from a C++ istream.\n//\n// Note that for reading files (or anything represented by a file descriptor),\n// FileInputStream is more efficient.\nclass LIBPROTOBUF_EXPORT IstreamInputStream : public ZeroCopyInputStream {\n public:\n  // Creates a stream that reads from the given C++ istream.\n  // If a block_size is given, it specifies the number of bytes that\n  // should be read and returned with each call to Next().  Otherwise,\n  // a reasonable default is used.\n  explicit IstreamInputStream(std::istream* stream, int block_size = -1);\n  ~IstreamInputStream();\n\n  // implements ZeroCopyInputStream ----------------------------------\n  bool Next(const void** data, int* size);\n  void BackUp(int count);\n  bool Skip(int count);\n  int64 ByteCount() const;\n\n private:\n  class LIBPROTOBUF_EXPORT CopyingIstreamInputStream : public CopyingInputStream {\n   public:\n    CopyingIstreamInputStream(std::istream* input);\n    ~CopyingIstreamInputStream();\n\n    // implements CopyingInputStream ---------------------------------\n    int Read(void* buffer, int size);\n    // (We use the default implementation of Skip().)\n\n   private:\n    // The stream.\n    std::istream* input_;\n\n    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CopyingIstreamInputStream);\n  };\n\n  CopyingIstreamInputStream copying_input_;\n  CopyingInputStreamAdaptor impl_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(IstreamInputStream);\n};\n\n// ===================================================================\n\n// A ZeroCopyOutputStream which writes to a C++ ostream.\n//\n// Note that for writing files (or anything represented by a file 
descriptor),\n// FileOutputStream is more efficient.\nclass LIBPROTOBUF_EXPORT OstreamOutputStream : public ZeroCopyOutputStream {\n public:\n  // Creates a stream that writes to the given C++ ostream.\n  // If a block_size is given, it specifies the size of the buffers\n  // that should be returned by Next().  Otherwise, a reasonable default\n  // is used.\n  explicit OstreamOutputStream(std::ostream* stream, int block_size = -1);\n  ~OstreamOutputStream();\n\n  // implements ZeroCopyOutputStream ---------------------------------\n  bool Next(void** data, int* size);\n  void BackUp(int count);\n  int64 ByteCount() const;\n\n private:\n  class LIBPROTOBUF_EXPORT CopyingOstreamOutputStream : public CopyingOutputStream {\n   public:\n    CopyingOstreamOutputStream(std::ostream* output);\n    ~CopyingOstreamOutputStream();\n\n    // implements CopyingOutputStream --------------------------------\n    bool Write(const void* buffer, int size);\n\n   private:\n    // The stream.\n    std::ostream* output_;\n\n    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CopyingOstreamOutputStream);\n  };\n\n  CopyingOstreamOutputStream copying_output_;\n  CopyingOutputStreamAdaptor impl_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(OstreamOutputStream);\n};\n\n// ===================================================================\n\n// A ZeroCopyInputStream which reads from several other streams in sequence.\n// ConcatenatingInputStream is unable to distinguish between end-of-stream\n// and read errors in the underlying streams, so it assumes any errors mean\n// end-of-stream.  So, if the underlying streams fail for any other reason,\n// ConcatenatingInputStream may do odd things.  
It is suggested that you do\n// not use ConcatenatingInputStream on streams that might produce read errors\n// other than end-of-stream.\nclass LIBPROTOBUF_EXPORT ConcatenatingInputStream : public ZeroCopyInputStream {\n public:\n  // All streams passed in as well as the array itself must remain valid\n  // until the ConcatenatingInputStream is destroyed.\n  ConcatenatingInputStream(ZeroCopyInputStream* const streams[], int count);\n  ~ConcatenatingInputStream();\n\n  // implements ZeroCopyInputStream ----------------------------------\n  bool Next(const void** data, int* size);\n  void BackUp(int count);\n  bool Skip(int count);\n  int64 ByteCount() const;\n\n\n private:\n  // As streams are retired, streams_ is incremented and count_ is\n  // decremented.\n  ZeroCopyInputStream* const* streams_;\n  int stream_count_;\n  int64 bytes_retired_;  // Bytes read from previous streams.\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ConcatenatingInputStream);\n};\n\n// ===================================================================\n\n// A ZeroCopyInputStream which wraps some other stream and limits it to\n// a particular byte count.\nclass LIBPROTOBUF_EXPORT LimitingInputStream : public ZeroCopyInputStream {\n public:\n  LimitingInputStream(ZeroCopyInputStream* input, int64 limit);\n  ~LimitingInputStream();\n\n  // implements ZeroCopyInputStream ----------------------------------\n  bool Next(const void** data, int* size);\n  void BackUp(int count);\n  bool Skip(int count);\n  int64 ByteCount() const;\n\n\n private:\n  ZeroCopyInputStream* input_;\n  int64 limit_;  // Decreases as we go, becomes negative if we overshoot.\n  int64 prior_bytes_read_;  // Bytes read on underlying stream at construction\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(LimitingInputStream);\n};\n\n// ===================================================================\n\n}  // namespace io\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_IMPL_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/io/zero_copy_stream_impl_lite.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// This file contains common implementations of the interfaces defined in\n// zero_copy_stream.h which are included in the \"lite\" protobuf library.\n// These implementations cover I/O on raw arrays and strings, as well as\n// adaptors which make it easy to implement streams based on traditional\n// streams.  Of course, many users will probably want to write their own\n// implementations of these interfaces specific to the particular I/O\n// abstractions they prefer to use, but these should cover the most common\n// cases.\n\n#ifndef GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_IMPL_LITE_H__\n#define GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_IMPL_LITE_H__\n\n#include <memory>\n#ifndef _SHARED_PTR_H\n#include <google/protobuf/stubs/shared_ptr.h>\n#endif\n#include <string>\n#include <iosfwd>\n#include <google/protobuf/io/zero_copy_stream.h>\n#include <google/protobuf/stubs/callback.h>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/stl_util.h>\n\n\nnamespace google {\nnamespace protobuf {\nnamespace io {\n\n// ===================================================================\n\n// A ZeroCopyInputStream backed by an in-memory array of bytes.\nclass LIBPROTOBUF_EXPORT ArrayInputStream : public ZeroCopyInputStream {\n public:\n  // Create an InputStream that returns the bytes 
pointed to by \"data\".\n  // \"data\" remains the property of the caller but must remain valid until\n  // the stream is destroyed.  If a block_size is given, calls to Next()\n  // will return data blocks no larger than the given size.  Otherwise, the\n  // first call to Next() returns the entire array.  block_size is mainly\n  // useful for testing; in production you would probably never want to set\n  // it.\n  ArrayInputStream(const void* data, int size, int block_size = -1);\n  ~ArrayInputStream();\n\n  // implements ZeroCopyInputStream ----------------------------------\n  bool Next(const void** data, int* size);\n  void BackUp(int count);\n  bool Skip(int count);\n  int64 ByteCount() const;\n\n\n private:\n  const uint8* const data_;  // The byte array.\n  const int size_;           // Total size of the array.\n  const int block_size_;     // How many bytes to return at a time.\n\n  int position_;\n  int last_returned_size_;   // How many bytes we returned last time Next()\n                             // was called (used for error checking only).\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArrayInputStream);\n};\n\n// ===================================================================\n\n// A ZeroCopyOutputStream backed by an in-memory array of bytes.\nclass LIBPROTOBUF_EXPORT ArrayOutputStream : public ZeroCopyOutputStream {\n public:\n  // Create an OutputStream that writes to the bytes pointed to by \"data\".\n  // \"data\" remains the property of the caller but must remain valid until\n  // the stream is destroyed.  If a block_size is given, calls to Next()\n  // will return data blocks no larger than the given size.  Otherwise, the\n  // first call to Next() returns the entire array.  
block_size is mainly\n  // useful for testing; in production you would probably never want to set\n  // it.\n  ArrayOutputStream(void* data, int size, int block_size = -1);\n  ~ArrayOutputStream();\n\n  // implements ZeroCopyOutputStream ---------------------------------\n  bool Next(void** data, int* size);\n  void BackUp(int count);\n  int64 ByteCount() const;\n\n private:\n  uint8* const data_;        // The byte array.\n  const int size_;           // Total size of the array.\n  const int block_size_;     // How many bytes to return at a time.\n\n  int position_;\n  int last_returned_size_;   // How many bytes we returned last time Next()\n                             // was called (used for error checking only).\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArrayOutputStream);\n};\n\n// ===================================================================\n\n// A ZeroCopyOutputStream which appends bytes to a string.\nclass LIBPROTOBUF_EXPORT StringOutputStream : public ZeroCopyOutputStream {\n public:\n  // Create a StringOutputStream which appends bytes to the given string.\n  // The string remains property of the caller, but it is mutated in arbitrary\n  // ways and MUST NOT be accessed in any way until you're done with the\n  // stream. 
Either be sure there's no further usage, or (safest) destroy the\n  // stream before using the contents.\n  //\n  // Hint:  If you call target->reserve(n) before creating the stream,\n  //   the first call to Next() will return at least n bytes of buffer\n  //   space.\n  explicit StringOutputStream(string* target);\n  ~StringOutputStream();\n\n  // implements ZeroCopyOutputStream ---------------------------------\n  bool Next(void** data, int* size);\n  void BackUp(int count);\n  int64 ByteCount() const;\n\n protected:\n  void SetString(string* target);\n\n private:\n  static const int kMinimumSize = 16;\n\n  string* target_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(StringOutputStream);\n};\n\n// LazyStringOutputStream is a StringOutputStream with lazy acquisition of\n// the output string from a callback. The string is owned externally, and not\n// deleted in the stream destructor.\nclass LIBPROTOBUF_EXPORT LazyStringOutputStream : public StringOutputStream {\n public:\n  // Callback should be permanent (non-self-deleting). Ownership is transferred\n  // to the LazyStringOutputStream.\n  explicit LazyStringOutputStream(ResultCallback<string*>* callback);\n  ~LazyStringOutputStream();\n\n  // implements ZeroCopyOutputStream, overriding StringOutputStream -----------\n  bool Next(void** data, int* size);\n  int64 ByteCount() const;\n\n private:\n  const google::protobuf::scoped_ptr<ResultCallback<string*> > callback_;\n  bool string_is_set_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(LazyStringOutputStream);\n};\n\n// Note:  There is no StringInputStream.  Instead, just create an\n// ArrayInputStream as follows:\n//   ArrayInputStream input(str.data(), str.size());\n\n// ===================================================================\n\n// A generic traditional input stream interface.\n//\n// Lots of traditional input streams (e.g. file descriptors, C stdio\n// streams, and C++ iostreams) expose an interface where every read\n// involves copying bytes into a buffer.  
If you want to take such an\n// interface and make a ZeroCopyInputStream based on it, simply implement\n// CopyingInputStream and then use CopyingInputStreamAdaptor.\n//\n// CopyingInputStream implementations should avoid buffering if possible.\n// CopyingInputStreamAdaptor does its own buffering and will read data\n// in large blocks.\nclass LIBPROTOBUF_EXPORT CopyingInputStream {\n public:\n  virtual ~CopyingInputStream();\n\n  // Reads up to \"size\" bytes into the given buffer.  Returns the number of\n  // bytes read.  Read() waits until at least one byte is available, or\n  // returns zero if no bytes will ever become available (EOF), or -1 if a\n  // permanent read error occurred.\n  virtual int Read(void* buffer, int size) = 0;\n\n  // Skips the next \"count\" bytes of input.  Returns the number of bytes\n  // actually skipped.  This will always be exactly equal to \"count\" unless\n  // EOF was reached or a permanent read error occurred.\n  //\n  // The default implementation just repeatedly calls Read() into a scratch\n  // buffer.\n  virtual int Skip(int count);\n};\n\n// A ZeroCopyInputStream which reads from a CopyingInputStream.  This is\n// useful for implementing ZeroCopyInputStreams that read from traditional\n// streams.  Note that this class is not really zero-copy.\n//\n// If you want to read from file descriptors or C++ istreams, this is\n// already implemented for you:  use FileInputStream or IstreamInputStream\n// respectively.\nclass LIBPROTOBUF_EXPORT CopyingInputStreamAdaptor : public ZeroCopyInputStream {\n public:\n  // Creates a stream that reads from the given CopyingInputStream.\n  // If a block_size is given, it specifies the number of bytes that\n  // should be read and returned with each call to Next().  Otherwise,\n  // a reasonable default is used.  
The caller retains ownership of\n  // copying_stream unless SetOwnsCopyingStream(true) is called.\n  explicit CopyingInputStreamAdaptor(CopyingInputStream* copying_stream,\n                                     int block_size = -1);\n  ~CopyingInputStreamAdaptor();\n\n  // Call SetOwnsCopyingStream(true) to tell the CopyingInputStreamAdaptor to\n  // delete the underlying CopyingInputStream when it is destroyed.\n  void SetOwnsCopyingStream(bool value) { owns_copying_stream_ = value; }\n\n  // implements ZeroCopyInputStream ----------------------------------\n  bool Next(const void** data, int* size);\n  void BackUp(int count);\n  bool Skip(int count);\n  int64 ByteCount() const;\n\n private:\n  // Insures that buffer_ is not NULL.\n  void AllocateBufferIfNeeded();\n  // Frees the buffer and resets buffer_used_.\n  void FreeBuffer();\n\n  // The underlying copying stream.\n  CopyingInputStream* copying_stream_;\n  bool owns_copying_stream_;\n\n  // True if we have seen a permenant error from the underlying stream.\n  bool failed_;\n\n  // The current position of copying_stream_, relative to the point where\n  // we started reading.\n  int64 position_;\n\n  // Data is read into this buffer.  It may be NULL if no buffer is currently\n  // in use.  Otherwise, it points to an array of size buffer_size_.\n  google::protobuf::scoped_array<uint8> buffer_;\n  const int buffer_size_;\n\n  // Number of valid bytes currently in the buffer (i.e. the size last\n  // returned by Next()).  0 <= buffer_used_ <= buffer_size_.\n  int buffer_used_;\n\n  // Number of bytes in the buffer which were backed up over by a call to\n  // BackUp().  These need to be returned again.\n  // 0 <= backup_bytes_ <= buffer_used_\n  int backup_bytes_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CopyingInputStreamAdaptor);\n};\n\n// ===================================================================\n\n// A generic traditional output stream interface.\n//\n// Lots of traditional output streams (e.g. 
file descriptors, C stdio\n// streams, and C++ iostreams) expose an interface where every write\n// involves copying bytes from a buffer.  If you want to take such an\n// interface and make a ZeroCopyOutputStream based on it, simply implement\n// CopyingOutputStream and then use CopyingOutputStreamAdaptor.\n//\n// CopyingOutputStream implementations should avoid buffering if possible.\n// CopyingOutputStreamAdaptor does its own buffering and will write data\n// in large blocks.\nclass LIBPROTOBUF_EXPORT CopyingOutputStream {\n public:\n  virtual ~CopyingOutputStream();\n\n  // Writes \"size\" bytes from the given buffer to the output.  Returns true\n  // if successful, false on a write error.\n  virtual bool Write(const void* buffer, int size) = 0;\n};\n\n// A ZeroCopyOutputStream which writes to a CopyingOutputStream.  This is\n// useful for implementing ZeroCopyOutputStreams that write to traditional\n// streams.  Note that this class is not really zero-copy.\n//\n// If you want to write to file descriptors or C++ ostreams, this is\n// already implemented for you:  use FileOutputStream or OstreamOutputStream\n// respectively.\nclass LIBPROTOBUF_EXPORT CopyingOutputStreamAdaptor : public ZeroCopyOutputStream {\n public:\n  // Creates a stream that writes to the given Unix file descriptor.\n  // If a block_size is given, it specifies the size of the buffers\n  // that should be returned by Next().  Otherwise, a reasonable default\n  // is used.\n  explicit CopyingOutputStreamAdaptor(CopyingOutputStream* copying_stream,\n                                      int block_size = -1);\n  ~CopyingOutputStreamAdaptor();\n\n  // Writes all pending data to the underlying stream.  Returns false if a\n  // write error occurred on the underlying stream.  
(The underlying\n  // stream itself is not necessarily flushed.)\n  bool Flush();\n\n  // Call SetOwnsCopyingStream(true) to tell the CopyingOutputStreamAdaptor to\n  // delete the underlying CopyingOutputStream when it is destroyed.\n  void SetOwnsCopyingStream(bool value) { owns_copying_stream_ = value; }\n\n  // implements ZeroCopyOutputStream ---------------------------------\n  bool Next(void** data, int* size);\n  void BackUp(int count);\n  int64 ByteCount() const;\n\n private:\n  // Write the current buffer, if it is present.\n  bool WriteBuffer();\n  // Insures that buffer_ is not NULL.\n  void AllocateBufferIfNeeded();\n  // Frees the buffer.\n  void FreeBuffer();\n\n  // The underlying copying stream.\n  CopyingOutputStream* copying_stream_;\n  bool owns_copying_stream_;\n\n  // True if we have seen a permenant error from the underlying stream.\n  bool failed_;\n\n  // The current position of copying_stream_, relative to the point where\n  // we started writing.\n  int64 position_;\n\n  // Data is written from this buffer.  It may be NULL if no buffer is\n  // currently in use.  Otherwise, it points to an array of size buffer_size_.\n  google::protobuf::scoped_array<uint8> buffer_;\n  const int buffer_size_;\n\n  // Number of valid bytes currently in the buffer (i.e. the size last\n  // returned by Next()).  When BackUp() is called, we just reduce this.\n  // 0 <= buffer_used_ <= buffer_size_.\n  int buffer_used_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CopyingOutputStreamAdaptor);\n};\n\n// ===================================================================\n\n// mutable_string_data() and as_string_data() are workarounds to improve\n// the performance of writing new data to an existing string.  Unfortunately\n// the methods provided by the string class are suboptimal, and using memcpy()\n// is mildly annoying because it requires its pointer args to be non-NULL even\n// if we ask it to copy 0 bytes.  
Furthermore, string_as_array() has the\n// property that it always returns NULL if its arg is the empty string, exactly\n// what we want to avoid if we're using it in conjunction with memcpy()!\n// With C++11, the desired memcpy() boils down to memcpy(..., &(*s)[0], size),\n// where s is a string*.  Without C++11, &(*s)[0] is not guaranteed to be safe,\n// so we use string_as_array(), and live with the extra logic that tests whether\n// *s is empty.\n\n// Return a pointer to mutable characters underlying the given string.  The\n// return value is valid until the next time the string is resized.  We\n// trust the caller to treat the return value as an array of length s->size().\ninline char* mutable_string_data(string* s) {\n#ifdef LANG_CXX11\n  // This should be simpler & faster than string_as_array() because the latter\n  // is guaranteed to return NULL when *s is empty, so it has to check for that.\n  return &(*s)[0];\n#else\n  return string_as_array(s);\n#endif\n}\n\n// as_string_data(s) is equivalent to\n//  ({ char* p = mutable_string_data(s); make_pair(p, p != NULL); })\n// Sometimes it's faster: in some scenarios p cannot be NULL, and then the\n// code can avoid that check.\ninline std::pair<char*, bool> as_string_data(string* s) {\n  char *p = mutable_string_data(s);\n#ifdef LANG_CXX11\n  return std::make_pair(p, true);\n#else\n  return make_pair(p, p != NULL);\n#endif\n}\n\n}  // namespace io\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_IO_ZERO_COPY_STREAM_IMPL_LITE_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/map.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file defines the map container and its helpers to support protobuf maps.\n//\n// The Map and MapIterator types are provided by this header file.\n// Please avoid using other types defined here, unless they are public\n// types within Map or MapIterator, such as Map::value_type.\n\n#ifndef GOOGLE_PROTOBUF_MAP_H__\n#define GOOGLE_PROTOBUF_MAP_H__\n\n#include <google/protobuf/stubs/hash.h>\n#include <iterator>\n#include <limits>  // To support Visual Studio 2008\n#include <set>\n#include <utility>\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/arena.h>\n#include <google/protobuf/generated_enum_util.h>\n#include <google/protobuf/map_type_handler.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/descriptor.h>\n#if __cpp_exceptions && LANG_CXX11\n#include <random>\n#endif\n\nnamespace google {\nnamespace protobuf {\n\ntemplate <typename Key, typename T>\nclass Map;\n\nclass MapIterator;\n\ntemplate <typename Enum> struct is_proto_enum;\n\nnamespace internal {\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nclass MapFieldLite;\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nclass MapField;\n\ntemplate <typename Key, typename 
T>\nclass TypeDefinedMapFieldBase;\n\nclass DynamicMapField;\n\nclass GeneratedMessageReflection;\n}  // namespace internal\n\n#define TYPE_CHECK(EXPECTEDTYPE, METHOD)                        \\\n  if (type() != EXPECTEDTYPE) {                                 \\\n    GOOGLE_LOG(FATAL)                                                  \\\n        << \"Protocol Buffer map usage error:\\n\"                 \\\n        << METHOD << \" type does not match\\n\"                   \\\n        << \"  Expected : \"                                      \\\n        << FieldDescriptor::CppTypeName(EXPECTEDTYPE) << \"\\n\"   \\\n        << \"  Actual   : \"                                      \\\n        << FieldDescriptor::CppTypeName(type());                \\\n  }\n\n// MapKey is an union type for representing any possible\n// map key.\nclass LIBPROTOBUF_EXPORT MapKey {\n public:\n  MapKey() : type_(0) {\n  }\n  MapKey(const MapKey& other) : type_(0) {\n    CopyFrom(other);\n  }\n\n  ~MapKey() {\n    if (type_ == FieldDescriptor::CPPTYPE_STRING) {\n      delete val_.string_value_;\n    }\n  }\n\n  FieldDescriptor::CppType type() const {\n    if (type_ == 0) {\n      GOOGLE_LOG(FATAL)\n          << \"Protocol Buffer map usage error:\\n\"\n          << \"MapKey::type MapKey is not initialized. 
\"\n          << \"Call set methods to initialize MapKey.\";\n    }\n    return (FieldDescriptor::CppType)type_;\n  }\n\n  void SetInt64Value(int64 value) {\n    SetType(FieldDescriptor::CPPTYPE_INT64);\n    val_.int64_value_ = value;\n  }\n  void SetUInt64Value(uint64 value) {\n    SetType(FieldDescriptor::CPPTYPE_UINT64);\n    val_.uint64_value_ = value;\n  }\n  void SetInt32Value(int32 value) {\n    SetType(FieldDescriptor::CPPTYPE_INT32);\n    val_.int32_value_ = value;\n  }\n  void SetUInt32Value(uint32 value) {\n    SetType(FieldDescriptor::CPPTYPE_UINT32);\n    val_.uint32_value_ = value;\n  }\n  void SetBoolValue(bool value) {\n    SetType(FieldDescriptor::CPPTYPE_BOOL);\n    val_.bool_value_ = value;\n  }\n  void SetStringValue(const string& val) {\n    SetType(FieldDescriptor::CPPTYPE_STRING);\n    *val_.string_value_ = val;\n  }\n\n  int64 GetInt64Value() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_INT64,\n               \"MapKey::GetInt64Value\");\n    return val_.int64_value_;\n  }\n  uint64 GetUInt64Value() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_UINT64,\n               \"MapKey::GetUInt64Value\");\n    return val_.uint64_value_;\n  }\n  int32 GetInt32Value() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_INT32,\n               \"MapKey::GetInt32Value\");\n    return val_.int32_value_;\n  }\n  uint32 GetUInt32Value() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_UINT32,\n               \"MapKey::GetUInt32Value\");\n    return val_.uint32_value_;\n  }\n  bool GetBoolValue() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_BOOL,\n               \"MapKey::GetBoolValue\");\n    return val_.bool_value_;\n  }\n  const string& GetStringValue() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_STRING,\n               \"MapKey::GetStringValue\");\n    return *val_.string_value_;\n  }\n\n  bool operator<(const MapKey& other) const {\n    if (type_ != other.type_) {\n      // We could define a total order that handles this case, but\n      // 
there currently no need.  So, for now, fail.\n      GOOGLE_LOG(FATAL) << \"Unsupported: type mismatch\";\n    }\n    switch (type()) {\n      case FieldDescriptor::CPPTYPE_DOUBLE:\n      case FieldDescriptor::CPPTYPE_FLOAT:\n      case FieldDescriptor::CPPTYPE_ENUM:\n      case FieldDescriptor::CPPTYPE_MESSAGE:\n        GOOGLE_LOG(FATAL) << \"Unsupported\";\n        return false;\n      case FieldDescriptor::CPPTYPE_STRING:\n        return *val_.string_value_ < *other.val_.string_value_;\n      case FieldDescriptor::CPPTYPE_INT64:\n        return val_.int64_value_ < other.val_.int64_value_;\n      case FieldDescriptor::CPPTYPE_INT32:\n        return val_.int32_value_ < other.val_.int32_value_;\n      case FieldDescriptor::CPPTYPE_UINT64:\n        return val_.uint64_value_ < other.val_.uint64_value_;\n      case FieldDescriptor::CPPTYPE_UINT32:\n        return val_.uint32_value_ < other.val_.uint32_value_;\n      case FieldDescriptor::CPPTYPE_BOOL:\n        return val_.bool_value_ < other.val_.bool_value_;\n    }\n    return false;\n  }\n\n  bool operator==(const MapKey& other) const {\n    if (type_ != other.type_) {\n      // To be consistent with operator<, we don't allow this either.\n      GOOGLE_LOG(FATAL) << \"Unsupported: type mismatch\";\n    }\n    switch (type()) {\n      case FieldDescriptor::CPPTYPE_DOUBLE:\n      case FieldDescriptor::CPPTYPE_FLOAT:\n      case FieldDescriptor::CPPTYPE_ENUM:\n      case FieldDescriptor::CPPTYPE_MESSAGE:\n        GOOGLE_LOG(FATAL) << \"Unsupported\";\n        break;\n      case FieldDescriptor::CPPTYPE_STRING:\n        return *val_.string_value_ == *other.val_.string_value_;\n      case FieldDescriptor::CPPTYPE_INT64:\n        return val_.int64_value_ == other.val_.int64_value_;\n      case FieldDescriptor::CPPTYPE_INT32:\n        return val_.int32_value_ == other.val_.int32_value_;\n      case FieldDescriptor::CPPTYPE_UINT64:\n        return val_.uint64_value_ == other.val_.uint64_value_;\n      case 
FieldDescriptor::CPPTYPE_UINT32:\n        return val_.uint32_value_ == other.val_.uint32_value_;\n      case FieldDescriptor::CPPTYPE_BOOL:\n        return val_.bool_value_ == other.val_.bool_value_;\n    }\n    GOOGLE_LOG(FATAL) << \"Can't get here.\";\n    return false;\n  }\n\n  void CopyFrom(const MapKey& other) {\n    SetType(other.type());\n    switch (type_) {\n      case FieldDescriptor::CPPTYPE_DOUBLE:\n      case FieldDescriptor::CPPTYPE_FLOAT:\n      case FieldDescriptor::CPPTYPE_ENUM:\n      case FieldDescriptor::CPPTYPE_MESSAGE:\n        GOOGLE_LOG(FATAL) << \"Unsupported\";\n        break;\n      case FieldDescriptor::CPPTYPE_STRING:\n        *val_.string_value_ = *other.val_.string_value_;\n        break;\n      case FieldDescriptor::CPPTYPE_INT64:\n        val_.int64_value_ = other.val_.int64_value_;\n        break;\n      case FieldDescriptor::CPPTYPE_INT32:\n        val_.int32_value_ = other.val_.int32_value_;\n        break;\n      case FieldDescriptor::CPPTYPE_UINT64:\n        val_.uint64_value_ = other.val_.uint64_value_;\n        break;\n      case FieldDescriptor::CPPTYPE_UINT32:\n        val_.uint32_value_ = other.val_.uint32_value_;\n        break;\n      case FieldDescriptor::CPPTYPE_BOOL:\n        val_.bool_value_ = other.val_.bool_value_;\n        break;\n    }\n  }\n\n private:\n  template <typename K, typename V>\n  friend class internal::TypeDefinedMapFieldBase;\n  friend class MapIterator;\n  friend class internal::DynamicMapField;\n\n  union KeyValue {\n    KeyValue() {}\n    string* string_value_;\n    int64 int64_value_;\n    int32 int32_value_;\n    uint64 uint64_value_;\n    uint32 uint32_value_;\n    bool bool_value_;\n  } val_;\n\n  void SetType(FieldDescriptor::CppType type) {\n    if (type_ == type) return;\n    if (type_ == FieldDescriptor::CPPTYPE_STRING) {\n      delete val_.string_value_;\n    }\n    type_ = type;\n    if (type_ == FieldDescriptor::CPPTYPE_STRING) {\n      val_.string_value_ = new string;\n    }\n  }\n\n 
 // type_ is 0 or a valid FieldDescriptor::CppType.\n  int type_;\n};\n\n// MapValueRef points to a map value.\nclass LIBPROTOBUF_EXPORT MapValueRef {\n public:\n  MapValueRef() : data_(NULL), type_(0) {}\n\n  void SetInt64Value(int64 value) {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_INT64,\n               \"MapValueRef::SetInt64Value\");\n    *reinterpret_cast<int64*>(data_) = value;\n  }\n  void SetUInt64Value(uint64 value) {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_UINT64,\n               \"MapValueRef::SetUInt64Value\");\n    *reinterpret_cast<uint64*>(data_) = value;\n  }\n  void SetInt32Value(int32 value) {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_INT32,\n               \"MapValueRef::SetInt32Value\");\n    *reinterpret_cast<int32*>(data_) = value;\n  }\n  void SetUInt32Value(uint32 value) {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_UINT32,\n               \"MapValueRef::SetUInt32Value\");\n    *reinterpret_cast<uint32*>(data_) = value;\n  }\n  void SetBoolValue(bool value) {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_BOOL,\n               \"MapValueRef::SetBoolValue\");\n    *reinterpret_cast<bool*>(data_) = value;\n  }\n  // TODO(jieluo) - Checks that enum is member.\n  void SetEnumValue(int value) {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_ENUM,\n               \"MapValueRef::SetEnumValue\");\n    *reinterpret_cast<int*>(data_) = value;\n  }\n  void SetStringValue(const string& value) {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_STRING,\n               \"MapValueRef::SetStringValue\");\n    *reinterpret_cast<string*>(data_) = value;\n  }\n  void SetFloatValue(float value) {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_FLOAT,\n               \"MapValueRef::SetFloatValue\");\n    *reinterpret_cast<float*>(data_) = value;\n  }\n  void SetDoubleValue(double value) {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_DOUBLE,\n               \"MapValueRef::SetDoubleValue\");\n    *reinterpret_cast<double*>(data_) = value;\n  }\n\n  int64 GetInt64Value() const {\n    
TYPE_CHECK(FieldDescriptor::CPPTYPE_INT64,\n               \"MapValueRef::GetInt64Value\");\n    return *reinterpret_cast<int64*>(data_);\n  }\n  uint64 GetUInt64Value() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_UINT64,\n               \"MapValueRef::GetUInt64Value\");\n    return *reinterpret_cast<uint64*>(data_);\n  }\n  int32 GetInt32Value() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_INT32,\n               \"MapValueRef::GetInt32Value\");\n    return *reinterpret_cast<int32*>(data_);\n  }\n  uint32 GetUInt32Value() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_UINT32,\n               \"MapValueRef::GetUInt32Value\");\n    return *reinterpret_cast<uint32*>(data_);\n  }\n  bool GetBoolValue() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_BOOL,\n               \"MapValueRef::GetBoolValue\");\n    return *reinterpret_cast<bool*>(data_);\n  }\n  int GetEnumValue() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_ENUM,\n               \"MapValueRef::GetEnumValue\");\n    return *reinterpret_cast<int*>(data_);\n  }\n  const string& GetStringValue() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_STRING,\n               \"MapValueRef::GetStringValue\");\n    return *reinterpret_cast<string*>(data_);\n  }\n  float GetFloatValue() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_FLOAT,\n               \"MapValueRef::GetFloatValue\");\n    return *reinterpret_cast<float*>(data_);\n  }\n  double GetDoubleValue() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_DOUBLE,\n               \"MapValueRef::GetDoubleValue\");\n    return *reinterpret_cast<double*>(data_);\n  }\n\n  const Message& GetMessageValue() const {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_MESSAGE,\n               \"MapValueRef::GetMessageValue\");\n    return *reinterpret_cast<Message*>(data_);\n  }\n\n  Message* MutableMessageValue() {\n    TYPE_CHECK(FieldDescriptor::CPPTYPE_MESSAGE,\n               \"MapValueRef::MutableMessageValue\");\n    return reinterpret_cast<Message*>(data_);\n  
}\n\n private:\n  template <typename K, typename V,\n            internal::WireFormatLite::FieldType key_wire_type,\n            internal::WireFormatLite::FieldType value_wire_type,\n            int default_enum_value>\n  friend class internal::MapField;\n  template <typename K, typename V>\n  friend class internal::TypeDefinedMapFieldBase;\n  friend class MapIterator;\n  friend class internal::GeneratedMessageReflection;\n  friend class internal::DynamicMapField;\n\n  void SetType(FieldDescriptor::CppType type) {\n    type_ = type;\n  }\n\n  FieldDescriptor::CppType type() const {\n    if (type_ == 0 || data_ == NULL) {\n      GOOGLE_LOG(FATAL)\n          << \"Protocol Buffer map usage error:\\n\"\n          << \"MapValueRef::type MapValueRef is not initialized.\";\n    }\n    return (FieldDescriptor::CppType)type_;\n  }\n  void SetValue(const void* val) {\n    data_ = const_cast<void*>(val);\n  }\n  void CopyFrom(const MapValueRef& other) {\n    type_ = other.type_;\n    data_ = other.data_;\n  }\n  // Only used in DynamicMapField\n  void DeleteData() {\n    switch (type_) {\n#define HANDLE_TYPE(CPPTYPE, TYPE)                              \\\n      case google::protobuf::FieldDescriptor::CPPTYPE_##CPPTYPE: {        \\\n        delete reinterpret_cast<TYPE*>(data_);                  \\\n        break;                                                  \\\n      }\n      HANDLE_TYPE(INT32, int32);\n      HANDLE_TYPE(INT64, int64);\n      HANDLE_TYPE(UINT32, uint32);\n      HANDLE_TYPE(UINT64, uint64);\n      HANDLE_TYPE(DOUBLE, double);\n      HANDLE_TYPE(FLOAT, float);\n      HANDLE_TYPE(BOOL, bool);\n      HANDLE_TYPE(STRING, string);\n      HANDLE_TYPE(ENUM, int32);\n      HANDLE_TYPE(MESSAGE, Message);\n#undef HANDLE_TYPE\n    }\n  }\n  // data_ point to a map value. 
MapValueRef does not\n  // own this value.\n  void* data_;\n  // type_ is 0 or a valid FieldDescriptor::CppType.\n  int type_;\n};\n\n#undef TYPE_CHECK\n\n// This is the class for google::protobuf::Map's internal value_type. Instead of using\n// std::pair as value_type, we use this class which provides us more control of\n// its process of construction and destruction.\ntemplate <typename Key, typename T>\nclass MapPair {\n public:\n  typedef const Key first_type;\n  typedef T second_type;\n\n  MapPair(const Key& other_first, const T& other_second)\n      : first(other_first), second(other_second) {}\n  explicit MapPair(const Key& other_first) : first(other_first), second() {}\n  MapPair(const MapPair& other)\n      : first(other.first), second(other.second) {}\n\n  ~MapPair() {}\n\n  // Implicitly convertible to std::pair of compatible types.\n  template <typename T1, typename T2>\n  operator std::pair<T1, T2>() const {\n    return std::pair<T1, T2>(first, second);\n  }\n\n  const Key first;\n  T second;\n\n private:\n  friend class ::google::protobuf::Arena;\n  friend class Map<Key, T>;\n};\n\n// google::protobuf::Map is an associative container type used to store protobuf map\n// fields.  Each Map instance may or may not use a different hash function, a\n// different iteration order, and so on.  
E.g., please don't examine\n// implementation details to decide if the following would work:\n//  Map<int, int> m0, m1;\n//  m0[0] = m1[0] = m0[1] = m1[1] = 0;\n//  assert(m0.begin()->first == m1.begin()->first);  // Bug!\n//\n// Map's interface is similar to std::unordered_map, except that Map is not\n// designed to play well with exceptions.\ntemplate <typename Key, typename T>\nclass Map {\n public:\n  typedef Key key_type;\n  typedef T mapped_type;\n  typedef MapPair<Key, T> value_type;\n\n  typedef value_type* pointer;\n  typedef const value_type* const_pointer;\n  typedef value_type& reference;\n  typedef const value_type& const_reference;\n\n  typedef size_t size_type;\n  typedef hash<Key> hasher;\n\n  explicit Map(bool old_style = true)\n      : arena_(NULL),\n        default_enum_value_(0),\n        old_style_(old_style) {\n    Init();\n  }\n  explicit Map(Arena* arena, bool old_style = true)\n      : arena_(arena),\n        default_enum_value_(0),\n        old_style_(old_style) {\n    Init();\n  }\n  Map(const Map& other)\n      : arena_(NULL),\n        default_enum_value_(other.default_enum_value_),\n        old_style_(other.old_style_) {\n    Init();\n    insert(other.begin(), other.end());\n  }\n  template <class InputIt>\n  Map(const InputIt& first, const InputIt& last, bool old_style = true)\n      : arena_(NULL),\n        default_enum_value_(0),\n        old_style_(old_style) {\n    Init();\n    insert(first, last);\n  }\n\n  ~Map() {\n    clear();\n    if (arena_ == NULL) {\n      if (old_style_)\n        delete deprecated_elements_;\n      else\n        delete elements_;\n    }\n  }\n\n private:\n  void Init() {\n    if (old_style_)\n      deprecated_elements_ = Arena::Create<DeprecatedInnerMap>(\n          arena_, 0, hasher(), std::equal_to<Key>(),\n          MapAllocator<std::pair<const Key, MapPair<Key, T>*> >(arena_));\n    else\n      elements_ =\n          Arena::Create<InnerMap>(arena_, 0, hasher(), Allocator(arena_));\n  }\n\n  // 
re-implement std::allocator to use arena allocator for memory allocation.\n  // Used for google::protobuf::Map implementation. Users should not use this class\n  // directly.\n  template <typename U>\n  class MapAllocator {\n   public:\n    typedef U value_type;\n    typedef value_type* pointer;\n    typedef const value_type* const_pointer;\n    typedef value_type& reference;\n    typedef const value_type& const_reference;\n    typedef size_t size_type;\n    typedef ptrdiff_t difference_type;\n\n    MapAllocator() : arena_(NULL) {}\n    explicit MapAllocator(Arena* arena) : arena_(arena) {}\n    template <typename X>\n    MapAllocator(const MapAllocator<X>& allocator)\n        : arena_(allocator.arena()) {}\n\n    pointer allocate(size_type n, const_pointer hint = 0) {\n      // If arena is not given, malloc needs to be called which doesn't\n      // construct element object.\n      if (arena_ == NULL) {\n        return static_cast<pointer>(::operator new(n * sizeof(value_type)));\n      } else {\n        return reinterpret_cast<pointer>(\n            Arena::CreateArray<uint8>(arena_, n * sizeof(value_type)));\n      }\n    }\n\n    void deallocate(pointer p, size_type n) {\n      if (arena_ == NULL) {\n#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)\n        ::operator delete(p, n * sizeof(value_type));\n#else\n        ::operator delete(p);\n#endif\n      }\n    }\n\n#if __cplusplus >= 201103L && !defined(GOOGLE_PROTOBUF_OS_APPLE) && \\\n    !defined(GOOGLE_PROTOBUF_OS_NACL) &&                            \\\n    !defined(GOOGLE_PROTOBUF_OS_ANDROID) &&                         \\\n    !defined(GOOGLE_PROTOBUF_OS_EMSCRIPTEN)\n    template<class NodeType, class... Args>\n    void construct(NodeType* p, Args&&... args) {\n      // Clang 3.6 doesn't compile static casting to void* directly. (Issue\n      // #1266) According C++ standard 5.2.9/1: \"The static_cast operator shall\n      // not cast away constness\". 
So first the maybe const pointer is casted to\n      // const void* and after the const void* is const casted.\n      new (const_cast<void*>(static_cast<const void*>(p)))\n          NodeType(std::forward<Args>(args)...);\n    }\n\n    template<class NodeType>\n    void destroy(NodeType* p) {\n      p->~NodeType();\n    }\n#else\n    void construct(pointer p, const_reference t) { new (p) value_type(t); }\n\n    void destroy(pointer p) { p->~value_type(); }\n#endif\n\n    template <typename X>\n    struct rebind {\n      typedef MapAllocator<X> other;\n    };\n\n    template <typename X>\n    bool operator==(const MapAllocator<X>& other) const {\n      return arena_ == other.arena_;\n    }\n\n    template <typename X>\n    bool operator!=(const MapAllocator<X>& other) const {\n      return arena_ != other.arena_;\n    }\n\n    // To support Visual Studio 2008\n    size_type max_size() const {\n      return std::numeric_limits<size_type>::max();\n    }\n\n    // To support gcc-4.4, which does not properly\n    // support templated friend classes\n    Arena* arena() const {\n      return arena_;\n    }\n\n   private:\n    typedef void DestructorSkippable_;\n    Arena* const arena_;\n  };\n\n  // InnerMap's key type is Key and its value type is value_type*.  We use a\n  // custom class here and for Node, below, to ensure that k_ is at offset 0,\n  // allowing safe conversion from pointer to Node to pointer to Key, and vice\n  // versa when appropriate.\n  class KeyValuePair {\n   public:\n    KeyValuePair(const Key& k, value_type* v) : k_(k), v_(v) {}\n\n    const Key& key() const { return k_; }\n    Key& key() { return k_; }\n    value_type* const value() const { return v_; }\n    value_type*& value() { return v_; }\n\n   private:\n    Key k_;\n    value_type* v_;\n  };\n\n  typedef MapAllocator<KeyValuePair> Allocator;\n\n  // InnerMap is a generic hash-based map.  It doesn't contain any\n  // protocol-buffer-specific logic.  
It is a chaining hash map with the\n  // additional feature that some buckets can be converted to use an ordered\n  // container.  This ensures O(lg n) bounds on find, insert, and erase, while\n  // avoiding the overheads of ordered containers most of the time.\n  //\n  // The implementation doesn't need the full generality of unordered_map,\n  // and it doesn't have it.  More bells and whistles can be added as needed.\n  // Some implementation details:\n  // 1. The hash function has type hasher and the equality function\n  //    equal_to<Key>.  We inherit from hasher to save space\n  //    (empty-base-class optimization).\n  // 2. The number of buckets is a power of two.\n  // 3. Buckets are converted to trees in pairs: if we convert bucket b then\n  //    buckets b and b^1 will share a tree.  Invariant: buckets b and b^1 have\n  //    the same non-NULL value iff they are sharing a tree.  (An alternative\n  //    implementation strategy would be to have a tag bit per bucket.)\n  // 4. As is typical for hash_map and such, the Keys and Values are always\n  //    stored in linked list nodes.  Pointers to elements are never invalidated\n  //    until the element is deleted.\n  // 5. The trees' payload type is pointer to linked-list node.  Tree-converting\n  //    a bucket doesn't copy Key-Value pairs.\n  // 6. Once we've tree-converted a bucket, it is never converted back. However,\n  //    the items a tree contains may wind up assigned to trees or lists upon a\n  //    rehash.\n  // 7. The code requires no C++ features from C++11 or later.\n  // 8. Mutations to a map do not invalidate the map's iterators, pointers to\n  //    elements, or references to elements.\n  // 9. 
Except for erase(iterator), any non-const method can reorder iterators.\n  class InnerMap : private hasher {\n   public:\n    typedef value_type* Value;\n\n    InnerMap(size_type n, hasher h, Allocator alloc)\n        : hasher(h),\n          num_elements_(0),\n          seed_(Seed()),\n          table_(NULL),\n          alloc_(alloc) {\n      n = TableSize(n);\n      table_ = CreateEmptyTable(n);\n      num_buckets_ = index_of_first_non_null_ = n;\n    }\n\n    ~InnerMap() {\n      if (table_ != NULL) {\n        clear();\n        Dealloc<void*>(table_, num_buckets_);\n      }\n    }\n\n   private:\n    enum { kMinTableSize = 8 };\n\n    // Linked-list nodes, as one would expect for a chaining hash table.\n    struct Node {\n      KeyValuePair kv;\n      Node* next;\n    };\n\n    // This is safe only if the given pointer is known to point to a Key that is\n    // part of a Node.\n    static Node* NodePtrFromKeyPtr(Key* k) {\n      return reinterpret_cast<Node*>(k);\n    }\n\n    static Key* KeyPtrFromNodePtr(Node* node) { return &node->kv.key(); }\n\n    // Trees.  The payload type is pointer to Key, so that we can query the tree\n    // with Keys that are not in any particular data structure.  When we insert,\n    // though, the pointer is always pointing to a Key that is inside a Node.\n    struct KeyCompare {\n      bool operator()(const Key* n0, const Key* n1) const { return *n0 < *n1; }\n    };\n    typedef typename Allocator::template rebind<Key*>::other KeyPtrAllocator;\n    typedef std::set<Key*, KeyCompare, KeyPtrAllocator> Tree;\n\n    // iterator and const_iterator are instantiations of iterator_base.\n    template <typename KeyValueType>\n    class iterator_base {\n     public:\n      typedef KeyValueType& reference;\n      typedef KeyValueType* pointer;\n      typedef typename Tree::iterator TreeIterator;\n\n      // Invariants:\n      // node_ is always correct. 
This is handy because the most common\n      // operations are operator* and operator-> and they only use node_.\n      // When node_ is set to a non-NULL value, all the other non-const fields\n      // are updated to be correct also, but those fields can become stale\n      // if the underlying map is modified.  When those fields are needed they\n      // are rechecked, and updated if necessary.\n      iterator_base() : node_(NULL) {}\n\n      explicit iterator_base(const InnerMap* m) : m_(m) {\n        SearchFrom(m->index_of_first_non_null_);\n      }\n\n      // Any iterator_base can convert to any other.  This is overkill, and we\n      // rely on the enclosing class to use it wisely.  The standard \"iterator\n      // can convert to const_iterator\" is OK but the reverse direction is not.\n      template <typename U>\n      explicit iterator_base(const iterator_base<U>& it)\n          : node_(it.node_),\n            m_(it.m_),\n            bucket_index_(it.bucket_index_),\n            tree_it_(it.tree_it_) {}\n\n      iterator_base(Node* n, const InnerMap* m, size_type index)\n          : node_(n),\n            m_(m),\n            bucket_index_(index) {}\n\n      iterator_base(TreeIterator tree_it, const InnerMap* m, size_type index)\n          : node_(NodePtrFromKeyPtr(*tree_it)),\n            m_(m),\n            bucket_index_(index),\n            tree_it_(tree_it) {\n        // Invariant: iterators that use tree_it_ have an even bucket_index_.\n        GOOGLE_DCHECK_EQ(bucket_index_ % 2, 0);\n      }\n\n      // Advance through buckets, looking for the first that isn't empty.\n      // If nothing non-empty is found then leave node_ == NULL.\n      void SearchFrom(size_type start_bucket) {\n        GOOGLE_DCHECK(m_->index_of_first_non_null_ == m_->num_buckets_ ||\n               m_->table_[m_->index_of_first_non_null_] != NULL);\n        node_ = NULL;\n        for (bucket_index_ = start_bucket; bucket_index_ < m_->num_buckets_;\n             bucket_index_++) 
{\n          if (m_->TableEntryIsNonEmptyList(bucket_index_)) {\n            node_ = static_cast<Node*>(m_->table_[bucket_index_]);\n            break;\n          } else if (m_->TableEntryIsTree(bucket_index_)) {\n            Tree* tree = static_cast<Tree*>(m_->table_[bucket_index_]);\n            GOOGLE_DCHECK(!tree->empty());\n            tree_it_ = tree->begin();\n            node_ = NodePtrFromKeyPtr(*tree_it_);\n            break;\n          }\n        }\n      }\n\n      reference operator*() const { return node_->kv; }\n      pointer operator->() const { return &(operator*()); }\n\n      friend bool operator==(const iterator_base& a, const iterator_base& b) {\n        return a.node_ == b.node_;\n      }\n      friend bool operator!=(const iterator_base& a, const iterator_base& b) {\n        return a.node_ != b.node_;\n      }\n\n      iterator_base& operator++() {\n        if (node_->next == NULL) {\n          const bool is_list = revalidate_if_necessary();\n          if (is_list) {\n            SearchFrom(bucket_index_ + 1);\n          } else {\n            GOOGLE_DCHECK_EQ(bucket_index_ & 1, 0);\n            Tree* tree = static_cast<Tree*>(m_->table_[bucket_index_]);\n            if (++tree_it_ == tree->end()) {\n              SearchFrom(bucket_index_ + 2);\n            } else {\n              node_ = NodePtrFromKeyPtr(*tree_it_);\n            }\n          }\n        } else {\n          node_ = node_->next;\n        }\n        return *this;\n      }\n\n      iterator_base operator++(int /* unused */) {\n        iterator_base tmp = *this;\n        ++*this;\n        return tmp;\n      }\n\n      // Assumes node_ and m_ are correct and non-NULL, but other fields may be\n      // stale.  Fix them as needed.  
Then return true iff node_ points to a\n      // Node in a list.\n      bool revalidate_if_necessary() {\n        GOOGLE_DCHECK(node_ != NULL && m_ != NULL);\n        // Force bucket_index_ to be in range.\n        bucket_index_ &= (m_->num_buckets_ - 1);\n        // Common case: the bucket we think is relevant points to node_.\n        if (m_->table_[bucket_index_] == static_cast<void*>(node_))\n          return true;\n        // Less common: the bucket is a linked list with node_ somewhere in it,\n        // but not at the head.\n        if (m_->TableEntryIsNonEmptyList(bucket_index_)) {\n          Node* l = static_cast<Node*>(m_->table_[bucket_index_]);\n          while ((l = l->next) != NULL) {\n            if (l == node_) {\n              return true;\n            }\n          }\n        }\n        // Well, bucket_index_ still might be correct, but probably\n        // not.  Revalidate just to be sure.  This case is rare enough that we\n        // don't worry about potential optimizations, such as having a custom\n        // find-like method that compares Node* instead of const Key&.\n        iterator_base i(m_->find(*KeyPtrFromNodePtr(node_)));\n        bucket_index_ = i.bucket_index_;\n        tree_it_ = i.tree_it_;\n        return m_->TableEntryIsList(bucket_index_);\n      }\n\n      Node* node_;\n      const InnerMap* m_;\n      size_type bucket_index_;\n      TreeIterator tree_it_;\n    };\n\n   public:\n    typedef iterator_base<KeyValuePair> iterator;\n    typedef iterator_base<const KeyValuePair> const_iterator;\n\n    iterator begin() { return iterator(this); }\n    iterator end() { return iterator(); }\n    const_iterator begin() const { return const_iterator(this); }\n    const_iterator end() const { return const_iterator(); }\n\n    void clear() {\n      for (size_type b = 0; b < num_buckets_; b++) {\n        if (TableEntryIsNonEmptyList(b)) {\n          Node* node = static_cast<Node*>(table_[b]);\n          table_[b] = NULL;\n          do {\n     
       Node* next = node->next;\n            DestroyNode(node);\n            node = next;\n          } while (node != NULL);\n        } else if (TableEntryIsTree(b)) {\n          Tree* tree = static_cast<Tree*>(table_[b]);\n          GOOGLE_DCHECK(table_[b] == table_[b + 1] && (b & 1) == 0);\n          table_[b] = table_[b + 1] = NULL;\n          typename Tree::iterator tree_it = tree->begin();\n          do {\n            Node* node = NodePtrFromKeyPtr(*tree_it);\n            typename Tree::iterator next = tree_it;\n            ++next;\n            tree->erase(tree_it);\n            DestroyNode(node);\n            tree_it = next;\n          } while (tree_it != tree->end());\n          DestroyTree(tree);\n          b++;\n        }\n      }\n      num_elements_ = 0;\n      index_of_first_non_null_ = num_buckets_;\n    }\n\n    const hasher& hash_function() const { return *this; }\n\n    static size_type max_size() {\n      return static_cast<size_type>(1) << (sizeof(void**) >= 8 ? 60 : 28);\n    }\n    size_type size() const { return num_elements_; }\n    bool empty() const { return size() == 0; }\n\n    iterator find(const Key& k) { return iterator(FindHelper(k).first); }\n    const_iterator find(const Key& k) const { return FindHelper(k).first; }\n\n    // In traditional C++ style, this performs \"insert if not present.\"\n    std::pair<iterator, bool> insert(const KeyValuePair& kv) {\n      std::pair<const_iterator, size_type> p = FindHelper(kv.key());\n      // Case 1: key was already present.\n      if (p.first.node_ != NULL)\n        return std::make_pair(iterator(p.first), false);\n      // Case 2: insert.\n      if (ResizeIfLoadIsOutOfRange(num_elements_ + 1)) {\n        p = FindHelper(kv.key());\n      }\n      const size_type b = p.second;  // bucket number\n      Node* node = Alloc<Node>(1);\n      alloc_.construct(&node->kv, kv);\n      iterator result = InsertUnique(b, node);\n      ++num_elements_;\n      return std::make_pair(result, true);\n    }\n\n 
   // The same, but if an insertion is necessary then the value portion of the\n    // inserted key-value pair is left uninitialized.\n    std::pair<iterator, bool> insert(const Key& k) {\n      std::pair<const_iterator, size_type> p = FindHelper(k);\n      // Case 1: key was already present.\n      if (p.first.node_ != NULL)\n        return std::make_pair(iterator(p.first), false);\n      // Case 2: insert.\n      if (ResizeIfLoadIsOutOfRange(num_elements_ + 1)) {\n        p = FindHelper(k);\n      }\n      const size_type b = p.second;  // bucket number\n      Node* node = Alloc<Node>(1);\n      typedef typename Allocator::template rebind<Key>::other KeyAllocator;\n      KeyAllocator(alloc_).construct(&node->kv.key(), k);\n      iterator result = InsertUnique(b, node);\n      ++num_elements_;\n      return std::make_pair(result, true);\n    }\n\n    Value& operator[](const Key& k) {\n      KeyValuePair kv(k, Value());\n      return insert(kv).first->value();\n    }\n\n    void erase(iterator it) {\n      GOOGLE_DCHECK_EQ(it.m_, this);\n      const bool is_list = it.revalidate_if_necessary();\n      size_type b = it.bucket_index_;\n      Node* const item = it.node_;\n      if (is_list) {\n        GOOGLE_DCHECK(TableEntryIsNonEmptyList(b));\n        Node* head = static_cast<Node*>(table_[b]);\n        head = EraseFromLinkedList(item, head);\n        table_[b] = static_cast<void*>(head);\n      } else {\n        GOOGLE_DCHECK(TableEntryIsTree(b));\n        Tree* tree = static_cast<Tree*>(table_[b]);\n        tree->erase(it.tree_it_);\n        if (tree->empty()) {\n          // Force b to be the minimum of b and b ^ 1.  
This is important\n          // only because we want index_of_first_non_null_ to be correct.\n          b &= ~static_cast<size_type>(1);\n          DestroyTree(tree);\n          table_[b] = table_[b + 1] = NULL;\n        }\n      }\n      DestroyNode(item);\n      --num_elements_;\n      if (GOOGLE_PREDICT_FALSE(b == index_of_first_non_null_)) {\n        while (index_of_first_non_null_ < num_buckets_ &&\n               table_[index_of_first_non_null_] == NULL) {\n          ++index_of_first_non_null_;\n        }\n      }\n    }\n\n   private:\n    std::pair<const_iterator, size_type> FindHelper(const Key& k) const {\n      size_type b = BucketNumber(k);\n      if (TableEntryIsNonEmptyList(b)) {\n        Node* node = static_cast<Node*>(table_[b]);\n        do {\n          if (IsMatch(*KeyPtrFromNodePtr(node), k)) {\n            return std::make_pair(const_iterator(node, this, b), b);\n          } else {\n            node = node->next;\n          }\n        } while (node != NULL);\n      } else if (TableEntryIsTree(b)) {\n        GOOGLE_DCHECK_EQ(table_[b], table_[b ^ 1]);\n        b &= ~static_cast<size_t>(1);\n        Tree* tree = static_cast<Tree*>(table_[b]);\n        Key* key = const_cast<Key*>(&k);\n        typename Tree::iterator tree_it = tree->find(key);\n        if (tree_it != tree->end()) {\n          return std::make_pair(const_iterator(tree_it, this, b), b);\n        }\n      }\n      return std::make_pair(end(), b);\n    }\n\n    // Insert the given Node in bucket b.  If that would make bucket b too big,\n    // and bucket b is not a tree, create a tree for buckets b and b^1 to share.\n    // Requires count(*KeyPtrFromNodePtr(node)) == 0 and that b is the correct\n    // bucket.  
num_elements_ is not modified.\n    iterator InsertUnique(size_type b, Node* node) {\n      GOOGLE_DCHECK(index_of_first_non_null_ == num_buckets_ ||\n             table_[index_of_first_non_null_] != NULL);\n      // In practice, the code that led to this point may have already\n      // determined whether we are inserting into an empty list, a short list,\n      // or whatever.  But it's probably cheap enough to recompute that here;\n      // it's likely that we're inserting into an empty or short list.\n      iterator result;\n      GOOGLE_DCHECK(find(*KeyPtrFromNodePtr(node)) == end());\n      if (TableEntryIsEmpty(b)) {\n        result = InsertUniqueInList(b, node);\n      } else if (TableEntryIsNonEmptyList(b)) {\n        if (GOOGLE_PREDICT_FALSE(TableEntryIsTooLong(b))) {\n          TreeConvert(b);\n          result = InsertUniqueInTree(b, node);\n          GOOGLE_DCHECK_EQ(result.bucket_index_, b & ~static_cast<size_type>(1));\n        } else {\n          // Insert into a pre-existing list.  This case cannot modify\n          // index_of_first_non_null_, so we skip the code to update it.\n          return InsertUniqueInList(b, node);\n        }\n      } else {\n        // Insert into a pre-existing tree.  This case cannot modify\n        // index_of_first_non_null_, so we skip the code to update it.\n        return InsertUniqueInTree(b, node);\n      }\n      index_of_first_non_null_ =\n          std::min(index_of_first_non_null_, result.bucket_index_);\n      return result;\n    }\n\n    // Helper for InsertUnique.  Handles the case where bucket b is a\n    // not-too-long linked list.\n    iterator InsertUniqueInList(size_type b, Node* node) {\n      node->next = static_cast<Node*>(table_[b]);\n      table_[b] = static_cast<void*>(node);\n      return iterator(node, this, b);\n    }\n\n    // Helper for InsertUnique.  
Handles the case where bucket b points to a\n    // Tree.\n    iterator InsertUniqueInTree(size_type b, Node* node) {\n      GOOGLE_DCHECK_EQ(table_[b], table_[b ^ 1]);\n      // Maintain the invariant that node->next is NULL for all Nodes in Trees.\n      node->next = NULL;\n      return iterator(static_cast<Tree*>(table_[b])\n                      ->insert(KeyPtrFromNodePtr(node))\n                      .first,\n                      this, b & ~static_cast<size_t>(1));\n    }\n\n    // Returns whether it did resize.  Currently this is only used when\n    // num_elements_ increases, though it could be used in other situations.\n    // It checks for load too low as well as load too high: because any number\n    // of erases can occur between inserts, the load could be as low as 0 here.\n    // Resizing to a lower size is not always helpful, but failing to do so can\n    // destroy the expected big-O bounds for some operations. By having the\n    // policy that sometimes we resize down as well as up, clients can easily\n    // keep O(size()) = O(number of buckets) if they want that.\n    bool ResizeIfLoadIsOutOfRange(size_type new_size) {\n      const size_type kMaxMapLoadTimes16 = 12;  // controls RAM vs CPU tradeoff\n      const size_type hi_cutoff = num_buckets_ * kMaxMapLoadTimes16 / 16;\n      const size_type lo_cutoff = hi_cutoff / 4;\n      // We don't care how many elements are in trees.  If a lot are,\n      // we may resize even though there are many empty buckets.  In\n      // practice, this seems fine.\n      if (GOOGLE_PREDICT_FALSE(new_size >= hi_cutoff)) {\n        if (num_buckets_ <= max_size() / 2) {\n          Resize(num_buckets_ * 2);\n          return true;\n        }\n      } else if (GOOGLE_PREDICT_FALSE(new_size <= lo_cutoff &&\n                               num_buckets_ > kMinTableSize)) {\n        size_type lg2_of_size_reduction_factor = 1;\n        // It's possible we want to shrink a lot here... 
size() could even be 0.\n        // So, estimate how much to shrink by making sure we don't shrink so\n        // much that we would need to grow the table after a few inserts.\n        const size_type hypothetical_size = new_size * 5 / 4 + 1;\n        while ((hypothetical_size << lg2_of_size_reduction_factor) <\n               hi_cutoff) {\n          ++lg2_of_size_reduction_factor;\n        }\n        size_type new_num_buckets = std::max<size_type>(\n            kMinTableSize, num_buckets_ >> lg2_of_size_reduction_factor);\n        if (new_num_buckets != num_buckets_) {\n          Resize(new_num_buckets);\n          return true;\n        }\n      }\n      return false;\n    }\n\n    // Resize to the given number of buckets.\n    void Resize(size_t new_num_buckets) {\n      GOOGLE_DCHECK_GE(new_num_buckets, kMinTableSize);\n      void** const old_table = table_;\n      const size_type old_table_size = num_buckets_;\n      num_buckets_ = new_num_buckets;\n      table_ = CreateEmptyTable(num_buckets_);\n      const size_type start = index_of_first_non_null_;\n      index_of_first_non_null_ = num_buckets_;\n      for (size_type i = start; i < old_table_size; i++) {\n        if (TableEntryIsNonEmptyList(old_table, i)) {\n          TransferList(old_table, i);\n        } else if (TableEntryIsTree(old_table, i)) {\n          TransferTree(old_table, i++);\n        }\n      }\n      Dealloc<void*>(old_table, old_table_size);\n    }\n\n    void TransferList(void* const* table, size_type index) {\n      Node* node = static_cast<Node*>(table[index]);\n      do {\n        Node* next = node->next;\n        InsertUnique(BucketNumber(*KeyPtrFromNodePtr(node)), node);\n        node = next;\n      } while (node != NULL);\n    }\n\n    void TransferTree(void* const* table, size_type index) {\n      Tree* tree = static_cast<Tree*>(table[index]);\n      typename Tree::iterator tree_it = tree->begin();\n      do {\n        Node* node = NodePtrFromKeyPtr(*tree_it);\n        
InsertUnique(BucketNumber(**tree_it), node);\n      } while (++tree_it != tree->end());\n      DestroyTree(tree);\n    }\n\n    Node* EraseFromLinkedList(Node* item, Node* head) {\n      if (head == item) {\n        return head->next;\n      } else {\n        head->next = EraseFromLinkedList(item, head->next);\n        return head;\n      }\n    }\n\n    bool TableEntryIsEmpty(size_type b) const {\n      return TableEntryIsEmpty(table_, b);\n    }\n    bool TableEntryIsNonEmptyList(size_type b) const {\n      return TableEntryIsNonEmptyList(table_, b);\n    }\n    bool TableEntryIsTree(size_type b) const {\n      return TableEntryIsTree(table_, b);\n    }\n    bool TableEntryIsList(size_type b) const {\n      return TableEntryIsList(table_, b);\n    }\n    static bool TableEntryIsEmpty(void* const* table, size_type b) {\n      return table[b] == NULL;\n    }\n    static bool TableEntryIsNonEmptyList(void* const* table, size_type b) {\n      return table[b] != NULL && table[b] != table[b ^ 1];\n    }\n    static bool TableEntryIsTree(void* const* table, size_type b) {\n      return !TableEntryIsEmpty(table, b) &&\n          !TableEntryIsNonEmptyList(table, b);\n    }\n    static bool TableEntryIsList(void* const* table, size_type b) {\n      return !TableEntryIsTree(table, b);\n    }\n\n    void TreeConvert(size_type b) {\n      GOOGLE_DCHECK(!TableEntryIsTree(b) && !TableEntryIsTree(b ^ 1));\n      typename Allocator::template rebind<Tree>::other tree_allocator(alloc_);\n      Tree* tree = tree_allocator.allocate(1);\n      // We want to use the three-arg form of construct, if it exists, but we\n      // create a temporary and use the two-arg construct that's known to exist.\n      // It's clunky, but the compiler should be able to generate more-or-less\n      // the same code.\n      tree_allocator.construct(tree,\n                               Tree(KeyCompare(), KeyPtrAllocator(alloc_)));\n      // Now the tree is ready to use.\n      size_type count = 
CopyListToTree(b, tree) + CopyListToTree(b ^ 1, tree);\n      GOOGLE_DCHECK_EQ(count, tree->size());\n      table_[b] = table_[b ^ 1] = static_cast<void*>(tree);\n    }\n\n    // Copy a linked list in the given bucket to a tree.\n    // Returns the number of things it copied.\n    size_type CopyListToTree(size_type b, Tree* tree) {\n      size_type count = 0;\n      Node* node = static_cast<Node*>(table_[b]);\n      while (node != NULL) {\n        tree->insert(KeyPtrFromNodePtr(node));\n        ++count;\n        Node* next = node->next;\n        node->next = NULL;\n        node = next;\n      }\n      return count;\n    }\n\n    // Return whether table_[b] is a linked list that seems awfully long.\n    // Requires table_[b] to point to a non-empty linked list.\n    bool TableEntryIsTooLong(size_type b) {\n      const size_type kMaxLength = 8;\n      size_type count = 0;\n      Node* node = static_cast<Node*>(table_[b]);\n      do {\n        ++count;\n        node = node->next;\n      } while (node != NULL);\n      // Invariant: no linked list ever is more than kMaxLength in length.\n      GOOGLE_DCHECK_LE(count, kMaxLength);\n      return count >= kMaxLength;\n    }\n\n    size_type BucketNumber(const Key& k) const {\n      // We inherit from hasher, so one-arg operator() provides a hash function.\n      size_type h = (*const_cast<InnerMap*>(this))(k);\n      // To help prevent people from making assumptions about the hash function,\n      // we use the seed differently depending on NDEBUG.  
The default hash\n      // function, the seeding, etc., are all likely to change in the future.\n#ifndef NDEBUG\n      return (h * (seed_ | 1)) & (num_buckets_ - 1);\n#else\n      return (h + seed_) & (num_buckets_ - 1);\n#endif\n    }\n\n    bool IsMatch(const Key& k0, const Key& k1) const {\n      return std::equal_to<Key>()(k0, k1);\n    }\n\n    // Return a power of two no less than max(kMinTableSize, n).\n    // Assumes either n < kMinTableSize or n is a power of two.\n    size_type TableSize(size_type n) {\n      return n < kMinTableSize ? kMinTableSize : n;\n    }\n\n    // Use alloc_ to allocate an array of n objects of type U.\n    template <typename U>\n    U* Alloc(size_type n) {\n      typedef typename Allocator::template rebind<U>::other alloc_type;\n      return alloc_type(alloc_).allocate(n);\n    }\n\n    // Use alloc_ to deallocate an array of n objects of type U.\n    template <typename U>\n    void Dealloc(U* t, size_type n) {\n      typedef typename Allocator::template rebind<U>::other alloc_type;\n      alloc_type(alloc_).deallocate(t, n);\n    }\n\n    void DestroyNode(Node* node) {\n      alloc_.destroy(&node->kv);\n      Dealloc<Node>(node, 1);\n    }\n\n    void DestroyTree(Tree* tree) {\n      typename Allocator::template rebind<Tree>::other tree_allocator(alloc_);\n      tree_allocator.destroy(tree);\n      tree_allocator.deallocate(tree, 1);\n    }\n\n    void** CreateEmptyTable(size_type n) {\n      GOOGLE_DCHECK(n >= kMinTableSize);\n      GOOGLE_DCHECK_EQ(n & (n - 1), 0);\n      void** result = Alloc<void*>(n);\n      memset(result, 0, n * sizeof(result[0]));\n      return result;\n    }\n\n    // Return a randomish value.\n    size_type Seed() const {\n      // random_device can throw, so avoid it unless we are compiling with\n      // exceptions enabled.\n#if __cpp_exceptions && LANG_CXX11\n      try {\n        std::random_device rd;\n        std::knuth_b knuth(rd());\n        std::uniform_int_distribution<size_type> u;\n        
return u(knuth);\n      } catch (...) { }\n#endif\n      size_type s = static_cast<size_type>(reinterpret_cast<uintptr_t>(this));\n#if defined(__x86_64__) && defined(__GNUC__)\n      uint32 hi, lo;\n      asm(\"rdtsc\" : \"=a\" (lo), \"=d\" (hi));\n      s += ((static_cast<uint64>(hi) << 32) | lo);\n#endif\n      return s;\n    }\n\n    size_type num_elements_;\n    size_type num_buckets_;\n    size_type seed_;\n    size_type index_of_first_non_null_;\n    void** table_;  // an array with num_buckets_ entries\n    Allocator alloc_;\n    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(InnerMap);\n  };  // end of class InnerMap\n\n  typedef hash_map<Key, value_type*, hash<Key>, std::equal_to<Key>,\n                   MapAllocator<std::pair<const Key, MapPair<Key, T>*> > >\n      DeprecatedInnerMap;\n\n public:\n  // Iterators\n  class iterator_base {\n   public:\n    // We support \"old style\" and \"new style\" iterators for now. This is\n    // temporary.  Also, for \"iterator()\" we have an unknown category.\n    // TODO(gpike): get rid of this.\n    enum IteratorStyle { kUnknown, kOld, kNew };\n    explicit iterator_base(IteratorStyle style) : iterator_style_(style) {}\n\n    bool OldStyle() const {\n      GOOGLE_DCHECK_NE(iterator_style_, kUnknown);\n      return iterator_style_ == kOld;\n    }\n    bool UnknownStyle() const {\n      return iterator_style_ == kUnknown;\n    }\n    bool SameStyle(const iterator_base& other) const {\n      return iterator_style_ == other.iterator_style_;\n    }\n\n   private:\n    IteratorStyle iterator_style_;\n  };\n\n  class const_iterator\n      : private iterator_base,\n        public std::iterator<std::forward_iterator_tag, value_type, ptrdiff_t,\n                             const value_type*, const value_type&> {\n    typedef typename InnerMap::const_iterator InnerIt;\n    typedef typename DeprecatedInnerMap::const_iterator DeprecatedInnerIt;\n\n   public:\n    const_iterator() : iterator_base(iterator_base::kUnknown) {}\n    explicit 
const_iterator(const DeprecatedInnerIt& dit)\n        : iterator_base(iterator_base::kOld), dit_(dit) {}\n    explicit const_iterator(const InnerIt& it)\n        : iterator_base(iterator_base::kNew), it_(it) {}\n\n    const_iterator(const const_iterator& other)\n        : iterator_base(other), it_(other.it_), dit_(other.dit_) {}\n\n    const_reference operator*() const {\n      return this->OldStyle() ? *dit_->second : *it_->value();\n    }\n    const_pointer operator->() const { return &(operator*()); }\n\n    const_iterator& operator++() {\n      if (this->OldStyle())\n        ++dit_;\n      else\n        ++it_;\n      return *this;\n    }\n    const_iterator operator++(int) {\n      return this->OldStyle() ? const_iterator(dit_++) : const_iterator(it_++);\n    }\n\n    friend bool operator==(const const_iterator& a, const const_iterator& b) {\n      if (!a.SameStyle(b)) return false;\n      if (a.UnknownStyle()) return true;\n      return a.OldStyle() ? (a.dit_ == b.dit_) : (a.it_ == b.it_);\n    }\n    friend bool operator!=(const const_iterator& a, const const_iterator& b) {\n      return !(a == b);\n    }\n\n   private:\n    InnerIt it_;\n    DeprecatedInnerIt dit_;\n  };\n\n  class iterator : private iterator_base,\n                   public std::iterator<std::forward_iterator_tag, value_type> {\n    typedef typename InnerMap::iterator InnerIt;\n    typedef typename DeprecatedInnerMap::iterator DeprecatedInnerIt;\n\n   public:\n    iterator() : iterator_base(iterator_base::kUnknown) {}\n    explicit iterator(const DeprecatedInnerIt& dit)\n        : iterator_base(iterator_base::kOld), dit_(dit) {}\n    explicit iterator(const InnerIt& it)\n        : iterator_base(iterator_base::kNew), it_(it) {}\n\n    reference operator*() const {\n      return this->OldStyle() ? 
*dit_->second : *it_->value();\n    }\n    pointer operator->() const { return &(operator*()); }\n\n    iterator& operator++() {\n      if (this->OldStyle())\n        ++dit_;\n      else\n        ++it_;\n      return *this;\n    }\n    iterator operator++(int) {\n      return this->OldStyle() ? iterator(dit_++) : iterator(it_++);\n    }\n\n    // Allow implicit conversion to const_iterator.\n    operator const_iterator() const {\n      return this->OldStyle() ?\n          const_iterator(typename DeprecatedInnerMap::const_iterator(dit_)) :\n          const_iterator(typename InnerMap::const_iterator(it_));\n    }\n\n    friend bool operator==(const iterator& a, const iterator& b) {\n      if (!a.SameStyle(b)) return false;\n      if (a.UnknownStyle()) return true;\n      return a.OldStyle() ? a.dit_ == b.dit_ : a.it_ == b.it_;\n    }\n    friend bool operator!=(const iterator& a, const iterator& b) {\n      return !(a == b);\n    }\n\n   private:\n    friend class Map;\n\n    InnerIt it_;\n    DeprecatedInnerIt dit_;\n  };\n\n  iterator begin() {\n    return old_style_ ? iterator(deprecated_elements_->begin())\n                      : iterator(elements_->begin());\n  }\n  iterator end() {\n    return old_style_ ? iterator(deprecated_elements_->end())\n                      : iterator(elements_->end());\n  }\n  const_iterator begin() const {\n    return old_style_ ? const_iterator(deprecated_elements_->begin())\n                      : const_iterator(iterator(elements_->begin()));\n  }\n  const_iterator end() const {\n    return old_style_ ? const_iterator(deprecated_elements_->end())\n                      : const_iterator(iterator(elements_->end()));\n  }\n  const_iterator cbegin() const { return begin(); }\n  const_iterator cend() const { return end(); }\n\n  // Capacity\n  size_type size() const {\n    return old_style_ ? 
deprecated_elements_->size() : elements_->size();\n  }\n  bool empty() const { return size() == 0; }\n\n  // Element access\n  T& operator[](const key_type& key) {\n    value_type** value =\n        old_style_ ? &(*deprecated_elements_)[key] : &(*elements_)[key];\n    if (*value == NULL) {\n      *value = CreateValueTypeInternal(key);\n      internal::MapValueInitializer<google::protobuf::is_proto_enum<T>::value,\n                                    T>::Initialize((*value)->second,\n                                                   default_enum_value_);\n    }\n    return (*value)->second;\n  }\n  const T& at(const key_type& key) const {\n    const_iterator it = find(key);\n    GOOGLE_CHECK(it != end());\n    return it->second;\n  }\n  T& at(const key_type& key) {\n    iterator it = find(key);\n    GOOGLE_CHECK(it != end());\n    return it->second;\n  }\n\n  // Lookup\n  size_type count(const key_type& key) const {\n    if (find(key) != end()) assert(key == find(key)->first);\n    return find(key) == end() ? 0 : 1;\n  }\n  const_iterator find(const key_type& key) const {\n    return old_style_ ? const_iterator(deprecated_elements_->find(key))\n        : const_iterator(iterator(elements_->find(key)));\n  }\n  iterator find(const key_type& key) {\n    return old_style_ ? 
iterator(deprecated_elements_->find(key))\n                      : iterator(elements_->find(key));\n  }\n  std::pair<const_iterator, const_iterator> equal_range(\n      const key_type& key) const {\n    const_iterator it = find(key);\n    if (it == end()) {\n      return std::pair<const_iterator, const_iterator>(it, it);\n    } else {\n      const_iterator begin = it++;\n      return std::pair<const_iterator, const_iterator>(begin, it);\n    }\n  }\n  std::pair<iterator, iterator> equal_range(const key_type& key) {\n    iterator it = find(key);\n    if (it == end()) {\n      return std::pair<iterator, iterator>(it, it);\n    } else {\n      iterator begin = it++;\n      return std::pair<iterator, iterator>(begin, it);\n    }\n  }\n\n  // insert\n  std::pair<iterator, bool> insert(const value_type& value) {\n    if (old_style_) {\n      iterator it = find(value.first);\n      if (it != end()) {\n        return std::pair<iterator, bool>(it, false);\n      } else {\n        return std::pair<iterator, bool>(\n            iterator(deprecated_elements_->insert(std::pair<Key, value_type*>(\n                value.first, CreateValueTypeInternal(value))).first), true);\n      }\n    } else {\n      std::pair<typename InnerMap::iterator, bool> p =\n          elements_->insert(value.first);\n      if (p.second) {\n        p.first->value() = CreateValueTypeInternal(value);\n      }\n      return std::pair<iterator, bool>(iterator(p.first), p.second);\n    }\n  }\n  template <class InputIt>\n  void insert(InputIt first, InputIt last) {\n    for (InputIt it = first; it != last; ++it) {\n      iterator exist_it = find(it->first);\n      if (exist_it == end()) {\n        operator[](it->first) = it->second;\n      }\n    }\n  }\n\n  // Erase and clear\n  size_type erase(const key_type& key) {\n    iterator it = find(key);\n    if (it == end()) {\n      return 0;\n    } else {\n      erase(it);\n      return 1;\n    }\n  }\n  iterator erase(iterator pos) {\n    if (arena_ == NULL) 
delete pos.operator->();\n    iterator i = pos++;\n    if (old_style_)\n      deprecated_elements_->erase(i.dit_);\n    else\n      elements_->erase(i.it_);\n    return pos;\n  }\n  void erase(iterator first, iterator last) {\n    while (first != last) {\n      first = erase(first);\n    }\n  }\n  void clear() { erase(begin(), end()); }\n\n  // Assign\n  Map& operator=(const Map& other) {\n    if (this != &other) {\n      clear();\n      insert(other.begin(), other.end());\n    }\n    return *this;\n  }\n\n  void swap(Map& other) {\n    if (arena_ == other.arena_ && old_style_ == other.old_style_) {\n      std::swap(default_enum_value_, other.default_enum_value_);\n      if (old_style_) {\n        std::swap(deprecated_elements_, other.deprecated_elements_);\n      } else {\n        std::swap(elements_, other.elements_);\n      }\n    } else {\n      // TODO(zuguang): optimize this. The temporary copy can be allocated\n      // in the same arena as the other message, and the \"other = copy\" can\n      // be replaced with the fast-path swap above.\n      Map copy = *this;\n      *this = other;\n      other = copy;\n    }\n  }\n\n  // Access to hasher.  Currently this returns a copy, but it may\n  // be modified to return a const reference in the future.\n  hasher hash_function() const {\n    return old_style_ ? 
deprecated_elements_->hash_function()\n                      : elements_->hash_function();\n  }\n\n private:\n  // Set default enum value only for proto2 map field whose value is enum type.\n  void SetDefaultEnumValue(int default_enum_value) {\n    default_enum_value_ = default_enum_value;\n  }\n\n  value_type* CreateValueTypeInternal(const Key& key) {\n    if (arena_ == NULL) {\n      return new value_type(key);\n    } else {\n      value_type* value = reinterpret_cast<value_type*>(\n          Arena::CreateArray<uint8>(arena_, sizeof(value_type)));\n      Arena::CreateInArenaStorage(const_cast<Key*>(&value->first), arena_);\n      Arena::CreateInArenaStorage(&value->second, arena_);\n      const_cast<Key&>(value->first) = key;\n      return value;\n    }\n  }\n\n  value_type* CreateValueTypeInternal(const value_type& value) {\n    if (arena_ == NULL) {\n      return new value_type(value);\n    } else {\n      value_type* p = reinterpret_cast<value_type*>(\n          Arena::CreateArray<uint8>(arena_, sizeof(value_type)));\n      Arena::CreateInArenaStorage(const_cast<Key*>(&p->first), arena_);\n      Arena::CreateInArenaStorage(&p->second, arena_);\n      const_cast<Key&>(p->first) = value.first;\n      p->second = value.second;\n      return p;\n    }\n  }\n\n  Arena* arena_;\n  int default_enum_value_;\n  // The following is a tagged union because we support two map styles\n  // for now.\n  // TODO(gpike): get rid of the old style.\n  const bool old_style_;\n  union {\n    InnerMap* elements_;\n    DeprecatedInnerMap* deprecated_elements_;\n  };\n\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  template <typename K, typename V,\n            internal::WireFormatLite::FieldType key_wire_type,\n            internal::WireFormatLite::FieldType value_wire_type,\n            int default_enum_value>\n  friend class internal::MapFieldLite;\n};\n\n}  // namespace protobuf\n}  // namespace 
google\n\nGOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_START\ntemplate<>\nstruct hash<google::protobuf::MapKey> {\n  size_t\n  operator()(const google::protobuf::MapKey& map_key) const {\n    switch (map_key.type()) {\n      case google::protobuf::FieldDescriptor::CPPTYPE_DOUBLE:\n      case google::protobuf::FieldDescriptor::CPPTYPE_FLOAT:\n      case google::protobuf::FieldDescriptor::CPPTYPE_ENUM:\n      case google::protobuf::FieldDescriptor::CPPTYPE_MESSAGE:\n        GOOGLE_LOG(FATAL) << \"Unsupported\";\n        break;\n      case google::protobuf::FieldDescriptor::CPPTYPE_STRING:\n        return hash<string>()(map_key.GetStringValue());\n      case google::protobuf::FieldDescriptor::CPPTYPE_INT64:\n        return hash< ::google::protobuf::int64>()(map_key.GetInt64Value());\n      case google::protobuf::FieldDescriptor::CPPTYPE_INT32:\n        return hash< ::google::protobuf::int32>()(map_key.GetInt32Value());\n      case google::protobuf::FieldDescriptor::CPPTYPE_UINT64:\n        return hash< ::google::protobuf::uint64>()(map_key.GetUInt64Value());\n      case google::protobuf::FieldDescriptor::CPPTYPE_UINT32:\n        return hash< ::google::protobuf::uint32>()(map_key.GetUInt32Value());\n      case google::protobuf::FieldDescriptor::CPPTYPE_BOOL:\n        return hash<bool>()(map_key.GetBoolValue());\n    }\n    GOOGLE_LOG(FATAL) << \"Can't get here.\";\n    return 0;\n  }\n  bool\n  operator()(const google::protobuf::MapKey& map_key1,\n             const google::protobuf::MapKey& map_key2) const {\n    return map_key1 < map_key2;\n  }\n};\nGOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_END\n\n#endif  // GOOGLE_PROTOBUF_MAP_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/map_entry.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_MAP_ENTRY_H__\n#define GOOGLE_PROTOBUF_MAP_ENTRY_H__\n\n#include <google/protobuf/generated_message_reflection.h>\n#include <google/protobuf/map_entry_lite.h>\n#include <google/protobuf/map_type_handler.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/reflection_ops.h>\n#include <google/protobuf/unknown_field_set.h>\n#include <google/protobuf/wire_format_lite_inl.h>\n\nnamespace google {\nnamespace protobuf {\nclass Arena;\nnamespace internal {\ntemplate <typename Key, typename Value,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nclass MapField;\n}\n}\n\nnamespace protobuf {\nnamespace internal {\n\n// Register all MapEntry default instances so we can delete them in\n// ShutdownProtobufLibrary().\nvoid LIBPROTOBUF_EXPORT RegisterMapEntryDefaultInstance(\n    MessageLite* default_instance);\n\n// This is the common base class for MapEntry. 
It is used by MapFieldBase in\n// reflection api, in which the static type of key and value is unknown.\nclass LIBPROTOBUF_EXPORT MapEntryBase : public Message {\n public:\n  ::google::protobuf::Metadata GetMetadata() const {\n    ::google::protobuf::Metadata metadata;\n    metadata.descriptor = descriptor_;\n    metadata.reflection = reflection_;\n    return metadata;\n  }\n\n protected:\n  MapEntryBase() : descriptor_(NULL), reflection_(NULL) {  }\n  virtual ~MapEntryBase() {}\n\n  const Descriptor* descriptor_;\n  const Reflection* reflection_;\n};\n\n// MapEntry is the returned google::protobuf::Message when calling AddMessage of\n// google::protobuf::Reflection. In order to let it work with generated message\n// reflection, its in-memory type is the same as generated message with the same\n// fields. However, in order to decide the in-memory type of key/value, we need\n// to know both their cpp type in generated api and proto type. In\n// implementation, all in-memory types have related wire format functions to\n// support except ArenaStringPtr. Therefore, we need to define another type with\n// supporting wire format functions. 
Since this type is only used as return type\n// of MapEntry accessors, it's named MapEntry accessor type.\n//\n// cpp type:               the type visible to users in public API.\n// proto type:             WireFormatLite::FieldType of the field.\n// in-memory type:         type of the data member used to stored this field.\n// MapEntry accessor type: type used in MapEntry getters/mutators to access the\n//                         field.\n//\n// cpp type | proto type  | in-memory type | MapEntry accessor type\n// int32      TYPE_INT32    int32            int32\n// int32      TYPE_FIXED32  int32            int32\n// string     TYPE_STRING   ArenaStringPtr   string\n// FooEnum    TYPE_ENUM     int              int\n// FooMessage TYPE_MESSAGE  FooMessage*      FooMessage\n//\n// The in-memory types of primitive types can be inferred from its proto type,\n// while we need to explicitly specify the cpp type if proto type is\n// TYPE_MESSAGE to infer the in-memory type.  Moreover, default_enum_value is\n// used to initialize enum field in proto2.\ntemplate <typename Key, typename Value,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nclass MapEntry : public MapEntryBase {\n  // Provide utilities to parse/serialize key/value.  Provide utilities to\n  // manipulate internal stored type.\n  typedef MapTypeHandler<kKeyFieldType, Key> KeyTypeHandler;\n  typedef MapTypeHandler<kValueFieldType, Value> ValueTypeHandler;\n\n  // Enum type cannot be used for MapTypeHandler::Read. 
Define a type\n  // which will replace Enum with int.\n  typedef typename KeyTypeHandler::MapEntryAccessorType KeyMapEntryAccessorType;\n  typedef typename ValueTypeHandler::MapEntryAccessorType\n      ValueMapEntryAccessorType;\n\n  // Abbreviation for MapEntry\n  typedef typename google::protobuf::internal::MapEntry<\n      Key, Value, kKeyFieldType, kValueFieldType, default_enum_value> EntryType;\n\n  // Abbreviation for MapEntryLite\n  typedef typename google::protobuf::internal::MapEntryLite<\n      Key, Value, kKeyFieldType, kValueFieldType, default_enum_value>\n      EntryLiteType;\n\n public:\n  ~MapEntry() {\n    if (this == default_instance_) {\n      delete reflection_;\n    }\n  }\n\n  // accessors ======================================================\n\n  virtual inline const KeyMapEntryAccessorType& key() const {\n    return entry_lite_.key();\n  }\n  inline KeyMapEntryAccessorType* mutable_key() {\n    return entry_lite_.mutable_key();\n  }\n  virtual inline const ValueMapEntryAccessorType& value() const {\n    return entry_lite_.value();\n  }\n  inline ValueMapEntryAccessorType* mutable_value() {\n    return entry_lite_.mutable_value();\n  }\n\n  // implements Message =============================================\n\n  bool MergePartialFromCodedStream(::google::protobuf::io::CodedInputStream* input) {\n    return entry_lite_.MergePartialFromCodedStream(input);\n  }\n\n  size_t ByteSizeLong() const {\n    return entry_lite_.ByteSizeLong();\n  }\n\n  void SerializeWithCachedSizes(::google::protobuf::io::CodedOutputStream* output) const {\n    entry_lite_.SerializeWithCachedSizes(output);\n  }\n\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(bool deterministic,\n                                                   ::google::protobuf::uint8* output) const {\n    return entry_lite_.InternalSerializeWithCachedSizesToArray(deterministic,\n                                                               output);\n  }\n\n  int 
GetCachedSize() const {\n    return entry_lite_.GetCachedSize();\n  }\n\n  bool IsInitialized() const {\n    return entry_lite_.IsInitialized();\n  }\n\n  Message* New() const {\n    MapEntry* entry = new MapEntry;\n    entry->descriptor_ = descriptor_;\n    entry->reflection_ = reflection_;\n    entry->set_default_instance(default_instance_);\n    return entry;\n  }\n\n  Message* New(Arena* arena) const {\n    MapEntry* entry = Arena::CreateMessage<MapEntry>(arena);\n    entry->descriptor_ = descriptor_;\n    entry->reflection_ = reflection_;\n    entry->set_default_instance(default_instance_);\n    return entry;\n  }\n\n  int SpaceUsed() const {\n    int size = sizeof(MapEntry);\n    size += entry_lite_.SpaceUsed();\n    return size;\n  }\n\n  void CopyFrom(const ::google::protobuf::Message& from) {\n    Clear();\n    MergeFrom(from);\n  }\n\n  void MergeFrom(const ::google::protobuf::Message& from) {\n    GOOGLE_CHECK_NE(&from, this);\n    const MapEntry* source = dynamic_cast_if_available<const MapEntry*>(&from);\n    if (source == NULL) {\n      ReflectionOps::Merge(from, this);\n    } else {\n      MergeFrom(*source);\n    }\n  }\n\n  void CopyFrom(const MapEntry& from) {\n    Clear();\n    MergeFrom(from);\n  }\n\n  void MergeFrom(const MapEntry& from) {\n    entry_lite_.MergeFrom(from.entry_lite_);\n  }\n\n  void Clear() {\n    entry_lite_.Clear();\n  }\n\n  void InitAsDefaultInstance() {\n    entry_lite_.InitAsDefaultInstance();\n  }\n\n  Arena* GetArena() const {\n    return entry_lite_.GetArena();\n  }\n\n  // Create default MapEntry instance for given descriptor. Descriptor has to be\n  // given when creating default MapEntry instance because different map field\n  // may have the same type and MapEntry class. 
The given descriptor is needed\n  // to distinguish instances of the same MapEntry class.\n  static MapEntry* CreateDefaultInstance(const Descriptor* descriptor) {\n    MapEntry* entry = new MapEntry;\n    const Reflection* reflection = new GeneratedMessageReflection(\n        descriptor, entry, offsets_,\n        GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MapEntry, entry_lite_._has_bits_),\n        GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MapEntry, _unknown_fields_), -1,\n        DescriptorPool::generated_pool(),\n        ::google::protobuf::MessageFactory::generated_factory(),\n        sizeof(MapEntry),\n        GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MapEntry, _internal_metadata_));\n    entry->descriptor_ = descriptor;\n    entry->reflection_ = reflection;\n    entry->set_default_instance(entry);\n    entry->InitAsDefaultInstance();\n    RegisterMapEntryDefaultInstance(entry);\n    return entry;\n  }\n\n private:\n  MapEntry()\n      : _internal_metadata_(NULL), default_instance_(NULL), entry_lite_() {}\n\n  explicit MapEntry(Arena* arena)\n      : _internal_metadata_(arena),\n        default_instance_(NULL),\n        entry_lite_(arena) {}\n\n  inline Arena* GetArenaNoVirtual() const {\n    return entry_lite_.GetArenaNoVirtual();\n  }\n\n  void set_default_instance(MapEntry* default_instance) {\n    default_instance_ = default_instance;\n    entry_lite_.set_default_instance(&default_instance->entry_lite_);\n  }\n\n  static int offsets_[2];\n  UnknownFieldSet _unknown_fields_;\n  InternalMetadataWithArena _internal_metadata_;\n  MapEntry* default_instance_;\n  EntryLiteType entry_lite_;\n\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  template <typename K, typename V, WireFormatLite::FieldType k_wire_type,\n            WireFormatLite::FieldType, int default_enum>\n  friend class internal::MapField;\n  friend class internal::GeneratedMessageReflection;\n\n  
GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MapEntry);\n};\n\ntemplate <typename Key, typename Value, WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType, int default_enum_value>\nint MapEntry<Key, Value, kKeyFieldType, kValueFieldType,\n             default_enum_value>::offsets_[2] = {\n    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MapEntry, entry_lite_.key_),\n    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MapEntry, entry_lite_.value_),\n};\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_MAP_ENTRY_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/map_entry_lite.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_MAP_ENTRY_LITE_H__\n#define GOOGLE_PROTOBUF_MAP_ENTRY_LITE_H__\n\n#include <assert.h>\n#include <google/protobuf/map_type_handler.h>\n#include <google/protobuf/wire_format_lite_inl.h>\n\nnamespace google {\nnamespace protobuf {\nclass Arena;\nnamespace internal {\ntemplate <typename Key, typename Value,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nclass MapEntry;\ntemplate <typename Key, typename Value,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nclass MapFieldLite;\n}  // namespace internal\n}  // namespace protobuf\n\nnamespace protobuf {\nnamespace internal {\n\n// MoveHelper::Move is used to set *dest.  It copies *src, or moves it (in\n// the C++11 sense), or swaps it. 
*src is left in a sane state for\n// subsequent destruction, but shouldn't be used for anything.\ntemplate <bool is_enum, bool is_message, bool is_stringlike, typename T>\nstruct MoveHelper {  // primitives\n  static void Move(T* src, T* dest) { *dest = *src; }\n};\n\ntemplate <bool is_message, bool is_stringlike, typename T>\nstruct MoveHelper<true, is_message, is_stringlike, T> {  // enums\n  static void Move(T* src, T* dest) { *dest = *src; }\n  // T is an enum here, so allow conversions to and from int.\n  static void Move(T* src, int* dest) { *dest = static_cast<int>(*src); }\n  static void Move(int* src, T* dest) { *dest = static_cast<T>(*src); }\n};\n\ntemplate <bool is_stringlike, typename T>\nstruct MoveHelper<false, true, is_stringlike, T> {  // messages\n  static void Move(T* src, T* dest) { dest->Swap(src); }\n};\n\ntemplate <typename T>\nstruct MoveHelper<false, false, true, T> {  // strings and similar\n  static void Move(T* src, T* dest) {\n#if __cplusplus >= 201103L\n    *dest = std::move(*src);\n#else\n    dest->swap(*src);\n#endif\n  }\n};\n\n// MapEntryLite is used to implement parsing and serialization of map for lite\n// runtime.\ntemplate <typename Key, typename Value,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nclass MapEntryLite : public MessageLite {\n  // Provide utilities to parse/serialize key/value.  Provide utilities to\n  // manipulate internal stored type.\n  typedef MapTypeHandler<kKeyFieldType, Key> KeyTypeHandler;\n  typedef MapTypeHandler<kValueFieldType, Value> ValueTypeHandler;\n\n  // Define internal memory layout. Strings and messages are stored as\n  // pointers, while other types are stored as values.\n  typedef typename KeyTypeHandler::TypeOnMemory KeyOnMemory;\n  typedef typename ValueTypeHandler::TypeOnMemory ValueOnMemory;\n\n  // Enum type cannot be used for MapTypeHandler::Read. 
Define a type\n  // which will replace Enum with int.\n  typedef typename KeyTypeHandler::MapEntryAccessorType KeyMapEntryAccessorType;\n  typedef typename ValueTypeHandler::MapEntryAccessorType\n      ValueMapEntryAccessorType;\n\n  // Constants for field number.\n  static const int kKeyFieldNumber = 1;\n  static const int kValueFieldNumber = 2;\n\n  // Constants for field tag.\n  static const uint8 kKeyTag = GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(\n      kKeyFieldNumber, KeyTypeHandler::kWireType);\n  static const uint8 kValueTag = GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(\n      kValueFieldNumber, ValueTypeHandler::kWireType);\n  static const size_t kTagSize = 1;\n\n public:\n  ~MapEntryLite() {\n    if (this != default_instance_) {\n      if (GetArenaNoVirtual() != NULL) return;\n      KeyTypeHandler::DeleteNoArena(key_);\n      ValueTypeHandler::DeleteNoArena(value_);\n    }\n  }\n\n  // accessors ======================================================\n\n  virtual inline const KeyMapEntryAccessorType& key() const {\n    return KeyTypeHandler::GetExternalReference(key_);\n  }\n  virtual inline const ValueMapEntryAccessorType& value() const {\n    GOOGLE_CHECK(default_instance_ != NULL);\n    return ValueTypeHandler::DefaultIfNotInitialized(value_,\n                                                    default_instance_->value_);\n  }\n  inline KeyMapEntryAccessorType* mutable_key() {\n    set_has_key();\n    return KeyTypeHandler::EnsureMutable(&key_, GetArenaNoVirtual());\n  }\n  inline ValueMapEntryAccessorType* mutable_value() {\n    set_has_value();\n    return ValueTypeHandler::EnsureMutable(&value_, GetArenaNoVirtual());\n  }\n\n  // implements MessageLite =========================================\n\n  // MapEntryLite is for implementation only and this function isn't called\n  // anywhere. 
Just provide a fake implementation here for MessageLite.\n  string GetTypeName() const { return \"\"; }\n\n  void CheckTypeAndMergeFrom(const MessageLite& other) {\n    MergeFrom(*::google::protobuf::down_cast<const MapEntryLite*>(&other));\n  }\n\n  bool MergePartialFromCodedStream(::google::protobuf::io::CodedInputStream* input) {\n    uint32 tag;\n\n    for (;;) {\n      // 1) corrupted data: return false;\n      // 2) unknown field: skip without putting into unknown field set;\n      // 3) unknown enum value: keep it in parsing. In proto2, caller should\n      // check the value and put this entry into containing message's unknown\n      // field set if the value is an unknown enum. In proto3, caller doesn't\n      // need to care whether the value is unknown enum;\n      // 4) missing key/value: missed key/value will have default value. caller\n      // should take this entry as if key/value is set to default value.\n      tag = input->ReadTag();\n      switch (tag) {\n        case kKeyTag:\n          if (!KeyTypeHandler::Read(input, mutable_key())) {\n            return false;\n          }\n          set_has_key();\n          if (!input->ExpectTag(kValueTag)) break;\n          GOOGLE_FALLTHROUGH_INTENDED;\n\n        case kValueTag:\n          if (!ValueTypeHandler::Read(input, mutable_value())) {\n            return false;\n          }\n          set_has_value();\n          if (input->ExpectAtEnd()) return true;\n          break;\n\n        default:\n          if (tag == 0 ||\n              WireFormatLite::GetTagWireType(tag) ==\n              WireFormatLite::WIRETYPE_END_GROUP) {\n            return true;\n          }\n          if (!WireFormatLite::SkipField(input, tag)) return false;\n          break;\n      }\n    }\n  }\n\n  size_t ByteSizeLong() const {\n    size_t size = 0;\n    size += has_key() ? kTagSize + KeyTypeHandler::ByteSize(key()) : 0;\n    size += has_value() ? 
kTagSize + ValueTypeHandler::ByteSize(value()) : 0;\n    return size;\n  }\n\n  void SerializeWithCachedSizes(::google::protobuf::io::CodedOutputStream* output) const {\n    KeyTypeHandler::Write(kKeyFieldNumber, key(), output);\n    ValueTypeHandler::Write(kValueFieldNumber, value(), output);\n  }\n\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(bool deterministic,\n                                                   ::google::protobuf::uint8* output) const {\n    output = KeyTypeHandler::InternalWriteToArray(kKeyFieldNumber, key(),\n                                                  deterministic, output);\n    output = ValueTypeHandler::InternalWriteToArray(kValueFieldNumber, value(),\n                                                    deterministic, output);\n    return output;\n  }\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n\n  int GetCachedSize() const {\n    int size = 0;\n    size += has_key()\n        ? kTagSize + KeyTypeHandler::GetCachedSize(key())\n        : 0;\n    size += has_value()\n        ? 
kTagSize + ValueTypeHandler::GetCachedSize(\n            value())\n        : 0;\n    return size;\n  }\n\n  bool IsInitialized() const { return ValueTypeHandler::IsInitialized(value_); }\n\n  MessageLite* New() const {\n    MapEntryLite* entry = new MapEntryLite;\n    entry->default_instance_ = default_instance_;\n    return entry;\n  }\n\n  MessageLite* New(Arena* arena) const {\n    MapEntryLite* entry = Arena::CreateMessage<MapEntryLite>(arena);\n    entry->default_instance_ = default_instance_;\n    return entry;\n  }\n\n  int SpaceUsed() const {\n    int size = sizeof(MapEntryLite);\n    size += KeyTypeHandler::SpaceUsedInMapEntry(key_);\n    size += ValueTypeHandler::SpaceUsedInMapEntry(value_);\n    return size;\n  }\n\n  void MergeFrom(const MapEntryLite& from) {\n    if (from._has_bits_[0]) {\n      if (from.has_key()) {\n        KeyTypeHandler::EnsureMutable(&key_, GetArenaNoVirtual());\n        KeyTypeHandler::Merge(from.key(), &key_, GetArenaNoVirtual());\n        set_has_key();\n      }\n      if (from.has_value()) {\n        ValueTypeHandler::EnsureMutable(&value_, GetArenaNoVirtual());\n        ValueTypeHandler::Merge(from.value(), &value_, GetArenaNoVirtual());\n        set_has_value();\n      }\n    }\n  }\n\n  void Clear() {\n    KeyTypeHandler::Clear(&key_, GetArenaNoVirtual());\n    ValueTypeHandler::ClearMaybeByDefaultEnum(\n        &value_, GetArenaNoVirtual(), default_enum_value);\n    clear_has_key();\n    clear_has_value();\n  }\n\n  void InitAsDefaultInstance() {\n    KeyTypeHandler::AssignDefaultValue(&key_);\n    ValueTypeHandler::AssignDefaultValue(&value_);\n  }\n\n  Arena* GetArena() const {\n    return GetArenaNoVirtual();\n  }\n\n  // Create a MapEntryLite for given key and value from google::protobuf::Map in\n  // serialization. This function is only called when value is enum. Enum is\n  // treated differently because its type in MapEntry is int and its type in\n  // google::protobuf::Map is enum. 
We cannot create a reference to int from an enum.\n  static MapEntryLite* EnumWrap(const Key& key, const Value value,\n                                Arena* arena) {\n    return Arena::CreateMessage<MapEnumEntryWrapper<\n        Key, Value, kKeyFieldType, kValueFieldType, default_enum_value> >(\n        arena, key, value);\n  }\n\n  // Like above, but for all the other types. This avoids value copy to create\n  // MapEntryLite from google::protobuf::Map in serialization.\n  static MapEntryLite* Wrap(const Key& key, const Value& value, Arena* arena) {\n    return Arena::CreateMessage<MapEntryWrapper<Key, Value, kKeyFieldType,\n                                                kValueFieldType,\n                                                default_enum_value> >(\n        arena, key, value);\n  }\n\n  // Parsing using MergePartialFromCodedStream, above, is not as\n  // efficient as it could be.  This helper class provides a speedier way.\n  template <typename MapField, typename Map>\n  class Parser {\n   public:\n    explicit Parser(MapField* mf) : mf_(mf), map_(mf->MutableMap()) {}\n\n    // This does what the typical MergePartialFromCodedStream() is expected to\n    // do, with the additional side-effect that if successful (i.e., if true is\n    // going to be its return value) it inserts the key-value pair into map_.\n    bool MergePartialFromCodedStream(::google::protobuf::io::CodedInputStream* input) {\n      // Look for the expected thing: a key and then a value.  If it fails,\n      // invoke the enclosing class's MergePartialFromCodedStream, or return\n      // false if that would be pointless.\n      if (input->ExpectTag(kKeyTag)) {\n        if (!KeyTypeHandler::Read(input, &key_)) {\n          return false;\n        }\n        // Peek at the next byte to see if it is kValueTag.  
If not, bail out.\n        const void* data;\n        int size;\n        input->GetDirectBufferPointerInline(&data, &size);\n        // We could use memcmp here, but we don't bother. The tag is one byte.\n        assert(kTagSize == 1);\n        if (size > 0 && *reinterpret_cast<const char*>(data) == kValueTag) {\n          typename Map::size_type size = map_->size();\n          value_ptr_ = &(*map_)[key_];\n          if (GOOGLE_PREDICT_TRUE(size != map_->size())) {\n            // We created a new key-value pair.  Fill in the value.\n            typedef\n                typename MapIf<ValueTypeHandler::kIsEnum, int*, Value*>::type T;\n            input->Skip(kTagSize);  // Skip kValueTag.\n            if (!ValueTypeHandler::Read(input,\n                                        reinterpret_cast<T>(value_ptr_))) {\n              map_->erase(key_);  // Failure! Undo insertion.\n              return false;\n            }\n            if (input->ExpectAtEnd()) return true;\n            return ReadBeyondKeyValuePair(input);\n          }\n        }\n      } else {\n        key_ = Key();\n      }\n\n      entry_.reset(mf_->NewEntry());\n      *entry_->mutable_key() = key_;\n      if (!entry_->MergePartialFromCodedStream(input)) return false;\n      return UseKeyAndValueFromEntry();\n    }\n\n    const Key& key() const { return key_; }\n    const Value& value() const { return *value_ptr_; }\n\n   private:\n    bool UseKeyAndValueFromEntry() GOOGLE_ATTRIBUTE_COLD {\n      // Update key_ in case we need it later (because key() is called).\n      // This is potentially inefficient, especially if the key is\n      // expensive to copy (e.g., a long string), but this is a cold\n      // path, so it's not a big deal.\n      key_ = entry_->key();\n      value_ptr_ = &(*map_)[key_];\n      MoveHelper<ValueTypeHandler::kIsEnum,\n                 ValueTypeHandler::kIsMessage,\n                 ValueTypeHandler::kWireType ==\n                 
WireFormatLite::WIRETYPE_LENGTH_DELIMITED,\n                 Value>::Move(entry_->mutable_value(), value_ptr_);\n      if (entry_->GetArena() != NULL) entry_.release();\n      return true;\n    }\n\n    // After reading a key and value successfully, and inserting that data\n    // into map_, we are not at the end of the input.  This is unusual, but\n    // allowed by the spec.\n    bool ReadBeyondKeyValuePair(::google::protobuf::io::CodedInputStream* input)\n        GOOGLE_ATTRIBUTE_COLD {\n      typedef MoveHelper<KeyTypeHandler::kIsEnum,\n                         KeyTypeHandler::kIsMessage,\n                         KeyTypeHandler::kWireType ==\n                         WireFormatLite::WIRETYPE_LENGTH_DELIMITED,\n                         Key> KeyMover;\n      typedef MoveHelper<ValueTypeHandler::kIsEnum,\n                         ValueTypeHandler::kIsMessage,\n                         ValueTypeHandler::kWireType ==\n                         WireFormatLite::WIRETYPE_LENGTH_DELIMITED,\n                         Value> ValueMover;\n      entry_.reset(mf_->NewEntry());\n      ValueMover::Move(value_ptr_, entry_->mutable_value());\n      map_->erase(key_);\n      KeyMover::Move(&key_, entry_->mutable_key());\n      if (!entry_->MergePartialFromCodedStream(input)) return false;\n      return UseKeyAndValueFromEntry();\n    }\n\n    MapField* const mf_;\n    Map* const map_;\n    Key key_;\n    Value* value_ptr_;\n    // On the fast path entry_ is not used.\n    google::protobuf::scoped_ptr<MapEntryLite> entry_;\n  };\n\n protected:\n  void set_has_key() { _has_bits_[0] |= 0x00000001u; }\n  bool has_key() const { return (_has_bits_[0] & 0x00000001u) != 0; }\n  void clear_has_key() { _has_bits_[0] &= ~0x00000001u; }\n  void set_has_value() { _has_bits_[0] |= 0x00000002u; }\n  bool has_value() const { return (_has_bits_[0] & 0x00000002u) != 0; }\n  void clear_has_value() { _has_bits_[0] &= ~0x00000002u; }\n\n private:\n  // Serializing a generated message containing map 
field involves serializing\n  // key-value pairs from google::protobuf::Map. The wire format of each key-value pair\n  // after serialization should be the same as that of a MapEntry message\n  // containing the same key and value inside it.  However, google::protobuf::Map doesn't\n  // store key and value as MapEntry message, which disables us to use existing\n  // code to serialize message. In order to use existing code to serialize\n  // message, we need to construct a MapEntry from key-value pair. But it\n  // involves copy of key and value to construct a MapEntry. In order to avoid\n  // this copy in constructing a MapEntry, we need the following class which\n  // only takes references of given key and value.\n  template <typename K, typename V, WireFormatLite::FieldType k_wire_type,\n            WireFormatLite::FieldType v_wire_type, int default_enum>\n  class MapEntryWrapper\n      : public MapEntryLite<K, V, k_wire_type, v_wire_type, default_enum> {\n    typedef MapEntryLite<K, V, k_wire_type, v_wire_type, default_enum> Base;\n    typedef typename Base::KeyMapEntryAccessorType KeyMapEntryAccessorType;\n    typedef typename Base::ValueMapEntryAccessorType ValueMapEntryAccessorType;\n\n   public:\n    MapEntryWrapper(Arena* arena, const K& key, const V& value)\n        : MapEntryLite<K, V, k_wire_type, v_wire_type, default_enum>(arena),\n          key_(key),\n          value_(value) {\n      Base::set_has_key();\n      Base::set_has_value();\n    }\n    inline const KeyMapEntryAccessorType& key() const { return key_; }\n    inline const ValueMapEntryAccessorType& value() const { return value_; }\n\n   private:\n    const Key& key_;\n    const Value& value_;\n\n    friend class ::google::protobuf::Arena;\n    typedef void InternalArenaConstructable_;\n    typedef void DestructorSkippable_;\n  };\n\n  // Like above, but for enum value only, which stores value instead of\n  // reference of value field inside. 
This is needed because the type of value\n  // field in constructor is an enum, while we need to store it as an int. If we\n  // initialize a reference to int with a reference to enum, compiler will\n  // generate a temporary int from enum and initialize the reference to int with\n  // the temporary.\n  template <typename K, typename V, WireFormatLite::FieldType k_wire_type,\n            WireFormatLite::FieldType v_wire_type, int default_enum>\n  class MapEnumEntryWrapper\n      : public MapEntryLite<K, V, k_wire_type, v_wire_type, default_enum> {\n    typedef MapEntryLite<K, V, k_wire_type, v_wire_type, default_enum> Base;\n    typedef typename Base::KeyMapEntryAccessorType KeyMapEntryAccessorType;\n    typedef typename Base::ValueMapEntryAccessorType ValueMapEntryAccessorType;\n\n   public:\n    MapEnumEntryWrapper(Arena* arena, const K& key, const V& value)\n        : MapEntryLite<K, V, k_wire_type, v_wire_type, default_enum>(arena),\n          key_(key),\n          value_(value) {\n      Base::set_has_key();\n      Base::set_has_value();\n    }\n    inline const KeyMapEntryAccessorType& key() const { return key_; }\n    inline const ValueMapEntryAccessorType& value() const { return value_; }\n\n   private:\n    const KeyMapEntryAccessorType& key_;\n    const ValueMapEntryAccessorType value_;\n\n    friend class google::protobuf::Arena;\n    typedef void DestructorSkippable_;\n  };\n\n  MapEntryLite() : default_instance_(NULL), arena_(NULL) {\n    KeyTypeHandler::Initialize(&key_, NULL);\n    ValueTypeHandler::InitializeMaybeByDefaultEnum(\n        &value_, default_enum_value, NULL);\n    _has_bits_[0] = 0;\n  }\n\n  explicit MapEntryLite(Arena* arena)\n      : default_instance_(NULL), arena_(arena) {\n    KeyTypeHandler::Initialize(&key_, arena);\n    ValueTypeHandler::InitializeMaybeByDefaultEnum(\n        &value_, default_enum_value, arena);\n    _has_bits_[0] = 0;\n  }\n\n  inline Arena* GetArenaNoVirtual() const {\n    return arena_;\n  }\n\n  void 
set_default_instance(MapEntryLite* default_instance) {\n    default_instance_ = default_instance;\n  }\n\n  MapEntryLite* default_instance_;\n\n  KeyOnMemory key_;\n  ValueOnMemory value_;\n  Arena* arena_;\n  uint32 _has_bits_[1];\n\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  template <typename K, typename V, WireFormatLite::FieldType,\n            WireFormatLite::FieldType, int>\n  friend class internal::MapEntry;\n  template <typename K, typename V, WireFormatLite::FieldType,\n            WireFormatLite::FieldType, int>\n  friend class internal::MapFieldLite;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MapEntryLite);\n};\n\n// Helpers for deterministic serialization =============================\n\n// This struct can be used with any generic sorting algorithm.  If the Key\n// type is relatively small and easy to copy then copying Keys into an\n// array of SortItems can be beneficial.  Then all the data the sorting\n// algorithm needs to touch is in that one array.\ntemplate <typename Key, typename PtrToKeyValuePair> struct SortItem {\n  SortItem() {}\n  explicit SortItem(PtrToKeyValuePair p) : first(p->first), second(p) {}\n\n  Key first;\n  PtrToKeyValuePair second;\n};\n\ntemplate <typename T> struct CompareByFirstField {\n  bool operator()(const T& a, const T& b) const {\n    return a.first < b.first;\n  }\n};\n\ntemplate <typename T> struct CompareByDerefFirst {\n  bool operator()(const T& a, const T& b) const {\n    return a->first < b->first;\n  }\n};\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_MAP_ENTRY_LITE_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/map_field.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_MAP_FIELD_H__\n#define GOOGLE_PROTOBUF_MAP_FIELD_H__\n\n#include <google/protobuf/stubs/atomicops.h>\n#include <google/protobuf/stubs/mutex.h>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/generated_message_reflection.h>\n#include <google/protobuf/arena.h>\n#include <google/protobuf/map_entry.h>\n#include <google/protobuf/map_field_lite.h>\n#include <google/protobuf/map_type_handler.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/unknown_field_set.h>\n\n\nnamespace google {\nnamespace protobuf {\nclass DynamicMessage;\nclass MapKey;\nnamespace internal {\n\nclass ContendedMapCleanTest;\nclass GeneratedMessageReflection;\nclass MapFieldAccessor;\n\n// This class provides access to map field using reflection, which is the same\n// as those provided for RepeatedPtrField<Message>. It is used for internal\n// reflection implentation only. 
Users should never use this directly.\nclass LIBPROTOBUF_EXPORT MapFieldBase {\n public:\n  MapFieldBase()\n      : arena_(NULL),\n        repeated_field_(NULL),\n        entry_descriptor_(NULL),\n        assign_descriptor_callback_(NULL),\n        state_(STATE_MODIFIED_MAP) {}\n  explicit MapFieldBase(Arena* arena)\n      : arena_(arena),\n        repeated_field_(NULL),\n        entry_descriptor_(NULL),\n        assign_descriptor_callback_(NULL),\n        state_(STATE_MODIFIED_MAP) {\n    // Mutex's destructor needs to be called explicitly to release resources\n    // acquired in its constructor.\n    arena->OwnDestructor(&mutex_);\n  }\n  virtual ~MapFieldBase();\n\n  // Returns reference to internal repeated field. Data written using\n  // google::protobuf::Map's api prior to calling this function is guarantted to be\n  // included in repeated field.\n  const RepeatedPtrFieldBase& GetRepeatedField() const;\n\n  // Like above. Returns mutable pointer to the internal repeated field.\n  RepeatedPtrFieldBase* MutableRepeatedField();\n\n  // Pure virtual map APIs for Map Reflection.\n  virtual bool ContainsMapKey(const MapKey& map_key) const = 0;\n  virtual bool InsertOrLookupMapValue(\n      const MapKey& map_key, MapValueRef* val) = 0;\n  virtual bool DeleteMapValue(const MapKey& map_key) = 0;\n  virtual bool EqualIterator(const MapIterator& a,\n                             const MapIterator& b) const = 0;\n  virtual void MapBegin(MapIterator* map_iter) const = 0;\n  virtual void MapEnd(MapIterator* map_iter) const = 0;\n  // Sync Map with repeated field and returns the size of map.\n  virtual int size() const = 0;\n\n  // Returns the number of bytes used by the repeated field, excluding\n  // sizeof(*this)\n  int SpaceUsedExcludingSelf() const;\n\n protected:\n  // Gets the size of space used by map field.\n  virtual int SpaceUsedExcludingSelfNoLock() const;\n\n  // Synchronizes the content in Map to RepeatedPtrField if there is any change\n  // to Map after last 
synchronization.\n  void SyncRepeatedFieldWithMap() const;\n  virtual void SyncRepeatedFieldWithMapNoLock() const;\n\n  // Synchronizes the content in RepeatedPtrField to Map if there is any change\n  // to RepeatedPtrField after last synchronization.\n  void SyncMapWithRepeatedField() const;\n  virtual void SyncMapWithRepeatedFieldNoLock() const {}\n\n  // Tells MapFieldBase that there is new change to Map.\n  void SetMapDirty();\n\n  // Tells MapFieldBase that there is new change to RepeatedPTrField.\n  void SetRepeatedDirty();\n\n  // Provides derived class the access to repeated field.\n  void* MutableRepeatedPtrField() const;\n\n  // Creates descriptor for only one time.\n  void InitMetadataOnce() const;\n\n  enum State {\n    STATE_MODIFIED_MAP = 0,       // map has newly added data that has not been\n                                  // synchronized to repeated field\n    STATE_MODIFIED_REPEATED = 1,  // repeated field has newly added data that\n                                  // has not been synchronized to map\n    CLEAN = 2,  // data in map and repeated field are same\n  };\n\n  Arena* arena_;\n  mutable RepeatedPtrField<Message>* repeated_field_;\n  // MapEntry can only be created from MapField. To create MapEntry, MapField\n  // needs to know its descriptor, because MapEntry is not generated class which\n  // cannot initialize its own descriptor by calling generated\n  // descriptor-assign-function. 
Thus, we need to register a callback to\n  // initialize MapEntry's descriptor.\n  const Descriptor** entry_descriptor_;\n  void (*assign_descriptor_callback_)();\n\n  mutable Mutex mutex_;  // The thread to synchronize map and repeated field\n                         // needs to get lock first;\n  mutable volatile Atomic32 state_;  // 0: STATE_MODIFIED_MAP\n                                     // 1: STATE_MODIFIED_REPEATED\n                                     // 2: CLEAN\n\n private:\n  friend class ContendedMapCleanTest;\n  friend class GeneratedMessageReflection;\n  friend class MapFieldAccessor;\n  friend class ::google::protobuf::DynamicMessage;\n\n  // Virtual helper methods for MapIterator. MapIterator doesn't have the\n  // type helper for key and value. Call these help methods to deal with\n  // different types. Real helper methods are implemented in\n  // TypeDefinedMapFieldBase.\n  friend class ::google::protobuf::MapIterator;\n  // Allocate map<...>::iterator for MapIterator.\n  virtual void InitializeIterator(MapIterator* map_iter) const = 0;\n\n  // DeleteIterator() is called by the destructor of MapIterator only.\n  // It deletes map<...>::iterator for MapIterator.\n  virtual void DeleteIterator(MapIterator* map_iter) const = 0;\n\n  // Copy the map<...>::iterator from other_iterator to\n  // this_iterator.\n  virtual void CopyIterator(MapIterator* this_iterator,\n                            const MapIterator& other_iterator) const = 0;\n\n  // IncreaseIterator() is called by operator++() of MapIterator only.\n  // It implements the ++ operator of MapIterator.\n  virtual void IncreaseIterator(MapIterator* map_iter) const = 0;\n};\n\n// This class provides common Map Reflection implementations for generated\n// message and dynamic message.\ntemplate<typename Key, typename T>\nclass TypeDefinedMapFieldBase : public MapFieldBase {\n public:\n  TypeDefinedMapFieldBase() {}\n  explicit TypeDefinedMapFieldBase(Arena* arena) : MapFieldBase(arena) {}\n  
~TypeDefinedMapFieldBase() {}\n  void MapBegin(MapIterator* map_iter) const;\n  void MapEnd(MapIterator* map_iter) const;\n  bool EqualIterator(const MapIterator& a, const MapIterator& b) const;\n\n  virtual const Map<Key, T>& GetMap() const = 0;\n  virtual Map<Key, T>* MutableMap() = 0;\n\n protected:\n  typename Map<Key, T>::const_iterator& InternalGetIterator(\n      const MapIterator* map_iter) const;\n\n private:\n  void InitializeIterator(MapIterator* map_iter) const;\n  void DeleteIterator(MapIterator* map_iter) const;\n  void CopyIterator(MapIterator* this_iteratorm,\n                    const MapIterator& that_iterator) const;\n  void IncreaseIterator(MapIterator* map_iter) const;\n\n  virtual void SetMapIteratorValue(MapIterator* map_iter) const = 0;\n};\n\n// This class provides access to map field using generated api. It is used for\n// internal generated message implentation only. Users should never use this\n// directly.\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value = 0>\nclass MapField : public TypeDefinedMapFieldBase<Key, T>,\n                 public MapFieldLite<Key, T, kKeyFieldType, kValueFieldType,\n                                     default_enum_value> {\n  // Provide utilities to parse/serialize key/value.  
Provide utilities to\n  // manipulate internal stored type.\n  typedef MapTypeHandler<kKeyFieldType, Key> KeyTypeHandler;\n  typedef MapTypeHandler<kValueFieldType, T> ValueTypeHandler;\n\n  // Define message type for internal repeated field.\n  typedef MapEntry<Key, T, kKeyFieldType, kValueFieldType, default_enum_value>\n      EntryType;\n  typedef MapEntryLite<Key, T, kKeyFieldType, kValueFieldType,\n                       default_enum_value> EntryLiteType;\n\n  // Define abbreviation for parent MapFieldLite\n  typedef MapFieldLite<Key, T, kKeyFieldType, kValueFieldType,\n                       default_enum_value> MapFieldLiteType;\n\n  // Enum needs to be handled differently from other types because it has\n  // different exposed type in google::protobuf::Map's api and repeated field's api. For\n  // details see the comment in the implementation of\n  // SyncMapWithRepeatedFieldNoLock.\n  static const bool kIsValueEnum = ValueTypeHandler::kIsEnum;\n  typedef typename MapIf<kIsValueEnum, T, const T&>::type CastValueType;\n\n public:\n  MapField();\n  explicit MapField(Arena* arena);\n  // MapField doesn't own the default_entry, which means default_entry must\n  // outlive the lifetime of MapField.\n  MapField(const Message* default_entry);\n  // For tests only.\n  MapField(Arena* arena, const Message* default_entry);\n  ~MapField();\n\n  // Implement MapFieldBase\n  bool ContainsMapKey(const MapKey& map_key) const;\n  bool InsertOrLookupMapValue(const MapKey& map_key, MapValueRef* val);\n  bool DeleteMapValue(const MapKey& map_key);\n\n  // Accessors\n  const Map<Key, T>& GetMap() const;\n  Map<Key, T>* MutableMap();\n\n  // Convenient methods for generated message implementation.\n  int size() const;\n  void Clear();\n  void MergeFrom(const MapFieldLiteType& other);\n  void Swap(MapFieldLiteType* other);\n\n  // Allocates metadata only if this MapField is part of a generated message.\n  void SetEntryDescriptor(const Descriptor** descriptor);\n  void 
SetAssignDescriptorCallback(void (*callback)());\n\n private:\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n\n  // MapField needs MapEntry's default instance to create new MapEntry.\n  void InitDefaultEntryOnce() const;\n\n  // Manually set default entry instance. For test only.\n  void SetDefaultEntryOnce(const EntryType* default_entry) const;\n\n  // Convenient methods to get internal google::protobuf::Map\n  const Map<Key, T>& GetInternalMap() const;\n  Map<Key, T>* MutableInternalMap();\n\n  // Implements MapFieldBase\n  void SyncRepeatedFieldWithMapNoLock() const;\n  void SyncMapWithRepeatedFieldNoLock() const;\n  int SpaceUsedExcludingSelfNoLock() const;\n\n  void SetMapIteratorValue(MapIterator* map_iter) const;\n\n  mutable const EntryType* default_entry_;\n\n  friend class ::google::protobuf::Arena;\n};\n\nclass LIBPROTOBUF_EXPORT DynamicMapField: public TypeDefinedMapFieldBase<MapKey, MapValueRef> {\n public:\n  explicit DynamicMapField(const Message* default_entry);\n  DynamicMapField(const Message* default_entry, Arena* arena);\n  ~DynamicMapField();\n\n  // Implement MapFieldBase\n  bool ContainsMapKey(const MapKey& map_key) const;\n  bool InsertOrLookupMapValue(const MapKey& map_key, MapValueRef* val);\n  bool DeleteMapValue(const MapKey& map_key);\n\n  const Map<MapKey, MapValueRef>& GetMap() const;\n  Map<MapKey, MapValueRef>* MutableMap();\n\n  int size() const;\n\n private:\n  Map<MapKey, MapValueRef> map_;\n  const Message* default_entry_;\n\n  // Implements MapFieldBase\n  void SyncRepeatedFieldWithMapNoLock() const;\n  void SyncMapWithRepeatedFieldNoLock() const;\n  int SpaceUsedExcludingSelfNoLock() const;\n  void SetMapIteratorValue(MapIterator* map_iter) const;\n};\n\n}  // namespace internal\n\nclass LIBPROTOBUF_EXPORT MapIterator {\n public:\n  MapIterator(Message* message, const FieldDescriptor* field) {\n    const Reflection* reflection = message->GetReflection();\n    map_ = 
reflection->MapData(message, field);\n    key_.SetType(field->message_type()->FindFieldByName(\"key\")->cpp_type());\n    value_.SetType(field->message_type()->FindFieldByName(\"value\")->cpp_type());\n    map_->InitializeIterator(this);\n  }\n  MapIterator(const MapIterator& other) {\n    map_ = other.map_;\n    map_->InitializeIterator(this);\n    map_->CopyIterator(this, other);\n  }\n  ~MapIterator() {\n    map_->DeleteIterator(this);\n  }\n  friend bool operator==(const MapIterator& a, const MapIterator& b) {\n    return a.map_->EqualIterator(a, b);\n  }\n  friend bool operator!=(const MapIterator& a, const MapIterator& b) {\n    return !a.map_->EqualIterator(a, b);\n  }\n  MapIterator& operator++() {\n    map_->IncreaseIterator(this);\n    return *this;\n  }\n  MapIterator operator++(int) {\n    // iter_ is copied from Map<...>::iterator, no need to\n    // copy from its self again. Use the same implementation\n    // with operator++()\n    map_->IncreaseIterator(this);\n    return *this;\n  }\n  const MapKey& GetKey() {\n    return key_;\n  }\n  const MapValueRef& GetValueRef() {\n    return value_;\n  }\n  MapValueRef* MutableValueRef() {\n    map_->SetMapDirty();\n    return &value_;\n  }\n\n private:\n  template <typename Key, typename T>\n  friend class internal::TypeDefinedMapFieldBase;\n  friend class internal::DynamicMapField;\n  template <typename Key, typename T,\n            internal::WireFormatLite::FieldType kKeyFieldType,\n            internal::WireFormatLite::FieldType kValueFieldType,\n            int default_enum_value>\n  friend class internal::MapField;\n\n  // reinterpret_cast from heap-allocated Map<...>::iterator*. MapIterator owns\n  // the iterator. 
It is allocated by MapField<...>::InitializeIterator() called\n  // in constructor and deleted by MapField<...>::DeleteIterator() called in\n  // destructor.\n  void* iter_;\n  // Point to a MapField to call helper methods implemented in MapField.\n  // MapIterator does not own this object.\n  internal::MapFieldBase* map_;\n  MapKey key_;\n  MapValueRef value_;\n};\n\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_MAP_FIELD_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/map_field_inl.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_MAP_FIELD_INL_H__\n#define GOOGLE_PROTOBUF_MAP_FIELD_INL_H__\n\n#include <memory>\n#ifndef _SHARED_PTR_H\n#include <google/protobuf/stubs/shared_ptr.h>\n#endif\n\n#include <google/protobuf/map.h>\n#include <google/protobuf/map_field.h>\n#include <google/protobuf/map_type_handler.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n// UnwrapMapKey template\ntemplate<typename T>\nT UnwrapMapKey(const MapKey& map_key);\ntemplate<>\ninline int32 UnwrapMapKey<int32>(const MapKey& map_key) {\n  return map_key.GetInt32Value();\n}\ntemplate<>\ninline uint32 UnwrapMapKey<uint32>(const MapKey& map_key) {\n  return map_key.GetUInt32Value();\n}\ntemplate<>\ninline int64 UnwrapMapKey<int64>(const MapKey& map_key) {\n  return map_key.GetInt64Value();\n}\ntemplate<>\ninline uint64 UnwrapMapKey<uint64>(const MapKey& map_key) {\n  return map_key.GetUInt64Value();\n}\ntemplate<>\ninline bool UnwrapMapKey<bool>(const MapKey& map_key) {\n  return map_key.GetBoolValue();\n}\ntemplate<>\ninline string UnwrapMapKey<string>(const MapKey& map_key) {\n  return map_key.GetStringValue();\n}\n\n// SetMapKey template\ntemplate<typename T>\ninline void SetMapKey(MapKey* map_key, const T& value);\ntemplate<>\ninline void SetMapKey<int32>(MapKey* map_key, const int32& value) {\n  map_key->SetInt32Value(value);\n}\ntemplate<>\ninline void SetMapKey<uint32>(MapKey* map_key, const uint32& value) {\n  
map_key->SetUInt32Value(value);\n}\ntemplate<>\ninline void SetMapKey<int64>(MapKey* map_key, const int64& value) {\n  map_key->SetInt64Value(value);\n}\ntemplate<>\ninline void SetMapKey<uint64>(MapKey* map_key, const uint64& value) {\n  map_key->SetUInt64Value(value);\n}\ntemplate<>\ninline void SetMapKey<bool>(MapKey* map_key, const bool& value) {\n  map_key->SetBoolValue(value);\n}\ntemplate<>\ninline void SetMapKey<string>(MapKey* map_key, const string& value) {\n  map_key->SetStringValue(value);\n}\n\n// ------------------------TypeDefinedMapFieldBase---------------\ntemplate <typename Key, typename T>\ntypename Map<Key, T>::const_iterator&\nTypeDefinedMapFieldBase<Key, T>::InternalGetIterator(\n    const MapIterator* map_iter) const {\n  return *reinterpret_cast<typename Map<Key, T>::const_iterator *>(\n      map_iter->iter_);\n}\n\ntemplate <typename Key, typename T>\nvoid TypeDefinedMapFieldBase<Key, T>::MapBegin(MapIterator* map_iter) const {\n  InternalGetIterator(map_iter) = GetMap().begin();\n  SetMapIteratorValue(map_iter);\n}\n\ntemplate <typename Key, typename T>\nvoid TypeDefinedMapFieldBase<Key, T>::MapEnd(MapIterator* map_iter) const {\n  InternalGetIterator(map_iter) = GetMap().end();\n}\n\ntemplate <typename Key, typename T>\nbool TypeDefinedMapFieldBase<Key, T>::EqualIterator(const MapIterator& a,\n                                                    const MapIterator& b)\n    const {\n  return InternalGetIterator(&a) == InternalGetIterator(&b);\n}\n\ntemplate <typename Key, typename T>\nvoid TypeDefinedMapFieldBase<Key, T>::IncreaseIterator(MapIterator* map_iter)\n    const {\n  ++InternalGetIterator(map_iter);\n  SetMapIteratorValue(map_iter);\n}\n\ntemplate <typename Key, typename T>\nvoid TypeDefinedMapFieldBase<Key, T>::InitializeIterator(\n    MapIterator* map_iter) const {\n  map_iter->iter_ = new typename Map<Key, T>::const_iterator;\n  GOOGLE_CHECK(map_iter->iter_ != NULL);\n}\n\ntemplate <typename Key, typename T>\nvoid 
TypeDefinedMapFieldBase<Key, T>::DeleteIterator(MapIterator* map_iter)\n    const {\n  delete reinterpret_cast<typename Map<Key, T>::const_iterator *>(\n      map_iter->iter_);\n}\n\ntemplate <typename Key, typename T>\nvoid TypeDefinedMapFieldBase<Key, T>::CopyIterator(\n    MapIterator* this_iter,\n    const MapIterator& that_iter) const {\n  InternalGetIterator(this_iter) = InternalGetIterator(&that_iter);\n  this_iter->key_.SetType(that_iter.key_.type());\n  // MapValueRef::type() fails when containing data is null. However, if\n  // this_iter points to MapEnd, data can be null.\n  this_iter->value_.SetType(\n      static_cast<FieldDescriptor::CppType>(that_iter.value_.type_));\n  SetMapIteratorValue(this_iter);\n}\n\n// ----------------------------------------------------------------------\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nMapField<Key, T, kKeyFieldType, kValueFieldType, default_enum_value>::MapField()\n    : default_entry_(NULL) {}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nMapField<Key, T, kKeyFieldType, kValueFieldType, default_enum_value>::MapField(\n    Arena* arena)\n    : TypeDefinedMapFieldBase<Key, T>(arena),\n      MapFieldLite<Key, T, kKeyFieldType, kValueFieldType, default_enum_value>(\n          arena),\n      default_entry_(NULL) {}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nMapField<Key, T, kKeyFieldType, kValueFieldType, default_enum_value>::MapField(\n    const Message* default_entry)\n    : default_entry_(down_cast<const EntryType*>(default_entry)) {}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType 
kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nMapField<Key, T, kKeyFieldType, kValueFieldType, default_enum_value>::MapField(\n    Arena* arena, const Message* default_entry)\n    : TypeDefinedMapFieldBase<Key, T>(arena),\n      MapFieldLite<Key, T, kKeyFieldType, kValueFieldType, default_enum_value>(\n          arena),\n      default_entry_(down_cast<const EntryType*>(default_entry)) {}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::~MapField() {}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nint\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::size() const {\n  MapFieldBase::SyncMapWithRepeatedField();\n  return MapFieldLiteType::GetInternalMap().size();\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nvoid\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::Clear() {\n  MapFieldBase::SyncMapWithRepeatedField();\n  MapFieldLiteType::MutableInternalMap()->clear();\n  MapFieldBase::SetMapDirty();\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nvoid MapField<Key, T, kKeyFieldType, kValueFieldType,\n              default_enum_value>::SetMapIteratorValue(\n                  MapIterator* map_iter) const {\n  const Map<Key, T>& map = GetMap();\n  typename Map<Key, T>::const_iterator iter =\n      TypeDefinedMapFieldBase<Key, 
T>::InternalGetIterator(map_iter);\n  if (iter == map.end()) return;\n  SetMapKey(&map_iter->key_, iter->first);\n  map_iter->value_.SetValue(&iter->second);\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nbool MapField<Key, T, kKeyFieldType, kValueFieldType,\n              default_enum_value>::ContainsMapKey(\n                  const MapKey& map_key) const {\n  const Map<Key, T>& map = GetMap();\n  const Key& key = UnwrapMapKey<Key>(map_key);\n  typename Map<Key, T>::const_iterator iter = map.find(key);\n  return iter != map.end();\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nbool MapField<Key, T, kKeyFieldType, kValueFieldType,\n              default_enum_value>::InsertOrLookupMapValue(\n                  const MapKey& map_key,\n                  MapValueRef* val) {\n  // Always use mutable map because users may change the map value by\n  // MapValueRef.\n  Map<Key, T>* map = MutableMap();\n  const Key& key = UnwrapMapKey<Key>(map_key);\n  typename Map<Key, T>::iterator iter = map->find(key);\n  if (map->end() == iter) {\n    val->SetValue(&((*map)[key]));\n    return true;\n  }\n  // Key is already in the map. 
Make sure (*map)[key] is not called.\n  // [] may reorder the map and iterators.\n  val->SetValue(&(iter->second));\n  return false;\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nbool MapField<Key, T, kKeyFieldType, kValueFieldType,\n              default_enum_value>::DeleteMapValue(\n                  const MapKey& map_key) {\n  const Key& key = UnwrapMapKey<Key>(map_key);\n  return MutableMap()->erase(key);\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nconst Map<Key, T>&\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::GetMap() const {\n  MapFieldBase::SyncMapWithRepeatedField();\n  return MapFieldLiteType::GetInternalMap();\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nMap<Key, T>*\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::MutableMap() {\n  MapFieldBase::SyncMapWithRepeatedField();\n  Map<Key, T>* result = MapFieldLiteType::MutableInternalMap();\n  MapFieldBase::SetMapDirty();\n  return result;\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nvoid\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::MergeFrom(\n    const MapFieldLiteType& other) {\n  const MapField& down_other = down_cast<const MapField&>(other);\n  MapFieldBase::SyncMapWithRepeatedField();\n  down_other.SyncMapWithRepeatedField();\n  MapFieldLiteType::MergeFrom(other);\n  MapFieldBase::SetMapDirty();\n}\n\ntemplate <typename Key, typename T,\n          
WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nvoid\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::Swap(\n    MapFieldLiteType* other) {\n  MapField* down_other = down_cast<MapField*>(other);\n  std::swap(MapFieldBase::repeated_field_, down_other->repeated_field_);\n  MapFieldLiteType::Swap(other);\n  std::swap(MapFieldBase::state_, down_other->state_);\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nvoid\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::SetEntryDescriptor(\n    const Descriptor** descriptor) {\n  MapFieldBase::entry_descriptor_ = descriptor;\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nvoid\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::SetAssignDescriptorCallback(void (*callback)()) {\n  MapFieldBase::assign_descriptor_callback_ = callback;\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nconst Map<Key, T>&\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::GetInternalMap() const {\n  return MapFieldLiteType::GetInternalMap();\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nMap<Key, T>*\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::MutableInternalMap() {\n  return MapFieldLiteType::MutableInternalMap();\n}\n\ntemplate <typename Key, typename T,\n          
WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nvoid\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::SyncRepeatedFieldWithMapNoLock() const {\n  if (MapFieldBase::repeated_field_ == NULL) {\n    if (MapFieldBase::arena_ == NULL) {\n      MapFieldBase::repeated_field_ = new RepeatedPtrField<Message>();\n    } else {\n      MapFieldBase::repeated_field_ =\n          Arena::CreateMessage<RepeatedPtrField<Message> >(\n              MapFieldBase::arena_);\n    }\n  }\n  const Map<Key, T>& map = GetInternalMap();\n  RepeatedPtrField<EntryType>* repeated_field =\n      reinterpret_cast<RepeatedPtrField<EntryType>*>(\n          MapFieldBase::repeated_field_);\n\n  repeated_field->Clear();\n\n  for (typename Map<Key, T>::const_iterator it = map.begin();\n       it != map.end(); ++it) {\n    InitDefaultEntryOnce();\n    GOOGLE_CHECK(default_entry_ != NULL);\n    EntryType* new_entry =\n        down_cast<EntryType*>(default_entry_->New(MapFieldBase::arena_));\n    repeated_field->AddAllocated(new_entry);\n    (*new_entry->mutable_key()) = it->first;\n    (*new_entry->mutable_value()) = it->second;\n  }\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nvoid\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::SyncMapWithRepeatedFieldNoLock() const {\n  Map<Key, T>* map = const_cast<MapField*>(this)->MutableInternalMap();\n  RepeatedPtrField<EntryType>* repeated_field =\n      reinterpret_cast<RepeatedPtrField<EntryType>*>(\n          MapFieldBase::repeated_field_);\n  GOOGLE_CHECK(MapFieldBase::repeated_field_ != NULL);\n  map->clear();\n  for (typename RepeatedPtrField<EntryType>::iterator it =\n           repeated_field->begin(); it != repeated_field->end(); ++it) {\n    // Cast is needed because Map's 
api and internal storage is different when\n    // value is enum. For enum, we cannot cast an int to enum. Thus, we have to\n    // copy value. For other types, they have same exposed api type and internal\n    // stored type. We should not introduce value copy for them. We achieve this\n    // by casting to value for enum while casting to reference for other types.\n    (*map)[it->key()] = static_cast<CastValueType>(it->value());\n  }\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nint\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::SpaceUsedExcludingSelfNoLock() const {\n  int size = 0;\n  if (MapFieldBase::repeated_field_ != NULL) {\n    size += MapFieldBase::repeated_field_->SpaceUsedExcludingSelf();\n  }\n  Map<Key, T>* map = const_cast<MapField*>(this)->MutableInternalMap();\n  size += sizeof(*map);\n  for (typename Map<Key, T>::iterator it = map->begin();\n       it != map->end(); ++it) {\n    size += KeyTypeHandler::SpaceUsedInMap(it->first);\n    size += ValueTypeHandler::SpaceUsedInMap(it->second);\n  }\n  return size;\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType kKeyFieldType,\n          WireFormatLite::FieldType kValueFieldType,\n          int default_enum_value>\nvoid\nMapField<Key, T, kKeyFieldType, kValueFieldType,\n         default_enum_value>::InitDefaultEntryOnce()\n    const {\n  if (default_entry_ == NULL) {\n    MapFieldBase::InitMetadataOnce();\n    GOOGLE_CHECK(*MapFieldBase::entry_descriptor_ != NULL);\n    default_entry_ = down_cast<const EntryType*>(\n        MessageFactory::generated_factory()->GetPrototype(\n            *MapFieldBase::entry_descriptor_));\n  }\n}\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_MAP_FIELD_INL_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/map_field_lite.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_MAP_FIELD_LITE_H__\n#define GOOGLE_PROTOBUF_MAP_FIELD_LITE_H__\n\n#include <google/protobuf/map.h>\n#include <google/protobuf/map_entry_lite.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// This class provides access to map field using generated api. It is used for\n// internal generated message implentation only. Users should never use this\n// directly.\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value = 0>\nclass MapFieldLite {\n  // Define message type for internal repeated field.\n  typedef MapEntryLite<Key, T, key_wire_type, value_wire_type,\n                       default_enum_value> EntryType;\n\n public:\n  MapFieldLite();\n  explicit MapFieldLite(Arena* arena);\n  virtual ~MapFieldLite();\n\n  // Accessors\n  virtual const Map<Key, T>& GetMap() const;\n  virtual Map<Key, T>* MutableMap();\n\n  // Convenient methods for generated message implementation.\n  virtual int size() const;\n  virtual void Clear();\n  virtual void MergeFrom(const MapFieldLite& other);\n  virtual void Swap(MapFieldLite* other);\n\n  // Set default enum value only for proto2 map field whose value is enum type.\n  void SetDefaultEnumValue();\n\n  // Used in the implementation of parsing. 
Caller should take the ownership.\n  EntryType* NewEntry() const;\n  // Used in the implementation of serializing enum value type. Caller should\n  // take the ownership.\n  EntryType* NewEnumEntryWrapper(const Key& key, const T t) const;\n  // Used in the implementation of serializing other value types. Caller should\n  // take the ownership.\n  EntryType* NewEntryWrapper(const Key& key, const T& t) const;\n\n protected:\n  // Convenient methods to get internal google::protobuf::Map\n  virtual const Map<Key, T>& GetInternalMap() const;\n  virtual Map<Key, T>* MutableInternalMap();\n\n private:\n  typedef void DestructorSkippable_;\n\n  Arena* arena_;\n  Map<Key, T>* map_;\n\n  friend class ::google::protobuf::Arena;\n};\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::MapFieldLite()\n    : arena_(NULL) {\n  map_ = new Map<Key, T>;\n  SetDefaultEnumValue();\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::MapFieldLite(Arena* arena)\n  : arena_(arena) {\n  map_ = Arena::CreateMessage<Map<Key, T> >(arena);\n  SetDefaultEnumValue();\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::~MapFieldLite() {\n  delete map_;\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nconst Map<Key, 
T>&\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::GetMap() const {\n  return *map_;\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nMap<Key, T>*\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::MutableMap() {\n  return map_;\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nint\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::size() const {\n  return map_->size();\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nvoid\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::Clear() {\n  map_->clear();\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nvoid\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::MergeFrom(\n    const MapFieldLite& other) {\n  for (typename Map<Key, T>::const_iterator it = other.map_->begin();\n       it != other.map_->end(); ++it) {\n    (*map_)[it->first] = it->second;\n  }\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nvoid\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::Swap(\n    MapFieldLite* other) {\n  std::swap(map_, other->map_);\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          
WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nvoid\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::SetDefaultEnumValue() {\n  MutableInternalMap()->SetDefaultEnumValue(default_enum_value);\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nconst Map<Key, T>&\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::GetInternalMap() const {\n  return *map_;\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nMap<Key, T>*\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::MutableInternalMap() {\n  return map_;\n}\n\n#define EntryType \\\n  MapEntryLite<Key, T, key_wire_type, value_wire_type, default_enum_value>\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nEntryType*\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::NewEntry() const {\n  if (arena_ == NULL) {\n    return new EntryType();\n  } else {\n    return Arena::CreateMessage<EntryType>(arena_);\n  }\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nEntryType*\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::NewEnumEntryWrapper(const Key& key,\n                                                      const T t) const {\n  return EntryType::EnumWrap(key, t, arena_);\n}\n\ntemplate <typename Key, typename T,\n          WireFormatLite::FieldType key_wire_type,\n          
WireFormatLite::FieldType value_wire_type,\n          int default_enum_value>\nEntryType*\nMapFieldLite<Key, T, key_wire_type, value_wire_type,\n             default_enum_value>::NewEntryWrapper(const Key& key,\n                                                  const T& t) const {\n  return EntryType::Wrap(key, t, arena_);\n}\n\n#undef EntryType\n\n// True if IsInitialized() is true for value field in all elements of t. T is\n// expected to be message.  It's useful to have this helper here to keep the\n// protobuf compiler from ever having to emit loops in IsInitialized() methods.\n// We want the C++ compiler to inline this or not as it sees fit.\ntemplate <typename Key, typename T>\nbool AllAreInitialized(const Map<Key, T>& t) {\n  for (typename Map<Key, T>::const_iterator it = t.begin(); it != t.end();\n       ++it) {\n    if (!it->second.IsInitialized()) return false;\n  }\n  return true;\n}\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_MAP_FIELD_LITE_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/map_type_handler.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_TYPE_HANDLER_H__\n#define GOOGLE_PROTOBUF_TYPE_HANDLER_H__\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/wire_format_lite_inl.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// Used for compile time type selection. MapIf::type will be TrueType if Flag is\n// true and FalseType otherwise.\ntemplate<bool Flag, typename TrueType, typename FalseType>\nstruct MapIf;\n\ntemplate<typename TrueType, typename FalseType>\nstruct MapIf<true, TrueType, FalseType> {\n  typedef TrueType type;\n};\n\ntemplate<typename TrueType, typename FalseType>\nstruct MapIf<false, TrueType, FalseType> {\n  typedef FalseType type;\n};\n\n// In proto2 Map, enum needs to be initialized to given default value, while\n// other types' default value can be inferred from the type.\ntemplate <bool IsEnum, typename Type>\nclass MapValueInitializer {\n public:\n  static inline void Initialize(Type& type, int default_enum_value);\n};\n\ntemplate <typename Type>\nclass MapValueInitializer<true, Type> {\n public:\n  static inline void Initialize(Type& value, int default_enum_value) {\n    value = static_cast<Type>(default_enum_value);\n  }\n};\n\ntemplate <typename Type>\nclass MapValueInitializer<false, Type> {\n public:\n  static inline void Initialize(Type& value, int default_enum_value) {}\n};\n\ntemplate <typename Type, bool 
is_arena_constructable>\nclass MapArenaMessageCreator {\n public:\n  // Use arena to create message if Type is arena constructable. Otherwise,\n  // create the message on heap.\n  static inline Type* CreateMessage(Arena* arena);\n};\ntemplate <typename Type>\nclass MapArenaMessageCreator<Type, true> {\n public:\n  static inline Type* CreateMessage(Arena* arena) {\n    return Arena::CreateMessage<Type>(arena);\n  }\n};\ntemplate <typename Type>\nclass MapArenaMessageCreator<Type, false> {\n public:\n  static inline Type* CreateMessage(Arena* arena) {\n    return Arena::Create<Type>(arena);\n  }\n};\n\n// Define constants for given wire field type\ntemplate <WireFormatLite::FieldType field_type, typename Type>\nclass MapWireFieldTypeTraits {};\n\n#define TYPE_TRAITS(FieldType, CType, WireFormatType, IsMessage, IsEnum)   \\\n  template <typename Type>                                                 \\\n  class MapWireFieldTypeTraits<WireFormatLite::TYPE_##FieldType, Type> {   \\\n   public:                                                                 \\\n    static const bool kIsMessage = IsMessage;                              \\\n    static const bool kIsEnum = IsEnum;                                    \\\n    typedef typename MapIf<kIsMessage, Type*, CType>::type TypeOnMemory;   \\\n    typedef typename MapIf<kIsEnum, int, Type>::type MapEntryAccessorType; \\\n    static const WireFormatLite::WireType kWireType =                      \\\n        WireFormatLite::WIRETYPE_##WireFormatType;                         \\\n  };\n\nTYPE_TRAITS(MESSAGE , Type, LENGTH_DELIMITED, true, false)\nTYPE_TRAITS(STRING  , ArenaStringPtr, LENGTH_DELIMITED, false, false)\nTYPE_TRAITS(BYTES   , ArenaStringPtr ,  LENGTH_DELIMITED, false, false)\nTYPE_TRAITS(INT64   , int64  ,  VARINT , false, false)\nTYPE_TRAITS(UINT64  , uint64 ,  VARINT , false, false)\nTYPE_TRAITS(INT32   , int32  ,  VARINT , false, false)\nTYPE_TRAITS(UINT32  , uint32 ,  VARINT , false, false)\nTYPE_TRAITS(SINT64 
 , int64  ,  VARINT , false, false)\nTYPE_TRAITS(SINT32  , int32  ,  VARINT , false, false)\nTYPE_TRAITS(ENUM    , int    ,  VARINT , false, true )\nTYPE_TRAITS(DOUBLE  , double ,  FIXED64, false, false)\nTYPE_TRAITS(FLOAT   , float  ,  FIXED32, false, false)\nTYPE_TRAITS(FIXED64 , uint64 ,  FIXED64, false, false)\nTYPE_TRAITS(FIXED32 , uint32 ,  FIXED32, false, false)\nTYPE_TRAITS(SFIXED64, int64  ,  FIXED64, false, false)\nTYPE_TRAITS(SFIXED32, int32  ,  FIXED32, false, false)\nTYPE_TRAITS(BOOL    , bool   ,  VARINT , false, false)\n\n#undef TYPE_TRAITS\n\ntemplate <WireFormatLite::FieldType field_type, typename Type>\nclass MapTypeHandler {};\n\ntemplate <typename Type>\nclass MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type> {\n public:\n  // Enum type cannot be used for MapTypeHandler::Read. Define a type which will\n  // replace Enum with int.\n  typedef typename MapWireFieldTypeTraits<WireFormatLite::TYPE_MESSAGE,\n      Type>::MapEntryAccessorType MapEntryAccessorType;\n  // Internal stored type in MapEntryLite for given wire field type.\n  typedef typename MapWireFieldTypeTraits<WireFormatLite::TYPE_MESSAGE,\n                                          Type>::TypeOnMemory TypeOnMemory;\n  // Corresponding wire type for field type.\n  static const WireFormatLite::WireType kWireType =\n      MapWireFieldTypeTraits<WireFormatLite::TYPE_MESSAGE, Type>::kWireType;\n  // Whether wire type is for message.\n  static const bool kIsMessage =\n      MapWireFieldTypeTraits<WireFormatLite::TYPE_MESSAGE, Type>::kIsMessage;\n  // Whether wire type is for enum.\n  static const bool kIsEnum =\n      MapWireFieldTypeTraits<WireFormatLite::TYPE_MESSAGE, Type>::kIsEnum;\n\n  // Functions used in parsing and serialization. 
===================\n  static inline size_t ByteSize(const MapEntryAccessorType& value);\n  static inline int GetCachedSize(const MapEntryAccessorType& value);\n  static inline bool Read(io::CodedInputStream* input,\n                          MapEntryAccessorType* value);\n  static inline void Write(int field, const MapEntryAccessorType& value,\n                           io::CodedOutputStream* output);\n  static inline uint8* InternalWriteToArray(int field,\n                                            const MapEntryAccessorType& value,\n                                            bool deterministic, uint8* target);\n  static inline uint8* WriteToArray(int field,\n                                    const MapEntryAccessorType& value,\n                                    uint8* target);\n\n  // Functions to manipulate data on memory. ========================\n  static inline const Type& GetExternalReference(const Type* value);\n  static inline void DeleteNoArena(const Type* x);\n  static inline void Merge(const Type& from, Type** to, Arena* arena);\n  static inline void Clear(Type** value, Arena* arena);\n  static inline void ClearMaybeByDefaultEnum(Type** value, Arena* arena,\n                                             int default_enum_value);\n  static inline void Initialize(Type** x, Arena* arena);\n\n  static inline void InitializeMaybeByDefaultEnum(Type** x,\n                                                  int default_enum_value,\n                                                  Arena* arena);\n  static inline Type* EnsureMutable(Type** value, Arena* arena);\n  // SpaceUsedInMapEntry: Return bytes used by value in MapEntry, excluding\n  // those already calculate in sizeof(MapField).\n  static inline int SpaceUsedInMapEntry(const Type* value);\n  // Return bytes used by value in Map.\n  static inline int SpaceUsedInMap(const Type& value);\n  // Assign default value to given instance.\n  static inline void AssignDefaultValue(Type** value);\n  // Return 
default instance if value is not initialized when calling const\n  // reference accessor.\n  static inline const Type& DefaultIfNotInitialized(\n      const Type* value, const Type* default_value);\n  // Check if all required fields have values set.\n  static inline bool IsInitialized(Type* value);\n};\n\n#define MAP_HANDLER(FieldType)                                                \\\n  template <typename Type>                                                    \\\n  class MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type> {              \\\n   public:                                                                    \\\n    typedef typename MapWireFieldTypeTraits<WireFormatLite::TYPE_##FieldType, \\\n                                            Type>::MapEntryAccessorType       \\\n        MapEntryAccessorType;                                                 \\\n    typedef typename MapWireFieldTypeTraits<WireFormatLite::TYPE_##FieldType, \\\n                                            Type>::TypeOnMemory TypeOnMemory; \\\n    static const WireFormatLite::WireType kWireType =                         \\\n        MapWireFieldTypeTraits<WireFormatLite::TYPE_##FieldType,              \\\n                               Type>::kWireType;                              \\\n    static const bool kIsMessage =                                            \\\n        MapWireFieldTypeTraits<WireFormatLite::TYPE_##FieldType,              \\\n                               Type>::kIsMessage;                             \\\n    static const bool kIsEnum =                                               \\\n        MapWireFieldTypeTraits<WireFormatLite::TYPE_##FieldType,              \\\n                               Type>::kIsEnum;                                \\\n    static inline int ByteSize(const MapEntryAccessorType& value);            \\\n    static inline int GetCachedSize(const MapEntryAccessorType& value);       \\\n    static inline bool Read(io::CodedInputStream* 
input,                      \\\n                            MapEntryAccessorType* value);                     \\\n    static inline void Write(int field, const MapEntryAccessorType& value,    \\\n                             io::CodedOutputStream* output);                  \\\n    static inline uint8* InternalWriteToArray(                                \\\n        int field,                                                            \\\n        const MapEntryAccessorType& value,                                    \\\n        bool deterministic,                                                   \\\n        uint8* target);                                                       \\\n    static inline uint8* WriteToArray(int field,                              \\\n                                      const MapEntryAccessorType& value,      \\\n                                      uint8* target) {                        \\\n      return InternalWriteToArray(field, value, false, target);               \\\n    }                                                                         \\\n    static inline const MapEntryAccessorType& GetExternalReference(           \\\n        const TypeOnMemory& value);                                           \\\n    static inline void DeleteNoArena(const TypeOnMemory& x);                  \\\n    static inline void Merge(const MapEntryAccessorType& from,                \\\n                             TypeOnMemory* to, Arena* arena);                 \\\n    static inline void Clear(TypeOnMemory* value, Arena* arena);              \\\n    static inline void ClearMaybeByDefaultEnum(TypeOnMemory* value,           \\\n                                               Arena* arena,                  \\\n                                               int default_enum);             \\\n    static inline int SpaceUsedInMapEntry(const TypeOnMemory& value);         \\\n    static inline int SpaceUsedInMap(const TypeOnMemory& value);              
\\\n    static inline int SpaceUsedInMap(const string& value);                    \\\n    static inline void AssignDefaultValue(TypeOnMemory* value);               \\\n    static inline const MapEntryAccessorType& DefaultIfNotInitialized(        \\\n        const TypeOnMemory& value, const TypeOnMemory& default_value);        \\\n    static inline bool IsInitialized(const TypeOnMemory& value);              \\\n    static void DeleteNoArena(TypeOnMemory& value);                           \\\n    static inline void Initialize(TypeOnMemory* value, Arena* arena);         \\\n    static inline void InitializeMaybeByDefaultEnum(TypeOnMemory* value,      \\\n                                                    int default_enum_value,   \\\n                                                    Arena* arena);            \\\n    static inline MapEntryAccessorType* EnsureMutable(TypeOnMemory* value,    \\\n                                                      Arena* arena);          \\\n  };\nMAP_HANDLER(STRING)\nMAP_HANDLER(BYTES)\nMAP_HANDLER(INT64)\nMAP_HANDLER(UINT64)\nMAP_HANDLER(INT32)\nMAP_HANDLER(UINT32)\nMAP_HANDLER(SINT64)\nMAP_HANDLER(SINT32)\nMAP_HANDLER(ENUM)\nMAP_HANDLER(DOUBLE)\nMAP_HANDLER(FLOAT)\nMAP_HANDLER(FIXED64)\nMAP_HANDLER(FIXED32)\nMAP_HANDLER(SFIXED64)\nMAP_HANDLER(SFIXED32)\nMAP_HANDLER(BOOL)\n#undef MAP_HANDLER\n\ntemplate <typename Type>\ninline size_t\nMapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::ByteSize(\n    const MapEntryAccessorType& value) {\n  return WireFormatLite::MessageSizeNoVirtual(value);\n}\n\n#define GOOGLE_PROTOBUF_BYTE_SIZE(FieldType, DeclaredType)                     \\\n  template <typename Type>                                                     \\\n  inline int MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::ByteSize( \\\n      const MapEntryAccessorType& value) {                                     \\\n    return WireFormatLite::DeclaredType##Size(value);                          \\\n  
}\n\nGOOGLE_PROTOBUF_BYTE_SIZE(STRING, String)\nGOOGLE_PROTOBUF_BYTE_SIZE(BYTES , Bytes)\nGOOGLE_PROTOBUF_BYTE_SIZE(INT64 , Int64)\nGOOGLE_PROTOBUF_BYTE_SIZE(UINT64, UInt64)\nGOOGLE_PROTOBUF_BYTE_SIZE(INT32 , Int32)\nGOOGLE_PROTOBUF_BYTE_SIZE(UINT32, UInt32)\nGOOGLE_PROTOBUF_BYTE_SIZE(SINT64, SInt64)\nGOOGLE_PROTOBUF_BYTE_SIZE(SINT32, SInt32)\nGOOGLE_PROTOBUF_BYTE_SIZE(ENUM  , Enum)\n\n#undef GOOGLE_PROTOBUF_BYTE_SIZE\n\n#define FIXED_BYTE_SIZE(FieldType, DeclaredType)                               \\\n  template <typename Type>                                                     \\\n  inline int MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::ByteSize( \\\n      const MapEntryAccessorType& value) {                                     \\\n    return WireFormatLite::k##DeclaredType##Size;                              \\\n  }\n\nFIXED_BYTE_SIZE(DOUBLE  , Double)\nFIXED_BYTE_SIZE(FLOAT   , Float)\nFIXED_BYTE_SIZE(FIXED64 , Fixed64)\nFIXED_BYTE_SIZE(FIXED32 , Fixed32)\nFIXED_BYTE_SIZE(SFIXED64, SFixed64)\nFIXED_BYTE_SIZE(SFIXED32, SFixed32)\nFIXED_BYTE_SIZE(BOOL    , Bool)\n\n#undef FIXED_BYTE_SIZE\n\ntemplate <typename Type>\ninline int\nMapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::GetCachedSize(\n    const MapEntryAccessorType& value) {\n  return WireFormatLite::LengthDelimitedSize(value.GetCachedSize());\n}\n\n#define GET_CACHED_SIZE(FieldType, DeclaredType)                         \\\n  template <typename Type>                                               \\\n  inline int                                                             \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::GetCachedSize( \\\n      const MapEntryAccessorType& value) {                               \\\n    return WireFormatLite::DeclaredType##Size(value);                    \\\n  }\n\nGET_CACHED_SIZE(STRING, String)\nGET_CACHED_SIZE(BYTES , Bytes)\nGET_CACHED_SIZE(INT64 , Int64)\nGET_CACHED_SIZE(UINT64, UInt64)\nGET_CACHED_SIZE(INT32 , 
Int32)\nGET_CACHED_SIZE(UINT32, UInt32)\nGET_CACHED_SIZE(SINT64, SInt64)\nGET_CACHED_SIZE(SINT32, SInt32)\nGET_CACHED_SIZE(ENUM  , Enum)\n\n#undef GET_CACHED_SIZE\n\n#define GET_FIXED_CACHED_SIZE(FieldType, DeclaredType)                   \\\n  template <typename Type>                                               \\\n  inline int                                                             \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::GetCachedSize( \\\n      const MapEntryAccessorType& value) {                               \\\n    return WireFormatLite::k##DeclaredType##Size;                        \\\n  }\n\nGET_FIXED_CACHED_SIZE(DOUBLE  , Double)\nGET_FIXED_CACHED_SIZE(FLOAT   , Float)\nGET_FIXED_CACHED_SIZE(FIXED64 , Fixed64)\nGET_FIXED_CACHED_SIZE(FIXED32 , Fixed32)\nGET_FIXED_CACHED_SIZE(SFIXED64, SFixed64)\nGET_FIXED_CACHED_SIZE(SFIXED32, SFixed32)\nGET_FIXED_CACHED_SIZE(BOOL    , Bool)\n\n#undef GET_FIXED_CACHED_SIZE\n\ntemplate <typename Type>\ninline void MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::Write(\n    int field, const MapEntryAccessorType& value,\n    io::CodedOutputStream* output) {\n  WireFormatLite::WriteMessageMaybeToArray(field, value, output);\n}\n\ntemplate <typename Type>\ninline uint8*\nMapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::InternalWriteToArray(\n    int field, const MapEntryAccessorType& value, bool deterministic,\n    uint8* target) {\n  return WireFormatLite::InternalWriteMessageToArray(field, value,\n                                                     deterministic, target);\n}\n\n#define WRITE_METHOD(FieldType, DeclaredType)                                  \\\n  template <typename Type>                                                     \\\n  inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Write(   \\\n      int field, const MapEntryAccessorType& value,                            \\\n      io::CodedOutputStream* output) {                                         \\\n    
return WireFormatLite::Write##DeclaredType(field, value, output);          \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline uint8*                                                                \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType,                             \\\n                 Type>::InternalWriteToArray(                                  \\\n      int field, const MapEntryAccessorType& value, bool, uint8* target) {     \\\n    return WireFormatLite::Write##DeclaredType##ToArray(field, value, target); \\\n  }\n\nWRITE_METHOD(STRING  , String)\nWRITE_METHOD(BYTES   , Bytes)\nWRITE_METHOD(INT64   , Int64)\nWRITE_METHOD(UINT64  , UInt64)\nWRITE_METHOD(INT32   , Int32)\nWRITE_METHOD(UINT32  , UInt32)\nWRITE_METHOD(SINT64  , SInt64)\nWRITE_METHOD(SINT32  , SInt32)\nWRITE_METHOD(ENUM    , Enum)\nWRITE_METHOD(DOUBLE  , Double)\nWRITE_METHOD(FLOAT   , Float)\nWRITE_METHOD(FIXED64 , Fixed64)\nWRITE_METHOD(FIXED32 , Fixed32)\nWRITE_METHOD(SFIXED64, SFixed64)\nWRITE_METHOD(SFIXED32, SFixed32)\nWRITE_METHOD(BOOL    , Bool)\n\n#undef WRITE_METHOD\n\ntemplate <typename Type>\ninline bool MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::Read(\n    io::CodedInputStream* input, MapEntryAccessorType* value) {\n  return WireFormatLite::ReadMessageNoVirtual(input, value);\n}\n\ntemplate <typename Type>\ninline bool MapTypeHandler<WireFormatLite::TYPE_STRING, Type>::Read(\n    io::CodedInputStream* input, MapEntryAccessorType* value) {\n  return WireFormatLite::ReadString(input, value);\n}\n\ntemplate <typename Type>\ninline bool MapTypeHandler<WireFormatLite::TYPE_BYTES, Type>::Read(\n    io::CodedInputStream* input, MapEntryAccessorType* value) {\n  return WireFormatLite::ReadBytes(input, value);\n}\n\n#define READ_METHOD(FieldType)                                              \\\n  template <typename Type>                             
                     \\\n  inline bool MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Read( \\\n      io::CodedInputStream* input, MapEntryAccessorType* value) {           \\\n    return WireFormatLite::ReadPrimitive<TypeOnMemory,                      \\\n                                         WireFormatLite::TYPE_##FieldType>( \\\n        input, value);                                                      \\\n  }\n\nREAD_METHOD(INT64)\nREAD_METHOD(UINT64)\nREAD_METHOD(INT32)\nREAD_METHOD(UINT32)\nREAD_METHOD(SINT64)\nREAD_METHOD(SINT32)\nREAD_METHOD(ENUM)\nREAD_METHOD(DOUBLE)\nREAD_METHOD(FLOAT)\nREAD_METHOD(FIXED64)\nREAD_METHOD(FIXED32)\nREAD_METHOD(SFIXED64)\nREAD_METHOD(SFIXED32)\nREAD_METHOD(BOOL)\n\n#undef READ_METHOD\n\n// Definition for message handler\n\ntemplate <typename Type>\ninline const Type&\nMapTypeHandler<WireFormatLite::TYPE_MESSAGE,\n                        Type>::GetExternalReference(const Type* value) {\n  return *value;\n}\n\ntemplate <typename Type>\ninline int\nMapTypeHandler<WireFormatLite::TYPE_MESSAGE,\n                        Type>::SpaceUsedInMapEntry(const Type* value) {\n  return value->SpaceUsed();\n}\n\ntemplate <typename Type>\nint MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::SpaceUsedInMap(\n    const Type& value) {\n  return value.SpaceUsed();\n}\n\ntemplate <typename Type>\ninline void MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::Clear(\n    Type** value, Arena* arena) {\n  if (*value != NULL) (*value)->Clear();\n}\ntemplate <typename Type>\ninline void\nMapTypeHandler<WireFormatLite::TYPE_MESSAGE,\n                        Type>::ClearMaybeByDefaultEnum(Type** value,\n                                                       Arena* arena,\n                                                       int default_enum_value) {\n  if (*value != NULL) (*value)->Clear();\n}\ntemplate <typename Type>\ninline void MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::Merge(\n    const Type& from, Type** to, Arena* 
arena) {\n  (*to)->MergeFrom(from);\n}\n\ntemplate <typename Type>\nvoid MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::DeleteNoArena(\n    const Type* ptr) {\n  delete ptr;\n}\n\ntemplate <typename Type>\ninline void MapTypeHandler<WireFormatLite::TYPE_MESSAGE,\n                                    Type>::AssignDefaultValue(Type** value) {\n  *value = const_cast<Type*>(&Type::default_instance());\n}\n\ntemplate <typename Type>\ninline void MapTypeHandler<WireFormatLite::TYPE_MESSAGE,\n                                    Type>::Initialize(Type** x,\n                                                      Arena* arena) {\n  *x = NULL;\n}\n\ntemplate <typename Type>\ninline void MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::\n    InitializeMaybeByDefaultEnum(Type** x, int default_enum_value,\n                                 Arena* arena) {\n  *x = NULL;\n}\n\ntemplate <typename Type>\ninline Type* MapTypeHandler<WireFormatLite::TYPE_MESSAGE,\n                                     Type>::EnsureMutable(Type** value,\n                                                          Arena* arena) {\n  if (*value == NULL) {\n    *value =\n        MapArenaMessageCreator<Type, Arena::is_arena_constructable<Type>::\n                                         type::value>::CreateMessage(arena);\n  }\n  return *value;\n}\n\ntemplate <typename Type>\ninline const Type& MapTypeHandler<WireFormatLite::TYPE_MESSAGE, Type>::\n    DefaultIfNotInitialized(const Type* value, const Type* default_value) {\n  return value != NULL ? 
*value : *default_value;\n}\n\ntemplate <typename Type>\ninline bool MapTypeHandler<WireFormatLite::TYPE_MESSAGE,\n                                    Type>::IsInitialized(Type* value) {\n  return value->IsInitialized();\n}\n\n// Definition for string/bytes handler\n\n#define STRING_OR_BYTES_HANDLER_FUNCTIONS(FieldType)                           \\\n  template <typename Type>                                                     \\\n  inline const typename MapTypeHandler<WireFormatLite::TYPE_##FieldType,       \\\n                                       Type>::MapEntryAccessorType&            \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType,                             \\\n                 Type>::GetExternalReference(const TypeOnMemory& value) {      \\\n    return value.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());      \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline int                                                                   \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::SpaceUsedInMapEntry( \\\n      const TypeOnMemory& value) {                                             \\\n    return sizeof(value);                                                      \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline int MapTypeHandler<WireFormatLite::TYPE_##FieldType,                  \\\n                            Type>::SpaceUsedInMap(const TypeOnMemory& value) { \\\n    return sizeof(value);                                                      \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline int 
MapTypeHandler<WireFormatLite::TYPE_##FieldType,                  \\\n                            Type>::SpaceUsedInMap(const string& value) {       \\\n    return sizeof(value);                                                      \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Clear(   \\\n      TypeOnMemory* value, Arena* arena) {                                     \\\n    value->ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),    \\\n                        arena);                                                \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline void                                                                  \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType,                             \\\n                 Type>::ClearMaybeByDefaultEnum(TypeOnMemory* value,           \\\n                                                Arena* arena,                  \\\n                                                int default_enum) {            \\\n    Clear(value, arena);                                                       \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Merge(   \\\n      const MapEntryAccessorType& from, TypeOnMemory* to, Arena* arena) {      \\\n    to->Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from, arena);  \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n 
 void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::DeleteNoArena(  \\\n      TypeOnMemory& value) {                                                   \\\n    value.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());  \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType,                 \\\n                             Type>::AssignDefaultValue(TypeOnMemory* value) {} \\\n  template <typename Type>                                                     \\\n  inline void                                                                  \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Initialize(          \\\n      TypeOnMemory* value, Arena* arena) {                                     \\\n    value->UnsafeSetDefault(                                                   \\\n        &::google::protobuf::internal::GetEmptyStringAlreadyInited());                   \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline void                                                                  \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType,                             \\\n                 Type>::InitializeMaybeByDefaultEnum(TypeOnMemory* value,      \\\n                                                     int default_enum_value,   \\\n                                                     Arena* arena) {           \\\n    Initialize(value, arena);                                                  \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline typename MapTypeHandler<WireFormatLite::TYPE_##FieldType,      
       \\\n                                 Type>::MapEntryAccessorType*                  \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::EnsureMutable(       \\\n      TypeOnMemory* value, Arena* arena) {                                     \\\n    return value->Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),  \\\n                          arena);                                              \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline const typename MapTypeHandler<WireFormatLite::TYPE_##FieldType,       \\\n                                       Type>::MapEntryAccessorType&            \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType,                             \\\n                 Type>::DefaultIfNotInitialized(const TypeOnMemory& value,     \\\n                                                const TypeOnMemory&            \\\n                                                    default_value) {           \\\n    return value.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());      \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline bool MapTypeHandler<WireFormatLite::TYPE_##FieldType,                 \\\n                             Type>::IsInitialized(const TypeOnMemory& value) { \\\n    return true;                                                               \\\n  }\nSTRING_OR_BYTES_HANDLER_FUNCTIONS(STRING)\nSTRING_OR_BYTES_HANDLER_FUNCTIONS(BYTES)\n#undef STRING_OR_BYTES_HANDLER_FUNCTIONS\n\n#define PRIMITIVE_HANDLER_FUNCTIONS(FieldType)                                 \\\n  template <typename Type>                                                     \\\n  inline const typename MapTypeHandler<WireFormatLite::TYPE_##FieldType,       \\\n         
                              Type>::MapEntryAccessorType&            \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType,                             \\\n                 Type>::GetExternalReference(const TypeOnMemory& value) {      \\\n    return value;                                                              \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline int                                                                   \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::SpaceUsedInMapEntry( \\\n      const TypeOnMemory& value) {                                             \\\n    return 0;                                                                  \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline int MapTypeHandler<WireFormatLite::TYPE_##FieldType,                  \\\n                            Type>::SpaceUsedInMap(const TypeOnMemory& value) { \\\n    return sizeof(Type);                                                       \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Clear(   \\\n      TypeOnMemory* value, Arena* arena) {                                     \\\n    *value = 0;                                                                \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline void                                                                  \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType,                             \\\n                 
Type>::ClearMaybeByDefaultEnum(TypeOnMemory* value,           \\\n                                                Arena* arena,                  \\\n                                                int default_enum_value) {      \\\n    *value = static_cast<TypeOnMemory>(default_enum_value);                    \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Merge(   \\\n      const MapEntryAccessorType& from, TypeOnMemory* to, Arena* arena) {      \\\n    *to = from;                                                                \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType,                 \\\n                             Type>::DeleteNoArena(TypeOnMemory& x) {}          \\\n  template <typename Type>                                                     \\\n  inline void MapTypeHandler<WireFormatLite::TYPE_##FieldType,                 \\\n                             Type>::AssignDefaultValue(TypeOnMemory* value) {} \\\n  template <typename Type>                                                     \\\n  inline void                                                                  \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::Initialize(          \\\n      TypeOnMemory* value, Arena* arena) {                                     \\\n    *value = 0;                                                                \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline void                                                                  \\\n  
MapTypeHandler<WireFormatLite::TYPE_##FieldType,                             \\\n                 Type>::InitializeMaybeByDefaultEnum(TypeOnMemory* value,      \\\n                                                     int default_enum_value,   \\\n                                                     Arena* arena) {           \\\n    *value = static_cast<TypeOnMemory>(default_enum_value);                    \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline typename MapTypeHandler<WireFormatLite::TYPE_##FieldType,             \\\n                                 Type>::MapEntryAccessorType*                  \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType, Type>::EnsureMutable(       \\\n      TypeOnMemory* value, Arena* arena) {                                     \\\n    return value;                                                              \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline const typename MapTypeHandler<WireFormatLite::TYPE_##FieldType,       \\\n                                       Type>::MapEntryAccessorType&            \\\n  MapTypeHandler<WireFormatLite::TYPE_##FieldType,                             \\\n                 Type>::DefaultIfNotInitialized(const TypeOnMemory& value,     \\\n                                                const TypeOnMemory&            \\\n                                                    default_value) {           \\\n    return value;                                                              \\\n  }                                                                            \\\n  template <typename Type>                                                     \\\n  inline bool MapTypeHandler<WireFormatLite::TYPE_##FieldType,                 \\\n          
                   Type>::IsInitialized(const TypeOnMemory& value) { \\\n    return true;                                                               \\\n  }\nPRIMITIVE_HANDLER_FUNCTIONS(INT64)\nPRIMITIVE_HANDLER_FUNCTIONS(UINT64)\nPRIMITIVE_HANDLER_FUNCTIONS(INT32)\nPRIMITIVE_HANDLER_FUNCTIONS(UINT32)\nPRIMITIVE_HANDLER_FUNCTIONS(SINT64)\nPRIMITIVE_HANDLER_FUNCTIONS(SINT32)\nPRIMITIVE_HANDLER_FUNCTIONS(ENUM)\nPRIMITIVE_HANDLER_FUNCTIONS(DOUBLE)\nPRIMITIVE_HANDLER_FUNCTIONS(FLOAT)\nPRIMITIVE_HANDLER_FUNCTIONS(FIXED64)\nPRIMITIVE_HANDLER_FUNCTIONS(FIXED32)\nPRIMITIVE_HANDLER_FUNCTIONS(SFIXED64)\nPRIMITIVE_HANDLER_FUNCTIONS(SFIXED32)\nPRIMITIVE_HANDLER_FUNCTIONS(BOOL)\n#undef PRIMITIVE_HANDLER_FUNCTIONS\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_TYPE_HANDLER_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/message.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Defines Message, the abstract interface implemented by non-lite\n// protocol message objects.  
Although it's possible to implement this\n// interface manually, most users will use the protocol compiler to\n// generate implementations.\n//\n// Example usage:\n//\n// Say you have a message defined as:\n//\n//   message Foo {\n//     optional string text = 1;\n//     repeated int32 numbers = 2;\n//   }\n//\n// Then, if you used the protocol compiler to generate a class from the above\n// definition, you could use it like so:\n//\n//   string data;  // Will store a serialized version of the message.\n//\n//   {\n//     // Create a message and serialize it.\n//     Foo foo;\n//     foo.set_text(\"Hello World!\");\n//     foo.add_numbers(1);\n//     foo.add_numbers(5);\n//     foo.add_numbers(42);\n//\n//     foo.SerializeToString(&data);\n//   }\n//\n//   {\n//     // Parse the serialized message and check that it contains the\n//     // correct data.\n//     Foo foo;\n//     foo.ParseFromString(data);\n//\n//     assert(foo.text() == \"Hello World!\");\n//     assert(foo.numbers_size() == 3);\n//     assert(foo.numbers(0) == 1);\n//     assert(foo.numbers(1) == 5);\n//     assert(foo.numbers(2) == 42);\n//   }\n//\n//   {\n//     // Same as the last block, but do it dynamically via the Message\n//     // reflection interface.\n//     Message* foo = new Foo;\n//     const Descriptor* descriptor = foo->GetDescriptor();\n//\n//     // Get the descriptors for the fields we're interested in and verify\n//     // their types.\n//     const FieldDescriptor* text_field = descriptor->FindFieldByName(\"text\");\n//     assert(text_field != NULL);\n//     assert(text_field->type() == FieldDescriptor::TYPE_STRING);\n//     assert(text_field->label() == FieldDescriptor::LABEL_OPTIONAL);\n//     const FieldDescriptor* numbers_field = descriptor->\n//                                            FindFieldByName(\"numbers\");\n//     assert(numbers_field != NULL);\n//     assert(numbers_field->type() == FieldDescriptor::TYPE_INT32);\n//     assert(numbers_field->label() == 
FieldDescriptor::LABEL_REPEATED);\n//\n//     // Parse the message.\n//     foo->ParseFromString(data);\n//\n//     // Use the reflection interface to examine the contents.\n//     const Reflection* reflection = foo->GetReflection();\n//     assert(reflection->GetString(*foo, text_field) == \"Hello World!\");\n//     assert(reflection->FieldSize(*foo, numbers_field) == 3);\n//     assert(reflection->GetRepeatedInt32(*foo, numbers_field, 0) == 1);\n//     assert(reflection->GetRepeatedInt32(*foo, numbers_field, 1) == 5);\n//     assert(reflection->GetRepeatedInt32(*foo, numbers_field, 2) == 42);\n//\n//     delete foo;\n//   }\n\n#ifndef GOOGLE_PROTOBUF_MESSAGE_H__\n#define GOOGLE_PROTOBUF_MESSAGE_H__\n\n#include <iosfwd>\n#include <string>\n#include <google/protobuf/stubs/type_traits.h>\n#include <vector>\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/message_lite.h>\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/descriptor.h>\n\n\n#define GOOGLE_PROTOBUF_HAS_ONEOF\n#define GOOGLE_PROTOBUF_HAS_ARENAS\n\nnamespace google {\nnamespace protobuf {\n\n// Defined in this file.\nclass Message;\nclass Reflection;\nclass MessageFactory;\n\n// Defined in other files.\nclass MapKey;\nclass MapValueRef;\nclass MapIterator;\nclass MapReflectionTester;\n\nnamespace internal {\nclass MapFieldBase;\n}\nclass UnknownFieldSet;         // unknown_field_set.h\nnamespace io {\nclass ZeroCopyInputStream;     // zero_copy_stream.h\nclass ZeroCopyOutputStream;    // zero_copy_stream.h\nclass CodedInputStream;        // coded_stream.h\nclass CodedOutputStream;       // coded_stream.h\n}\nnamespace python {\nclass MapReflectionFriend;     // scalar_map_container.h\n}\n\n\ntemplate<typename T>\nclass RepeatedField;     // repeated_field.h\n\ntemplate<typename T>\nclass RepeatedPtrField;  // repeated_field.h\n\n// A container to hold message metadata.\nstruct Metadata {\n  const Descriptor* descriptor;\n  const Reflection* reflection;\n};\n\n// 
Abstract interface for protocol messages.\n//\n// See also MessageLite, which contains most every-day operations.  Message\n// adds descriptors and reflection on top of that.\n//\n// The methods of this class that are virtual but not pure-virtual have\n// default implementations based on reflection.  Message classes which are\n// optimized for speed will want to override these with faster implementations,\n// but classes optimized for code size may be happy with keeping them.  See\n// the optimize_for option in descriptor.proto.\nclass LIBPROTOBUF_EXPORT Message : public MessageLite {\n public:\n  inline Message() {}\n  virtual ~Message() {}\n\n  // Basic Operations ------------------------------------------------\n\n  // Construct a new instance of the same type.  Ownership is passed to the\n  // caller.  (This is also defined in MessageLite, but is defined again here\n  // for return-type covariance.)\n  virtual Message* New() const = 0;\n\n  // Construct a new instance on the arena. Ownership is passed to the caller\n  // if arena is a NULL. Default implementation allows for API compatibility\n  // during the Arena transition.\n  virtual Message* New(::google::protobuf::Arena* arena) const {\n    Message* message = New();\n    if (arena != NULL) {\n      arena->Own(message);\n    }\n    return message;\n  }\n\n  // Make this message into a copy of the given message.  The given message\n  // must have the same descriptor, but need not necessarily be the same class.\n  // By default this is just implemented as \"Clear(); MergeFrom(from);\".\n  virtual void CopyFrom(const Message& from);\n\n  // Merge the fields from the given message into this message.  Singular\n  // fields will be overwritten, if specified in from, except for embedded\n  // messages which will be merged.  Repeated fields will be concatenated.\n  // The given message must be of the same type as this message (i.e. 
the\n  // exact same class).\n  virtual void MergeFrom(const Message& from);\n\n  // Verifies that IsInitialized() returns true.  GOOGLE_CHECK-fails otherwise, with\n  // a nice error message.\n  void CheckInitialized() const;\n\n  // Slowly build a list of all required fields that are not set.\n  // This is much, much slower than IsInitialized() as it is implemented\n  // purely via reflection.  Generally, you should not call this unless you\n  // have already determined that an error exists by calling IsInitialized().\n  void FindInitializationErrors(std::vector<string>* errors) const;\n\n  // Like FindInitializationErrors, but joins all the strings, delimited by\n  // commas, and returns them.\n  string InitializationErrorString() const;\n\n  // Clears all unknown fields from this message and all embedded messages.\n  // Normally, if unknown tag numbers are encountered when parsing a message,\n  // the tag and value are stored in the message's UnknownFieldSet and\n  // then written back out when the message is serialized.  This allows servers\n  // which simply route messages to other servers to pass through messages\n  // that have new field definitions which they don't yet know about.  However,\n  // this behavior can have security implications.  To avoid it, call this\n  // method after parsing.\n  //\n  // See Reflection::GetUnknownFields() for more on unknown fields.\n  virtual void DiscardUnknownFields();\n\n  // Computes (an estimate of) the total number of bytes currently used for\n  // storing the message in memory.  The default implementation calls the\n  // Reflection object's SpaceUsed() method.\n  //\n  // SpaceUsed() is noticeably slower than ByteSize(), as it is implemented\n  // using reflection (rather than the generated code implementation for\n  // ByteSize()). 
Like ByteSize(), its CPU time is linear in the number of\n  // fields defined for the proto.\n  virtual int SpaceUsed() const;\n\n  // Debugging & Testing----------------------------------------------\n\n  // Generates a human readable form of this message, useful for debugging\n  // and other purposes.\n  string DebugString() const;\n  // Like DebugString(), but with less whitespace.\n  string ShortDebugString() const;\n  // Like DebugString(), but do not escape UTF-8 byte sequences.\n  string Utf8DebugString() const;\n  // Convenience function useful in GDB.  Prints DebugString() to stdout.\n  void PrintDebugString() const;\n\n  // Heavy I/O -------------------------------------------------------\n  // Additional parsing and serialization methods not implemented by\n  // MessageLite because they are not supported by the lite library.\n\n  // Parse a protocol buffer from a file descriptor.  If successful, the entire\n  // input will be consumed.\n  bool ParseFromFileDescriptor(int file_descriptor);\n  // Like ParseFromFileDescriptor(), but accepts messages that are missing\n  // required fields.\n  bool ParsePartialFromFileDescriptor(int file_descriptor);\n  // Parse a protocol buffer from a C++ istream.  If successful, the entire\n  // input will be consumed.\n  bool ParseFromIstream(std::istream* input);\n  // Like ParseFromIstream(), but accepts messages that are missing\n  // required fields.\n  bool ParsePartialFromIstream(std::istream* input);\n\n  // Serialize the message and write it to the given file descriptor.  All\n  // required fields must be set.\n  bool SerializeToFileDescriptor(int file_descriptor) const;\n  // Like SerializeToFileDescriptor(), but allows missing required fields.\n  bool SerializePartialToFileDescriptor(int file_descriptor) const;\n  // Serialize the message and write it to the given C++ ostream.  
All\n  // required fields must be set.\n  bool SerializeToOstream(std::ostream* output) const;\n  // Like SerializeToOstream(), but allows missing required fields.\n  bool SerializePartialToOstream(std::ostream* output) const;\n\n\n  // Reflection-based methods ----------------------------------------\n  // These methods are pure-virtual in MessageLite, but Message provides\n  // reflection-based default implementations.\n\n  virtual string GetTypeName() const;\n  virtual void Clear();\n  virtual bool IsInitialized() const;\n  virtual void CheckTypeAndMergeFrom(const MessageLite& other);\n  virtual bool MergePartialFromCodedStream(io::CodedInputStream* input);\n  virtual size_t ByteSizeLong() const;\n  virtual void SerializeWithCachedSizes(io::CodedOutputStream* output) const;\n\n private:\n  // This is called only by the default implementation of ByteSize(), to\n  // update the cached size.  If you override ByteSize(), you do not need\n  // to override this.  If you do not override ByteSize(), you MUST override\n  // this; the default implementation will crash.\n  //\n  // The method is private because subclasses should never call it; only\n  // override it.  Yes, C++ lets you do that.  Crazy, huh?\n  virtual void SetCachedSize(int size) const;\n\n public:\n\n  // Introspection ---------------------------------------------------\n\n  // Typedef for backwards-compatibility.\n  typedef google::protobuf::Reflection Reflection;\n\n  // Get a Descriptor for this message's type.  This describes what\n  // fields the message contains, the types of those fields, etc.\n  const Descriptor* GetDescriptor() const { return GetMetadata().descriptor; }\n\n  // Get the Reflection interface for this Message, which can be used to\n  // read and modify the fields of the Message dynamically (in other words,\n  // without knowing the message type at compile time).  
This object remains\n  // property of the Message.\n  //\n  // This method remains virtual in case a subclass does not implement\n  // reflection and wants to override the default behavior.\n  virtual const Reflection* GetReflection() const {\n    return GetMetadata().reflection;\n  }\n\n protected:\n  // Get a struct containing the metadata for the Message. Most subclasses only\n  // need to implement this method, rather than the GetDescriptor() and\n  // GetReflection() wrappers.\n  virtual Metadata GetMetadata() const  = 0;\n\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Message);\n};\n\nnamespace internal {\n// Forward-declare interfaces used to implement RepeatedFieldRef.\n// These are protobuf internals that users shouldn't care about.\nclass RepeatedFieldAccessor;\n}  // namespace internal\n\n// Forward-declare RepeatedFieldRef templates. The second type parameter is\n// used for SFINAE tricks. Users should ignore it.\ntemplate<typename T, typename Enable = void>\nclass RepeatedFieldRef;\n\ntemplate<typename T, typename Enable = void>\nclass MutableRepeatedFieldRef;\n\n// This interface contains methods that can be used to dynamically access\n// and modify the fields of a protocol message.  Their semantics are\n// similar to the accessors the protocol compiler generates.\n//\n// To get the Reflection for a given Message, call Message::GetReflection().\n//\n// This interface is separate from Message only for efficiency reasons;\n// the vast majority of implementations of Message will share the same\n// implementation of Reflection (GeneratedMessageReflection,\n// defined in generated_message.h), and all Messages of a particular class\n// should share the same Reflection object (though you should not rely on\n// the latter fact).\n//\n// There are several ways that these methods can be used incorrectly.  
For\n// example, any of the following conditions will lead to undefined\n// results (probably assertion failures):\n// - The FieldDescriptor is not a field of this message type.\n// - The method called is not appropriate for the field's type.  For\n//   each field type in FieldDescriptor::TYPE_*, there is only one\n//   Get*() method, one Set*() method, and one Add*() method that is\n//   valid for that type.  It should be obvious which (except maybe\n//   for TYPE_BYTES, which are represented using strings in C++).\n// - A Get*() or Set*() method for singular fields is called on a repeated\n//   field.\n// - GetRepeated*(), SetRepeated*(), or Add*() is called on a non-repeated\n//   field.\n// - The Message object passed to any method is not of the right type for\n//   this Reflection object (i.e. message.GetReflection() != reflection).\n//\n// You might wonder why there is not any abstract representation for a field\n// of arbitrary type.  E.g., why isn't there just a \"GetField()\" method that\n// returns \"const Field&\", where \"Field\" is some class with accessors like\n// \"GetInt32Value()\".  The problem is that someone would have to deal with\n// allocating these Field objects.  For generated message classes, having to\n// allocate space for an additional object to wrap every field would at least\n// double the message's memory footprint, probably worse.  Allocating the\n// objects on-demand, on the other hand, would be expensive and prone to\n// memory leaks.  So, instead we ended up with this flat interface.\n//\n// TODO(kenton):  Create a utility class which callers can use to read and\n//   write fields from a Reflection without paying attention to the type.\nclass LIBPROTOBUF_EXPORT Reflection {\n public:\n  inline Reflection() {}\n  virtual ~Reflection();\n\n  // Get the UnknownFieldSet for the message.  This contains fields which\n  // were seen when the Message was parsed but were not recognized according\n  // to the Message's definition. 
For proto3 protos, this method will always\n  // return an empty UnknownFieldSet.\n  virtual const UnknownFieldSet& GetUnknownFields(\n      const Message& message) const = 0;\n  // Get a mutable pointer to the UnknownFieldSet for the message.  This\n  // contains fields which were seen when the Message was parsed but were not\n  // recognized according to the Message's definition. For proto3 protos, this\n  // method will return a valid mutable UnknownFieldSet pointer but modifying\n  // it won't affect the serialized bytes of the message.\n  virtual UnknownFieldSet* MutableUnknownFields(Message* message) const = 0;\n\n  // Estimate the amount of memory used by the message object.\n  virtual int SpaceUsed(const Message& message) const = 0;\n\n  // Check if the given non-repeated field is set.\n  virtual bool HasField(const Message& message,\n                        const FieldDescriptor* field) const = 0;\n\n  // Get the number of elements of a repeated field.\n  virtual int FieldSize(const Message& message,\n                        const FieldDescriptor* field) const = 0;\n\n  // Clear the value of a field, so that HasField() returns false or\n  // FieldSize() returns zero.\n  virtual void ClearField(Message* message,\n                          const FieldDescriptor* field) const = 0;\n\n  // Check if the oneof is set. Returns true if any field in oneof\n  // is set, false otherwise.\n  // TODO(jieluo) - make it pure virtual after updating all\n  // the subclasses.\n  virtual bool HasOneof(const Message& /*message*/,\n                        const OneofDescriptor* /*oneof_descriptor*/) const {\n    return false;\n  }\n\n  virtual void ClearOneof(Message* /*message*/,\n                          const OneofDescriptor* /*oneof_descriptor*/) const {}\n\n  // Returns the field descriptor if the oneof is set. 
NULL otherwise.\n  // TODO(jieluo) - make it pure virtual.\n  virtual const FieldDescriptor* GetOneofFieldDescriptor(\n      const Message& /*message*/,\n      const OneofDescriptor* /*oneof_descriptor*/) const {\n    return NULL;\n  }\n\n  // Removes the last element of a repeated field.\n  // We don't provide a way to remove any element other than the last\n  // because it invites inefficient use, such as O(n^2) filtering loops\n  // that should have been O(n).  If you want to remove an element other\n  // than the last, the best way to do it is to re-arrange the elements\n  // (using Swap()) so that the one you want removed is at the end, then\n  // call RemoveLast().\n  virtual void RemoveLast(Message* message,\n                          const FieldDescriptor* field) const = 0;\n  // Removes the last element of a repeated message field, and returns the\n  // pointer to the caller.  Caller takes ownership of the returned pointer.\n  virtual Message* ReleaseLast(Message* message,\n                               const FieldDescriptor* field) const = 0;\n\n  // Swap the complete contents of two messages.\n  virtual void Swap(Message* message1, Message* message2) const = 0;\n\n  // Swap fields listed in fields vector of two messages.\n  virtual void SwapFields(Message* message1,\n                          Message* message2,\n                          const std::vector<const FieldDescriptor*>& fields)\n      const = 0;\n\n  // Swap two elements of a repeated field.\n  virtual void SwapElements(Message* message,\n                            const FieldDescriptor* field,\n                            int index1,\n                            int index2) const = 0;\n\n  // List all fields of the message which are currently set.  This includes\n  // extensions.  Singular fields will only be listed if HasField(field) would\n  // return true and repeated fields will only be listed if FieldSize(field)\n  // would return non-zero.  
Fields (both normal fields and extension fields)\n  // will be listed ordered by field number.\n  virtual void ListFields(\n      const Message& message,\n      std::vector<const FieldDescriptor*>* output) const = 0;\n\n  // Singular field getters ------------------------------------------\n  // These get the value of a non-repeated field.  They return the default\n  // value for fields that aren't set.\n\n  virtual int32  GetInt32 (const Message& message,\n                           const FieldDescriptor* field) const = 0;\n  virtual int64  GetInt64 (const Message& message,\n                           const FieldDescriptor* field) const = 0;\n  virtual uint32 GetUInt32(const Message& message,\n                           const FieldDescriptor* field) const = 0;\n  virtual uint64 GetUInt64(const Message& message,\n                           const FieldDescriptor* field) const = 0;\n  virtual float  GetFloat (const Message& message,\n                           const FieldDescriptor* field) const = 0;\n  virtual double GetDouble(const Message& message,\n                           const FieldDescriptor* field) const = 0;\n  virtual bool   GetBool  (const Message& message,\n                           const FieldDescriptor* field) const = 0;\n  virtual string GetString(const Message& message,\n                           const FieldDescriptor* field) const = 0;\n  virtual const EnumValueDescriptor* GetEnum(\n      const Message& message, const FieldDescriptor* field) const = 0;\n\n  // GetEnumValue() returns an enum field's value as an integer rather than\n  // an EnumValueDescriptor*. If the integer value does not correspond to a\n  // known value descriptor, a new value descriptor is created. 
(Such a value\n  // will only be present when the new unknown-enum-value semantics are enabled\n  // for a message.)\n  virtual int GetEnumValue(\n      const Message& message, const FieldDescriptor* field) const;\n\n  // See MutableMessage() for the meaning of the \"factory\" parameter.\n  virtual const Message& GetMessage(const Message& message,\n                                    const FieldDescriptor* field,\n                                    MessageFactory* factory = NULL) const = 0;\n\n  // Get a string value without copying, if possible.\n  //\n  // GetString() necessarily returns a copy of the string.  This can be\n  // inefficient when the string is already stored in a string object in the\n  // underlying message.  GetStringReference() will return a reference to the\n  // underlying string in this case.  Otherwise, it will copy the string into\n  // *scratch and return that.\n  //\n  // Note:  It is perfectly reasonable and useful to write code like:\n  //     str = reflection->GetStringReference(field, &str);\n  //   This line would ensure that only one copy of the string is made\n  //   regardless of the field's underlying representation.  
When initializing\n  //   a newly-constructed string, though, it's just as fast and more readable\n  //   to use code like:\n  //     string str = reflection->GetString(message, field);\n  virtual const string& GetStringReference(const Message& message,\n                                           const FieldDescriptor* field,\n                                           string* scratch) const = 0;\n\n\n  // Singular field mutators -----------------------------------------\n  // These mutate the value of a non-repeated field.\n\n  virtual void SetInt32 (Message* message,\n                         const FieldDescriptor* field, int32  value) const = 0;\n  virtual void SetInt64 (Message* message,\n                         const FieldDescriptor* field, int64  value) const = 0;\n  virtual void SetUInt32(Message* message,\n                         const FieldDescriptor* field, uint32 value) const = 0;\n  virtual void SetUInt64(Message* message,\n                         const FieldDescriptor* field, uint64 value) const = 0;\n  virtual void SetFloat (Message* message,\n                         const FieldDescriptor* field, float  value) const = 0;\n  virtual void SetDouble(Message* message,\n                         const FieldDescriptor* field, double value) const = 0;\n  virtual void SetBool  (Message* message,\n                         const FieldDescriptor* field, bool   value) const = 0;\n  virtual void SetString(Message* message,\n                         const FieldDescriptor* field,\n                         const string& value) const = 0;\n  virtual void SetEnum  (Message* message,\n                         const FieldDescriptor* field,\n                         const EnumValueDescriptor* value) const = 0;\n  // Set an enum field's value with an integer rather than EnumValueDescriptor.\n  // If the value does not correspond to a known enum value, either behavior is\n  // undefined (for proto2 messages), or the value is accepted silently for\n  // messages with new 
unknown-enum-value semantics.\n  virtual void SetEnumValue(Message* message,\n                            const FieldDescriptor* field,\n                            int value) const;\n\n  // Get a mutable pointer to a field with a message type.  If a MessageFactory\n  // is provided, it will be used to construct instances of the sub-message;\n  // otherwise, the default factory is used.  If the field is an extension that\n  // does not live in the same pool as the containing message's descriptor (e.g.\n  // it lives in an overlay pool), then a MessageFactory must be provided.\n  // If you have no idea what that meant, then you probably don't need to worry\n  // about it (don't provide a MessageFactory).  WARNING:  If the\n  // FieldDescriptor is for a compiled-in extension, then\n  // factory->GetPrototype(field->message_type()) MUST return an instance of\n  // the compiled-in class for this type, NOT DynamicMessage.\n  virtual Message* MutableMessage(Message* message,\n                                  const FieldDescriptor* field,\n                                  MessageFactory* factory = NULL) const = 0;\n  // Replaces the message specified by 'field' with the already-allocated object\n  // sub_message, passing ownership to the message.  If the field contained a\n  // message, that message is deleted.  If sub_message is NULL, the field is\n  // cleared.\n  virtual void SetAllocatedMessage(Message* message,\n                                   Message* sub_message,\n                                   const FieldDescriptor* field) const = 0;\n  // Releases the message specified by 'field' and returns the pointer,\n  // ReleaseMessage() will return the message the message object if it exists.\n  // Otherwise, it may or may not return NULL.  
In any case, if the return value\n  // is non-NULL, the caller takes ownership of the pointer.\n  // If the field existed (HasField() is true), then the returned pointer will\n  // be the same as the pointer returned by MutableMessage().\n  // This function has the same effect as ClearField().\n  virtual Message* ReleaseMessage(Message* message,\n                                  const FieldDescriptor* field,\n                                  MessageFactory* factory = NULL) const = 0;\n\n\n  // Repeated field getters ------------------------------------------\n  // These get the value of one element of a repeated field.\n\n  virtual int32  GetRepeatedInt32 (const Message& message,\n                                   const FieldDescriptor* field,\n                                   int index) const = 0;\n  virtual int64  GetRepeatedInt64 (const Message& message,\n                                   const FieldDescriptor* field,\n                                   int index) const = 0;\n  virtual uint32 GetRepeatedUInt32(const Message& message,\n                                   const FieldDescriptor* field,\n                                   int index) const = 0;\n  virtual uint64 GetRepeatedUInt64(const Message& message,\n                                   const FieldDescriptor* field,\n                                   int index) const = 0;\n  virtual float  GetRepeatedFloat (const Message& message,\n                                   const FieldDescriptor* field,\n                                   int index) const = 0;\n  virtual double GetRepeatedDouble(const Message& message,\n                                   const FieldDescriptor* field,\n                                   int index) const = 0;\n  virtual bool   GetRepeatedBool  (const Message& message,\n                                   const FieldDescriptor* field,\n                                   int index) const = 0;\n  virtual string GetRepeatedString(const Message& message,\n                    
               const FieldDescriptor* field,\n                                   int index) const = 0;\n  virtual const EnumValueDescriptor* GetRepeatedEnum(\n      const Message& message,\n      const FieldDescriptor* field, int index) const = 0;\n  // GetRepeatedEnumValue() returns an enum field's value as an integer rather\n  // than an EnumValueDescriptor*. If the integer value does not correspond to a\n  // known value descriptor, a new value descriptor is created. (Such a value\n  // will only be present when the new unknown-enum-value semantics are enabled\n  // for a message.)\n  virtual int GetRepeatedEnumValue(\n      const Message& message,\n      const FieldDescriptor* field, int index) const;\n  virtual const Message& GetRepeatedMessage(\n      const Message& message,\n      const FieldDescriptor* field, int index) const = 0;\n\n  // See GetStringReference(), above.\n  virtual const string& GetRepeatedStringReference(\n      const Message& message, const FieldDescriptor* field,\n      int index, string* scratch) const = 0;\n\n\n  // Repeated field mutators -----------------------------------------\n  // These mutate the value of one element of a repeated field.\n\n  virtual void SetRepeatedInt32 (Message* message,\n                                 const FieldDescriptor* field,\n                                 int index, int32  value) const = 0;\n  virtual void SetRepeatedInt64 (Message* message,\n                                 const FieldDescriptor* field,\n                                 int index, int64  value) const = 0;\n  virtual void SetRepeatedUInt32(Message* message,\n                                 const FieldDescriptor* field,\n                                 int index, uint32 value) const = 0;\n  virtual void SetRepeatedUInt64(Message* message,\n                                 const FieldDescriptor* field,\n                                 int index, uint64 value) const = 0;\n  virtual void SetRepeatedFloat (Message* message,\n        
                         const FieldDescriptor* field,\n                                 int index, float  value) const = 0;\n  virtual void SetRepeatedDouble(Message* message,\n                                 const FieldDescriptor* field,\n                                 int index, double value) const = 0;\n  virtual void SetRepeatedBool  (Message* message,\n                                 const FieldDescriptor* field,\n                                 int index, bool   value) const = 0;\n  virtual void SetRepeatedString(Message* message,\n                                 const FieldDescriptor* field,\n                                 int index, const string& value) const = 0;\n  virtual void SetRepeatedEnum(Message* message,\n                               const FieldDescriptor* field, int index,\n                               const EnumValueDescriptor* value) const = 0;\n  // Set an enum field's value with an integer rather than EnumValueDescriptor.\n  // If the value does not correspond to a known enum value, either behavior is\n  // undefined (for proto2 messages), or the value is accepted silently for\n  // messages with new unknown-enum-value semantics.\n  virtual void SetRepeatedEnumValue(Message* message,\n                                    const FieldDescriptor* field, int index,\n                                    int value) const;\n  // Get a mutable pointer to an element of a repeated field with a message\n  // type.\n  virtual Message* MutableRepeatedMessage(\n      Message* message, const FieldDescriptor* field, int index) const = 0;\n\n\n  // Repeated field adders -------------------------------------------\n  // These add an element to a repeated field.\n\n  virtual void AddInt32 (Message* message,\n                         const FieldDescriptor* field, int32  value) const = 0;\n  virtual void AddInt64 (Message* message,\n                         const FieldDescriptor* field, int64  value) const = 0;\n  virtual void AddUInt32(Message* 
message,\n                         const FieldDescriptor* field, uint32 value) const = 0;\n  virtual void AddUInt64(Message* message,\n                         const FieldDescriptor* field, uint64 value) const = 0;\n  virtual void AddFloat (Message* message,\n                         const FieldDescriptor* field, float  value) const = 0;\n  virtual void AddDouble(Message* message,\n                         const FieldDescriptor* field, double value) const = 0;\n  virtual void AddBool  (Message* message,\n                         const FieldDescriptor* field, bool   value) const = 0;\n  virtual void AddString(Message* message,\n                         const FieldDescriptor* field,\n                         const string& value) const = 0;\n  virtual void AddEnum  (Message* message,\n                         const FieldDescriptor* field,\n                         const EnumValueDescriptor* value) const = 0;\n  // Set an enum field's value with an integer rather than EnumValueDescriptor.\n  // If the value does not correspond to a known enum value, either behavior is\n  // undefined (for proto2 messages), or the value is accepted silently for\n  // messages with new unknown-enum-value semantics.\n  virtual void AddEnumValue(Message* message,\n                            const FieldDescriptor* field,\n                            int value) const;\n  // See MutableMessage() for comments on the \"factory\" parameter.\n  virtual Message* AddMessage(Message* message,\n                              const FieldDescriptor* field,\n                              MessageFactory* factory = NULL) const = 0;\n\n  // Appends an already-allocated object 'new_entry' to the repeated field\n  // specifyed by 'field' passing ownership to the message.\n  // TODO(tmarek): Make virtual after all subclasses have been\n  // updated.\n  virtual void AddAllocatedMessage(Message* /* message */,\n                                   const FieldDescriptor* /*field */,\n                               
    Message* /* new_entry */) const {}\n\n\n  // Get a RepeatedFieldRef object that can be used to read the underlying\n  // repeated field. The type parameter T must be set according to the\n  // field's cpp type. The following table shows the mapping from cpp type\n  // to acceptable T.\n  //\n  //   field->cpp_type()      T\n  //   CPPTYPE_INT32        int32\n  //   CPPTYPE_UINT32       uint32\n  //   CPPTYPE_INT64        int64\n  //   CPPTYPE_UINT64       uint64\n  //   CPPTYPE_DOUBLE       double\n  //   CPPTYPE_FLOAT        float\n  //   CPPTYPE_BOOL         bool\n  //   CPPTYPE_ENUM         generated enum type or int32\n  //   CPPTYPE_STRING       string\n  //   CPPTYPE_MESSAGE      generated message type or google::protobuf::Message\n  //\n  // A RepeatedFieldRef object can be copied and the resulted object will point\n  // to the same repeated field in the same message. The object can be used as\n  // long as the message is not destroyed.\n  //\n  // Note that to use this method users need to include the header file\n  // \"google/protobuf/reflection.h\" (which defines the RepeatedFieldRef\n  // class templates).\n  template<typename T>\n  RepeatedFieldRef<T> GetRepeatedFieldRef(\n      const Message& message, const FieldDescriptor* field) const;\n\n  // Like GetRepeatedFieldRef() but return an object that can also be used\n  // manipulate the underlying repeated field.\n  template<typename T>\n  MutableRepeatedFieldRef<T> GetMutableRepeatedFieldRef(\n      Message* message, const FieldDescriptor* field) const;\n\n  // DEPRECATED. Please use Get(Mutable)RepeatedFieldRef() for repeated field\n  // access. The following repeated field accesors will be removed in the\n  // future.\n  //\n  // Repeated field accessors  -------------------------------------------------\n  // The methods above, e.g. GetRepeatedInt32(msg, fd, index), provide singular\n  // access to the data in a RepeatedField.  
The methods below provide aggregate\n  // access by exposing the RepeatedField object itself with the Message.\n  // Applying these templates to inappropriate types will lead to an undefined\n  // reference at link time (e.g. GetRepeatedField<***double>), or possibly a\n  // template matching error at compile time (e.g. GetRepeatedPtrField<File>).\n  //\n  // Usage example: my_doubs = refl->GetRepeatedField<double>(msg, fd);\n\n  // DEPRECATED. Please use GetRepeatedFieldRef().\n  //\n  // for T = Cord and all protobuf scalar types except enums.\n  template<typename T>\n  const RepeatedField<T>& GetRepeatedField(\n      const Message&, const FieldDescriptor*) const;\n\n  // DEPRECATED. Please use GetMutableRepeatedFieldRef().\n  //\n  // for T = Cord and all protobuf scalar types except enums.\n  template<typename T>\n  RepeatedField<T>* MutableRepeatedField(\n      Message*, const FieldDescriptor*) const;\n\n  // DEPRECATED. Please use GetRepeatedFieldRef().\n  //\n  // for T = string, google::protobuf::internal::StringPieceField\n  //         google::protobuf::Message & descendants.\n  template<typename T>\n  const RepeatedPtrField<T>& GetRepeatedPtrField(\n      const Message&, const FieldDescriptor*) const;\n\n  // DEPRECATED. Please use GetMutableRepeatedFieldRef().\n  //\n  // for T = string, google::protobuf::internal::StringPieceField\n  //         google::protobuf::Message & descendants.\n  template<typename T>\n  RepeatedPtrField<T>* MutableRepeatedPtrField(\n      Message*, const FieldDescriptor*) const;\n\n  // Extensions ----------------------------------------------------------------\n\n  // Try to find an extension of this message type by fully-qualified field\n  // name.  
Returns NULL if no extension is known for this name or number.\n  virtual const FieldDescriptor* FindKnownExtensionByName(\n      const string& name) const = 0;\n\n  // Try to find an extension of this message type by field number.\n  // Returns NULL if no extension is known for this name or number.\n  virtual const FieldDescriptor* FindKnownExtensionByNumber(\n      int number) const = 0;\n\n  // Feature Flags -------------------------------------------------------------\n\n  // Does this message support storing arbitrary integer values in enum fields?\n  // If |true|, GetEnumValue/SetEnumValue and associated repeated-field versions\n  // take arbitrary integer values, and the legacy GetEnum() getter will\n  // dynamically create an EnumValueDescriptor for any integer value without\n  // one. If |false|, setting an unknown enum value via the integer-based\n  // setters results in undefined behavior (in practice, GOOGLE_DCHECK-fails).\n  //\n  // Generic code that uses reflection to handle messages with enum fields\n  // should check this flag before using the integer-based setter, and either\n  // downgrade to a compatible value or use the UnknownFieldSet if not. 
For\n  // example:\n  //\n  // int new_value = GetValueFromApplicationLogic();\n  // if (reflection->SupportsUnknownEnumValues()) {\n  //     reflection->SetEnumValue(message, field, new_value);\n  // } else {\n  //     if (field_descriptor->enum_type()->\n  //             FindValueByNumver(new_value) != NULL) {\n  //         reflection->SetEnumValue(message, field, new_value);\n  //     } else if (emit_unknown_enum_values) {\n  //         reflection->MutableUnknownFields(message)->AddVarint(\n  //             field->number(),\n  //             new_value);\n  //     } else {\n  //         // convert value to a compatible/default value.\n  //         new_value = CompatibleDowngrade(new_value);\n  //         reflection->SetEnumValue(message, field, new_value);\n  //     }\n  // }\n  virtual bool SupportsUnknownEnumValues() const { return false; }\n\n  // Returns the MessageFactory associated with this message.  This can be\n  // useful for determining if a message is a generated message or not, for\n  // example:\n  //\n  // if (message->GetReflection()->GetMessageFactory() ==\n  //     google::protobuf::MessageFactory::generated_factory()) {\n  //   // This is a generated message.\n  // }\n  //\n  // It can also be used to create more messages of this type, though\n  // Message::New() is an easier way to accomplish this.\n  virtual MessageFactory* GetMessageFactory() const;\n\n  // ---------------------------------------------------------------------------\n\n protected:\n  // Obtain a pointer to a Repeated Field Structure and do some type checking:\n  //   on field->cpp_type(),\n  //   on field->field_option().ctype() (if ctype >= 0)\n  //   of field->message_type() (if message_type != NULL).\n  // We use 2 routine rather than 4 (const vs mutable) x (scalar vs pointer).\n  virtual void* MutableRawRepeatedField(\n      Message* message, const FieldDescriptor* field, FieldDescriptor::CppType,\n      int ctype, const Descriptor* message_type) const = 0;\n\n  // 
TODO(jieluo) - make it pure virtual after updating all the subclasses.\n  virtual const void* GetRawRepeatedField(\n      const Message& message, const FieldDescriptor* field,\n      FieldDescriptor::CppType cpptype, int ctype,\n      const Descriptor* message_type) const {\n    return MutableRawRepeatedField(\n        const_cast<Message*>(&message), field, cpptype, ctype, message_type);\n  }\n\n  // The following methods are used to implement (Mutable)RepeatedFieldRef.\n  // A Ref object will store a raw pointer to the repeated field data (obtained\n  // from RepeatedFieldData()) and a pointer to a Accessor (obtained from\n  // RepeatedFieldAccessor) which will be used to access the raw data.\n  //\n  // TODO(xiaofeng): Make these methods pure-virtual.\n\n  // Returns a raw pointer to the repeated field\n  //\n  // \"cpp_type\" and \"message_type\" are decuded from the type parameter T passed\n  // to Get(Mutable)RepeatedFieldRef. If T is a generated message type,\n  // \"message_type\" should be set to its descriptor. Otherwise \"message_type\"\n  // should be set to NULL. 
Implementations of this method should check whether\n  // \"cpp_type\"/\"message_type\" is consistent with the actual type of the field.\n  // We use 1 routine rather than 2 (const vs mutable) because it is protected\n  // and it doesn't change the message.\n  virtual void* RepeatedFieldData(\n      Message* message, const FieldDescriptor* field,\n      FieldDescriptor::CppType cpp_type,\n      const Descriptor* message_type) const;\n\n  // The returned pointer should point to a singleton instance which implements\n  // the RepeatedFieldAccessor interface.\n  virtual const internal::RepeatedFieldAccessor* RepeatedFieldAccessor(\n      const FieldDescriptor* field) const;\n\n private:\n  template<typename T, typename Enable>\n  friend class RepeatedFieldRef;\n  template<typename T, typename Enable>\n  friend class MutableRepeatedFieldRef;\n  friend class ::google::protobuf::python::MapReflectionFriend;\n\n  // Special version for specialized implementations of string.  We can't call\n  // MutableRawRepeatedField directly here because we don't have access to\n  // FieldOptions::* which are defined in descriptor.pb.h.  Including that\n  // file here is not possible because it would cause a circular include cycle.\n  // We use 1 routine rather than 2 (const vs mutable) because it is private\n  // and mutable a repeated string field doesn't change the message.\n  void* MutableRawRepeatedString(\n      Message* message, const FieldDescriptor* field, bool is_string) const;\n\n  friend class MapReflectionTester;\n  // TODO(jieluo) - make the map APIs pure virtual after updating\n  // all the subclasses.\n  // Returns true if key is in map. 
Returns false if key is not in map field.\n  virtual bool ContainsMapKey(const Message& /* message*/,\n                              const FieldDescriptor* /* field */,\n                              const MapKey& /* key */) const {\n    return false;\n  }\n\n  // If key is in map field: Saves the value pointer to val and returns\n  // false. If key in not in map field: Insert the key into map, saves\n  // value pointer to val and retuns true.\n  virtual bool InsertOrLookupMapValue(Message* /* message */,\n                                      const FieldDescriptor* /* field */,\n                                      const MapKey& /* key */,\n                                      MapValueRef* /* val */) const {\n    return false;\n  }\n\n  // Delete and returns true if key is in the map field. Returns false\n  // otherwise.\n  virtual bool DeleteMapValue(Message* /* mesage */,\n                              const FieldDescriptor* /* field */,\n                              const MapKey& /* key */) const {\n    return false;\n  }\n\n  // Returns a MapIterator referring to the first element in the map field.\n  // If the map field is empty, this function returns the same as\n  // reflection::MapEnd. Mutation to the field may invalidate the iterator.\n  virtual MapIterator MapBegin(\n      Message* message,\n      const FieldDescriptor* field) const;\n\n  // Returns a MapIterator referring to the theoretical element that would\n  // follow the last element in the map field. It does not point to any\n  // real element. Mutation to the field may invalidate the iterator.\n  virtual MapIterator MapEnd(\n      Message* message,\n      const FieldDescriptor* field) const;\n\n  // Get the number of <key, value> pair of a map field. 
The result may be\n  // different from FieldSize which can have duplicate keys.\n  virtual int MapSize(const Message& /* message */,\n                      const FieldDescriptor* /* field */) const {\n    return 0;\n  }\n\n  // Help method for MapIterator.\n  friend class MapIterator;\n  virtual internal::MapFieldBase* MapData(\n      Message* /* message */, const FieldDescriptor* /* field */) const {\n    return NULL;\n  }\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Reflection);\n};\n\n// Abstract interface for a factory for message objects.\nclass LIBPROTOBUF_EXPORT MessageFactory {\n public:\n  inline MessageFactory() {}\n  virtual ~MessageFactory();\n\n  // Given a Descriptor, gets or constructs the default (prototype) Message\n  // of that type.  You can then call that message's New() method to construct\n  // a mutable message of that type.\n  //\n  // Calling this method twice with the same Descriptor returns the same\n  // object.  The returned object remains property of the factory.  Also, any\n  // objects created by calling the prototype's New() method share some data\n  // with the prototype, so these must be destroyed before the MessageFactory\n  // is destroyed.\n  //\n  // The given descriptor must outlive the returned message, and hence must\n  // outlive the MessageFactory.\n  //\n  // Some implementations do not support all types.  
GetPrototype() will\n  // return NULL if the descriptor passed in is not supported.\n  //\n  // This method may or may not be thread-safe depending on the implementation.\n  // Each implementation should document its own degree thread-safety.\n  virtual const Message* GetPrototype(const Descriptor* type) = 0;\n\n  // Gets a MessageFactory which supports all generated, compiled-in messages.\n  // In other words, for any compiled-in type FooMessage, the following is true:\n  //   MessageFactory::generated_factory()->GetPrototype(\n  //     FooMessage::descriptor()) == FooMessage::default_instance()\n  // This factory supports all types which are found in\n  // DescriptorPool::generated_pool().  If given a descriptor from any other\n  // pool, GetPrototype() will return NULL.  (You can also check if a\n  // descriptor is for a generated message by checking if\n  // descriptor->file()->pool() == DescriptorPool::generated_pool().)\n  //\n  // This factory is 100% thread-safe; calling GetPrototype() does not modify\n  // any shared data.\n  //\n  // This factory is a singleton.  The caller must not delete the object.\n  static MessageFactory* generated_factory();\n\n  // For internal use only:  Registers a .proto file at static initialization\n  // time, to be placed in generated_factory.  The first time GetPrototype()\n  // is called with a descriptor from this file, |register_messages| will be\n  // called, with the file name as the parameter.  It must call\n  // InternalRegisterGeneratedMessage() (below) to register each message type\n  // in the file.  This strange mechanism is necessary because descriptors are\n  // built lazily, so we can't register types by their descriptor until we\n  // know that the descriptor exists.  |filename| must be a permanent string.\n  static void InternalRegisterGeneratedFile(\n      const char* filename, void (*register_messages)(const string&));\n\n  // For internal use only:  Registers a message type.  
Called only by the\n  // functions which are registered with InternalRegisterGeneratedFile(),\n  // above.\n  static void InternalRegisterGeneratedMessage(const Descriptor* descriptor,\n                                               const Message* prototype);\n\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MessageFactory);\n};\n\n#define DECLARE_GET_REPEATED_FIELD(TYPE)                         \\\ntemplate<>                                                       \\\nLIBPROTOBUF_EXPORT                                               \\\nconst RepeatedField<TYPE>& Reflection::GetRepeatedField<TYPE>(   \\\n    const Message& message, const FieldDescriptor* field) const; \\\n                                                                 \\\ntemplate<>                                                       \\\nLIBPROTOBUF_EXPORT                                               \\\nRepeatedField<TYPE>* Reflection::MutableRepeatedField<TYPE>(     \\\n    Message* message, const FieldDescriptor* field) const;\n\nDECLARE_GET_REPEATED_FIELD(int32)\nDECLARE_GET_REPEATED_FIELD(int64)\nDECLARE_GET_REPEATED_FIELD(uint32)\nDECLARE_GET_REPEATED_FIELD(uint64)\nDECLARE_GET_REPEATED_FIELD(float)\nDECLARE_GET_REPEATED_FIELD(double)\nDECLARE_GET_REPEATED_FIELD(bool)\n\n#undef DECLARE_GET_REPEATED_FIELD\n\n// =============================================================================\n// Implementation details for {Get,Mutable}RawRepeatedPtrField.  
We provide\n// specializations for <string>, <StringPieceField> and <Message> and handle\n// everything else with the default template which will match any type having\n// a method with signature \"static const google::protobuf::Descriptor* descriptor()\".\n// Such a type presumably is a descendant of google::protobuf::Message.\n\ntemplate<>\ninline const RepeatedPtrField<string>& Reflection::GetRepeatedPtrField<string>(\n    const Message& message, const FieldDescriptor* field) const {\n  return *static_cast<RepeatedPtrField<string>* >(\n      MutableRawRepeatedString(const_cast<Message*>(&message), field, true));\n}\n\ntemplate<>\ninline RepeatedPtrField<string>* Reflection::MutableRepeatedPtrField<string>(\n    Message* message, const FieldDescriptor* field) const {\n  return static_cast<RepeatedPtrField<string>* >(\n      MutableRawRepeatedString(message, field, true));\n}\n\n\n// -----\n\ntemplate<>\ninline const RepeatedPtrField<Message>& Reflection::GetRepeatedPtrField(\n    const Message& message, const FieldDescriptor* field) const {\n  return *static_cast<const RepeatedPtrField<Message>* >(\n      GetRawRepeatedField(message, field, FieldDescriptor::CPPTYPE_MESSAGE,\n                          -1, NULL));\n}\n\ntemplate<>\ninline RepeatedPtrField<Message>* Reflection::MutableRepeatedPtrField(\n    Message* message, const FieldDescriptor* field) const {\n  return static_cast<RepeatedPtrField<Message>* >(\n      MutableRawRepeatedField(message, field,\n          FieldDescriptor::CPPTYPE_MESSAGE, -1,\n          NULL));\n}\n\ntemplate<typename PB>\ninline const RepeatedPtrField<PB>& Reflection::GetRepeatedPtrField(\n    const Message& message, const FieldDescriptor* field) const {\n  return *static_cast<const RepeatedPtrField<PB>* >(\n      GetRawRepeatedField(message, field, FieldDescriptor::CPPTYPE_MESSAGE,\n                          -1, PB::default_instance().GetDescriptor()));\n}\n\ntemplate<typename PB>\ninline RepeatedPtrField<PB>* 
Reflection::MutableRepeatedPtrField(\n    Message* message, const FieldDescriptor* field) const {\n  return static_cast<RepeatedPtrField<PB>* >(\n      MutableRawRepeatedField(message, field,\n          FieldDescriptor::CPPTYPE_MESSAGE, -1,\n          PB::default_instance().GetDescriptor()));\n}\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_MESSAGE_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/message_lite.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Authors: wink@google.com (Wink Saville),\n//          kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Defines MessageLite, the abstract interface implemented by all (lite\n// and non-lite) protocol message objects.\n\n#ifndef GOOGLE_PROTOBUF_MESSAGE_LITE_H__\n#define GOOGLE_PROTOBUF_MESSAGE_LITE_H__\n\n#include <google/protobuf/stubs/common.h>\n\n\nnamespace google {\nnamespace protobuf {\n  class Arena;\nnamespace io {\n  class CodedInputStream;\n  class CodedOutputStream;\n  class ZeroCopyInputStream;\n  class ZeroCopyOutputStream;\n}\nnamespace internal {\n  class WireFormatLite;\n}\n\n// Interface to light weight protocol messages.\n//\n// This interface is implemented by all protocol message objects.  Non-lite\n// messages additionally implement the Message interface, which is a\n// subclass of MessageLite.  Use MessageLite instead when you only need\n// the subset of features which it supports -- namely, nothing that uses\n// descriptors or reflection.  
You can instruct the protocol compiler\n// to generate classes which implement only MessageLite, not the full\n// Message interface, by adding the following line to the .proto file:\n//\n//   option optimize_for = LITE_RUNTIME;\n//\n// This is particularly useful on resource-constrained systems where\n// the full protocol buffers runtime library is too big.\n//\n// Note that on non-constrained systems (e.g. servers) when you need\n// to link in lots of protocol definitions, a better way to reduce\n// total code footprint is to use optimize_for = CODE_SIZE.  This\n// will make the generated code smaller while still supporting all the\n// same features (at the expense of speed).  optimize_for = LITE_RUNTIME\n// is best when you only have a small number of message types linked\n// into your binary, in which case the size of the protocol buffers\n// runtime itself is the biggest problem.\nclass LIBPROTOBUF_EXPORT MessageLite {\n public:\n  inline MessageLite() {}\n  virtual ~MessageLite() {}\n\n  // Basic Operations ------------------------------------------------\n\n  // Get the name of this message type, e.g. \"foo.bar.BazProto\".\n  virtual string GetTypeName() const = 0;\n\n  // Construct a new instance of the same type.  Ownership is passed to the\n  // caller.\n  virtual MessageLite* New() const = 0;\n\n  // Construct a new instance on the arena. Ownership is passed to the caller\n  // if arena is a NULL. Default implementation for backwards compatibility.\n  virtual MessageLite* New(::google::protobuf::Arena* arena) const;\n\n  // Get the arena, if any, associated with this message. Virtual method\n  // required for generic operations but most arena-related operations should\n  // use the GetArenaNoVirtual() generated-code method. 
Default implementation\n  // to reduce code size by avoiding the need for per-type implementations when\n  // types do not implement arena support.\n  virtual ::google::protobuf::Arena* GetArena() const { return NULL; }\n\n  // Get a pointer that may be equal to this message's arena, or may not be. If\n  // the value returned by this method is equal to some arena pointer, then this\n  // message is on that arena; however, if this message is on some arena, this\n  // method may or may not return that arena's pointer. As a tradeoff, this\n  // method may be more efficient than GetArena(). The intent is to allow\n  // underlying representations that use e.g. tagged pointers to sometimes store\n  // the arena pointer directly, and sometimes in a more indirect way, and allow\n  // a fastpath comparison against the arena pointer when it's easy to obtain.\n  virtual void* GetMaybeArenaPointer() const { return GetArena(); }\n\n  // Clear all fields of the message and set them to their default values.\n  // Clear() avoids freeing memory, assuming that any memory allocated\n  // to hold parts of the message will be needed again to hold the next\n  // message.  If you actually want to free the memory used by a Message,\n  // you must delete it.\n  virtual void Clear() = 0;\n\n  // Quickly check if all required fields have values set.\n  virtual bool IsInitialized() const = 0;\n\n  // This is not implemented for Lite messages -- it just returns \"(cannot\n  // determine missing fields for lite message)\".  However, it is implemented\n  // for full messages.  See message.h.\n  virtual string InitializationErrorString() const;\n\n  // If |other| is the exact same class as this, calls MergeFrom().  Otherwise,\n  // results are undefined (probably crash).\n  virtual void CheckTypeAndMergeFrom(const MessageLite& other) = 0;\n\n  // Parsing ---------------------------------------------------------\n  // Methods for parsing in protocol buffer format.  
Most of these are\n  // just simple wrappers around MergeFromCodedStream().  Clear() will be called\n  // before merging the input.\n\n  // Fill the message with a protocol buffer parsed from the given input stream.\n  // Returns false on a read error or if the input is in the wrong format.  A\n  // successful return does not indicate the entire input is consumed, ensure\n  // you call ConsumedEntireMessage() to check that if applicable.\n  bool ParseFromCodedStream(io::CodedInputStream* input);\n  // Like ParseFromCodedStream(), but accepts messages that are missing\n  // required fields.\n  bool ParsePartialFromCodedStream(io::CodedInputStream* input);\n  // Read a protocol buffer from the given zero-copy input stream.  If\n  // successful, the entire input will be consumed.\n  bool ParseFromZeroCopyStream(io::ZeroCopyInputStream* input);\n  // Like ParseFromZeroCopyStream(), but accepts messages that are missing\n  // required fields.\n  bool ParsePartialFromZeroCopyStream(io::ZeroCopyInputStream* input);\n  // Read a protocol buffer from the given zero-copy input stream, expecting\n  // the message to be exactly \"size\" bytes long.  If successful, exactly\n  // this many bytes will have been consumed from the input.\n  bool ParseFromBoundedZeroCopyStream(io::ZeroCopyInputStream* input, int size);\n  // Like ParseFromBoundedZeroCopyStream(), but accepts messages that are\n  // missing required fields.\n  bool ParsePartialFromBoundedZeroCopyStream(io::ZeroCopyInputStream* input,\n                                             int size);\n  // Parses a protocol buffer contained in a string. 
Returns true on success.\n  // This function takes a string in the (non-human-readable) binary wire\n  // format, matching the encoding output by MessageLite::SerializeToString().\n  // If you'd like to convert a human-readable string into a protocol buffer\n  // object, see google::protobuf::TextFormat::ParseFromString().\n  bool ParseFromString(const string& data);\n  // Like ParseFromString(), but accepts messages that are missing\n  // required fields.\n  bool ParsePartialFromString(const string& data);\n  // Parse a protocol buffer contained in an array of bytes.\n  bool ParseFromArray(const void* data, int size);\n  // Like ParseFromArray(), but accepts messages that are missing\n  // required fields.\n  bool ParsePartialFromArray(const void* data, int size);\n\n\n  // Reads a protocol buffer from the stream and merges it into this\n  // Message.  Singular fields read from the input overwrite what is\n  // already in the Message and repeated fields are appended to those\n  // already present.\n  //\n  // It is the responsibility of the caller to call input->LastTagWas()\n  // (for groups) or input->ConsumedEntireMessage() (for non-groups) after\n  // this returns to verify that the message's end was delimited correctly.\n  //\n  // ParsefromCodedStream() is implemented as Clear() followed by\n  // MergeFromCodedStream().\n  bool MergeFromCodedStream(io::CodedInputStream* input);\n\n  // Like MergeFromCodedStream(), but succeeds even if required fields are\n  // missing in the input.\n  //\n  // MergeFromCodedStream() is just implemented as MergePartialFromCodedStream()\n  // followed by IsInitialized().\n  virtual bool MergePartialFromCodedStream(io::CodedInputStream* input) = 0;\n\n\n  // Serialization ---------------------------------------------------\n  // Methods for serializing in protocol buffer format.  
Most of these\n  // are just simple wrappers around ByteSize() and SerializeWithCachedSizes().\n\n  // Write a protocol buffer of this message to the given output.  Returns\n  // false on a write error.  If the message is missing required fields,\n  // this may GOOGLE_CHECK-fail.\n  bool SerializeToCodedStream(io::CodedOutputStream* output) const;\n  // Like SerializeToCodedStream(), but allows missing required fields.\n  bool SerializePartialToCodedStream(io::CodedOutputStream* output) const;\n  // Write the message to the given zero-copy output stream.  All required\n  // fields must be set.\n  bool SerializeToZeroCopyStream(io::ZeroCopyOutputStream* output) const;\n  // Like SerializeToZeroCopyStream(), but allows missing required fields.\n  bool SerializePartialToZeroCopyStream(io::ZeroCopyOutputStream* output) const;\n  // Serialize the message and store it in the given string.  All required\n  // fields must be set.\n  bool SerializeToString(string* output) const;\n  // Like SerializeToString(), but allows missing required fields.\n  bool SerializePartialToString(string* output) const;\n  // Serialize the message and store it in the given byte array.  All required\n  // fields must be set.\n  bool SerializeToArray(void* data, int size) const;\n  // Like SerializeToArray(), but allows missing required fields.\n  bool SerializePartialToArray(void* data, int size) const;\n\n  // Make a string encoding the message. Is equivalent to calling\n  // SerializeToString() on a string and using that.  
Returns the empty\n  // string if SerializeToString() would have returned an error.\n  // Note: If you intend to generate many such strings, you may\n  // reduce heap fragmentation by instead re-using the same string\n  // object with calls to SerializeToString().\n  string SerializeAsString() const;\n  // Like SerializeAsString(), but allows missing required fields.\n  string SerializePartialAsString() const;\n\n  // Like SerializeToString(), but appends to the data to the string's existing\n  // contents.  All required fields must be set.\n  bool AppendToString(string* output) const;\n  // Like AppendToString(), but allows missing required fields.\n  bool AppendPartialToString(string* output) const;\n\n  // Computes the serialized size of the message.  This recursively calls\n  // ByteSize() on all embedded messages.  Subclasses MUST override either\n  // ByteSize() or ByteSizeLong() (overriding both is fine).\n  //\n  // ByteSize() is generally linear in the number of fields defined for the\n  // proto.\n  virtual int ByteSize() const { return ByteSizeLong(); }\n  virtual size_t ByteSizeLong() const;\n\n  // Serializes the message without recomputing the size.  The message must\n  // not have changed since the last call to ByteSize(); if it has, the results\n  // are undefined.\n  virtual void SerializeWithCachedSizes(\n      io::CodedOutputStream* output) const = 0;\n\n  // A version of SerializeWithCachedSizesToArray, below, that does\n  // not guarantee deterministic serialization.\n  virtual uint8* SerializeWithCachedSizesToArray(uint8* target) const {\n    return InternalSerializeWithCachedSizesToArray(false, target);\n  }\n\n  // Returns the result of the last call to ByteSize().  An embedded message's\n  // size is needed both to serialize it (because embedded messages are\n  // length-delimited) and to compute the outer message's size.  
Caching\n  // the size avoids computing it multiple times.\n  //\n  // ByteSize() does not automatically use the cached size when available\n  // because this would require invalidating it every time the message was\n  // modified, which would be too hard and expensive.  (E.g. if a deeply-nested\n  // sub-message is changed, all of its parents' cached sizes would need to be\n  // invalidated, which is too much work for an otherwise inlined setter\n  // method.)\n  virtual int GetCachedSize() const = 0;\n\n  // Functions below here are not part of the public interface.  It isn't\n  // enforced, but they should be treated as private, and will be private\n  // at some future time.  Unfortunately the implementation of the \"friend\"\n  // keyword in GCC is broken at the moment, but we expect it will be fixed.\n\n  // Like SerializeWithCachedSizes, but writes directly to *target, returning\n  // a pointer to the byte immediately after the last byte written.  \"target\"\n  // must point at a byte array of at least ByteSize() bytes.  If deterministic\n  // is true then we use deterministic serialization, e.g., map keys are sorted.\n  // FOR INTERNAL USE ONLY!\n  virtual uint8* InternalSerializeWithCachedSizesToArray(bool deterministic,\n                                                         uint8* target) const;\n\n private:\n  friend class internal::WireFormatLite;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MessageLite);\n};\n\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_MESSAGE_LITE_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/metadata.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This header file defines an internal class that encapsulates internal message\n// metadata (Unknown-field set, Arena pointer, ...) 
and allows its\n// representation to be made more space-efficient via various optimizations.\n//\n// Note that this is distinct from google::protobuf::Metadata, which encapsulates\n// Descriptor and Reflection pointers.\n\n#ifndef GOOGLE_PROTOBUF_METADATA_H__\n#define GOOGLE_PROTOBUF_METADATA_H__\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/arena.h>\n#include <google/protobuf/unknown_field_set.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// This is the representation for messages that support arena allocation. It\n// uses a tagged pointer to either store the Arena pointer, if there are no\n// unknown fields, or a pointer to a block of memory with both the Arena pointer\n// and the UnknownFieldSet, if there are unknown fields. This optimization\n// allows for \"zero-overhead\" storage of the Arena pointer, relative to the\n// above baseline implementation.\n//\n// The tagged pointer uses the LSB to disambiguate cases, and uses bit 0 == 0 to\n// indicate an arena pointer and bit 0 == 1 to indicate a UFS+Arena-container\n// pointer.\nclass LIBPROTOBUF_EXPORT InternalMetadataWithArena {\n public:\n  InternalMetadataWithArena() : ptr_(NULL) {}\n  explicit InternalMetadataWithArena(Arena* arena)\n      : ptr_ (arena) {}\n\n  ~InternalMetadataWithArena() {\n    if (have_unknown_fields() && arena() == NULL) {\n      delete PtrValue<Container>();\n    }\n    ptr_ = NULL;\n  }\n\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE const UnknownFieldSet& unknown_fields() const {\n    if (GOOGLE_PREDICT_FALSE(have_unknown_fields())) {\n      return PtrValue<Container>()->unknown_fields_;\n    } else {\n      return *UnknownFieldSet::default_instance();\n    }\n  }\n\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE UnknownFieldSet* mutable_unknown_fields() {\n    if (GOOGLE_PREDICT_TRUE(have_unknown_fields())) {\n      return &PtrValue<Container>()->unknown_fields_;\n    } else {\n      return mutable_unknown_fields_slow();\n    }\n  }\n\n  
GOOGLE_ATTRIBUTE_ALWAYS_INLINE Arena* arena() const {\n    if (GOOGLE_PREDICT_FALSE(have_unknown_fields())) {\n      return PtrValue<Container>()->arena_;\n    } else {\n      return PtrValue<Arena>();\n    }\n  }\n\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE bool have_unknown_fields() const {\n    return PtrTag() == kTagContainer;\n  }\n\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE void Swap(InternalMetadataWithArena* other) {\n    // Semantics here are that we swap only the unknown fields, not the arena\n    // pointer. We cannot simply swap ptr_ with other->ptr_ because we need to\n    // maintain our own arena ptr. Also, our ptr_ and other's ptr_ may be in\n    // different states (direct arena pointer vs. container with UFS) so we\n    // cannot simply swap ptr_ and then restore the arena pointers. We reuse\n    // UFS's swap implementation instead.\n    if (have_unknown_fields() || other->have_unknown_fields()) {\n      mutable_unknown_fields()->Swap(other->mutable_unknown_fields());\n    }\n  }\n\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE void* raw_arena_ptr() const {\n    return ptr_;\n  }\n\n private:\n  void* ptr_;\n\n  // Tagged pointer implementation.\n  enum {\n    // ptr_ is an Arena*.\n    kTagArena = 0,\n    // ptr_ is a Container*.\n    kTagContainer = 1,\n  };\n  static const intptr_t kPtrTagMask = 1;\n  static const intptr_t kPtrValueMask = ~kPtrTagMask;\n\n  // Accessors for pointer tag and pointer value.\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE int PtrTag() const {\n    return reinterpret_cast<intptr_t>(ptr_) & kPtrTagMask;\n  }\n\n  template<typename T> T* PtrValue() const {\n    return reinterpret_cast<T*>(\n        reinterpret_cast<intptr_t>(ptr_) & kPtrValueMask);\n  }\n\n  // If ptr_'s tag is kTagContainer, it points to an instance of this struct.\n  struct Container {\n    UnknownFieldSet unknown_fields_;\n    Arena* arena_;\n  };\n\n  GOOGLE_ATTRIBUTE_NOINLINE UnknownFieldSet* mutable_unknown_fields_slow() {\n    Arena* my_arena = arena();\n    Container* container = 
Arena::Create<Container>(my_arena);\n    ptr_ = reinterpret_cast<void*>(\n        reinterpret_cast<intptr_t>(container) | kTagContainer);\n    container->arena_ = my_arena;\n    return &(container->unknown_fields_);\n  }\n};\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_METADATA_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/reflection.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This header defines the RepeatedFieldRef class template used to access\n// repeated fields with protobuf reflection API.\n#ifndef GOOGLE_PROTOBUF_REFLECTION_H__\n#define GOOGLE_PROTOBUF_REFLECTION_H__\n\n#include <memory>\n#ifndef _SHARED_PTR_H\n#include <google/protobuf/stubs/shared_ptr.h>\n#endif\n\n#include <google/protobuf/message.h>\n#include <google/protobuf/generated_enum_util.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\ntemplate<typename T, typename Enable = void>\nstruct RefTypeTraits;\n}  // namespace internal\n\ntemplate<typename T>\nRepeatedFieldRef<T> Reflection::GetRepeatedFieldRef(\n    const Message& message, const FieldDescriptor* field) const {\n  return RepeatedFieldRef<T>(message, field);\n}\n\ntemplate<typename T>\nMutableRepeatedFieldRef<T> Reflection::GetMutableRepeatedFieldRef(\n    Message* message, const FieldDescriptor* field) const {\n  return MutableRepeatedFieldRef<T>(message, field);\n}\n\n// RepeatedFieldRef definition for non-message types.\ntemplate<typename T>\nclass RepeatedFieldRef<\n    T, typename internal::enable_if<!internal::is_base_of<Message, T>::value>::type> {\n  typedef typename internal::RefTypeTraits<T>::iterator IteratorType;\n  typedef typename internal::RefTypeTraits<T>::AccessorType AccessorType;\n\n public:\n  bool empty() const {\n    return accessor_->IsEmpty(data_);\n  }\n  int size() const {\n    return accessor_->Size(data_);\n  }\n  T Get(int 
index) const {\n    return accessor_->template Get<T>(data_, index);\n  }\n\n  typedef IteratorType iterator;\n  typedef IteratorType const_iterator;\n  typedef T value_type;\n  typedef T& reference;\n  typedef const T& const_reference;\n  typedef int size_type;\n  typedef ptrdiff_t difference_type;\n\n  iterator begin() const {\n    return iterator(data_, accessor_, true);\n  }\n  iterator end() const {\n    return iterator(data_, accessor_, false);\n  }\n\n private:\n  friend class Reflection;\n  RepeatedFieldRef(\n      const Message& message,\n      const FieldDescriptor* field) {\n    const Reflection* reflection = message.GetReflection();\n    data_ = reflection->RepeatedFieldData(\n        const_cast<Message*>(&message), field,\n        internal::RefTypeTraits<T>::cpp_type, NULL);\n    accessor_ = reflection->RepeatedFieldAccessor(field);\n  }\n\n  const void* data_;\n  const AccessorType* accessor_;\n};\n\n// MutableRepeatedFieldRef definition for non-message types.\ntemplate<typename T>\nclass MutableRepeatedFieldRef<\n    T, typename internal::enable_if<!internal::is_base_of<Message, T>::value>::type> {\n  typedef typename internal::RefTypeTraits<T>::AccessorType AccessorType;\n\n public:\n  bool empty() const {\n    return accessor_->IsEmpty(data_);\n  }\n  int size() const {\n    return accessor_->Size(data_);\n  }\n  T Get(int index) const {\n    return accessor_->template Get<T>(data_, index);\n  }\n\n  void Set(int index, const T& value) const {\n    accessor_->template Set<T>(data_, index, value);\n  }\n  void Add(const T& value) const {\n    accessor_->template Add<T>(data_, value);\n  }\n  void RemoveLast() const {\n    accessor_->RemoveLast(data_);\n  }\n  void SwapElements(int index1, int index2) const {\n    accessor_->SwapElements(data_, index1, index2);\n  }\n  void Clear() const {\n    accessor_->Clear(data_);\n  }\n\n  void Swap(const MutableRepeatedFieldRef& other) const {\n    accessor_->Swap(data_, other.accessor_, other.data_);\n  }\n\n 
 template<typename Container>\n  void MergeFrom(const Container& container) const {\n    typedef typename Container::const_iterator Iterator;\n    for (Iterator it = container.begin(); it != container.end(); ++it) {\n      Add(*it);\n    }\n  }\n  template<typename Container>\n  void CopyFrom(const Container& container) const {\n    Clear();\n    MergeFrom(container);\n  }\n\n private:\n  friend class Reflection;\n  MutableRepeatedFieldRef(\n      Message* message,\n      const FieldDescriptor* field) {\n    const Reflection* reflection = message->GetReflection();\n    data_ = reflection->RepeatedFieldData(\n        message, field, internal::RefTypeTraits<T>::cpp_type, NULL);\n    accessor_ = reflection->RepeatedFieldAccessor(field);\n  }\n\n  void* data_;\n  const AccessorType* accessor_;\n};\n\n// RepeatedFieldRef definition for message types.\ntemplate<typename T>\nclass RepeatedFieldRef<\n    T, typename internal::enable_if<internal::is_base_of<Message, T>::value>::type> {\n  typedef typename internal::RefTypeTraits<T>::iterator IteratorType;\n  typedef typename internal::RefTypeTraits<T>::AccessorType AccessorType;\n\n public:\n  bool empty() const {\n    return accessor_->IsEmpty(data_);\n  }\n  int size() const {\n    return accessor_->Size(data_);\n  }\n  // This method returns a reference to the underlying message object if it\n  // exists. If a message object doesn't exist (e.g., data stored in serialized\n  // form), scratch_space will be filled with the data and a reference to it\n  // will be returned.\n  //\n  // Example:\n  //   RepeatedFieldRef<Message> h = ...\n  //   unique_ptr<Message> scratch_space(h.NewMessage());\n  //   const Message& item = h.Get(index, scratch_space.get());\n  const T& Get(int index, T* scratch_space) const {\n    return *static_cast<const T*>(accessor_->Get(data_, index, scratch_space));\n  }\n  // Create a new message of the same type as the messages stored in this\n  // repeated field. 
Caller takes ownership of the returned object.\n  T* NewMessage() const {\n    return static_cast<T*>(default_instance_->New());\n  }\n\n  typedef IteratorType iterator;\n  typedef IteratorType const_iterator;\n  typedef T value_type;\n  typedef T& reference;\n  typedef const T& const_reference;\n  typedef int size_type;\n  typedef ptrdiff_t difference_type;\n\n  iterator begin() const {\n    return iterator(data_, accessor_, true, NewMessage());\n  }\n  iterator end() const {\n    // The end iterator must not be dereferenced, no need for scratch space.\n    return iterator(data_, accessor_, false, NULL);\n  }\n\n private:\n  friend class Reflection;\n  RepeatedFieldRef(\n      const Message& message,\n      const FieldDescriptor* field) {\n    const Reflection* reflection = message.GetReflection();\n    data_ = reflection->RepeatedFieldData(\n        const_cast<Message*>(&message), field,\n        internal::RefTypeTraits<T>::cpp_type,\n        internal::RefTypeTraits<T>::GetMessageFieldDescriptor());\n    accessor_ = reflection->RepeatedFieldAccessor(field);\n    default_instance_ =\n        reflection->GetMessageFactory()->GetPrototype(field->message_type());\n  }\n\n  const void* data_;\n  const AccessorType* accessor_;\n  const Message* default_instance_;\n};\n\n// MutableRepeatedFieldRef definition for message types.\ntemplate<typename T>\nclass MutableRepeatedFieldRef<\n    T, typename internal::enable_if<internal::is_base_of<Message, T>::value>::type> {\n  typedef typename internal::RefTypeTraits<T>::AccessorType AccessorType;\n\n public:\n  bool empty() const {\n    return accessor_->IsEmpty(data_);\n  }\n  int size() const {\n    return accessor_->Size(data_);\n  }\n  // See comments for RepeatedFieldRef<Message>::Get()\n  const T& Get(int index, T* scratch_space) const {\n    return *static_cast<const T*>(accessor_->Get(data_, index, scratch_space));\n  }\n  // Create a new message of the same type as the messages stored in this\n  // repeated field. 
Caller takes ownership of the returned object.\n  T* NewMessage() const {\n    return static_cast<T*>(default_instance_->New());\n  }\n\n  void Set(int index, const T& value) const {\n    accessor_->Set(data_, index, &value);\n  }\n  void Add(const T& value) const {\n    accessor_->Add(data_, &value);\n  }\n  void RemoveLast() const {\n    accessor_->RemoveLast(data_);\n  }\n  void SwapElements(int index1, int index2) const {\n    accessor_->SwapElements(data_, index1, index2);\n  }\n  void Clear() const {\n    accessor_->Clear(data_);\n  }\n\n  void Swap(const MutableRepeatedFieldRef& other) const {\n    accessor_->Swap(data_, other.accessor_, other.data_);\n  }\n\n  template<typename Container>\n  void MergeFrom(const Container& container) const {\n    typedef typename Container::const_iterator Iterator;\n    for (Iterator it = container.begin(); it != container.end(); ++it) {\n      Add(*it);\n    }\n  }\n  template<typename Container>\n  void CopyFrom(const Container& container) const {\n    Clear();\n    MergeFrom(container);\n  }\n\n private:\n  friend class Reflection;\n  MutableRepeatedFieldRef(\n      Message* message,\n      const FieldDescriptor* field) {\n    const Reflection* reflection = message->GetReflection();\n    data_ = reflection->RepeatedFieldData(\n        message, field, internal::RefTypeTraits<T>::cpp_type,\n        internal::RefTypeTraits<T>::GetMessageFieldDescriptor());\n    accessor_ = reflection->RepeatedFieldAccessor(field);\n    default_instance_ =\n        reflection->GetMessageFactory()->GetPrototype(field->message_type());\n  }\n\n  void* data_;\n  const AccessorType* accessor_;\n  const Message* default_instance_;\n};\n\nnamespace internal {\n// Interfaces used to implement reflection RepeatedFieldRef API.\n// Reflection::GetRepeatedAccessor() should return a pointer to an singleton\n// object that implements the below interface.\n//\n// This interface passes/returns values using void pointers. 
The actual type\n// of the value depends on the field's cpp_type. Following is a mapping from\n// cpp_type to the type that should be used in this interface:\n//\n//   field->cpp_type()      T                Actual type of void*\n//   CPPTYPE_INT32        int32                   int32\n//   CPPTYPE_UINT32       uint32                  uint32\n//   CPPTYPE_INT64        int64                   int64\n//   CPPTYPE_UINT64       uint64                  uint64\n//   CPPTYPE_DOUBLE       double                  double\n//   CPPTYPE_FLOAT        float                   float\n//   CPPTYPE_BOOL         bool                    bool\n//   CPPTYPE_ENUM         generated enum type     int32\n//   CPPTYPE_STRING       string                  string\n//   CPPTYPE_MESSAGE      generated message type  google::protobuf::Message\n//                        or google::protobuf::Message\n//\n// Note that for enums we use int32 in the interface.\n//\n// You can map from T to the actual type using RefTypeTraits:\n//   typedef RefTypeTraits<T>::AccessorValueType ActualType;\nclass LIBPROTOBUF_EXPORT RepeatedFieldAccessor {\n public:\n  // Typedefs for clarity.\n  typedef void Field;\n  typedef void Value;\n  typedef void Iterator;\n\n  virtual ~RepeatedFieldAccessor();\n  virtual bool IsEmpty(const Field* data) const = 0;\n  virtual int Size(const Field* data) const = 0;\n  // Depends on the underlying representation of the repeated field, this\n  // method can return a pointer to the underlying object if such an object\n  // exists, or fill the data into scratch_space and return scratch_space.\n  // Callers of this method must ensure scratch_space is a valid pointer\n  // to a mutable object of the correct type.\n  virtual const Value* Get(\n      const Field* data, int index, Value* scratch_space) const = 0;\n\n  virtual void Clear(Field* data) const = 0;\n  virtual void Set(Field* data, int index, const Value* value) const = 0;\n  virtual void Add(Field* data, const Value* value) const 
= 0;\n  virtual void RemoveLast(Field* data) const = 0;\n  virtual void SwapElements(Field* data, int index1, int index2) const = 0;\n  virtual void Swap(Field* data, const RepeatedFieldAccessor* other_mutator,\n                    Field* other_data) const = 0;\n\n  // Create an iterator that points at the beginning of the repeated field.\n  virtual Iterator* BeginIterator(const Field* data) const = 0;\n  // Create an iterator that points at the end of the repeated field.\n  virtual Iterator* EndIterator(const Field* data) const = 0;\n  // Make a copy of an iterator and return the new copy.\n  virtual Iterator* CopyIterator(const Field* data,\n                                 const Iterator* iterator) const = 0;\n  // Move an iterator to point to the next element.\n  virtual Iterator* AdvanceIterator(const Field* data,\n                                    Iterator* iterator) const = 0;\n  // Compare whether two iterators point to the same element.\n  virtual bool EqualsIterator(const Field* data, const Iterator* a,\n                              const Iterator* b) const = 0;\n  // Delete an iterator created by BeginIterator(), EndIterator() and\n  // CopyIterator().\n  virtual void DeleteIterator(const Field* data, Iterator* iterator) const = 0;\n  // Like Get() but for iterators.\n  virtual const Value* GetIteratorValue(const Field* data,\n                                        const Iterator* iterator,\n                                        Value* scratch_space) const = 0;\n\n  // Templated methods that make using this interface easier for non-message\n  // types.\n  template<typename T>\n  T Get(const Field* data, int index) const {\n    typedef typename RefTypeTraits<T>::AccessorValueType ActualType;\n    ActualType scratch_space;\n    return static_cast<T>(\n        *reinterpret_cast<const ActualType*>(\n            Get(data, index, static_cast<Value*>(&scratch_space))));\n  }\n\n  template<typename T, typename ValueType>\n  void Set(Field* data, int index, 
const ValueType& value) const {\n    typedef typename RefTypeTraits<T>::AccessorValueType ActualType;\n    // In this RepeatedFieldAccessor interface we pass/return data using\n    // raw pointers. Type of the data these raw pointers point to should\n    // be ActualType. Here we have a ValueType object and want a ActualType\n    // pointer. We can't cast a ValueType pointer to an ActualType pointer\n    // directly because their type might be different (for enums ValueType\n    // may be a generated enum type while ActualType is int32). To be safe\n    // we make a copy to get a temporary ActualType object and use it.\n    ActualType tmp = static_cast<ActualType>(value);\n    Set(data, index, static_cast<const Value*>(&tmp));\n  }\n\n  template<typename T, typename ValueType>\n  void Add(Field* data, const ValueType& value) const {\n    typedef typename RefTypeTraits<T>::AccessorValueType ActualType;\n    // In this RepeatedFieldAccessor interface we pass/return data using\n    // raw pointers. Type of the data these raw pointers point to should\n    // be ActualType. Here we have a ValueType object and want a ActualType\n    // pointer. We can't cast a ValueType pointer to an ActualType pointer\n    // directly because their type might be different (for enums ValueType\n    // may be a generated enum type while ActualType is int32). 
To be safe\n    // we make a copy to get a temporary ActualType object and use it.\n    ActualType tmp = static_cast<ActualType>(value);\n    Add(data, static_cast<const Value*>(&tmp));\n  }\n};\n\n// Implement (Mutable)RepeatedFieldRef::iterator\ntemplate<typename T>\nclass RepeatedFieldRefIterator\n    : public std::iterator<std::forward_iterator_tag, T> {\n  typedef typename RefTypeTraits<T>::AccessorValueType AccessorValueType;\n  typedef typename RefTypeTraits<T>::IteratorValueType IteratorValueType;\n  typedef typename RefTypeTraits<T>::IteratorPointerType IteratorPointerType;\n\n public:\n  // Constructor for non-message fields.\n  RepeatedFieldRefIterator(const void* data,\n                           const RepeatedFieldAccessor* accessor, bool begin)\n      : data_(data),\n        accessor_(accessor),\n        iterator_(begin ? accessor->BeginIterator(data)\n                        : accessor->EndIterator(data)),\n        // The end iterator must not be dereferenced, no need for scratch space.\n        scratch_space_(begin ? new AccessorValueType : NULL) {}\n  // Constructor for message fields.\n  RepeatedFieldRefIterator(const void* data,\n                           const RepeatedFieldAccessor* accessor,\n                           bool begin,\n                           AccessorValueType* scratch_space)\n      : data_(data), accessor_(accessor),\n        iterator_(begin ? 
accessor->BeginIterator(data) :\n                          accessor->EndIterator(data)),\n        scratch_space_(scratch_space) {\n  }\n  ~RepeatedFieldRefIterator() {\n    accessor_->DeleteIterator(data_, iterator_);\n  }\n  RepeatedFieldRefIterator operator++(int) {\n    RepeatedFieldRefIterator tmp(*this);\n    iterator_ = accessor_->AdvanceIterator(data_, iterator_);\n    return tmp;\n  }\n  RepeatedFieldRefIterator& operator++() {\n    iterator_ = accessor_->AdvanceIterator(data_, iterator_);\n    return *this;\n  }\n  IteratorValueType operator*() const {\n    return static_cast<IteratorValueType>(\n        *static_cast<const AccessorValueType*>(\n            accessor_->GetIteratorValue(\n                data_, iterator_, scratch_space_.get())));\n  }\n  IteratorPointerType operator->() const {\n    return static_cast<IteratorPointerType>(\n        accessor_->GetIteratorValue(\n            data_, iterator_, scratch_space_.get()));\n  }\n  bool operator!=(const RepeatedFieldRefIterator& other) const {\n    assert(data_ == other.data_);\n    assert(accessor_ == other.accessor_);\n    return !accessor_->EqualsIterator(data_, iterator_, other.iterator_);\n  }\n  bool operator==(const RepeatedFieldRefIterator& other) const {\n    return !this->operator!=(other);\n  }\n\n  RepeatedFieldRefIterator(const RepeatedFieldRefIterator& other)\n      : data_(other.data_), accessor_(other.accessor_),\n        iterator_(accessor_->CopyIterator(data_, other.iterator_)) {\n  }\n  RepeatedFieldRefIterator& operator=(const RepeatedFieldRefIterator& other) {\n    if (this != &other) {\n      accessor_->DeleteIterator(data_, iterator_);\n      data_ = other.data_;\n      accessor_ = other.accessor_;\n      iterator_ = accessor_->CopyIterator(data_, other.iterator_);\n    }\n    return *this;\n  }\n\n protected:\n  const void* data_;\n  const RepeatedFieldAccessor* accessor_;\n  void* iterator_;\n  google::protobuf::scoped_ptr<AccessorValueType> scratch_space_;\n};\n\n// TypeTraits 
that maps the type parameter T of RepeatedFieldRef or\n// MutableRepeatedFieldRef to corresponding iterator type,\n// RepeatedFieldAccessor type, etc.\ntemplate<typename T>\nstruct PrimitiveTraits {\n  static const bool is_primitive = false;\n};\n#define DEFINE_PRIMITIVE(TYPE, type) \\\n    template<> struct PrimitiveTraits<type> { \\\n      static const bool is_primitive = true; \\\n      static const FieldDescriptor::CppType cpp_type = \\\n          FieldDescriptor::CPPTYPE_ ## TYPE; \\\n    };\nDEFINE_PRIMITIVE(INT32, int32)\nDEFINE_PRIMITIVE(UINT32, uint32)\nDEFINE_PRIMITIVE(INT64, int64)\nDEFINE_PRIMITIVE(UINT64, uint64)\nDEFINE_PRIMITIVE(FLOAT, float)\nDEFINE_PRIMITIVE(DOUBLE, double)\nDEFINE_PRIMITIVE(BOOL, bool)\n#undef DEFINE_PRIMITIVE\n\ntemplate<typename T>\nstruct RefTypeTraits<\n    T, typename internal::enable_if<PrimitiveTraits<T>::is_primitive>::type> {\n  typedef RepeatedFieldRefIterator<T> iterator;\n  typedef RepeatedFieldAccessor AccessorType;\n  typedef T AccessorValueType;\n  typedef T IteratorValueType;\n  typedef T* IteratorPointerType;\n  static const FieldDescriptor::CppType cpp_type =\n      PrimitiveTraits<T>::cpp_type;\n  static const Descriptor* GetMessageFieldDescriptor() {\n    return NULL;\n  }\n};\n\ntemplate<typename T>\nstruct RefTypeTraits<\n    T, typename internal::enable_if<is_proto_enum<T>::value>::type> {\n  typedef RepeatedFieldRefIterator<T> iterator;\n  typedef RepeatedFieldAccessor AccessorType;\n  // We use int32 for repeated enums in RepeatedFieldAccessor.\n  typedef int32 AccessorValueType;\n  typedef T IteratorValueType;\n  typedef int32* IteratorPointerType;\n  static const FieldDescriptor::CppType cpp_type =\n      FieldDescriptor::CPPTYPE_ENUM;\n  static const Descriptor* GetMessageFieldDescriptor() {\n    return NULL;\n  }\n};\n\ntemplate<typename T>\nstruct RefTypeTraits<\n    T, typename internal::enable_if< ::google::protobuf::internal::is_same<string, T>::value>::type> {\n  typedef 
RepeatedFieldRefIterator<T> iterator;\n  typedef RepeatedFieldAccessor AccessorType;\n  typedef string AccessorValueType;\n  typedef string IteratorValueType;\n  typedef string* IteratorPointerType;\n  static const FieldDescriptor::CppType cpp_type =\n      FieldDescriptor::CPPTYPE_STRING;\n  static const Descriptor* GetMessageFieldDescriptor() {\n    return NULL;\n  }\n};\n\ntemplate<typename T>\nstruct MessageDescriptorGetter {\n  static const Descriptor* get() {\n    return T::default_instance().GetDescriptor();\n  }\n};\ntemplate<>\nstruct MessageDescriptorGetter<Message> {\n  static const Descriptor* get() {\n    return NULL;\n  }\n};\n\ntemplate<typename T>\nstruct RefTypeTraits<\n    T, typename internal::enable_if<internal::is_base_of<Message, T>::value>::type> {\n  typedef RepeatedFieldRefIterator<T> iterator;\n  typedef RepeatedFieldAccessor AccessorType;\n  typedef Message AccessorValueType;\n  typedef const T& IteratorValueType;\n  typedef const T* IteratorPointerType;\n  static const FieldDescriptor::CppType cpp_type =\n      FieldDescriptor::CPPTYPE_MESSAGE;\n  static const Descriptor* GetMessageFieldDescriptor() {\n    return MessageDescriptorGetter<T>::get();\n  }\n};\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_REFLECTION_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/reflection_ops.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// This header is logically internal, but is made public because it is used\n// from protocol-compiler-generated code, which may reside in other components.\n\n#ifndef GOOGLE_PROTOBUF_REFLECTION_OPS_H__\n#define GOOGLE_PROTOBUF_REFLECTION_OPS_H__\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/message.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// Basic operations that can be performed using reflection.\n// These can be used as a cheap way to implement the corresponding\n// methods of the Message interface, though they are likely to be\n// slower than implementations tailored for the specific message type.\n//\n// This class should stay limited to operations needed to implement\n// the Message interface.\n//\n// This class is really a namespace that contains only static methods.\nclass LIBPROTOBUF_EXPORT ReflectionOps {\n public:\n  static void Copy(const Message& from, Message* to);\n  static void Merge(const Message& from, Message* to);\n  static void Clear(Message* message);\n  static bool IsInitialized(const Message& message);\n  static void DiscardUnknownFields(Message* message);\n\n  // Finds all unset required fields in the message and adds their full\n  // paths (e.g. \"foo.bar[5].baz\") to *names.  
\"prefix\" will be attached to\n  // the front of each name.\n  static void FindInitializationErrors(const Message& message,\n                                       const string& prefix,\n                                       std::vector<string>* errors);\n\n private:\n  // All methods are static.  No need to construct.\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ReflectionOps);\n};\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_REFLECTION_OPS_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/repeated_field.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// RepeatedField and RepeatedPtrField are used by generated protocol message\n// classes to manipulate repeated fields.  
These classes are very similar to\n// STL's vector, but include a number of optimizations found to be useful\n// specifically in the case of Protocol Buffers.  RepeatedPtrField is\n// particularly different from STL vector as it manages ownership of the\n// pointers that it contains.\n//\n// Typically, clients should not need to access RepeatedField objects directly,\n// but should instead use the accessor functions generated automatically by the\n// protocol compiler.\n\n#ifndef GOOGLE_PROTOBUF_REPEATED_FIELD_H__\n#define GOOGLE_PROTOBUF_REPEATED_FIELD_H__\n\n#ifdef _MSC_VER\n// This is required for min/max on VS2013 only.\n#include <algorithm>\n#endif\n\n#include <string>\n#include <iterator>\n#include <google/protobuf/stubs/casts.h>\n#include <google/protobuf/stubs/logging.h>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/type_traits.h>\n#include <google/protobuf/arena.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/message_lite.h>\n\nnamespace google {\n\nnamespace upb {\nnamespace google_opensource {\nclass GMR_Handlers;\n}  // namespace google_opensource\n}  // namespace upb\n\nnamespace protobuf {\n\nclass Message;\n\nnamespace internal {\n\nstatic const int kMinRepeatedFieldAllocationSize = 4;\n\n// A utility function for logging that doesn't need any template types.\nvoid LogIndexOutOfBounds(int index, int size);\n\ntemplate <typename Iter>\ninline int CalculateReserve(Iter begin, Iter end, std::forward_iterator_tag) {\n  return std::distance(begin, end);\n}\n\ntemplate <typename Iter>\ninline int CalculateReserve(Iter /*begin*/, Iter /*end*/,\n                            std::input_iterator_tag /*unused*/) {\n  return -1;\n}\n\ntemplate <typename Iter>\ninline int CalculateReserve(Iter begin, Iter end) {\n  typedef typename std::iterator_traits<Iter>::iterator_category Category;\n  return CalculateReserve(begin, end, Category());\n}\n}  // namespace internal\n\n\n// RepeatedField is used to 
represent repeated fields of a primitive type (in\n// other words, everything except strings and nested Messages).  Most users will\n// not ever use a RepeatedField directly; they will use the get-by-index,\n// set-by-index, and add accessors that are generated for all repeated fields.\ntemplate <typename Element>\nclass RepeatedField {\n public:\n  RepeatedField();\n  explicit RepeatedField(Arena* arena);\n  RepeatedField(const RepeatedField& other);\n  template <typename Iter>\n  RepeatedField(Iter begin, const Iter& end);\n  ~RepeatedField();\n\n  RepeatedField& operator=(const RepeatedField& other);\n\n  bool empty() const;\n  int size() const;\n\n  const Element& Get(int index) const;\n  Element* Mutable(int index);\n\n  const Element& operator[](int index) const { return Get(index); }\n  Element& operator[](int index) { return *Mutable(index); }\n\n  void Set(int index, const Element& value);\n  void Add(const Element& value);\n  Element* Add();\n  // Remove the last element in the array.\n  void RemoveLast();\n\n  // Extract elements with indices in \"[start .. start+num-1]\".\n  // Copy them into \"elements[0 .. num-1]\" if \"elements\" is not NULL.\n  // Caution: implementation also moves elements with indices [start+num ..].\n  // Calling this routine inside a loop can cause quadratic behavior.\n  void ExtractSubrange(int start, int num, Element* elements);\n\n  void Clear();\n  void MergeFrom(const RepeatedField& other);\n  void UnsafeMergeFrom(const RepeatedField& other);\n  void CopyFrom(const RepeatedField& other);\n\n  // Reserve space to expand the field to at least the given size.  If the\n  // array is grown, it will always be at least doubled in size.\n  void Reserve(int new_size);\n\n  // Resize the RepeatedField to a new, smaller size.  This is O(1).\n  void Truncate(int new_size);\n\n  void AddAlreadyReserved(const Element& value);\n  Element* AddAlreadyReserved();\n  int Capacity() const;\n\n  // Like STL resize.  
Uses value to fill appended elements.\n  // Like Truncate() if new_size <= size(), otherwise this is\n  // O(new_size - size()).\n  void Resize(int new_size, const Element& value);\n\n  // Gets the underlying array.  This pointer is possibly invalidated by\n  // any add or remove operation.\n  Element* mutable_data();\n  const Element* data() const;\n\n  // Swap entire contents with \"other\". If they are separate arenas then, copies\n  // data between each other.\n  void Swap(RepeatedField* other);\n\n  // Swap entire contents with \"other\". Should be called only if the caller can\n  // guarantee that both repeated fields are on the same arena or are on the\n  // heap. Swapping between different arenas is disallowed and caught by a\n  // GOOGLE_DCHECK (see API docs for details).\n  void UnsafeArenaSwap(RepeatedField* other);\n\n  // Swap two elements.\n  void SwapElements(int index1, int index2);\n\n  // STL-like iterator support\n  typedef Element* iterator;\n  typedef const Element* const_iterator;\n  typedef Element value_type;\n  typedef value_type& reference;\n  typedef const value_type& const_reference;\n  typedef value_type* pointer;\n  typedef const value_type* const_pointer;\n  typedef int size_type;\n  typedef ptrdiff_t difference_type;\n\n  iterator begin();\n  const_iterator begin() const;\n  const_iterator cbegin() const;\n  iterator end();\n  const_iterator end() const;\n  const_iterator cend() const;\n\n  // Reverse iterator support\n  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;\n  typedef std::reverse_iterator<iterator> reverse_iterator;\n  reverse_iterator rbegin() {\n    return reverse_iterator(end());\n  }\n  const_reverse_iterator rbegin() const {\n    return const_reverse_iterator(end());\n  }\n  reverse_iterator rend() {\n    return reverse_iterator(begin());\n  }\n  const_reverse_iterator rend() const {\n    return const_reverse_iterator(begin());\n  }\n\n  // Returns the number of bytes used by the repeated field, 
excluding\n  // sizeof(*this)\n  int SpaceUsedExcludingSelf() const;\n\n  // Removes the element referenced by position.\n  //\n  // Returns an iterator to the element immediately following the removed\n  // element.\n  //\n  // Invalidates all iterators at or after the removed element, including end().\n  iterator erase(const_iterator position);\n\n  // Removes the elements in the range [first, last).\n  //\n  // Returns an iterator to the element immediately following the removed range.\n  //\n  // Invalidates all iterators at or after the removed range, including end().\n  iterator erase(const_iterator first, const_iterator last);\n\n  // Get the Arena on which this RepeatedField stores its elements.\n  ::google::protobuf::Arena* GetArena() const {\n    return GetArenaNoVirtual();\n  }\n\n private:\n  static const int kInitialSize = 0;\n  // A note on the representation here (see also comment below for\n  // RepeatedPtrFieldBase's struct Rep):\n  //\n  // We maintain the same sizeof(RepeatedField) as before we added arena support\n  // so that we do not degrade performance by bloating memory usage. Directly\n  // adding an arena_ element to RepeatedField is quite costly. By using\n  // indirection in this way, we keep the same size when the RepeatedField is\n  // empty (common case), and add only an 8-byte header to the elements array\n  // when non-empty. We make sure to place the size fields directly in the\n  // RepeatedField class to avoid costly cache misses due to the indirection.\n  int current_size_;\n  int total_size_;\n  struct Rep {\n    Arena* arena;\n    Element elements[1];\n  };\n  // We can not use sizeof(Rep) - sizeof(Element) due to the trailing padding on\n  // the struct. We can not use sizeof(Arena*) as well because there might be\n  // a \"gap\" after the field arena and before the field elements (e.g., when\n  // Element is double and pointer is 32bit).\n  static const size_t kRepHeaderSize;\n  // Contains arena ptr and the elements array. 
We also keep the invariant that\n  // if rep_ is NULL, then arena is NULL.\n  Rep* rep_;\n\n  friend class Arena;\n  typedef void InternalArenaConstructable_;\n\n  // Move the contents of |from| into |to|, possibly clobbering |from| in the\n  // process.  For primitive types this is just a memcpy(), but it could be\n  // specialized for non-primitive types to, say, swap each element instead.\n  void MoveArray(Element* to, Element* from, int size);\n\n  // Copy the elements of |from| into |to|.\n  void CopyArray(Element* to, const Element* from, int size);\n\n  inline void InternalSwap(RepeatedField* other);\n\n  // Internal helper expected by Arena methods.\n  inline Arena* GetArenaNoVirtual() const {\n    return (rep_ == NULL) ? NULL : rep_->arena;\n  }\n\n  // Internal helper to delete all elements and deallocate the storage.\n  // If Element has a trivial destructor (for example, if it's a fundamental\n  // type, like int32), the loop will be removed by the optimizer.\n  void InternalDeallocate(Rep* rep, int size) {\n    if (rep != NULL) {\n      Element* e = &rep->elements[0];\n      Element* limit = &rep->elements[size];\n      for (; e < limit; e++) {\n        e->Element::~Element();\n      }\n      if (rep->arena == NULL) {\n#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)\n        const size_t bytes = size * sizeof(*e) + kRepHeaderSize;\n        ::operator delete(static_cast<void*>(rep), bytes);\n#else\n        ::operator delete(static_cast<void*>(rep));\n#endif\n      }\n    }\n  }\n};\n\ntemplate<typename Element>\nconst size_t RepeatedField<Element>::kRepHeaderSize =\n    reinterpret_cast<size_t>(&reinterpret_cast<Rep*>(16)->elements[0]) - 16;\n\nnamespace internal {\ntemplate <typename It> class RepeatedPtrIterator;\ntemplate <typename It, typename VoidPtr> class RepeatedPtrOverPtrsIterator;\n}  // namespace internal\n\nnamespace internal {\n\n// This is a helper template to copy an array of elements effeciently when they\n// 
have a trivial copy constructor, and correctly otherwise. This really\n// shouldn't be necessary, but our compiler doesn't optimize std::copy very\n// effectively.\ntemplate <typename Element,\n          bool HasTrivialCopy = has_trivial_copy<Element>::value>\nstruct ElementCopier {\n  void operator()(Element* to, const Element* from, int array_size);\n};\n\n}  // namespace internal\n\nnamespace internal {\n\n// type-traits helper for RepeatedPtrFieldBase: we only want to invoke\n// arena-related \"copy if on different arena\" behavior if the necessary methods\n// exist on the contained type. In particular, we rely on MergeFrom() existing\n// as a general proxy for the fact that a copy will work, and we also provide a\n// specific override for string*.\ntemplate<typename T>\nstruct TypeImplementsMergeBehavior {\n  typedef char HasMerge;\n  typedef long HasNoMerge;\n\n  // We accept either of:\n  // - void MergeFrom(const T& other)\n  // - bool MergeFrom(const T& other)\n  //\n  // We mangle these names a bit to avoid compatibility issues in 'unclean'\n  // include environments that may have, e.g., \"#define test ...\" (yes, this\n  // exists).\n  template<typename U, typename RetType, RetType (U::*)(const U& arg)>\n      struct CheckType;\n  template<typename U> static HasMerge Check(\n      CheckType<U, void, &U::MergeFrom>*);\n  template<typename U> static HasMerge Check(\n      CheckType<U, bool, &U::MergeFrom>*);\n  template<typename U> static HasNoMerge Check(...);\n\n  // Resovles to either google::protobuf::internal::true_type or google::protobuf::internal::false_type.\n  typedef google::protobuf::internal::integral_constant<bool,\n               (sizeof(Check<T>(0)) == sizeof(HasMerge))> type;\n};\n\ntemplate<>\nstruct TypeImplementsMergeBehavior< ::std::string > {\n  typedef google::protobuf::internal::true_type type;\n};\n\n// This is the common base class for RepeatedPtrFields.  It deals only in void*\n// pointers.  
Users should not use this interface directly.\n//\n// The methods of this interface correspond to the methods of RepeatedPtrField,\n// but may have a template argument called TypeHandler.  Its signature is:\n//   class TypeHandler {\n//    public:\n//     typedef MyType Type;\n//     static Type* New();\n//     static void Delete(Type*);\n//     static void Clear(Type*);\n//     static void Merge(const Type& from, Type* to);\n//\n//     // Only needs to be implemented if SpaceUsedExcludingSelf() is called.\n//     static int SpaceUsed(const Type&);\n//   };\nclass LIBPROTOBUF_EXPORT RepeatedPtrFieldBase {\n protected:\n  // The reflection implementation needs to call protected methods directly,\n  // reinterpreting pointers as being to Message instead of a specific Message\n  // subclass.\n  friend class GeneratedMessageReflection;\n\n  // ExtensionSet stores repeated message extensions as\n  // RepeatedPtrField<MessageLite>, but non-lite ExtensionSets need to\n  // implement SpaceUsed(), and thus need to call SpaceUsedExcludingSelf()\n  // reinterpreting MessageLite as Message.  
ExtensionSet also needs to make\n  // use of AddFromCleared(), which is not part of the public interface.\n  friend class ExtensionSet;\n\n  // The MapFieldBase implementation needs to call protected methods directly,\n  // reinterpreting pointers as being to Message instead of a specific Message\n  // subclass.\n  friend class MapFieldBase;\n\n  // To parse directly into a proto2 generated class, the upb class GMR_Handlers\n  // needs to be able to modify a RepeatedPtrFieldBase directly.\n  friend class upb::google_opensource::GMR_Handlers;\n\n  RepeatedPtrFieldBase();\n  explicit RepeatedPtrFieldBase(::google::protobuf::Arena* arena);\n  ~RepeatedPtrFieldBase() {}\n\n  // Must be called from destructor.\n  template <typename TypeHandler>\n  void Destroy();\n\n  bool empty() const;\n  int size() const;\n\n  template <typename TypeHandler>\n  const typename TypeHandler::Type& Get(int index) const;\n  template <typename TypeHandler>\n  typename TypeHandler::Type* Mutable(int index);\n  template <typename TypeHandler>\n  void Delete(int index);\n  template <typename TypeHandler>\n  typename TypeHandler::Type* Add(typename TypeHandler::Type* prototype = NULL);\n\n  template <typename TypeHandler>\n  void RemoveLast();\n  template <typename TypeHandler>\n  void Clear();\n  template <typename TypeHandler>\n  void MergeFrom(const RepeatedPtrFieldBase& other);\n  template <typename TypeHandler>\n  void CopyFrom(const RepeatedPtrFieldBase& other);\n\n  void CloseGap(int start, int num);\n\n  void Reserve(int new_size);\n\n  int Capacity() const;\n\n  // Used for constructing iterators.\n  void* const* raw_data() const;\n  void** raw_mutable_data() const;\n\n  template <typename TypeHandler>\n  typename TypeHandler::Type** mutable_data();\n  template <typename TypeHandler>\n  const typename TypeHandler::Type* const* data() const;\n\n  template <typename TypeHandler>\n  GOOGLE_ATTRIBUTE_ALWAYS_INLINE void Swap(RepeatedPtrFieldBase* other);\n\n  void SwapElements(int index1, 
int index2);\n\n  template <typename TypeHandler>\n  int SpaceUsedExcludingSelf() const;\n\n\n  // Advanced memory management --------------------------------------\n\n  // Like Add(), but if there are no cleared objects to use, returns NULL.\n  template <typename TypeHandler>\n  typename TypeHandler::Type* AddFromCleared();\n\n  template<typename TypeHandler>\n  void AddAllocated(typename TypeHandler::Type* value) {\n    typename TypeImplementsMergeBehavior<typename TypeHandler::Type>::type t;\n    AddAllocatedInternal<TypeHandler>(value, t);\n  }\n\n  template <typename TypeHandler>\n  void UnsafeArenaAddAllocated(typename TypeHandler::Type* value);\n\n  template <typename TypeHandler>\n  typename TypeHandler::Type* ReleaseLast() {\n    typename TypeImplementsMergeBehavior<typename TypeHandler::Type>::type t;\n    return ReleaseLastInternal<TypeHandler>(t);\n  }\n\n  // Releases last element and returns it, but does not do out-of-arena copy.\n  // And just returns the raw pointer to the contained element in the arena.\n  template <typename TypeHandler>\n  typename TypeHandler::Type* UnsafeArenaReleaseLast();\n\n  int ClearedCount() const;\n  template <typename TypeHandler>\n  void AddCleared(typename TypeHandler::Type* value);\n  template <typename TypeHandler>\n  typename TypeHandler::Type* ReleaseCleared();\n\n protected:\n  inline void InternalSwap(RepeatedPtrFieldBase* other);\n\n  template <typename TypeHandler>\n  void AddAllocatedInternal(typename TypeHandler::Type* value,\n                            google::protobuf::internal::true_type);\n  template <typename TypeHandler>\n  void AddAllocatedInternal(typename TypeHandler::Type* value,\n                            google::protobuf::internal::false_type);\n\n  template <typename TypeHandler> GOOGLE_ATTRIBUTE_NOINLINE\n  void AddAllocatedSlowWithCopy(typename TypeHandler::Type* value,\n                                Arena* value_arena,\n                                Arena* my_arena);\n  template 
<typename TypeHandler> GOOGLE_ATTRIBUTE_NOINLINE\n  void AddAllocatedSlowWithoutCopy(typename TypeHandler::Type* value);\n\n  template <typename TypeHandler>\n  typename TypeHandler::Type* ReleaseLastInternal(google::protobuf::internal::true_type);\n  template <typename TypeHandler>\n  typename TypeHandler::Type* ReleaseLastInternal(google::protobuf::internal::false_type);\n\n  template<typename TypeHandler> GOOGLE_ATTRIBUTE_NOINLINE\n  void SwapFallback(RepeatedPtrFieldBase* other);\n\n  inline Arena* GetArenaNoVirtual() const {\n    return arena_;\n  }\n\n private:\n  static const int kInitialSize = 0;\n  // A few notes on internal representation:\n  //\n  // We use an indirected approach, with struct Rep, to keep\n  // sizeof(RepeatedPtrFieldBase) equivalent to what it was before arena support\n  // was added, namely, 3 8-byte machine words on x86-64. An instance of Rep is\n  // allocated only when the repeated field is non-empty, and it is a\n  // dynamically-sized struct (the header is directly followed by elements[]).\n  // We place arena_ and current_size_ directly in the object to avoid cache\n  // misses due to the indirection, because these fields are checked frequently.\n  // Placing all fields directly in the RepeatedPtrFieldBase instance costs\n  // significant performance for memory-sensitive workloads.\n  Arena* arena_;\n  int    current_size_;\n  int    total_size_;\n  struct Rep {\n    int    allocated_size;\n    void*  elements[1];\n  };\n  static const size_t kRepHeaderSize = sizeof(Rep) - sizeof(void*);\n  // Contains arena ptr and the elements array. 
We also keep the invariant that\n  // if rep_ is NULL, then arena is NULL.\n  Rep* rep_;\n\n  template <typename TypeHandler>\n  static inline typename TypeHandler::Type* cast(void* element) {\n    return reinterpret_cast<typename TypeHandler::Type*>(element);\n  }\n  template <typename TypeHandler>\n  static inline const typename TypeHandler::Type* cast(const void* element) {\n    return reinterpret_cast<const typename TypeHandler::Type*>(element);\n  }\n\n  // Non-templated inner function to avoid code duplication. Takes a function\n  // pointer to the type-specific (templated) inner allocate/merge loop.\n  void MergeFromInternal(\n      const RepeatedPtrFieldBase& other,\n      void (RepeatedPtrFieldBase::*inner_loop)(void**, void**, int, int));\n\n  template<typename TypeHandler>\n  void MergeFromInnerLoop(\n      void** our_elems, void** other_elems, int length, int already_allocated);\n\n  // Internal helper: extend array space if necessary to contain |extend_amount|\n  // more elements, and return a pointer to the element immediately following\n  // the old list of elements.  This interface factors out common behavior from\n  // Reserve() and MergeFrom() to reduce code size. 
|extend_amount| must be > 0.\n  void** InternalExtend(int extend_amount);\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(RepeatedPtrFieldBase);\n};\n\ntemplate <typename GenericType>\nclass GenericTypeHandler {\n public:\n  typedef GenericType Type;\n  static inline GenericType* New(Arena* arena) {\n    return ::google::protobuf::Arena::CreateMaybeMessage<Type>(\n        arena, static_cast<GenericType*>(0));\n  }\n  // We force NewFromPrototype() to be non-inline to reduce code size:\n  // else, several other methods get inlined copies of message types'\n  // constructors.\n  GOOGLE_ATTRIBUTE_NOINLINE static GenericType* NewFromPrototype(\n      const GenericType* prototype, ::google::protobuf::Arena* arena = NULL);\n  static inline void Delete(GenericType* value, Arena* arena) {\n    if (arena == NULL) {\n      delete value;\n    }\n  }\n  static inline ::google::protobuf::Arena* GetArena(GenericType* value) {\n    return ::google::protobuf::Arena::GetArena<Type>(value);\n  }\n  static inline void* GetMaybeArenaPointer(GenericType* value) {\n    return ::google::protobuf::Arena::GetArena<Type>(value);\n  }\n\n  static inline void Clear(GenericType* value) { value->Clear(); }\n  GOOGLE_ATTRIBUTE_NOINLINE static void Merge(const GenericType& from,\n                                       GenericType* to);\n  static inline int SpaceUsed(const GenericType& value) {\n    return value.SpaceUsed();\n  }\n  static inline const Type& default_instance() {\n    return Type::default_instance();\n  }\n};\n\ntemplate <typename GenericType>\nGenericType* GenericTypeHandler<GenericType>::NewFromPrototype(\n    const GenericType* /* prototype */, ::google::protobuf::Arena* arena) {\n  return New(arena);\n}\ntemplate <typename GenericType>\nvoid GenericTypeHandler<GenericType>::Merge(const GenericType& from,\n                                            GenericType* to) {\n  to->MergeFrom(from);\n}\n\n// NewFromPrototype() and Merge() cannot be defined here; if they're declared\n// inline 
the compiler will complain about not matching GOOGLE_ATTRIBUTE_NOINLINE\n// above, and if not, compilation will result in multiple definitions.  These\n// are therefore declared as specializations here and defined in\n// message_lite.cc.\ntemplate<>\nMessageLite* GenericTypeHandler<MessageLite>::NewFromPrototype(\n    const MessageLite* prototype, google::protobuf::Arena* arena);\ntemplate<>\ninline google::protobuf::Arena* GenericTypeHandler<MessageLite>::GetArena(\n    MessageLite* value) {\n  return value->GetArena();\n}\ntemplate<>\ninline void* GenericTypeHandler<MessageLite>::GetMaybeArenaPointer(\n    MessageLite* value) {\n  return value->GetMaybeArenaPointer();\n}\ntemplate <>\nvoid GenericTypeHandler<MessageLite>::Merge(const MessageLite& from,\n                                            MessageLite* to);\ntemplate<>\ninline void GenericTypeHandler<string>::Clear(string* value) {\n  value->clear();\n}\ntemplate<>\nvoid GenericTypeHandler<string>::Merge(const string& from,\n                                       string* to);\n\n// Declarations of the specialization as we cannot define them here, as the\n// header that defines ProtocolMessage depends on types defined in this header.\n#define DECLARE_SPECIALIZATIONS_FOR_BASE_PROTO_TYPES(TypeName)                 \\\n    template<>                                                                 \\\n    TypeName* GenericTypeHandler<TypeName>::NewFromPrototype(                  \\\n        const TypeName* prototype, google::protobuf::Arena* arena);                      \\\n    template<>                                                                 \\\n    google::protobuf::Arena* GenericTypeHandler<TypeName>::GetArena(                     \\\n        TypeName* value);                                                      \\\n    template<>                                                                 \\\n    void* GenericTypeHandler<TypeName>::GetMaybeArenaPointer(                  \\\n        TypeName* 
value);\n\n// Message specialization bodies defined in message.cc. This split is necessary\n// to allow proto2-lite (which includes this header) to be independent of\n// Message.\nDECLARE_SPECIALIZATIONS_FOR_BASE_PROTO_TYPES(Message)\n\n\n#undef DECLARE_SPECIALIZATIONS_FOR_BASE_PROTO_TYPES\n\ntemplate <>\ninline const MessageLite& GenericTypeHandler<MessageLite>::default_instance() {\n  // Yes, the behavior of the code is undefined, but this function is only\n  // called when we're already deep into the world of undefined, because the\n  // caller called Get(index) out of bounds.\n  MessageLite* null = NULL;\n  return *null;\n}\n\ntemplate <>\ninline const Message& GenericTypeHandler<Message>::default_instance() {\n  // Yes, the behavior of the code is undefined, but this function is only\n  // called when we're already deep into the world of undefined, because the\n  // caller called Get(index) out of bounds.\n  Message* null = NULL;\n  return *null;\n}\n\n\n// HACK:  If a class is declared as DLL-exported in MSVC, it insists on\n//   generating copies of all its methods -- even inline ones -- to include\n//   in the DLL.  But SpaceUsed() calls StringSpaceUsedExcludingSelf() which\n//   isn't in the lite library, therefore the lite library cannot link if\n//   StringTypeHandler is exported.  
So, we factor out StringTypeHandlerBase,\n//   export that, then make StringTypeHandler be a subclass which is NOT\n//   exported.\n// TODO(kenton):  Now that StringSpaceUsedExcludingSelf() is in the lite\n//   library, this can be cleaned up.\nclass LIBPROTOBUF_EXPORT StringTypeHandlerBase {\n public:\n  typedef string Type;\n\n  static inline string* New(Arena* arena) {\n    return Arena::Create<string>(arena);\n  }\n  static inline string* NewFromPrototype(const string*,\n                                         ::google::protobuf::Arena* arena) {\n    return New(arena);\n  }\n  static inline ::google::protobuf::Arena* GetArena(string*) {\n    return NULL;\n  }\n  static inline void* GetMaybeArenaPointer(string* /* value */) {\n    return NULL;\n  }\n  static inline void Delete(string* value, Arena* arena) {\n    if (arena == NULL) {\n      delete value;\n    }\n  }\n  static inline void Clear(string* value) { value->clear(); }\n  static inline void Merge(const string& from, string* to) { *to = from; }\n  static inline const Type& default_instance() {\n    return ::google::protobuf::internal::GetEmptyString();\n  }\n};\n\nclass StringTypeHandler : public StringTypeHandlerBase {\n public:\n  static int SpaceUsed(const string& value)  {\n    return static_cast<int>(sizeof(value)) + StringSpaceUsedExcludingSelf(value);\n  }\n};\n\n\n}  // namespace internal\n\n// RepeatedPtrField is like RepeatedField, but used for repeated strings or\n// Messages.\ntemplate <typename Element>\nclass RepeatedPtrField : public internal::RepeatedPtrFieldBase {\n public:\n  RepeatedPtrField();\n  explicit RepeatedPtrField(::google::protobuf::Arena* arena);\n\n  RepeatedPtrField(const RepeatedPtrField& other);\n  template <typename Iter>\n  RepeatedPtrField(Iter begin, const Iter& end);\n  ~RepeatedPtrField();\n\n  RepeatedPtrField& operator=(const RepeatedPtrField& other);\n\n  bool empty() const;\n  int size() const;\n\n  const Element& Get(int index) const;\n  Element* Mutable(int 
index);\n  Element* Add();\n\n  const Element& operator[](int index) const { return Get(index); }\n  Element& operator[](int index) { return *Mutable(index); }\n\n  // Remove the last element in the array.\n  // Ownership of the element is retained by the array.\n  void RemoveLast();\n\n  // Delete elements with indices in the range [start .. start+num-1].\n  // Caution: implementation moves all elements with indices [start+num .. ].\n  // Calling this routine inside a loop can cause quadratic behavior.\n  void DeleteSubrange(int start, int num);\n\n  void Clear();\n  void MergeFrom(const RepeatedPtrField& other);\n  void UnsafeMergeFrom(const RepeatedPtrField& other) { MergeFrom(other); }\n  void CopyFrom(const RepeatedPtrField& other);\n\n  // Reserve space to expand the field to at least the given size.  This only\n  // resizes the pointer array; it doesn't allocate any objects.  If the\n  // array is grown, it will always be at least doubled in size.\n  void Reserve(int new_size);\n\n  int Capacity() const;\n\n  // Gets the underlying array.  This pointer is possibly invalidated by\n  // any add or remove operation.\n  Element** mutable_data();\n  const Element* const* data() const;\n\n  // Swap entire contents with \"other\". If they are on separate arenas, then\n  // copies data.\n  void Swap(RepeatedPtrField* other);\n\n  // Swap entire contents with \"other\". Caller should guarantee that either both\n  // fields are on the same arena or both are on the heap. 
Swapping between\n  // different arenas with this function is disallowed and is caught via\n  // GOOGLE_DCHECK.\n  void UnsafeArenaSwap(RepeatedPtrField* other);\n\n  // Swap two elements.\n  void SwapElements(int index1, int index2);\n\n  // STL-like iterator support\n  typedef internal::RepeatedPtrIterator<Element> iterator;\n  typedef internal::RepeatedPtrIterator<const Element> const_iterator;\n  typedef Element value_type;\n  typedef value_type& reference;\n  typedef const value_type& const_reference;\n  typedef value_type* pointer;\n  typedef const value_type* const_pointer;\n  typedef int size_type;\n  typedef ptrdiff_t difference_type;\n\n  iterator begin();\n  const_iterator begin() const;\n  const_iterator cbegin() const;\n  iterator end();\n  const_iterator end() const;\n  const_iterator cend() const;\n\n  // Reverse iterator support\n  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;\n  typedef std::reverse_iterator<iterator> reverse_iterator;\n  reverse_iterator rbegin() {\n    return reverse_iterator(end());\n  }\n  const_reverse_iterator rbegin() const {\n    return const_reverse_iterator(end());\n  }\n  reverse_iterator rend() {\n    return reverse_iterator(begin());\n  }\n  const_reverse_iterator rend() const {\n    return const_reverse_iterator(begin());\n  }\n\n  // Custom STL-like iterator that iterates over and returns the underlying\n  // pointers to Element rather than Element itself.\n  typedef internal::RepeatedPtrOverPtrsIterator<Element, void*>\n  pointer_iterator;\n  typedef internal::RepeatedPtrOverPtrsIterator<const Element, const void*>\n  const_pointer_iterator;\n  pointer_iterator pointer_begin();\n  const_pointer_iterator pointer_begin() const;\n  pointer_iterator pointer_end();\n  const_pointer_iterator pointer_end() const;\n\n  // Returns (an estimate of) the number of bytes used by the repeated field,\n  // excluding sizeof(*this).\n  int SpaceUsedExcludingSelf() const;\n\n  // Advanced memory management 
--------------------------------------\n  // When hardcore memory management becomes necessary -- as it sometimes\n  // does here at Google -- the following methods may be useful.\n\n  // Add an already-allocated object, passing ownership to the\n  // RepeatedPtrField.\n  //\n  // Note that some special behavior occurs with respect to arenas:\n  //\n  //   (i) if this field holds submessages, the new submessage will be copied if\n  //   the original is in an arena and this RepeatedPtrField is either in a\n  //   different arena, or on the heap.\n  //   (ii) if this field holds strings, the passed-in string *must* be\n  //   heap-allocated, not arena-allocated. There is no way to dynamically check\n  //   this at runtime, so User Beware.\n  void AddAllocated(Element* value);\n\n  // Remove the last element and return it, passing ownership to the caller.\n  // Requires:  size() > 0\n  //\n  // If this RepeatedPtrField is on an arena, an object copy is required to pass\n  // ownership back to the user (for compatible semantics). Use\n  // UnsafeArenaReleaseLast() if this behavior is undesired.\n  Element* ReleaseLast();\n\n  // Add an already-allocated object, skipping arena-ownership checks. The user\n  // must guarantee that the given object is in the same arena as this\n  // RepeatedPtrField.\n  // It is also useful in legacy code that uses temporary ownership to avoid\n  // copies. Example:\n  // RepeatedPtrField<T> temp_field;\n  // temp_field.AddAllocated(new T);\n  // ... // Do something with temp_field\n  // temp_field.ExtractSubrange(0, temp_field.size(), NULL);\n  // If you put temp_field on the arena this fails, because the ownership\n  // transfers to the arena at the \"AddAllocated\" call and is not released\n  // anymore causing a double delete. UnsafeArenaAddAllocated prevents this.\n  void UnsafeArenaAddAllocated(Element* value);\n\n  // Remove the last element and return it.  Works only when operating on an\n  // arena. 
The returned pointer is to the original object in the arena, hence\n  // has the arena's lifetime.\n  // Requires:  current_size_ > 0\n  Element* UnsafeArenaReleaseLast();\n\n  // Extract elements with indices in the range \"[start .. start+num-1]\".\n  // The caller assumes ownership of the extracted elements and is responsible\n  // for deleting them when they are no longer needed.\n  // If \"elements\" is non-NULL, then pointers to the extracted elements\n  // are stored in \"elements[0 .. num-1]\" for the convenience of the caller.\n  // If \"elements\" is NULL, then the caller must use some other mechanism\n  // to perform any further operations (like deletion) on these elements.\n  // Caution: implementation also moves elements with indices [start+num ..].\n  // Calling this routine inside a loop can cause quadratic behavior.\n  //\n  // Memory copying behavior is identical to ReleaseLast(), described above: if\n  // this RepeatedPtrField is on an arena, an object copy is performed for each\n  // returned element, so that all returned element pointers are to\n  // heap-allocated copies. If this copy is not desired, the user should call\n  // UnsafeArenaExtractSubrange().\n  void ExtractSubrange(int start, int num, Element** elements);\n\n  // Identical to ExtractSubrange() described above, except that when this\n  // repeated field is on an arena, no object copies are performed. Instead, the\n  // raw object pointers are returned. Thus, if on an arena, the returned\n  // objects must not be freed, because they will not be heap-allocated objects.\n  void UnsafeArenaExtractSubrange(int start, int num, Element** elements);\n\n  // When elements are removed by calls to RemoveLast() or Clear(), they\n  // are not actually freed.  Instead, they are cleared and kept so that\n  // they can be reused later.  
This can save lots of CPU time when\n  // repeatedly reusing a protocol message for similar purposes.\n  //\n  // Hardcore programs may choose to manipulate these cleared objects\n  // to better optimize memory management using the following routines.\n\n  // Get the number of cleared objects that are currently being kept\n  // around for reuse.\n  int ClearedCount() const;\n  // Add an element to the pool of cleared objects, passing ownership to\n  // the RepeatedPtrField.  The element must be cleared prior to calling\n  // this method.\n  //\n  // This method cannot be called when the repeated field is on an arena or when\n  // |value| is; both cases will trigger a GOOGLE_DCHECK-failure.\n  void AddCleared(Element* value);\n  // Remove a single element from the cleared pool and return it, passing\n  // ownership to the caller.  The element is guaranteed to be cleared.\n  // Requires:  ClearedCount() > 0\n  //\n  //\n  // This method cannot be called when the repeated field is on an arena; doing\n  // so will trigger a GOOGLE_DCHECK-failure.\n  Element* ReleaseCleared();\n\n  // Removes the element referenced by position.\n  //\n  // Returns an iterator to the element immediately following the removed\n  // element.\n  //\n  // Invalidates all iterators at or after the removed element, including end().\n  iterator erase(const_iterator position);\n\n  // Removes the elements in the range [first, last).\n  //\n  // Returns an iterator to the element immediately following the removed range.\n  //\n  // Invalidates all iterators at or after the removed range, including end().\n  iterator erase(const_iterator first, const_iterator last);\n\n  // Gets the arena on which this RepeatedPtrField stores its elements.\n  ::google::protobuf::Arena* GetArena() const {\n    return GetArenaNoVirtual();\n  }\n\n protected:\n  // Note:  RepeatedPtrField SHOULD NOT be subclassed by users.  We only\n  //   subclass it in one place as a hack for compatibility with proto1.  
The\n  //   subclass needs to know about TypeHandler in order to call protected\n  //   methods on RepeatedPtrFieldBase.\n  class TypeHandler;\n\n  // Internal arena accessor expected by helpers in Arena.\n  inline Arena* GetArenaNoVirtual() const;\n\n private:\n  // Implementations for ExtractSubrange(). The copying behavior must be\n  // included only if the type supports the necessary operations (e.g.,\n  // MergeFrom()), so we must resolve this at compile time. ExtractSubrange()\n  // uses SFINAE to choose one of the below implementations.\n  void ExtractSubrangeInternal(int start, int num, Element** elements,\n                               google::protobuf::internal::true_type);\n  void ExtractSubrangeInternal(int start, int num, Element** elements,\n                               google::protobuf::internal::false_type);\n\n  friend class Arena;\n  typedef void InternalArenaConstructable_;\n\n};\n\n// implementation ====================================================\n\ntemplate <typename Element>\ninline RepeatedField<Element>::RepeatedField()\n  : current_size_(0),\n    total_size_(0),\n    rep_(NULL) {\n}\n\ntemplate <typename Element>\ninline RepeatedField<Element>::RepeatedField(Arena* arena)\n  : current_size_(0),\n    total_size_(0),\n    rep_(NULL) {\n // In case arena is NULL, then we do not create rep_, as code has an invariant\n // `rep_ == NULL then arena == NULL`.\n if (arena != NULL) {\n  rep_ = reinterpret_cast<Rep*>(\n      ::google::protobuf::Arena::CreateArray<char>(arena, kRepHeaderSize));\n  rep_->arena = arena;\n }\n}\n\ntemplate <typename Element>\ninline RepeatedField<Element>::RepeatedField(const RepeatedField& other)\n  : current_size_(0),\n    total_size_(0),\n    rep_(NULL) {\n  CopyFrom(other);\n}\n\ntemplate <typename Element>\ntemplate <typename Iter>\nRepeatedField<Element>::RepeatedField(Iter begin, const Iter& end)\n  : current_size_(0),\n    total_size_(0),\n    rep_(NULL) {\n  int reserve = internal::CalculateReserve(begin, 
end);\n  if (reserve != -1) {\n    Reserve(reserve);\n    for (; begin != end; ++begin) {\n      AddAlreadyReserved(*begin);\n    }\n  } else {\n    for (; begin != end; ++begin) {\n      Add(*begin);\n    }\n  }\n}\n\ntemplate <typename Element>\nRepeatedField<Element>::~RepeatedField() {\n  // See explanation in Reserve(): we need to invoke destructors here for the\n  // case that Element has a non-trivial destructor.\n  InternalDeallocate(rep_, total_size_);\n}\n\ntemplate <typename Element>\ninline RepeatedField<Element>&\nRepeatedField<Element>::operator=(const RepeatedField& other) {\n  if (this != &other)\n    CopyFrom(other);\n  return *this;\n}\n\ntemplate <typename Element>\ninline bool RepeatedField<Element>::empty() const {\n  return current_size_ == 0;\n}\n\ntemplate <typename Element>\ninline int RepeatedField<Element>::size() const {\n  return current_size_;\n}\n\ntemplate <typename Element>\ninline int RepeatedField<Element>::Capacity() const {\n  return total_size_;\n}\n\ntemplate<typename Element>\ninline void RepeatedField<Element>::AddAlreadyReserved(const Element& value) {\n  GOOGLE_DCHECK_LT(current_size_, total_size_);\n  rep_->elements[current_size_++] = value;\n}\n\ntemplate<typename Element>\ninline Element* RepeatedField<Element>::AddAlreadyReserved() {\n  GOOGLE_DCHECK_LT(current_size_, total_size_);\n  return &rep_->elements[current_size_++];\n}\n\ntemplate<typename Element>\ninline void RepeatedField<Element>::Resize(int new_size, const Element& value) {\n  GOOGLE_DCHECK_GE(new_size, 0);\n  if (new_size > current_size_) {\n    Reserve(new_size);\n    std::fill(&rep_->elements[current_size_],\n              &rep_->elements[new_size], value);\n  }\n  current_size_ = new_size;\n}\n\ntemplate <typename Element>\ninline const Element& RepeatedField<Element>::Get(int index) const {\n  GOOGLE_DCHECK_GE(index, 0);\n  GOOGLE_DCHECK_LT(index, current_size_);\n  return rep_->elements[index];\n}\n\ntemplate <typename Element>\ninline Element* 
RepeatedField<Element>::Mutable(int index) {\n  GOOGLE_DCHECK_GE(index, 0);\n  GOOGLE_DCHECK_LT(index, current_size_);\n  return &rep_->elements[index];\n}\n\ntemplate <typename Element>\ninline void RepeatedField<Element>::Set(int index, const Element& value) {\n  GOOGLE_DCHECK_GE(index, 0);\n  GOOGLE_DCHECK_LT(index, current_size_);\n  rep_->elements[index] = value;\n}\n\ntemplate <typename Element>\ninline void RepeatedField<Element>::Add(const Element& value) {\n  if (current_size_ == total_size_) Reserve(total_size_ + 1);\n  rep_->elements[current_size_++] = value;\n}\n\ntemplate <typename Element>\ninline Element* RepeatedField<Element>::Add() {\n  if (current_size_ == total_size_) Reserve(total_size_ + 1);\n  return &rep_->elements[current_size_++];\n}\n\ntemplate <typename Element>\ninline void RepeatedField<Element>::RemoveLast() {\n  GOOGLE_DCHECK_GT(current_size_, 0);\n  current_size_--;\n}\n\ntemplate <typename Element>\nvoid RepeatedField<Element>::ExtractSubrange(\n    int start, int num, Element* elements) {\n  GOOGLE_DCHECK_GE(start, 0);\n  GOOGLE_DCHECK_GE(num, 0);\n  GOOGLE_DCHECK_LE(start + num, this->current_size_);\n\n  // Save the values of the removed elements if requested.\n  if (elements != NULL) {\n    for (int i = 0; i < num; ++i)\n      elements[i] = this->Get(i + start);\n  }\n\n  // Slide remaining elements down to fill the gap.\n  if (num > 0) {\n    for (int i = start + num; i < this->current_size_; ++i)\n      this->Set(i - num, this->Get(i));\n    this->Truncate(this->current_size_ - num);\n  }\n}\n\ntemplate <typename Element>\ninline void RepeatedField<Element>::Clear() {\n  current_size_ = 0;\n}\n\ntemplate <typename Element>\ninline void RepeatedField<Element>::UnsafeMergeFrom(const RepeatedField& other) {\n  if (other.current_size_ != 0) {\n    Reserve(current_size_ + other.current_size_);\n    CopyArray(rep_->elements + current_size_,\n              other.rep_->elements, other.current_size_);\n    current_size_ += 
other.current_size_;\n  }\n}\n\ntemplate <typename Element>\ninline void RepeatedField<Element>::MergeFrom(const RepeatedField& other) {\n  GOOGLE_CHECK_NE(&other, this);\n  UnsafeMergeFrom(other);\n}\n\ntemplate <typename Element>\ninline void RepeatedField<Element>::CopyFrom(const RepeatedField& other) {\n  if (&other == this) return;\n  Clear();\n  MergeFrom(other);\n}\n\ntemplate <typename Element>\ninline typename RepeatedField<Element>::iterator RepeatedField<Element>::erase(\n    const_iterator position) {\n  return erase(position, position + 1);\n}\n\ntemplate <typename Element>\ninline typename RepeatedField<Element>::iterator RepeatedField<Element>::erase(\n    const_iterator first, const_iterator last) {\n  size_type first_offset = first - cbegin();\n  if (first != last) {\n    Truncate(std::copy(last, cend(), begin() + first_offset) - cbegin());\n  }\n  return begin() + first_offset;\n}\n\ntemplate <typename Element>\ninline Element* RepeatedField<Element>::mutable_data() {\n  return rep_ ? rep_->elements : NULL;\n}\n\ntemplate <typename Element>\ninline const Element* RepeatedField<Element>::data() const {\n  return rep_ ? 
rep_->elements : NULL;\n}\n\n\ntemplate <typename Element>\ninline void RepeatedField<Element>::InternalSwap(RepeatedField* other) {\n  std::swap(rep_, other->rep_);\n  std::swap(current_size_, other->current_size_);\n  std::swap(total_size_, other->total_size_);\n}\n\ntemplate <typename Element>\nvoid RepeatedField<Element>::Swap(RepeatedField* other) {\n  if (this == other) return;\n  if (GetArenaNoVirtual() ==  other->GetArenaNoVirtual()) {\n    InternalSwap(other);\n  } else {\n    RepeatedField<Element> temp(other->GetArenaNoVirtual());\n    temp.MergeFrom(*this);\n    CopyFrom(*other);\n    other->UnsafeArenaSwap(&temp);\n  }\n}\n\ntemplate <typename Element>\nvoid RepeatedField<Element>::UnsafeArenaSwap(RepeatedField* other) {\n  if (this == other) return;\n  GOOGLE_DCHECK(GetArenaNoVirtual() == other->GetArenaNoVirtual());\n  InternalSwap(other);\n}\n\ntemplate <typename Element>\nvoid RepeatedField<Element>::SwapElements(int index1, int index2) {\n  using std::swap;  // enable ADL with fallback\n  swap(rep_->elements[index1], rep_->elements[index2]);\n}\n\ntemplate <typename Element>\ninline typename RepeatedField<Element>::iterator\nRepeatedField<Element>::begin() {\n  return rep_ ? rep_->elements : NULL;\n}\ntemplate <typename Element>\ninline typename RepeatedField<Element>::const_iterator\nRepeatedField<Element>::begin() const {\n  return rep_ ? rep_->elements : NULL;\n}\ntemplate <typename Element>\ninline typename RepeatedField<Element>::const_iterator\nRepeatedField<Element>::cbegin() const {\n  return rep_ ? rep_->elements : NULL;\n}\ntemplate <typename Element>\ninline typename RepeatedField<Element>::iterator\nRepeatedField<Element>::end() {\n  return rep_ ? rep_->elements + current_size_ : NULL;\n}\ntemplate <typename Element>\ninline typename RepeatedField<Element>::const_iterator\nRepeatedField<Element>::end() const {\n  return rep_ ? 
rep_->elements + current_size_ : NULL;\n}\ntemplate <typename Element>\ninline typename RepeatedField<Element>::const_iterator\nRepeatedField<Element>::cend() const {\n  return rep_ ? rep_->elements + current_size_ : NULL;\n}\n\ntemplate <typename Element>\ninline int RepeatedField<Element>::SpaceUsedExcludingSelf() const {\n  return rep_ ?\n      (total_size_ * sizeof(Element) + kRepHeaderSize) : 0;\n}\n\n// Avoid inlining of Reserve(): new, copy, and delete[] lead to a significant\n// amount of code bloat.\ntemplate <typename Element>\nvoid RepeatedField<Element>::Reserve(int new_size) {\n  if (total_size_ >= new_size) return;\n  Rep* old_rep = rep_;\n  Arena* arena = GetArenaNoVirtual();\n  new_size = std::max(google::protobuf::internal::kMinRepeatedFieldAllocationSize,\n                      std::max(total_size_ * 2, new_size));\n  GOOGLE_CHECK_LE(static_cast<size_t>(new_size),\n           (std::numeric_limits<size_t>::max() - kRepHeaderSize) /\n           sizeof(Element))\n      << \"Requested size is too large to fit into size_t.\";\n  size_t bytes = kRepHeaderSize + sizeof(Element) * new_size;\n  if (arena == NULL) {\n    rep_ = static_cast<Rep*>(::operator new(bytes));\n  } else {\n    rep_ = reinterpret_cast<Rep*>(\n            ::google::protobuf::Arena::CreateArray<char>(arena, bytes));\n  }\n  rep_->arena = arena;\n  int old_total_size = total_size_;\n  total_size_ = new_size;\n  // Invoke placement-new on newly allocated elements. We shouldn't have to do\n  // this, since Element is supposed to be POD, but a previous version of this\n  // code allocated storage with \"new Element[size]\" and some code uses\n  // RepeatedField with non-POD types, relying on constructor invocation. 
If\n  // Element has a trivial constructor (e.g., int32), gcc (tested with -O2)\n  // completely removes this loop because the loop body is empty, so this has no\n  // effect unless its side-effects are required for correctness.\n  // Note that we do this before MoveArray() below because Element's copy\n  // assignment implementation will want an initialized instance first.\n  Element* e = &rep_->elements[0];\n  Element* limit = &rep_->elements[total_size_];\n  for (; e < limit; e++) {\n    new (e) Element();\n  }\n  if (current_size_ > 0) {\n    MoveArray(rep_->elements, old_rep->elements, current_size_);\n  }\n\n  // Likewise, we need to invoke destructors on the old array.\n  InternalDeallocate(old_rep, old_total_size);\n\n}\n\ntemplate <typename Element>\ninline void RepeatedField<Element>::Truncate(int new_size) {\n  GOOGLE_DCHECK_LE(new_size, current_size_);\n  if (current_size_ > 0) {\n    current_size_ = new_size;\n  }\n}\n\ntemplate <typename Element>\ninline void RepeatedField<Element>::MoveArray(\n  Element* to, Element* from, int array_size) {\n  CopyArray(to, from, array_size);\n}\n\ntemplate <typename Element>\ninline void RepeatedField<Element>::CopyArray(\n  Element* to, const Element* from, int array_size) {\n  internal::ElementCopier<Element>()(to, from, array_size);\n}\n\nnamespace internal {\n\ntemplate <typename Element, bool HasTrivialCopy>\nvoid ElementCopier<Element, HasTrivialCopy>::operator()(\n  Element* to, const Element* from, int array_size) {\n  std::copy(from, from + array_size, to);\n}\n\ntemplate <typename Element>\nstruct ElementCopier<Element, true> {\n  void operator()(Element* to, const Element* from, int array_size) {\n    memcpy(to, from, array_size * sizeof(Element));\n  }\n};\n\n}  // namespace internal\n\n\n// -------------------------------------------------------------------\n\nnamespace internal {\n\ninline RepeatedPtrFieldBase::RepeatedPtrFieldBase()\n  : arena_(NULL),\n    current_size_(0),\n    total_size_(0),\n    
rep_(NULL) {\n}\n\ninline RepeatedPtrFieldBase::RepeatedPtrFieldBase(::google::protobuf::Arena* arena)\n  : arena_(arena),\n    current_size_(0),\n    total_size_(0),\n    rep_(NULL) {\n}\n\ntemplate <typename TypeHandler>\nvoid RepeatedPtrFieldBase::Destroy() {\n  if (rep_ != NULL && arena_ == NULL) {\n    int n = rep_->allocated_size;\n    void* const* elements = rep_->elements;\n    for (int i = 0; i < n; i++) {\n      TypeHandler::Delete(cast<TypeHandler>(elements[i]), NULL);\n    }\n#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)\n    const size_t size = total_size_ * sizeof(elements[0]) + kRepHeaderSize;\n    ::operator delete(static_cast<void*>(rep_), size);\n#else\n    ::operator delete(static_cast<void*>(rep_));\n#endif\n  }\n  rep_ = NULL;\n}\n\ntemplate <typename TypeHandler>\ninline void RepeatedPtrFieldBase::Swap(RepeatedPtrFieldBase* other) {\n  if (other->GetArenaNoVirtual() == GetArenaNoVirtual()) {\n    InternalSwap(other);\n  } else {\n    SwapFallback<TypeHandler>(other);\n  }\n}\n\ntemplate <typename TypeHandler>\nvoid RepeatedPtrFieldBase::SwapFallback(RepeatedPtrFieldBase* other) {\n  GOOGLE_DCHECK(other->GetArenaNoVirtual() != GetArenaNoVirtual());\n\n  // Copy semantics in this case. 
We try to improve efficiency by placing the\n  // temporary on |other|'s arena so that messages are copied cross-arena only\n  // once, not twice.\n  RepeatedPtrFieldBase temp(other->GetArenaNoVirtual());\n  temp.MergeFrom<TypeHandler>(*this);\n  this->Clear<TypeHandler>();\n  this->MergeFrom<TypeHandler>(*other);\n  other->Clear<TypeHandler>();\n  other->InternalSwap(&temp);\n  temp.Destroy<TypeHandler>();  // Frees rep_ if `other` had no arena.\n}\n\ninline bool RepeatedPtrFieldBase::empty() const {\n  return current_size_ == 0;\n}\n\ninline int RepeatedPtrFieldBase::size() const {\n  return current_size_;\n}\n\ntemplate <typename TypeHandler>\ninline const typename TypeHandler::Type&\nRepeatedPtrFieldBase::Get(int index) const {\n  GOOGLE_DCHECK_GE(index, 0);\n  GOOGLE_DCHECK_LT(index, current_size_);\n  return *cast<TypeHandler>(rep_->elements[index]);\n}\n\n\ntemplate <typename TypeHandler>\ninline typename TypeHandler::Type*\nRepeatedPtrFieldBase::Mutable(int index) {\n  GOOGLE_DCHECK_GE(index, 0);\n  GOOGLE_DCHECK_LT(index, current_size_);\n  return cast<TypeHandler>(rep_->elements[index]);\n}\n\ntemplate <typename TypeHandler>\ninline void RepeatedPtrFieldBase::Delete(int index) {\n  GOOGLE_DCHECK_GE(index, 0);\n  GOOGLE_DCHECK_LT(index, current_size_);\n  TypeHandler::Delete(cast<TypeHandler>(rep_->elements[index]), arena_);\n}\n\ntemplate <typename TypeHandler>\ninline typename TypeHandler::Type* RepeatedPtrFieldBase::Add(\n    typename TypeHandler::Type* prototype) {\n  if (rep_ != NULL && current_size_ < rep_->allocated_size) {\n    return cast<TypeHandler>(rep_->elements[current_size_++]);\n  }\n  if (!rep_ || rep_->allocated_size == total_size_) {\n    Reserve(total_size_ + 1);\n  }\n  ++rep_->allocated_size;\n  typename TypeHandler::Type* result =\n      TypeHandler::NewFromPrototype(prototype, arena_);\n  rep_->elements[current_size_++] = result;\n  return result;\n}\n\ntemplate <typename TypeHandler>\ninline void RepeatedPtrFieldBase::RemoveLast() 
{\n  GOOGLE_DCHECK_GT(current_size_, 0);\n  TypeHandler::Clear(cast<TypeHandler>(rep_->elements[--current_size_]));\n}\n\ntemplate <typename TypeHandler>\nvoid RepeatedPtrFieldBase::Clear() {\n  const int n = current_size_;\n  GOOGLE_DCHECK_GE(n, 0);\n  if (n > 0) {\n    void* const* elements = rep_->elements;\n    int i = 0;\n    do {\n      TypeHandler::Clear(cast<TypeHandler>(elements[i++]));\n    } while (i < n);\n    current_size_ = 0;\n  }\n}\n\n// To avoid unnecessary code duplication and reduce binary size, we use a\n// layered approach to implementing MergeFrom(). The toplevel method is\n// templated, so we get a small thunk per concrete message type in the binary.\n// This calls a shared implementation with most of the logic, passing a function\n// pointer to another type-specific piece of code that calls the object-allocate\n// and merge handlers.\ntemplate <typename TypeHandler>\ninline void RepeatedPtrFieldBase::MergeFrom(const RepeatedPtrFieldBase& other) {\n  GOOGLE_DCHECK_NE(&other, this);\n  if (other.current_size_ == 0) return;\n  MergeFromInternal(\n      other, &RepeatedPtrFieldBase::MergeFromInnerLoop<TypeHandler>);\n}\n\ninline void RepeatedPtrFieldBase::MergeFromInternal(\n    const RepeatedPtrFieldBase& other,\n    void (RepeatedPtrFieldBase::*inner_loop)(void**, void**, int, int)) {\n  // Note: wrapper has already guaranteed that other.rep_ != NULL here.\n  int other_size = other.current_size_;\n  void** other_elements = other.rep_->elements;\n  void** new_elements = InternalExtend(other_size);\n  int allocated_elems = rep_->allocated_size - current_size_;\n  (this->*inner_loop)(new_elements, other_elements,\n                      other_size, allocated_elems);\n  current_size_ += other_size;\n  if (rep_->allocated_size < current_size_) {\n    rep_->allocated_size = current_size_;\n  }\n}\n\n// Merges other_elems to our_elems.\ntemplate<typename TypeHandler>\nvoid RepeatedPtrFieldBase::MergeFromInnerLoop(\n    void** our_elems, void** 
other_elems, int length, int already_allocated) {\n  // Split into two loops, over ranges [0, allocated) and [allocated, length),\n  // to avoid a branch within the loop.\n  for (int i = 0; i < already_allocated && i < length; i++) {\n    // Already allocated: use existing element.\n    typename TypeHandler::Type* other_elem =\n        reinterpret_cast<typename TypeHandler::Type*>(other_elems[i]);\n    typename TypeHandler::Type* new_elem =\n        reinterpret_cast<typename TypeHandler::Type*>(our_elems[i]);\n    TypeHandler::Merge(*other_elem, new_elem);\n  }\n  Arena* arena = GetArenaNoVirtual();\n  for (int i = already_allocated; i < length; i++) {\n    // Not allocated: alloc a new element first, then merge it.\n    typename TypeHandler::Type* other_elem =\n        reinterpret_cast<typename TypeHandler::Type*>(other_elems[i]);\n    typename TypeHandler::Type* new_elem =\n        TypeHandler::NewFromPrototype(other_elem, arena);\n    TypeHandler::Merge(*other_elem, new_elem);\n    our_elems[i] = new_elem;\n  }\n}\n\ntemplate <typename TypeHandler>\ninline void RepeatedPtrFieldBase::CopyFrom(const RepeatedPtrFieldBase& other) {\n  if (&other == this) return;\n  RepeatedPtrFieldBase::Clear<TypeHandler>();\n  RepeatedPtrFieldBase::MergeFrom<TypeHandler>(other);\n}\n\ninline int RepeatedPtrFieldBase::Capacity() const {\n  return total_size_;\n}\n\ninline void* const* RepeatedPtrFieldBase::raw_data() const {\n  return rep_ ? rep_->elements : NULL;\n}\n\ninline void** RepeatedPtrFieldBase::raw_mutable_data() const {\n  return rep_ ? const_cast<void**>(rep_->elements) : NULL;\n}\n\ntemplate <typename TypeHandler>\ninline typename TypeHandler::Type** RepeatedPtrFieldBase::mutable_data() {\n  // TODO(kenton):  Breaks C++ aliasing rules.  
We should probably remove this\n  //   method entirely.\n  return reinterpret_cast<typename TypeHandler::Type**>(raw_mutable_data());\n}\n\ntemplate <typename TypeHandler>\ninline const typename TypeHandler::Type* const*\nRepeatedPtrFieldBase::data() const {\n  // TODO(kenton):  Breaks C++ aliasing rules.  We should probably remove this\n  //   method entirely.\n  return reinterpret_cast<const typename TypeHandler::Type* const*>(raw_data());\n}\n\ninline void RepeatedPtrFieldBase::SwapElements(int index1, int index2) {\n  using std::swap;  // enable ADL with fallback\n  swap(rep_->elements[index1], rep_->elements[index2]);\n}\n\ntemplate <typename TypeHandler>\ninline int RepeatedPtrFieldBase::SpaceUsedExcludingSelf() const {\n  int allocated_bytes = total_size_ * sizeof(void*);\n  if (rep_ != NULL) {\n    for (int i = 0; i < rep_->allocated_size; ++i) {\n      allocated_bytes += TypeHandler::SpaceUsed(\n          *cast<TypeHandler>(rep_->elements[i]));\n    }\n    allocated_bytes += kRepHeaderSize;\n  }\n  return allocated_bytes;\n}\n\ntemplate <typename TypeHandler>\ninline typename TypeHandler::Type* RepeatedPtrFieldBase::AddFromCleared() {\n  if (rep_ != NULL && current_size_ < rep_->allocated_size) {\n    return cast<TypeHandler>(rep_->elements[current_size_++]);\n  } else {\n    return NULL;\n  }\n}\n\n// AddAllocated version that implements arena-safe copying behavior.\ntemplate <typename TypeHandler>\nvoid RepeatedPtrFieldBase::AddAllocatedInternal(\n    typename TypeHandler::Type* value,\n    google::protobuf::internal::true_type) {\n  Arena* element_arena = reinterpret_cast<Arena*>(\n      TypeHandler::GetMaybeArenaPointer(value));\n  Arena* arena = GetArenaNoVirtual();\n  if (arena == element_arena && rep_ &&\n      rep_->allocated_size < total_size_) {\n    // Fast path: underlying arena representation (tagged pointer) is equal to\n    // our arena pointer, and we can add to array without resizing it (at least\n    // one slot that is not allocated).\n  
  void** elems = rep_->elements;\n    if (current_size_ < rep_->allocated_size) {\n      // Make space at [current] by moving first allocated element to end of\n      // allocated list.\n      elems[rep_->allocated_size] = elems[current_size_];\n    }\n    elems[current_size_] = value;\n    current_size_ = current_size_ + 1;\n    rep_->allocated_size = rep_->allocated_size + 1;\n    return;\n  } else {\n    AddAllocatedSlowWithCopy<TypeHandler>(\n        value, TypeHandler::GetArena(value), arena);\n  }\n}\n\n// Slowpath handles all cases, copying if necessary.\ntemplate<typename TypeHandler>\nvoid RepeatedPtrFieldBase::AddAllocatedSlowWithCopy(\n    // Pass value_arena and my_arena to avoid duplicate virtual call (value) or\n    // load (mine).\n    typename TypeHandler::Type* value, Arena* value_arena, Arena* my_arena) {\n  // Ensure that either the value is in the same arena, or if not, we do the\n  // appropriate thing: Own() it (if it's on heap and we're in an arena) or copy\n  // it to our arena/heap (otherwise).\n  if (my_arena != NULL && value_arena == NULL) {\n    my_arena->Own(value);\n  } else if (my_arena != value_arena) {\n    typename TypeHandler::Type* new_value =\n        TypeHandler::NewFromPrototype(value, my_arena);\n    TypeHandler::Merge(*value, new_value);\n    TypeHandler::Delete(value, value_arena);\n    value = new_value;\n  }\n\n  UnsafeArenaAddAllocated<TypeHandler>(value);\n}\n\n// AddAllocated version that does not implement arena-safe copying behavior.\ntemplate <typename TypeHandler>\nvoid RepeatedPtrFieldBase::AddAllocatedInternal(\n    typename TypeHandler::Type* value,\n    google::protobuf::internal::false_type) {\n  if (rep_ &&  rep_->allocated_size < total_size_) {\n    // Fast path: underlying arena representation (tagged pointer) is equal to\n    // our arena pointer, and we can add to array without resizing it (at least\n    // one slot that is not allocated).\n    void** elems = rep_->elements;\n    if (current_size_ < 
rep_->allocated_size) {\n      // Make space at [current] by moving first allocated element to end of\n      // allocated list.\n      elems[rep_->allocated_size] = elems[current_size_];\n    }\n    elems[current_size_] = value;\n    current_size_ = current_size_ + 1;\n    ++rep_->allocated_size;\n    return;\n  } else {\n    UnsafeArenaAddAllocated<TypeHandler>(value);\n  }\n}\n\ntemplate <typename TypeHandler>\nvoid RepeatedPtrFieldBase::UnsafeArenaAddAllocated(\n    typename TypeHandler::Type* value) {\n  // Make room for the new pointer.\n  if (!rep_ || current_size_ == total_size_) {\n    // The array is completely full with no cleared objects, so grow it.\n    Reserve(total_size_ + 1);\n    ++rep_->allocated_size;\n  } else if (rep_->allocated_size == total_size_) {\n    // There is no more space in the pointer array because it contains some\n    // cleared objects awaiting reuse.  We don't want to grow the array in this\n    // case because otherwise a loop calling AddAllocated() followed by Clear()\n    // would leak memory.\n    TypeHandler::Delete(\n        cast<TypeHandler>(rep_->elements[current_size_]), arena_);\n  } else if (current_size_ < rep_->allocated_size) {\n    // We have some cleared objects.  
We don't care about their order, so we\n    // can just move the first one to the end to make space.\n    rep_->elements[rep_->allocated_size] = rep_->elements[current_size_];\n    ++rep_->allocated_size;\n  } else {\n    // There are no cleared objects.\n    ++rep_->allocated_size;\n  }\n\n  rep_->elements[current_size_++] = value;\n}\n\n// ReleaseLast() for types that implement merge/copy behavior.\ntemplate <typename TypeHandler>\ninline typename TypeHandler::Type*\nRepeatedPtrFieldBase::ReleaseLastInternal(google::protobuf::internal::true_type) {\n  // First, release an element.\n  typename TypeHandler::Type* result = UnsafeArenaReleaseLast<TypeHandler>();\n  // Now perform a copy if we're on an arena.\n  Arena* arena = GetArenaNoVirtual();\n  if (arena == NULL) {\n    return result;\n  } else {\n    typename TypeHandler::Type* new_result =\n        TypeHandler::NewFromPrototype(result, NULL);\n    TypeHandler::Merge(*result, new_result);\n    return new_result;\n  }\n}\n\n// ReleaseLast() for types that *do not* implement merge/copy behavior -- this\n// is the same as UnsafeArenaReleaseLast(). Note that we GOOGLE_DCHECK-fail if we're on\n// an arena, since the user really should implement the copy operation in this\n// case.\ntemplate <typename TypeHandler>\ninline typename TypeHandler::Type*\nRepeatedPtrFieldBase::ReleaseLastInternal(google::protobuf::internal::false_type) {\n  GOOGLE_DCHECK(GetArenaNoVirtual() == NULL)\n      << \"ReleaseLast() called on a RepeatedPtrField that is on an arena, \"\n      << \"with a type that does not implement MergeFrom. 
This is unsafe; \"\n      << \"please implement MergeFrom for your type.\";\n  return UnsafeArenaReleaseLast<TypeHandler>();\n}\n\ntemplate <typename TypeHandler>\ninline typename TypeHandler::Type*\n  RepeatedPtrFieldBase::UnsafeArenaReleaseLast() {\n  GOOGLE_DCHECK_GT(current_size_, 0);\n  typename TypeHandler::Type* result =\n      cast<TypeHandler>(rep_->elements[--current_size_]);\n  --rep_->allocated_size;\n  if (current_size_ < rep_->allocated_size) {\n    // There are cleared elements on the end; replace the removed element\n    // with the last allocated element.\n    rep_->elements[current_size_] = rep_->elements[rep_->allocated_size];\n  }\n  return result;\n}\n\ninline int RepeatedPtrFieldBase::ClearedCount() const {\n  return rep_ ? (rep_->allocated_size - current_size_) : 0;\n}\n\ntemplate <typename TypeHandler>\ninline void RepeatedPtrFieldBase::AddCleared(\n    typename TypeHandler::Type* value) {\n  GOOGLE_DCHECK(GetArenaNoVirtual() == NULL)\n      << \"AddCleared() can only be used on a RepeatedPtrField not on an arena.\";\n  GOOGLE_DCHECK(TypeHandler::GetArena(value) == NULL)\n      << \"AddCleared() can only accept values not on an arena.\";\n  if (!rep_ || rep_->allocated_size == total_size_) {\n    Reserve(total_size_ + 1);\n  }\n  rep_->elements[rep_->allocated_size++] = value;\n}\n\ntemplate <typename TypeHandler>\ninline typename TypeHandler::Type* RepeatedPtrFieldBase::ReleaseCleared() {\n  GOOGLE_DCHECK(GetArenaNoVirtual() == NULL)\n      << \"ReleaseCleared() can only be used on a RepeatedPtrField not on \"\n      << \"an arena.\";\n  GOOGLE_DCHECK(GetArenaNoVirtual() == NULL);\n  GOOGLE_DCHECK(rep_ != NULL);\n  GOOGLE_DCHECK_GT(rep_->allocated_size, current_size_);\n  return cast<TypeHandler>(rep_->elements[--rep_->allocated_size]);\n}\n\n}  // namespace internal\n\n// -------------------------------------------------------------------\n\ntemplate <typename Element>\nclass RepeatedPtrField<Element>::TypeHandler\n    : public 
internal::GenericTypeHandler<Element> {\n};\n\ntemplate <>\nclass RepeatedPtrField<string>::TypeHandler\n    : public internal::StringTypeHandler {\n};\n\n\ntemplate <typename Element>\ninline RepeatedPtrField<Element>::RepeatedPtrField()\n  : RepeatedPtrFieldBase() {}\n\ntemplate <typename Element>\ninline RepeatedPtrField<Element>::RepeatedPtrField(::google::protobuf::Arena* arena) :\n  RepeatedPtrFieldBase(arena) {}\n\ntemplate <typename Element>\ninline RepeatedPtrField<Element>::RepeatedPtrField(\n    const RepeatedPtrField& other)\n  : RepeatedPtrFieldBase() {\n  CopyFrom(other);\n}\n\ntemplate <typename Element>\ntemplate <typename Iter>\ninline RepeatedPtrField<Element>::RepeatedPtrField(\n    Iter begin, const Iter& end) {\n  int reserve = internal::CalculateReserve(begin, end);\n  if (reserve != -1) {\n    Reserve(reserve);\n  }\n  for (; begin != end; ++begin) {\n    *Add() = *begin;\n  }\n}\n\ntemplate <typename Element>\nRepeatedPtrField<Element>::~RepeatedPtrField() {\n  Destroy<TypeHandler>();\n}\n\ntemplate <typename Element>\ninline RepeatedPtrField<Element>& RepeatedPtrField<Element>::operator=(\n    const RepeatedPtrField& other) {\n  if (this != &other)\n    CopyFrom(other);\n  return *this;\n}\n\ntemplate <typename Element>\ninline bool RepeatedPtrField<Element>::empty() const {\n  return RepeatedPtrFieldBase::empty();\n}\n\ntemplate <typename Element>\ninline int RepeatedPtrField<Element>::size() const {\n  return RepeatedPtrFieldBase::size();\n}\n\ntemplate <typename Element>\ninline const Element& RepeatedPtrField<Element>::Get(int index) const {\n  return RepeatedPtrFieldBase::Get<TypeHandler>(index);\n}\n\n\ntemplate <typename Element>\ninline Element* RepeatedPtrField<Element>::Mutable(int index) {\n  return RepeatedPtrFieldBase::Mutable<TypeHandler>(index);\n}\n\ntemplate <typename Element>\ninline Element* RepeatedPtrField<Element>::Add() {\n  return RepeatedPtrFieldBase::Add<TypeHandler>();\n}\n\ntemplate <typename Element>\ninline 
void RepeatedPtrField<Element>::RemoveLast() {\n  RepeatedPtrFieldBase::RemoveLast<TypeHandler>();\n}\n\ntemplate <typename Element>\ninline void RepeatedPtrField<Element>::DeleteSubrange(int start, int num) {\n  GOOGLE_DCHECK_GE(start, 0);\n  GOOGLE_DCHECK_GE(num, 0);\n  GOOGLE_DCHECK_LE(start + num, size());\n  for (int i = 0; i < num; ++i) {\n    RepeatedPtrFieldBase::Delete<TypeHandler>(start + i);\n  }\n  ExtractSubrange(start, num, NULL);\n}\n\ntemplate <typename Element>\ninline void RepeatedPtrField<Element>::ExtractSubrange(\n    int start, int num, Element** elements) {\n  typename internal::TypeImplementsMergeBehavior<\n      typename TypeHandler::Type>::type t;\n  ExtractSubrangeInternal(start, num, elements, t);\n}\n\n// ExtractSubrange() implementation for types that implement merge/copy\n// behavior.\ntemplate <typename Element>\ninline void RepeatedPtrField<Element>::ExtractSubrangeInternal(\n    int start, int num, Element** elements, google::protobuf::internal::true_type) {\n  GOOGLE_DCHECK_GE(start, 0);\n  GOOGLE_DCHECK_GE(num, 0);\n  GOOGLE_DCHECK_LE(start + num, size());\n\n  if (num > 0) {\n    // Save the values of the removed elements if requested.\n    if (elements != NULL) {\n      if (GetArenaNoVirtual() != NULL) {\n        // If we're on an arena, we perform a copy for each element so that the\n        // returned elements are heap-allocated.\n        for (int i = 0; i < num; ++i) {\n          Element* element = RepeatedPtrFieldBase::\n              Mutable<TypeHandler>(i + start);\n          typename TypeHandler::Type* new_value =\n              TypeHandler::NewFromPrototype(element, NULL);\n          TypeHandler::Merge(*element, new_value);\n          elements[i] = new_value;\n        }\n      } else {\n        for (int i = 0; i < num; ++i) {\n          elements[i] = RepeatedPtrFieldBase::Mutable<TypeHandler>(i + start);\n        }\n      }\n    }\n    CloseGap(start, num);\n  }\n}\n\n// ExtractSubrange() implementation for types that 
do not implement merge/copy\n// behavior.\ntemplate<typename Element>\ninline void RepeatedPtrField<Element>::ExtractSubrangeInternal(\n    int start, int num, Element** elements, google::protobuf::internal::false_type) {\n  // This case is identical to UnsafeArenaExtractSubrange(). However, since\n  // ExtractSubrange() must return heap-allocated objects by contract, and we\n  // cannot fulfill this contract if we are an on arena, we must GOOGLE_DCHECK() that\n  // we are not on an arena.\n  GOOGLE_DCHECK(GetArenaNoVirtual() == NULL)\n      << \"ExtractSubrange() when arena is non-NULL is only supported when \"\n      << \"the Element type supplies a MergeFrom() operation to make copies.\";\n  UnsafeArenaExtractSubrange(start, num, elements);\n}\n\ntemplate <typename Element>\ninline void RepeatedPtrField<Element>::UnsafeArenaExtractSubrange(\n    int start, int num, Element** elements) {\n  GOOGLE_DCHECK_GE(start, 0);\n  GOOGLE_DCHECK_GE(num, 0);\n  GOOGLE_DCHECK_LE(start + num, size());\n\n  if (num > 0) {\n    // Save the values of the removed elements if requested.\n    if (elements != NULL) {\n      for (int i = 0; i < num; ++i) {\n        elements[i] = RepeatedPtrFieldBase::Mutable<TypeHandler>(i + start);\n      }\n    }\n    CloseGap(start, num);\n  }\n}\n\ntemplate <typename Element>\ninline void RepeatedPtrField<Element>::Clear() {\n  RepeatedPtrFieldBase::Clear<TypeHandler>();\n}\n\ntemplate <typename Element>\ninline void RepeatedPtrField<Element>::MergeFrom(\n    const RepeatedPtrField& other) {\n  RepeatedPtrFieldBase::MergeFrom<TypeHandler>(other);\n}\n\ntemplate <typename Element>\ninline void RepeatedPtrField<Element>::CopyFrom(\n    const RepeatedPtrField& other) {\n  RepeatedPtrFieldBase::CopyFrom<TypeHandler>(other);\n}\n\ntemplate <typename Element>\ninline typename RepeatedPtrField<Element>::iterator\nRepeatedPtrField<Element>::erase(const_iterator position) {\n  return erase(position, position + 1);\n}\n\ntemplate <typename Element>\ninline 
typename RepeatedPtrField<Element>::iterator\nRepeatedPtrField<Element>::erase(const_iterator first, const_iterator last) {\n  size_type pos_offset = std::distance(cbegin(), first);\n  size_type last_offset = std::distance(cbegin(), last);\n  DeleteSubrange(pos_offset, last_offset - pos_offset);\n  return begin() + pos_offset;\n}\n\ntemplate <typename Element>\ninline Element** RepeatedPtrField<Element>::mutable_data() {\n  return RepeatedPtrFieldBase::mutable_data<TypeHandler>();\n}\n\ntemplate <typename Element>\ninline const Element* const* RepeatedPtrField<Element>::data() const {\n  return RepeatedPtrFieldBase::data<TypeHandler>();\n}\n\ntemplate <typename Element>\ninline void RepeatedPtrField<Element>::Swap(RepeatedPtrField* other) {\n  if (this == other)\n    return;\n  RepeatedPtrFieldBase::Swap<TypeHandler>(other);\n}\n\ntemplate <typename Element>\ninline void RepeatedPtrField<Element>::UnsafeArenaSwap(\n    RepeatedPtrField* other) {\n  GOOGLE_DCHECK(GetArenaNoVirtual() == other->GetArenaNoVirtual());\n  if (this == other)\n      return;\n  RepeatedPtrFieldBase::InternalSwap(other);\n}\n\ntemplate <typename Element>\ninline void RepeatedPtrField<Element>::SwapElements(int index1, int index2) {\n  RepeatedPtrFieldBase::SwapElements(index1, index2);\n}\n\ntemplate <typename Element>\ninline Arena* RepeatedPtrField<Element>::GetArenaNoVirtual() const {\n  return RepeatedPtrFieldBase::GetArenaNoVirtual();\n}\n\ntemplate <typename Element>\ninline int RepeatedPtrField<Element>::SpaceUsedExcludingSelf() const {\n  return RepeatedPtrFieldBase::SpaceUsedExcludingSelf<TypeHandler>();\n}\n\ntemplate <typename Element>\ninline void RepeatedPtrField<Element>::AddAllocated(Element* value) {\n  RepeatedPtrFieldBase::AddAllocated<TypeHandler>(value);\n}\n\ntemplate <typename Element>\ninline void RepeatedPtrField<Element>::UnsafeArenaAddAllocated(Element* value) {\n  RepeatedPtrFieldBase::UnsafeArenaAddAllocated<TypeHandler>(value);\n}\n\ntemplate <typename 
Element>\ninline Element* RepeatedPtrField<Element>::ReleaseLast() {\n  return RepeatedPtrFieldBase::ReleaseLast<TypeHandler>();\n}\n\ntemplate <typename Element>\ninline Element* RepeatedPtrField<Element>::UnsafeArenaReleaseLast() {\n  return RepeatedPtrFieldBase::UnsafeArenaReleaseLast<TypeHandler>();\n}\n\ntemplate <typename Element>\ninline int RepeatedPtrField<Element>::ClearedCount() const {\n  return RepeatedPtrFieldBase::ClearedCount();\n}\n\ntemplate <typename Element>\ninline void RepeatedPtrField<Element>::AddCleared(Element* value) {\n  return RepeatedPtrFieldBase::AddCleared<TypeHandler>(value);\n}\n\ntemplate <typename Element>\ninline Element* RepeatedPtrField<Element>::ReleaseCleared() {\n  return RepeatedPtrFieldBase::ReleaseCleared<TypeHandler>();\n}\n\ntemplate <typename Element>\ninline void RepeatedPtrField<Element>::Reserve(int new_size) {\n  return RepeatedPtrFieldBase::Reserve(new_size);\n}\n\ntemplate <typename Element>\ninline int RepeatedPtrField<Element>::Capacity() const {\n  return RepeatedPtrFieldBase::Capacity();\n}\n\n// -------------------------------------------------------------------\n\nnamespace internal {\n\n// STL-like iterator implementation for RepeatedPtrField.  
You should not\n// refer to this class directly; use RepeatedPtrField<T>::iterator instead.\n//\n// The iterator for RepeatedPtrField<T>, RepeatedPtrIterator<T>, is\n// very similar to iterator_ptr<T**> in util/gtl/iterator_adaptors.h,\n// but adds random-access operators and is modified to wrap a void** base\n// iterator (since RepeatedPtrField stores its array as a void* array and\n// casting void** to T** would violate C++ aliasing rules).\n//\n// This code based on net/proto/proto-array-internal.h by Jeffrey Yasskin\n// (jyasskin@google.com).\ntemplate<typename Element>\nclass RepeatedPtrIterator\n    : public std::iterator<\n          std::random_access_iterator_tag, Element> {\n public:\n  typedef RepeatedPtrIterator<Element> iterator;\n  typedef std::iterator<\n          std::random_access_iterator_tag, Element> superclass;\n\n  // Shadow the value_type in std::iterator<> because const_iterator::value_type\n  // needs to be T, not const T.\n  typedef typename remove_const<Element>::type value_type;\n\n  // Let the compiler know that these are type names, so we don't have to\n  // write \"typename\" in front of them everywhere.\n  typedef typename superclass::reference reference;\n  typedef typename superclass::pointer pointer;\n  typedef typename superclass::difference_type difference_type;\n\n  RepeatedPtrIterator() : it_(NULL) {}\n  explicit RepeatedPtrIterator(void* const* it) : it_(it) {}\n\n  // Allow \"upcasting\" from RepeatedPtrIterator<T**> to\n  // RepeatedPtrIterator<const T*const*>.\n  template<typename OtherElement>\n  RepeatedPtrIterator(const RepeatedPtrIterator<OtherElement>& other)\n      : it_(other.it_) {\n    // Force a compiler error if the other type is not convertible to ours.\n    if (false) {\n      implicit_cast<Element*, OtherElement*>(0);\n    }\n  }\n\n  // dereferenceable\n  reference operator*() const { return *reinterpret_cast<Element*>(*it_); }\n  pointer   operator->() const { return &(operator*()); }\n\n  // 
{inc,dec}rementable\n  iterator& operator++() { ++it_; return *this; }\n  iterator  operator++(int) { return iterator(it_++); }\n  iterator& operator--() { --it_; return *this; }\n  iterator  operator--(int) { return iterator(it_--); }\n\n  // equality_comparable\n  bool operator==(const iterator& x) const { return it_ == x.it_; }\n  bool operator!=(const iterator& x) const { return it_ != x.it_; }\n\n  // less_than_comparable\n  bool operator<(const iterator& x) const { return it_ < x.it_; }\n  bool operator<=(const iterator& x) const { return it_ <= x.it_; }\n  bool operator>(const iterator& x) const { return it_ > x.it_; }\n  bool operator>=(const iterator& x) const { return it_ >= x.it_; }\n\n  // addable, subtractable\n  iterator& operator+=(difference_type d) {\n    it_ += d;\n    return *this;\n  }\n  friend iterator operator+(iterator it, const difference_type d) {\n    it += d;\n    return it;\n  }\n  friend iterator operator+(const difference_type d, iterator it) {\n    it += d;\n    return it;\n  }\n  iterator& operator-=(difference_type d) {\n    it_ -= d;\n    return *this;\n  }\n  friend iterator operator-(iterator it, difference_type d) {\n    it -= d;\n    return it;\n  }\n\n  // indexable\n  reference operator[](difference_type d) const { return *(*this + d); }\n\n  // random access iterator\n  difference_type operator-(const iterator& x) const { return it_ - x.it_; }\n\n private:\n  template<typename OtherElement>\n  friend class RepeatedPtrIterator;\n\n  // The internal iterator.\n  void* const* it_;\n};\n\n// Provide an iterator that operates on pointers to the underlying objects\n// rather than the objects themselves as RepeatedPtrIterator does.\n// Consider using this when working with stl algorithms that change\n// the array.\n// The VoidPtr template parameter holds the type-agnostic pointer value\n// referenced by the iterator.  
It should either be \"void *\" for a mutable\n// iterator, or \"const void *\" for a constant iterator.\ntemplate<typename Element, typename VoidPtr>\nclass RepeatedPtrOverPtrsIterator\n    : public std::iterator<std::random_access_iterator_tag, Element*> {\n public:\n  typedef RepeatedPtrOverPtrsIterator<Element, VoidPtr> iterator;\n  typedef std::iterator<\n          std::random_access_iterator_tag, Element*> superclass;\n\n  // Shadow the value_type in std::iterator<> because const_iterator::value_type\n  // needs to be T, not const T.\n  typedef typename remove_const<Element*>::type value_type;\n\n  // Let the compiler know that these are type names, so we don't have to\n  // write \"typename\" in front of them everywhere.\n  typedef typename superclass::reference reference;\n  typedef typename superclass::pointer pointer;\n  typedef typename superclass::difference_type difference_type;\n\n  RepeatedPtrOverPtrsIterator() : it_(NULL) {}\n  explicit RepeatedPtrOverPtrsIterator(VoidPtr* it) : it_(it) {}\n\n  // dereferenceable\n  reference operator*() const { return *reinterpret_cast<Element**>(it_); }\n  pointer   operator->() const { return &(operator*()); }\n\n  // {inc,dec}rementable\n  iterator& operator++() { ++it_; return *this; }\n  iterator  operator++(int) { return iterator(it_++); }\n  iterator& operator--() { --it_; return *this; }\n  iterator  operator--(int) { return iterator(it_--); }\n\n  // equality_comparable\n  bool operator==(const iterator& x) const { return it_ == x.it_; }\n  bool operator!=(const iterator& x) const { return it_ != x.it_; }\n\n  // less_than_comparable\n  bool operator<(const iterator& x) const { return it_ < x.it_; }\n  bool operator<=(const iterator& x) const { return it_ <= x.it_; }\n  bool operator>(const iterator& x) const { return it_ > x.it_; }\n  bool operator>=(const iterator& x) const { return it_ >= x.it_; }\n\n  // addable, subtractable\n  iterator& operator+=(difference_type d) {\n    it_ += d;\n    return 
*this;\n  }\n  friend iterator operator+(iterator it, difference_type d) {\n    it += d;\n    return it;\n  }\n  friend iterator operator+(difference_type d, iterator it) {\n    it += d;\n    return it;\n  }\n  iterator& operator-=(difference_type d) {\n    it_ -= d;\n    return *this;\n  }\n  friend iterator operator-(iterator it, difference_type d) {\n    it -= d;\n    return it;\n  }\n\n  // indexable\n  reference operator[](difference_type d) const { return *(*this + d); }\n\n  // random access iterator\n  difference_type operator-(const iterator& x) const { return it_ - x.it_; }\n\n private:\n  template<typename OtherElement>\n  friend class RepeatedPtrIterator;\n\n  // The internal iterator.\n  VoidPtr* it_;\n};\n\nvoid RepeatedPtrFieldBase::InternalSwap(RepeatedPtrFieldBase* other) {\n  std::swap(rep_, other->rep_);\n  std::swap(current_size_, other->current_size_);\n  std::swap(total_size_, other->total_size_);\n}\n\n}  // namespace internal\n\ntemplate <typename Element>\ninline typename RepeatedPtrField<Element>::iterator\nRepeatedPtrField<Element>::begin() {\n  return iterator(raw_data());\n}\ntemplate <typename Element>\ninline typename RepeatedPtrField<Element>::const_iterator\nRepeatedPtrField<Element>::begin() const {\n  return iterator(raw_data());\n}\ntemplate <typename Element>\ninline typename RepeatedPtrField<Element>::const_iterator\nRepeatedPtrField<Element>::cbegin() const {\n  return begin();\n}\ntemplate <typename Element>\ninline typename RepeatedPtrField<Element>::iterator\nRepeatedPtrField<Element>::end() {\n  return iterator(raw_data() + size());\n}\ntemplate <typename Element>\ninline typename RepeatedPtrField<Element>::const_iterator\nRepeatedPtrField<Element>::end() const {\n  return iterator(raw_data() + size());\n}\ntemplate <typename Element>\ninline typename RepeatedPtrField<Element>::const_iterator\nRepeatedPtrField<Element>::cend() const {\n  return end();\n}\n\ntemplate <typename Element>\ninline typename 
RepeatedPtrField<Element>::pointer_iterator\nRepeatedPtrField<Element>::pointer_begin() {\n  return pointer_iterator(raw_mutable_data());\n}\ntemplate <typename Element>\ninline typename RepeatedPtrField<Element>::const_pointer_iterator\nRepeatedPtrField<Element>::pointer_begin() const {\n  return const_pointer_iterator(const_cast<const void**>(raw_mutable_data()));\n}\ntemplate <typename Element>\ninline typename RepeatedPtrField<Element>::pointer_iterator\nRepeatedPtrField<Element>::pointer_end() {\n  return pointer_iterator(raw_mutable_data() + size());\n}\ntemplate <typename Element>\ninline typename RepeatedPtrField<Element>::const_pointer_iterator\nRepeatedPtrField<Element>::pointer_end() const {\n  return const_pointer_iterator(\n      const_cast<const void**>(raw_mutable_data() + size()));\n}\n\n\n// Iterators and helper functions that follow the spirit of the STL\n// std::back_insert_iterator and std::back_inserter but are tailor-made\n// for RepeatedField and RepeatedPtrField. 
Typical usage would be:\n//\n//   std::copy(some_sequence.begin(), some_sequence.end(),\n//             google::protobuf::RepeatedFieldBackInserter(proto.mutable_sequence()));\n//\n// Ported by johannes from util/gtl/proto-array-iterators.h\n\nnamespace internal {\n// A back inserter for RepeatedField objects.\ntemplate<typename T> class RepeatedFieldBackInsertIterator\n    : public std::iterator<std::output_iterator_tag, T> {\n public:\n  explicit RepeatedFieldBackInsertIterator(\n      RepeatedField<T>* const mutable_field)\n      : field_(mutable_field) {\n  }\n  RepeatedFieldBackInsertIterator<T>& operator=(const T& value) {\n    field_->Add(value);\n    return *this;\n  }\n  RepeatedFieldBackInsertIterator<T>& operator*() {\n    return *this;\n  }\n  RepeatedFieldBackInsertIterator<T>& operator++() {\n    return *this;\n  }\n  RepeatedFieldBackInsertIterator<T>& operator++(int /* unused */) {\n    return *this;\n  }\n\n private:\n  RepeatedField<T>* field_;\n};\n\n// A back inserter for RepeatedPtrField objects.\ntemplate<typename T> class RepeatedPtrFieldBackInsertIterator\n    : public std::iterator<std::output_iterator_tag, T> {\n public:\n  RepeatedPtrFieldBackInsertIterator(\n      RepeatedPtrField<T>* const mutable_field)\n      : field_(mutable_field) {\n  }\n  RepeatedPtrFieldBackInsertIterator<T>& operator=(const T& value) {\n    *field_->Add() = value;\n    return *this;\n  }\n  RepeatedPtrFieldBackInsertIterator<T>& operator=(\n      const T* const ptr_to_value) {\n    *field_->Add() = *ptr_to_value;\n    return *this;\n  }\n  RepeatedPtrFieldBackInsertIterator<T>& operator*() {\n    return *this;\n  }\n  RepeatedPtrFieldBackInsertIterator<T>& operator++() {\n    return *this;\n  }\n  RepeatedPtrFieldBackInsertIterator<T>& operator++(int /* unused */) {\n    return *this;\n  }\n\n private:\n  RepeatedPtrField<T>* field_;\n};\n\n// A back inserter for RepeatedPtrFields that inserts by transferring ownership\n// of a pointer.\ntemplate<typename T> 
class AllocatedRepeatedPtrFieldBackInsertIterator\n    : public std::iterator<std::output_iterator_tag, T> {\n public:\n  explicit AllocatedRepeatedPtrFieldBackInsertIterator(\n      RepeatedPtrField<T>* const mutable_field)\n      : field_(mutable_field) {\n  }\n  AllocatedRepeatedPtrFieldBackInsertIterator<T>& operator=(\n      T* const ptr_to_value) {\n    field_->AddAllocated(ptr_to_value);\n    return *this;\n  }\n  AllocatedRepeatedPtrFieldBackInsertIterator<T>& operator*() {\n    return *this;\n  }\n  AllocatedRepeatedPtrFieldBackInsertIterator<T>& operator++() {\n    return *this;\n  }\n  AllocatedRepeatedPtrFieldBackInsertIterator<T>& operator++(\n      int /* unused */) {\n    return *this;\n  }\n\n private:\n  RepeatedPtrField<T>* field_;\n};\n\n// Almost identical to AllocatedRepeatedPtrFieldBackInsertIterator. This one\n// uses the UnsafeArenaAddAllocated instead.\ntemplate<typename T>\nclass UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator\n    : public std::iterator<std::output_iterator_tag, T> {\n public:\n  explicit UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator(\n    ::google::protobuf::RepeatedPtrField<T>* const mutable_field)\n  : field_(mutable_field) {\n  }\n  UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator<T>& operator=(\n    T const* const ptr_to_value) {\n    field_->UnsafeArenaAddAllocated(const_cast<T*>(ptr_to_value));\n    return *this;\n  }\n  UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator<T>& operator*() {\n    return *this;\n  }\n  UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator<T>& operator++() {\n    return *this;\n  }\n  UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator<T>& operator++(\n      int /* unused */) {\n    return *this;\n  }\n\n private:\n  ::google::protobuf::RepeatedPtrField<T>* field_;\n};\n\n}  // namespace internal\n\n// Provides a back insert iterator for RepeatedField instances,\n// similar to std::back_inserter().\ntemplate<typename T> 
internal::RepeatedFieldBackInsertIterator<T>\nRepeatedFieldBackInserter(RepeatedField<T>* const mutable_field) {\n  return internal::RepeatedFieldBackInsertIterator<T>(mutable_field);\n}\n\n// Provides a back insert iterator for RepeatedPtrField instances,\n// similar to std::back_inserter().\ntemplate<typename T> internal::RepeatedPtrFieldBackInsertIterator<T>\nRepeatedPtrFieldBackInserter(RepeatedPtrField<T>* const mutable_field) {\n  return internal::RepeatedPtrFieldBackInsertIterator<T>(mutable_field);\n}\n\n// Special back insert iterator for RepeatedPtrField instances, just in\n// case someone wants to write generic template code that can access both\n// RepeatedFields and RepeatedPtrFields using a common name.\ntemplate<typename T> internal::RepeatedPtrFieldBackInsertIterator<T>\nRepeatedFieldBackInserter(RepeatedPtrField<T>* const mutable_field) {\n  return internal::RepeatedPtrFieldBackInsertIterator<T>(mutable_field);\n}\n\n// Provides a back insert iterator for RepeatedPtrField instances\n// similar to std::back_inserter() which transfers the ownership while\n// copying elements.\ntemplate<typename T> internal::AllocatedRepeatedPtrFieldBackInsertIterator<T>\nAllocatedRepeatedPtrFieldBackInserter(\n    RepeatedPtrField<T>* const mutable_field) {\n  return internal::AllocatedRepeatedPtrFieldBackInsertIterator<T>(\n      mutable_field);\n}\n\n// Similar to AllocatedRepeatedPtrFieldBackInserter, using\n// UnsafeArenaAddAllocated instead of AddAllocated.\n// This is slightly faster if that matters. It is also useful in legacy code\n// that uses temporary ownership to avoid copies. Example:\n// RepeatedPtrField<T> temp_field;\n// temp_field.AddAllocated(new T);\n// ... // Do something with temp_field\n// temp_field.ExtractSubrange(0, temp_field.size(), NULL);\n// If you put temp_field on the arena this fails, because the ownership\n// transfers to the arena at the \"AddAllocated\" call and is not released anymore\n// causing a double delete. 
Using UnsafeArenaAddAllocated prevents this.\ntemplate<typename T>\ninternal::UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator<T>\nUnsafeArenaAllocatedRepeatedPtrFieldBackInserter(\n    ::google::protobuf::RepeatedPtrField<T>* const mutable_field) {\n  return internal::UnsafeArenaAllocatedRepeatedPtrFieldBackInsertIterator<T>(\n      mutable_field);\n}\n\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_REPEATED_FIELD_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/service.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// DEPRECATED:  This module declares the abstract interfaces underlying proto2\n// RPC services.  
These are intented to be independent of any particular RPC\n// implementation, so that proto2 services can be used on top of a variety\n// of implementations.  Starting with version 2.3.0, RPC implementations should\n// not try to build on these, but should instead provide code generator plugins\n// which generate code specific to the particular RPC implementation.  This way\n// the generated code can be more appropriate for the implementation in use\n// and can avoid unnecessary layers of indirection.\n//\n//\n// When you use the protocol compiler to compile a service definition, it\n// generates two classes:  An abstract interface for the service (with\n// methods matching the service definition) and a \"stub\" implementation.\n// A stub is just a type-safe wrapper around an RpcChannel which emulates a\n// local implementation of the service.\n//\n// For example, the service definition:\n//   service MyService {\n//     rpc Foo(MyRequest) returns(MyResponse);\n//   }\n// will generate abstract interface \"MyService\" and class \"MyService::Stub\".\n// You could implement a MyService as follows:\n//   class MyServiceImpl : public MyService {\n//    public:\n//     MyServiceImpl() {}\n//     ~MyServiceImpl() {}\n//\n//     // implements MyService ---------------------------------------\n//\n//     void Foo(google::protobuf::RpcController* controller,\n//              const MyRequest* request,\n//              MyResponse* response,\n//              Closure* done) {\n//       // ... read request and fill in response ...\n//       done->Run();\n//     }\n//   };\n// You would then register an instance of MyServiceImpl with your RPC server\n// implementation.  
(How to do that depends on the implementation.)\n//\n// To call a remote MyServiceImpl, first you need an RpcChannel connected to it.\n// How to construct a channel depends, again, on your RPC implementation.\n// Here we use a hypothetical \"MyRpcChannel\" as an example:\n//   MyRpcChannel channel(\"rpc:hostname:1234/myservice\");\n//   MyRpcController controller;\n//   MyServiceImpl::Stub stub(&channel);\n//   FooRequest request;\n//   FooResponse response;\n//\n//   // ... fill in request ...\n//\n//   stub.Foo(&controller, request, &response, NewCallback(HandleResponse));\n//\n// On Thread-Safety:\n//\n// Different RPC implementations may make different guarantees about what\n// threads they may run callbacks on, and what threads the application is\n// allowed to use to call the RPC system.  Portable software should be ready\n// for callbacks to be called on any thread, but should not try to call the\n// RPC system from any thread except for the ones on which it received the\n// callbacks.  Realistically, though, simple software will probably want to\n// use a single-threaded RPC system while high-end software will want to\n// use multiple threads.  RPC implementations should provide multiple\n// choices.\n\n#ifndef GOOGLE_PROTOBUF_SERVICE_H__\n#define GOOGLE_PROTOBUF_SERVICE_H__\n\n#include <string>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/callback.h>\n\nnamespace google {\nnamespace protobuf {\n\n// Defined in this file.\nclass Service;\nclass RpcController;\nclass RpcChannel;\n\n// Defined in other files.\nclass Descriptor;            // descriptor.h\nclass ServiceDescriptor;     // descriptor.h\nclass MethodDescriptor;      // descriptor.h\nclass Message;               // message.h\n\n// Abstract base interface for protocol-buffer-based RPC services.  Services\n// themselves are abstract interfaces (implemented either by servers or as\n// stubs), but they subclass this base interface.  
The methods of this\n// interface can be used to call the methods of the Service without knowing\n// its exact type at compile time (analogous to Reflection).\nclass LIBPROTOBUF_EXPORT Service {\n public:\n  inline Service() {}\n  virtual ~Service();\n\n  // When constructing a stub, you may pass STUB_OWNS_CHANNEL as the second\n  // parameter to the constructor to tell it to delete its RpcChannel when\n  // destroyed.\n  enum ChannelOwnership {\n    STUB_OWNS_CHANNEL,\n    STUB_DOESNT_OWN_CHANNEL\n  };\n\n  // Get the ServiceDescriptor describing this service and its methods.\n  virtual const ServiceDescriptor* GetDescriptor() = 0;\n\n  // Call a method of the service specified by MethodDescriptor.  This is\n  // normally implemented as a simple switch() that calls the standard\n  // definitions of the service's methods.\n  //\n  // Preconditions:\n  // * method->service() == GetDescriptor()\n  // * request and response are of the exact same classes as the objects\n  //   returned by GetRequestPrototype(method) and\n  //   GetResponsePrototype(method).\n  // * After the call has started, the request must not be modified and the\n  //   response must not be accessed at all until \"done\" is called.\n  // * \"controller\" is of the correct type for the RPC implementation being\n  //   used by this Service.  For stubs, the \"correct type\" depends on the\n  //   RpcChannel which the stub is using.  Server-side Service\n  //   implementations are expected to accept whatever type of RpcController\n  //   the server-side RPC implementation uses.\n  //\n  // Postconditions:\n  // * \"done\" will be called when the method is complete.  This may be\n  //   before CallMethod() returns or it may be at some point in the future.\n  // * If the RPC succeeded, \"response\" contains the response returned by\n  //   the server.\n  // * If the RPC failed, \"response\"'s contents are undefined.  
The\n  //   RpcController can be queried to determine if an error occurred and\n  //   possibly to get more information about the error.\n  virtual void CallMethod(const MethodDescriptor* method,\n                          RpcController* controller,\n                          const Message* request,\n                          Message* response,\n                          Closure* done) = 0;\n\n  // CallMethod() requires that the request and response passed in are of a\n  // particular subclass of Message.  GetRequestPrototype() and\n  // GetResponsePrototype() get the default instances of these required types.\n  // You can then call Message::New() on these instances to construct mutable\n  // objects which you can then pass to CallMethod().\n  //\n  // Example:\n  //   const MethodDescriptor* method =\n  //     service->GetDescriptor()->FindMethodByName(\"Foo\");\n  //   Message* request  = stub->GetRequestPrototype (method)->New();\n  //   Message* response = stub->GetResponsePrototype(method)->New();\n  //   request->ParseFromString(input);\n  //   service->CallMethod(method, *request, response, callback);\n  virtual const Message& GetRequestPrototype(\n    const MethodDescriptor* method) const = 0;\n  virtual const Message& GetResponsePrototype(\n    const MethodDescriptor* method) const = 0;\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Service);\n};\n\n// An RpcController mediates a single method call.  The primary purpose of\n// the controller is to provide a way to manipulate settings specific to the\n// RPC implementation and to find out about RPC-level errors.\n//\n// The methods provided by the RpcController interface are intended to be a\n// \"least common denominator\" set of features which we expect all\n// implementations to support.  Specific implementations may provide more\n// advanced features (e.g. 
deadline propagation).\nclass LIBPROTOBUF_EXPORT RpcController {\n public:\n  inline RpcController() {}\n  virtual ~RpcController();\n\n  // Client-side methods ---------------------------------------------\n  // These calls may be made from the client side only.  Their results\n  // are undefined on the server side (may crash).\n\n  // Resets the RpcController to its initial state so that it may be reused in\n  // a new call.  Must not be called while an RPC is in progress.\n  virtual void Reset() = 0;\n\n  // After a call has finished, returns true if the call failed.  The possible\n  // reasons for failure depend on the RPC implementation.  Failed() must not\n  // be called before a call has finished.  If Failed() returns true, the\n  // contents of the response message are undefined.\n  virtual bool Failed() const = 0;\n\n  // If Failed() is true, returns a human-readable description of the error.\n  virtual string ErrorText() const = 0;\n\n  // Advises the RPC system that the caller desires that the RPC call be\n  // canceled.  The RPC system may cancel it immediately, may wait awhile and\n  // then cancel it, or may not even cancel the call at all.  If the call is\n  // canceled, the \"done\" callback will still be called and the RpcController\n  // will indicate that the call failed at that time.\n  virtual void StartCancel() = 0;\n\n  // Server-side methods ---------------------------------------------\n  // These calls may be made from the server side only.  Their results\n  // are undefined on the client side (may crash).\n\n  // Causes Failed() to return true on the client side.  \"reason\" will be\n  // incorporated into the message returned by ErrorText().  
If you find\n  // you need to return machine-readable information about failures, you\n  // should incorporate it into your response protocol buffer and should\n  // NOT call SetFailed().\n  virtual void SetFailed(const string& reason) = 0;\n\n  // If true, indicates that the client canceled the RPC, so the server may\n  // as well give up on replying to it.  The server should still call the\n  // final \"done\" callback.\n  virtual bool IsCanceled() const = 0;\n\n  // Asks that the given callback be called when the RPC is canceled.  The\n  // callback will always be called exactly once.  If the RPC completes without\n  // being canceled, the callback will be called after completion.  If the RPC\n  // has already been canceled when NotifyOnCancel() is called, the callback\n  // will be called immediately.\n  //\n  // NotifyOnCancel() must be called no more than once per request.\n  virtual void NotifyOnCancel(Closure* callback) = 0;\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(RpcController);\n};\n\n// Abstract interface for an RPC channel.  An RpcChannel represents a\n// communication line to a Service which can be used to call that Service's\n// methods.  The Service may be running on another machine.  Normally, you\n// should not call an RpcChannel directly, but instead construct a stub Service\n// wrapping it.  Example:\n//   RpcChannel* channel = new MyRpcChannel(\"remotehost.example.com:1234\");\n//   MyService* service = new MyService::Stub(channel);\n//   service->MyMethod(request, &response, callback);\nclass LIBPROTOBUF_EXPORT RpcChannel {\n public:\n  inline RpcChannel() {}\n  virtual ~RpcChannel();\n\n  // Call the given method of the remote service.  
The signature of this\n  // procedure looks the same as Service::CallMethod(), but the requirements\n  // are less strict in one important way:  the request and response objects\n  // need not be of any specific class as long as their descriptors are\n  // method->input_type() and method->output_type().\n  virtual void CallMethod(const MethodDescriptor* method,\n                          RpcController* controller,\n                          const Message* request,\n                          Message* response,\n                          Closure* done) = 0;\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(RpcChannel);\n};\n\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_SERVICE_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/source_context.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: google/protobuf/source_context.proto\n\n#ifndef PROTOBUF_google_2fprotobuf_2fsource_5fcontext_2eproto__INCLUDED\n#define PROTOBUF_google_2fprotobuf_2fsource_5fcontext_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/unknown_field_set.h>\n// @@protoc_insertion_point(includes)\n\nnamespace google {\nnamespace protobuf {\n\n// Internal implementation detail -- do not call these.\nvoid LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fsource_5fcontext_2eproto();\nvoid LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fsource_5fcontext_2eproto();\nvoid protobuf_AssignDesc_google_2fprotobuf_2fsource_5fcontext_2eproto();\nvoid protobuf_ShutdownFile_google_2fprotobuf_2fsource_5fcontext_2eproto();\n\nclass SourceContext;\n\n// ===================================================================\n\nclass LIBPROTOBUF_EXPORT SourceContext : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.SourceContext) */ {\n public:\n  SourceContext();\n  virtual ~SourceContext();\n\n  SourceContext(const SourceContext& from);\n\n  
inline SourceContext& operator=(const SourceContext& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const SourceContext& default_instance();\n\n  static const SourceContext* internal_default_instance();\n\n  void Swap(SourceContext* other);\n\n  // implements Message ----------------------------------------------\n\n  inline SourceContext* New() const { return New(NULL); }\n\n  SourceContext* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const SourceContext& from);\n  void MergeFrom(const SourceContext& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(SourceContext* other);\n  void UnsafeMergeFrom(const SourceContext& from);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors 
-------------------------------------------------------\n\n  // optional string file_name = 1;\n  void clear_file_name();\n  static const int kFileNameFieldNumber = 1;\n  const ::std::string& file_name() const;\n  void set_file_name(const ::std::string& value);\n  void set_file_name(const char* value);\n  void set_file_name(const char* value, size_t size);\n  ::std::string* mutable_file_name();\n  ::std::string* release_file_name();\n  void set_allocated_file_name(::std::string* file_name);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.SourceContext)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  ::google::protobuf::internal::ArenaStringPtr file_name_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fsource_5fcontext_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fsource_5fcontext_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fsource_5fcontext_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fsource_5fcontext_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<SourceContext> SourceContext_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// SourceContext\n\n// optional string file_name = 1;\ninline void SourceContext::clear_file_name() {\n  file_name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline const ::std::string& SourceContext::file_name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.SourceContext.file_name)\n  return file_name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void SourceContext::set_file_name(const ::std::string& value) {\n  \n  
file_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);\n  // @@protoc_insertion_point(field_set:google.protobuf.SourceContext.file_name)\n}\ninline void SourceContext::set_file_name(const char* value) {\n  \n  file_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));\n  // @@protoc_insertion_point(field_set_char:google.protobuf.SourceContext.file_name)\n}\ninline void SourceContext::set_file_name(const char* value, size_t size) {\n  \n  file_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(reinterpret_cast<const char*>(value), size));\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.SourceContext.file_name)\n}\ninline ::std::string* SourceContext::mutable_file_name() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.SourceContext.file_name)\n  return file_name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline ::std::string* SourceContext::release_file_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.SourceContext.file_name)\n  \n  return file_name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void SourceContext::set_allocated_file_name(::std::string* file_name) {\n  if (file_name != NULL) {\n    \n  } else {\n    \n  }\n  file_name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), file_name);\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.SourceContext.file_name)\n}\n\ninline const SourceContext* SourceContext::internal_default_instance() {\n  return &SourceContext_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace protobuf\n}  // namespace google\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // 
PROTOBUF_google_2fprotobuf_2fsource_5fcontext_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/source_context.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"SourceContextProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n// `SourceContext` represents information about the source of a\n// protobuf element, like the file in which it is defined.\nmessage SourceContext {\n  // The path-qualified name of the .proto file that contained the associated\n  // protobuf element.  For example: `\"google/protobuf/source_context.proto\"`.\n  string file_name = 1;\n}\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/struct.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: google/protobuf/struct.proto\n\n#ifndef PROTOBUF_google_2fprotobuf_2fstruct_2eproto__INCLUDED\n#define PROTOBUF_google_2fprotobuf_2fstruct_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/map.h>\n#include <google/protobuf/map_field_inl.h>\n#include <google/protobuf/generated_enum_reflection.h>\n#include <google/protobuf/unknown_field_set.h>\n// @@protoc_insertion_point(includes)\n\nnamespace google {\nnamespace protobuf {\n\n// Internal implementation detail -- do not call these.\nvoid LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fstruct_2eproto();\nvoid LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fstruct_2eproto();\nvoid protobuf_AssignDesc_google_2fprotobuf_2fstruct_2eproto();\nvoid protobuf_ShutdownFile_google_2fprotobuf_2fstruct_2eproto();\n\nclass ListValue;\nclass Struct;\nclass Value;\n\nenum NullValue {\n  NULL_VALUE = 0,\n  NullValue_INT_MIN_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32min,\n  NullValue_INT_MAX_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32max\n};\nLIBPROTOBUF_EXPORT bool NullValue_IsValid(int value);\nconst 
NullValue NullValue_MIN = NULL_VALUE;\nconst NullValue NullValue_MAX = NULL_VALUE;\nconst int NullValue_ARRAYSIZE = NullValue_MAX + 1;\n\nLIBPROTOBUF_EXPORT const ::google::protobuf::EnumDescriptor* NullValue_descriptor();\ninline const ::std::string& NullValue_Name(NullValue value) {\n  return ::google::protobuf::internal::NameOfEnum(\n    NullValue_descriptor(), value);\n}\ninline bool NullValue_Parse(\n    const ::std::string& name, NullValue* value) {\n  return ::google::protobuf::internal::ParseNamedEnum<NullValue>(\n    NullValue_descriptor(), name, value);\n}\n// ===================================================================\n\nclass LIBPROTOBUF_EXPORT Struct : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Struct) */ {\n public:\n  Struct();\n  virtual ~Struct();\n\n  Struct(const Struct& from);\n\n  inline Struct& operator=(const Struct& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Struct& default_instance();\n\n  static const Struct* internal_default_instance();\n\n  void UnsafeArenaSwap(Struct* other);\n  void Swap(Struct* other);\n\n  // implements Message ----------------------------------------------\n\n  inline Struct* New() const { return New(NULL); }\n\n  Struct* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Struct& from);\n  void MergeFrom(const Struct& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      
::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Struct* other);\n  void UnsafeMergeFrom(const Struct& from);\n  protected:\n  explicit Struct(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n\n  // accessors -------------------------------------------------------\n\n  // map<string, .google.protobuf.Value> fields = 1;\n  int fields_size() const;\n  void clear_fields();\n  static const int kFieldsFieldNumber = 1;\n  const ::google::protobuf::Map< ::std::string, ::google::protobuf::Value >&\n      fields() const;\n  ::google::protobuf::Map< ::std::string, ::google::protobuf::Value >*\n      mutable_fields();\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.Struct)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  typedef ::google::protobuf::internal::MapEntryLite<\n      ::std::string, ::google::protobuf::Value,\n      
::google::protobuf::internal::WireFormatLite::TYPE_STRING,\n      ::google::protobuf::internal::WireFormatLite::TYPE_MESSAGE,\n      0 >\n      Struct_FieldsEntry;\n  ::google::protobuf::internal::MapField<\n      ::std::string, ::google::protobuf::Value,\n      ::google::protobuf::internal::WireFormatLite::TYPE_STRING,\n      ::google::protobuf::internal::WireFormatLite::TYPE_MESSAGE,\n      0 > fields_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fstruct_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fstruct_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fstruct_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fstruct_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Struct> Struct_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT Value : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Value) */ {\n public:\n  Value();\n  virtual ~Value();\n\n  Value(const Value& from);\n\n  inline Value& operator=(const Value& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Value& default_instance();\n\n  enum KindCase {\n    kNullValue = 1,\n    kNumberValue = 2,\n    kStringValue = 3,\n    kBoolValue = 4,\n    kStructValue = 5,\n    kListValue = 6,\n    KIND_NOT_SET = 0,\n  };\n\n  static const Value* internal_default_instance();\n\n  void UnsafeArenaSwap(Value* other);\n  void Swap(Value* other);\n\n  // implements Message ----------------------------------------------\n\n  inline Value* New() const { return 
New(NULL); }\n\n  Value* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Value& from);\n  void MergeFrom(const Value& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Value* other);\n  void UnsafeMergeFrom(const Value& from);\n  protected:\n  explicit Value(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional .google.protobuf.NullValue null_value = 1;\n  private:\n  bool has_null_value() const;\n  public:\n  void clear_null_value();\n  static const int kNullValueFieldNumber = 1;\n  ::google::protobuf::NullValue null_value() const;\n  void 
set_null_value(::google::protobuf::NullValue value);\n\n  // optional double number_value = 2;\n  private:\n  bool has_number_value() const;\n  public:\n  void clear_number_value();\n  static const int kNumberValueFieldNumber = 2;\n  double number_value() const;\n  void set_number_value(double value);\n\n  // optional string string_value = 3;\n  private:\n  bool has_string_value() const;\n  public:\n  void clear_string_value();\n  static const int kStringValueFieldNumber = 3;\n  const ::std::string& string_value() const;\n  void set_string_value(const ::std::string& value);\n  void set_string_value(const char* value);\n  void set_string_value(const char* value, size_t size);\n  ::std::string* mutable_string_value();\n  ::std::string* release_string_value();\n  void set_allocated_string_value(::std::string* string_value);\n  ::std::string* unsafe_arena_release_string_value();\n  void unsafe_arena_set_allocated_string_value(\n      ::std::string* string_value);\n\n  // optional bool bool_value = 4;\n  private:\n  bool has_bool_value() const;\n  public:\n  void clear_bool_value();\n  static const int kBoolValueFieldNumber = 4;\n  bool bool_value() const;\n  void set_bool_value(bool value);\n\n  // optional .google.protobuf.Struct struct_value = 5;\n  bool has_struct_value() const;\n  void clear_struct_value();\n  static const int kStructValueFieldNumber = 5;\n  private:\n  void _slow_mutable_struct_value();\n  void _slow_set_allocated_struct_value(\n      ::google::protobuf::Arena* message_arena, ::google::protobuf::Struct** struct_value);\n  ::google::protobuf::Struct* _slow_release_struct_value();\n  public:\n  const ::google::protobuf::Struct& struct_value() const;\n  ::google::protobuf::Struct* mutable_struct_value();\n  ::google::protobuf::Struct* release_struct_value();\n  void set_allocated_struct_value(::google::protobuf::Struct* struct_value);\n  ::google::protobuf::Struct* unsafe_arena_release_struct_value();\n  void 
unsafe_arena_set_allocated_struct_value(\n      ::google::protobuf::Struct* struct_value);\n\n  // optional .google.protobuf.ListValue list_value = 6;\n  bool has_list_value() const;\n  void clear_list_value();\n  static const int kListValueFieldNumber = 6;\n  private:\n  void _slow_mutable_list_value();\n  void _slow_set_allocated_list_value(\n      ::google::protobuf::Arena* message_arena, ::google::protobuf::ListValue** list_value);\n  ::google::protobuf::ListValue* _slow_release_list_value();\n  public:\n  const ::google::protobuf::ListValue& list_value() const;\n  ::google::protobuf::ListValue* mutable_list_value();\n  ::google::protobuf::ListValue* release_list_value();\n  void set_allocated_list_value(::google::protobuf::ListValue* list_value);\n  ::google::protobuf::ListValue* unsafe_arena_release_list_value();\n  void unsafe_arena_set_allocated_list_value(\n      ::google::protobuf::ListValue* list_value);\n\n  KindCase kind_case() const;\n  // @@protoc_insertion_point(class_scope:google.protobuf.Value)\n private:\n  inline void set_has_null_value();\n  inline void set_has_number_value();\n  inline void set_has_string_value();\n  inline void set_has_bool_value();\n  inline void set_has_struct_value();\n  inline void set_has_list_value();\n\n  inline bool has_kind() const;\n  void clear_kind();\n  inline void clear_has_kind();\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  union KindUnion {\n    KindUnion() {}\n    int null_value_;\n    double number_value_;\n    ::google::protobuf::internal::ArenaStringPtr string_value_;\n    bool bool_value_;\n    ::google::protobuf::Struct* struct_value_;\n    ::google::protobuf::ListValue* list_value_;\n  } kind_;\n  mutable int _cached_size_;\n  ::google::protobuf::uint32 _oneof_case_[1];\n\n  friend void LIBPROTOBUF_EXPORT 
protobuf_InitDefaults_google_2fprotobuf_2fstruct_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fstruct_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fstruct_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fstruct_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Value> Value_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT ListValue : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.ListValue) */ {\n public:\n  ListValue();\n  virtual ~ListValue();\n\n  ListValue(const ListValue& from);\n\n  inline ListValue& operator=(const ListValue& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const ListValue& default_instance();\n\n  static const ListValue* internal_default_instance();\n\n  void UnsafeArenaSwap(ListValue* other);\n  void Swap(ListValue* other);\n\n  // implements Message ----------------------------------------------\n\n  inline ListValue* New() const { return New(NULL); }\n\n  ListValue* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const ListValue& from);\n  void MergeFrom(const ListValue& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* 
InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(ListValue* other);\n  void UnsafeMergeFrom(const ListValue& from);\n  protected:\n  explicit ListValue(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // repeated .google.protobuf.Value values = 1;\n  int values_size() const;\n  void clear_values();\n  static const int kValuesFieldNumber = 1;\n  const ::google::protobuf::Value& values(int index) const;\n  ::google::protobuf::Value* mutable_values(int index);\n  ::google::protobuf::Value* add_values();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Value >*\n      mutable_values();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Value >&\n      values() const;\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.ListValue)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  ::google::protobuf::RepeatedPtrField< 
::google::protobuf::Value > values_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fstruct_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fstruct_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fstruct_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fstruct_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<ListValue> ListValue_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// Struct\n\n// map<string, .google.protobuf.Value> fields = 1;\ninline int Struct::fields_size() const {\n  return fields_.size();\n}\ninline void Struct::clear_fields() {\n  fields_.Clear();\n}\ninline const ::google::protobuf::Map< ::std::string, ::google::protobuf::Value >&\nStruct::fields() const {\n  // @@protoc_insertion_point(field_map:google.protobuf.Struct.fields)\n  return fields_.GetMap();\n}\ninline ::google::protobuf::Map< ::std::string, ::google::protobuf::Value >*\nStruct::mutable_fields() {\n  // @@protoc_insertion_point(field_mutable_map:google.protobuf.Struct.fields)\n  return fields_.MutableMap();\n}\n\ninline const Struct* Struct::internal_default_instance() {\n  return &Struct_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// Value\n\n// optional .google.protobuf.NullValue null_value = 1;\ninline bool Value::has_null_value() const {\n  return kind_case() == kNullValue;\n}\ninline void Value::set_has_null_value() {\n  _oneof_case_[0] = kNullValue;\n}\ninline void Value::clear_null_value() {\n  if (has_null_value()) {\n    kind_.null_value_ = 0;\n    clear_has_kind();\n  }\n}\ninline ::google::protobuf::NullValue Value::null_value() const {\n  // 
@@protoc_insertion_point(field_get:google.protobuf.Value.null_value)\n  if (has_null_value()) {\n    return static_cast< ::google::protobuf::NullValue >(kind_.null_value_);\n  }\n  return static_cast< ::google::protobuf::NullValue >(0);\n}\ninline void Value::set_null_value(::google::protobuf::NullValue value) {\n  if (!has_null_value()) {\n    clear_kind();\n    set_has_null_value();\n  }\n  kind_.null_value_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Value.null_value)\n}\n\n// optional double number_value = 2;\ninline bool Value::has_number_value() const {\n  return kind_case() == kNumberValue;\n}\ninline void Value::set_has_number_value() {\n  _oneof_case_[0] = kNumberValue;\n}\ninline void Value::clear_number_value() {\n  if (has_number_value()) {\n    kind_.number_value_ = 0;\n    clear_has_kind();\n  }\n}\ninline double Value::number_value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Value.number_value)\n  if (has_number_value()) {\n    return kind_.number_value_;\n  }\n  return 0;\n}\ninline void Value::set_number_value(double value) {\n  if (!has_number_value()) {\n    clear_kind();\n    set_has_number_value();\n  }\n  kind_.number_value_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Value.number_value)\n}\n\n// optional string string_value = 3;\ninline bool Value::has_string_value() const {\n  return kind_case() == kStringValue;\n}\ninline void Value::set_has_string_value() {\n  _oneof_case_[0] = kStringValue;\n}\ninline void Value::clear_string_value() {\n  if (has_string_value()) {\n    kind_.string_value_.Destroy(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n        GetArenaNoVirtual());\n    clear_has_kind();\n  }\n}\ninline const ::std::string& Value::string_value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Value.string_value)\n  if (has_string_value()) {\n    return 
kind_.string_value_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  }\n  return *&::google::protobuf::internal::GetEmptyStringAlreadyInited();\n}\ninline void Value::set_string_value(const ::std::string& value) {\n  if (!has_string_value()) {\n    clear_kind();\n    set_has_string_value();\n    kind_.string_value_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  }\n  kind_.string_value_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value,\n      GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set:google.protobuf.Value.string_value)\n}\ninline void Value::set_string_value(const char* value) {\n  if (!has_string_value()) {\n    clear_kind();\n    set_has_string_value();\n    kind_.string_value_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  }\n  kind_.string_value_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      ::std::string(value), GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Value.string_value)\n}\ninline void Value::set_string_value(const char* value,\n                             size_t size) {\n  if (!has_string_value()) {\n    clear_kind();\n    set_has_string_value();\n    kind_.string_value_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  }\n  kind_.string_value_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(\n      reinterpret_cast<const char*>(value), size),\n      GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Value.string_value)\n}\ninline ::std::string* Value::mutable_string_value() {\n  if (!has_string_value()) {\n    clear_kind();\n    set_has_string_value();\n    kind_.string_value_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  }\n  return 
kind_.string_value_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Value.string_value)\n}\ninline ::std::string* Value::release_string_value() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Value.string_value)\n  if (has_string_value()) {\n    clear_has_kind();\n    return kind_.string_value_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n        GetArenaNoVirtual());\n  } else {\n    return NULL;\n  }\n}\ninline ::std::string* Value::unsafe_arena_release_string_value() {\n  // @@protoc_insertion_point(field_unsafe_arena_release:google.protobuf.Value.string_value)\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  if (has_string_value()) {\n    clear_has_kind();\n    return kind_.string_value_.UnsafeArenaRelease(\n        &::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n  } else {\n    return NULL;\n  }\n}\ninline void Value::set_allocated_string_value(::std::string* string_value) {\n  if (!has_string_value()) {\n    kind_.string_value_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  }\n  clear_kind();\n  if (string_value != NULL) {\n    set_has_string_value();\n    kind_.string_value_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), string_value,\n        GetArenaNoVirtual());\n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Value.string_value)\n}\ninline void Value::unsafe_arena_set_allocated_string_value(::std::string* string_value) {\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  if (!has_string_value()) {\n    kind_.string_value_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n  }\n  clear_kind();\n  if (string_value) {\n    set_has_string_value();\n    kind_.string_value_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), 
string_value, GetArenaNoVirtual());\n  }\n  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.Value.string_value)\n}\n\n// optional bool bool_value = 4;\ninline bool Value::has_bool_value() const {\n  return kind_case() == kBoolValue;\n}\ninline void Value::set_has_bool_value() {\n  _oneof_case_[0] = kBoolValue;\n}\ninline void Value::clear_bool_value() {\n  if (has_bool_value()) {\n    kind_.bool_value_ = false;\n    clear_has_kind();\n  }\n}\ninline bool Value::bool_value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Value.bool_value)\n  if (has_bool_value()) {\n    return kind_.bool_value_;\n  }\n  return false;\n}\ninline void Value::set_bool_value(bool value) {\n  if (!has_bool_value()) {\n    clear_kind();\n    set_has_bool_value();\n  }\n  kind_.bool_value_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Value.bool_value)\n}\n\n// optional .google.protobuf.Struct struct_value = 5;\ninline bool Value::has_struct_value() const {\n  return kind_case() == kStructValue;\n}\ninline void Value::set_has_struct_value() {\n  _oneof_case_[0] = kStructValue;\n}\ninline void Value::clear_struct_value() {\n  if (has_struct_value()) {\n    if (GetArenaNoVirtual() == NULL) {\n      delete kind_.struct_value_;\n    }\n    clear_has_kind();\n  }\n}\ninline  const ::google::protobuf::Struct& Value::struct_value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Value.struct_value)\n  return has_struct_value()\n      ? 
*kind_.struct_value_\n      : ::google::protobuf::Struct::default_instance();\n}\ninline ::google::protobuf::Struct* Value::mutable_struct_value() {\n  if (!has_struct_value()) {\n    clear_kind();\n    set_has_struct_value();\n    kind_.struct_value_ = \n      ::google::protobuf::Arena::CreateMessage< ::google::protobuf::Struct >(\n      GetArenaNoVirtual());\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Value.struct_value)\n  return kind_.struct_value_;\n}\ninline ::google::protobuf::Struct* Value::release_struct_value() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Value.struct_value)\n  if (has_struct_value()) {\n    clear_has_kind();\n    if (GetArenaNoVirtual() != NULL) {\n      ::google::protobuf::Struct* temp = new ::google::protobuf::Struct(*kind_.struct_value_);\n      kind_.struct_value_ = NULL;\n      return temp;\n    } else {\n      ::google::protobuf::Struct* temp = kind_.struct_value_;\n      kind_.struct_value_ = NULL;\n      return temp;\n    }\n  } else {\n    return NULL;\n  }\n}\ninline void Value::set_allocated_struct_value(::google::protobuf::Struct* struct_value) {\n  clear_kind();\n  if (struct_value) {\n    if (GetArenaNoVirtual() != NULL &&\n        ::google::protobuf::Arena::GetArena(struct_value) == NULL) {\n      GetArenaNoVirtual()->Own(struct_value);\n    } else if (GetArenaNoVirtual() !=\n               ::google::protobuf::Arena::GetArena(struct_value)) {\n      ::google::protobuf::Struct* new_struct_value = \n          ::google::protobuf::Arena::CreateMessage< ::google::protobuf::Struct >(\n          GetArenaNoVirtual());\n      new_struct_value->CopyFrom(*struct_value);\n      struct_value = new_struct_value;\n    }\n    set_has_struct_value();\n    kind_.struct_value_ = struct_value;\n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Value.struct_value)\n}\ninline  ::google::protobuf::Struct* Value::unsafe_arena_release_struct_value() {\n  // 
@@protoc_insertion_point(field_unsafe_arena_release:google.protobuf.Value.struct_value)\n  if (has_struct_value()) {\n    clear_has_kind();\n    ::google::protobuf::Struct* temp = kind_.struct_value_;\n    kind_.struct_value_ = NULL;\n    return temp;\n  } else {\n    return NULL;\n  }\n}\ninline  void Value::unsafe_arena_set_allocated_struct_value(::google::protobuf::Struct* struct_value) {\n  clear_kind();\n  if (struct_value) {\n    set_has_struct_value();\n    kind_.struct_value_ = struct_value;\n  }\n  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.Value.struct_value)\n}\n\n// optional .google.protobuf.ListValue list_value = 6;\ninline bool Value::has_list_value() const {\n  return kind_case() == kListValue;\n}\ninline void Value::set_has_list_value() {\n  _oneof_case_[0] = kListValue;\n}\ninline void Value::clear_list_value() {\n  if (has_list_value()) {\n    if (GetArenaNoVirtual() == NULL) {\n      delete kind_.list_value_;\n    }\n    clear_has_kind();\n  }\n}\ninline  const ::google::protobuf::ListValue& Value::list_value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Value.list_value)\n  return has_list_value()\n      ? 
*kind_.list_value_\n      : ::google::protobuf::ListValue::default_instance();\n}\ninline ::google::protobuf::ListValue* Value::mutable_list_value() {\n  if (!has_list_value()) {\n    clear_kind();\n    set_has_list_value();\n    kind_.list_value_ = \n      ::google::protobuf::Arena::CreateMessage< ::google::protobuf::ListValue >(\n      GetArenaNoVirtual());\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Value.list_value)\n  return kind_.list_value_;\n}\ninline ::google::protobuf::ListValue* Value::release_list_value() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Value.list_value)\n  if (has_list_value()) {\n    clear_has_kind();\n    if (GetArenaNoVirtual() != NULL) {\n      ::google::protobuf::ListValue* temp = new ::google::protobuf::ListValue(*kind_.list_value_);\n      kind_.list_value_ = NULL;\n      return temp;\n    } else {\n      ::google::protobuf::ListValue* temp = kind_.list_value_;\n      kind_.list_value_ = NULL;\n      return temp;\n    }\n  } else {\n    return NULL;\n  }\n}\ninline void Value::set_allocated_list_value(::google::protobuf::ListValue* list_value) {\n  clear_kind();\n  if (list_value) {\n    if (GetArenaNoVirtual() != NULL &&\n        ::google::protobuf::Arena::GetArena(list_value) == NULL) {\n      GetArenaNoVirtual()->Own(list_value);\n    } else if (GetArenaNoVirtual() !=\n               ::google::protobuf::Arena::GetArena(list_value)) {\n      ::google::protobuf::ListValue* new_list_value = \n          ::google::protobuf::Arena::CreateMessage< ::google::protobuf::ListValue >(\n          GetArenaNoVirtual());\n      new_list_value->CopyFrom(*list_value);\n      list_value = new_list_value;\n    }\n    set_has_list_value();\n    kind_.list_value_ = list_value;\n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Value.list_value)\n}\ninline  ::google::protobuf::ListValue* Value::unsafe_arena_release_list_value() {\n  // 
@@protoc_insertion_point(field_unsafe_arena_release:google.protobuf.Value.list_value)\n  if (has_list_value()) {\n    clear_has_kind();\n    ::google::protobuf::ListValue* temp = kind_.list_value_;\n    kind_.list_value_ = NULL;\n    return temp;\n  } else {\n    return NULL;\n  }\n}\ninline  void Value::unsafe_arena_set_allocated_list_value(::google::protobuf::ListValue* list_value) {\n  clear_kind();\n  if (list_value) {\n    set_has_list_value();\n    kind_.list_value_ = list_value;\n  }\n  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.Value.list_value)\n}\n\ninline bool Value::has_kind() const {\n  return kind_case() != KIND_NOT_SET;\n}\ninline void Value::clear_has_kind() {\n  _oneof_case_[0] = KIND_NOT_SET;\n}\ninline Value::KindCase Value::kind_case() const {\n  return Value::KindCase(_oneof_case_[0]);\n}\ninline const Value* Value::internal_default_instance() {\n  return &Value_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// ListValue\n\n// repeated .google.protobuf.Value values = 1;\ninline int ListValue::values_size() const {\n  return values_.size();\n}\ninline void ListValue::clear_values() {\n  values_.Clear();\n}\ninline const ::google::protobuf::Value& ListValue::values(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.ListValue.values)\n  return values_.Get(index);\n}\ninline ::google::protobuf::Value* ListValue::mutable_values(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.ListValue.values)\n  return values_.Mutable(index);\n}\ninline ::google::protobuf::Value* ListValue::add_values() {\n  // @@protoc_insertion_point(field_add:google.protobuf.ListValue.values)\n  return values_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::Value >*\nListValue::mutable_values() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.ListValue.values)\n  return &values_;\n}\ninline 
const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Value >&\nListValue::values() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.ListValue.values)\n  return values_;\n}\n\ninline const ListValue* ListValue::internal_default_instance() {\n  return &ListValue_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace protobuf\n}  // namespace google\n\n#ifndef SWIG\nnamespace google {\nnamespace protobuf {\n\ntemplate <> struct is_proto_enum< ::google::protobuf::NullValue> : ::google::protobuf::internal::true_type {};\ntemplate <>\ninline const EnumDescriptor* GetEnumDescriptor< ::google::protobuf::NullValue>() {\n  return ::google::protobuf::NullValue_descriptor();\n}\n\n}  // namespace protobuf\n}  // namespace google\n#endif  // SWIG\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_google_2fprotobuf_2fstruct_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/struct.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption cc_enable_arenas = true;\noption go_package = \"github.com/golang/protobuf/ptypes/struct;structpb\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"StructProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n\n// `Struct` represents a structured data value, consisting of fields\n// which map to dynamically typed values. In some languages, `Struct`\n// might be supported by a native representation. For example, in\n// scripting languages like JS a struct is represented as an\n// object. The details of that representation are described together\n// with the proto support for the language.\n//\n// The JSON representation for `Struct` is JSON object.\nmessage Struct {\n  // Unordered map of dynamically typed values.\n  map<string, Value> fields = 1;\n}\n\n// `Value` represents a dynamically typed value which can be either\n// null, a number, a string, a boolean, a recursive struct value, or a\n// list of values. 
A producer of value is expected to set one of that\n// variants, absence of any variant indicates an error.\n//\n// The JSON representation for `Value` is JSON value.\nmessage Value {\n  // The kind of value.\n  oneof kind {\n    // Represents a null value.\n    NullValue null_value = 1;\n    // Represents a double value.\n    double number_value = 2;\n    // Represents a string value.\n    string string_value = 3;\n    // Represents a boolean value.\n    bool bool_value = 4;\n    // Represents a structured value.\n    Struct struct_value = 5;\n    // Represents a repeated `Value`.\n    ListValue list_value = 6;\n  }\n}\n\n// `NullValue` is a singleton enumeration to represent the null value for the\n// `Value` type union.\n//\n//  The JSON representation for `NullValue` is JSON `null`.\nenum NullValue {\n  // Null value.\n  NULL_VALUE = 0;\n}\n\n// `ListValue` is a wrapper around a repeated field of values.\n//\n// The JSON representation for `ListValue` is JSON array.\nmessage ListValue {\n  // Repeated field of dynamically typed values.\n  repeated Value values = 1;\n}\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomic_sequence_num.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2014 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#ifndef GOOGLE_PROTOBUF_ATOMIC_SEQUENCE_NUM_H_\n#define GOOGLE_PROTOBUF_ATOMIC_SEQUENCE_NUM_H_\n\n#include <google/protobuf/stubs/atomicops.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\nclass SequenceNumber {\n public:\n  SequenceNumber() : word_(0) {}\n\n  AtomicWord GetNext() {\n    return NoBarrier_AtomicIncrement(&word_, 1) - 1;\n  }\n private:\n  AtomicWord word_;\n};\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_ATOMIC_SEQUENCE_NUM_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2012 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// The routines exported by this module are subtle.  If you use them, even if\n// you get the code right, it will depend on careful reasoning about atomicity\n// and memory ordering; it will be less readable, and harder to maintain.  
If\n// you plan to use these routines, you should have a good reason, such as solid\n// evidence that performance would otherwise suffer, or there being no\n// alternative.  You should assume only properties explicitly guaranteed by the\n// specifications in this file.  You are almost certainly _not_ writing code\n// just for the x86; if you assume x86 semantics, x86 hardware bugs and\n// implementations on other archtectures will cause your code to break.  If you\n// do not know what you are doing, avoid these routines, and use a Mutex.\n//\n// It is incorrect to make direct assignments to/from an atomic variable.\n// You should use one of the Load or Store routines.  The NoBarrier\n// versions are provided when no barriers are needed:\n//   NoBarrier_Store()\n//   NoBarrier_Load()\n// Although there are currently no compiler enforcement, you are encouraged\n// to use these.\n\n// This header and the implementations for each platform (located in\n// atomicops_internals_*) must be kept in sync with the upstream code (V8).\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_H_\n\n// Don't include this file for people not concerned about thread safety.\n#ifndef GOOGLE_PROTOBUF_NO_THREAD_SAFETY\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/platform_macros.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n#if defined(GOOGLE_PROTOBUF_ARCH_POWER)\n#if defined(_LP64) || defined(__LP64__)\ntypedef int32 Atomic32;\ntypedef intptr_t Atomic64;\n#else\ntypedef intptr_t Atomic32;\ntypedef int64 Atomic64;\n#endif\n#else\ntypedef int32 Atomic32;\n#ifdef GOOGLE_PROTOBUF_ARCH_64_BIT\n// We need to be able to go between Atomic64 and AtomicWord implicitly.  
This\n// means Atomic64 and AtomicWord should be the same type on 64-bit.\n#if defined(__ILP32__) || defined(GOOGLE_PROTOBUF_OS_NACL)\n// NaCl's intptr_t is not actually 64-bits on 64-bit!\n// http://code.google.com/p/nativeclient/issues/detail?id=1162\n// sparcv9's pointer type is 32bits\ntypedef int64 Atomic64;\n#else\ntypedef intptr_t Atomic64;\n#endif\n#endif\n#endif\n\n// Use AtomicWord for a machine-sized pointer.  It will use the Atomic32 or\n// Atomic64 routines below, depending on your architecture.\ntypedef intptr_t AtomicWord;\n\n// Atomically execute:\n//      result = *ptr;\n//      if (*ptr == old_value)\n//        *ptr = new_value;\n//      return result;\n//\n// I.e., replace \"*ptr\" with \"new_value\" if \"*ptr\" used to be \"old_value\".\n// Always return the old value of \"*ptr\"\n//\n// This routine implies no memory barriers.\nAtomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,\n                                  Atomic32 old_value,\n                                  Atomic32 new_value);\n\n// Atomically store new_value into *ptr, returning the previous value held in\n// *ptr.  This routine implies no memory barriers.\nAtomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);\n\n// Atomically increment *ptr by \"increment\".  Returns the new value of\n// *ptr with the increment applied.  This routine implies no memory barriers.\nAtomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);\n\nAtomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,\n                                 Atomic32 increment);\n\n// These following lower-level operations are typically useful only to people\n// implementing higher-level synchronization operations like spinlocks,\n// mutexes, and condition-variables.  They combine CompareAndSwap(), a load, or\n// a store with appropriate memory-ordering instructions.  
\"Acquire\" operations\n// ensure that no later memory access can be reordered ahead of the operation.\n// \"Release\" operations ensure that no previous memory access can be reordered\n// after the operation.  \"Barrier\" operations have both \"Acquire\" and \"Release\"\n// semantics.   A MemoryBarrier() has \"Barrier\" semantics, but does no memory\n// access.\nAtomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,\n                                Atomic32 old_value,\n                                Atomic32 new_value);\nAtomic32 Release_CompareAndSwap(volatile Atomic32* ptr,\n                                Atomic32 old_value,\n                                Atomic32 new_value);\n\n#if defined(__MINGW32__) && defined(MemoryBarrier)\n#undef MemoryBarrier\n#endif\nvoid MemoryBarrier();\nvoid NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);\nvoid Acquire_Store(volatile Atomic32* ptr, Atomic32 value);\nvoid Release_Store(volatile Atomic32* ptr, Atomic32 value);\n\nAtomic32 NoBarrier_Load(volatile const Atomic32* ptr);\nAtomic32 Acquire_Load(volatile const Atomic32* ptr);\nAtomic32 Release_Load(volatile const Atomic32* ptr);\n\n// 64-bit atomic operations (only available on 64-bit processors).\n#ifdef GOOGLE_PROTOBUF_ARCH_64_BIT\nAtomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,\n                                  Atomic64 old_value,\n                                  Atomic64 new_value);\nAtomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);\nAtomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);\nAtomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);\n\nAtomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,\n                                Atomic64 old_value,\n                                Atomic64 new_value);\nAtomic64 Release_CompareAndSwap(volatile Atomic64* ptr,\n                                Atomic64 old_value,\n                                Atomic64 
new_value);\nvoid NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);\nvoid Acquire_Store(volatile Atomic64* ptr, Atomic64 value);\nvoid Release_Store(volatile Atomic64* ptr, Atomic64 value);\nAtomic64 NoBarrier_Load(volatile const Atomic64* ptr);\nAtomic64 Acquire_Load(volatile const Atomic64* ptr);\nAtomic64 Release_Load(volatile const Atomic64* ptr);\n#endif  // GOOGLE_PROTOBUF_ARCH_64_BIT\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n// Include our platform specific implementation.\n#define GOOGLE_PROTOBUF_ATOMICOPS_ERROR \\\n\"Atomic operations are not supported on your platform\"\n\n// ThreadSanitizer, http://clang.llvm.org/docs/ThreadSanitizer.html.\n#if defined(THREAD_SANITIZER)\n#include <google/protobuf/stubs/atomicops_internals_tsan.h>\n// MSVC.\n#elif defined(_MSC_VER)\n#if defined(GOOGLE_PROTOBUF_ARCH_IA32) || defined(GOOGLE_PROTOBUF_ARCH_X64)\n#include <google/protobuf/stubs/atomicops_internals_x86_msvc.h>\n#else\n#error GOOGLE_PROTOBUF_ATOMICOPS_ERROR\n#endif\n\n// Solaris\n#elif defined(GOOGLE_PROTOBUF_OS_SOLARIS)\n#include <google/protobuf/stubs/atomicops_internals_solaris.h>\n\n// AIX\n#elif defined(GOOGLE_PROTOBUF_OS_AIX)\n#include <google/protobuf/stubs/atomicops_internals_power.h>\n\n// Apple.\n#elif defined(GOOGLE_PROTOBUF_OS_APPLE)\n#include <google/protobuf/stubs/atomicops_internals_macosx.h>\n\n// GCC.\n#elif defined(__GNUC__)\n#if defined(GOOGLE_PROTOBUF_ARCH_IA32) || defined(GOOGLE_PROTOBUF_ARCH_X64)\n#include <google/protobuf/stubs/atomicops_internals_x86_gcc.h>\n#elif defined(GOOGLE_PROTOBUF_ARCH_ARM) && defined(__linux__)\n#include <google/protobuf/stubs/atomicops_internals_arm_gcc.h>\n#elif defined(GOOGLE_PROTOBUF_ARCH_AARCH64)\n#include <google/protobuf/stubs/atomicops_internals_arm64_gcc.h>\n#elif defined(GOOGLE_PROTOBUF_ARCH_ARM_QNX)\n#include <google/protobuf/stubs/atomicops_internals_arm_qnx.h>\n#elif defined(GOOGLE_PROTOBUF_ARCH_MIPS) || defined(GOOGLE_PROTOBUF_ARCH_MIPS64)\n#include 
<google/protobuf/stubs/atomicops_internals_mips_gcc.h>\n#elif defined(GOOGLE_PROTOBUF_ARCH_POWER)\n#include <google/protobuf/stubs/atomicops_internals_power.h>\n#elif defined(__native_client__)\n#include <google/protobuf/stubs/atomicops_internals_pnacl.h>\n#elif defined(GOOGLE_PROTOBUF_ARCH_PPC)\n#include <google/protobuf/stubs/atomicops_internals_ppc_gcc.h>\n#elif (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4))\n#include <google/protobuf/stubs/atomicops_internals_generic_gcc.h>\n#elif defined(__clang__)\n#if __has_extension(c_atomic)\n#include <google/protobuf/stubs/atomicops_internals_generic_gcc.h>\n#else\n#error GOOGLE_PROTOBUF_ATOMICOPS_ERROR\n#endif\n#else\n#error GOOGLE_PROTOBUF_ATOMICOPS_ERROR\n#endif\n\n// Unknown.\n#else\n#error GOOGLE_PROTOBUF_ATOMICOPS_ERROR\n#endif\n\n// On some platforms we need additional declarations to make AtomicWord\n// compatible with our other Atomic* types.\n#if defined(GOOGLE_PROTOBUF_OS_APPLE)\n#include <google/protobuf/stubs/atomicops_internals_atomicword_compat.h>\n#endif\n\n#undef GOOGLE_PROTOBUF_ATOMICOPS_ERROR\n\n#endif  // GOOGLE_PROTOBUF_NO_THREAD_SAFETY\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops_internals_arm64_gcc.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2012 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file is an internal atomic implementation, use atomicops.h instead.\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\ninline void MemoryBarrier() {\n  __asm__ __volatile__ (\"dmb ish\" ::: \"memory\");  // NOLINT\n}\n\n// NoBarrier versions of the operation include \"memory\" in the clobber list.\n// This is not required for direct usage of the NoBarrier versions of the\n// operations. However this is required for correctness when they are used as\n// part of the Acquire or Release versions, to ensure that nothing from outside\n// the call is reordered between the operation and the memory barrier. 
This does\n// not change the code generated, so has no or minimal impact on the\n// NoBarrier operations.\n\ninline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,\n                                         Atomic32 old_value,\n                                         Atomic32 new_value) {\n  Atomic32 prev;\n  int32_t temp;\n\n  __asm__ __volatile__ (  // NOLINT\n    \"0:                                    \\n\\t\"\n    \"ldxr %w[prev], %[ptr]                 \\n\\t\"  // Load the previous value.\n    \"cmp %w[prev], %w[old_value]           \\n\\t\"\n    \"bne 1f                                \\n\\t\"\n    \"stxr %w[temp], %w[new_value], %[ptr]  \\n\\t\"  // Try to store the new value.\n    \"cbnz %w[temp], 0b                     \\n\\t\"  // Retry if it did not work.\n    \"1:                                    \\n\\t\"\n    : [prev]\"=&r\" (prev),\n      [temp]\"=&r\" (temp),\n      [ptr]\"+Q\" (*ptr)\n    : [old_value]\"IJr\" (old_value),\n      [new_value]\"r\" (new_value)\n    : \"cc\", \"memory\"\n  );  // NOLINT\n\n  return prev;\n}\n\ninline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,\n                                         Atomic32 new_value) {\n  Atomic32 result;\n  int32_t temp;\n\n  __asm__ __volatile__ (  // NOLINT\n    \"0:                                    \\n\\t\"\n    \"ldxr %w[result], %[ptr]               \\n\\t\"  // Load the previous value.\n    \"stxr %w[temp], %w[new_value], %[ptr]  \\n\\t\"  // Try to store the new value.\n    \"cbnz %w[temp], 0b                     \\n\\t\"  // Retry if it did not work.\n    : [result]\"=&r\" (result),\n      [temp]\"=&r\" (temp),\n      [ptr]\"+Q\" (*ptr)\n    : [new_value]\"r\" (new_value)\n    : \"memory\"\n  );  // NOLINT\n\n  return result;\n}\n\ninline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,\n                                          Atomic32 increment) {\n  Atomic32 result;\n  int32_t temp;\n\n  __asm__ __volatile__ (  // NOLINT\n    \"0:                  
                     \\n\\t\"\n    \"ldxr %w[result], %[ptr]                  \\n\\t\"  // Load the previous value.\n    \"add %w[result], %w[result], %w[increment]\\n\\t\"\n    \"stxr %w[temp], %w[result], %[ptr]        \\n\\t\"  // Try to store the result.\n    \"cbnz %w[temp], 0b                        \\n\\t\"  // Retry on failure.\n    : [result]\"=&r\" (result),\n      [temp]\"=&r\" (temp),\n      [ptr]\"+Q\" (*ptr)\n    : [increment]\"IJr\" (increment)\n    : \"memory\"\n  );  // NOLINT\n\n  return result;\n}\n\ninline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,\n                                        Atomic32 increment) {\n  MemoryBarrier();\n  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);\n  MemoryBarrier();\n\n  return result;\n}\n\ninline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n  MemoryBarrier();\n\n  return prev;\n}\n\ninline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  MemoryBarrier();\n  Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n\n  return prev;\n}\n\ninline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n}\n\ninline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n  MemoryBarrier();\n}\n\ninline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {\n  __asm__ __volatile__ (  // NOLINT\n    \"stlr %w[value], %[ptr]  \\n\\t\"\n    : [ptr]\"=Q\" (*ptr)\n    : [value]\"r\" (value)\n    : \"memory\"\n  );  // NOLINT\n}\n\ninline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {\n  return *ptr;\n}\n\ninline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {\n  Atomic32 value;\n\n  
__asm__ __volatile__ (  // NOLINT\n    \"ldar %w[value], %[ptr]  \\n\\t\"\n    : [value]\"=r\" (value)\n    : [ptr]\"Q\" (*ptr)\n    : \"memory\"\n  );  // NOLINT\n\n  return value;\n}\n\ninline Atomic32 Release_Load(volatile const Atomic32* ptr) {\n  MemoryBarrier();\n  return *ptr;\n}\n\n// 64-bit versions of the operations.\n// See the 32-bit versions for comments.\n\ninline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,\n                                         Atomic64 old_value,\n                                         Atomic64 new_value) {\n  Atomic64 prev;\n  int32_t temp;\n\n  __asm__ __volatile__ (  // NOLINT\n    \"0:                                    \\n\\t\"\n    \"ldxr %[prev], %[ptr]                  \\n\\t\"\n    \"cmp %[prev], %[old_value]             \\n\\t\"\n    \"bne 1f                                \\n\\t\"\n    \"stxr %w[temp], %[new_value], %[ptr]   \\n\\t\"\n    \"cbnz %w[temp], 0b                     \\n\\t\"\n    \"1:                                    \\n\\t\"\n    : [prev]\"=&r\" (prev),\n      [temp]\"=&r\" (temp),\n      [ptr]\"+Q\" (*ptr)\n    : [old_value]\"IJr\" (old_value),\n      [new_value]\"r\" (new_value)\n    : \"cc\", \"memory\"\n  );  // NOLINT\n\n  return prev;\n}\n\ninline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,\n                                         Atomic64 new_value) {\n  Atomic64 result;\n  int32_t temp;\n\n  __asm__ __volatile__ (  // NOLINT\n    \"0:                                    \\n\\t\"\n    \"ldxr %[result], %[ptr]                \\n\\t\"\n    \"stxr %w[temp], %[new_value], %[ptr]   \\n\\t\"\n    \"cbnz %w[temp], 0b                     \\n\\t\"\n    : [result]\"=&r\" (result),\n      [temp]\"=&r\" (temp),\n      [ptr]\"+Q\" (*ptr)\n    : [new_value]\"r\" (new_value)\n    : \"memory\"\n  );  // NOLINT\n\n  return result;\n}\n\ninline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,\n                                          Atomic64 increment) {\n  Atomic64 
result;\n  int32_t temp;\n\n  __asm__ __volatile__ (  // NOLINT\n    \"0:                                     \\n\\t\"\n    \"ldxr %[result], %[ptr]                 \\n\\t\"\n    \"add %[result], %[result], %[increment] \\n\\t\"\n    \"stxr %w[temp], %[result], %[ptr]       \\n\\t\"\n    \"cbnz %w[temp], 0b                      \\n\\t\"\n    : [result]\"=&r\" (result),\n      [temp]\"=&r\" (temp),\n      [ptr]\"+Q\" (*ptr)\n    : [increment]\"IJr\" (increment)\n    : \"memory\"\n  );  // NOLINT\n\n  return result;\n}\n\ninline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,\n                                        Atomic64 increment) {\n  MemoryBarrier();\n  Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);\n  MemoryBarrier();\n\n  return result;\n}\n\ninline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n  MemoryBarrier();\n\n  return prev;\n}\n\ninline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  MemoryBarrier();\n  Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n\n  return prev;\n}\n\ninline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {\n  *ptr = value;\n}\n\ninline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {\n  *ptr = value;\n  MemoryBarrier();\n}\n\ninline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {\n  __asm__ __volatile__ (  // NOLINT\n    \"stlr %x[value], %[ptr]  \\n\\t\"\n    : [ptr]\"=Q\" (*ptr)\n    : [value]\"r\" (value)\n    : \"memory\"\n  );  // NOLINT\n}\n\ninline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {\n  return *ptr;\n}\n\ninline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {\n  Atomic64 value;\n\n  
__asm__ __volatile__ (  // NOLINT\n    \"ldar %x[value], %[ptr]  \\n\\t\"\n    : [value]\"=r\" (value)\n    : [ptr]\"Q\" (*ptr)\n    : \"memory\"\n  );  // NOLINT\n\n  return value;\n}\n\ninline Atomic64 Release_Load(volatile const Atomic64* ptr) {\n  MemoryBarrier();\n  return *ptr;\n}\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM64_GCC_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops_internals_arm_gcc.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2012 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file is an internal atomic implementation, use atomicops.h instead.\n//\n// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// 0xffff0fc0 is the hard coded address of a function provided by\n// the kernel which implements an atomic compare-exchange. On older\n// ARM architecture revisions (pre-v6) this may be implemented using\n// a syscall. 
This address is stable, and in active use (hard coded)\n// by at least glibc-2.7 and the Android C library.\ntypedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,\n                                           Atomic32 new_value,\n                                           volatile Atomic32* ptr);\nLinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =\n    (LinuxKernelCmpxchgFunc) 0xffff0fc0;\n\ntypedef void (*LinuxKernelMemoryBarrierFunc)(void);\nLinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =\n    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;\n\n\ninline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,\n                                         Atomic32 old_value,\n                                         Atomic32 new_value) {\n  Atomic32 prev_value = *ptr;\n  do {\n    if (!pLinuxKernelCmpxchg(old_value, new_value,\n                             const_cast<Atomic32*>(ptr))) {\n      return old_value;\n    }\n    prev_value = *ptr;\n  } while (prev_value == old_value);\n  return prev_value;\n}\n\ninline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,\n                                         Atomic32 new_value) {\n  Atomic32 old_value;\n  do {\n    old_value = *ptr;\n  } while (pLinuxKernelCmpxchg(old_value, new_value,\n                               const_cast<Atomic32*>(ptr)));\n  return old_value;\n}\n\ninline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,\n                                          Atomic32 increment) {\n  return Barrier_AtomicIncrement(ptr, increment);\n}\n\ninline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,\n                                        Atomic32 increment) {\n  for (;;) {\n    // Atomic exchange the old value with an incremented one.\n    Atomic32 old_value = *ptr;\n    Atomic32 new_value = old_value + increment;\n    if (pLinuxKernelCmpxchg(old_value, new_value,\n                            const_cast<Atomic32*>(ptr)) == 0) {\n      // The 
exchange took place as expected.\n      return new_value;\n    }\n    // Otherwise, *ptr changed mid-loop and we need to retry.\n  }\n}\n\ninline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n}\n\ninline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n}\n\ninline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n}\n\ninline void MemoryBarrier() {\n  pLinuxKernelMemoryBarrier();\n}\n\ninline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n  MemoryBarrier();\n}\n\ninline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {\n  MemoryBarrier();\n  *ptr = value;\n}\n\ninline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {\n  return *ptr;\n}\n\ninline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {\n  Atomic32 value = *ptr;\n  MemoryBarrier();\n  return value;\n}\n\ninline Atomic32 Release_Load(volatile const Atomic32* ptr) {\n  MemoryBarrier();\n  return *ptr;\n}\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops_internals_arm_qnx.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2012 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file is an internal atomic implementation, use atomicops.h instead.\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_QNX_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_QNX_H_\n\n// For _smp_cmpxchg()\n#include <pthread.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\ninline Atomic32 QNXCmpxchg(Atomic32 old_value,\n                           Atomic32 new_value,\n                           volatile Atomic32* ptr) {\n  return static_cast<Atomic32>(\n      _smp_cmpxchg((volatile unsigned *)ptr,\n                   (unsigned)old_value,\n                   (unsigned)new_value));\n}\n\n\ninline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,\n                                         Atomic32 old_value,\n                                         Atomic32 new_value) {\n  Atomic32 prev_value = *ptr;\n  do {\n    if (!QNXCmpxchg(old_value, new_value,\n                    const_cast<Atomic32*>(ptr))) {\n      return old_value;\n    }\n    prev_value = *ptr;\n  } while (prev_value == old_value);\n  return prev_value;\n}\n\ninline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,\n                                         Atomic32 new_value) {\n  Atomic32 old_value;\n  do {\n    old_value = *ptr;\n  } while (QNXCmpxchg(old_value, new_value,\n                      const_cast<Atomic32*>(ptr)));\n  return old_value;\n}\n\ninline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,\n       
                                   Atomic32 increment) {\n  return Barrier_AtomicIncrement(ptr, increment);\n}\n\ninline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,\n                                        Atomic32 increment) {\n  for (;;) {\n    // Atomic exchange the old value with an incremented one.\n    Atomic32 old_value = *ptr;\n    Atomic32 new_value = old_value + increment;\n    if (QNXCmpxchg(old_value, new_value,\n                   const_cast<Atomic32*>(ptr)) == 0) {\n      // The exchange took place as expected.\n      return new_value;\n    }\n    // Otherwise, *ptr changed mid-loop and we need to retry.\n  }\n}\n\ninline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n}\n\ninline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n}\n\ninline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n}\n\ninline void MemoryBarrier() {\n  __sync_synchronize();\n}\n\ninline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n  MemoryBarrier();\n}\n\ninline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {\n  MemoryBarrier();\n  *ptr = value;\n}\n\ninline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {\n  return *ptr;\n}\n\ninline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {\n  Atomic32 value = *ptr;\n  MemoryBarrier();\n  return value;\n}\n\ninline Atomic32 Release_Load(volatile const Atomic32* ptr) {\n  MemoryBarrier();\n  return *ptr;\n}\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_QNX_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops_internals_atomicword_compat.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2012 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file is an internal atomic implementation, use atomicops.h instead.\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_\n\n// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32,\n// which in turn means int. On some LP32 platforms, intptr_t is an int, but\n// on others, it's a long. When AtomicWord and Atomic32 are based on different\n// fundamental types, their pointers are incompatible.\n//\n// This file defines function overloads to allow both AtomicWord and Atomic32\n// data to be used with this interface.\n//\n// On LP64 platforms, AtomicWord and Atomic64 are both always long,\n// so this problem doesn't occur.\n\n#if !defined(GOOGLE_PROTOBUF_ARCH_64_BIT)\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\ninline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,\n                                           AtomicWord old_value,\n                                           AtomicWord new_value) {\n  return NoBarrier_CompareAndSwap(\n      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);\n}\n\ninline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,\n                                           AtomicWord new_value) {\n  return NoBarrier_AtomicExchange(\n      reinterpret_cast<volatile Atomic32*>(ptr), new_value);\n}\n\ninline AtomicWord 
NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,\n                                            AtomicWord increment) {\n  return NoBarrier_AtomicIncrement(\n      reinterpret_cast<volatile Atomic32*>(ptr), increment);\n}\n\ninline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,\n                                          AtomicWord increment) {\n  return Barrier_AtomicIncrement(\n      reinterpret_cast<volatile Atomic32*>(ptr), increment);\n}\n\ninline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,\n                                         AtomicWord old_value,\n                                         AtomicWord new_value) {\n  return Acquire_CompareAndSwap(\n      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);\n}\n\ninline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,\n                                         AtomicWord old_value,\n                                         AtomicWord new_value) {\n  return Release_CompareAndSwap(\n      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);\n}\n\ninline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {\n  NoBarrier_Store(reinterpret_cast<volatile Atomic32*>(ptr), value);\n}\n\ninline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {\n  return Acquire_Store(reinterpret_cast<volatile Atomic32*>(ptr), value);\n}\n\ninline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {\n  return Release_Store(reinterpret_cast<volatile Atomic32*>(ptr), value);\n}\n\ninline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {\n  return NoBarrier_Load(reinterpret_cast<volatile const Atomic32*>(ptr));\n}\n\ninline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {\n  return Acquire_Load(reinterpret_cast<volatile const Atomic32*>(ptr));\n}\n\ninline AtomicWord Release_Load(volatile const AtomicWord* ptr) {\n  return Release_Load(reinterpret_cast<volatile const Atomic32*>(ptr));\n}\n\n}   // namespace 
internal\n}   // namespace protobuf\n}   // namespace google\n\n#endif  // !defined(GOOGLE_PROTOBUF_ARCH_64_BIT)\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops_internals_generic_gcc.h",
    "content": "// Copyright 2013 Red Hat Inc.  All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Red Hat Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file is an internal atomic implementation, use atomicops.h instead.\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\ninline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,\n                                         Atomic32 old_value,\n                                         Atomic32 new_value) {\n  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,\n                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);\n  return old_value;\n}\n\ninline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,\n                                         Atomic32 new_value) {\n  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);\n}\n\ninline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,\n                                          Atomic32 increment) {\n  return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);\n}\n\ninline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,\n                                        Atomic32 increment) {\n  return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST);\n}\n\ninline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  __atomic_compare_exchange_n(ptr, &old_value, 
new_value, true,\n                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);\n  return old_value;\n}\n\ninline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,\n                              __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);\n  return old_value;\n}\n\ninline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {\n  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);\n}\n\ninline void MemoryBarrier() {\n  __sync_synchronize();\n}\n\ninline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {\n  __atomic_store_n(ptr, value, __ATOMIC_SEQ_CST);\n}\n\ninline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {\n  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);\n}\n\ninline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {\n  return __atomic_load_n(ptr, __ATOMIC_RELAXED);\n}\n\ninline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {\n  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);\n}\n\ninline Atomic32 Release_Load(volatile const Atomic32* ptr) {\n  return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);\n}\n\n#ifdef __LP64__\n\ninline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {\n  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);\n}\n\ninline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {\n  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);\n}\n\ninline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,\n                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);\n  return old_value;\n}\n\ninline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,\n                                         Atomic64 old_value,\n                  
                       Atomic64 new_value) {\n  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,\n                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);\n  return old_value;\n}\n\ninline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,\n                                          Atomic64 increment) {\n  return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);\n}\n\ninline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {\n  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);\n}\n\ninline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,\n                                         Atomic64 new_value) {\n  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);\n}\n\ninline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {\n  return __atomic_load_n(ptr, __ATOMIC_RELAXED);\n}\n\n#endif // defined(__LP64__)\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_GENERIC_GCC_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops_internals_macosx.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2012 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file is an internal atomic implementation, use atomicops.h instead.\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MACOSX_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MACOSX_H_\n\n#include <libkern/OSAtomic.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\ninline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,\n                                         Atomic32 old_value,\n                                         Atomic32 new_value) {\n  Atomic32 prev_value;\n  do {\n    if (OSAtomicCompareAndSwap32(old_value, new_value,\n                                 const_cast<Atomic32*>(ptr))) {\n      return old_value;\n    }\n    prev_value = *ptr;\n  } while (prev_value == old_value);\n  return prev_value;\n}\n\ninline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,\n                                         Atomic32 new_value) {\n  Atomic32 old_value;\n  do {\n    old_value = *ptr;\n  } while (!OSAtomicCompareAndSwap32(old_value, new_value,\n                                     const_cast<Atomic32*>(ptr)));\n  return old_value;\n}\n\ninline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,\n                                          Atomic32 increment) {\n  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));\n}\n\ninline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,\n                                          Atomic32 increment) {\n  return 
OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));\n}\n\ninline void MemoryBarrier() {\n  OSMemoryBarrier();\n}\n\ninline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  Atomic32 prev_value;\n  do {\n    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,\n                                        const_cast<Atomic32*>(ptr))) {\n      return old_value;\n    }\n    prev_value = *ptr;\n  } while (prev_value == old_value);\n  return prev_value;\n}\n\ninline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  return Acquire_CompareAndSwap(ptr, old_value, new_value);\n}\n\ninline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n}\n\ninline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n  MemoryBarrier();\n}\n\ninline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {\n  MemoryBarrier();\n  *ptr = value;\n}\n\ninline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {\n  return *ptr;\n}\n\ninline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {\n  Atomic32 value = *ptr;\n  MemoryBarrier();\n  return value;\n}\n\ninline Atomic32 Release_Load(volatile const Atomic32* ptr) {\n  MemoryBarrier();\n  return *ptr;\n}\n\n#ifdef __LP64__\n\n// 64-bit implementation on 64-bit platform\n\ninline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,\n                                         Atomic64 old_value,\n                                         Atomic64 new_value) {\n  Atomic64 prev_value;\n  do {\n    if (OSAtomicCompareAndSwap64(old_value, new_value,\n                                 reinterpret_cast<volatile int64_t*>(ptr))) {\n      return old_value;\n    }\n    prev_value = *ptr;\n  } while (prev_value == 
old_value);\n  return prev_value;\n}\n\ninline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,\n                                         Atomic64 new_value) {\n  Atomic64 old_value;\n  do {\n    old_value = *ptr;\n  } while (!OSAtomicCompareAndSwap64(old_value, new_value,\n                                     reinterpret_cast<volatile int64_t*>(ptr)));\n  return old_value;\n}\n\ninline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,\n                                          Atomic64 increment) {\n  return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));\n}\n\ninline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,\n                                        Atomic64 increment) {\n  return OSAtomicAdd64Barrier(increment,\n                              reinterpret_cast<volatile int64_t*>(ptr));\n}\n\ninline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  Atomic64 prev_value;\n  do {\n    if (OSAtomicCompareAndSwap64Barrier(\n        old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {\n      return old_value;\n    }\n    prev_value = *ptr;\n  } while (prev_value == old_value);\n  return prev_value;\n}\n\ninline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  // The lib kern interface does not distinguish between\n  // Acquire and Release memory barriers; they are equivalent.\n  return Acquire_CompareAndSwap(ptr, old_value, new_value);\n}\n\ninline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {\n  *ptr = value;\n}\n\ninline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {\n  *ptr = value;\n  MemoryBarrier();\n}\n\ninline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {\n  MemoryBarrier();\n  *ptr = 
value;\n}\n\ninline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {\n  return *ptr;\n}\n\ninline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {\n  Atomic64 value = *ptr;\n  MemoryBarrier();\n  return value;\n}\n\ninline Atomic64 Release_Load(volatile const Atomic64* ptr) {\n  MemoryBarrier();\n  return *ptr;\n}\n\n#endif  // defined(__LP64__)\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MACOSX_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops_internals_mips_gcc.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2012 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file is an internal atomic implementation, use atomicops.h instead.\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_\n\n#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__(\"\" : : : \"memory\")\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// Atomically execute:\n//      result = *ptr;\n//      if (*ptr == old_value)\n//        *ptr = new_value;\n//      return result;\n//\n// I.e., replace \"*ptr\" with \"new_value\" if \"*ptr\" used to be \"old_value\".\n// Always return the old value of \"*ptr\"\n//\n// This routine implies no memory barriers.\ninline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,\n                                         Atomic32 old_value,\n                                         Atomic32 new_value) {\n  Atomic32 prev, tmp;\n  __asm__ __volatile__(\".set push\\n\"\n                       \".set noreorder\\n\"\n                       \"1:\\n\"\n                       \"ll %0, %5\\n\"  // prev = *ptr\n                       \"bne %0, %3, 2f\\n\"  // if (prev != old_value) goto 2\n                       \"move %2, %4\\n\"  // tmp = new_value\n                       \"sc %2, %1\\n\"  // *ptr = tmp (with atomic check)\n                       \"beqz %2, 1b\\n\"  // start again on atomic error\n                       \"nop\\n\"  // delay slot nop\n                       \"2:\\n\"\n                     
  \".set pop\\n\"\n                       : \"=&r\" (prev), \"=m\" (*ptr), \"=&r\" (tmp)\n                       : \"r\" (old_value), \"r\" (new_value), \"m\" (*ptr)\n                       : \"memory\");\n  return prev;\n}\n\n// Atomically store new_value into *ptr, returning the previous value held in\n// *ptr.  This routine implies no memory barriers.\ninline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,\n                                         Atomic32 new_value) {\n  Atomic32 temp, old;\n  __asm__ __volatile__(\".set push\\n\"\n                       \".set noreorder\\n\"\n                       \"1:\\n\"\n                       \"ll %1, %4\\n\"  // old = *ptr\n                       \"move %0, %3\\n\"  // temp = new_value\n                       \"sc %0, %2\\n\"  // *ptr = temp (with atomic check)\n                       \"beqz %0, 1b\\n\"  // start again on atomic error\n                       \"nop\\n\"  // delay slot nop\n                       \".set pop\\n\"\n                       : \"=&r\" (temp), \"=&r\" (old), \"=m\" (*ptr)\n                       : \"r\" (new_value), \"m\" (*ptr)\n                       : \"memory\");\n\n  return old;\n}\n\n// Atomically increment *ptr by \"increment\".  Returns the new value of\n// *ptr with the increment applied.  
This routine implies no memory barriers.\ninline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,\n                                          Atomic32 increment) {\n  Atomic32 temp, temp2;\n\n  __asm__ __volatile__(\".set push\\n\"\n                       \".set noreorder\\n\"\n                       \"1:\\n\"\n                       \"ll %0, %4\\n\"  // temp = *ptr\n                       \"addu %1, %0, %3\\n\"  // temp2 = temp + increment\n                       \"sc %1, %2\\n\"  // *ptr = temp2 (with atomic check)\n                       \"beqz %1, 1b\\n\"  // start again on atomic error\n                       \"addu %1, %0, %3\\n\"  // temp2 = temp + increment\n                       \".set pop\\n\"\n                       : \"=&r\" (temp), \"=&r\" (temp2), \"=m\" (*ptr)\n                       : \"Ir\" (increment), \"m\" (*ptr)\n                       : \"memory\");\n  // temp2 now holds the final value.\n  return temp2;\n}\n\ninline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,\n                                        Atomic32 increment) {\n  ATOMICOPS_COMPILER_BARRIER();\n  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);\n  ATOMICOPS_COMPILER_BARRIER();\n  return res;\n}\n\n// \"Acquire\" operations\n// ensure that no later memory access can be reordered ahead of the operation.\n// \"Release\" operations ensure that no previous memory access can be reordered\n// after the operation.  \"Barrier\" operations have both \"Acquire\" and \"Release\"\n// semantics.   
A MemoryBarrier() has \"Barrier\" semantics, but does no memory\n// access.\ninline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  ATOMICOPS_COMPILER_BARRIER();\n  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n  ATOMICOPS_COMPILER_BARRIER();\n  return res;\n}\n\ninline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  ATOMICOPS_COMPILER_BARRIER();\n  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n  ATOMICOPS_COMPILER_BARRIER();\n  return res;\n}\n\ninline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n}\n\ninline void MemoryBarrier() {\n  __asm__ __volatile__(\"sync\" : : : \"memory\");\n}\n\ninline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n  MemoryBarrier();\n}\n\ninline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {\n  MemoryBarrier();\n  *ptr = value;\n}\n\ninline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {\n  return *ptr;\n}\n\ninline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {\n  Atomic32 value = *ptr;\n  MemoryBarrier();\n  return value;\n}\n\ninline Atomic32 Release_Load(volatile const Atomic32* ptr) {\n  MemoryBarrier();\n  return *ptr;\n}\n\n#if defined(__LP64__)\n// 64-bit versions of the atomic ops.\n\ninline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,\n                                         Atomic64 old_value,\n                                         Atomic64 new_value) {\n  Atomic64 prev, tmp;\n  __asm__ __volatile__(\".set push\\n\"\n                       \".set noreorder\\n\"\n                       \"1:\\n\"\n                       \"lld %0, %5\\n\"  // prev = *ptr\n                       \"bne %0, %3, 2f\\n\"  // if (prev 
!= old_value) goto 2\n                       \"move %2, %4\\n\"  // tmp = new_value\n                       \"scd %2, %1\\n\"  // *ptr = tmp (with atomic check)\n                       \"beqz %2, 1b\\n\"  // start again on atomic error\n                       \"nop\\n\"  // delay slot nop\n                       \"2:\\n\"\n                       \".set pop\\n\"\n                       : \"=&r\" (prev), \"=m\" (*ptr), \"=&r\" (tmp)\n                       : \"r\" (old_value), \"r\" (new_value), \"m\" (*ptr)\n                       : \"memory\");\n  return prev;\n}\n\n// Atomically store new_value into *ptr, returning the previous value held in\n// *ptr.  This routine implies no memory barriers.\ninline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,\n                                         Atomic64 new_value) {\n  Atomic64 temp, old;\n  __asm__ __volatile__(\".set push\\n\"\n                       \".set noreorder\\n\"\n                       \"1:\\n\"\n                       \"lld %1, %4\\n\"  // old = *ptr\n                       \"move %0, %3\\n\"  // temp = new_value\n                       \"scd %0, %2\\n\"  // *ptr = temp (with atomic check)\n                       \"beqz %0, 1b\\n\"  // start again on atomic error\n                       \"nop\\n\"  // delay slot nop\n                       \".set pop\\n\"\n                       : \"=&r\" (temp), \"=&r\" (old), \"=m\" (*ptr)\n                       : \"r\" (new_value), \"m\" (*ptr)\n                       : \"memory\");\n\n  return old;\n}\n\n// Atomically increment *ptr by \"increment\".  Returns the new value of\n// *ptr with the increment applied.  
This routine implies no memory barriers.\ninline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,\n                                          Atomic64 increment) {\n  Atomic64 temp, temp2;\n\n  __asm__ __volatile__(\".set push\\n\"\n                       \".set noreorder\\n\"\n                       \"1:\\n\"\n                       \"lld %0, %4\\n\"  // temp = *ptr\n                       \"daddu %1, %0, %3\\n\"  // temp2 = temp + increment\n                       \"scd %1, %2\\n\"  // *ptr = temp2 (with atomic check)\n                       \"beqz %1, 1b\\n\"  // start again on atomic error\n                       \"daddu %1, %0, %3\\n\"  // temp2 = temp + increment\n                       \".set pop\\n\"\n                       : \"=&r\" (temp), \"=&r\" (temp2), \"=m\" (*ptr)\n                       : \"Ir\" (increment), \"m\" (*ptr)\n                       : \"memory\");\n  // temp2 now holds the final value.\n  return temp2;\n}\n\ninline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,\n                                        Atomic64 increment) {\n  MemoryBarrier();\n  Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);\n  MemoryBarrier();\n  return res;\n}\n\n// \"Acquire\" operations\n// ensure that no later memory access can be reordered ahead of the operation.\n// \"Release\" operations ensure that no previous memory access can be reordered\n// after the operation.  \"Barrier\" operations have both \"Acquire\" and \"Release\"\n// semantics.   
A MemoryBarrier() has \"Barrier\" semantics, but does no memory\n// access.\ninline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n  MemoryBarrier();\n  return res;\n}\n\ninline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  MemoryBarrier();\n  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n}\n\ninline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {\n  *ptr = value;\n}\n\ninline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {\n  *ptr = value;\n  MemoryBarrier();\n}\n\ninline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {\n  MemoryBarrier();\n  *ptr = value;\n}\n\ninline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {\n  return *ptr;\n}\n\ninline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {\n  Atomic64 value = *ptr;\n  MemoryBarrier();\n  return value;\n}\n\ninline Atomic64 Release_Load(volatile const Atomic64* ptr) {\n  MemoryBarrier();\n  return *ptr;\n}\n#endif\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#undef ATOMICOPS_COMPILER_BARRIER\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops_internals_pnacl.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2012 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file is an internal atomic implementation, use atomicops.h instead.\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_PNACL_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_PNACL_H_\n\n#include <atomic>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// This implementation is transitional and maintains the original API for\n// atomicops.h. This requires casting memory locations to the atomic types, and\n// assumes that the API and the C++11 implementation are layout-compatible,\n// which isn't true for all implementations or hardware platforms. 
The static\n// assertion should detect this issue, were it to fire then this header\n// shouldn't be used.\n//\n// TODO(jfb) If this header manages to stay committed then the API should be\n//           modified, and all call sites updated.\ntypedef volatile std::atomic<Atomic32>* AtomicLocation32;\nstatic_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),\n              \"incompatible 32-bit atomic layout\");\n\ninline void MemoryBarrier() {\n#if defined(__GLIBCXX__)\n  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but\n  // not defined, leading to the linker complaining about undefined references.\n  __atomic_thread_fence(std::memory_order_seq_cst);\n#else\n  std::atomic_thread_fence(std::memory_order_seq_cst);\n#endif\n}\n\ninline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,\n                                         Atomic32 old_value,\n                                         Atomic32 new_value) {\n  ((AtomicLocation32)ptr)\n      ->compare_exchange_strong(old_value,\n                                new_value,\n                                std::memory_order_relaxed,\n                                std::memory_order_relaxed);\n  return old_value;\n}\n\ninline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,\n                                         Atomic32 new_value) {\n  return ((AtomicLocation32)ptr)\n      ->exchange(new_value, std::memory_order_relaxed);\n}\n\ninline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,\n                                          Atomic32 increment) {\n  return increment +\n         ((AtomicLocation32)ptr)\n             ->fetch_add(increment, std::memory_order_relaxed);\n}\n\ninline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,\n                                        Atomic32 increment) {\n  return increment + ((AtomicLocation32)ptr)->fetch_add(increment);\n}\n\ninline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,\n                  
                     Atomic32 old_value,\n                                       Atomic32 new_value) {\n  ((AtomicLocation32)ptr)\n      ->compare_exchange_strong(old_value,\n                                new_value,\n                                std::memory_order_acquire,\n                                std::memory_order_acquire);\n  return old_value;\n}\n\ninline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  ((AtomicLocation32)ptr)\n      ->compare_exchange_strong(old_value,\n                                new_value,\n                                std::memory_order_release,\n                                std::memory_order_relaxed);\n  return old_value;\n}\n\ninline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {\n  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);\n}\n\ninline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {\n  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);\n  MemoryBarrier();\n}\n\ninline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {\n  ((AtomicLocation32)ptr)->store(value, std::memory_order_release);\n}\n\ninline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {\n  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);\n}\n\ninline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {\n  return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);\n}\n\ninline Atomic32 Release_Load(volatile const Atomic32* ptr) {\n  MemoryBarrier();\n  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);\n}\n\n#if defined(GOOGLE_PROTOBUF_ARCH_64_BIT)\n\ntypedef volatile std::atomic<Atomic64>* AtomicLocation64;\nstatic_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),\n              \"incompatible 64-bit atomic layout\");\n\ninline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,\n          
                               Atomic64 old_value,\n                                         Atomic64 new_value) {\n  ((AtomicLocation64)ptr)\n      ->compare_exchange_strong(old_value,\n                                new_value,\n                                std::memory_order_relaxed,\n                                std::memory_order_relaxed);\n  return old_value;\n}\n\ninline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,\n                                         Atomic64 new_value) {\n  return ((AtomicLocation64)ptr)\n      ->exchange(new_value, std::memory_order_relaxed);\n}\n\ninline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,\n                                          Atomic64 increment) {\n  return increment +\n         ((AtomicLocation64)ptr)\n             ->fetch_add(increment, std::memory_order_relaxed);\n}\n\ninline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,\n                                        Atomic64 increment) {\n  return increment + ((AtomicLocation64)ptr)->fetch_add(increment);\n}\n\ninline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  ((AtomicLocation64)ptr)\n      ->compare_exchange_strong(old_value,\n                                new_value,\n                                std::memory_order_acquire,\n                                std::memory_order_acquire);\n  return old_value;\n}\n\ninline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  ((AtomicLocation64)ptr)\n      ->compare_exchange_strong(old_value,\n                                new_value,\n                                std::memory_order_release,\n                                std::memory_order_relaxed);\n  return old_value;\n}\n\ninline void NoBarrier_Store(volatile 
Atomic64* ptr, Atomic64 value) {\n  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);\n}\n\ninline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {\n  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);\n  MemoryBarrier();\n}\n\ninline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {\n  ((AtomicLocation64)ptr)->store(value, std::memory_order_release);\n}\n\ninline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {\n  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);\n}\n\ninline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {\n  return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);\n}\n\ninline Atomic64 Release_Load(volatile const Atomic64* ptr) {\n  MemoryBarrier();\n  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);\n}\n\n#endif  // defined(GOOGLE_PROTOBUF_ARCH_64_BIT)\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_PNACL_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops_internals_power.h",
    "content": "// Copyright 2014 Bloomberg Finance LP. All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Bloomberg Finance LP. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file is an internal atomic implementation, use atomicops.h instead.\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_AIX_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_AIX_H_\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\ninline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,\n                                         Atomic32 old_value,\n                                         Atomic32 new_value) {\n  Atomic32 result;\n\n  asm volatile (\n      \"1:     lwarx %[res], %[zero], %[obj]   \\n\\t\"  // load and reserve\n      \"       cmpw %[cmp], %[res]             \\n\\t\"  // compare values\n      \"       bne- 2f                         \\n\\t\"\n      \"       stwcx. %[val], %[zero], %[obj]  \\n\\t\"  // store new value\n      \"       bne- 1b                         \\n\\t\"\n      \"2:                                     \\n\\t\"\n              : [res]  \"=&b\" (result)\n              : [obj]  \"b\"   (ptr),\n                [cmp]  \"b\"   (old_value),\n                [val]  \"b\"   (new_value),\n                [zero] \"i\"   (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\ninline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,\n                                         Atomic32 new_value) {\n  Atomic32 result;\n\n  asm volatile (\n      \"1:     lwarx %[res], %[zero], %[obj]       \\n\\t\"\n      \"       stwcx. 
%[val], %[zero], %[obj]      \\n\\t\"\n      \"       bne- 1b                             \\n\\t\"\n              : [res]  \"=&b\" (result)\n              : [obj]  \"b\"   (ptr),\n                [val]  \"b\"   (new_value),\n                [zero] \"i\"   (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\ninline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,\n                                          Atomic32 increment) {\n  Atomic32 result;\n\n  asm volatile (\n      \"1:     lwarx %[res], %[zero], %[obj]   \\n\\t\"  // load and reserve\n      \"       add %[res], %[val], %[res]      \\n\\t\"  // add the operand\n      \"       stwcx. %[res], %[zero], %[obj]  \\n\\t\"  // store old value\n                                                     // if still reserved\n      \"       bne- 1b                         \\n\\t\"\n              : [res]  \"=&b\" (result)\n              : [obj]  \"b\"   (ptr),\n                [val]  \"b\"   (increment),\n                [zero] \"i\"   (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\ninline void MemoryBarrier(void) {\n  asm volatile (\n      \"       lwsync                          \\n\\t\"\n      \"       isync                           \\n\\t\"\n              :\n              :\n              : \"memory\");\n}\n\ninline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,\n                                        Atomic32 increment) {\n  Atomic32 result;\n\n  asm volatile (\n      \"       lwsync                          \\n\\t\"\n\n      \"1:     lwarx %[res], %[zero], %[obj]   \\n\\t\"  // load and reserve\n      \"       add %[res], %[val], %[res]      \\n\\t\"  // add the operand\n      \"       stwcx. 
%[res], %[zero], %[obj]  \\n\\t\"  // store old value\n                                                     // if still reserved\n      \"       bne- 1b                         \\n\\t\"\n      \"       isync                           \\n\\t\"\n              : [res]  \"=&b\" (result)\n              : [obj]  \"b\"   (ptr),\n                [val]  \"b\"   (increment),\n                [zero] \"i\"   (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\ninline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  Atomic32 result;\n\n  asm volatile (\n      \"1:     lwarx %[res], %[zero], %[obj]   \\n\\t\"  // load and reserve\n      \"       cmpw %[cmp], %[res]             \\n\\t\"  // compare values\n      \"       bne- 2f                         \\n\\t\"\n      \"       stwcx. %[val], %[zero], %[obj]  \\n\\t\"  // store new value\n      \"       bne- 1b                         \\n\\t\"\n\n      \"       isync                           \\n\\t\"\n      \"2:                                     \\n\\t\"\n              : [res]  \"=&b\" (result)\n              : [obj]  \"b\"   (ptr),\n                [cmp]  \"b\"   (old_value),\n                [val]  \"b\"   (new_value),\n                [zero] \"i\"   (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\ninline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  Atomic32 result;\n\n  asm volatile (\n      \"       lwsync                          \\n\\t\"\n\n      \"1:     lwarx %[res], %[zero], %[obj]   \\n\\t\"  // load and reserve\n      \"       cmpw %[cmp], %[res]             \\n\\t\"  // compare values\n      \"       bne- 2f                         \\n\\t\"\n      \"       stwcx. 
%[val], %[zero], %[obj]  \\n\\t\"  // store new value\n      \"       bne- 1b                         \\n\\t\"\n\n      \"2:                                     \\n\\t\"\n              : [res]  \"=&b\" (result)\n              : [obj]  \"b\"   (ptr),\n                [cmp]  \"b\"   (old_value),\n                [val]  \"b\"   (new_value),\n                [zero] \"i\"   (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\ninline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n}\n\ninline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {\n  asm volatile (\n      \"       stw %[val], %[obj]      \\n\\t\"\n      \"       isync                   \\n\\t\"\n              : [obj] \"=m\" (*ptr)\n              : [val]  \"b\"  (value));\n}\n\ninline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {\n  asm volatile (\n      \"       lwsync                  \\n\\t\"\n      \"       stw %[val], %[obj]      \\n\\t\"\n              : [obj] \"=m\" (*ptr)\n              : [val]  \"b\"  (value));\n}\n\ninline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {\n  return *ptr;\n}\n\ninline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {\n  Atomic32 result;\n\n  asm volatile (\n      \"1:     lwz %[res], %[obj]              \\n\\t\"\n      \"       cmpw %[res], %[res]             \\n\\t\" // create data\n                                                    // dependency for\n                                                    // load/load ordering\n      \"       bne- 1b                         \\n\\t\" // never taken\n\n      \"       isync                           \\n\\t\"\n              : [res]  \"=b\" (result)\n              : [obj]  \"m\"  (*ptr),\n                [zero] \"i\"  (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\ninline Atomic32 Release_Load(volatile const Atomic32* ptr) {\n  Atomic32 result;\n\n  asm volatile (\n      \"       lwsync                          \\n\\t\"\n\n    
  \"1:     lwz %[res], %[obj]              \\n\\t\"\n      \"       cmpw %[res], %[res]             \\n\\t\" // create data\n                                                    // dependency for\n                                                    // load/load ordering\n      \"       bne- 1b                         \\n\\t\" // never taken\n              : [res]  \"=b\" (result)\n              : [obj]  \"m\"  (*ptr),\n                [zero] \"i\"  (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\n#ifdef GOOGLE_PROTOBUF_ARCH_64_BIT\ninline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,\n                                         Atomic64 old_value,\n                                         Atomic64 new_value) {\n  Atomic64 result;\n\n  asm volatile (\n      \"1:     ldarx %[res], %[zero], %[obj]   \\n\\t\"  // load and reserve\n      \"       cmpd %[cmp], %[res]             \\n\\t\"  // compare values\n      \"       bne- 2f                         \\n\\t\"\n\n      \"       stdcx. %[val], %[zero], %[obj]  \\n\\t\"  // store the new value\n      \"       bne- 1b                         \\n\\t\"\n      \"2:                                     \\n\\t\"\n              : [res]  \"=&b\" (result)\n              : [obj]  \"b\"   (ptr),\n                [cmp]  \"b\"   (old_value),\n                [val]  \"b\"   (new_value),\n                [zero] \"i\"   (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\ninline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,\n                                         Atomic64 new_value) {\n  Atomic64 result;\n\n  asm volatile (\n      \"1:     ldarx %[res], %[zero], %[obj]       \\n\\t\"\n      \"       stdcx. 
%[val], %[zero], %[obj]      \\n\\t\"\n      \"       bne- 1b                             \\n\\t\"\n              : [res]  \"=&b\" (result)\n              : [obj]  \"b\"   (ptr),\n                [val]  \"b\"   (new_value),\n                [zero] \"i\"   (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\ninline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,\n                                          Atomic64 increment) {\n  Atomic64 result;\n\n  asm volatile (\n      \"1:     ldarx %[res], %[zero], %[obj]   \\n\\t\" // load and reserve\n      \"       add %[res], %[res], %[val]      \\n\\t\" // add the operand\n      \"       stdcx. %[res], %[zero], %[obj]  \\n\\t\" // store old value if\n                                                    // still reserved\n\n      \"       bne- 1b                         \\n\\t\"\n              : [res]  \"=&b\" (result)\n              : [obj]  \"b\"   (ptr),\n                [val]  \"b\"   (increment),\n                [zero] \"i\"   (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\ninline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,\n                                        Atomic64 increment) {\n\n  Atomic64 result;\n\n  asm volatile (\n      \"       lwsync                          \\n\\t\"\n\n      \"1:     ldarx %[res], %[zero], %[obj]   \\n\\t\" // load and reserve\n      \"       add %[res], %[res], %[val]      \\n\\t\" // add the operand\n      \"       stdcx. 
%[res], %[zero], %[obj]  \\n\\t\" // store old value if\n                                                    // still reserved\n\n      \"       bne- 1b                         \\n\\t\"\n\n      \"       isync                           \\n\\t\"\n              : [res]  \"=&b\" (result)\n              : [obj]  \"b\"   (ptr),\n                [val]  \"b\"   (increment),\n                [zero] \"i\"   (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\ninline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  Atomic64 result;\n\n  asm volatile (\n      \"1:     ldarx %[res], %[zero], %[obj]   \\n\\t\"  // load and reserve\n      \"       cmpd %[cmp], %[res]             \\n\\t\"  // compare values\n      \"       bne- 2f                         \\n\\t\"\n\n      \"       stdcx. %[val], %[zero], %[obj]  \\n\\t\"  // store the new value\n      \"       bne- 1b                         \\n\\t\"\n      \"       isync                           \\n\\t\"\n      \"2:                                     \\n\\t\"\n              : [res]  \"=&b\" (result)\n              : [obj]  \"b\"   (ptr),\n                [cmp]  \"b\"   (old_value),\n                [val]  \"b\"   (new_value),\n                [zero] \"i\"   (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\ninline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  Atomic64 result;\n\n  asm volatile (\n      \"       lwsync                          \\n\\t\"\n\n      \"1:     ldarx %[res], %[zero], %[obj]   \\n\\t\"  // load and reserve\n      \"       cmpd %[cmp], %[res]             \\n\\t\"  // compare values\n      \"       bne- 2f                         \\n\\t\"\n\n      \"       stdcx. 
%[val], %[zero], %[obj]  \\n\\t\"  // store the new value\n      \"       bne- 1b                         \\n\\t\"\n      \"2:                                     \\n\\t\"\n              : [res]  \"=&b\" (result)\n              : [obj]  \"b\"   (ptr),\n                [cmp]  \"b\"   (old_value),\n                [val]  \"b\"   (new_value),\n                [zero] \"i\"   (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\ninline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {\n  *ptr = value;\n}\n\ninline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {\n  asm volatile (\n      \"       std %[val], %[obj]          \\n\\t\"\n      \"       isync                       \\n\\t\"\n              : [obj] \"=m\" (*ptr)\n              : [val] \"b\"  (value));\n}\n\ninline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {\n  asm volatile (\n      \"       lwsync                      \\n\\t\"\n      \"       std %[val], %[obj]          \\n\\t\"\n              : [obj] \"=m\" (*ptr)\n              : [val] \"b\"  (value));\n}\n\ninline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {\n  return *ptr;\n}\n\ninline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {\n  Atomic64 result;\n\n  asm volatile (\n      \"1:     ld %[res], %[obj]                   \\n\\t\"\n      \"       cmpd %[res], %[res]                 \\n\\t\" // create data\n                                                        // dependency for\n                                                        // load/load ordering\n      \"       bne- 1b                             \\n\\t\" // never taken\n\n      \"       isync                               \\n\\t\"\n              : [res]  \"=b\" (result)\n              : [obj]  \"m\"  (*ptr),\n                [zero] \"i\"  (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n\ninline Atomic64 Release_Load(volatile const Atomic64* ptr) {\n  Atomic64 result;\n\n  asm volatile (\n      \"       lwsync  
                            \\n\\t\"\n\n      \"1:     ld %[res], %[obj]                   \\n\\t\"\n      \"       cmpd %[res], %[res]                 \\n\\t\" // create data\n                                                        // dependency for\n                                                        // load/load ordering\n      \"       bne- 1b                             \\n\\t\" // never taken\n              : [res]  \"=b\" (result)\n              : [obj]  \"m\"  (*ptr),\n                [zero] \"i\"  (0)\n              : \"cr0\", \"ctr\");\n\n  return result;\n}\n#endif\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_SPARC_GCC_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops_internals_ppc_gcc.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2015 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: ogabbay@advaoptical.com (Oded Gabbay)\n// Cleaned up by: bsilver16384@gmail.com (Brian Silverman)\n//\n// This file is an internal atomic implementation, use atomicops.h instead.\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_PPC_GCC_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_PPC_GCC_H_\n\n#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__(\"\" : : : \"memory\")\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\ninline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,\n                                         Atomic32 old_value,\n                                         Atomic32 new_value) {\n  Atomic32 prev;\n\n  __asm__ __volatile__(\n      \"0:                                  \\n\\t\"\n      \"lwarx %[prev],0,%[ptr]              \\n\\t\"\n      \"cmpw 0,%[prev],%[old_value]         \\n\\t\"\n      \"bne- 1f                             \\n\\t\"\n      \"stwcx. 
%[new_value],0,%[ptr]        \\n\\t\"\n      \"bne- 0b                             \\n\\t\"\n      \"1:                                  \\n\\t\"\n      : [prev] \"=&r\"(prev), \"+m\"(*ptr)\n      : [ptr] \"r\"(ptr), [old_value] \"r\"(old_value), [new_value] \"r\"(new_value)\n      : \"cc\", \"memory\");\n\n  return prev;\n}\n\ninline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,\n                                         Atomic32 new_value) {\n  Atomic32 old;\n\n  __asm__ __volatile__(\n      \"0:                                  \\n\\t\"\n      \"lwarx %[old],0,%[ptr]               \\n\\t\"\n      \"stwcx. %[new_value],0,%[ptr]        \\n\\t\"\n      \"bne- 0b                             \\n\\t\"\n      : [old] \"=&r\"(old), \"+m\"(*ptr)\n      : [ptr] \"r\"(ptr), [new_value] \"r\"(new_value)\n      : \"cc\", \"memory\");\n\n  return old;\n}\n\ninline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,\n                                          Atomic32 increment) {\n  Atomic32 temp;\n\n  __asm__ __volatile__(\n      \"0:                                  \\n\\t\"\n      \"lwarx %[temp],0,%[ptr]              \\n\\t\"\n      \"add %[temp],%[increment],%[temp]    \\n\\t\"\n      \"stwcx. 
%[temp],0,%[ptr]             \\n\\t\"\n      \"bne- 0b                             \\n\\t\"\n      : [temp] \"=&r\"(temp)\n      : [increment] \"r\"(increment), [ptr] \"r\"(ptr)\n      : \"cc\", \"memory\");\n\n  return temp;\n}\n\ninline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,\n                                        Atomic32 increment) {\n  MemoryBarrier();\n  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);\n  MemoryBarrier();\n  return res;\n}\n\ninline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,\n                                       Atomic32 old_value, Atomic32 new_value) {\n  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n  MemoryBarrier();\n  return res;\n}\n\ninline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,\n                                       Atomic32 old_value, Atomic32 new_value) {\n  MemoryBarrier();\n  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n  return res;\n}\n\ninline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {\n  *ptr = value;\n}\n\ninline void MemoryBarrier() { __asm__ __volatile__(\"sync\" : : : \"memory\"); }\n\ninline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {\n  *ptr = value;\n  MemoryBarrier();\n}\n\ninline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {\n  MemoryBarrier();\n  *ptr = value;\n}\n\ninline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) { return *ptr; }\n\ninline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {\n  Atomic32 value = *ptr;\n  MemoryBarrier();\n  return value;\n}\n\ninline Atomic32 Release_Load(volatile const Atomic32 *ptr) {\n  MemoryBarrier();\n  return *ptr;\n}\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#undef ATOMICOPS_COMPILER_BARRIER\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_PPC_GCC_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops_internals_solaris.h",
    "content": "// Copyright 2014 Google Inc. All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file is an internal atomic implementation, use atomicops.h instead.\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_SPARC_GCC_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_SPARC_GCC_H_\n\n#include <atomic.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\ninline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,\n                                         Atomic32 old_value,\n                                         Atomic32 new_value) {\n  return (Atomic32)atomic_cas_32((volatile uint32_t*)ptr, (uint32_t)old_value, (uint32_t)new_value);\n}\n\ninline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,\n                                         Atomic32 new_value) {\n  return (Atomic32)atomic_swap_32((volatile uint32_t*)ptr, (uint32_t)new_value);\n}\n\ninline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,\n                                          Atomic32 increment) {\n  return (Atomic32)atomic_add_32_nv((volatile uint32_t*)ptr, (uint32_t)increment);\n}\n\ninline void MemoryBarrier(void) {\n\tmembar_producer();\n\tmembar_consumer();\n}\n\ninline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,\n                                        Atomic32 increment) {\n  MemoryBarrier();\n  Atomic32 ret = NoBarrier_AtomicIncrement(ptr, increment);\n  MemoryBarrier();\n\n  return ret;\n}\n\ninline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,\n                                 
      Atomic32 old_value,\n                                       Atomic32 new_value) {\n  Atomic32 ret = NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n  MemoryBarrier();\n\n  return ret;\n}\n\ninline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  MemoryBarrier();\n  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n}\n\ninline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n}\n\ninline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n  membar_producer();\n}\n\ninline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {\n  membar_consumer();\n  *ptr = value;\n}\n\ninline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {\n  return *ptr;\n}\n\ninline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {\n  Atomic32 val = *ptr;\n  membar_consumer();\n  return val;\n}\n\ninline Atomic32 Release_Load(volatile const Atomic32* ptr) {\n  membar_producer();\n  return *ptr;\n}\n\n#ifdef GOOGLE_PROTOBUF_ARCH_64_BIT\ninline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,\n                                         Atomic64 old_value,\n                                         Atomic64 new_value) {\n  return atomic_cas_64((volatile uint64_t*)ptr, (uint64_t)old_value, (uint64_t)new_value);\n}\n\ninline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) {\n  return atomic_swap_64((volatile uint64_t*)ptr, (uint64_t)new_value);\n}\n\ninline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment) {\n  return atomic_add_64_nv((volatile uint64_t*)ptr, increment);\n}\n\ninline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment) {\n  MemoryBarrier();\n  Atomic64 ret = atomic_add_64_nv((volatile uint64_t*)ptr, increment);\n  MemoryBarrier();\n  return ret;\n}\n\ninline Atomic64 
Acquire_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  Atomic64 ret = NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n  MemoryBarrier();\n  return ret;\n}\n\ninline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  MemoryBarrier();\n  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n}\n\ninline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {\n  *ptr = value;\n}\n\ninline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {\n  *ptr = value;\n  membar_producer();\n}\n\ninline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {\n  membar_consumer();\n  *ptr = value;\n}\n\ninline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {\n  return *ptr;\n}\n\ninline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {\n  Atomic64 ret = *ptr;\n  membar_consumer();\n  return ret;\n}\n\ninline Atomic64 Release_Load(volatile const Atomic64* ptr) {\n  membar_producer();\n  return *ptr;\n}\n#endif\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_SPARC_GCC_H_\n\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops_internals_tsan.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2013 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file is an internal atomic implementation for compiler-based\n// ThreadSanitizer (http://clang.llvm.org/docs/ThreadSanitizer.html).\n// Use atomicops.h instead.\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_\n\n#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__(\"\" : : : \"memory\")\n\n#include <sanitizer/tsan_interface_atomic.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\ninline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,\n                                         Atomic32 old_value,\n                                         Atomic32 new_value) {\n  Atomic32 cmp = old_value;\n  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,\n      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);\n  return cmp;\n}\n\ninline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,\n                                         Atomic32 new_value) {\n  return __tsan_atomic32_exchange(ptr, new_value,\n      __tsan_memory_order_relaxed);\n}\n\ninline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,\n                                       Atomic32 new_value) {\n  return __tsan_atomic32_exchange(ptr, new_value,\n      __tsan_memory_order_acquire);\n}\n\ninline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,\n                                       Atomic32 new_value) {\n  return 
__tsan_atomic32_exchange(ptr, new_value,\n      __tsan_memory_order_release);\n}\n\ninline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,\n                                          Atomic32 increment) {\n  return increment + __tsan_atomic32_fetch_add(ptr, increment,\n      __tsan_memory_order_relaxed);\n}\n\ninline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,\n                                        Atomic32 increment) {\n  return increment + __tsan_atomic32_fetch_add(ptr, increment,\n      __tsan_memory_order_acq_rel);\n}\n\ninline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  Atomic32 cmp = old_value;\n  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,\n      __tsan_memory_order_acquire, __tsan_memory_order_acquire);\n  return cmp;\n}\n\ninline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  Atomic32 cmp = old_value;\n  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,\n      __tsan_memory_order_release, __tsan_memory_order_relaxed);\n  return cmp;\n}\n\ninline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {\n  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);\n}\n\ninline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {\n  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);\n  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);\n}\n\ninline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {\n  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);\n}\n\ninline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {\n  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);\n}\n\ninline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {\n  return 
__tsan_atomic32_load(ptr, __tsan_memory_order_acquire);\n}\n\ninline Atomic32 Release_Load(volatile const Atomic32 *ptr) {\n  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);\n  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);\n}\n\ninline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,\n                                         Atomic64 old_value,\n                                         Atomic64 new_value) {\n  Atomic64 cmp = old_value;\n  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,\n      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);\n  return cmp;\n}\n\ninline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,\n                                         Atomic64 new_value) {\n  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);\n}\n\ninline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,\n                                       Atomic64 new_value) {\n  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);\n}\n\ninline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,\n                                       Atomic64 new_value) {\n  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);\n}\n\ninline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,\n                                          Atomic64 increment) {\n  return increment + __tsan_atomic64_fetch_add(ptr, increment,\n      __tsan_memory_order_relaxed);\n}\n\ninline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,\n                                        Atomic64 increment) {\n  return increment + __tsan_atomic64_fetch_add(ptr, increment,\n      __tsan_memory_order_acq_rel);\n}\n\ninline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {\n  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);\n}\n\ninline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {\n  __tsan_atomic64_store(ptr, value, 
__tsan_memory_order_relaxed);\n  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);\n}\n\ninline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {\n  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);\n}\n\ninline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {\n  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);\n}\n\ninline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {\n  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);\n}\n\ninline Atomic64 Release_Load(volatile const Atomic64 *ptr) {\n  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);\n  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);\n}\n\ninline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  Atomic64 cmp = old_value;\n  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,\n      __tsan_memory_order_acquire, __tsan_memory_order_acquire);\n  return cmp;\n}\n\ninline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  Atomic64 cmp = old_value;\n  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,\n      __tsan_memory_order_release, __tsan_memory_order_relaxed);\n  return cmp;\n}\n\ninline void MemoryBarrier() {\n  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);\n}\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#undef ATOMICOPS_COMPILER_BARRIER\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops_internals_x86_gcc.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2012 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file is an internal atomic implementation, use atomicops.h instead.\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// This struct is not part of the public API of this module; clients may not\n// use it.\n// Features of this x86.  Values may not be correct before main() is run,\n// but are set conservatively.\nstruct AtomicOps_x86CPUFeatureStruct {\n  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence\n                             // after acquire compare-and-swap.\n  bool has_sse2;             // Processor has SSE2.\n};\nextern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;\n\n#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__(\"\" : : : \"memory\")\n\n// 32-bit low-level operations on any platform.\n\ninline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,\n                                         Atomic32 old_value,\n                                         Atomic32 new_value) {\n  Atomic32 prev;\n  __asm__ __volatile__(\"lock; cmpxchgl %1,%2\"\n                       : \"=a\" (prev)\n                       : \"q\" (new_value), \"m\" (*ptr), \"0\" (old_value)\n                       : \"memory\");\n  return prev;\n}\n\ninline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,\n                                      
   Atomic32 new_value) {\n  __asm__ __volatile__(\"xchgl %1,%0\"  // The lock prefix is implicit for xchg.\n                       : \"=r\" (new_value)\n                       : \"m\" (*ptr), \"0\" (new_value)\n                       : \"memory\");\n  return new_value;  // Now it's the previous value.\n}\n\ninline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,\n                                          Atomic32 increment) {\n  Atomic32 temp = increment;\n  __asm__ __volatile__(\"lock; xaddl %0,%1\"\n                       : \"+r\" (temp), \"+m\" (*ptr)\n                       : : \"memory\");\n  // temp now holds the old value of *ptr\n  return temp + increment;\n}\n\ninline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,\n                                        Atomic32 increment) {\n  Atomic32 temp = increment;\n  __asm__ __volatile__(\"lock; xaddl %0,%1\"\n                       : \"+r\" (temp), \"+m\" (*ptr)\n                       : : \"memory\");\n  // temp now holds the old value of *ptr\n  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {\n    __asm__ __volatile__(\"lfence\" : : : \"memory\");\n  }\n  return temp + increment;\n}\n\ninline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {\n    __asm__ __volatile__(\"lfence\" : : : \"memory\");\n  }\n  return x;\n}\n\ninline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n}\n\ninline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n}\n\n#if defined(__x86_64__)\n\n// 64-bit implementations of memory 
barrier can be simpler, because it\n// \"mfence\" is guaranteed to exist.\ninline void MemoryBarrier() {\n  __asm__ __volatile__(\"mfence\" : : : \"memory\");\n}\n\ninline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n  MemoryBarrier();\n}\n\n#else\n\ninline void MemoryBarrier() {\n  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {\n    __asm__ __volatile__(\"mfence\" : : : \"memory\");\n  } else {  // mfence is faster but not present on PIII\n    Atomic32 x = 0;\n    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII\n  }\n}\n\ninline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {\n  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {\n    *ptr = value;\n    __asm__ __volatile__(\"mfence\" : : : \"memory\");\n  } else {\n    NoBarrier_AtomicExchange(ptr, value);\n                          // acts as a barrier on PIII\n  }\n}\n#endif\n\ninline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {\n  ATOMICOPS_COMPILER_BARRIER();\n  *ptr = value;  // An x86 store acts as a release barrier.\n  // See comments in Atomic64 version of Release_Store(), below.\n}\n\ninline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {\n  return *ptr;\n}\n\ninline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {\n  Atomic32 value = *ptr;  // An x86 load acts as a acquire barrier.\n  // See comments in Atomic64 version of Release_Store(), below.\n  ATOMICOPS_COMPILER_BARRIER();\n  return value;\n}\n\ninline Atomic32 Release_Load(volatile const Atomic32* ptr) {\n  MemoryBarrier();\n  return *ptr;\n}\n\n#if defined(__x86_64__)\n\n// 64-bit low-level operations on 64-bit platform.\n\ninline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,\n                                         Atomic64 old_value,\n                                         Atomic64 new_value) {\n  Atomic64 prev;\n  __asm__ __volatile__(\"lock; cmpxchgq %1,%2\"\n                       : \"=a\" (prev)\n                       : \"q\" 
(new_value), \"m\" (*ptr), \"0\" (old_value)\n                       : \"memory\");\n  return prev;\n}\n\ninline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,\n                                         Atomic64 new_value) {\n  __asm__ __volatile__(\"xchgq %1,%0\"  // The lock prefix is implicit for xchg.\n                       : \"=r\" (new_value)\n                       : \"m\" (*ptr), \"0\" (new_value)\n                       : \"memory\");\n  return new_value;  // Now it's the previous value.\n}\n\ninline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,\n                                          Atomic64 increment) {\n  Atomic64 temp = increment;\n  __asm__ __volatile__(\"lock; xaddq %0,%1\"\n                       : \"+r\" (temp), \"+m\" (*ptr)\n                       : : \"memory\");\n  // temp now contains the previous value of *ptr\n  return temp + increment;\n}\n\ninline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,\n                                        Atomic64 increment) {\n  Atomic64 temp = increment;\n  __asm__ __volatile__(\"lock; xaddq %0,%1\"\n                       : \"+r\" (temp), \"+m\" (*ptr)\n                       : : \"memory\");\n  // temp now contains the previous value of *ptr\n  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {\n    __asm__ __volatile__(\"lfence\" : : : \"memory\");\n  }\n  return temp + increment;\n}\n\ninline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {\n  *ptr = value;\n}\n\ninline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {\n  *ptr = value;\n  MemoryBarrier();\n}\n\ninline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {\n  ATOMICOPS_COMPILER_BARRIER();\n\n  *ptr = value;  // An x86 store acts as a release barrier\n                 // for current AMD/Intel chips as of Jan 2008.\n                 // See also Acquire_Load(), below.\n\n  // When new chips come out, check:\n  //  IA-32 Intel Architecture Software Developer's 
Manual, Volume 3:\n//  System Programming Guide, Chapter 7: Multiple-processor management,\n//  Section 7.2, Memory Ordering.\n// Last seen at:\n//   http://developer.intel.com/design/pentium4/manuals/index_new.htm\n//\n// x86 stores/loads fail to act as barriers for a few instructions (clflush\n// maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are\n// not generated by the compiler, and are rare.  Users of these instructions\n// need to know about cache behaviour in any case since all of these involve\n// either flushing cache lines or non-temporal cache hints.\n}\n\ninline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {\n  return *ptr;\n}\n\ninline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {\n  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,\n                          // for current AMD/Intel chips as of Jan 2008.\n                          // See also Release_Store(), above.\n  ATOMICOPS_COMPILER_BARRIER();\n  return value;\n}\n\ninline Atomic64 Release_Load(volatile const Atomic64* ptr) {\n  MemoryBarrier();\n  return *ptr;\n}\n\ninline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {\n    __asm__ __volatile__(\"lfence\" : : : \"memory\");\n  }\n  return x;\n}\n\ninline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n}\n\n#endif  // defined(__x86_64__)\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#undef ATOMICOPS_COMPILER_BARRIER\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/atomicops_internals_x86_msvc.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2012 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file is an internal atomic implementation, use atomicops.h instead.\n\n#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_\n#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\ninline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,\n                                          Atomic32 increment) {\n  return Barrier_AtomicIncrement(ptr, increment);\n}\n\n#if !(defined(_MSC_VER) && _MSC_VER >= 1400)\n#error \"We require at least vs2005 for MemoryBarrier\"\n#endif\n\ninline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n}\n\ninline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,\n                                       Atomic32 old_value,\n                                       Atomic32 new_value) {\n  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n}\n\ninline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;\n}\n\ninline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {\n  NoBarrier_AtomicExchange(ptr, value);\n              // acts as a barrier in this implementation\n}\n\ninline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {\n  *ptr = value;  // works w/o barrier for 
current Intel chips as of June 2005\n  // See comments in Atomic64 version of Release_Store() below.\n}\n\ninline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {\n  return *ptr;\n}\n\ninline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {\n  Atomic32 value = *ptr;\n  return value;\n}\n\ninline Atomic32 Release_Load(volatile const Atomic32* ptr) {\n  MemoryBarrier();\n  return *ptr;\n}\n\n#if defined(_WIN64)\n\n// 64-bit low-level operations on 64-bit platform.\n\ninline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,\n                                          Atomic64 increment) {\n  return Barrier_AtomicIncrement(ptr, increment);\n}\n\ninline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {\n  *ptr = value;\n}\n\ninline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {\n  NoBarrier_AtomicExchange(ptr, value);\n              // acts as a barrier in this implementation\n}\n\ninline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {\n  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005\n\n  // When new chips come out, check:\n  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:\n  //  System Programming Guide, Chapter 7: Multiple-processor management,\n  //  Section 7.2, Memory Ordering.\n  // Last seen at:\n  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm\n}\n\ninline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {\n  return *ptr;\n}\n\ninline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {\n  Atomic64 value = *ptr;\n  return value;\n}\n\ninline Atomic64 Release_Load(volatile const Atomic64* ptr) {\n  MemoryBarrier();\n  return *ptr;\n}\n\ninline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n}\n\ninline Atomic64 
Release_CompareAndSwap(volatile Atomic64* ptr,\n                                       Atomic64 old_value,\n                                       Atomic64 new_value) {\n  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);\n}\n\n#endif  // defined(_WIN64)\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/bytestream.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// This file declares the ByteSink and ByteSource abstract interfaces. These\n// interfaces represent objects that consume (ByteSink) or produce (ByteSource)\n// a sequence of bytes. 
Using these abstract interfaces in your APIs can help\n// make your code work with a variety of input and output types.\n//\n// This file also declares the following commonly used implementations of these\n// interfaces.\n//\n//   ByteSink:\n//      UncheckedArrayByteSink  Writes to an array, without bounds checking\n//      CheckedArrayByteSink    Writes to an array, with bounds checking\n//      GrowingArrayByteSink    Allocates and writes to a growable buffer\n//      StringByteSink          Writes to an STL string\n//      NullByteSink            Consumes a never-ending stream of bytes\n//\n//   ByteSource:\n//      ArrayByteSource         Reads from an array or string/StringPiece\n//      LimitedByteSource       Limits the number of bytes read from an\n\n#ifndef GOOGLE_PROTOBUF_STUBS_BYTESTREAM_H_\n#define GOOGLE_PROTOBUF_STUBS_BYTESTREAM_H_\n\n#include <stddef.h>\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/stringpiece.h>\n\nclass CordByteSink;\nclass MemBlock;\n\nnamespace google {\nnamespace protobuf {\nnamespace strings {\n\n// An abstract interface for an object that consumes a sequence of bytes. This\n// interface offers 3 different ways to append data, and a Flush() function.\n//\n// Example:\n//\n//   string my_data;\n//   ...\n//   ByteSink* sink = ...\n//   sink->Append(my_data.data(), my_data.size());\n//   sink->Flush();\n//\nclass LIBPROTOBUF_EXPORT ByteSink {\n public:\n  ByteSink() {}\n  virtual ~ByteSink() {}\n\n  // Appends the \"n\" bytes starting at \"bytes\".\n  virtual void Append(const char* bytes, size_t n) = 0;\n\n  // Flushes internal buffers. The default implementation does nothing. 
ByteSink\n  // subclasses may use internal buffers that require calling Flush() at the end\n  // of the stream.\n  virtual void Flush();\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ByteSink);\n};\n\n// An abstract interface for an object that produces a fixed-size sequence of\n// bytes.\n//\n// Example:\n//\n//   ByteSource* source = ...\n//   while (source->Available() > 0) {\n//     StringPiece data = source->Peek();\n//     ... do something with \"data\" ...\n//     source->Skip(data.length());\n//   }\n//\nclass LIBPROTOBUF_EXPORT ByteSource {\n public:\n  ByteSource() {}\n  virtual ~ByteSource() {}\n\n  // Returns the number of bytes left to read from the source. Available()\n  // should decrease by N each time Skip(N) is called. Available() may not\n  // increase. Available() returning 0 indicates that the ByteSource is\n  // exhausted.\n  //\n  // Note: Size() may have been a more appropriate name as it's more\n  //       indicative of the fixed-size nature of a ByteSource.\n  virtual size_t Available() const = 0;\n\n  // Returns a StringPiece of the next contiguous region of the source. Does not\n  // reposition the source. The returned region is empty iff Available() == 0.\n  //\n  // The returned region is valid until the next call to Skip() or until this\n  // object is destroyed, whichever occurs first.\n  //\n  // The length of the returned StringPiece will be <= Available().\n  virtual StringPiece Peek() = 0;\n\n  // Skips the next n bytes. Invalidates any StringPiece returned by a previous\n  // call to Peek().\n  //\n  // REQUIRES: Available() >= n\n  virtual void Skip(size_t n) = 0;\n\n  // Writes the next n bytes in this ByteSource to the given ByteSink, and\n  // advances this ByteSource past the copied bytes. 
The default implementation\n  // of this method just copies the bytes normally, but subclasses might\n  // override CopyTo to optimize certain cases.\n  //\n  // REQUIRES: Available() >= n\n  virtual void CopyTo(ByteSink* sink, size_t n);\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ByteSource);\n};\n\n//\n// Some commonly used implementations of ByteSink\n//\n\n// Implementation of ByteSink that writes to an unsized byte array. No\n// bounds-checking is performed--it is the caller's responsibility to ensure\n// that the destination array is large enough.\n//\n// Example:\n//\n//   char buf[10];\n//   UncheckedArrayByteSink sink(buf);\n//   sink.Append(\"hi\", 2);    // OK\n//   sink.Append(data, 100);  // WOOPS! Overflows buf[10].\n//\nclass LIBPROTOBUF_EXPORT UncheckedArrayByteSink : public ByteSink {\n public:\n  explicit UncheckedArrayByteSink(char* dest) : dest_(dest) {}\n  virtual void Append(const char* data, size_t n);\n\n  // Returns the current output pointer so that a caller can see how many bytes\n  // were produced.\n  //\n  // Note: this method is not part of the ByteSink interface.\n  char* CurrentDestination() const { return dest_; }\n\n private:\n  char* dest_;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(UncheckedArrayByteSink);\n};\n\n// Implementation of ByteSink that writes to a sized byte array. This sink will\n// not write more than \"capacity\" bytes to outbuf. 
Once \"capacity\" bytes are\n// appended, subsequent bytes will be ignored and Overflowed() will return true.\n// Overflowed() does not cause a runtime error (i.e., it does not CHECK fail).\n//\n// Example:\n//\n//   char buf[10];\n//   CheckedArrayByteSink sink(buf, 10);\n//   sink.Append(\"hi\", 2);    // OK\n//   sink.Append(data, 100);  // Will only write 8 more bytes\n//\nclass LIBPROTOBUF_EXPORT CheckedArrayByteSink : public ByteSink {\n public:\n  CheckedArrayByteSink(char* outbuf, size_t capacity);\n  virtual void Append(const char* bytes, size_t n);\n\n  // Returns the number of bytes actually written to the sink.\n  size_t NumberOfBytesWritten() const { return size_; }\n\n  // Returns true if any bytes were discarded, i.e., if there was an\n  // attempt to write more than 'capacity' bytes.\n  bool Overflowed() const { return overflowed_; }\n\n private:\n  char* outbuf_;\n  const size_t capacity_;\n  size_t size_;\n  bool overflowed_;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CheckedArrayByteSink);\n};\n\n// Implementation of ByteSink that allocates an internal buffer (a char array)\n// and expands it as needed to accommodate appended data (similar to a string),\n// and allows the caller to take ownership of the internal buffer via the\n// GetBuffer() method. The buffer returned from GetBuffer() must be deleted by\n// the caller with delete[]. GetBuffer() also sets the internal buffer to be\n// empty, and subsequent appends to the sink will create a new buffer. 
The\n// destructor will free the internal buffer if GetBuffer() was not called.\n//\n// Example:\n//\n//   GrowingArrayByteSink sink(10);\n//   sink.Append(\"hi\", 2);\n//   sink.Append(data, n);\n//   const char* buf = sink.GetBuffer();  // Ownership transferred\n//   delete[] buf;\n//\nclass LIBPROTOBUF_EXPORT GrowingArrayByteSink : public strings::ByteSink {\n public:\n  explicit GrowingArrayByteSink(size_t estimated_size);\n  virtual ~GrowingArrayByteSink();\n  virtual void Append(const char* bytes, size_t n);\n\n  // Returns the allocated buffer, and sets nbytes to its size. The caller takes\n  // ownership of the buffer and must delete it with delete[].\n  char* GetBuffer(size_t* nbytes);\n\n private:\n  void Expand(size_t amount);\n  void ShrinkToFit();\n\n  size_t capacity_;\n  char* buf_;\n  size_t size_;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(GrowingArrayByteSink);\n};\n\n// Implementation of ByteSink that appends to the given string.\n// Existing contents of \"dest\" are not modified; new data is appended.\n//\n// Example:\n//\n//   string dest = \"Hello \";\n//   StringByteSink sink(&dest);\n//   sink.Append(\"World\", 5);\n//   assert(dest == \"Hello World\");\n//\nclass LIBPROTOBUF_EXPORT StringByteSink : public ByteSink {\n public:\n  explicit StringByteSink(string* dest) : dest_(dest) {}\n  virtual void Append(const char* data, size_t n);\n\n private:\n  string* dest_;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(StringByteSink);\n};\n\n// Implementation of ByteSink that discards all data.\n//\n// Example:\n//\n//   NullByteSink sink;\n//   sink.Append(data, data.size());  // All data ignored.\n//\nclass LIBPROTOBUF_EXPORT NullByteSink : public ByteSink {\n public:\n  NullByteSink() {}\n  virtual void Append(const char *data, size_t n) {}\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(NullByteSink);\n};\n\n//\n// Some commonly used implementations of ByteSource\n//\n\n// Implementation of ByteSource that reads from a StringPiece.\n//\n// Example:\n//\n//   
string data = \"Hello\";\n//   ArrayByteSource source(data);\n//   assert(source.Available() == 5);\n//   assert(source.Peek() == \"Hello\");\n//\nclass LIBPROTOBUF_EXPORT ArrayByteSource : public ByteSource {\n public:\n  explicit ArrayByteSource(StringPiece s) : input_(s) {}\n\n  virtual size_t Available() const;\n  virtual StringPiece Peek();\n  virtual void Skip(size_t n);\n\n private:\n  StringPiece   input_;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ArrayByteSource);\n};\n\n// Implementation of ByteSource that wraps another ByteSource, limiting the\n// number of bytes returned.\n//\n// The caller maintains ownership of the underlying source, and may not use the\n// underlying source while using the LimitByteSource object.  The underlying\n// source's pointer is advanced by n bytes every time this LimitByteSource\n// object is advanced by n.\n//\n// Example:\n//\n//   string data = \"Hello World\";\n//   ArrayByteSource abs(data);\n//   assert(abs.Available() == data.size());\n//\n//   LimitByteSource limit(abs, 5);\n//   assert(limit.Available() == 5);\n//   assert(limit.Peek() == \"Hello\");\n//\nclass LIBPROTOBUF_EXPORT LimitByteSource : public ByteSource {\n public:\n  // Returns at most \"limit\" bytes from \"source\".\n  LimitByteSource(ByteSource* source, size_t limit);\n\n  virtual size_t Available() const;\n  virtual StringPiece Peek();\n  virtual void Skip(size_t n);\n\n  // We override CopyTo so that we can forward to the underlying source, in\n  // case it has an efficient implementation of CopyTo.\n  virtual void CopyTo(ByteSink* sink, size_t n);\n\n private:\n  ByteSource* source_;\n  size_t limit_;\n};\n\n}  // namespace strings\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_STUBS_BYTESTREAM_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/callback.h",
    "content": "#ifndef GOOGLE_PROTOBUF_STUBS_CALLBACK_H_\n#define GOOGLE_PROTOBUF_STUBS_CALLBACK_H_\n\n#include <google/protobuf/stubs/macros.h>\n#include <google/protobuf/stubs/type_traits.h>\n\n// ===================================================================\n// emulates google3/base/callback.h\n\nnamespace google {\nnamespace protobuf {\n\n// Abstract interface for a callback.  When calling an RPC, you must provide\n// a Closure to call when the procedure completes.  See the Service interface\n// in service.h.\n//\n// To automatically construct a Closure which calls a particular function or\n// method with a particular set of parameters, use the NewCallback() function.\n// Example:\n//   void FooDone(const FooResponse* response) {\n//     ...\n//   }\n//\n//   void CallFoo() {\n//     ...\n//     // When done, call FooDone() and pass it a pointer to the response.\n//     Closure* callback = NewCallback(&FooDone, response);\n//     // Make the call.\n//     service->Foo(controller, request, response, callback);\n//   }\n//\n// Example that calls a method:\n//   class Handler {\n//    public:\n//     ...\n//\n//     void FooDone(const FooResponse* response) {\n//       ...\n//     }\n//\n//     void CallFoo() {\n//       ...\n//       // When done, call FooDone() and pass it a pointer to the response.\n//       Closure* callback = NewCallback(this, &Handler::FooDone, response);\n//       // Make the call.\n//       service->Foo(controller, request, response, callback);\n//     }\n//   };\n//\n// Currently NewCallback() supports binding zero, one, or two arguments.\n//\n// Callbacks created with NewCallback() automatically delete themselves when\n// executed.  They should be used when a callback is to be called exactly\n// once (usually the case with RPC callbacks).  If a callback may be called\n// a different number of times (including zero), create it with\n// NewPermanentCallback() instead.  
You are then responsible for deleting the\n// callback (using the \"delete\" keyword as normal).\n//\n// Note that NewCallback() is a bit touchy regarding argument types.  Generally,\n// the values you provide for the parameter bindings must exactly match the\n// types accepted by the callback function.  For example:\n//   void Foo(string s);\n//   NewCallback(&Foo, \"foo\");          // WON'T WORK:  const char* != string\n//   NewCallback(&Foo, string(\"foo\"));  // WORKS\n// Also note that the arguments cannot be references:\n//   void Foo(const string& s);\n//   string my_str;\n//   NewCallback(&Foo, my_str);  // WON'T WORK:  Can't use references.\n// However, correctly-typed pointers will work just fine.\nclass LIBPROTOBUF_EXPORT Closure {\n public:\n  Closure() {}\n  virtual ~Closure();\n\n  virtual void Run() = 0;\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Closure);\n};\n\ntemplate<typename R>\nclass ResultCallback {\n public:\n  ResultCallback() {}\n  virtual ~ResultCallback() {}\n\n  virtual R Run() = 0;\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ResultCallback);\n};\n\ntemplate<typename R, typename A1>\nclass LIBPROTOBUF_EXPORT ResultCallback1 {\n public:\n  ResultCallback1() {}\n  virtual ~ResultCallback1() {}\n\n  virtual R Run(A1) = 0;\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ResultCallback1);\n};\n\ntemplate<typename R, typename A1, typename A2>\nclass LIBPROTOBUF_EXPORT ResultCallback2 {\n public:\n  ResultCallback2() {}\n  virtual ~ResultCallback2() {}\n\n  virtual R Run(A1,A2) = 0;\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ResultCallback2);\n};\n\nnamespace internal {\n\nclass LIBPROTOBUF_EXPORT FunctionClosure0 : public Closure {\n public:\n  typedef void (*FunctionType)();\n\n  FunctionClosure0(FunctionType function, bool self_deleting)\n    : function_(function), self_deleting_(self_deleting) {}\n  ~FunctionClosure0();\n\n  void Run() {\n    bool needs_delete = self_deleting_;  // read in case callback deletes\n    
function_();\n    if (needs_delete) delete this;\n  }\n\n private:\n  FunctionType function_;\n  bool self_deleting_;\n};\n\ntemplate <typename Class>\nclass MethodClosure0 : public Closure {\n public:\n  typedef void (Class::*MethodType)();\n\n  MethodClosure0(Class* object, MethodType method, bool self_deleting)\n    : object_(object), method_(method), self_deleting_(self_deleting) {}\n  ~MethodClosure0() {}\n\n  void Run() {\n    bool needs_delete = self_deleting_;  // read in case callback deletes\n    (object_->*method_)();\n    if (needs_delete) delete this;\n  }\n\n private:\n  Class* object_;\n  MethodType method_;\n  bool self_deleting_;\n};\n\ntemplate <typename Arg1>\nclass FunctionClosure1 : public Closure {\n public:\n  typedef void (*FunctionType)(Arg1 arg1);\n\n  FunctionClosure1(FunctionType function, bool self_deleting,\n                   Arg1 arg1)\n    : function_(function), self_deleting_(self_deleting),\n      arg1_(arg1) {}\n  ~FunctionClosure1() {}\n\n  void Run() {\n    bool needs_delete = self_deleting_;  // read in case callback deletes\n    function_(arg1_);\n    if (needs_delete) delete this;\n  }\n\n private:\n  FunctionType function_;\n  bool self_deleting_;\n  Arg1 arg1_;\n};\n\ntemplate <typename Class, typename Arg1>\nclass MethodClosure1 : public Closure {\n public:\n  typedef void (Class::*MethodType)(Arg1 arg1);\n\n  MethodClosure1(Class* object, MethodType method, bool self_deleting,\n                 Arg1 arg1)\n    : object_(object), method_(method), self_deleting_(self_deleting),\n      arg1_(arg1) {}\n  ~MethodClosure1() {}\n\n  void Run() {\n    bool needs_delete = self_deleting_;  // read in case callback deletes\n    (object_->*method_)(arg1_);\n    if (needs_delete) delete this;\n  }\n\n private:\n  Class* object_;\n  MethodType method_;\n  bool self_deleting_;\n  Arg1 arg1_;\n};\n\ntemplate <typename Arg1, typename Arg2>\nclass FunctionClosure2 : public Closure {\n public:\n  typedef void (*FunctionType)(Arg1 arg1, 
Arg2 arg2);\n\n  FunctionClosure2(FunctionType function, bool self_deleting,\n                   Arg1 arg1, Arg2 arg2)\n    : function_(function), self_deleting_(self_deleting),\n      arg1_(arg1), arg2_(arg2) {}\n  ~FunctionClosure2() {}\n\n  void Run() {\n    bool needs_delete = self_deleting_;  // read in case callback deletes\n    function_(arg1_, arg2_);\n    if (needs_delete) delete this;\n  }\n\n private:\n  FunctionType function_;\n  bool self_deleting_;\n  Arg1 arg1_;\n  Arg2 arg2_;\n};\n\ntemplate <typename Class, typename Arg1, typename Arg2>\nclass MethodClosure2 : public Closure {\n public:\n  typedef void (Class::*MethodType)(Arg1 arg1, Arg2 arg2);\n\n  MethodClosure2(Class* object, MethodType method, bool self_deleting,\n                 Arg1 arg1, Arg2 arg2)\n    : object_(object), method_(method), self_deleting_(self_deleting),\n      arg1_(arg1), arg2_(arg2) {}\n  ~MethodClosure2() {}\n\n  void Run() {\n    bool needs_delete = self_deleting_;  // read in case callback deletes\n    (object_->*method_)(arg1_, arg2_);\n    if (needs_delete) delete this;\n  }\n\n private:\n  Class* object_;\n  MethodType method_;\n  bool self_deleting_;\n  Arg1 arg1_;\n  Arg2 arg2_;\n};\n\ntemplate<typename R>\nclass FunctionResultCallback_0_0 : public ResultCallback<R> {\n public:\n  typedef R (*FunctionType)();\n\n  FunctionResultCallback_0_0(FunctionType function, bool self_deleting)\n      : function_(function), self_deleting_(self_deleting) {}\n  ~FunctionResultCallback_0_0() {}\n\n  R Run() {\n    bool needs_delete = self_deleting_;  // read in case callback deletes\n    R result = function_();\n    if (needs_delete) delete this;\n    return result;\n  }\n\n private:\n  FunctionType function_;\n  bool self_deleting_;\n};\n\ntemplate<typename R, typename P1>\nclass FunctionResultCallback_1_0 : public ResultCallback<R> {\n public:\n  typedef R (*FunctionType)(P1);\n\n  FunctionResultCallback_1_0(FunctionType function, bool self_deleting,\n                          
   P1 p1)\n      : function_(function), self_deleting_(self_deleting), p1_(p1) {}\n  ~FunctionResultCallback_1_0() {}\n\n  R Run() {\n    bool needs_delete = self_deleting_;  // read in case callback deletes\n    R result = function_(p1_);\n    if (needs_delete) delete this;\n    return result;\n  }\n\n private:\n  FunctionType function_;\n  bool self_deleting_;\n  P1 p1_;\n};\n\ntemplate<typename R, typename Arg1>\nclass FunctionResultCallback_0_1 : public ResultCallback1<R, Arg1> {\n public:\n  typedef R (*FunctionType)(Arg1 arg1);\n\n  FunctionResultCallback_0_1(FunctionType function, bool self_deleting)\n      : function_(function), self_deleting_(self_deleting) {}\n  ~FunctionResultCallback_0_1() {}\n\n  R Run(Arg1 a1) {\n    bool needs_delete = self_deleting_;  // read in case callback deletes\n    R result = function_(a1);\n    if (needs_delete) delete this;\n    return result;\n  }\n\n private:\n  FunctionType function_;\n  bool self_deleting_;\n};\n\ntemplate<typename R, typename P1, typename A1>\nclass FunctionResultCallback_1_1 : public ResultCallback1<R, A1> {\n public:\n  typedef R (*FunctionType)(P1, A1);\n\n  FunctionResultCallback_1_1(FunctionType function, bool self_deleting,\n                             P1 p1)\n      : function_(function), self_deleting_(self_deleting), p1_(p1) {}\n  ~FunctionResultCallback_1_1() {}\n\n  R Run(A1 a1) {\n    bool needs_delete = self_deleting_;  // read in case callback deletes\n    R result = function_(p1_, a1);\n    if (needs_delete) delete this;\n    return result;\n  }\n\n private:\n  FunctionType function_;\n  bool self_deleting_;\n  P1 p1_;\n};\n\ntemplate <typename T>\nstruct InternalConstRef {\n  typedef typename remove_reference<T>::type base_type;\n  typedef const base_type& type;\n};\n\ntemplate <typename R, typename T, typename P1, typename P2, typename P3,\n          typename P4, typename P5, typename A1, typename A2>\nclass MethodResultCallback_5_2 : public ResultCallback2<R, A1, A2> {\n public:\n  
typedef R (T::*MethodType)(P1, P2, P3, P4, P5, A1, A2);\n  MethodResultCallback_5_2(T* object, MethodType method, bool self_deleting,\n                           P1 p1, P2 p2, P3 p3, P4 p4, P5 p5)\n      : object_(object),\n        method_(method),\n        self_deleting_(self_deleting),\n        p1_(p1),\n        p2_(p2),\n        p3_(p3),\n        p4_(p4),\n        p5_(p5) {}\n  ~MethodResultCallback_5_2() {}\n\n  R Run(A1 a1, A2 a2) {\n    bool needs_delete = self_deleting_;\n    R result = (object_->*method_)(p1_, p2_, p3_, p4_, p5_, a1, a2);\n    if (needs_delete) delete this;\n    return result;\n  }\n\n private:\n  T* object_;\n  MethodType method_;\n  bool self_deleting_;\n  typename remove_reference<P1>::type p1_;\n  typename remove_reference<P2>::type p2_;\n  typename remove_reference<P3>::type p3_;\n  typename remove_reference<P4>::type p4_;\n  typename remove_reference<P5>::type p5_;\n};\n\n}  // namespace internal\n\n// See Closure.\ninline Closure* NewCallback(void (*function)()) {\n  return new internal::FunctionClosure0(function, true);\n}\n\n// See Closure.\ninline Closure* NewPermanentCallback(void (*function)()) {\n  return new internal::FunctionClosure0(function, false);\n}\n\n// See Closure.\ntemplate <typename Class>\ninline Closure* NewCallback(Class* object, void (Class::*method)()) {\n  return new internal::MethodClosure0<Class>(object, method, true);\n}\n\n// See Closure.\ntemplate <typename Class>\ninline Closure* NewPermanentCallback(Class* object, void (Class::*method)()) {\n  return new internal::MethodClosure0<Class>(object, method, false);\n}\n\n// See Closure.\ntemplate <typename Arg1>\ninline Closure* NewCallback(void (*function)(Arg1),\n                            Arg1 arg1) {\n  return new internal::FunctionClosure1<Arg1>(function, true, arg1);\n}\n\n// See Closure.\ntemplate <typename Arg1>\ninline Closure* NewPermanentCallback(void (*function)(Arg1),\n                                     Arg1 arg1) {\n  return new 
internal::FunctionClosure1<Arg1>(function, false, arg1);\n}\n\n// See Closure.\ntemplate <typename Class, typename Arg1>\ninline Closure* NewCallback(Class* object, void (Class::*method)(Arg1),\n                            Arg1 arg1) {\n  return new internal::MethodClosure1<Class, Arg1>(object, method, true, arg1);\n}\n\n// See Closure.\ntemplate <typename Class, typename Arg1>\ninline Closure* NewPermanentCallback(Class* object, void (Class::*method)(Arg1),\n                                     Arg1 arg1) {\n  return new internal::MethodClosure1<Class, Arg1>(object, method, false, arg1);\n}\n\n// See Closure.\ntemplate <typename Arg1, typename Arg2>\ninline Closure* NewCallback(void (*function)(Arg1, Arg2),\n                            Arg1 arg1, Arg2 arg2) {\n  return new internal::FunctionClosure2<Arg1, Arg2>(\n    function, true, arg1, arg2);\n}\n\n// See Closure.\ntemplate <typename Arg1, typename Arg2>\ninline Closure* NewPermanentCallback(void (*function)(Arg1, Arg2),\n                                     Arg1 arg1, Arg2 arg2) {\n  return new internal::FunctionClosure2<Arg1, Arg2>(\n    function, false, arg1, arg2);\n}\n\n// See Closure.\ntemplate <typename Class, typename Arg1, typename Arg2>\ninline Closure* NewCallback(Class* object, void (Class::*method)(Arg1, Arg2),\n                            Arg1 arg1, Arg2 arg2) {\n  return new internal::MethodClosure2<Class, Arg1, Arg2>(\n    object, method, true, arg1, arg2);\n}\n\n// See Closure.\ntemplate <typename Class, typename Arg1, typename Arg2>\ninline Closure* NewPermanentCallback(\n    Class* object, void (Class::*method)(Arg1, Arg2),\n    Arg1 arg1, Arg2 arg2) {\n  return new internal::MethodClosure2<Class, Arg1, Arg2>(\n    object, method, false, arg1, arg2);\n}\n\n// See ResultCallback\ntemplate<typename R>\ninline ResultCallback<R>* NewCallback(R (*function)()) {\n  return new internal::FunctionResultCallback_0_0<R>(function, true);\n}\n\n// See ResultCallback\ntemplate<typename R>\ninline 
ResultCallback<R>* NewPermanentCallback(R (*function)()) {\n  return new internal::FunctionResultCallback_0_0<R>(function, false);\n}\n\n// See ResultCallback\ntemplate<typename R, typename P1>\ninline ResultCallback<R>* NewCallback(R (*function)(P1), P1 p1) {\n  return new internal::FunctionResultCallback_1_0<R, P1>(\n      function, true, p1);\n}\n\n// See ResultCallback\ntemplate<typename R, typename P1>\ninline ResultCallback<R>* NewPermanentCallback(\n    R (*function)(P1), P1 p1) {\n  return new internal::FunctionResultCallback_1_0<R, P1>(\n      function, false, p1);\n}\n\n// See ResultCallback1\ntemplate<typename R, typename A1>\ninline ResultCallback1<R, A1>* NewCallback(R (*function)(A1)) {\n  return new internal::FunctionResultCallback_0_1<R, A1>(function, true);\n}\n\n// See ResultCallback1\ntemplate<typename R, typename A1>\ninline ResultCallback1<R, A1>* NewPermanentCallback(R (*function)(A1)) {\n  return new internal::FunctionResultCallback_0_1<R, A1>(function, false);\n}\n\n// See ResultCallback1\ntemplate<typename R, typename P1, typename A1>\ninline ResultCallback1<R, A1>* NewCallback(R (*function)(P1, A1), P1 p1) {\n  return new internal::FunctionResultCallback_1_1<R, P1, A1>(\n      function, true, p1);\n}\n\n// See ResultCallback1\ntemplate<typename R, typename P1, typename A1>\ninline ResultCallback1<R, A1>* NewPermanentCallback(\n    R (*function)(P1, A1), P1 p1) {\n  return new internal::FunctionResultCallback_1_1<R, P1, A1>(\n      function, false, p1);\n}\n\n// See MethodResultCallback_5_2\ntemplate <typename R, typename T, typename P1, typename P2, typename P3,\n          typename P4, typename P5, typename A1, typename A2>\ninline ResultCallback2<R, A1, A2>* NewPermanentCallback(\n    T* object, R (T::*function)(P1, P2, P3, P4, P5, A1, A2),\n    typename internal::InternalConstRef<P1>::type p1,\n    typename internal::InternalConstRef<P2>::type p2,\n    typename internal::InternalConstRef<P3>::type p3,\n    typename 
internal::InternalConstRef<P4>::type p4,\n    typename internal::InternalConstRef<P5>::type p5) {\n  return new internal::MethodResultCallback_5_2<R, T, P1, P2, P3, P4, P5, A1,\n                                                A2>(object, function, false, p1,\n                                                    p2, p3, p4, p5);\n}\n\n// A function which does nothing.  Useful for creating no-op callbacks, e.g.:\n//   Closure* nothing = NewCallback(&DoNothing);\nvoid LIBPROTOBUF_EXPORT DoNothing();\n\n\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_STUBS_CALLBACK_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/casts.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2014 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_CASTS_H__\n#define GOOGLE_PROTOBUF_CASTS_H__\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/type_traits.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n// Use implicit_cast as a safe version of static_cast or const_cast\n// for upcasting in the type hierarchy (i.e. casting a pointer to Foo\n// to a pointer to SuperclassOfFoo or casting a pointer to Foo to\n// a const pointer to Foo).\n// When you use implicit_cast, the compiler checks that the cast is safe.\n// Such explicit implicit_casts are necessary in surprisingly many\n// situations where C++ demands an exact type match instead of an\n// argument type convertable to a target type.\n//\n// The From type can be inferred, so the preferred syntax for using\n// implicit_cast is the same as for static_cast etc.:\n//\n//   implicit_cast<ToType>(expr)\n//\n// implicit_cast would have been part of the C++ standard library,\n// but the proposal was submitted too late.  It will probably make\n// its way into the language in the future.\ntemplate<typename To, typename From>\ninline To implicit_cast(From const &f) {\n  return f;\n}\n\n// When you upcast (that is, cast a pointer from type Foo to type\n// SuperclassOfFoo), it's fine to use implicit_cast<>, since upcasts\n// always succeed.  
When you downcast (that is, cast a pointer from\n// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because\n// how do you know the pointer is really of type SubclassOfFoo?  It\n// could be a bare Foo, or of type DifferentSubclassOfFoo.  Thus,\n// when you downcast, you should use this macro.  In debug mode, we\n// use dynamic_cast<> to double-check the downcast is legal (we die\n// if it's not).  In normal mode, we do the efficient static_cast<>\n// instead.  Thus, it's important to test in debug mode to make sure\n// the cast is legal!\n//    This is the only place in the code we should use dynamic_cast<>.\n// In particular, you SHOULDN'T be using dynamic_cast<> in order to\n// do RTTI (eg code like this:\n//    if (dynamic_cast<Subclass1>(foo)) HandleASubclass1Object(foo);\n//    if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);\n// You should design the code some other way not to need this.\n\ntemplate<typename To, typename From>     // use like this: down_cast<T*>(foo);\ninline To down_cast(From* f) {                   // so we only accept pointers\n  // Ensures that To is a sub-type of From *.  This test is here only\n  // for compile-time type checking, and has no overhead in an\n  // optimized build at run-time, as it will be optimized away\n  // completely.\n  if (false) {\n    implicit_cast<From*, To>(0);\n  }\n\n#if !defined(NDEBUG) && !defined(GOOGLE_PROTOBUF_NO_RTTI)\n  assert(f == NULL || dynamic_cast<To>(f) != NULL);  // RTTI: debug mode only!\n#endif\n  return static_cast<To>(f);\n}\n\ntemplate<typename To, typename From>    // use like this: down_cast<T&>(foo);\ninline To down_cast(From& f) {\n  typedef typename remove_reference<To>::type* ToAsPointer;\n  // Ensures that To is a sub-type of From *.  
This test is here only\n  // for compile-time type checking, and has no overhead in an\n  // optimized build at run-time, as it will be optimized away\n  // completely.\n  if (false) {\n    implicit_cast<From*, ToAsPointer>(0);\n  }\n\n#if !defined(NDEBUG) && !defined(GOOGLE_PROTOBUF_NO_RTTI)\n  // RTTI: debug mode only!\n  assert(dynamic_cast<ToAsPointer>(&f) != NULL);\n#endif\n  return *static_cast<ToAsPointer>(&f);\n}\n\ntemplate<typename To, typename From>\ninline To bit_cast(const From& from) {\n  GOOGLE_COMPILE_ASSERT(sizeof(From) == sizeof(To),\n                        bit_cast_with_different_sizes);\n  To dest;\n  memcpy(&dest, &from, sizeof(dest));\n  return dest;\n}\n\n}  // namespace internal\n\n// We made these internal so that they would show up as such in the docs,\n// but we don't want to stick \"internal::\" in front of them everywhere.\nusing internal::implicit_cast;\nusing internal::down_cast;\nusing internal::bit_cast;\n\n}  // namespace protobuf\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_CASTS_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/common.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda) and others\n//\n// Contains basic types and utilities used by the rest of the library.\n\n#ifndef GOOGLE_PROTOBUF_COMMON_H__\n#define GOOGLE_PROTOBUF_COMMON_H__\n\n#include <string>\n\n#include <google/protobuf/stubs/port.h>\n#include <google/protobuf/stubs/macros.h>\n#include <google/protobuf/stubs/platform_macros.h>\n\n// TODO(liujisi): Remove the following includes after the include clean-up.\n#include <google/protobuf/stubs/logging.h>\n#include <google/protobuf/stubs/scoped_ptr.h>\n#include <google/protobuf/stubs/mutex.h>\n#include <google/protobuf/stubs/callback.h>\n\n#ifndef PROTOBUF_USE_EXCEPTIONS\n#if defined(_MSC_VER) && defined(_CPPUNWIND)\n  #define PROTOBUF_USE_EXCEPTIONS 1\n#elif defined(__EXCEPTIONS)\n  #define PROTOBUF_USE_EXCEPTIONS 1\n#else\n  #define PROTOBUF_USE_EXCEPTIONS 0\n#endif\n#endif\n\n#if PROTOBUF_USE_EXCEPTIONS\n#include <exception>\n#endif\n#if defined(__APPLE__)\n#include <TargetConditionals.h>  // for TARGET_OS_IPHONE\n#endif\n\n#if defined(__ANDROID__) || defined(GOOGLE_PROTOBUF_OS_ANDROID) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || defined(GOOGLE_PROTOBUF_OS_IPHONE)\n#include <pthread.h>\n#endif\n\n#if defined(_WIN32) && defined(GetMessage)\n// Allow GetMessage to be used as a valid method name in protobuf classes.\n// windows.h defines GetMessage() as a macro.  Let's re-define it as an inline\n// function.  
The inline function should be equivalent for C++ users.\ninline BOOL GetMessage_Win32(\n    LPMSG lpMsg, HWND hWnd,\n    UINT wMsgFilterMin, UINT wMsgFilterMax) {\n  return GetMessage(lpMsg, hWnd, wMsgFilterMin, wMsgFilterMax);\n}\n#undef GetMessage\ninline BOOL GetMessage(\n    LPMSG lpMsg, HWND hWnd,\n    UINT wMsgFilterMin, UINT wMsgFilterMax) {\n  return GetMessage_Win32(lpMsg, hWnd, wMsgFilterMin, wMsgFilterMax);\n}\n#endif\n\nnamespace std {}\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// Some of these constants are macros rather than const ints so that they can\n// be used in #if directives.\n\n// The current version, represented as a single integer to make comparison\n// easier:  major * 10^6 + minor * 10^3 + micro\n#define GOOGLE_PROTOBUF_VERSION 3001000\n\n// The minimum library version which works with the current version of the\n// headers.\n#define GOOGLE_PROTOBUF_MIN_LIBRARY_VERSION 3001000\n\n// The minimum header version which works with the current version of\n// the library.  This constant should only be used by protoc's C++ code\n// generator.\nstatic const int kMinHeaderVersionForLibrary = 3001000;\n\n// The minimum protoc version which works with the current version of the\n// headers.\n#define GOOGLE_PROTOBUF_MIN_PROTOC_VERSION 3001000\n\n// The minimum header version which works with the current version of\n// protoc.  This constant should only be used in VerifyVersion().\nstatic const int kMinHeaderVersionForProtoc = 3001000;\n\n// Verifies that the headers and libraries are compatible.  
Use the macro\n// below to call this.\nvoid LIBPROTOBUF_EXPORT VerifyVersion(int headerVersion, int minLibraryVersion,\n                                      const char* filename);\n\n// Converts a numeric version number to a string.\nstd::string LIBPROTOBUF_EXPORT VersionString(int version);\n\n}  // namespace internal\n\n// Place this macro in your main() function (or somewhere before you attempt\n// to use the protobuf library) to verify that the version you link against\n// matches the headers you compiled against.  If a version mismatch is\n// detected, the process will abort.\n#define GOOGLE_PROTOBUF_VERIFY_VERSION                                    \\\n  ::google::protobuf::internal::VerifyVersion(                            \\\n    GOOGLE_PROTOBUF_VERSION, GOOGLE_PROTOBUF_MIN_LIBRARY_VERSION,         \\\n    __FILE__)\n\n\n// ===================================================================\n// from google3/util/utf8/public/unilib.h\n\nclass StringPiece;\nnamespace internal {\n\n// Checks if the buffer contains structurally-valid UTF-8.  Implemented in\n// structurally_valid.cc.\nLIBPROTOBUF_EXPORT bool IsStructurallyValidUTF8(const char* buf, int len);\n\ninline bool IsStructurallyValidUTF8(const std::string& str) {\n  return IsStructurallyValidUTF8(str.data(), static_cast<int>(str.length()));\n}\n\n// Returns initial number of bytes of structually valid UTF-8.\nLIBPROTOBUF_EXPORT int UTF8SpnStructurallyValid(const StringPiece& str);\n\n// Coerce UTF-8 byte string in src_str to be\n// a structurally-valid equal-length string by selectively\n// overwriting illegal bytes with replace_char (typically ' ' or '?').\n// replace_char must be legal printable 7-bit Ascii 0x20..0x7e.\n// src_str is read-only.\n//\n// Returns pointer to output buffer, src_str.data() if no changes were made,\n//  or idst if some bytes were changed. 
idst is allocated by the caller\n//  and must be at least as big as src_str\n//\n// Optimized for: all structurally valid and no byte copying is done.\n//\nLIBPROTOBUF_EXPORT char* UTF8CoerceToStructurallyValid(\n    const StringPiece& str, char* dst, char replace_char);\n\n}  // namespace internal\n\n\n// ===================================================================\n// Shutdown support.\n\n// Shut down the entire protocol buffers library, deleting all static-duration\n// objects allocated by the library or by generated .pb.cc files.\n//\n// There are two reasons you might want to call this:\n// * You use a draconian definition of \"memory leak\" in which you expect\n//   every single malloc() to have a corresponding free(), even for objects\n//   which live until program exit.\n// * You are writing a dynamically-loaded library which needs to clean up\n//   after itself when the library is unloaded.\n//\n// It is safe to call this multiple times.  However, it is not safe to use\n// any other part of the protocol buffers library after\n// ShutdownProtobufLibrary() has been called.\nLIBPROTOBUF_EXPORT void ShutdownProtobufLibrary();\n\nnamespace internal {\n\n// Register a function to be called when ShutdownProtocolBuffers() is called.\nLIBPROTOBUF_EXPORT void OnShutdown(void (*func)());\n\n}  // namespace internal\n\n#if PROTOBUF_USE_EXCEPTIONS\nclass FatalException : public std::exception {\n public:\n  FatalException(const char* filename, int line, const std::string& message)\n      : filename_(filename), line_(line), message_(message) {}\n  virtual ~FatalException() throw();\n\n  virtual const char* what() const throw();\n\n  const char* filename() const { return filename_; }\n  int line() const { return line_; }\n  const std::string& message() const { return message_; }\n\n private:\n  const char* filename_;\n  const int line_;\n  const std::string message_;\n};\n#endif\n\n// This is at the end of the file instead of the beginning to work around a bug\n// 
in some versions of MSVC.\nusing namespace std;  // Don't do this at home, kids.\n\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_COMMON_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/fastmem.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2014 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Fast memory copying and comparison routines.\n//   strings::fastmemcmp_inlined() replaces memcmp()\n//   strings::memcpy_inlined() replaces memcpy()\n//   strings::memeq(a, b, n) replaces memcmp(a, b, n) == 0\n//\n// strings::*_inlined() routines are inline versions of the\n// routines exported by this module.  Sometimes using the inlined\n// versions is faster.  Measure before using the inlined versions.\n//\n// Performance measurement:\n//   strings::fastmemcmp_inlined\n//     Analysis: memcmp, fastmemcmp_inlined, fastmemcmp\n//     2012-01-30\n\n#ifndef GOOGLE_PROTOBUF_STUBS_FASTMEM_H_\n#define GOOGLE_PROTOBUF_STUBS_FASTMEM_H_\n\n#include <stddef.h>\n#include <stdio.h>\n#include <string.h>\n\n#include <google/protobuf/stubs/common.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// Return true if the n bytes at a equal the n bytes at b.\n// The regions are allowed to overlap.\n//\n// The performance is similar to the performance memcmp(), but faster for\n// moderately-sized inputs, or inputs that share a common prefix and differ\n// somewhere in their last 8 bytes. 
Further optimizations can be added later\n// if it makes sense to do so.\ninline bool memeq(const char* a, const char* b, size_t n) {\n  size_t n_rounded_down = n & ~static_cast<size_t>(7);\n  if (GOOGLE_PREDICT_FALSE(n_rounded_down == 0)) {  // n <= 7\n    return memcmp(a, b, n) == 0;\n  }\n  // n >= 8\n  uint64 u = GOOGLE_UNALIGNED_LOAD64(a) ^ GOOGLE_UNALIGNED_LOAD64(b);\n  uint64 v = GOOGLE_UNALIGNED_LOAD64(a + n - 8) ^ GOOGLE_UNALIGNED_LOAD64(b + n - 8);\n  if ((u | v) != 0) {  // The first or last 8 bytes differ.\n    return false;\n  }\n  a += 8;\n  b += 8;\n  n = n_rounded_down - 8;\n  if (n > 128) {\n    // As of 2012, memcmp on x86-64 uses a big unrolled loop with SSE2\n    // instructions, and while we could try to do something faster, it\n    // doesn't seem worth pursuing.\n    return memcmp(a, b, n) == 0;\n  }\n  for (; n >= 16; n -= 16) {\n    uint64 x = GOOGLE_UNALIGNED_LOAD64(a) ^ GOOGLE_UNALIGNED_LOAD64(b);\n    uint64 y = GOOGLE_UNALIGNED_LOAD64(a + 8) ^ GOOGLE_UNALIGNED_LOAD64(b + 8);\n    if ((x | y) != 0) {\n      return false;\n    }\n    a += 16;\n    b += 16;\n  }\n  // n must be 0 or 8 now because it was a multiple of 8 at the top of the loop.\n  return n == 0 || GOOGLE_UNALIGNED_LOAD64(a) == GOOGLE_UNALIGNED_LOAD64(b);\n}\n\ninline int fastmemcmp_inlined(const char *a, const char *b, size_t n) {\n  if (n >= 64) {\n    return memcmp(a, b, n);\n  }\n  const char* a_limit = a + n;\n  while (a + sizeof(uint64) <= a_limit &&\n         GOOGLE_UNALIGNED_LOAD64(a) == GOOGLE_UNALIGNED_LOAD64(b)) {\n    a += sizeof(uint64);\n    b += sizeof(uint64);\n  }\n  if (a + sizeof(uint32) <= a_limit &&\n      GOOGLE_UNALIGNED_LOAD32(a) == GOOGLE_UNALIGNED_LOAD32(b)) {\n    a += sizeof(uint32);\n    b += sizeof(uint32);\n  }\n  while (a < a_limit) {\n    int d = static_cast<uint32>(*a++) - static_cast<uint32>(*b++);\n    if (d) return d;\n  }\n  return 0;\n}\n\n// The standard memcpy operation is slow for variable small sizes.\n// This implementation 
inlines the optimal realization for sizes 1 to 16.\n// To avoid code bloat don't use it in case of not performance-critical spots,\n// nor when you don't expect very frequent values of size <= 16.\ninline void memcpy_inlined(char *dst, const char *src, size_t size) {\n  // Compiler inlines code with minimal amount of data movement when third\n  // parameter of memcpy is a constant.\n  switch (size) {\n    case  1: memcpy(dst, src, 1); break;\n    case  2: memcpy(dst, src, 2); break;\n    case  3: memcpy(dst, src, 3); break;\n    case  4: memcpy(dst, src, 4); break;\n    case  5: memcpy(dst, src, 5); break;\n    case  6: memcpy(dst, src, 6); break;\n    case  7: memcpy(dst, src, 7); break;\n    case  8: memcpy(dst, src, 8); break;\n    case  9: memcpy(dst, src, 9); break;\n    case 10: memcpy(dst, src, 10); break;\n    case 11: memcpy(dst, src, 11); break;\n    case 12: memcpy(dst, src, 12); break;\n    case 13: memcpy(dst, src, 13); break;\n    case 14: memcpy(dst, src, 14); break;\n    case 15: memcpy(dst, src, 15); break;\n    case 16: memcpy(dst, src, 16); break;\n    default: memcpy(dst, src, size); break;\n  }\n}\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_STUBS_FASTMEM_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/hash.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//\n// Deals with the fact that hash_map is not defined everywhere.\n\n#ifndef GOOGLE_PROTOBUF_STUBS_HASH_H__\n#define GOOGLE_PROTOBUF_STUBS_HASH_H__\n\n#include <string.h>\n#include <google/protobuf/stubs/common.h>\n\n#define GOOGLE_PROTOBUF_HAVE_HASH_MAP 1\n#define GOOGLE_PROTOBUF_HAVE_HASH_SET 1\n\n// Android\n#if defined(__ANDROID__)\n# undef GOOGLE_PROTOBUF_HAVE_HASH_MAP\n# undef GOOGLE_PROTOBUF_HAVE_HASH_SET\n\n// Use C++11 unordered_{map|set} if available.\n#elif ((_LIBCPP_STD_VER >= 11) || \\\n      (((__cplusplus >= 201103L) || defined(__GXX_EXPERIMENTAL_CXX0X)) && \\\n      (__GLIBCXX__ > 20090421)))\n# define GOOGLE_PROTOBUF_HAS_CXX11_HASH\n\n// For XCode >= 4.6:  the compiler is clang with libc++.\n// For earlier XCode version: the compiler is gcc-4.2.1 with libstdc++.\n// libc++ provides <unordered_map> and friends even in non C++11 mode,\n// and it does not provide the tr1 library. Therefore the following macro\n// checks against this special case.\n// Note that we should not test the __APPLE_CC__ version number or the\n// __clang__ macro, since the new compiler can still use -stdlib=libstdc++, in\n// which case <unordered_map> is not compilable without -std=c++11\n#elif defined(__APPLE_CC__)\n# if __GNUC__ >= 4\n#  define GOOGLE_PROTOBUF_HAS_TR1\n# else\n// Not tested for gcc < 4... 
These setting can compile under 4.2.1 though.\n#  define GOOGLE_PROTOBUF_HASH_NAMESPACE __gnu_cxx\n#  include <ext/hash_map>\n#  define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map\n#  include <ext/hash_set>\n#  define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set\n# endif\n\n// Version checks for gcc.\n#elif defined(__GNUC__)\n// For GCC 4.x+, use tr1::unordered_map/set; otherwise, follow the\n// instructions from:\n// https://gcc.gnu.org/onlinedocs/libstdc++/manual/backwards.html\n# if __GNUC__ >= 4\n#  define GOOGLE_PROTOBUF_HAS_TR1\n# elif __GNUC__ >= 3\n#  include <backward/hash_map>\n#  define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map\n#  include <backward/hash_set>\n#  define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set\n#  if __GNUC__ == 3 && __GNUC_MINOR__ == 0\n#   define GOOGLE_PROTOBUF_HASH_NAMESPACE std       // GCC 3.0\n#  else\n#   define GOOGLE_PROTOBUF_HASH_NAMESPACE __gnu_cxx // GCC 3.1 and later\n#  endif\n# else\n#  define GOOGLE_PROTOBUF_HASH_NAMESPACE\n#  include <hash_map>\n#  define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map\n#  include <hash_set>\n#  define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set\n# endif\n\n// Version checks for MSC.\n// Apparently Microsoft decided to move hash_map *back* to the std namespace in\n// MSVC 2010:\n// http://blogs.msdn.com/vcblog/archive/2009/05/25/stl-breaking-changes-in-visual-studio-2010-beta-1.aspx\n// And.. they are moved back to stdext in MSVC 2013 (haven't checked 2012). 
That\n// said, use unordered_map for MSVC 2010 and beyond is our safest bet.\n#elif defined(_MSC_VER)\n# if _MSC_VER >= 1600  // Since Visual Studio 2010\n#  define GOOGLE_PROTOBUF_HAS_CXX11_HASH\n#  define GOOGLE_PROTOBUF_HASH_COMPARE std::hash_compare\n# elif _MSC_VER >= 1500  // Since Visual Studio 2008\n#  define GOOGLE_PROTOBUF_HASH_NAMESPACE stdext\n#  include <hash_map>\n#  define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map\n#  include <hash_set>\n#  define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set\n#  define GOOGLE_PROTOBUF_HASH_COMPARE stdext::hash_compare\n#  define GOOGLE_PROTOBUF_CONTAINERS_NEED_HASH_COMPARE\n# elif _MSC_VER >= 1310\n#  define GOOGLE_PROTOBUF_HASH_NAMESPACE stdext\n#  include <hash_map>\n#  define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map\n#  include <hash_set>\n#  define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set\n#  define GOOGLE_PROTOBUF_HASH_COMPARE stdext::hash_compare\n# else\n#  define GOOGLE_PROTOBUF_HASH_NAMESPACE std\n#  include <hash_map>\n#  define GOOGLE_PROTOBUF_HASH_MAP_CLASS hash_map\n#  include <hash_set>\n#  define GOOGLE_PROTOBUF_HASH_SET_CLASS hash_set\n#  define GOOGLE_PROTOBUF_HASH_COMPARE stdext::hash_compare\n# endif\n\n// **ADD NEW COMPILERS SUPPORT HERE.**\n// For other compilers, undefine the macro and fallback to use std::map, in\n// google/protobuf/stubs/hash.h\n#else\n# undef GOOGLE_PROTOBUF_HAVE_HASH_MAP\n# undef GOOGLE_PROTOBUF_HAVE_HASH_SET\n#endif\n\n#if defined(GOOGLE_PROTOBUF_HAS_CXX11_HASH)\n# define GOOGLE_PROTOBUF_HASH_NAMESPACE std\n# include <unordered_map>\n# define GOOGLE_PROTOBUF_HASH_MAP_CLASS unordered_map\n# include <unordered_set>\n# define GOOGLE_PROTOBUF_HASH_SET_CLASS unordered_set\n#elif defined(GOOGLE_PROTOBUF_HAS_TR1)\n# define GOOGLE_PROTOBUF_HASH_NAMESPACE std::tr1\n# include <tr1/unordered_map>\n# define GOOGLE_PROTOBUF_HASH_MAP_CLASS unordered_map\n# include <tr1/unordered_set>\n# define GOOGLE_PROTOBUF_HASH_SET_CLASS unordered_set\n#endif\n\n# define 
GOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_START \\\n  namespace google {                                      \\\n  namespace protobuf {\n# define GOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_END }}\n\n#undef GOOGLE_PROTOBUF_HAS_CXX11_HASH\n#undef GOOGLE_PROTOBUF_HAS_TR1\n\n#if defined(GOOGLE_PROTOBUF_HAVE_HASH_MAP) && \\\n    defined(GOOGLE_PROTOBUF_HAVE_HASH_SET)\n#else\n#define GOOGLE_PROTOBUF_MISSING_HASH\n#include <map>\n#include <set>\n#endif\n\nnamespace google {\nnamespace protobuf {\n\n#ifdef GOOGLE_PROTOBUF_MISSING_HASH\n#undef GOOGLE_PROTOBUF_MISSING_HASH\n\n// This system doesn't have hash_map or hash_set.  Emulate them using map and\n// set.\n\n// Make hash<T> be the same as less<T>.  Note that everywhere where custom\n// hash functions are defined in the protobuf code, they are also defined such\n// that they can be used as \"less\" functions, which is required by MSVC anyway.\ntemplate <typename Key>\nstruct hash {\n  // Dummy, just to make derivative hash functions compile.\n  int operator()(const Key& key) {\n    GOOGLE_LOG(FATAL) << \"Should never be called.\";\n    return 0;\n  }\n\n  inline bool operator()(const Key& a, const Key& b) const {\n    return a < b;\n  }\n};\n\n// Make sure char* is compared by value.\ntemplate <>\nstruct hash<const char*> {\n  // Dummy, just to make derivative hash functions compile.\n  int operator()(const char* key) {\n    GOOGLE_LOG(FATAL) << \"Should never be called.\";\n    return 0;\n  }\n\n  inline bool operator()(const char* a, const char* b) const {\n    return strcmp(a, b) < 0;\n  }\n};\n\ntemplate <typename Key, typename Data,\n          typename HashFcn = hash<Key>,\n          typename EqualKey = std::equal_to<Key>,\n          typename Alloc = std::allocator< std::pair<const Key, Data> > >\nclass hash_map : public std::map<Key, Data, HashFcn, Alloc> {\n  typedef std::map<Key, Data, HashFcn, Alloc> BaseClass;\n\n public:\n  hash_map(int a = 0, const HashFcn& b = HashFcn(),\n           const EqualKey& c = 
EqualKey(),\n           const Alloc& d = Alloc()) : BaseClass(b, d) {}\n\n  HashFcn hash_function() const { return HashFcn(); }\n};\n\ntemplate <typename Key,\n          typename HashFcn = hash<Key>,\n          typename EqualKey = std::equal_to<Key> >\nclass hash_set : public std::set<Key, HashFcn> {\n public:\n  hash_set(int = 0) {}\n\n  HashFcn hash_function() const { return HashFcn(); }\n};\n\n#elif defined(_MSC_VER) && !defined(_STLPORT_VERSION)\n\ntemplate <typename Key>\nstruct hash : public GOOGLE_PROTOBUF_HASH_COMPARE<Key> {\n};\n\n// MSVC's hash_compare<const char*> hashes based on the string contents but\n// compares based on the string pointer.  WTF?\nclass CstringLess {\n public:\n  inline bool operator()(const char* a, const char* b) const {\n    return strcmp(a, b) < 0;\n  }\n};\n\ntemplate <>\nstruct hash<const char*>\n    : public GOOGLE_PROTOBUF_HASH_COMPARE<const char*, CstringLess> {};\n\n#ifdef GOOGLE_PROTOBUF_CONTAINERS_NEED_HASH_COMPARE\n\ntemplate <typename Key, typename HashFcn, typename EqualKey>\nstruct InternalHashCompare : public GOOGLE_PROTOBUF_HASH_COMPARE<Key> {\n  InternalHashCompare() {}\n  InternalHashCompare(HashFcn hashfcn, EqualKey equalkey)\n      : hashfcn_(hashfcn), equalkey_(equalkey) {}\n  size_t operator()(const Key& key) const { return hashfcn_(key); }\n  bool operator()(const Key& key1, const Key& key2) const {\n    return !equalkey_(key1, key2);\n  }\n  HashFcn hashfcn_;\n  EqualKey equalkey_;\n};\n\ntemplate <typename Key, typename Data,\n          typename HashFcn = hash<Key>,\n          typename EqualKey = std::equal_to<Key>,\n          typename Alloc = std::allocator< std::pair<const Key, Data> > >\nclass hash_map\n    : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<\n          Key, Data, InternalHashCompare<Key, HashFcn, EqualKey>, Alloc> {\n  typedef GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<\n      Key, Data, InternalHashCompare<Key, HashFcn, EqualKey>, Alloc> 
BaseClass;\n\n public:\n  hash_map(int a = 0, const HashFcn& b = HashFcn(),\n           const EqualKey& c = EqualKey(), const Alloc& d = Alloc())\n      : BaseClass(InternalHashCompare<Key, HashFcn, EqualKey>(b, c), d) {}\n\n  HashFcn hash_function() const { return HashFcn(); }\n};\n\ntemplate <typename Key, typename HashFcn = hash<Key>,\n          typename EqualKey = std::equal_to<Key> >\nclass hash_set\n    : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_SET_CLASS<\n          Key, InternalHashCompare<Key, HashFcn, EqualKey> > {\n public:\n  hash_set(int = 0) {}\n\n  HashFcn hash_function() const { return HashFcn(); }\n};\n\n#else  // GOOGLE_PROTOBUF_CONTAINERS_NEED_HASH_COMPARE\n\ntemplate <typename Key, typename Data,\n          typename HashFcn = hash<Key>,\n          typename EqualKey = std::equal_to<Key>,\n          typename Alloc = std::allocator< std::pair<const Key, Data> > >\nclass hash_map\n    : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<\n          Key, Data, HashFcn, EqualKey, Alloc> {\n  typedef GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<\n      Key, Data, HashFcn, EqualKey, Alloc> BaseClass;\n\n public:\n  hash_map(int a = 0, const HashFcn& b = HashFcn(),\n           const EqualKey& c = EqualKey(),\n           const Alloc& d = Alloc()) : BaseClass(a, b, c, d) {}\n\n  HashFcn hash_function() const { return HashFcn(); }\n};\n\ntemplate <typename Key, typename HashFcn = hash<Key>,\n          typename EqualKey = std::equal_to<Key> >\nclass hash_set\n    : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_SET_CLASS<\n          Key, HashFcn, EqualKey> {\n public:\n  hash_set(int = 0) {}\n\n  HashFcn hash_function() const { return HashFcn(); }\n};\n#endif  // GOOGLE_PROTOBUF_CONTAINERS_NEED_HASH_COMPARE\n\n#else  // defined(_MSC_VER) && !defined(_STLPORT_VERSION)\n\ntemplate <typename Key>\nstruct hash : public GOOGLE_PROTOBUF_HASH_NAMESPACE::hash<Key> {\n};\n\ntemplate <typename 
Key>\nstruct hash<const Key*> {\n  inline size_t operator()(const Key* key) const {\n    return reinterpret_cast<size_t>(key);\n  }\n};\n\n// Unlike the old SGI version, the TR1 \"hash\" does not special-case char*.  So,\n// we go ahead and provide our own implementation.\ntemplate <>\nstruct hash<const char*> {\n  inline size_t operator()(const char* str) const {\n    size_t result = 0;\n    for (; *str != '\\0'; str++) {\n      result = 5 * result + *str;\n    }\n    return result;\n  }\n};\n\ntemplate<>\nstruct hash<bool> {\n  size_t operator()(bool x) const {\n    return static_cast<size_t>(x);\n  }\n};\n\ntemplate <typename Key, typename Data,\n          typename HashFcn = hash<Key>,\n          typename EqualKey = std::equal_to<Key>,\n          typename Alloc = std::allocator< std::pair<const Key, Data> > >\nclass hash_map\n    : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<\n          Key, Data, HashFcn, EqualKey, Alloc> {\n  typedef GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_MAP_CLASS<\n      Key, Data, HashFcn, EqualKey, Alloc> BaseClass;\n\n public:\n  hash_map(int a = 0, const HashFcn& b = HashFcn(),\n           const EqualKey& c = EqualKey(),\n           const Alloc& d = Alloc()) : BaseClass(a, b, c, d) {}\n\n  HashFcn hash_function() const { return HashFcn(); }\n};\n\ntemplate <typename Key, typename HashFcn = hash<Key>,\n          typename EqualKey = std::equal_to<Key> >\nclass hash_set\n    : public GOOGLE_PROTOBUF_HASH_NAMESPACE::GOOGLE_PROTOBUF_HASH_SET_CLASS<\n          Key, HashFcn, EqualKey> {\n public:\n  hash_set(int = 0) {}\n\n  HashFcn hash_function() const { return HashFcn(); }\n};\n\n#endif  // !GOOGLE_PROTOBUF_MISSING_HASH\n\ntemplate <>\nstruct hash<string> {\n  inline size_t operator()(const string& key) const {\n    return hash<const char*>()(key.c_str());\n  }\n\n  static const size_t bucket_size = 4;\n  static const size_t min_buckets = 8;\n  inline bool operator()(const string& a, const string& b) 
const {\n    return a < b;\n  }\n};\n\ntemplate <typename First, typename Second>\nstruct hash<pair<First, Second> > {\n  inline size_t operator()(const pair<First, Second>& key) const {\n    size_t first_hash = hash<First>()(key.first);\n    size_t second_hash = hash<Second>()(key.second);\n\n    // FIXME(kenton):  What is the best way to compute this hash?  I have\n    // no idea!  This seems a bit better than an XOR.\n    return first_hash * ((1 << 16) - 1) + second_hash;\n  }\n\n  static const size_t bucket_size = 4;\n  static const size_t min_buckets = 8;\n  inline bool operator()(const pair<First, Second>& a,\n                           const pair<First, Second>& b) const {\n    return a < b;\n  }\n};\n\n// Used by GCC/SGI STL only.  (Why isn't this provided by the standard\n// library?  :( )\nstruct streq {\n  inline bool operator()(const char* a, const char* b) const {\n    return strcmp(a, b) == 0;\n  }\n};\n\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_STUBS_HASH_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/logging.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_STUBS_LOGGING_H_\n#define GOOGLE_PROTOBUF_STUBS_LOGGING_H_\n\n#include <google/protobuf/stubs/macros.h>\n#include <google/protobuf/stubs/port.h>\n\n// ===================================================================\n// emulates google3/base/logging.h\n\nnamespace google {\nnamespace protobuf {\n\nenum LogLevel {\n  LOGLEVEL_INFO,     // Informational.  This is never actually used by\n                     // libprotobuf.\n  LOGLEVEL_WARNING,  // Warns about issues that, although not technically a\n                     // problem now, could cause problems in the future.  For\n                     // example, a // warning will be printed when parsing a\n                     // message that is near the message size limit.\n  LOGLEVEL_ERROR,    // An error occurred which should never happen during\n                     // normal use.\n  LOGLEVEL_FATAL,    // An error occurred from which the library cannot\n                     // recover.  
This usually indicates a programming error\n                     // in the code which calls the library, especially when\n                     // compiled in debug mode.\n\n#ifdef NDEBUG\n  LOGLEVEL_DFATAL = LOGLEVEL_ERROR\n#else\n  LOGLEVEL_DFATAL = LOGLEVEL_FATAL\n#endif\n};\n\nclass StringPiece;\nnamespace util {\nclass Status;\n}\nclass uint128;\nnamespace internal {\n\nclass LogFinisher;\n\nclass LIBPROTOBUF_EXPORT LogMessage {\n public:\n  LogMessage(LogLevel level, const char* filename, int line);\n  ~LogMessage();\n\n  LogMessage& operator<<(const std::string& value);\n  LogMessage& operator<<(const char* value);\n  LogMessage& operator<<(char value);\n  LogMessage& operator<<(int value);\n  LogMessage& operator<<(uint value);\n  LogMessage& operator<<(long value);\n  LogMessage& operator<<(unsigned long value);\n  LogMessage& operator<<(long long value);\n  LogMessage& operator<<(unsigned long long value);\n  LogMessage& operator<<(double value);\n  LogMessage& operator<<(void* value);\n  LogMessage& operator<<(const StringPiece& value);\n  LogMessage& operator<<(const ::google::protobuf::util::Status& status);\n  LogMessage& operator<<(const uint128& value);\n\n private:\n  friend class LogFinisher;\n  void Finish();\n\n  LogLevel level_;\n  const char* filename_;\n  int line_;\n  std::string message_;\n};\n\n// Used to make the entire \"LOG(BLAH) << etc.\" expression have a void return\n// type and print a newline after each message.\nclass LIBPROTOBUF_EXPORT LogFinisher {\n public:\n  void operator=(LogMessage& other);\n};\n\ntemplate<typename T>\nbool IsOk(T status) { return status.ok(); }\ntemplate<>\ninline bool IsOk(bool status) { return status; }\n\n}  // namespace internal\n\n// Undef everything in case we're being mixed with some other Google library\n// which already defined them itself.  
Presumably all Google libraries will\n// support the same syntax for these so it should not be a big deal if they\n// end up using our definitions instead.\n#undef GOOGLE_LOG\n#undef GOOGLE_LOG_IF\n\n#undef GOOGLE_CHECK\n#undef GOOGLE_CHECK_OK\n#undef GOOGLE_CHECK_EQ\n#undef GOOGLE_CHECK_NE\n#undef GOOGLE_CHECK_LT\n#undef GOOGLE_CHECK_LE\n#undef GOOGLE_CHECK_GT\n#undef GOOGLE_CHECK_GE\n#undef GOOGLE_CHECK_NOTNULL\n\n#undef GOOGLE_DLOG\n#undef GOOGLE_DCHECK\n#undef GOOGLE_DCHECK_OK\n#undef GOOGLE_DCHECK_EQ\n#undef GOOGLE_DCHECK_NE\n#undef GOOGLE_DCHECK_LT\n#undef GOOGLE_DCHECK_LE\n#undef GOOGLE_DCHECK_GT\n#undef GOOGLE_DCHECK_GE\n\n#define GOOGLE_LOG(LEVEL)                                                 \\\n  ::google::protobuf::internal::LogFinisher() =                           \\\n    ::google::protobuf::internal::LogMessage(                             \\\n      ::google::protobuf::LOGLEVEL_##LEVEL, __FILE__, __LINE__)\n#define GOOGLE_LOG_IF(LEVEL, CONDITION) \\\n  !(CONDITION) ? (void)0 : GOOGLE_LOG(LEVEL)\n\n#define GOOGLE_CHECK(EXPRESSION) \\\n  GOOGLE_LOG_IF(FATAL, !(EXPRESSION)) << \"CHECK failed: \" #EXPRESSION \": \"\n#define GOOGLE_CHECK_OK(A) GOOGLE_CHECK(::google::protobuf::internal::IsOk(A))\n#define GOOGLE_CHECK_EQ(A, B) GOOGLE_CHECK((A) == (B))\n#define GOOGLE_CHECK_NE(A, B) GOOGLE_CHECK((A) != (B))\n#define GOOGLE_CHECK_LT(A, B) GOOGLE_CHECK((A) <  (B))\n#define GOOGLE_CHECK_LE(A, B) GOOGLE_CHECK((A) <= (B))\n#define GOOGLE_CHECK_GT(A, B) GOOGLE_CHECK((A) >  (B))\n#define GOOGLE_CHECK_GE(A, B) GOOGLE_CHECK((A) >= (B))\n\nnamespace internal {\ntemplate<typename T>\nT* CheckNotNull(const char* /* file */, int /* line */,\n                const char* name, T* val) {\n  if (val == NULL) {\n    GOOGLE_LOG(FATAL) << name;\n  }\n  return val;\n}\n}  // namespace internal\n#define GOOGLE_CHECK_NOTNULL(A) \\\n  ::google::protobuf::internal::CheckNotNull(\\\n      __FILE__, __LINE__, \"'\" #A \"' must not be NULL\", (A))\n\n#ifdef NDEBUG\n\n#define 
GOOGLE_DLOG(LEVEL) GOOGLE_LOG_IF(LEVEL, false)\n\n#define GOOGLE_DCHECK(EXPRESSION) while(false) GOOGLE_CHECK(EXPRESSION)\n#define GOOGLE_DCHECK_OK(E) GOOGLE_DCHECK(::google::protobuf::internal::IsOk(E))\n#define GOOGLE_DCHECK_EQ(A, B) GOOGLE_DCHECK((A) == (B))\n#define GOOGLE_DCHECK_NE(A, B) GOOGLE_DCHECK((A) != (B))\n#define GOOGLE_DCHECK_LT(A, B) GOOGLE_DCHECK((A) <  (B))\n#define GOOGLE_DCHECK_LE(A, B) GOOGLE_DCHECK((A) <= (B))\n#define GOOGLE_DCHECK_GT(A, B) GOOGLE_DCHECK((A) >  (B))\n#define GOOGLE_DCHECK_GE(A, B) GOOGLE_DCHECK((A) >= (B))\n\n#else  // NDEBUG\n\n#define GOOGLE_DLOG GOOGLE_LOG\n\n#define GOOGLE_DCHECK    GOOGLE_CHECK\n#define GOOGLE_DCHECK_OK GOOGLE_CHECK_OK\n#define GOOGLE_DCHECK_EQ GOOGLE_CHECK_EQ\n#define GOOGLE_DCHECK_NE GOOGLE_CHECK_NE\n#define GOOGLE_DCHECK_LT GOOGLE_CHECK_LT\n#define GOOGLE_DCHECK_LE GOOGLE_CHECK_LE\n#define GOOGLE_DCHECK_GT GOOGLE_CHECK_GT\n#define GOOGLE_DCHECK_GE GOOGLE_CHECK_GE\n\n#endif  // !NDEBUG\n\ntypedef void LogHandler(LogLevel level, const char* filename, int line,\n                        const std::string& message);\n\n// The protobuf library sometimes writes warning and error messages to\n// stderr.  These messages are primarily useful for developers, but may\n// also help end users figure out a problem.  If you would prefer that\n// these messages be sent somewhere other than stderr, call SetLogHandler()\n// to set your own handler.  This returns the old handler.  Set the handler\n// to NULL to ignore log messages (but see also LogSilencer, below).\n//\n// Obviously, SetLogHandler is not thread-safe.  You should only call it\n// at initialization time, and probably not from library code.  If you\n// simply want to suppress log messages temporarily (e.g. 
because you\n// have some code that tends to trigger them frequently and you know\n// the warnings are not important to you), use the LogSilencer class\n// below.\nLIBPROTOBUF_EXPORT LogHandler* SetLogHandler(LogHandler* new_func);\n\n// Create a LogSilencer if you want to temporarily suppress all log\n// messages.  As long as any LogSilencer objects exist, non-fatal\n// log messages will be discarded (the current LogHandler will *not*\n// be called).  Constructing a LogSilencer is thread-safe.  You may\n// accidentally suppress log messages occurring in another thread, but\n// since messages are generally for debugging purposes only, this isn't\n// a big deal.  If you want to intercept log messages, use SetLogHandler().\nclass LIBPROTOBUF_EXPORT LogSilencer {\n public:\n  LogSilencer();\n  ~LogSilencer();\n};\n\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_STUBS_LOGGING_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/macros.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_MACROS_H__\n#define GOOGLE_PROTOBUF_MACROS_H__\n\n#include <google/protobuf/stubs/port.h>\n\nnamespace google {\nnamespace protobuf {\n\n#undef GOOGLE_DISALLOW_EVIL_CONSTRUCTORS\n#define GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(TypeName)    \\\n  TypeName(const TypeName&);                           \\\n  void operator=(const TypeName&)\n\n#undef GOOGLE_DISALLOW_IMPLICIT_CONSTRUCTORS\n#define GOOGLE_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \\\n  TypeName();                                           \\\n  TypeName(const TypeName&);                            \\\n  void operator=(const TypeName&)\n\n// ===================================================================\n// from google3/base/basictypes.h\n\n// The GOOGLE_ARRAYSIZE(arr) macro returns the # of elements in an array arr.\n// The expression is a compile-time constant, and therefore can be\n// used in defining new arrays, for example.\n//\n// GOOGLE_ARRAYSIZE catches a few type errors.  
If you see a compiler error\n//\n//   \"warning: division by zero in ...\"\n//\n// when using GOOGLE_ARRAYSIZE, you are (wrongfully) giving it a pointer.\n// You should only use GOOGLE_ARRAYSIZE on statically allocated arrays.\n//\n// The following comments are on the implementation details, and can\n// be ignored by the users.\n//\n// ARRAYSIZE(arr) works by inspecting sizeof(arr) (the # of bytes in\n// the array) and sizeof(*(arr)) (the # of bytes in one array\n// element).  If the former is divisible by the latter, perhaps arr is\n// indeed an array, in which case the division result is the # of\n// elements in the array.  Otherwise, arr cannot possibly be an array,\n// and we generate a compiler error to prevent the code from\n// compiling.\n//\n// Since the size of bool is implementation-defined, we need to cast\n// !(sizeof(a) & sizeof(*(a))) to size_t in order to ensure the final\n// result has type size_t.\n//\n// This macro is not perfect as it wrongfully accepts certain\n// pointers, namely where the pointer size is divisible by the pointee\n// size.  Since all our code has to go through a 32-bit compiler,\n// where a pointer is 4 bytes, this means all pointers to a type whose\n// size is 3 or greater than 4 will be (righteously) rejected.\n//\n// Kudos to Jorg Brown for this simple and elegant implementation.\n\n#undef GOOGLE_ARRAYSIZE\n#define GOOGLE_ARRAYSIZE(a) \\\n  ((sizeof(a) / sizeof(*(a))) / \\\n   static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))\n\n// The COMPILE_ASSERT macro can be used to verify that a compile time\n// expression is true. For example, you could use it to verify the\n// size of a static array:\n//\n//   COMPILE_ASSERT(ARRAYSIZE(content_type_names) == CONTENT_NUM_TYPES,\n//                  content_type_names_incorrect_size);\n//\n// or to make sure a struct is smaller than a certain size:\n//\n//   COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);\n//\n// The second argument to the macro is the name of the variable. 
If\n// the expression is false, most compilers will issue a warning/error\n// containing the name of the variable.\n\nnamespace internal {\n\ntemplate <bool>\nstruct CompileAssert {\n};\n\n}  // namespace internal\n\n#undef GOOGLE_COMPILE_ASSERT\n#if __cplusplus >= 201103L\n#define GOOGLE_COMPILE_ASSERT(expr, msg) static_assert(expr, #msg)\n#else\n#define GOOGLE_COMPILE_ASSERT(expr, msg) \\\n  ::google::protobuf::internal::CompileAssert<(bool(expr))> \\\n          msg[bool(expr) ? 1 : -1]; \\\n  (void)msg\n// Implementation details of COMPILE_ASSERT:\n//\n// - COMPILE_ASSERT works by defining an array type that has -1\n//   elements (and thus is invalid) when the expression is false.\n//\n// - The simpler definition\n//\n//     #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]\n//\n//   does not work, as gcc supports variable-length arrays whose sizes\n//   are determined at run-time (this is gcc's extension and not part\n//   of the C++ standard).  As a result, gcc fails to reject the\n//   following code with the simple definition:\n//\n//     int foo;\n//     COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is\n//                               // not a compile-time constant.\n//\n// - By using the type CompileAssert<(bool(expr))>, we ensures that\n//   expr is a compile-time constant.  (Template arguments must be\n//   determined at compile-time.)\n//\n// - The outter parentheses in CompileAssert<(bool(expr))> are necessary\n//   to work around a bug in gcc 3.4.4 and 4.0.1.  If we had written\n//\n//     CompileAssert<bool(expr)>\n//\n//   instead, these compilers will refuse to compile\n//\n//     COMPILE_ASSERT(5 > 0, some_message);\n//\n//   (They seem to think the \">\" in \"5 > 0\" marks the end of the\n//   template argument list.)\n//\n// - The array size is (bool(expr) ? 1 : -1), instead of simply\n//\n//     ((expr) ? 1 : -1).\n//\n//   This is to avoid running into a bug in MS VC 7.1, which\n//   causes ((0.0) ? 
1 : -1) to incorrectly evaluate to 1.\n#endif  // __cplusplus >= 201103L\n\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_MACROS_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/mutex.h",
    "content": "// Copyright (c) 2006, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_STUBS_MUTEX_H_\n#define GOOGLE_PROTOBUF_STUBS_MUTEX_H_\n\n#ifdef GOOGLE_PROTOBUF_NO_THREADLOCAL\n#include <pthread.h>\n#endif\n\n#include <google/protobuf/stubs/macros.h>\n\n// ===================================================================\n// emulates google3/base/mutex.h\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// A Mutex is a non-reentrant (aka non-recursive) mutex.  At most one thread T\n// may hold a mutex at a given time.  If T attempts to Lock() the same Mutex\n// while holding it, T will deadlock.\nclass LIBPROTOBUF_EXPORT Mutex {\n public:\n  // Create a Mutex that is not held by anybody.\n  Mutex();\n\n  // Destructor\n  ~Mutex();\n\n  // Block if necessary until this Mutex is free, then acquire it exclusively.\n  void Lock();\n\n  // Release this Mutex.  
Caller must hold it exclusively.\n  void Unlock();\n\n  // Crash if this Mutex is not held exclusively by this thread.\n  // May fail to crash when it should; will never crash when it should not.\n  void AssertHeld();\n\n private:\n  struct Internal;\n  Internal* mInternal;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Mutex);\n};\n\n// Undefine the macros  to workaround the conflicts with Google internal\n// MutexLock implementation.\n// TODO(liujisi): Remove the undef once internal macros are removed.\n#undef MutexLock\n#undef ReaderMutexLock\n#undef WriterMutexLock\n#undef MutexLockMaybe\n\n// MutexLock(mu) acquires mu when constructed and releases it when destroyed.\nclass LIBPROTOBUF_EXPORT MutexLock {\n public:\n  explicit MutexLock(Mutex *mu) : mu_(mu) { this->mu_->Lock(); }\n  ~MutexLock() { this->mu_->Unlock(); }\n private:\n  Mutex *const mu_;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MutexLock);\n};\n\n// TODO(kenton):  Implement these?  Hard to implement portably.\ntypedef MutexLock ReaderMutexLock;\ntypedef MutexLock WriterMutexLock;\n\n// MutexLockMaybe is like MutexLock, but is a no-op when mu is NULL.\nclass LIBPROTOBUF_EXPORT MutexLockMaybe {\n public:\n  explicit MutexLockMaybe(Mutex *mu) :\n    mu_(mu) { if (this->mu_ != NULL) { this->mu_->Lock(); } }\n  ~MutexLockMaybe() { if (this->mu_ != NULL) { this->mu_->Unlock(); } }\n private:\n  Mutex *const mu_;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MutexLockMaybe);\n};\n\n#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)\ntemplate<typename T>\nclass ThreadLocalStorage {\n public:\n  ThreadLocalStorage() {\n    pthread_key_create(&key_, &ThreadLocalStorage::Delete);\n  }\n  ~ThreadLocalStorage() {\n    pthread_key_delete(key_);\n  }\n  T* Get() {\n    T* result = static_cast<T*>(pthread_getspecific(key_));\n    if (result == NULL) {\n      result = new T();\n      pthread_setspecific(key_, result);\n    }\n    return result;\n  }\n private:\n  static void Delete(void* value) {\n    delete static_cast<T*>(value);\n  }\n  
pthread_key_t key_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ThreadLocalStorage);\n};\n#endif\n\n}  // namespace internal\n\n// We made these internal so that they would show up as such in the docs,\n// but we don't want to stick \"internal::\" in front of them everywhere.\nusing internal::Mutex;\nusing internal::MutexLock;\nusing internal::ReaderMutexLock;\nusing internal::WriterMutexLock;\nusing internal::MutexLockMaybe;\n\n\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_STUBS_MUTEX_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/once.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//\n// emulates google3/base/once.h\n//\n// This header is intended to be included only by internal .cc files and\n// generated .pb.cc files.  
Users should not use this directly.\n//\n// This is basically a portable version of pthread_once().\n//\n// This header declares:\n// * A type called ProtobufOnceType.\n// * A macro GOOGLE_PROTOBUF_DECLARE_ONCE() which declares a variable of type\n//   ProtobufOnceType.  This is the only legal way to declare such a variable.\n//   The macro may only be used at the global scope (you cannot create local or\n//   class member variables of this type).\n// * A function GoogleOnceInit(ProtobufOnceType* once, void (*init_func)()).\n//   This function, when invoked multiple times given the same ProtobufOnceType\n//   object, will invoke init_func on the first call only, and will make sure\n//   none of the calls return before that first call to init_func has finished.\n// * The user can provide a parameter which GoogleOnceInit() forwards to the\n//   user-provided function when it is called. Usage example:\n//     int a = 10;\n//     GoogleOnceInit(&my_once, &MyFunctionExpectingIntArgument, &a);\n// * This implementation guarantees that ProtobufOnceType is a POD (i.e. no\n//   static initializer generated).\n//\n// This implements a way to perform lazy initialization.  It's more efficient\n// than using mutexes as no lock is needed if initialization has already\n// happened.\n//\n// Example usage:\n//   void Init();\n//   GOOGLE_PROTOBUF_DECLARE_ONCE(once_init);\n//\n//   // Calls Init() exactly once.\n//   void InitOnce() {\n//     GoogleOnceInit(&once_init, &Init);\n//   }\n//\n// Note that if GoogleOnceInit() is called before main() has begun, it must\n// only be called by the thread that will eventually call main() -- that is,\n// the thread that performs dynamic initialization.  In general this is a safe\n// assumption since people don't usually construct threads before main() starts,\n// but it is technically not guaranteed.  
Unfortunately, Win32 provides no way\n// whatsoever to statically-initialize its synchronization primitives, so our\n// only choice is to assume that dynamic initialization is single-threaded.\n\n#ifndef GOOGLE_PROTOBUF_STUBS_ONCE_H__\n#define GOOGLE_PROTOBUF_STUBS_ONCE_H__\n\n#include <google/protobuf/stubs/atomicops.h>\n#include <google/protobuf/stubs/callback.h>\n#include <google/protobuf/stubs/common.h>\n\nnamespace google {\nnamespace protobuf {\n\n#ifdef GOOGLE_PROTOBUF_NO_THREAD_SAFETY\n\ntypedef bool ProtobufOnceType;\n\n#define GOOGLE_PROTOBUF_ONCE_INIT false\n\ninline void GoogleOnceInit(ProtobufOnceType* once, void (*init_func)()) {\n  if (!*once) {\n    *once = true;\n    init_func();\n  }\n}\n\ntemplate <typename Arg>\ninline void GoogleOnceInit(ProtobufOnceType* once, void (*init_func)(Arg),\n    Arg arg) {\n  if (!*once) {\n    *once = true;\n    init_func(arg);\n  }\n}\n\n#else\n\nenum {\n  ONCE_STATE_UNINITIALIZED = 0,\n  ONCE_STATE_EXECUTING_CLOSURE = 1,\n  ONCE_STATE_DONE = 2\n};\n\ntypedef internal::AtomicWord ProtobufOnceType;\n\n#define GOOGLE_PROTOBUF_ONCE_INIT ::google::protobuf::ONCE_STATE_UNINITIALIZED\n\nLIBPROTOBUF_EXPORT\nvoid GoogleOnceInitImpl(ProtobufOnceType* once, Closure* closure);\n\ninline void GoogleOnceInit(ProtobufOnceType* once, void (*init_func)()) {\n  if (internal::Acquire_Load(once) != ONCE_STATE_DONE) {\n    internal::FunctionClosure0 func(init_func, false);\n    GoogleOnceInitImpl(once, &func);\n  }\n}\n\ntemplate <typename Arg>\ninline void GoogleOnceInit(ProtobufOnceType* once, void (*init_func)(Arg*),\n    Arg* arg) {\n  if (internal::Acquire_Load(once) != ONCE_STATE_DONE) {\n    internal::FunctionClosure1<Arg*> func(init_func, false, arg);\n    GoogleOnceInitImpl(once, &func);\n  }\n}\n\n#endif  // GOOGLE_PROTOBUF_NO_THREAD_SAFETY\n\nclass GoogleOnceDynamic {\n public:\n  GoogleOnceDynamic() : state_(GOOGLE_PROTOBUF_ONCE_INIT) { }\n\n  // If this->Init() has not been called before by any thread,\n  // execute 
(*func_with_arg)(arg) then return.\n  // Otherwise, wait until that prior invocation has finished\n  // executing its function, then return.\n  template<typename T>\n  void Init(void (*func_with_arg)(T*), T* arg) {\n    GoogleOnceInit<T>(&this->state_,\n                      func_with_arg,\n                      arg);\n  }\n private:\n  ProtobufOnceType state_;\n};\n\n#define GOOGLE_PROTOBUF_DECLARE_ONCE(NAME) \\\n  ::google::protobuf::ProtobufOnceType NAME = GOOGLE_PROTOBUF_ONCE_INIT\n\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_STUBS_ONCE_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/platform_macros.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2012 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_PLATFORM_MACROS_H_\n#define GOOGLE_PROTOBUF_PLATFORM_MACROS_H_\n\n#define GOOGLE_PROTOBUF_PLATFORM_ERROR \\\n#error \"Host platform was not detected as supported by protobuf\"\n\n// Processor architecture detection.  
For more info on what's defined, see:\n//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx\n//   http://www.agner.org/optimize/calling_conventions.pdf\n//   or with gcc, run: \"echo | gcc -E -dM -\"\n#if defined(_M_X64) || defined(__x86_64__)\n#define GOOGLE_PROTOBUF_ARCH_X64 1\n#define GOOGLE_PROTOBUF_ARCH_64_BIT 1\n#elif defined(_M_IX86) || defined(__i386__)\n#define GOOGLE_PROTOBUF_ARCH_IA32 1\n#define GOOGLE_PROTOBUF_ARCH_32_BIT 1\n#elif defined(__QNX__)\n#define GOOGLE_PROTOBUF_ARCH_ARM_QNX 1\n#define GOOGLE_PROTOBUF_ARCH_32_BIT 1\n#elif defined(__ARMEL__)\n#define GOOGLE_PROTOBUF_ARCH_ARM 1\n#define GOOGLE_PROTOBUF_ARCH_32_BIT 1\n#elif defined(__aarch64__)\n#define GOOGLE_PROTOBUF_ARCH_AARCH64 1\n#define GOOGLE_PROTOBUF_ARCH_64_BIT 1\n#elif defined(__MIPSEL__)\n#if defined(__LP64__)\n#define GOOGLE_PROTOBUF_ARCH_MIPS64 1\n#define GOOGLE_PROTOBUF_ARCH_64_BIT 1\n#else\n#define GOOGLE_PROTOBUF_ARCH_MIPS 1\n#define GOOGLE_PROTOBUF_ARCH_32_BIT 1\n#endif\n#elif defined(__pnacl__)\n#define GOOGLE_PROTOBUF_ARCH_32_BIT 1\n#elif defined(sparc)\n#define GOOGLE_PROTOBUF_ARCH_SPARC 1\n#if defined(__sparc_v9__) || defined(__sparcv9) || defined(__arch64__)\n#define GOOGLE_PROTOBUF_ARCH_64_BIT 1\n#else\n#define GOOGLE_PROTOBUF_ARCH_32_BIT 1\n#endif\n#elif defined(_POWER) || defined(__powerpc64__) || defined(__PPC64__)\n#define GOOGLE_PROTOBUF_ARCH_POWER 1\n#define GOOGLE_PROTOBUF_ARCH_64_BIT 1\n#elif defined(__PPC__)\n#define GOOGLE_PROTOBUF_ARCH_PPC 1\n#define GOOGLE_PROTOBUF_ARCH_32_BIT 1\n#elif defined(__GNUC__)\n# if (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4))\n// We fallback to the generic Clang/GCC >= 4.7 implementation in atomicops.h\n# elif defined(__clang__)\n#  if !__has_extension(c_atomic)\nGOOGLE_PROTOBUF_PLATFORM_ERROR\n#  endif\n// We fallback to the generic Clang/GCC >= 4.7 implementation in atomicops.h\n# endif\n# if __LP64__\n#  define GOOGLE_PROTOBUF_ARCH_64_BIT 1\n# else\n#  define GOOGLE_PROTOBUF_ARCH_32_BIT 1\n# 
endif\n#else\nGOOGLE_PROTOBUF_PLATFORM_ERROR\n#endif\n\n#if defined(__APPLE__)\n#define GOOGLE_PROTOBUF_OS_APPLE\n#include <TargetConditionals.h>\n#if TARGET_OS_IPHONE\n#define GOOGLE_PROTOBUF_OS_IPHONE\n#endif\n#elif defined(__EMSCRIPTEN__)\n#define GOOGLE_PROTOBUF_OS_EMSCRIPTEN\n#elif defined(__native_client__)\n#define GOOGLE_PROTOBUF_OS_NACL\n#elif defined(sun)\n#define GOOGLE_PROTOBUF_OS_SOLARIS\n#elif defined(_AIX)\n#define GOOGLE_PROTOBUF_OS_AIX\n#elif defined(__ANDROID__)\n#define GOOGLE_PROTOBUF_OS_ANDROID\n#endif\n\n#undef GOOGLE_PROTOBUF_PLATFORM_ERROR\n\n#if defined(GOOGLE_PROTOBUF_OS_ANDROID) || defined(GOOGLE_PROTOBUF_OS_IPHONE)\n// Android ndk does not support the __thread keyword very well yet. Here\n// we use pthread_key_create()/pthread_getspecific()/... methods for\n// TLS support on android.\n// iOS also does not support the __thread keyword.\n#define GOOGLE_PROTOBUF_NO_THREADLOCAL\n#endif\n\n#endif  // GOOGLE_PROTOBUF_PLATFORM_MACROS_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/port.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_STUBS_PORT_H_\n#define GOOGLE_PROTOBUF_STUBS_PORT_H_\n\n#include <assert.h>\n#include <stdlib.h>\n#include <cstddef>\n#include <string>\n#include <string.h>\n#if defined(__osf__)\n// Tru64 lacks stdint.h, but has inttypes.h which defines a superset of\n// what stdint.h would define.\n#include <inttypes.h>\n#elif !defined(_MSC_VER)\n#include <stdint.h>\n#endif\n\n#undef PROTOBUF_LITTLE_ENDIAN\n#ifdef _WIN32\n  // Assuming windows is always little-endian.\n  // TODO(xiaofeng): The PROTOBUF_LITTLE_ENDIAN is not only used for\n  // optimization but also for correctness. We should define an\n  // different macro to test the big-endian code path in coded_stream.\n  #if !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)\n    #define PROTOBUF_LITTLE_ENDIAN 1\n  #endif\n  #if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)\n    // If MSVC has \"/RTCc\" set, it will complain about truncating casts at\n    // runtime.  
This file contains some intentional truncating casts.\n    #pragma runtime_checks(\"c\", off)\n  #endif\n#else\n  #include <sys/param.h>   // __BYTE_ORDER\n  #if ((defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \\\n         (defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN)) && \\\n      !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)\n    #define PROTOBUF_LITTLE_ENDIAN 1\n  #endif\n#endif\n#if defined(_MSC_VER) && defined(PROTOBUF_USE_DLLS)\n  #ifdef LIBPROTOBUF_EXPORTS\n    #define LIBPROTOBUF_EXPORT __declspec(dllexport)\n  #else\n    #define LIBPROTOBUF_EXPORT __declspec(dllimport)\n  #endif\n  #ifdef LIBPROTOC_EXPORTS\n    #define LIBPROTOC_EXPORT   __declspec(dllexport)\n  #else\n    #define LIBPROTOC_EXPORT   __declspec(dllimport)\n  #endif\n#else\n  #define LIBPROTOBUF_EXPORT\n  #define LIBPROTOC_EXPORT\n#endif\n\n// These #includes are for the byte swap functions declared later on.\n#ifdef _MSC_VER\n#include <stdlib.h>  // NOLINT(build/include)\n#elif defined(__APPLE__)\n#include <libkern/OSByteOrder.h>\n#elif defined(__GLIBC__) || defined(__CYGWIN__)\n#include <byteswap.h>  // IWYU pragma: export\n#endif\n\n// ===================================================================\n// from google3/base/port.h\nnamespace google {\nnamespace protobuf {\n\ntypedef unsigned int uint;\n\n#ifdef _MSC_VER\ntypedef signed __int8  int8;\ntypedef __int16 int16;\ntypedef __int32 int32;\ntypedef __int64 int64;\n\ntypedef unsigned __int8  uint8;\ntypedef unsigned __int16 uint16;\ntypedef unsigned __int32 uint32;\ntypedef unsigned __int64 uint64;\n#else\ntypedef int8_t int8;\ntypedef int16_t int16;\ntypedef int32_t int32;\ntypedef int64_t int64;\n\ntypedef uint8_t uint8;\ntypedef uint16_t uint16;\ntypedef uint32_t uint32;\ntypedef uint64_t uint64;\n#endif\n\n// long long macros to be used because gcc and vc++ use different suffixes,\n// and different size specifiers in format strings\n#undef GOOGLE_LONGLONG\n#undef GOOGLE_ULONGLONG\n#undef 
GOOGLE_LL_FORMAT\n\n#ifdef _MSC_VER\n#define GOOGLE_LONGLONG(x) x##I64\n#define GOOGLE_ULONGLONG(x) x##UI64\n#define GOOGLE_LL_FORMAT \"I64\"  // As in printf(\"%I64d\", ...)\n#else\n// By long long, we actually mean int64.\n#define GOOGLE_LONGLONG(x) x##LL\n#define GOOGLE_ULONGLONG(x) x##ULL\n// Used to format real long long integers.\n#define GOOGLE_LL_FORMAT \"ll\"  // As in \"%lld\". Note that \"q\" is poor form also.\n#endif\n\nstatic const int32 kint32max = 0x7FFFFFFF;\nstatic const int32 kint32min = -kint32max - 1;\nstatic const int64 kint64max = GOOGLE_LONGLONG(0x7FFFFFFFFFFFFFFF);\nstatic const int64 kint64min = -kint64max - 1;\nstatic const uint32 kuint32max = 0xFFFFFFFFu;\nstatic const uint64 kuint64max = GOOGLE_ULONGLONG(0xFFFFFFFFFFFFFFFF);\n\n// -------------------------------------------------------------------\n// Annotations:  Some parts of the code have been annotated in ways that might\n//   be useful to some compilers or tools, but are not supported universally.\n//   You can #define these annotations yourself if the default implementation\n//   is not right for you.\n\n#ifndef GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n#if defined(__GNUC__) && (__GNUC__ > 3 ||(__GNUC__ == 3 && __GNUC_MINOR__ >= 1))\n// For functions we want to force inline.\n// Introduced in gcc 3.1.\n#define GOOGLE_ATTRIBUTE_ALWAYS_INLINE __attribute__ ((always_inline))\n#else\n// Other compilers will have to figure it out for themselves.\n#define GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n#endif\n#endif\n\n#ifndef GOOGLE_ATTRIBUTE_NOINLINE\n#if defined(__GNUC__) && (__GNUC__ > 3 ||(__GNUC__ == 3 && __GNUC_MINOR__ >= 1))\n// For functions we want to force not inline.\n// Introduced in gcc 3.1.\n#define GOOGLE_ATTRIBUTE_NOINLINE __attribute__ ((noinline))\n#elif defined(_MSC_VER) && (_MSC_VER >= 1400)\n// Seems to have been around since at least Visual Studio 2005\n#define GOOGLE_ATTRIBUTE_NOINLINE __declspec(noinline)\n#else\n// Other compilers will have to figure it out for themselves.\n#define 
GOOGLE_ATTRIBUTE_NOINLINE\n#endif\n#endif\n\n#ifndef GOOGLE_ATTRIBUTE_NORETURN\n#ifdef __GNUC__\n// Tell the compiler that a given function never returns.\n#define GOOGLE_ATTRIBUTE_NORETURN __attribute__((noreturn))\n#else\n#define GOOGLE_ATTRIBUTE_NORETURN\n#endif\n#endif\n\n#ifndef GOOGLE_ATTRIBUTE_DEPRECATED\n#ifdef __GNUC__\n// If the method/variable/type is used anywhere, produce a warning.\n#define GOOGLE_ATTRIBUTE_DEPRECATED __attribute__((deprecated))\n#else\n#define GOOGLE_ATTRIBUTE_DEPRECATED\n#endif\n#endif\n\n#ifndef GOOGLE_PREDICT_TRUE\n#ifdef __GNUC__\n// Provided at least since GCC 3.0.\n#define GOOGLE_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))\n#else\n#define GOOGLE_PREDICT_TRUE(x) (x)\n#endif\n#endif\n\n#ifndef GOOGLE_PREDICT_FALSE\n#ifdef __GNUC__\n// Provided at least since GCC 3.0.\n#define GOOGLE_PREDICT_FALSE(x) (__builtin_expect(x, 0))\n#else\n#define GOOGLE_PREDICT_FALSE(x) (x)\n#endif\n#endif\n\n// Delimits a block of code which may write to memory which is simultaneously\n// written by other threads, but which has been determined to be thread-safe\n// (e.g. 
because it is an idempotent write).\n#ifndef GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN\n#define GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN()\n#endif\n#ifndef GOOGLE_SAFE_CONCURRENT_WRITES_END\n#define GOOGLE_SAFE_CONCURRENT_WRITES_END()\n#endif\n\n#if defined(__clang__) && defined(__has_cpp_attribute) \\\n    && !defined(GOOGLE_PROTOBUF_OS_APPLE)\n# if defined(GOOGLE_PROTOBUF_OS_NACL) || defined(EMSCRIPTEN) || \\\n     __has_cpp_attribute(clang::fallthrough)\n#  define GOOGLE_FALLTHROUGH_INTENDED [[clang::fallthrough]]\n# endif\n#endif\n\n#ifndef GOOGLE_FALLTHROUGH_INTENDED\n# define GOOGLE_FALLTHROUGH_INTENDED\n#endif\n\n#define GOOGLE_GUARDED_BY(x)\n#define GOOGLE_ATTRIBUTE_COLD\n\n// x86 and x86-64 can perform unaligned loads/stores directly.\n#if defined(_M_X64) || defined(__x86_64__) || \\\n    defined(_M_IX86) || defined(__i386__)\n\n#define GOOGLE_UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))\n#define GOOGLE_UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))\n#define GOOGLE_UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))\n\n#define GOOGLE_UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))\n#define GOOGLE_UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))\n#define GOOGLE_UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))\n\n#else\ninline uint16 GOOGLE_UNALIGNED_LOAD16(const void *p) {\n  uint16 t;\n  memcpy(&t, p, sizeof t);\n  return t;\n}\n\ninline uint32 GOOGLE_UNALIGNED_LOAD32(const void *p) {\n  uint32 t;\n  memcpy(&t, p, sizeof t);\n  return t;\n}\n\ninline uint64 GOOGLE_UNALIGNED_LOAD64(const void *p) {\n  uint64 t;\n  memcpy(&t, p, sizeof t);\n  return t;\n}\n\ninline void GOOGLE_UNALIGNED_STORE16(void *p, uint16 v) {\n  memcpy(p, &v, sizeof v);\n}\n\ninline void GOOGLE_UNALIGNED_STORE32(void *p, uint32 v) {\n  memcpy(p, &v, sizeof v);\n}\n\ninline void GOOGLE_UNALIGNED_STORE64(void *p, uint64 v) {\n  memcpy(p, &v, sizeof v);\n}\n#endif\n\n#if 
defined(_MSC_VER)\n#define GOOGLE_THREAD_LOCAL __declspec(thread)\n#else\n#define GOOGLE_THREAD_LOCAL __thread\n#endif\n\n// The following guarantees declaration of the byte swap functions.\n#ifdef _MSC_VER\n#define bswap_16(x) _byteswap_ushort(x)\n#define bswap_32(x) _byteswap_ulong(x)\n#define bswap_64(x) _byteswap_uint64(x)\n\n#elif defined(__APPLE__)\n// Mac OS X / Darwin features\n#define bswap_16(x) OSSwapInt16(x)\n#define bswap_32(x) OSSwapInt32(x)\n#define bswap_64(x) OSSwapInt64(x)\n\n#elif !defined(__GLIBC__) && !defined(__CYGWIN__)\n\nstatic inline uint16 bswap_16(uint16 x) {\n  return static_cast<uint16>(((x & 0xFF) << 8) | ((x & 0xFF00) >> 8));\n}\n#define bswap_16(x) bswap_16(x)\nstatic inline uint32 bswap_32(uint32 x) {\n  return (((x & 0xFF) << 24) |\n          ((x & 0xFF00) << 8) |\n          ((x & 0xFF0000) >> 8) |\n          ((x & 0xFF000000) >> 24));\n}\n#define bswap_32(x) bswap_32(x)\nstatic inline uint64 bswap_64(uint64 x) {\n  return (((x & GOOGLE_ULONGLONG(0xFF)) << 56) |\n          ((x & GOOGLE_ULONGLONG(0xFF00)) << 40) |\n          ((x & GOOGLE_ULONGLONG(0xFF0000)) << 24) |\n          ((x & GOOGLE_ULONGLONG(0xFF000000)) << 8) |\n          ((x & GOOGLE_ULONGLONG(0xFF00000000)) >> 8) |\n          ((x & GOOGLE_ULONGLONG(0xFF0000000000)) >> 24) |\n          ((x & GOOGLE_ULONGLONG(0xFF000000000000)) >> 40) |\n          ((x & GOOGLE_ULONGLONG(0xFF00000000000000)) >> 56));\n}\n#define bswap_64(x) bswap_64(x)\n\n#endif\n\n// ===================================================================\n// from google3/util/bits/bits.h\n\nclass Bits {\n public:\n  static uint32 Log2FloorNonZero(uint32 n) {\n#if defined(__GNUC__)\n  return 31 ^ __builtin_clz(n);\n#elif defined(COMPILER_MSVC) && defined(_M_IX86)\n  _asm {\n    bsr ebx, n\n    mov n, ebx\n  }\n  return n;\n#else\n  return Log2FloorNonZero_Portable(n);\n#endif\n  }\n\n  static uint64 Log2FloorNonZero64(uint64 n) {\n#if defined(__GNUC__)\n  return 63 ^ __builtin_clzll(n);\n#else\n  return 
Log2FloorNonZero64_Portable(n);\n#endif\n  }\n private:\n  static int Log2FloorNonZero_Portable(uint32 n) {\n    if (n == 0)\n      return -1;\n    int log = 0;\n    uint32 value = n;\n    for (int i = 4; i >= 0; --i) {\n      int shift = (1 << i);\n      uint32 x = value >> shift;\n      if (x != 0) {\n        value = x;\n        log += shift;\n      }\n    }\n    assert(value == 1);\n    return log;\n  }\n\n  static int Log2FloorNonZero64_Portable(uint64 n) {\n    const uint32 topbits = static_cast<uint32>(n >> 32);\n    if (topbits == 0) {\n      // Top bits are zero, so scan in bottom bits\n      return Log2FloorNonZero(static_cast<uint32>(n));\n    } else {\n      return 32 + Log2FloorNonZero(topbits);\n    }\n  }\n};\n\n// ===================================================================\n// from google3/util/endian/endian.h\nLIBPROTOBUF_EXPORT uint32 ghtonl(uint32 x);\n\nclass BigEndian {\n public:\n#ifdef PROTOBUF_LITTLE_ENDIAN\n\n  static uint16 FromHost16(uint16 x) { return bswap_16(x); }\n  static uint16 ToHost16(uint16 x) { return bswap_16(x); }\n\n  static uint32 FromHost32(uint32 x) { return bswap_32(x); }\n  static uint32 ToHost32(uint32 x) { return bswap_32(x); }\n\n  static uint64 FromHost64(uint64 x) { return bswap_64(x); }\n  static uint64 ToHost64(uint64 x) { return bswap_64(x); }\n\n  static bool IsLittleEndian() { return true; }\n\n#else\n\n  static uint16 FromHost16(uint16 x) { return x; }\n  static uint16 ToHost16(uint16 x) { return x; }\n\n  static uint32 FromHost32(uint32 x) { return x; }\n  static uint32 ToHost32(uint32 x) { return x; }\n\n  static uint64 FromHost64(uint64 x) { return x; }\n  static uint64 ToHost64(uint64 x) { return x; }\n\n  static bool IsLittleEndian() { return false; }\n\n#endif /* ENDIAN */\n\n  // Functions to do unaligned loads and stores in big-endian order.\n  static uint16 Load16(const void *p) {\n    return ToHost16(GOOGLE_UNALIGNED_LOAD16(p));\n  }\n\n  static void Store16(void *p, uint16 v) {\n    
GOOGLE_UNALIGNED_STORE16(p, FromHost16(v));\n  }\n\n  static uint32 Load32(const void *p) {\n    return ToHost32(GOOGLE_UNALIGNED_LOAD32(p));\n  }\n\n  static void Store32(void *p, uint32 v) {\n    GOOGLE_UNALIGNED_STORE32(p, FromHost32(v));\n  }\n\n  static uint64 Load64(const void *p) {\n    return ToHost64(GOOGLE_UNALIGNED_LOAD64(p));\n  }\n\n  static void Store64(void *p, uint64 v) {\n    GOOGLE_UNALIGNED_STORE64(p, FromHost64(v));\n  }\n};\n\n\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_STUBS_PORT_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/scoped_ptr.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n#ifndef GOOGLE_PROTOBUF_STUBS_SCOPED_PTR_H_\n#define GOOGLE_PROTOBUF_STUBS_SCOPED_PTR_H_\n\n#include <google/protobuf/stubs/port.h>\n\nnamespace google {\nnamespace protobuf {\n\n// ===================================================================\n// from google3/base/scoped_ptr.h\n\nnamespace internal {\n\n//  This is an implementation designed to match the anticipated future TR2\n//  implementation of the scoped_ptr class, and its closely-related brethren,\n//  scoped_array, scoped_ptr_malloc, and make_scoped_ptr.\n\ntemplate <class C> class scoped_ptr;\ntemplate <class C> class scoped_array;\n\n// A scoped_ptr<T> is like a T*, except that the destructor of scoped_ptr<T>\n// automatically deletes the pointer it holds (if any).\n// That is, scoped_ptr<T> owns the T object that it points to.\n// Like a T*, a scoped_ptr<T> may hold either NULL or a pointer to a T object.\n//\n// The size of a scoped_ptr is small:\n// sizeof(scoped_ptr<C>) == sizeof(C*)\ntemplate <class C>\nclass scoped_ptr {\n public:\n\n  // The element type\n  typedef C element_type;\n\n  // Constructor.  Defaults to initializing with NULL.\n  // There is no way to create an uninitialized scoped_ptr.\n  // The input parameter must be allocated with new.\n  explicit scoped_ptr(C* p = NULL) : ptr_(p) { }\n\n  // Destructor.  
If there is a C object, delete it.\n  // We don't need to test ptr_ == NULL because C++ does that for us.\n  ~scoped_ptr() {\n    enum { type_must_be_complete = sizeof(C) };\n    delete ptr_;\n  }\n\n  // Reset.  Deletes the current owned object, if any.\n  // Then takes ownership of a new object, if given.\n  // this->reset(this->get()) works.\n  void reset(C* p = NULL) {\n    if (p != ptr_) {\n      enum { type_must_be_complete = sizeof(C) };\n      delete ptr_;\n      ptr_ = p;\n    }\n  }\n\n  // Accessors to get the owned object.\n  // operator* and operator-> will assert() if there is no current object.\n  C& operator*() const {\n    assert(ptr_ != NULL);\n    return *ptr_;\n  }\n  C* operator->() const  {\n    assert(ptr_ != NULL);\n    return ptr_;\n  }\n  C* get() const { return ptr_; }\n\n  // Comparison operators.\n  // These return whether two scoped_ptr refer to the same object, not just to\n  // two different but equal objects.\n  bool operator==(C* p) const { return ptr_ == p; }\n  bool operator!=(C* p) const { return ptr_ != p; }\n\n  // Swap two scoped pointers.\n  void swap(scoped_ptr& p2) {\n    C* tmp = ptr_;\n    ptr_ = p2.ptr_;\n    p2.ptr_ = tmp;\n  }\n\n  // Release a pointer.\n  // The return value is the current pointer held by this object.\n  // If this object holds a NULL pointer, the return value is NULL.\n  // After this operation, this object will hold a NULL pointer,\n  // and will not own the object any more.\n  C* release() {\n    C* retVal = ptr_;\n    ptr_ = NULL;\n    return retVal;\n  }\n\n private:\n  C* ptr_;\n\n  // Forbid comparison of scoped_ptr types.  
If C2 != C, it totally doesn't\n  // make sense, and if C2 == C, it still doesn't make sense because you should\n  // never have the same object owned by two different scoped_ptrs.\n  template <class C2> bool operator==(scoped_ptr<C2> const& p2) const;\n  template <class C2> bool operator!=(scoped_ptr<C2> const& p2) const;\n\n  // Disallow evil constructors\n  scoped_ptr(const scoped_ptr&);\n  void operator=(const scoped_ptr&);\n};\n\n// scoped_array<C> is like scoped_ptr<C>, except that the caller must allocate\n// with new [] and the destructor deletes objects with delete [].\n//\n// As with scoped_ptr<C>, a scoped_array<C> either points to an object\n// or is NULL.  A scoped_array<C> owns the object that it points to.\n//\n// Size: sizeof(scoped_array<C>) == sizeof(C*)\ntemplate <class C>\nclass scoped_array {\n public:\n\n  // The element type\n  typedef C element_type;\n\n  // Constructor.  Defaults to initializing with NULL.\n  // There is no way to create an uninitialized scoped_array.\n  // The input parameter must be allocated with new [].\n  explicit scoped_array(C* p = NULL) : array_(p) { }\n\n  // Destructor.  If there is a C object, delete it.\n  // We don't need to test ptr_ == NULL because C++ does that for us.\n  ~scoped_array() {\n    enum { type_must_be_complete = sizeof(C) };\n    delete[] array_;\n  }\n\n  // Reset.  
Deletes the current owned object, if any.\n  // Then takes ownership of a new object, if given.\n  // this->reset(this->get()) works.\n  void reset(C* p = NULL) {\n    if (p != array_) {\n      enum { type_must_be_complete = sizeof(C) };\n      delete[] array_;\n      array_ = p;\n    }\n  }\n\n  // Get one element of the current object.\n  // Will assert() if there is no current object, or index i is negative.\n  C& operator[](std::ptrdiff_t i) const {\n    assert(i >= 0);\n    assert(array_ != NULL);\n    return array_[i];\n  }\n\n  // Get a pointer to the zeroth element of the current object.\n  // If there is no current object, return NULL.\n  C* get() const {\n    return array_;\n  }\n\n  // Comparison operators.\n  // These return whether two scoped_array refer to the same object, not just to\n  // two different but equal objects.\n  bool operator==(C* p) const { return array_ == p; }\n  bool operator!=(C* p) const { return array_ != p; }\n\n  // Swap two scoped arrays.\n  void swap(scoped_array& p2) {\n    C* tmp = array_;\n    array_ = p2.array_;\n    p2.array_ = tmp;\n  }\n\n  // Release an array.\n  // The return value is the current pointer held by this object.\n  // If this object holds a NULL pointer, the return value is NULL.\n  // After this operation, this object will hold a NULL pointer,\n  // and will not own the object any more.\n  C* release() {\n    C* retVal = array_;\n    array_ = NULL;\n    return retVal;\n  }\n\n private:\n  C* array_;\n\n  // Forbid comparison of different scoped_array types.\n  template <class C2> bool operator==(scoped_array<C2> const& p2) const;\n  template <class C2> bool operator!=(scoped_array<C2> const& p2) const;\n\n  // Disallow evil constructors\n  scoped_array(const scoped_array&);\n  void operator=(const scoped_array&);\n};\n\n}  // namespace internal\n\n// We made these internal so that they would show up as such in the docs,\n// but we don't want to stick \"internal::\" in front of them everywhere.\nusing 
internal::scoped_ptr;\nusing internal::scoped_array;\n\n\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_STUBS_SCOPED_PTR_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/shared_ptr.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2014 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// from google3/util/gtl/shared_ptr.h\n\n#ifndef GOOGLE_PROTOBUF_STUBS_SHARED_PTR_H__\n#define GOOGLE_PROTOBUF_STUBS_SHARED_PTR_H__\n\n#include <google/protobuf/stubs/atomicops.h>\n\n#include <algorithm>  // for swap\n#include <stddef.h>\n#include <memory>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// Alias to std::shared_ptr for any C++11 platform,\n// and for any supported MSVC compiler.\n#if !defined(UTIL_GTL_USE_STD_SHARED_PTR) && \\\n    (defined(COMPILER_MSVC) || defined(LANG_CXX11))\n#define UTIL_GTL_USE_STD_SHARED_PTR 1\n#endif\n\n#if defined(UTIL_GTL_USE_STD_SHARED_PTR) && UTIL_GTL_USE_STD_SHARED_PTR\n\n// These are transitional.  
They will be going away soon.\n// Please just #include <memory> and just type std::shared_ptr yourself, instead\n// of relying on this file.\n//\n// Migration doc: http://go/std-shared-ptr-lsc\nusing std::enable_shared_from_this;\nusing std::shared_ptr;\nusing std::static_pointer_cast;\nusing std::weak_ptr;\n\n#else  // below, UTIL_GTL_USE_STD_SHARED_PTR not set or set to 0.\n\n// For everything else there is the google3 implementation.\ninline bool RefCountDec(volatile Atomic32 *ptr) {\n  return Barrier_AtomicIncrement(ptr, -1) != 0;\n}\n\ninline void RefCountInc(volatile Atomic32 *ptr) {\n  NoBarrier_AtomicIncrement(ptr, 1);\n}\n\ntemplate <typename T> class shared_ptr;\ntemplate <typename T> class weak_ptr;\n\n// This class is an internal implementation detail for shared_ptr. If two\n// shared_ptrs point to the same object, they also share a control block.\n// An \"empty\" shared_pointer refers to NULL and also has a NULL control block.\n// It contains all of the state that's needed for reference counting or any\n// other kind of resource management. In this implementation the control block\n// happens to consist of two atomic words, the reference count (the number\n// of shared_ptrs that share ownership of the object) and the weak count\n// (the number of weak_ptrs that observe the object, plus 1 if the\n// refcount is nonzero).\n//\n// The \"plus 1\" is to prevent a race condition in the shared_ptr and\n// weak_ptr destructors. We need to make sure the control block is\n// only deleted once, so we need to make sure that at most one\n// object sees the weak count decremented from 1 to 0.\nclass SharedPtrControlBlock {\n  template <typename T> friend class shared_ptr;\n  template <typename T> friend class weak_ptr;\n private:\n  SharedPtrControlBlock() : refcount_(1), weak_count_(1) { }\n  Atomic32 refcount_;\n  Atomic32 weak_count_;\n};\n\n// Forward declaration. 
The class is defined below.\ntemplate <typename T> class enable_shared_from_this;\n\ntemplate <typename T>\nclass shared_ptr {\n  template <typename U> friend class weak_ptr;\n public:\n  typedef T element_type;\n\n  shared_ptr() : ptr_(NULL), control_block_(NULL) {}\n\n  explicit shared_ptr(T* ptr)\n      : ptr_(ptr),\n        control_block_(ptr != NULL ? new SharedPtrControlBlock : NULL) {\n    // If p is non-null and T inherits from enable_shared_from_this, we\n    // set up the data that shared_from_this needs.\n    MaybeSetupWeakThis(ptr);\n  }\n\n  // Copy constructor: makes this object a copy of ptr, and increments\n  // the reference count.\n  template <typename U>\n  shared_ptr(const shared_ptr<U>& ptr)\n      : ptr_(NULL),\n        control_block_(NULL) {\n    Initialize(ptr);\n  }\n  // Need non-templated version to prevent the compiler-generated default\n  shared_ptr(const shared_ptr<T>& ptr)\n      : ptr_(NULL),\n        control_block_(NULL) {\n    Initialize(ptr);\n  }\n\n  // Assignment operator. Replaces the existing shared_ptr with ptr.\n  // Increment ptr's reference count and decrement the one being replaced.\n  template <typename U>\n  shared_ptr<T>& operator=(const shared_ptr<U>& ptr) {\n    if (ptr_ != ptr.ptr_) {\n      shared_ptr<T> me(ptr);   // will hold our previous state to be destroyed.\n      swap(me);\n    }\n    return *this;\n  }\n\n  // Need non-templated version to prevent the compiler-generated default\n  shared_ptr<T>& operator=(const shared_ptr<T>& ptr) {\n    if (ptr_ != ptr.ptr_) {\n      shared_ptr<T> me(ptr);   // will hold our previous state to be destroyed.\n      swap(me);\n    }\n    return *this;\n  }\n\n  // TODO(austern): Consider providing this constructor. The draft C++ standard\n  // (20.8.10.2.1) includes it. However, it says that this constructor throws\n  // a bad_weak_ptr exception when ptr is expired. 
Is it better to provide this\n  // constructor and make it do something else, like fail with a CHECK, or to\n  // leave this constructor out entirely?\n  //\n  // template <typename U>\n  // shared_ptr(const weak_ptr<U>& ptr);\n\n  ~shared_ptr() {\n    if (ptr_ != NULL) {\n      if (!RefCountDec(&control_block_->refcount_)) {\n        delete ptr_;\n\n        // weak_count_ is defined as the number of weak_ptrs that observe\n        // ptr_, plus 1 if refcount_ is nonzero.\n        if (!RefCountDec(&control_block_->weak_count_)) {\n          delete control_block_;\n        }\n      }\n    }\n  }\n\n  // Replaces underlying raw pointer with the one passed in.  The reference\n  // count is set to one (or zero if the pointer is NULL) for the pointer\n  // being passed in and decremented for the one being replaced.\n  //\n  // If you have a compilation error with this code, make sure you aren't\n  // passing NULL, nullptr, or 0 to this function.  Call reset without an\n  // argument to reset to a null ptr.\n  template <typename Y>\n  void reset(Y* p) {\n    if (p != ptr_) {\n      shared_ptr<T> tmp(p);\n      tmp.swap(*this);\n    }\n  }\n\n  void reset() {\n    reset(static_cast<T*>(NULL));\n  }\n\n  // Exchanges the contents of this with the contents of r.  This function\n  // supports more efficient swapping since it eliminates the need for a\n  // temporary shared_ptr object.\n  void swap(shared_ptr<T>& r) {\n    using std::swap;  // http://go/using-std-swap\n    swap(ptr_, r.ptr_);\n    swap(control_block_, r.control_block_);\n  }\n\n  // The following function is useful for gaining access to the underlying\n  // pointer when a shared_ptr remains in scope so the reference-count is\n  // known to be > 0 (e.g. for parameter passing).\n  T* get() const {\n    return ptr_;\n  }\n\n  T& operator*() const {\n    return *ptr_;\n  }\n\n  T* operator->() const {\n    return ptr_;\n  }\n\n  long use_count() const {\n    return control_block_ ? 
control_block_->refcount_ : 1;\n  }\n\n  bool unique() const {\n    return use_count() == 1;\n  }\n\n private:\n  // If r is non-empty, initialize *this to share ownership with r,\n  // increasing the underlying reference count.\n  // If r is empty, *this remains empty.\n  // Requires: this is empty, namely this->ptr_ == NULL.\n  template <typename U>\n  void Initialize(const shared_ptr<U>& r) {\n    // This performs a static_cast on r.ptr_ to U*, which is a no-op since it\n    // is already a U*. So initialization here requires that r.ptr_ is\n    // implicitly convertible to T*.\n    InitializeWithStaticCast<U>(r);\n  }\n\n  // Initializes *this as described in Initialize, but additionally performs a\n  // static_cast from r.ptr_ (V*) to U*.\n  // NOTE(gfc): We'd need a more general form to support const_pointer_cast and\n  // dynamic_pointer_cast, but those operations are sufficiently discouraged\n  // that supporting static_pointer_cast is sufficient.\n  template <typename U, typename V>\n  void InitializeWithStaticCast(const shared_ptr<V>& r) {\n    if (r.control_block_ != NULL) {\n      RefCountInc(&r.control_block_->refcount_);\n\n      ptr_ = static_cast<U*>(r.ptr_);\n      control_block_ = r.control_block_;\n    }\n  }\n\n  // Helper function for the constructor that takes a raw pointer. If T\n  // doesn't inherit from enable_shared_from_this<T> then we have nothing to\n  // do, so this function is trivial and inline. The other version is declared\n  // out of line, after the class definition of enable_shared_from_this.\n  void MaybeSetupWeakThis(enable_shared_from_this<T>* ptr);\n  void MaybeSetupWeakThis(...) 
{ }\n\n  T* ptr_;\n  SharedPtrControlBlock* control_block_;\n\n#ifndef SWIG\n  template <typename U>\n  friend class shared_ptr;\n\n  template <typename U, typename V>\n  friend shared_ptr<U> static_pointer_cast(const shared_ptr<V>& rhs);\n#endif\n};\n\n// Matches the interface of std::swap as an aid to generic programming.\ntemplate <typename T> void swap(shared_ptr<T>& r, shared_ptr<T>& s) {\n  r.swap(s);\n}\n\ntemplate <typename T, typename U>\nshared_ptr<T> static_pointer_cast(const shared_ptr<U>& rhs) {\n  shared_ptr<T> lhs;\n  lhs.template InitializeWithStaticCast<T>(rhs);\n  return lhs;\n}\n\n// See comments at the top of the file for a description of why this\n// class exists, and the draft C++ standard (as of July 2009 the\n// latest draft is N2914) for the detailed specification.\ntemplate <typename T>\nclass weak_ptr {\n  template <typename U> friend class weak_ptr;\n public:\n  typedef T element_type;\n\n  // Create an empty (i.e. already expired) weak_ptr.\n  weak_ptr() : ptr_(NULL), control_block_(NULL) { }\n\n  // Create a weak_ptr that observes the same object that ptr points\n  // to.  Note that there is no race condition here: we know that the\n  // control block can't disappear while we're looking at it because\n  // it is owned by at least one shared_ptr, ptr.\n  template <typename U> weak_ptr(const shared_ptr<U>& ptr) {\n    CopyFrom(ptr.ptr_, ptr.control_block_);\n  }\n\n  // Copy a weak_ptr. The object it points to might disappear, but we\n  // don't care: we're only working with the control block, and it can't\n  // disappear while we're looking at because it's owned by at least one\n  // weak_ptr, ptr.\n  template <typename U> weak_ptr(const weak_ptr<U>& ptr) {\n    CopyFrom(ptr.ptr_, ptr.control_block_);\n  }\n\n  // Need non-templated version to prevent default copy constructor\n  weak_ptr(const weak_ptr& ptr) {\n    CopyFrom(ptr.ptr_, ptr.control_block_);\n  }\n\n  // Destroy the weak_ptr. 
If no shared_ptr owns the control block, and if\n  // we are the last weak_ptr to own it, then it can be deleted. Note that\n  // weak_count_ is defined as the number of weak_ptrs sharing this control\n  // block, plus 1 if there are any shared_ptrs. We therefore know that it's\n  // safe to delete the control block when weak_count_ reaches 0, without\n  // having to perform any additional tests.\n  ~weak_ptr() {\n    if (control_block_ != NULL &&\n        !RefCountDec(&control_block_->weak_count_)) {\n      delete control_block_;\n    }\n  }\n\n  weak_ptr& operator=(const weak_ptr& ptr) {\n    if (&ptr != this) {\n      weak_ptr tmp(ptr);\n      tmp.swap(*this);\n    }\n    return *this;\n  }\n  template <typename U> weak_ptr& operator=(const weak_ptr<U>& ptr) {\n    weak_ptr tmp(ptr);\n    tmp.swap(*this);\n    return *this;\n  }\n  template <typename U> weak_ptr& operator=(const shared_ptr<U>& ptr) {\n    weak_ptr tmp(ptr);\n    tmp.swap(*this);\n    return *this;\n  }\n\n  void swap(weak_ptr& ptr) {\n    using std::swap;  // http://go/using-std-swap\n    swap(ptr_, ptr.ptr_);\n    swap(control_block_, ptr.control_block_);\n  }\n\n  void reset() {\n    weak_ptr tmp;\n    tmp.swap(*this);\n  }\n\n  // Return the number of shared_ptrs that own the object we are observing.\n  // Note that this number can be 0 (if this pointer has expired).\n  long use_count() const {\n    return control_block_ != NULL ? control_block_->refcount_ : 0;\n  }\n\n  bool expired() const { return use_count() == 0; }\n\n  // Return a shared_ptr that owns the object we are observing. If we\n  // have expired, the shared_ptr will be empty. We have to be careful\n  // about concurrency, though, since some other thread might be\n  // destroying the last owning shared_ptr while we're in this\n  // function.  
We want to increment the refcount only if it's nonzero\n  // and get the new value, and we want that whole operation to be\n  // atomic.\n  shared_ptr<T> lock() const {\n    shared_ptr<T> result;\n    if (control_block_ != NULL) {\n      Atomic32 old_refcount;\n      do {\n        old_refcount = control_block_->refcount_;\n        if (old_refcount == 0)\n          break;\n      } while (old_refcount !=\n               NoBarrier_CompareAndSwap(\n                   &control_block_->refcount_, old_refcount,\n                   old_refcount + 1));\n      if (old_refcount > 0) {\n        result.ptr_ = ptr_;\n        result.control_block_ = control_block_;\n      }\n    }\n\n    return result;\n  }\n\n private:\n  void CopyFrom(T* ptr, SharedPtrControlBlock* control_block) {\n    ptr_ = ptr;\n    control_block_ = control_block;\n    if (control_block_ != NULL)\n      RefCountInc(&control_block_->weak_count_);\n  }\n\n private:\n  element_type* ptr_;\n  SharedPtrControlBlock* control_block_;\n};\n\ntemplate <typename T> void swap(weak_ptr<T>& r, weak_ptr<T>& s) {\n  r.swap(s);\n}\n\n// See comments at the top of the file for a description of why this class\n// exists, and section 20.8.10.5 of the draft C++ standard (as of July 2009\n// the latest draft is N2914) for the detailed specification.\ntemplate <typename T>\nclass enable_shared_from_this {\n  friend class shared_ptr<T>;\n public:\n  // Precondition: there must be a shared_ptr that owns *this and that was\n  // created, directly or indirectly, from a raw pointer of type T*. 
(The\n  // latter part of the condition is technical but not quite redundant; it\n  // rules out some complicated uses involving inheritance hierarchies.)\n  shared_ptr<T> shared_from_this() {\n    // Behavior is undefined if the precondition isn't satisfied; we choose\n    // to die with a CHECK failure.\n    CHECK(!weak_this_.expired()) << \"No shared_ptr owns this object\";\n    return weak_this_.lock();\n  }\n  shared_ptr<const T> shared_from_this() const {\n    CHECK(!weak_this_.expired()) << \"No shared_ptr owns this object\";\n    return weak_this_.lock();\n  }\n\n protected:\n  enable_shared_from_this() { }\n  enable_shared_from_this(const enable_shared_from_this& other) { }\n  enable_shared_from_this& operator=(const enable_shared_from_this& other) {\n    return *this;\n  }\n  ~enable_shared_from_this() { }\n\n private:\n  weak_ptr<T> weak_this_;\n};\n\n// This is a helper function called by shared_ptr's constructor from a raw\n// pointer. If T inherits from enable_shared_from_this<T>, it sets up\n// weak_this_ so that shared_from_this works correctly. If T does not inherit\n// from weak_this we get a different overload, defined inline, which does\n// nothing.\ntemplate<typename T>\nvoid shared_ptr<T>::MaybeSetupWeakThis(enable_shared_from_this<T>* ptr) {\n  if (ptr) {\n    CHECK(ptr->weak_this_.expired()) << \"Object already owned by a shared_ptr\";\n    ptr->weak_this_ = *this;\n  }\n}\n\n#endif  // UTIL_GTL_USE_STD_SHARED_PTR\n\n}  // internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_STUBS_SHARED_PTR_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/singleton.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2014 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#ifndef GOOGLE_PROTOBUF_STUBS_SINGLETON_H__\n#define GOOGLE_PROTOBUF_STUBS_SINGLETON_H__\n\n#include <google/protobuf/stubs/atomicops.h>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/once.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\ntemplate<typename T>\nclass Singleton {\n public:\n  static T* get() {\n    GoogleOnceInit(&once_, &Singleton<T>::Init);\n    return instance_;\n  }\n  static void ShutDown() {\n    delete instance_;\n    instance_ = NULL;\n  }\n private:\n  static void Init() {\n    instance_ = new T();\n  }\n  static ProtobufOnceType once_;\n  static T* instance_;\n};\n\ntemplate<typename T>\nProtobufOnceType Singleton<T>::once_;\n\ntemplate<typename T>\nT* Singleton<T>::instance_ = NULL;\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_STUBS_SINGLETON_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/status.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#ifndef GOOGLE_PROTOBUF_STUBS_STATUS_H_\n#define GOOGLE_PROTOBUF_STUBS_STATUS_H_\n\n#include <iosfwd>\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/stringpiece.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace util {\nnamespace error {\n// These values must match error codes defined in google/rpc/code.proto.\nenum Code {\n  OK = 0,\n  CANCELLED = 1,\n  UNKNOWN = 2,\n  INVALID_ARGUMENT = 3,\n  DEADLINE_EXCEEDED = 4,\n  NOT_FOUND = 5,\n  ALREADY_EXISTS = 6,\n  PERMISSION_DENIED = 7,\n  UNAUTHENTICATED = 16,\n  RESOURCE_EXHAUSTED = 8,\n  FAILED_PRECONDITION = 9,\n  ABORTED = 10,\n  OUT_OF_RANGE = 11,\n  UNIMPLEMENTED = 12,\n  INTERNAL = 13,\n  UNAVAILABLE = 14,\n  DATA_LOSS = 15,\n};\n}  // namespace error\n\nclass LIBPROTOBUF_EXPORT Status {\n public:\n  // Creates a \"successful\" status.\n  Status();\n\n  // Create a status in the canonical error space with the specified\n  // code, and error message.  
If \"code == 0\", error_message is\n  // ignored and a Status object identical to Status::OK is\n  // constructed.\n  Status(error::Code error_code, StringPiece error_message);\n  Status(const Status&);\n  Status& operator=(const Status& x);\n  ~Status() {}\n\n  // Some pre-defined Status objects\n  static const Status OK;             // Identical to 0-arg constructor\n  static const Status CANCELLED;\n  static const Status UNKNOWN;\n\n  // Accessor\n  bool ok() const {\n    return error_code_ == error::OK;\n  }\n  int error_code() const {\n    return error_code_;\n  }\n  StringPiece error_message() const {\n    return error_message_;\n  }\n\n  bool operator==(const Status& x) const;\n  bool operator!=(const Status& x) const {\n    return !operator==(x);\n  }\n\n  // Return a combination of the error code name and message.\n  string ToString() const;\n\n private:\n  error::Code error_code_;\n  string error_message_;\n};\n\n// Prints a human-readable representation of 'x' to 'os'.\nLIBPROTOBUF_EXPORT ostream& operator<<(ostream& os, const Status& x);\n\n#define EXPECT_OK(value) EXPECT_TRUE((value).ok())\n\n}  // namespace util\n}  // namespace protobuf\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_STUBS_STATUS_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/stl_util.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// from google3/util/gtl/stl_util.h\n\n#ifndef GOOGLE_PROTOBUF_STUBS_STL_UTIL_H__\n#define GOOGLE_PROTOBUF_STUBS_STL_UTIL_H__\n\n#include <google/protobuf/stubs/common.h>\n\nnamespace google {\nnamespace protobuf {\n\n// STLDeleteContainerPointers()\n//  For a range within a container of pointers, calls delete\n//  (non-array version) on these pointers.\n// NOTE: for these three functions, we could just implement a DeleteObject\n// functor and then call for_each() on the range and functor, but this\n// requires us to pull in all of algorithm.h, which seems expensive.\n// For hash_[multi]set, it is important that this deletes behind the iterator\n// because the hash_set may call the hash function on the iterator when it is\n// advanced, which could result in the hash function trying to deference a\n// stale pointer.\ntemplate <class ForwardIterator>\nvoid STLDeleteContainerPointers(ForwardIterator begin,\n                                ForwardIterator end) {\n  while (begin != end) {\n    ForwardIterator temp = begin;\n    ++begin;\n    delete *temp;\n  }\n}\n\n// Inside Google, this function implements a horrible, disgusting hack in which\n// we reach into the string's private implementation and resize it without\n// initializing the new bytes.  In some cases doing this can significantly\n// improve performance.  However, since it's totally non-portable it has no\n// place in open source code.  
Feel free to fill this function in with your\n// own disgusting hack if you want the perf boost.\ninline void STLStringResizeUninitialized(string* s, size_t new_size) {\n  s->resize(new_size);\n}\n\n// Return a mutable char* pointing to a string's internal buffer,\n// which may not be null-terminated. Writing through this pointer will\n// modify the string.\n//\n// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the\n// next call to a string method that invalidates iterators.\n//\n// As of 2006-04, there is no standard-blessed way of getting a\n// mutable reference to a string's internal buffer. However, issue 530\n// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#530)\n// proposes this as the method. According to Matt Austern, this should\n// already work on all current implementations.\ninline char* string_as_array(string* str) {\n  // DO NOT USE const_cast<char*>(str->data())! See the unittest for why.\n  return str->empty() ? NULL : &*str->begin();\n}\n\n// STLDeleteElements() deletes all the elements in an STL container and clears\n// the container.  This function is suitable for use with a vector, set,\n// hash_set, or any other STL container which defines sensible begin(), end(),\n// and clear() methods.\n//\n// If container is NULL, this function is a no-op.\n//\n// As an alternative to calling STLDeleteElements() directly, consider\n// ElementDeleter (defined below), which ensures that your container's elements\n// are deleted when the ElementDeleter goes out of scope.\ntemplate <class T>\nvoid STLDeleteElements(T *container) {\n  if (!container) return;\n  STLDeleteContainerPointers(container->begin(), container->end());\n  container->clear();\n}\n\n// Given an STL container consisting of (key, value) pairs, STLDeleteValues\n// deletes all the \"value\" components and clears the container.  
Does nothing\n// in the case it's given a NULL pointer.\n\ntemplate <class T>\nvoid STLDeleteValues(T *v) {\n  if (!v) return;\n  for (typename T::iterator i = v->begin(); i != v->end(); ++i) {\n    delete i->second;\n  }\n  v->clear();\n}\n\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_STUBS_STL_UTIL_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/stringpiece.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// A StringPiece points to part or all of a string, Cord, double-quoted string\n// literal, or other string-like object.  A StringPiece does *not* own the\n// string to which it points.  A StringPiece is not null-terminated.\n//\n// You can use StringPiece as a function or method parameter. 
 A StringPiece\n// parameter can receive a double-quoted string literal argument, a \"const\n// char*\" argument, a string argument, or a StringPiece argument with no data\n// copying.  Systematic use of StringPiece for arguments reduces data\n// copies and strlen() calls.\n//\n// Prefer passing StringPieces by value:\n//   void MyFunction(StringPiece arg);\n// If circumstances require, you may also pass by const reference:\n//   void MyFunction(const StringPiece& arg);  // not preferred\n// Both of these have the same lifetime semantics.  Passing by value\n// generates slightly smaller code.  For more discussion, see the thread\n// go/stringpiecebyvalue on c-users.\n//\n// StringPiece is also suitable for local variables if you know that\n// the lifetime of the underlying object is longer than the lifetime\n// of your StringPiece variable.\n//\n// Beware of binding a StringPiece to a temporary:\n//   StringPiece sp = obj.MethodReturningString();  // BAD: lifetime problem\n//\n// This code is okay:\n//   string str = obj.MethodReturningString();  // str owns its contents\n//   StringPiece sp(str);  // GOOD, because str outlives sp\n//\n// StringPiece is sometimes a poor choice for a return value and usually a poor\n// choice for a data member.  If you do use a StringPiece this way, it is your\n// responsibility to ensure that the object pointed to by the StringPiece\n// outlives the StringPiece.\n//\n// A StringPiece may represent just part of a string; thus the name \"Piece\".\n// For example, when splitting a string, vector<StringPiece> is a natural data\n// type for the output.  For another example, a Cord is a non-contiguous,\n// potentially very long string-like object.  The Cord class has an interface\n// that iteratively provides StringPiece objects that point to the\n// successive pieces of a Cord object.\n//\n// A StringPiece is not null-terminated.  
If you write code that scans a\n// StringPiece, you must check its length before reading any characters.\n// Common idioms that work on null-terminated strings do not work on\n// StringPiece objects.\n//\n// There are several ways to create a null StringPiece:\n//   StringPiece()\n//   StringPiece(NULL)\n//   StringPiece(NULL, 0)\n// For all of the above, sp.data() == NULL, sp.length() == 0,\n// and sp.empty() == true.  Also, if you create a StringPiece with\n// a non-NULL pointer then sp.data() != NULL.  Once created,\n// sp.data() will stay either NULL or not-NULL, except if you call\n// sp.clear() or sp.set().\n//\n// Thus, you can use StringPiece(NULL) to signal an out-of-band value\n// that is different from other StringPiece values.  This is similar\n// to the way that const char* p1 = NULL; is different from\n// const char* p2 = \"\";.\n//\n// There are many ways to create an empty StringPiece:\n//   StringPiece()\n//   StringPiece(NULL)\n//   StringPiece(NULL, 0)\n//   StringPiece(\"\")\n//   StringPiece(\"\", 0)\n//   StringPiece(\"abcdef\", 0)\n//   StringPiece(\"abcdef\"+6, 0)\n// For all of the above, sp.length() will be 0 and sp.empty() will be true.\n// For some empty StringPiece values, sp.data() will be NULL.\n// For some empty StringPiece values, sp.data() will not be NULL.\n//\n// Be careful not to confuse: null StringPiece and empty StringPiece.\n// The set of empty StringPieces properly includes the set of null StringPieces.\n// That is, every null StringPiece is an empty StringPiece,\n// but some non-null StringPieces are empty Stringpieces too.\n//\n// All empty StringPiece values compare equal to each other.\n// Even a null StringPieces compares equal to a non-null empty StringPiece:\n//  StringPiece() == StringPiece(\"\", 0)\n//  StringPiece(NULL) == StringPiece(\"abc\", 0)\n//  StringPiece(NULL, 0) == StringPiece(\"abcdef\"+6, 0)\n//\n// Look carefully at this example:\n//   StringPiece(\"\") == NULL\n// True or false?  
TRUE, because StringPiece::operator== converts\n// the right-hand side from NULL to StringPiece(NULL),\n// and then compares two zero-length spans of characters.\n// However, we are working to make this example produce a compile error.\n//\n// Suppose you want to write:\n//   bool TestWhat?(StringPiece sp) { return sp == NULL; }  // BAD\n// Do not do that.  Write one of these instead:\n//   bool TestNull(StringPiece sp) { return sp.data() == NULL; }\n//   bool TestEmpty(StringPiece sp) { return sp.empty(); }\n// The intent of TestWhat? is unclear.  Did you mean TestNull or TestEmpty?\n// Right now, TestWhat? behaves likes TestEmpty.\n// We are working to make TestWhat? produce a compile error.\n// TestNull is good to test for an out-of-band signal.\n// TestEmpty is good to test for an empty StringPiece.\n//\n// Caveats (again):\n// (1) The lifetime of the pointed-to string (or piece of a string)\n//     must be longer than the lifetime of the StringPiece.\n// (2) There may or may not be a '\\0' character after the end of\n//     StringPiece data.\n// (3) A null StringPiece is empty.\n//     An empty StringPiece may or may not be a null StringPiece.\n\n#ifndef GOOGLE_PROTOBUF_STUBS_STRINGPIECE_H_\n#define GOOGLE_PROTOBUF_STUBS_STRINGPIECE_H_\n\n#include <assert.h>\n#include <stddef.h>\n#include <string.h>\n#include <iosfwd>\n#include <limits>\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/hash.h>\n\nnamespace google {\nnamespace protobuf {\n// StringPiece has *two* size types.\n// StringPiece::size_type\n//   is unsigned\n//   is 32 bits in LP32, 64 bits in LP64, 64 bits in LLP64\n//   no future changes intended\n// stringpiece_ssize_type\n//   is signed\n//   is 32 bits in LP32, 64 bits in LP64, 64 bits in LLP64\n//   future changes intended: http://go/64BitStringPiece\n//\ntypedef string::difference_type stringpiece_ssize_type;\n\n// STRINGPIECE_CHECK_SIZE protects us from 32-bit overflows.\n// TODO(mec): delete 
this after stringpiece_ssize_type goes 64 bit.\n#if !defined(NDEBUG)\n#define STRINGPIECE_CHECK_SIZE 1\n#elif defined(_FORTIFY_SOURCE) && _FORTIFY_SOURCE > 0\n#define STRINGPIECE_CHECK_SIZE 1\n#else\n#define STRINGPIECE_CHECK_SIZE 0\n#endif\n\nclass LIBPROTOBUF_EXPORT StringPiece {\n private:\n  const char* ptr_;\n  stringpiece_ssize_type length_;\n\n  // Prevent overflow in debug mode or fortified mode.\n  // sizeof(stringpiece_ssize_type) may be smaller than sizeof(size_t).\n  static stringpiece_ssize_type CheckedSsizeTFromSizeT(size_t size) {\n#if STRINGPIECE_CHECK_SIZE > 0\n#ifdef max\n#undef max\n#endif\n    if (size > static_cast<size_t>(\n        std::numeric_limits<stringpiece_ssize_type>::max())) {\n      // Some people grep for this message in logs\n      // so take care if you ever change it.\n      LogFatalSizeTooBig(size, \"size_t to int conversion\");\n    }\n#endif\n    return static_cast<stringpiece_ssize_type>(size);\n  }\n\n  // Out-of-line error path.\n  static void LogFatalSizeTooBig(size_t size, const char* details);\n\n public:\n  // We provide non-explicit singleton constructors so users can pass\n  // in a \"const char*\" or a \"string\" wherever a \"StringPiece\" is\n  // expected.\n  //\n  // Style guide exception granted:\n  // http://goto/style-guide-exception-20978288\n  StringPiece() : ptr_(NULL), length_(0) {}\n\n  StringPiece(const char* str)  // NOLINT(runtime/explicit)\n      : ptr_(str), length_(0) {\n    if (str != NULL) {\n      length_ = CheckedSsizeTFromSizeT(strlen(str));\n    }\n  }\n\n  template <class Allocator>\n  StringPiece(  // NOLINT(runtime/explicit)\n      const std::basic_string<char, std::char_traits<char>, Allocator>& str)\n      : ptr_(str.data()), length_(0) {\n    length_ = CheckedSsizeTFromSizeT(str.size());\n  }\n\n  StringPiece(const char* offset, stringpiece_ssize_type len)\n      : ptr_(offset), length_(len) {\n    assert(len >= 0);\n  }\n\n  // Substring of another StringPiece.\n  // pos must be 
non-negative and <= x.length().\n  StringPiece(StringPiece x, stringpiece_ssize_type pos);\n  // Substring of another StringPiece.\n  // pos must be non-negative and <= x.length().\n  // len must be non-negative and will be pinned to at most x.length() - pos.\n  StringPiece(StringPiece x,\n              stringpiece_ssize_type pos,\n              stringpiece_ssize_type len);\n\n  // data() may return a pointer to a buffer with embedded NULs, and the\n  // returned buffer may or may not be null terminated.  Therefore it is\n  // typically a mistake to pass data() to a routine that expects a NUL\n  // terminated string.\n  const char* data() const { return ptr_; }\n  stringpiece_ssize_type size() const { return length_; }\n  stringpiece_ssize_type length() const { return length_; }\n  bool empty() const { return length_ == 0; }\n\n  void clear() {\n    ptr_ = NULL;\n    length_ = 0;\n  }\n\n  void set(const char* data, stringpiece_ssize_type len) {\n    assert(len >= 0);\n    ptr_ = data;\n    length_ = len;\n  }\n\n  void set(const char* str) {\n    ptr_ = str;\n    if (str != NULL)\n      length_ = CheckedSsizeTFromSizeT(strlen(str));\n    else\n      length_ = 0;\n  }\n\n  void set(const void* data, stringpiece_ssize_type len) {\n    ptr_ = reinterpret_cast<const char*>(data);\n    length_ = len;\n  }\n\n  char operator[](stringpiece_ssize_type i) const {\n    assert(0 <= i);\n    assert(i < length_);\n    return ptr_[i];\n  }\n\n  void remove_prefix(stringpiece_ssize_type n) {\n    assert(length_ >= n);\n    ptr_ += n;\n    length_ -= n;\n  }\n\n  void remove_suffix(stringpiece_ssize_type n) {\n    assert(length_ >= n);\n    length_ -= n;\n  }\n\n  // returns {-1, 0, 1}\n  int compare(StringPiece x) const {\n    const stringpiece_ssize_type min_size =\n        length_ < x.length_ ? 
length_ : x.length_;\n    int r = memcmp(ptr_, x.ptr_, min_size);\n    if (r < 0) return -1;\n    if (r > 0) return 1;\n    if (length_ < x.length_) return -1;\n    if (length_ > x.length_) return 1;\n    return 0;\n  }\n\n  string as_string() const {\n    return ToString();\n  }\n  // We also define ToString() here, since many other string-like\n  // interfaces name the routine that converts to a C++ string\n  // \"ToString\", and it's confusing to have the method that does that\n  // for a StringPiece be called \"as_string()\".  We also leave the\n  // \"as_string()\" method defined here for existing code.\n  string ToString() const {\n    if (ptr_ == NULL) return string();\n    return string(data(), size());\n  }\n\n  operator string() const {\n    return ToString();\n  }\n\n  void CopyToString(string* target) const;\n  void AppendToString(string* target) const;\n\n  bool starts_with(StringPiece x) const {\n    return (length_ >= x.length_) && (memcmp(ptr_, x.ptr_, x.length_) == 0);\n  }\n\n  bool ends_with(StringPiece x) const {\n    return ((length_ >= x.length_) &&\n            (memcmp(ptr_ + (length_-x.length_), x.ptr_, x.length_) == 0));\n  }\n\n  // Checks whether StringPiece starts with x and if so advances the beginning\n  // of it to past the match.  
It's basically a shortcut for starts_with\n  // followed by remove_prefix.\n  bool Consume(StringPiece x);\n  // Like above but for the end of the string.\n  bool ConsumeFromEnd(StringPiece x);\n\n  // standard STL container boilerplate\n  typedef char value_type;\n  typedef const char* pointer;\n  typedef const char& reference;\n  typedef const char& const_reference;\n  typedef size_t size_type;\n  typedef ptrdiff_t difference_type;\n  static const size_type npos;\n  typedef const char* const_iterator;\n  typedef const char* iterator;\n  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;\n  typedef std::reverse_iterator<iterator> reverse_iterator;\n  iterator begin() const { return ptr_; }\n  iterator end() const { return ptr_ + length_; }\n  const_reverse_iterator rbegin() const {\n    return const_reverse_iterator(ptr_ + length_);\n  }\n  const_reverse_iterator rend() const {\n    return const_reverse_iterator(ptr_);\n  }\n  stringpiece_ssize_type max_size() const { return length_; }\n  stringpiece_ssize_type capacity() const { return length_; }\n\n  // cpplint.py emits a false positive [build/include_what_you_use]\n  stringpiece_ssize_type copy(char* buf, size_type n, size_type pos = 0) const;  // NOLINT\n\n  bool contains(StringPiece s) const;\n\n  stringpiece_ssize_type find(StringPiece s, size_type pos = 0) const;\n  stringpiece_ssize_type find(char c, size_type pos = 0) const;\n  stringpiece_ssize_type rfind(StringPiece s, size_type pos = npos) const;\n  stringpiece_ssize_type rfind(char c, size_type pos = npos) const;\n\n  stringpiece_ssize_type find_first_of(StringPiece s, size_type pos = 0) const;\n  stringpiece_ssize_type find_first_of(char c, size_type pos = 0) const {\n    return find(c, pos);\n  }\n  stringpiece_ssize_type find_first_not_of(StringPiece s,\n                                           size_type pos = 0) const;\n  stringpiece_ssize_type find_first_not_of(char c, size_type pos = 0) const;\n  stringpiece_ssize_type 
find_last_of(StringPiece s,\n                                      size_type pos = npos) const;\n  stringpiece_ssize_type find_last_of(char c, size_type pos = npos) const {\n    return rfind(c, pos);\n  }\n  stringpiece_ssize_type find_last_not_of(StringPiece s,\n                                          size_type pos = npos) const;\n  stringpiece_ssize_type find_last_not_of(char c, size_type pos = npos) const;\n\n  StringPiece substr(size_type pos, size_type n = npos) const;\n};\n\n// This large function is defined inline so that in a fairly common case where\n// one of the arguments is a literal, the compiler can elide a lot of the\n// following comparisons.\ninline bool operator==(StringPiece x, StringPiece y) {\n  stringpiece_ssize_type len = x.size();\n  if (len != y.size()) {\n    return false;\n  }\n\n  return x.data() == y.data() || len <= 0 ||\n      memcmp(x.data(), y.data(), len) == 0;\n}\n\ninline bool operator!=(StringPiece x, StringPiece y) {\n  return !(x == y);\n}\n\ninline bool operator<(StringPiece x, StringPiece y) {\n  const stringpiece_ssize_type min_size =\n      x.size() < y.size() ? x.size() : y.size();\n  const int r = memcmp(x.data(), y.data(), min_size);\n  return (r < 0) || (r == 0 && x.size() < y.size());\n}\n\ninline bool operator>(StringPiece x, StringPiece y) {\n  return y < x;\n}\n\ninline bool operator<=(StringPiece x, StringPiece y) {\n  return !(x > y);\n}\n\ninline bool operator>=(StringPiece x, StringPiece y) {\n  return !(x < y);\n}\n\n// allow StringPiece to be logged\nextern std::ostream& operator<<(std::ostream& o, StringPiece piece);\n\nnamespace internal {\n// StringPiece is not a POD and can not be used in an union (pre C++11). 
We\n// need a POD version of it.\nstruct StringPiecePod {\n  // Create from a StringPiece.\n  static StringPiecePod CreateFromStringPiece(StringPiece str) {\n    StringPiecePod pod;\n    pod.data_ = str.data();\n    pod.size_ = str.size();\n    return pod;\n  }\n\n  // Cast to StringPiece.\n  operator StringPiece() const { return StringPiece(data_, size_); }\n\n  bool operator==(const char* value) const {\n    return StringPiece(data_, size_) == StringPiece(value);\n  }\n\n  char operator[](stringpiece_ssize_type i) const {\n    assert(0 <= i);\n    assert(i < size_);\n    return data_[i];\n  }\n\n  const char* data() const { return data_; }\n\n  stringpiece_ssize_type size() const {\n    return size_;\n  }\n\n  std::string ToString() const { return std::string(data_, size_); }\n private:\n  const char* data_;\n  stringpiece_ssize_type size_;\n};\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\nGOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_START\ntemplate<> struct hash<StringPiece> {\n  size_t operator()(const StringPiece& s) const {\n    size_t result = 0;\n    for (const char *str = s.data(), *end = str + s.size(); str < end; str++) {  \n      result = 5 * result + *str;\n    }\n    return result;\n  }\n};\nGOOGLE_PROTOBUF_HASH_NAMESPACE_DECLARATION_END\n\n#endif  // STRINGS_STRINGPIECE_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/template_util.h",
    "content": "// Copyright 2005 Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// ----\n// Author: lar@google.com (Laramie Leavitt)\n//\n// Template metaprogramming utility functions.\n//\n// This code is compiled directly on many platforms, including client\n// platforms like Windows, Mac, and embedded systems.  
Before making\n// any changes here, make sure that you're not breaking any platforms.\n//\n//\n// The names chosen here reflect those used in tr1 and the boost::mpl\n// library, there are similar operations used in the Loki library as\n// well.  I prefer the boost names for 2 reasons:\n// 1.  I think that portions of the Boost libraries are more likely to\n// be included in the c++ standard.\n// 2.  It is not impossible that some of the boost libraries will be\n// included in our own build in the future.\n// Both of these outcomes means that we may be able to directly replace\n// some of these with boost equivalents.\n//\n#ifndef GOOGLE_PROTOBUF_TEMPLATE_UTIL_H_\n#define GOOGLE_PROTOBUF_TEMPLATE_UTIL_H_\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// Types small_ and big_ are guaranteed such that sizeof(small_) <\n// sizeof(big_)\ntypedef char small_;\n\nstruct big_ {\n  char dummy[2];\n};\n\n// Identity metafunction.\ntemplate <class T>\nstruct identity_ {\n  typedef T type;\n};\n\n// integral_constant, defined in tr1, is a wrapper for an integer\n// value. We don't really need this generality; we could get away\n// with hardcoding the integer type to bool. We use the fully\n// general integer_constant for compatibility with tr1.\n\ntemplate<class T, T v>\nstruct integral_constant {\n  static const T value = v;\n  typedef T value_type;\n  typedef integral_constant<T, v> type;\n};\n\ntemplate <class T, T v> const T integral_constant<T, v>::value;\n\n\n// Abbreviations: true_type and false_type are structs that represent boolean\n// true and false values. 
Also define the boost::mpl versions of those names,\n// true_ and false_.\ntypedef integral_constant<bool, true>  true_type;\ntypedef integral_constant<bool, false> false_type;\ntypedef true_type  true_;\ntypedef false_type false_;\n\n// if_ is a templatized conditional statement.\n// if_<cond, A, B> is a compile time evaluation of cond.\n// if_<>::type contains A if cond is true, B otherwise.\ntemplate<bool cond, typename A, typename B>\nstruct if_{\n  typedef A type;\n};\n\ntemplate<typename A, typename B>\nstruct if_<false, A, B> {\n  typedef B type;\n};\n\n\n// type_equals_ is a template type comparator, similar to Loki IsSameType.\n// type_equals_<A, B>::value is true iff \"A\" is the same type as \"B\".\n//\n// New code should prefer base::is_same, defined in base/type_traits.h.\n// It is functionally identical, but is_same is the standard spelling.\ntemplate<typename A, typename B>\nstruct type_equals_ : public false_ {\n};\n\ntemplate<typename A>\nstruct type_equals_<A, A> : public true_ {\n};\n\n// and_ is a template && operator.\n// and_<A, B>::value evaluates \"A::value && B::value\".\ntemplate<typename A, typename B>\nstruct and_ : public integral_constant<bool, (A::value && B::value)> {\n};\n\n// or_ is a template || operator.\n// or_<A, B>::value evaluates \"A::value || B::value\".\ntemplate<typename A, typename B>\nstruct or_ : public integral_constant<bool, (A::value || B::value)> {\n};\n\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_TEMPLATE_UTIL_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/stubs/type_traits.h",
    "content": "// Copyright (c) 2006, Google Inc.\n// All rights reserved.\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// ----\n// Author: Matt Austern\n//\n// This code is compiled directly on many platforms, including client\n// platforms like Windows, Mac, and embedded systems.  Before making\n// any changes here, make sure that you're not breaking any platforms.\n//\n// Define a small subset of tr1 type traits. 
The traits we define are:\n//   enable_if\n//   is_integral\n//   is_floating_point\n//   is_pointer\n//   is_enum\n//   is_reference\n//   is_pod\n//   has_trivial_constructor\n//   has_trivial_copy\n//   has_trivial_assign\n//   has_trivial_destructor\n//   remove_const\n//   remove_volatile\n//   remove_cv\n//   remove_reference\n//   add_reference\n//   remove_pointer\n//   is_same\n//   is_convertible\n// We can add more type traits as required.\n\n#ifndef GOOGLE_PROTOBUF_TYPE_TRAITS_H_\n#define GOOGLE_PROTOBUF_TYPE_TRAITS_H_\n\n#include <cstddef>                  // for NULL\n#include <utility>                  // For pair\n\n#include <google/protobuf/stubs/template_util.h>  // For true_type and false_type\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\ntemplate<typename B, typename D>\nstruct is_base_of {\n  typedef char (&yes)[1];\n  typedef char (&no)[2];\n\n  // BEGIN GOOGLE LOCAL MODIFICATION -- check is a #define on Mac.\n  #undef check\n  // END GOOGLE LOCAL MODIFICATION\n\n  static yes check(const B*);\n  static no check(const void*);\n\n  enum {\n    value = sizeof(check(static_cast<const D*>(NULL))) == sizeof(yes),\n  };\n};\n\ntemplate <bool cond, class T = void> struct enable_if;\ntemplate <class T> struct is_integral;\ntemplate <class T> struct is_floating_point;\ntemplate <class T> struct is_pointer;\n// MSVC can't compile this correctly, and neither can gcc 3.3.5 (at least)\n#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)\n// is_enum uses is_convertible, which is not available on MSVC.\ntemplate <class T> struct is_enum;\n#endif\ntemplate <class T> struct is_reference;\ntemplate <class T> struct is_pod;\ntemplate <class T> struct has_trivial_constructor;\ntemplate <class T> struct has_trivial_copy;\ntemplate <class T> struct has_trivial_assign;\ntemplate <class T> struct has_trivial_destructor;\ntemplate <class T> struct remove_const;\ntemplate <class T> struct remove_volatile;\ntemplate <class T> struct 
remove_cv;\ntemplate <class T> struct remove_reference;\ntemplate <class T> struct add_reference;\ntemplate <class T> struct remove_pointer;\ntemplate <class T, class U> struct is_same;\n#if !(defined(__GNUC__) && __GNUC__ <= 3)\ntemplate <class From, class To> struct is_convertible;\n#endif\n\n// enable_if, equivalent semantics to c++11 std::enable_if, specifically:\n//   \"If B is true, the member typedef type shall equal T; otherwise, there\n//    shall be no member typedef type.\"\n// Specified by 20.9.7.6 [Other transformations]\n\ntemplate<bool cond, class T> struct enable_if { typedef T type; };\ntemplate<class T> struct enable_if<false, T> {};\n// is_integral is false except for the built-in integer types. A\n// cv-qualified type is integral if and only if the underlying type is.\ntemplate <class T> struct is_integral : false_type { };\ntemplate<> struct is_integral<bool> : true_type { };\ntemplate<> struct is_integral<char> : true_type { };\ntemplate<> struct is_integral<unsigned char> : true_type { };\ntemplate<> struct is_integral<signed char> : true_type { };\n#if defined(_MSC_VER)\n// wchar_t is not by default a distinct type from unsigned short in\n// Microsoft C.\n// See http://msdn2.microsoft.com/en-us/library/dh8che7s(VS.80).aspx\ntemplate<> struct is_integral<__wchar_t> : true_type { };\n#else\ntemplate<> struct is_integral<wchar_t> : true_type { };\n#endif\ntemplate<> struct is_integral<short> : true_type { };\ntemplate<> struct is_integral<unsigned short> : true_type { };\ntemplate<> struct is_integral<int> : true_type { };\ntemplate<> struct is_integral<unsigned int> : true_type { };\ntemplate<> struct is_integral<long> : true_type { };\ntemplate<> struct is_integral<unsigned long> : true_type { };\n#ifdef HAVE_LONG_LONG\ntemplate<> struct is_integral<long long> : true_type { };\ntemplate<> struct is_integral<unsigned long long> : true_type { };\n#endif\n#if defined(_MSC_VER)\n// With VC, __int8, __int16, and __int32 are synonymous with 
standard types\n// with the same size, but __int64 has not equivalent (i.e., it's neither\n// long, nor long long and should be treated differnetly).\n// https://msdn.microsoft.com/en-us/library/29dh1w7z.aspx\ntemplate<> struct is_integral<__int64> : true_type { };\ntemplate<> struct is_integral<unsigned __int64> : true_type {};\n#endif\ntemplate <class T> struct is_integral<const T> : is_integral<T> { };\ntemplate <class T> struct is_integral<volatile T> : is_integral<T> { };\ntemplate <class T> struct is_integral<const volatile T> : is_integral<T> { };\n\n// is_floating_point is false except for the built-in floating-point types.\n// A cv-qualified type is integral if and only if the underlying type is.\ntemplate <class T> struct is_floating_point : false_type { };\ntemplate<> struct is_floating_point<float> : true_type { };\ntemplate<> struct is_floating_point<double> : true_type { };\ntemplate<> struct is_floating_point<long double> : true_type { };\ntemplate <class T> struct is_floating_point<const T>\n    : is_floating_point<T> { };\ntemplate <class T> struct is_floating_point<volatile T>\n    : is_floating_point<T> { };\ntemplate <class T> struct is_floating_point<const volatile T>\n    : is_floating_point<T> { };\n\n// is_pointer is false except for pointer types. 
A cv-qualified type (e.g.\n// \"int* const\", as opposed to \"int const*\") is cv-qualified if and only if\n// the underlying type is.\ntemplate <class T> struct is_pointer : false_type { };\ntemplate <class T> struct is_pointer<T*> : true_type { };\ntemplate <class T> struct is_pointer<const T> : is_pointer<T> { };\ntemplate <class T> struct is_pointer<volatile T> : is_pointer<T> { };\ntemplate <class T> struct is_pointer<const volatile T> : is_pointer<T> { };\n\n#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)\n\nnamespace type_traits_internal {\n\ntemplate <class T> struct is_class_or_union {\n  template <class U> static small_ tester(void (U::*)());\n  template <class U> static big_ tester(...);\n  static const bool value = sizeof(tester<T>(0)) == sizeof(small_);\n};\n\n// is_convertible chokes if the first argument is an array. That's why\n// we use add_reference here.\ntemplate <bool NotUnum, class T> struct is_enum_impl\n    : is_convertible<typename add_reference<T>::type, int> { };\n\ntemplate <class T> struct is_enum_impl<true, T> : false_type { };\n\n}  // namespace type_traits_internal\n\n// Specified by TR1 [4.5.1] primary type categories.\n\n// Implementation note:\n//\n// Each type is either void, integral, floating point, array, pointer,\n// reference, member object pointer, member function pointer, enum,\n// union or class. Out of these, only integral, floating point, reference,\n// class and enum types are potentially convertible to int. Therefore,\n// if a type is not a reference, integral, floating point or class and\n// is convertible to int, it's a enum. Adding cv-qualification to a type\n// does not change whether it's an enum.\n//\n// Is-convertible-to-int check is done only if all other checks pass,\n// because it can't be used with some types (e.g. 
void or classes with\n// inaccessible conversion operators).\ntemplate <class T> struct is_enum\n    : type_traits_internal::is_enum_impl<\n          is_same<T, void>::value ||\n              is_integral<T>::value ||\n              is_floating_point<T>::value ||\n              is_reference<T>::value ||\n              type_traits_internal::is_class_or_union<T>::value,\n          T> { };\n\ntemplate <class T> struct is_enum<const T> : is_enum<T> { };\ntemplate <class T> struct is_enum<volatile T> : is_enum<T> { };\ntemplate <class T> struct is_enum<const volatile T> : is_enum<T> { };\n\n#endif\n\n// is_reference is false except for reference types.\ntemplate<typename T> struct is_reference : false_type {};\ntemplate<typename T> struct is_reference<T&> : true_type {};\n\n\n// We can't get is_pod right without compiler help, so fail conservatively.\n// We will assume it's false except for arithmetic types, enumerations,\n// pointers and cv-qualified versions thereof. Note that std::pair<T,U>\n// is not a POD even if T and U are PODs.\ntemplate <class T> struct is_pod\n : integral_constant<bool, (is_integral<T>::value ||\n                            is_floating_point<T>::value ||\n#if !defined(_MSC_VER) && !(defined(__GNUC__) && __GNUC__ <= 3)\n                            // is_enum is not available on MSVC.\n                            is_enum<T>::value ||\n#endif\n                            is_pointer<T>::value)> { };\ntemplate <class T> struct is_pod<const T> : is_pod<T> { };\ntemplate <class T> struct is_pod<volatile T> : is_pod<T> { };\ntemplate <class T> struct is_pod<const volatile T> : is_pod<T> { };\n\n\n// We can't get has_trivial_constructor right without compiler help, so\n// fail conservatively. We will assume it's false except for: (1) types\n// for which is_pod is true. (2) std::pair of types with trivial\n// constructors. 
(3) array of a type with a trivial constructor.\n// (4) const versions thereof.\ntemplate <class T> struct has_trivial_constructor : is_pod<T> { };\ntemplate <class T, class U> struct has_trivial_constructor<std::pair<T, U> >\n  : integral_constant<bool,\n                      (has_trivial_constructor<T>::value &&\n                       has_trivial_constructor<U>::value)> { };\ntemplate <class A, int N> struct has_trivial_constructor<A[N]>\n  : has_trivial_constructor<A> { };\ntemplate <class T> struct has_trivial_constructor<const T>\n  : has_trivial_constructor<T> { };\n\n// We can't get has_trivial_copy right without compiler help, so fail\n// conservatively. We will assume it's false except for: (1) types\n// for which is_pod is true. (2) std::pair of types with trivial copy\n// constructors. (3) array of a type with a trivial copy constructor.\n// (4) const versions thereof.\ntemplate <class T> struct has_trivial_copy : is_pod<T> { };\ntemplate <class T, class U> struct has_trivial_copy<std::pair<T, U> >\n  : integral_constant<bool,\n                      (has_trivial_copy<T>::value &&\n                       has_trivial_copy<U>::value)> { };\ntemplate <class A, int N> struct has_trivial_copy<A[N]>\n  : has_trivial_copy<A> { };\ntemplate <class T> struct has_trivial_copy<const T> : has_trivial_copy<T> { };\n\n// We can't get has_trivial_assign right without compiler help, so fail\n// conservatively. We will assume it's false except for: (1) types\n// for which is_pod is true. (2) std::pair of types with trivial copy\n// constructors. 
(3) array of a type with a trivial assign constructor.\ntemplate <class T> struct has_trivial_assign : is_pod<T> { };\ntemplate <class T, class U> struct has_trivial_assign<std::pair<T, U> >\n  : integral_constant<bool,\n                      (has_trivial_assign<T>::value &&\n                       has_trivial_assign<U>::value)> { };\ntemplate <class A, int N> struct has_trivial_assign<A[N]>\n  : has_trivial_assign<A> { };\n\n// We can't get has_trivial_destructor right without compiler help, so\n// fail conservatively. We will assume it's false except for: (1) types\n// for which is_pod is true. (2) std::pair of types with trivial\n// destructors. (3) array of a type with a trivial destructor.\n// (4) const versions thereof.\ntemplate <class T> struct has_trivial_destructor : is_pod<T> { };\ntemplate <class T, class U> struct has_trivial_destructor<std::pair<T, U> >\n  : integral_constant<bool,\n                      (has_trivial_destructor<T>::value &&\n                       has_trivial_destructor<U>::value)> { };\ntemplate <class A, int N> struct has_trivial_destructor<A[N]>\n  : has_trivial_destructor<A> { };\ntemplate <class T> struct has_trivial_destructor<const T>\n  : has_trivial_destructor<T> { };\n\n// Specified by TR1 [4.7.1]\ntemplate<typename T> struct remove_const { typedef T type; };\ntemplate<typename T> struct remove_const<T const> { typedef T type; };\ntemplate<typename T> struct remove_volatile { typedef T type; };\ntemplate<typename T> struct remove_volatile<T volatile> { typedef T type; };\ntemplate<typename T> struct remove_cv {\n  typedef typename remove_const<typename remove_volatile<T>::type>::type type;\n};\n\n\n// Specified by TR1 [4.7.2] Reference modifications.\ntemplate<typename T> struct remove_reference { typedef T type; };\ntemplate<typename T> struct remove_reference<T&> { typedef T type; };\n\ntemplate <typename T> struct add_reference { typedef T& type; };\ntemplate <typename T> struct add_reference<T&> { typedef T& type; 
};\n\n// Specified by TR1 [4.7.4] Pointer modifications.\ntemplate<typename T> struct remove_pointer { typedef T type; };\ntemplate<typename T> struct remove_pointer<T*> { typedef T type; };\ntemplate<typename T> struct remove_pointer<T* const> { typedef T type; };\ntemplate<typename T> struct remove_pointer<T* volatile> { typedef T type; };\ntemplate<typename T> struct remove_pointer<T* const volatile> {\n  typedef T type; };\n\n// Specified by TR1 [4.6] Relationships between types\ntemplate<typename T, typename U> struct is_same : public false_type { };\ntemplate<typename T> struct is_same<T, T> : public true_type { };\n\n// Specified by TR1 [4.6] Relationships between types\n#if !(defined(__GNUC__) && __GNUC__ <= 3)\nnamespace type_traits_internal {\n\n// This class is an implementation detail for is_convertible, and you\n// don't need to know how it works to use is_convertible. For those\n// who care: we declare two different functions, one whose argument is\n// of type To and one with a variadic argument list. We give them\n// return types of different size, so we can use sizeof to trick the\n// compiler into telling us which function it would have chosen if we\n// had called it with an argument of type From.  See Alexandrescu's\n// _Modern C++ Design_ for more details on this sort of trick.\n\ntemplate <typename From, typename To>\nstruct ConvertHelper {\n  static small_ Test(To);\n  static big_ Test(...);\n  static From Create();\n  enum {\n    value = sizeof(Test(Create())) == sizeof(small_)\n  };\n};\n}  // namespace type_traits_internal\n\n// Inherits from true_type if From is convertible to To, false_type otherwise.\ntemplate <typename From, typename To>\nstruct is_convertible\n    : integral_constant<bool,\n                        type_traits_internal::ConvertHelper<From, To>::value> {\n};\n#endif\n\n}  // namespace internal\n}  // namespace protobuf\n}  // namespace google\n\n#endif  // GOOGLE_PROTOBUF_TYPE_TRAITS_H_\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/text_format.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: jschorr@google.com (Joseph Schorr)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Utilities for printing and parsing protocol messages in a human-readable,\n// text-based format.\n\n#ifndef GOOGLE_PROTOBUF_TEXT_FORMAT_H__\n#define GOOGLE_PROTOBUF_TEXT_FORMAT_H__\n\n#include <map>\n#include <memory>\n#ifndef _SHARED_PTR_H\n#include <google/protobuf/stubs/shared_ptr.h>\n#endif\n#include <string>\n#include <vector>\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/descriptor.h>\n#include <google/protobuf/message.h>\n\nnamespace google {\nnamespace protobuf {\n\nnamespace io {\n  class ErrorCollector;      // tokenizer.h\n}\n\n// This class implements protocol buffer text format.  Printing and parsing\n// protocol messages in text format is useful for debugging and human editing\n// of messages.\n//\n// This class is really a namespace that contains only static methods.\nclass LIBPROTOBUF_EXPORT TextFormat {\n public:\n  // Outputs a textual representation of the given message to the given\n  // output stream.\n  static bool Print(const Message& message, io::ZeroCopyOutputStream* output);\n\n  // Print the fields in an UnknownFieldSet.  They are printed by tag number\n  // only.  
Embedded messages are heuristically identified by attempting to\n  // parse them.\n  static bool PrintUnknownFields(const UnknownFieldSet& unknown_fields,\n                                 io::ZeroCopyOutputStream* output);\n\n  // Like Print(), but outputs directly to a string.\n  // Note: output will be cleared before prior to printing, and will\n  // be left empty even if printing fails.\n  static bool PrintToString(const Message& message, string* output);\n\n  // Like PrintUnknownFields(), but outputs directly to a string.\n  static bool PrintUnknownFieldsToString(const UnknownFieldSet& unknown_fields,\n                                         string* output);\n\n  // Outputs a textual representation of the value of the field supplied on\n  // the message supplied. For non-repeated fields, an index of -1 must\n  // be supplied. Note that this method will print the default value for a\n  // field if it is not set.\n  static void PrintFieldValueToString(const Message& message,\n                                      const FieldDescriptor* field,\n                                      int index,\n                                      string* output);\n\n  // The default printer that converts scalar values from fields into\n  // their string representation.\n  // You can derive from this FieldValuePrinter if you want to have\n  // fields to be printed in a different way and register it at the\n  // Printer.\n  class LIBPROTOBUF_EXPORT FieldValuePrinter {\n   public:\n    FieldValuePrinter();\n    virtual ~FieldValuePrinter();\n    virtual string PrintBool(bool val) const;\n    virtual string PrintInt32(int32 val) const;\n    virtual string PrintUInt32(uint32 val) const;\n    virtual string PrintInt64(int64 val) const;\n    virtual string PrintUInt64(uint64 val) const;\n    virtual string PrintFloat(float val) const;\n    virtual string PrintDouble(double val) const;\n    virtual string PrintString(const string& val) const;\n    virtual string PrintBytes(const 
string& val) const;\n    virtual string PrintEnum(int32 val, const string& name) const;\n    virtual string PrintFieldName(const Message& message,\n                                  const Reflection* reflection,\n                                  const FieldDescriptor* field) const;\n    virtual string PrintMessageStart(const Message& message,\n                                     int field_index,\n                                     int field_count,\n                                     bool single_line_mode) const;\n    virtual string PrintMessageEnd(const Message& message,\n                                   int field_index,\n                                   int field_count,\n                                   bool single_line_mode) const;\n\n   private:\n    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(FieldValuePrinter);\n  };\n\n  // Class for those users which require more fine-grained control over how\n  // a protobuffer message is printed out.\n  class LIBPROTOBUF_EXPORT Printer {\n   public:\n    Printer();\n    ~Printer();\n\n    // Like TextFormat::Print\n    bool Print(const Message& message, io::ZeroCopyOutputStream* output) const;\n    // Like TextFormat::PrintUnknownFields\n    bool PrintUnknownFields(const UnknownFieldSet& unknown_fields,\n                            io::ZeroCopyOutputStream* output) const;\n    // Like TextFormat::PrintToString\n    bool PrintToString(const Message& message, string* output) const;\n    // Like TextFormat::PrintUnknownFieldsToString\n    bool PrintUnknownFieldsToString(const UnknownFieldSet& unknown_fields,\n                                    string* output) const;\n    // Like TextFormat::PrintFieldValueToString\n    void PrintFieldValueToString(const Message& message,\n                                 const FieldDescriptor* field,\n                                 int index,\n                                 string* output) const;\n\n    // Adjust the initial indent level of all output.  
Each indent level is\n    // equal to two spaces.\n    void SetInitialIndentLevel(int indent_level) {\n      initial_indent_level_ = indent_level;\n    }\n\n    // If printing in single line mode, then the entire message will be output\n    // on a single line with no line breaks.\n    void SetSingleLineMode(bool single_line_mode) {\n      single_line_mode_ = single_line_mode;\n    }\n\n    bool IsInSingleLineMode() {\n      return single_line_mode_;\n    }\n\n    // If use_field_number is true, uses field number instead of field name.\n    void SetUseFieldNumber(bool use_field_number) {\n      use_field_number_ = use_field_number;\n    }\n\n    // Set true to print repeated primitives in a format like:\n    //   field_name: [1, 2, 3, 4]\n    // instead of printing each value on its own line.  Short format applies\n    // only to primitive values -- i.e. everything except strings and\n    // sub-messages/groups.\n    void SetUseShortRepeatedPrimitives(bool use_short_repeated_primitives) {\n      use_short_repeated_primitives_ = use_short_repeated_primitives;\n    }\n\n    // Set true to output UTF-8 instead of ASCII.  The only difference\n    // is that bytes >= 0x80 in string fields will not be escaped,\n    // because they are assumed to be part of UTF-8 multi-byte\n    // sequences. This will change the default FieldValuePrinter.\n    void SetUseUtf8StringEscaping(bool as_utf8);\n\n    // Set the default FieldValuePrinter that is used for all fields that\n    // don't have a field-specific printer registered.\n    // Takes ownership of the printer.\n    void SetDefaultFieldValuePrinter(const FieldValuePrinter* printer);\n\n    // Sets whether we want to hide unknown fields or not.\n    // Usually unknown fields are printed in a generic way that includes the\n    // tag number of the field instead of field name. 
However, sometimes it\n    // is useful to be able to print the message without unknown fields (e.g.\n    // for the python protobuf version to maintain consistency between its pure\n    // python and c++ implementations).\n    void SetHideUnknownFields(bool hide) {\n      hide_unknown_fields_ = hide;\n    }\n\n    // If print_message_fields_in_index_order is true, print fields of a proto\n    // message using the order defined in source code instead of the field\n    // number. By default, use the field number order.\n    void SetPrintMessageFieldsInIndexOrder(\n        bool print_message_fields_in_index_order) {\n      print_message_fields_in_index_order_ =\n          print_message_fields_in_index_order;\n    }\n\n    // If expand==true, expand google.protobuf.Any payloads. The output\n    // will be of form\n    //    [type_url] { <value_printed_in_text> }\n    //\n    // If expand==false, print Any using the default printer. The output will\n    // look like\n    //    type_url: \"<type_url>\"  value: \"serialized_content\"\n    void SetExpandAny(bool expand) {\n      expand_any_ = expand;\n    }\n\n    // If non-zero, we truncate all string fields that are  longer than this\n    // threshold.  This is useful when the proto message has very long strings,\n    // e.g., dump of encoded image file.\n    //\n    // NOTE(hfgong):  Setting a non-zero value breaks round-trip safe\n    // property of TextFormat::Printer.  
That is, from the printed message, we\n    // cannot fully recover the original string field any more.\n    void SetTruncateStringFieldLongerThan(\n        const int64 truncate_string_field_longer_than) {\n      truncate_string_field_longer_than_ = truncate_string_field_longer_than;\n    }\n\n    // Register a custom field-specific FieldValuePrinter for fields\n    // with a particular FieldDescriptor.\n    // Returns \"true\" if the registration succeeded, or \"false\", if there is\n    // already a printer for that FieldDescriptor.\n    // Takes ownership of the printer on successful registration.\n    bool RegisterFieldValuePrinter(const FieldDescriptor* field,\n                                   const FieldValuePrinter* printer);\n\n   private:\n    // Forward declaration of an internal class used to print the text\n    // output to the OutputStream (see text_format.cc for implementation).\n    class TextGenerator;\n\n    // Internal Print method, used for writing to the OutputStream via\n    // the TextGenerator class.\n    void Print(const Message& message,\n               TextGenerator& generator) const;\n\n    // Print a single field.\n    void PrintField(const Message& message,\n                    const Reflection* reflection,\n                    const FieldDescriptor* field,\n                    TextGenerator& generator) const;\n\n    // Print a repeated primitive field in short form.\n    void PrintShortRepeatedField(const Message& message,\n                                 const Reflection* reflection,\n                                 const FieldDescriptor* field,\n                                 TextGenerator& generator) const;\n\n    // Print the name of a field -- i.e. 
everything that comes before the\n    // ':' for a single name/value pair.\n    void PrintFieldName(const Message& message,\n                        const Reflection* reflection,\n                        const FieldDescriptor* field,\n                        TextGenerator& generator) const;\n\n    // Outputs a textual representation of the value of the field supplied on\n    // the message supplied or the default value if not set.\n    void PrintFieldValue(const Message& message,\n                         const Reflection* reflection,\n                         const FieldDescriptor* field,\n                         int index,\n                         TextGenerator& generator) const;\n\n    // Print the fields in an UnknownFieldSet.  They are printed by tag number\n    // only.  Embedded messages are heuristically identified by attempting to\n    // parse them.\n    void PrintUnknownFields(const UnknownFieldSet& unknown_fields,\n                            TextGenerator& generator) const;\n\n    bool PrintAny(const Message& message, TextGenerator& generator) const;\n\n    int initial_indent_level_;\n\n    bool single_line_mode_;\n\n    bool use_field_number_;\n\n    bool use_short_repeated_primitives_;\n\n    bool hide_unknown_fields_;\n\n    bool print_message_fields_in_index_order_;\n\n    bool expand_any_;\n\n    int64 truncate_string_field_longer_than_;\n\n    google::protobuf::scoped_ptr<const FieldValuePrinter> default_field_value_printer_;\n    typedef std::map<const FieldDescriptor*,\n                     const FieldValuePrinter*> CustomPrinterMap;\n    CustomPrinterMap custom_printers_;\n  };\n\n  // Parses a text-format protocol message from the given input stream to\n  // the given message object. This function parses the human-readable format\n  // written by Print(). Returns true on success. 
The message is cleared first,\n  // even if the function fails -- See Merge() to avoid this behavior.\n  //\n  // Example input: \"user {\\n id: 123 extra { gender: MALE language: 'en' }\\n}\"\n  //\n  // One use for this function is parsing handwritten strings in test code.\n  // Another use is to parse the output from google::protobuf::Message::DebugString()\n  // (or ShortDebugString()), because these functions output using\n  // google::protobuf::TextFormat::Print().\n  //\n  // If you would like to read a protocol buffer serialized in the\n  // (non-human-readable) binary wire format, see\n  // google::protobuf::MessageLite::ParseFromString().\n  static bool Parse(io::ZeroCopyInputStream* input, Message* output);\n  // Like Parse(), but reads directly from a string.\n  static bool ParseFromString(const string& input, Message* output);\n\n  // Like Parse(), but the data is merged into the given message, as if\n  // using Message::MergeFrom().\n  static bool Merge(io::ZeroCopyInputStream* input, Message* output);\n  // Like Merge(), but reads directly from a string.\n  static bool MergeFromString(const string& input, Message* output);\n\n  // Parse the given text as a single field value and store it into the\n  // given field of the given message. If the field is a repeated field,\n  // the new value will be added to the end\n  static bool ParseFieldValueFromString(const string& input,\n                                        const FieldDescriptor* field,\n                                        Message* message);\n\n  // Interface that TextFormat::Parser can use to find extensions.\n  // This class may be extended in the future to find more information\n  // like fields, etc.\n  class LIBPROTOBUF_EXPORT Finder {\n   public:\n    virtual ~Finder();\n\n    // Try to find an extension of *message by fully-qualified field\n    // name.  
Returns NULL if no extension is known for this name or number.\n    virtual const FieldDescriptor* FindExtension(\n        Message* message,\n        const string& name) const = 0;\n  };\n\n  // A location in the parsed text.\n  struct ParseLocation {\n    int line;\n    int column;\n\n    ParseLocation() : line(-1), column(-1) {}\n    ParseLocation(int line_param, int column_param)\n        : line(line_param), column(column_param) {}\n  };\n\n  // Data structure which is populated with the locations of each field\n  // value parsed from the text.\n  class LIBPROTOBUF_EXPORT ParseInfoTree {\n   public:\n    ParseInfoTree();\n    ~ParseInfoTree();\n\n    // Returns the parse location for index-th value of the field in the parsed\n    // text. If none exists, returns a location with line = -1. Index should be\n    // -1 for not-repeated fields.\n    ParseLocation GetLocation(const FieldDescriptor* field, int index) const;\n\n    // Returns the parse info tree for the given field, which must be a message\n    // type. 
The nested information tree is owned by the root tree and will be\n    // deleted when it is deleted.\n    ParseInfoTree* GetTreeForNested(const FieldDescriptor* field,\n                                    int index) const;\n\n   private:\n    // Allow the text format parser to record information into the tree.\n    friend class TextFormat;\n\n    // Records the starting location of a single value for a field.\n    void RecordLocation(const FieldDescriptor* field, ParseLocation location);\n\n    // Create and records a nested tree for a nested message field.\n    ParseInfoTree* CreateNested(const FieldDescriptor* field);\n\n    // Defines the map from the index-th field descriptor to its parse location.\n    typedef std::map<const FieldDescriptor*,\n                     std::vector<ParseLocation> > LocationMap;\n\n    // Defines the map from the index-th field descriptor to the nested parse\n    // info tree.\n    typedef std::map<const FieldDescriptor*,\n                     std::vector<ParseInfoTree*> > NestedMap;\n\n    LocationMap locations_;\n    NestedMap nested_;\n\n    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ParseInfoTree);\n  };\n\n  // For more control over parsing, use this class.\n  class LIBPROTOBUF_EXPORT Parser {\n   public:\n    Parser();\n    ~Parser();\n\n    // Like TextFormat::Parse().\n    bool Parse(io::ZeroCopyInputStream* input, Message* output);\n    // Like TextFormat::ParseFromString().\n    bool ParseFromString(const string& input, Message* output);\n    // Like TextFormat::Merge().\n    bool Merge(io::ZeroCopyInputStream* input, Message* output);\n    // Like TextFormat::MergeFromString().\n    bool MergeFromString(const string& input, Message* output);\n\n    // Set where to report parse errors.  If NULL (the default), errors will\n    // be printed to stderr.\n    void RecordErrorsTo(io::ErrorCollector* error_collector) {\n      error_collector_ = error_collector;\n    }\n\n    // Set how parser finds extensions.  
If NULL (the default), the\n    // parser will use the standard Reflection object associated with\n    // the message being parsed.\n    void SetFinder(Finder* finder) {\n      finder_ = finder;\n    }\n\n    // Sets where location information about the parse will be written. If NULL\n    // (the default), then no location will be written.\n    void WriteLocationsTo(ParseInfoTree* tree) {\n      parse_info_tree_ = tree;\n    }\n\n    // Normally parsing fails if, after parsing, output->IsInitialized()\n    // returns false.  Call AllowPartialMessage(true) to skip this check.\n    void AllowPartialMessage(bool allow) {\n      allow_partial_ = allow;\n    }\n\n    // Allow field names to be matched case-insensitively.\n    // This is not advisable if there are fields that only differ in case, or\n    // if you want to enforce writing in the canonical form.\n    // This is 'false' by default.\n    void AllowCaseInsensitiveField(bool allow) {\n      allow_case_insensitive_field_ = allow;\n    }\n\n    // Like TextFormat::ParseFieldValueFromString\n    bool ParseFieldValueFromString(const string& input,\n                                   const FieldDescriptor* field,\n                                   Message* output);\n\n\n    void AllowFieldNumber(bool allow) {\n      allow_field_number_ = allow;\n    }\n\n   private:\n    // Forward declaration of an internal class used to parse text\n    // representations (see text_format.cc for implementation).\n    class ParserImpl;\n\n    // Like TextFormat::Merge().  
The provided implementation is used\n    // to do the parsing.\n    bool MergeUsingImpl(io::ZeroCopyInputStream* input,\n                        Message* output,\n                        ParserImpl* parser_impl);\n\n    io::ErrorCollector* error_collector_;\n    Finder* finder_;\n    ParseInfoTree* parse_info_tree_;\n    bool allow_partial_;\n    bool allow_case_insensitive_field_;\n    bool allow_unknown_field_;\n    bool allow_unknown_enum_;\n    bool allow_field_number_;\n    bool allow_relaxed_whitespace_;\n    bool allow_singular_overwrites_;\n  };\n\n\n private:\n  // Hack: ParseInfoTree declares TextFormat as a friend which should extend\n  // the friendship to TextFormat::Parser::ParserImpl, but unfortunately some\n  // old compilers (e.g. GCC 3.4.6) don't implement this correctly. We provide\n  // helpers for ParserImpl to call methods of ParseInfoTree.\n  static inline void RecordLocation(ParseInfoTree* info_tree,\n                                    const FieldDescriptor* field,\n                                    ParseLocation location);\n  static inline ParseInfoTree* CreateNested(ParseInfoTree* info_tree,\n                                            const FieldDescriptor* field);\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(TextFormat);\n};\n\ninline void TextFormat::RecordLocation(ParseInfoTree* info_tree,\n                                       const FieldDescriptor* field,\n                                       ParseLocation location) {\n  info_tree->RecordLocation(field, location);\n}\n\n\ninline TextFormat::ParseInfoTree* TextFormat::CreateNested(\n    ParseInfoTree* info_tree, const FieldDescriptor* field) {\n  return info_tree->CreateNested(field);\n}\n\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_TEXT_FORMAT_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/timestamp.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: google/protobuf/timestamp.proto\n\n#ifndef PROTOBUF_google_2fprotobuf_2ftimestamp_2eproto__INCLUDED\n#define PROTOBUF_google_2fprotobuf_2ftimestamp_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/unknown_field_set.h>\n// @@protoc_insertion_point(includes)\n\nnamespace google {\nnamespace protobuf {\n\n// Internal implementation detail -- do not call these.\nvoid LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2ftimestamp_2eproto();\nvoid LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2ftimestamp_2eproto();\nvoid protobuf_AssignDesc_google_2fprotobuf_2ftimestamp_2eproto();\nvoid protobuf_ShutdownFile_google_2fprotobuf_2ftimestamp_2eproto();\n\nclass Timestamp;\n\n// ===================================================================\n\nclass LIBPROTOBUF_EXPORT Timestamp : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Timestamp) */ {\n public:\n  Timestamp();\n  virtual ~Timestamp();\n\n  Timestamp(const Timestamp& from);\n\n  inline Timestamp& operator=(const Timestamp& from) {\n    CopyFrom(from);\n  
  return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Timestamp& default_instance();\n\n  static const Timestamp* internal_default_instance();\n\n  void UnsafeArenaSwap(Timestamp* other);\n  void Swap(Timestamp* other);\n\n  // implements Message ----------------------------------------------\n\n  inline Timestamp* New() const { return New(NULL); }\n\n  Timestamp* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Timestamp& from);\n  void MergeFrom(const Timestamp& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Timestamp* other);\n  void UnsafeMergeFrom(const Timestamp& from);\n  protected:\n  explicit Timestamp(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* 
MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional int64 seconds = 1;\n  void clear_seconds();\n  static const int kSecondsFieldNumber = 1;\n  ::google::protobuf::int64 seconds() const;\n  void set_seconds(::google::protobuf::int64 value);\n\n  // optional int32 nanos = 2;\n  void clear_nanos();\n  static const int kNanosFieldNumber = 2;\n  ::google::protobuf::int32 nanos() const;\n  void set_nanos(::google::protobuf::int32 value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.Timestamp)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  ::google::protobuf::int64 seconds_;\n  ::google::protobuf::int32 nanos_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2ftimestamp_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2ftimestamp_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2ftimestamp_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2ftimestamp_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Timestamp> Timestamp_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// Timestamp\n\n// optional int64 seconds = 1;\ninline void Timestamp::clear_seconds() {\n  seconds_ = GOOGLE_LONGLONG(0);\n}\ninline ::google::protobuf::int64 Timestamp::seconds() const {\n  // 
@@protoc_insertion_point(field_get:google.protobuf.Timestamp.seconds)\n  return seconds_;\n}\ninline void Timestamp::set_seconds(::google::protobuf::int64 value) {\n  \n  seconds_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Timestamp.seconds)\n}\n\n// optional int32 nanos = 2;\ninline void Timestamp::clear_nanos() {\n  nanos_ = 0;\n}\ninline ::google::protobuf::int32 Timestamp::nanos() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Timestamp.nanos)\n  return nanos_;\n}\ninline void Timestamp::set_nanos(::google::protobuf::int32 value) {\n  \n  nanos_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Timestamp.nanos)\n}\n\ninline const Timestamp* Timestamp::internal_default_instance() {\n  return &Timestamp_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace protobuf\n}  // namespace google\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_google_2fprotobuf_2ftimestamp_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/timestamp.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption cc_enable_arenas = true;\noption go_package = \"github.com/golang/protobuf/ptypes/timestamp\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"TimestampProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n// A Timestamp represents a point in time independent of any time zone\n// or calendar, represented as seconds and fractions of seconds at\n// nanosecond resolution in UTC Epoch time. It is encoded using the\n// Proleptic Gregorian Calendar which extends the Gregorian calendar\n// backwards to year one. It is encoded assuming all minutes are 60\n// seconds long, i.e. leap seconds are \"smeared\" so that no leap second\n// table is needed for interpretation. 
Range is from\n// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.\n// By restricting to that range, we ensure that we can convert to\n// and from  RFC 3339 date strings.\n// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).\n//\n// Example 1: Compute Timestamp from POSIX `time()`.\n//\n//     Timestamp timestamp;\n//     timestamp.set_seconds(time(NULL));\n//     timestamp.set_nanos(0);\n//\n// Example 2: Compute Timestamp from POSIX `gettimeofday()`.\n//\n//     struct timeval tv;\n//     gettimeofday(&tv, NULL);\n//\n//     Timestamp timestamp;\n//     timestamp.set_seconds(tv.tv_sec);\n//     timestamp.set_nanos(tv.tv_usec * 1000);\n//\n// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.\n//\n//     FILETIME ft;\n//     GetSystemTimeAsFileTime(&ft);\n//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;\n//\n//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z\n//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.\n//     Timestamp timestamp;\n//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));\n//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));\n//\n// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.\n//\n//     long millis = System.currentTimeMillis();\n//\n//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)\n//         .setNanos((int) ((millis % 1000) * 1000000)).build();\n//\n//\n// Example 5: Compute Timestamp from current time in Python.\n//\n//     timestamp = Timestamp()\n//     timestamp.GetCurrentTime()\n//\n//\nmessage Timestamp {\n\n  // Represents seconds of UTC time since Unix epoch\n  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n  // 9999-12-31T23:59:59Z inclusive.\n  int64 seconds = 1;\n\n  // Non-negative fractions of a second at nanosecond resolution. 
Negative\n  // second values with fractions must still have non-negative nanos values\n  // that count forward in time. Must be from 0 to 999,999,999\n  // inclusive.\n  int32 nanos = 2;\n}\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/type.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: google/protobuf/type.proto\n\n#ifndef PROTOBUF_google_2fprotobuf_2ftype_2eproto__INCLUDED\n#define PROTOBUF_google_2fprotobuf_2ftype_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/generated_enum_reflection.h>\n#include <google/protobuf/unknown_field_set.h>\n#include <google/protobuf/any.pb.h>\n#include <google/protobuf/source_context.pb.h>\n// @@protoc_insertion_point(includes)\n\nnamespace google {\nnamespace protobuf {\n\n// Internal implementation detail -- do not call these.\nvoid LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2ftype_2eproto();\nvoid LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2ftype_2eproto();\nvoid protobuf_AssignDesc_google_2fprotobuf_2ftype_2eproto();\nvoid protobuf_ShutdownFile_google_2fprotobuf_2ftype_2eproto();\n\nclass Enum;\nclass EnumValue;\nclass Field;\nclass Option;\nclass Type;\n\nenum Field_Kind {\n  Field_Kind_TYPE_UNKNOWN = 0,\n  Field_Kind_TYPE_DOUBLE = 1,\n  Field_Kind_TYPE_FLOAT = 2,\n  Field_Kind_TYPE_INT64 = 3,\n  Field_Kind_TYPE_UINT64 = 4,\n  Field_Kind_TYPE_INT32 = 5,\n  Field_Kind_TYPE_FIXED64 = 
6,\n  Field_Kind_TYPE_FIXED32 = 7,\n  Field_Kind_TYPE_BOOL = 8,\n  Field_Kind_TYPE_STRING = 9,\n  Field_Kind_TYPE_GROUP = 10,\n  Field_Kind_TYPE_MESSAGE = 11,\n  Field_Kind_TYPE_BYTES = 12,\n  Field_Kind_TYPE_UINT32 = 13,\n  Field_Kind_TYPE_ENUM = 14,\n  Field_Kind_TYPE_SFIXED32 = 15,\n  Field_Kind_TYPE_SFIXED64 = 16,\n  Field_Kind_TYPE_SINT32 = 17,\n  Field_Kind_TYPE_SINT64 = 18,\n  Field_Kind_Field_Kind_INT_MIN_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32min,\n  Field_Kind_Field_Kind_INT_MAX_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32max\n};\nLIBPROTOBUF_EXPORT bool Field_Kind_IsValid(int value);\nconst Field_Kind Field_Kind_Kind_MIN = Field_Kind_TYPE_UNKNOWN;\nconst Field_Kind Field_Kind_Kind_MAX = Field_Kind_TYPE_SINT64;\nconst int Field_Kind_Kind_ARRAYSIZE = Field_Kind_Kind_MAX + 1;\n\nLIBPROTOBUF_EXPORT const ::google::protobuf::EnumDescriptor* Field_Kind_descriptor();\ninline const ::std::string& Field_Kind_Name(Field_Kind value) {\n  return ::google::protobuf::internal::NameOfEnum(\n    Field_Kind_descriptor(), value);\n}\ninline bool Field_Kind_Parse(\n    const ::std::string& name, Field_Kind* value) {\n  return ::google::protobuf::internal::ParseNamedEnum<Field_Kind>(\n    Field_Kind_descriptor(), name, value);\n}\nenum Field_Cardinality {\n  Field_Cardinality_CARDINALITY_UNKNOWN = 0,\n  Field_Cardinality_CARDINALITY_OPTIONAL = 1,\n  Field_Cardinality_CARDINALITY_REQUIRED = 2,\n  Field_Cardinality_CARDINALITY_REPEATED = 3,\n  Field_Cardinality_Field_Cardinality_INT_MIN_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32min,\n  Field_Cardinality_Field_Cardinality_INT_MAX_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32max\n};\nLIBPROTOBUF_EXPORT bool Field_Cardinality_IsValid(int value);\nconst Field_Cardinality Field_Cardinality_Cardinality_MIN = Field_Cardinality_CARDINALITY_UNKNOWN;\nconst Field_Cardinality Field_Cardinality_Cardinality_MAX = Field_Cardinality_CARDINALITY_REPEATED;\nconst int Field_Cardinality_Cardinality_ARRAYSIZE = 
Field_Cardinality_Cardinality_MAX + 1;\n\nLIBPROTOBUF_EXPORT const ::google::protobuf::EnumDescriptor* Field_Cardinality_descriptor();\ninline const ::std::string& Field_Cardinality_Name(Field_Cardinality value) {\n  return ::google::protobuf::internal::NameOfEnum(\n    Field_Cardinality_descriptor(), value);\n}\ninline bool Field_Cardinality_Parse(\n    const ::std::string& name, Field_Cardinality* value) {\n  return ::google::protobuf::internal::ParseNamedEnum<Field_Cardinality>(\n    Field_Cardinality_descriptor(), name, value);\n}\nenum Syntax {\n  SYNTAX_PROTO2 = 0,\n  SYNTAX_PROTO3 = 1,\n  Syntax_INT_MIN_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32min,\n  Syntax_INT_MAX_SENTINEL_DO_NOT_USE_ = ::google::protobuf::kint32max\n};\nLIBPROTOBUF_EXPORT bool Syntax_IsValid(int value);\nconst Syntax Syntax_MIN = SYNTAX_PROTO2;\nconst Syntax Syntax_MAX = SYNTAX_PROTO3;\nconst int Syntax_ARRAYSIZE = Syntax_MAX + 1;\n\nLIBPROTOBUF_EXPORT const ::google::protobuf::EnumDescriptor* Syntax_descriptor();\ninline const ::std::string& Syntax_Name(Syntax value) {\n  return ::google::protobuf::internal::NameOfEnum(\n    Syntax_descriptor(), value);\n}\ninline bool Syntax_Parse(\n    const ::std::string& name, Syntax* value) {\n  return ::google::protobuf::internal::ParseNamedEnum<Syntax>(\n    Syntax_descriptor(), name, value);\n}\n// ===================================================================\n\nclass LIBPROTOBUF_EXPORT Type : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Type) */ {\n public:\n  Type();\n  virtual ~Type();\n\n  Type(const Type& from);\n\n  inline Type& operator=(const Type& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Type& default_instance();\n\n  
static const Type* internal_default_instance();\n\n  void UnsafeArenaSwap(Type* other);\n  void Swap(Type* other);\n\n  // implements Message ----------------------------------------------\n\n  inline Type* New() const { return New(NULL); }\n\n  Type* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Type& from);\n  void MergeFrom(const Type& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Type* other);\n  void UnsafeMergeFrom(const Type& from);\n  protected:\n  explicit Type(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  void clear_name();\n  static const 
int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n  ::std::string* unsafe_arena_release_name();\n  void unsafe_arena_set_allocated_name(\n      ::std::string* name);\n\n  // repeated .google.protobuf.Field fields = 2;\n  int fields_size() const;\n  void clear_fields();\n  static const int kFieldsFieldNumber = 2;\n  const ::google::protobuf::Field& fields(int index) const;\n  ::google::protobuf::Field* mutable_fields(int index);\n  ::google::protobuf::Field* add_fields();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Field >*\n      mutable_fields();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Field >&\n      fields() const;\n\n  // repeated string oneofs = 3;\n  int oneofs_size() const;\n  void clear_oneofs();\n  static const int kOneofsFieldNumber = 3;\n  const ::std::string& oneofs(int index) const;\n  ::std::string* mutable_oneofs(int index);\n  void set_oneofs(int index, const ::std::string& value);\n  void set_oneofs(int index, const char* value);\n  void set_oneofs(int index, const char* value, size_t size);\n  ::std::string* add_oneofs();\n  void add_oneofs(const ::std::string& value);\n  void add_oneofs(const char* value);\n  void add_oneofs(const char* value, size_t size);\n  const ::google::protobuf::RepeatedPtrField< ::std::string>& oneofs() const;\n  ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_oneofs();\n\n  // repeated .google.protobuf.Option options = 4;\n  int options_size() const;\n  void clear_options();\n  static const int kOptionsFieldNumber = 4;\n  const ::google::protobuf::Option& options(int index) const;\n  ::google::protobuf::Option* mutable_options(int index);\n  ::google::protobuf::Option* add_options();\n  
::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >*\n      mutable_options();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >&\n      options() const;\n\n  // optional .google.protobuf.SourceContext source_context = 5;\n  bool has_source_context() const;\n  void clear_source_context();\n  static const int kSourceContextFieldNumber = 5;\n  private:\n  void _slow_mutable_source_context();\n  ::google::protobuf::SourceContext* _slow_release_source_context();\n  public:\n  const ::google::protobuf::SourceContext& source_context() const;\n  ::google::protobuf::SourceContext* mutable_source_context();\n  ::google::protobuf::SourceContext* release_source_context();\n  void set_allocated_source_context(::google::protobuf::SourceContext* source_context);\n  ::google::protobuf::SourceContext* unsafe_arena_release_source_context();\n  void unsafe_arena_set_allocated_source_context(\n      ::google::protobuf::SourceContext* source_context);\n\n  // optional .google.protobuf.Syntax syntax = 6;\n  void clear_syntax();\n  static const int kSyntaxFieldNumber = 6;\n  ::google::protobuf::Syntax syntax() const;\n  void set_syntax(::google::protobuf::Syntax value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.Type)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Field > fields_;\n  ::google::protobuf::RepeatedPtrField< ::std::string> oneofs_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option > options_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::SourceContext* source_context_;\n  int syntax_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2ftype_2eproto_impl();\n  friend void 
LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2ftype_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2ftype_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2ftype_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Type> Type_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT Field : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Field) */ {\n public:\n  Field();\n  virtual ~Field();\n\n  Field(const Field& from);\n\n  inline Field& operator=(const Field& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Field& default_instance();\n\n  static const Field* internal_default_instance();\n\n  void UnsafeArenaSwap(Field* other);\n  void Swap(Field* other);\n\n  // implements Message ----------------------------------------------\n\n  inline Field* New() const { return New(NULL); }\n\n  Field* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Field& from);\n  void MergeFrom(const Field& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* 
SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Field* other);\n  void UnsafeMergeFrom(const Field& from);\n  protected:\n  explicit Field(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  typedef Field_Kind Kind;\n  static const Kind TYPE_UNKNOWN =\n    Field_Kind_TYPE_UNKNOWN;\n  static const Kind TYPE_DOUBLE =\n    Field_Kind_TYPE_DOUBLE;\n  static const Kind TYPE_FLOAT =\n    Field_Kind_TYPE_FLOAT;\n  static const Kind TYPE_INT64 =\n    Field_Kind_TYPE_INT64;\n  static const Kind TYPE_UINT64 =\n    Field_Kind_TYPE_UINT64;\n  static const Kind TYPE_INT32 =\n    Field_Kind_TYPE_INT32;\n  static const Kind TYPE_FIXED64 =\n    Field_Kind_TYPE_FIXED64;\n  static const Kind TYPE_FIXED32 =\n    Field_Kind_TYPE_FIXED32;\n  static const Kind TYPE_BOOL =\n    Field_Kind_TYPE_BOOL;\n  static const Kind TYPE_STRING =\n    Field_Kind_TYPE_STRING;\n  static const Kind TYPE_GROUP =\n    Field_Kind_TYPE_GROUP;\n  static const Kind TYPE_MESSAGE =\n    Field_Kind_TYPE_MESSAGE;\n  static const Kind TYPE_BYTES =\n    Field_Kind_TYPE_BYTES;\n  static const Kind TYPE_UINT32 =\n    Field_Kind_TYPE_UINT32;\n  static const Kind TYPE_ENUM =\n    Field_Kind_TYPE_ENUM;\n  static const Kind TYPE_SFIXED32 =\n    Field_Kind_TYPE_SFIXED32;\n  static const Kind 
TYPE_SFIXED64 =\n    Field_Kind_TYPE_SFIXED64;\n  static const Kind TYPE_SINT32 =\n    Field_Kind_TYPE_SINT32;\n  static const Kind TYPE_SINT64 =\n    Field_Kind_TYPE_SINT64;\n  static inline bool Kind_IsValid(int value) {\n    return Field_Kind_IsValid(value);\n  }\n  static const Kind Kind_MIN =\n    Field_Kind_Kind_MIN;\n  static const Kind Kind_MAX =\n    Field_Kind_Kind_MAX;\n  static const int Kind_ARRAYSIZE =\n    Field_Kind_Kind_ARRAYSIZE;\n  static inline const ::google::protobuf::EnumDescriptor*\n  Kind_descriptor() {\n    return Field_Kind_descriptor();\n  }\n  static inline const ::std::string& Kind_Name(Kind value) {\n    return Field_Kind_Name(value);\n  }\n  static inline bool Kind_Parse(const ::std::string& name,\n      Kind* value) {\n    return Field_Kind_Parse(name, value);\n  }\n\n  typedef Field_Cardinality Cardinality;\n  static const Cardinality CARDINALITY_UNKNOWN =\n    Field_Cardinality_CARDINALITY_UNKNOWN;\n  static const Cardinality CARDINALITY_OPTIONAL =\n    Field_Cardinality_CARDINALITY_OPTIONAL;\n  static const Cardinality CARDINALITY_REQUIRED =\n    Field_Cardinality_CARDINALITY_REQUIRED;\n  static const Cardinality CARDINALITY_REPEATED =\n    Field_Cardinality_CARDINALITY_REPEATED;\n  static inline bool Cardinality_IsValid(int value) {\n    return Field_Cardinality_IsValid(value);\n  }\n  static const Cardinality Cardinality_MIN =\n    Field_Cardinality_Cardinality_MIN;\n  static const Cardinality Cardinality_MAX =\n    Field_Cardinality_Cardinality_MAX;\n  static const int Cardinality_ARRAYSIZE =\n    Field_Cardinality_Cardinality_ARRAYSIZE;\n  static inline const ::google::protobuf::EnumDescriptor*\n  Cardinality_descriptor() {\n    return Field_Cardinality_descriptor();\n  }\n  static inline const ::std::string& Cardinality_Name(Cardinality value) {\n    return Field_Cardinality_Name(value);\n  }\n  static inline bool Cardinality_Parse(const ::std::string& name,\n      Cardinality* value) {\n    return 
Field_Cardinality_Parse(name, value);\n  }\n\n  // accessors -------------------------------------------------------\n\n  // optional .google.protobuf.Field.Kind kind = 1;\n  void clear_kind();\n  static const int kKindFieldNumber = 1;\n  ::google::protobuf::Field_Kind kind() const;\n  void set_kind(::google::protobuf::Field_Kind value);\n\n  // optional .google.protobuf.Field.Cardinality cardinality = 2;\n  void clear_cardinality();\n  static const int kCardinalityFieldNumber = 2;\n  ::google::protobuf::Field_Cardinality cardinality() const;\n  void set_cardinality(::google::protobuf::Field_Cardinality value);\n\n  // optional int32 number = 3;\n  void clear_number();\n  static const int kNumberFieldNumber = 3;\n  ::google::protobuf::int32 number() const;\n  void set_number(::google::protobuf::int32 value);\n\n  // optional string name = 4;\n  void clear_name();\n  static const int kNameFieldNumber = 4;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n  ::std::string* unsafe_arena_release_name();\n  void unsafe_arena_set_allocated_name(\n      ::std::string* name);\n\n  // optional string type_url = 6;\n  void clear_type_url();\n  static const int kTypeUrlFieldNumber = 6;\n  const ::std::string& type_url() const;\n  void set_type_url(const ::std::string& value);\n  void set_type_url(const char* value);\n  void set_type_url(const char* value, size_t size);\n  ::std::string* mutable_type_url();\n  ::std::string* release_type_url();\n  void set_allocated_type_url(::std::string* type_url);\n  ::std::string* unsafe_arena_release_type_url();\n  void unsafe_arena_set_allocated_type_url(\n      ::std::string* type_url);\n\n  // optional int32 oneof_index = 7;\n  void clear_oneof_index();\n  static const int kOneofIndexFieldNumber = 7;\n 
 ::google::protobuf::int32 oneof_index() const;\n  void set_oneof_index(::google::protobuf::int32 value);\n\n  // optional bool packed = 8;\n  void clear_packed();\n  static const int kPackedFieldNumber = 8;\n  bool packed() const;\n  void set_packed(bool value);\n\n  // repeated .google.protobuf.Option options = 9;\n  int options_size() const;\n  void clear_options();\n  static const int kOptionsFieldNumber = 9;\n  const ::google::protobuf::Option& options(int index) const;\n  ::google::protobuf::Option* mutable_options(int index);\n  ::google::protobuf::Option* add_options();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >*\n      mutable_options();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >&\n      options() const;\n\n  // optional string json_name = 10;\n  void clear_json_name();\n  static const int kJsonNameFieldNumber = 10;\n  const ::std::string& json_name() const;\n  void set_json_name(const ::std::string& value);\n  void set_json_name(const char* value);\n  void set_json_name(const char* value, size_t size);\n  ::std::string* mutable_json_name();\n  ::std::string* release_json_name();\n  void set_allocated_json_name(::std::string* json_name);\n  ::std::string* unsafe_arena_release_json_name();\n  void unsafe_arena_set_allocated_json_name(\n      ::std::string* json_name);\n\n  // optional string default_value = 11;\n  void clear_default_value();\n  static const int kDefaultValueFieldNumber = 11;\n  const ::std::string& default_value() const;\n  void set_default_value(const ::std::string& value);\n  void set_default_value(const char* value);\n  void set_default_value(const char* value, size_t size);\n  ::std::string* mutable_default_value();\n  ::std::string* release_default_value();\n  void set_allocated_default_value(::std::string* default_value);\n  ::std::string* unsafe_arena_release_default_value();\n  void unsafe_arena_set_allocated_default_value(\n      ::std::string* default_value);\n\n  // 
@@protoc_insertion_point(class_scope:google.protobuf.Field)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option > options_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::internal::ArenaStringPtr type_url_;\n  ::google::protobuf::internal::ArenaStringPtr json_name_;\n  ::google::protobuf::internal::ArenaStringPtr default_value_;\n  int kind_;\n  int cardinality_;\n  ::google::protobuf::int32 number_;\n  ::google::protobuf::int32 oneof_index_;\n  bool packed_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2ftype_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2ftype_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2ftype_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2ftype_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Field> Field_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT Enum : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Enum) */ {\n public:\n  Enum();\n  virtual ~Enum();\n\n  Enum(const Enum& from);\n\n  inline Enum& operator=(const Enum& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Enum& default_instance();\n\n  static const Enum* internal_default_instance();\n\n  void UnsafeArenaSwap(Enum* other);\n  void Swap(Enum* other);\n\n 
 // implements Message ----------------------------------------------\n\n  inline Enum* New() const { return New(NULL); }\n\n  Enum* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Enum& from);\n  void MergeFrom(const Enum& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Enum* other);\n  void UnsafeMergeFrom(const Enum& from);\n  protected:\n  explicit Enum(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void 
set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n  ::std::string* unsafe_arena_release_name();\n  void unsafe_arena_set_allocated_name(\n      ::std::string* name);\n\n  // repeated .google.protobuf.EnumValue enumvalue = 2;\n  int enumvalue_size() const;\n  void clear_enumvalue();\n  static const int kEnumvalueFieldNumber = 2;\n  const ::google::protobuf::EnumValue& enumvalue(int index) const;\n  ::google::protobuf::EnumValue* mutable_enumvalue(int index);\n  ::google::protobuf::EnumValue* add_enumvalue();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumValue >*\n      mutable_enumvalue();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumValue >&\n      enumvalue() const;\n\n  // repeated .google.protobuf.Option options = 3;\n  int options_size() const;\n  void clear_options();\n  static const int kOptionsFieldNumber = 3;\n  const ::google::protobuf::Option& options(int index) const;\n  ::google::protobuf::Option* mutable_options(int index);\n  ::google::protobuf::Option* add_options();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >*\n      mutable_options();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >&\n      options() const;\n\n  // optional .google.protobuf.SourceContext source_context = 4;\n  bool has_source_context() const;\n  void clear_source_context();\n  static const int kSourceContextFieldNumber = 4;\n  private:\n  void _slow_mutable_source_context();\n  ::google::protobuf::SourceContext* _slow_release_source_context();\n  public:\n  const ::google::protobuf::SourceContext& source_context() const;\n  ::google::protobuf::SourceContext* mutable_source_context();\n  ::google::protobuf::SourceContext* release_source_context();\n  void set_allocated_source_context(::google::protobuf::SourceContext* source_context);\n  
::google::protobuf::SourceContext* unsafe_arena_release_source_context();\n  void unsafe_arena_set_allocated_source_context(\n      ::google::protobuf::SourceContext* source_context);\n\n  // optional .google.protobuf.Syntax syntax = 5;\n  void clear_syntax();\n  static const int kSyntaxFieldNumber = 5;\n  ::google::protobuf::Syntax syntax() const;\n  void set_syntax(::google::protobuf::Syntax value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.Enum)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumValue > enumvalue_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option > options_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::SourceContext* source_context_;\n  int syntax_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2ftype_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2ftype_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2ftype_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2ftype_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Enum> Enum_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT EnumValue : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.EnumValue) */ {\n public:\n  EnumValue();\n  virtual ~EnumValue();\n\n  EnumValue(const EnumValue& from);\n\n  inline EnumValue& operator=(const EnumValue& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* 
GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const EnumValue& default_instance();\n\n  static const EnumValue* internal_default_instance();\n\n  void UnsafeArenaSwap(EnumValue* other);\n  void Swap(EnumValue* other);\n\n  // implements Message ----------------------------------------------\n\n  inline EnumValue* New() const { return New(NULL); }\n\n  EnumValue* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const EnumValue& from);\n  void MergeFrom(const EnumValue& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(EnumValue* other);\n  void UnsafeMergeFrom(const EnumValue& from);\n  protected:\n  explicit EnumValue(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata 
GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n  ::std::string* unsafe_arena_release_name();\n  void unsafe_arena_set_allocated_name(\n      ::std::string* name);\n\n  // optional int32 number = 2;\n  void clear_number();\n  static const int kNumberFieldNumber = 2;\n  ::google::protobuf::int32 number() const;\n  void set_number(::google::protobuf::int32 value);\n\n  // repeated .google.protobuf.Option options = 3;\n  int options_size() const;\n  void clear_options();\n  static const int kOptionsFieldNumber = 3;\n  const ::google::protobuf::Option& options(int index) const;\n  ::google::protobuf::Option* mutable_options(int index);\n  ::google::protobuf::Option* add_options();\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >*\n      mutable_options();\n  const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >&\n      options() const;\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.EnumValue)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option > options_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::int32 number_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2ftype_2eproto_impl();\n  friend void 
LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2ftype_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2ftype_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2ftype_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<EnumValue> EnumValue_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT Option : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Option) */ {\n public:\n  Option();\n  virtual ~Option();\n\n  Option(const Option& from);\n\n  inline Option& operator=(const Option& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Option& default_instance();\n\n  static const Option* internal_default_instance();\n\n  void UnsafeArenaSwap(Option* other);\n  void Swap(Option* other);\n\n  // implements Message ----------------------------------------------\n\n  inline Option* New() const { return New(NULL); }\n\n  Option* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Option& from);\n  void MergeFrom(const Option& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* 
SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Option* other);\n  void UnsafeMergeFrom(const Option& from);\n  protected:\n  explicit Option(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string name = 1;\n  void clear_name();\n  static const int kNameFieldNumber = 1;\n  const ::std::string& name() const;\n  void set_name(const ::std::string& value);\n  void set_name(const char* value);\n  void set_name(const char* value, size_t size);\n  ::std::string* mutable_name();\n  ::std::string* release_name();\n  void set_allocated_name(::std::string* name);\n  ::std::string* unsafe_arena_release_name();\n  void unsafe_arena_set_allocated_name(\n      ::std::string* name);\n\n  // optional .google.protobuf.Any value = 2;\n  bool has_value() const;\n  void clear_value();\n  static const int kValueFieldNumber = 2;\n  private:\n  void _slow_mutable_value();\n  ::google::protobuf::Any* _slow_release_value();\n  public:\n  const ::google::protobuf::Any& value() const;\n  ::google::protobuf::Any* mutable_value();\n  ::google::protobuf::Any* release_value();\n  void set_allocated_value(::google::protobuf::Any* value);\n  ::google::protobuf::Any* 
unsafe_arena_release_value();\n  void unsafe_arena_set_allocated_value(\n      ::google::protobuf::Any* value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.Option)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  ::google::protobuf::internal::ArenaStringPtr name_;\n  ::google::protobuf::Any* value_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2ftype_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2ftype_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2ftype_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2ftype_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Option> Option_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// Type\n\n// optional string name = 1;\ninline void Type::clear_name() {\n  name_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline const ::std::string& Type::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Type.name)\n  return name_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Type::set_name(const ::std::string& value) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set:google.protobuf.Type.name)\n}\ninline void Type::set_name(const char* value) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),\n              GetArenaNoVirtual());\n  // 
@@protoc_insertion_point(field_set_char:google.protobuf.Type.name)\n}\ninline void Type::set_name(const char* value,\n    size_t size) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(\n      reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Type.name)\n}\ninline ::std::string* Type::mutable_name() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Type.name)\n  return name_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* Type::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Type.name)\n  \n  return name_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* Type::unsafe_arena_release_name() {\n  // @@protoc_insertion_point(field_unsafe_arena_release:google.protobuf.Type.name)\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  \n  return name_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      GetArenaNoVirtual());\n}\ninline void Type::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    \n  } else {\n    \n  }\n  name_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name,\n      GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Type.name)\n}\ninline void Type::unsafe_arena_set_allocated_name(\n    ::std::string* name) {\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  if (name != NULL) {\n    \n  } else {\n    \n  }\n  name_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      name, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.Type.name)\n}\n\n// repeated .google.protobuf.Field fields = 2;\ninline int Type::fields_size() const {\n  return 
fields_.size();\n}\ninline void Type::clear_fields() {\n  fields_.Clear();\n}\ninline const ::google::protobuf::Field& Type::fields(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Type.fields)\n  return fields_.Get(index);\n}\ninline ::google::protobuf::Field* Type::mutable_fields(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Type.fields)\n  return fields_.Mutable(index);\n}\ninline ::google::protobuf::Field* Type::add_fields() {\n  // @@protoc_insertion_point(field_add:google.protobuf.Type.fields)\n  return fields_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::Field >*\nType::mutable_fields() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.Type.fields)\n  return &fields_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Field >&\nType::fields() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.Type.fields)\n  return fields_;\n}\n\n// repeated string oneofs = 3;\ninline int Type::oneofs_size() const {\n  return oneofs_.size();\n}\ninline void Type::clear_oneofs() {\n  oneofs_.Clear();\n}\ninline const ::std::string& Type::oneofs(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Type.oneofs)\n  return oneofs_.Get(index);\n}\ninline ::std::string* Type::mutable_oneofs(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Type.oneofs)\n  return oneofs_.Mutable(index);\n}\ninline void Type::set_oneofs(int index, const ::std::string& value) {\n  // @@protoc_insertion_point(field_set:google.protobuf.Type.oneofs)\n  oneofs_.Mutable(index)->assign(value);\n}\ninline void Type::set_oneofs(int index, const char* value) {\n  oneofs_.Mutable(index)->assign(value);\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Type.oneofs)\n}\ninline void Type::set_oneofs(int index, const char* value, size_t size) {\n  oneofs_.Mutable(index)->assign(\n    reinterpret_cast<const 
char*>(value), size);\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Type.oneofs)\n}\ninline ::std::string* Type::add_oneofs() {\n  // @@protoc_insertion_point(field_add_mutable:google.protobuf.Type.oneofs)\n  return oneofs_.Add();\n}\ninline void Type::add_oneofs(const ::std::string& value) {\n  oneofs_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add:google.protobuf.Type.oneofs)\n}\ninline void Type::add_oneofs(const char* value) {\n  oneofs_.Add()->assign(value);\n  // @@protoc_insertion_point(field_add_char:google.protobuf.Type.oneofs)\n}\ninline void Type::add_oneofs(const char* value, size_t size) {\n  oneofs_.Add()->assign(reinterpret_cast<const char*>(value), size);\n  // @@protoc_insertion_point(field_add_pointer:google.protobuf.Type.oneofs)\n}\ninline const ::google::protobuf::RepeatedPtrField< ::std::string>&\nType::oneofs() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.Type.oneofs)\n  return oneofs_;\n}\ninline ::google::protobuf::RepeatedPtrField< ::std::string>*\nType::mutable_oneofs() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.Type.oneofs)\n  return &oneofs_;\n}\n\n// repeated .google.protobuf.Option options = 4;\ninline int Type::options_size() const {\n  return options_.size();\n}\ninline void Type::clear_options() {\n  options_.Clear();\n}\ninline const ::google::protobuf::Option& Type::options(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Type.options)\n  return options_.Get(index);\n}\ninline ::google::protobuf::Option* Type::mutable_options(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Type.options)\n  return options_.Mutable(index);\n}\ninline ::google::protobuf::Option* Type::add_options() {\n  // @@protoc_insertion_point(field_add:google.protobuf.Type.options)\n  return options_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >*\nType::mutable_options() {\n  // 
@@protoc_insertion_point(field_mutable_list:google.protobuf.Type.options)\n  return &options_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >&\nType::options() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.Type.options)\n  return options_;\n}\n\n// optional .google.protobuf.SourceContext source_context = 5;\ninline bool Type::has_source_context() const {\n  return this != internal_default_instance() && source_context_ != NULL;\n}\ninline void Type::clear_source_context() {\n  if (GetArenaNoVirtual() == NULL && source_context_ != NULL) delete source_context_;\n  source_context_ = NULL;\n}\ninline const ::google::protobuf::SourceContext& Type::source_context() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Type.source_context)\n  return source_context_ != NULL ? *source_context_\n                         : *::google::protobuf::SourceContext::internal_default_instance();\n}\ninline ::google::protobuf::SourceContext* Type::mutable_source_context() {\n  \n  if (source_context_ == NULL) {\n    _slow_mutable_source_context();\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Type.source_context)\n  return source_context_;\n}\ninline ::google::protobuf::SourceContext* Type::release_source_context() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Type.source_context)\n  \n  if (GetArenaNoVirtual() != NULL) {\n    return _slow_release_source_context();\n  } else {\n    ::google::protobuf::SourceContext* temp = source_context_;\n    source_context_ = NULL;\n    return temp;\n  }\n}\ninline  void Type::set_allocated_source_context(::google::protobuf::SourceContext* source_context) {\n  ::google::protobuf::Arena* message_arena = GetArenaNoVirtual();\n  if (message_arena == NULL) {\n    delete source_context_;\n  }\n  if (source_context != NULL) {\n    if (message_arena != NULL) {\n      message_arena->Own(source_context);\n    }\n  }\n  source_context_ = source_context;\n  
if (source_context) {\n    \n  } else {\n    \n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Type.source_context)\n}\n\n// optional .google.protobuf.Syntax syntax = 6;\ninline void Type::clear_syntax() {\n  syntax_ = 0;\n}\ninline ::google::protobuf::Syntax Type::syntax() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Type.syntax)\n  return static_cast< ::google::protobuf::Syntax >(syntax_);\n}\ninline void Type::set_syntax(::google::protobuf::Syntax value) {\n  \n  syntax_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Type.syntax)\n}\n\ninline const Type* Type::internal_default_instance() {\n  return &Type_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// Field\n\n// optional .google.protobuf.Field.Kind kind = 1;\ninline void Field::clear_kind() {\n  kind_ = 0;\n}\ninline ::google::protobuf::Field_Kind Field::kind() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Field.kind)\n  return static_cast< ::google::protobuf::Field_Kind >(kind_);\n}\ninline void Field::set_kind(::google::protobuf::Field_Kind value) {\n  \n  kind_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Field.kind)\n}\n\n// optional .google.protobuf.Field.Cardinality cardinality = 2;\ninline void Field::clear_cardinality() {\n  cardinality_ = 0;\n}\ninline ::google::protobuf::Field_Cardinality Field::cardinality() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Field.cardinality)\n  return static_cast< ::google::protobuf::Field_Cardinality >(cardinality_);\n}\ninline void Field::set_cardinality(::google::protobuf::Field_Cardinality value) {\n  \n  cardinality_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Field.cardinality)\n}\n\n// optional int32 number = 3;\ninline void Field::clear_number() {\n  number_ = 0;\n}\ninline ::google::protobuf::int32 Field::number() const {\n  // 
@@protoc_insertion_point(field_get:google.protobuf.Field.number)\n  return number_;\n}\ninline void Field::set_number(::google::protobuf::int32 value) {\n  \n  number_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Field.number)\n}\n\n// optional string name = 4;\ninline void Field::clear_name() {\n  name_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline const ::std::string& Field::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Field.name)\n  return name_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Field::set_name(const ::std::string& value) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set:google.protobuf.Field.name)\n}\ninline void Field::set_name(const char* value) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),\n              GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Field.name)\n}\ninline void Field::set_name(const char* value,\n    size_t size) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(\n      reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Field.name)\n}\ninline ::std::string* Field::mutable_name() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Field.name)\n  return name_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* Field::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Field.name)\n  \n  return name_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* Field::unsafe_arena_release_name() {\n  // 
@@protoc_insertion_point(field_unsafe_arena_release:google.protobuf.Field.name)\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  \n  return name_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      GetArenaNoVirtual());\n}\ninline void Field::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    \n  } else {\n    \n  }\n  name_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name,\n      GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Field.name)\n}\ninline void Field::unsafe_arena_set_allocated_name(\n    ::std::string* name) {\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  if (name != NULL) {\n    \n  } else {\n    \n  }\n  name_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      name, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.Field.name)\n}\n\n// optional string type_url = 6;\ninline void Field::clear_type_url() {\n  type_url_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline const ::std::string& Field::type_url() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Field.type_url)\n  return type_url_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Field::set_type_url(const ::std::string& value) {\n  \n  type_url_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set:google.protobuf.Field.type_url)\n}\ninline void Field::set_type_url(const char* value) {\n  \n  type_url_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),\n              GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Field.type_url)\n}\ninline void Field::set_type_url(const char* value,\n    size_t size) {\n  \n  
type_url_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(\n      reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Field.type_url)\n}\ninline ::std::string* Field::mutable_type_url() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Field.type_url)\n  return type_url_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* Field::release_type_url() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Field.type_url)\n  \n  return type_url_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* Field::unsafe_arena_release_type_url() {\n  // @@protoc_insertion_point(field_unsafe_arena_release:google.protobuf.Field.type_url)\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  \n  return type_url_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      GetArenaNoVirtual());\n}\ninline void Field::set_allocated_type_url(::std::string* type_url) {\n  if (type_url != NULL) {\n    \n  } else {\n    \n  }\n  type_url_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), type_url,\n      GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Field.type_url)\n}\ninline void Field::unsafe_arena_set_allocated_type_url(\n    ::std::string* type_url) {\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  if (type_url != NULL) {\n    \n  } else {\n    \n  }\n  type_url_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      type_url, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.Field.type_url)\n}\n\n// optional int32 oneof_index = 7;\ninline void Field::clear_oneof_index() {\n  oneof_index_ = 0;\n}\ninline ::google::protobuf::int32 
Field::oneof_index() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Field.oneof_index)\n  return oneof_index_;\n}\ninline void Field::set_oneof_index(::google::protobuf::int32 value) {\n  \n  oneof_index_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Field.oneof_index)\n}\n\n// optional bool packed = 8;\ninline void Field::clear_packed() {\n  packed_ = false;\n}\ninline bool Field::packed() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Field.packed)\n  return packed_;\n}\ninline void Field::set_packed(bool value) {\n  \n  packed_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Field.packed)\n}\n\n// repeated .google.protobuf.Option options = 9;\ninline int Field::options_size() const {\n  return options_.size();\n}\ninline void Field::clear_options() {\n  options_.Clear();\n}\ninline const ::google::protobuf::Option& Field::options(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Field.options)\n  return options_.Get(index);\n}\ninline ::google::protobuf::Option* Field::mutable_options(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Field.options)\n  return options_.Mutable(index);\n}\ninline ::google::protobuf::Option* Field::add_options() {\n  // @@protoc_insertion_point(field_add:google.protobuf.Field.options)\n  return options_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >*\nField::mutable_options() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.Field.options)\n  return &options_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >&\nField::options() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.Field.options)\n  return options_;\n}\n\n// optional string json_name = 10;\ninline void Field::clear_json_name() {\n  json_name_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), 
GetArenaNoVirtual());\n}\ninline const ::std::string& Field::json_name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Field.json_name)\n  return json_name_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Field::set_json_name(const ::std::string& value) {\n  \n  json_name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set:google.protobuf.Field.json_name)\n}\ninline void Field::set_json_name(const char* value) {\n  \n  json_name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),\n              GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Field.json_name)\n}\ninline void Field::set_json_name(const char* value,\n    size_t size) {\n  \n  json_name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(\n      reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Field.json_name)\n}\ninline ::std::string* Field::mutable_json_name() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Field.json_name)\n  return json_name_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* Field::release_json_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Field.json_name)\n  \n  return json_name_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* Field::unsafe_arena_release_json_name() {\n  // @@protoc_insertion_point(field_unsafe_arena_release:google.protobuf.Field.json_name)\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  \n  return json_name_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      GetArenaNoVirtual());\n}\ninline void 
Field::set_allocated_json_name(::std::string* json_name) {\n  if (json_name != NULL) {\n    \n  } else {\n    \n  }\n  json_name_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), json_name,\n      GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Field.json_name)\n}\ninline void Field::unsafe_arena_set_allocated_json_name(\n    ::std::string* json_name) {\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  if (json_name != NULL) {\n    \n  } else {\n    \n  }\n  json_name_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      json_name, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.Field.json_name)\n}\n\n// optional string default_value = 11;\ninline void Field::clear_default_value() {\n  default_value_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline const ::std::string& Field::default_value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Field.default_value)\n  return default_value_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Field::set_default_value(const ::std::string& value) {\n  \n  default_value_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set:google.protobuf.Field.default_value)\n}\ninline void Field::set_default_value(const char* value) {\n  \n  default_value_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),\n              GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Field.default_value)\n}\ninline void Field::set_default_value(const char* value,\n    size_t size) {\n  \n  default_value_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(\n      reinterpret_cast<const char*>(value), size), 
GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Field.default_value)\n}\ninline ::std::string* Field::mutable_default_value() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Field.default_value)\n  return default_value_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* Field::release_default_value() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Field.default_value)\n  \n  return default_value_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* Field::unsafe_arena_release_default_value() {\n  // @@protoc_insertion_point(field_unsafe_arena_release:google.protobuf.Field.default_value)\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  \n  return default_value_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      GetArenaNoVirtual());\n}\ninline void Field::set_allocated_default_value(::std::string* default_value) {\n  if (default_value != NULL) {\n    \n  } else {\n    \n  }\n  default_value_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), default_value,\n      GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Field.default_value)\n}\ninline void Field::unsafe_arena_set_allocated_default_value(\n    ::std::string* default_value) {\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  if (default_value != NULL) {\n    \n  } else {\n    \n  }\n  default_value_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      default_value, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.Field.default_value)\n}\n\ninline const Field* Field::internal_default_instance() {\n  return &Field_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// 
Enum\n\n// optional string name = 1;\ninline void Enum::clear_name() {\n  name_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline const ::std::string& Enum::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Enum.name)\n  return name_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Enum::set_name(const ::std::string& value) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set:google.protobuf.Enum.name)\n}\ninline void Enum::set_name(const char* value) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),\n              GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Enum.name)\n}\ninline void Enum::set_name(const char* value,\n    size_t size) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(\n      reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Enum.name)\n}\ninline ::std::string* Enum::mutable_name() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Enum.name)\n  return name_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* Enum::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Enum.name)\n  \n  return name_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* Enum::unsafe_arena_release_name() {\n  // @@protoc_insertion_point(field_unsafe_arena_release:google.protobuf.Enum.name)\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  \n  return name_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      GetArenaNoVirtual());\n}\ninline 
void Enum::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    \n  } else {\n    \n  }\n  name_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name,\n      GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Enum.name)\n}\ninline void Enum::unsafe_arena_set_allocated_name(\n    ::std::string* name) {\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  if (name != NULL) {\n    \n  } else {\n    \n  }\n  name_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      name, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.Enum.name)\n}\n\n// repeated .google.protobuf.EnumValue enumvalue = 2;\ninline int Enum::enumvalue_size() const {\n  return enumvalue_.size();\n}\ninline void Enum::clear_enumvalue() {\n  enumvalue_.Clear();\n}\ninline const ::google::protobuf::EnumValue& Enum::enumvalue(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Enum.enumvalue)\n  return enumvalue_.Get(index);\n}\ninline ::google::protobuf::EnumValue* Enum::mutable_enumvalue(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Enum.enumvalue)\n  return enumvalue_.Mutable(index);\n}\ninline ::google::protobuf::EnumValue* Enum::add_enumvalue() {\n  // @@protoc_insertion_point(field_add:google.protobuf.Enum.enumvalue)\n  return enumvalue_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumValue >*\nEnum::mutable_enumvalue() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.Enum.enumvalue)\n  return &enumvalue_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::EnumValue >&\nEnum::enumvalue() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.Enum.enumvalue)\n  return enumvalue_;\n}\n\n// repeated .google.protobuf.Option options = 3;\ninline int Enum::options_size() const {\n  return 
options_.size();\n}\ninline void Enum::clear_options() {\n  options_.Clear();\n}\ninline const ::google::protobuf::Option& Enum::options(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Enum.options)\n  return options_.Get(index);\n}\ninline ::google::protobuf::Option* Enum::mutable_options(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Enum.options)\n  return options_.Mutable(index);\n}\ninline ::google::protobuf::Option* Enum::add_options() {\n  // @@protoc_insertion_point(field_add:google.protobuf.Enum.options)\n  return options_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >*\nEnum::mutable_options() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.Enum.options)\n  return &options_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >&\nEnum::options() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.Enum.options)\n  return options_;\n}\n\n// optional .google.protobuf.SourceContext source_context = 4;\ninline bool Enum::has_source_context() const {\n  return this != internal_default_instance() && source_context_ != NULL;\n}\ninline void Enum::clear_source_context() {\n  if (GetArenaNoVirtual() == NULL && source_context_ != NULL) delete source_context_;\n  source_context_ = NULL;\n}\ninline const ::google::protobuf::SourceContext& Enum::source_context() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Enum.source_context)\n  return source_context_ != NULL ? 
*source_context_\n                         : *::google::protobuf::SourceContext::internal_default_instance();\n}\ninline ::google::protobuf::SourceContext* Enum::mutable_source_context() {\n  \n  if (source_context_ == NULL) {\n    _slow_mutable_source_context();\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Enum.source_context)\n  return source_context_;\n}\ninline ::google::protobuf::SourceContext* Enum::release_source_context() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Enum.source_context)\n  \n  if (GetArenaNoVirtual() != NULL) {\n    return _slow_release_source_context();\n  } else {\n    ::google::protobuf::SourceContext* temp = source_context_;\n    source_context_ = NULL;\n    return temp;\n  }\n}\ninline  void Enum::set_allocated_source_context(::google::protobuf::SourceContext* source_context) {\n  ::google::protobuf::Arena* message_arena = GetArenaNoVirtual();\n  if (message_arena == NULL) {\n    delete source_context_;\n  }\n  if (source_context != NULL) {\n    if (message_arena != NULL) {\n      message_arena->Own(source_context);\n    }\n  }\n  source_context_ = source_context;\n  if (source_context) {\n    \n  } else {\n    \n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Enum.source_context)\n}\n\n// optional .google.protobuf.Syntax syntax = 5;\ninline void Enum::clear_syntax() {\n  syntax_ = 0;\n}\ninline ::google::protobuf::Syntax Enum::syntax() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Enum.syntax)\n  return static_cast< ::google::protobuf::Syntax >(syntax_);\n}\ninline void Enum::set_syntax(::google::protobuf::Syntax value) {\n  \n  syntax_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Enum.syntax)\n}\n\ninline const Enum* Enum::internal_default_instance() {\n  return &Enum_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// EnumValue\n\n// optional string name = 1;\ninline void 
EnumValue::clear_name() {\n  name_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline const ::std::string& EnumValue::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.EnumValue.name)\n  return name_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void EnumValue::set_name(const ::std::string& value) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set:google.protobuf.EnumValue.name)\n}\ninline void EnumValue::set_name(const char* value) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),\n              GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_char:google.protobuf.EnumValue.name)\n}\ninline void EnumValue::set_name(const char* value,\n    size_t size) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(\n      reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.EnumValue.name)\n}\ninline ::std::string* EnumValue::mutable_name() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.EnumValue.name)\n  return name_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* EnumValue::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.EnumValue.name)\n  \n  return name_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* EnumValue::unsafe_arena_release_name() {\n  // @@protoc_insertion_point(field_unsafe_arena_release:google.protobuf.EnumValue.name)\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  \n  return name_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      
GetArenaNoVirtual());\n}\ninline void EnumValue::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    \n  } else {\n    \n  }\n  name_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name,\n      GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.EnumValue.name)\n}\ninline void EnumValue::unsafe_arena_set_allocated_name(\n    ::std::string* name) {\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  if (name != NULL) {\n    \n  } else {\n    \n  }\n  name_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      name, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.EnumValue.name)\n}\n\n// optional int32 number = 2;\ninline void EnumValue::clear_number() {\n  number_ = 0;\n}\ninline ::google::protobuf::int32 EnumValue::number() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.EnumValue.number)\n  return number_;\n}\ninline void EnumValue::set_number(::google::protobuf::int32 value) {\n  \n  number_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.EnumValue.number)\n}\n\n// repeated .google.protobuf.Option options = 3;\ninline int EnumValue::options_size() const {\n  return options_.size();\n}\ninline void EnumValue::clear_options() {\n  options_.Clear();\n}\ninline const ::google::protobuf::Option& EnumValue::options(int index) const {\n  // @@protoc_insertion_point(field_get:google.protobuf.EnumValue.options)\n  return options_.Get(index);\n}\ninline ::google::protobuf::Option* EnumValue::mutable_options(int index) {\n  // @@protoc_insertion_point(field_mutable:google.protobuf.EnumValue.options)\n  return options_.Mutable(index);\n}\ninline ::google::protobuf::Option* EnumValue::add_options() {\n  // @@protoc_insertion_point(field_add:google.protobuf.EnumValue.options)\n  return options_.Add();\n}\ninline ::google::protobuf::RepeatedPtrField< 
::google::protobuf::Option >*\nEnumValue::mutable_options() {\n  // @@protoc_insertion_point(field_mutable_list:google.protobuf.EnumValue.options)\n  return &options_;\n}\ninline const ::google::protobuf::RepeatedPtrField< ::google::protobuf::Option >&\nEnumValue::options() const {\n  // @@protoc_insertion_point(field_list:google.protobuf.EnumValue.options)\n  return options_;\n}\n\ninline const EnumValue* EnumValue::internal_default_instance() {\n  return &EnumValue_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// Option\n\n// optional string name = 1;\ninline void Option::clear_name() {\n  name_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline const ::std::string& Option::name() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Option.name)\n  return name_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void Option::set_name(const ::std::string& value) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set:google.protobuf.Option.name)\n}\ninline void Option::set_name(const char* value) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),\n              GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_char:google.protobuf.Option.name)\n}\ninline void Option::set_name(const char* value,\n    size_t size) {\n  \n  name_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(\n      reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.Option.name)\n}\ninline ::std::string* Option::mutable_name() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.Option.name)\n  return 
name_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* Option::release_name() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Option.name)\n  \n  return name_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* Option::unsafe_arena_release_name() {\n  // @@protoc_insertion_point(field_unsafe_arena_release:google.protobuf.Option.name)\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  \n  return name_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      GetArenaNoVirtual());\n}\ninline void Option::set_allocated_name(::std::string* name) {\n  if (name != NULL) {\n    \n  } else {\n    \n  }\n  name_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name,\n      GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Option.name)\n}\ninline void Option::unsafe_arena_set_allocated_name(\n    ::std::string* name) {\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  if (name != NULL) {\n    \n  } else {\n    \n  }\n  name_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      name, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.Option.name)\n}\n\n// optional .google.protobuf.Any value = 2;\ninline bool Option::has_value() const {\n  return this != internal_default_instance() && value_ != NULL;\n}\ninline void Option::clear_value() {\n  if (GetArenaNoVirtual() == NULL && value_ != NULL) delete value_;\n  value_ = NULL;\n}\ninline const ::google::protobuf::Any& Option::value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Option.value)\n  return value_ != NULL ? 
*value_\n                         : *::google::protobuf::Any::internal_default_instance();\n}\ninline ::google::protobuf::Any* Option::mutable_value() {\n  \n  if (value_ == NULL) {\n    _slow_mutable_value();\n  }\n  // @@protoc_insertion_point(field_mutable:google.protobuf.Option.value)\n  return value_;\n}\ninline ::google::protobuf::Any* Option::release_value() {\n  // @@protoc_insertion_point(field_release:google.protobuf.Option.value)\n  \n  if (GetArenaNoVirtual() != NULL) {\n    return _slow_release_value();\n  } else {\n    ::google::protobuf::Any* temp = value_;\n    value_ = NULL;\n    return temp;\n  }\n}\ninline  void Option::set_allocated_value(::google::protobuf::Any* value) {\n  ::google::protobuf::Arena* message_arena = GetArenaNoVirtual();\n  if (message_arena == NULL) {\n    delete value_;\n  }\n  if (value != NULL) {\n    if (message_arena != NULL) {\n      message_arena->Own(value);\n    }\n  }\n  value_ = value;\n  if (value) {\n    \n  } else {\n    \n  }\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.Option.value)\n}\n\ninline const Option* Option::internal_default_instance() {\n  return &Option_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace protobuf\n}  // namespace google\n\n#ifndef SWIG\nnamespace google {\nnamespace protobuf {\n\ntemplate <> struct is_proto_enum< ::google::protobuf::Field_Kind> : ::google::protobuf::internal::true_type {};\ntemplate <>\ninline const EnumDescriptor* GetEnumDescriptor< ::google::protobuf::Field_Kind>() {\n  return ::google::protobuf::Field_Kind_descriptor();\n}\ntemplate <> struct is_proto_enum< 
::google::protobuf::Field_Cardinality> : ::google::protobuf::internal::true_type {};\ntemplate <>\ninline const EnumDescriptor* GetEnumDescriptor< ::google::protobuf::Field_Cardinality>() {\n  return ::google::protobuf::Field_Cardinality_descriptor();\n}\ntemplate <> struct is_proto_enum< ::google::protobuf::Syntax> : ::google::protobuf::internal::true_type {};\ntemplate <>\ninline const EnumDescriptor* GetEnumDescriptor< ::google::protobuf::Syntax>() {\n  return ::google::protobuf::Syntax_descriptor();\n}\n\n}  // namespace protobuf\n}  // namespace google\n#endif  // SWIG\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_google_2fprotobuf_2ftype_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/type.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/source_context.proto\";\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption cc_enable_arenas = true;\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"TypeProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n// A protocol buffer message type.\nmessage Type {\n  // The fully qualified message name.\n  string name = 1;\n  // The list of fields.\n  repeated Field fields = 2;\n  // The list of types appearing in `oneof` definitions in this type.\n  repeated string oneofs = 3;\n  // The protocol buffer options.\n  repeated Option options = 4;\n  // The source context.\n  SourceContext source_context = 5;\n  // The source syntax.\n  Syntax syntax = 6;\n}\n\n// A single field of a message type.\nmessage Field {\n  // Basic field types.\n  enum Kind {\n    // Field type unknown.\n    TYPE_UNKNOWN        = 0;\n    // Field type double.\n    TYPE_DOUBLE         = 1;\n    // Field type float.\n    TYPE_FLOAT          = 2;\n    // Field type int64.\n    TYPE_INT64          = 3;\n    // Field type uint64.\n    TYPE_UINT64         = 4;\n    // Field type int32.\n    TYPE_INT32          = 5;\n    // Field type fixed64.\n    TYPE_FIXED64        = 6;\n    // Field type fixed32.\n    TYPE_FIXED32        = 7;\n    // Field type bool.\n    TYPE_BOOL      
     = 8;\n    // Field type string.\n    TYPE_STRING         = 9;\n    // Field type group. Proto2 syntax only, and deprecated.\n    TYPE_GROUP          = 10;\n    // Field type message.\n    TYPE_MESSAGE        = 11;\n    // Field type bytes.\n    TYPE_BYTES          = 12;\n    // Field type uint32.\n    TYPE_UINT32         = 13;\n    // Field type enum.\n    TYPE_ENUM           = 14;\n    // Field type sfixed32.\n    TYPE_SFIXED32       = 15;\n    // Field type sfixed64.\n    TYPE_SFIXED64       = 16;\n    // Field type sint32.\n    TYPE_SINT32         = 17;\n    // Field type sint64.\n    TYPE_SINT64         = 18;\n  };\n\n  // Whether a field is optional, required, or repeated.\n  enum Cardinality {\n    // For fields with unknown cardinality.\n    CARDINALITY_UNKNOWN = 0;\n    // For optional fields.\n    CARDINALITY_OPTIONAL = 1;\n    // For required fields. Proto2 syntax only.\n    CARDINALITY_REQUIRED = 2;\n    // For repeated fields.\n    CARDINALITY_REPEATED = 3;\n  };\n\n  // The field type.\n  Kind kind = 1;\n  // The field cardinality.\n  Cardinality cardinality = 2;\n  // The field number.\n  int32 number = 3;\n  // The field name.\n  string name = 4;\n  // The field type URL, without the scheme, for message or enumeration\n  // types. Example: `\"type.googleapis.com/google.protobuf.Timestamp\"`.\n  string type_url = 6;\n  // The index of the field type in `Type.oneofs`, for message or enumeration\n  // types. The first type has index 1; zero means the type is not in the list.\n  int32 oneof_index = 7;\n  // Whether to use alternative packed wire representation.\n  bool packed = 8;\n  // The protocol buffer options.\n  repeated Option options = 9;\n  // The field JSON name.\n  string json_name = 10;\n  // The string value of the default value of this field. 
Proto2 syntax only.\n  string default_value = 11;\n}\n\n// Enum type definition.\nmessage Enum {\n  // Enum type name.\n  string name = 1;\n  // Enum value definitions.\n  repeated EnumValue enumvalue = 2;\n  // Protocol buffer options.\n  repeated Option options = 3;\n  // The source context.\n  SourceContext source_context = 4;\n  // The source syntax.\n  Syntax syntax = 5;\n}\n\n// Enum value definition.\nmessage EnumValue {\n  // Enum value name.\n  string name = 1;\n  // Enum value number.\n  int32 number = 2;\n  // Protocol buffer options.\n  repeated Option options = 3;\n}\n\n// A protocol buffer option, which can be attached to a message, field,\n// enumeration, etc.\nmessage Option {\n  // The option's name. For example, `\"java_package\"`.\n  string name = 1;\n  // The option's value. For example, `\"com.google.protobuf\"`.\n  Any value = 2;\n}\n\n// The syntax in which a protocol buffer element is defined.\nenum Syntax {\n  // Syntax `proto2`.\n  SYNTAX_PROTO2 = 0;\n  // Syntax `proto3`.\n  SYNTAX_PROTO3 = 1;\n}\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/unknown_field_set.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// Contains classes used to keep track of unrecognized fields seen while\n// parsing a protocol message.\n\n#ifndef GOOGLE_PROTOBUF_UNKNOWN_FIELD_SET_H__\n#define GOOGLE_PROTOBUF_UNKNOWN_FIELD_SET_H__\n\n#include <assert.h>\n#include <string>\n#include <vector>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/logging.h>\n\nnamespace google {\nnamespace protobuf {\n  namespace io {\n    class CodedInputStream;         // coded_stream.h\n    class CodedOutputStream;        // coded_stream.h\n    class ZeroCopyInputStream;      // zero_copy_stream.h\n  }\n  namespace internal {\n    class InternalMetadataWithArena;  // metadata.h\n    class WireFormat;               // wire_format.h\n    class MessageSetFieldSkipperUsingCord;\n                                    // extension_set_heavy.cc\n  }\n\nclass Message;                      // message.h\nclass UnknownField;                 // below\n\n// An UnknownFieldSet contains fields that were encountered while parsing a\n// message but were not defined by its type.  Keeping track of these can be\n// useful, especially in that they may be written if the message is serialized\n// again without being cleared in between.  
This means that software which\n// simply receives messages and forwards them to other servers does not need\n// to be updated every time a new field is added to the message definition.\n//\n// To get the UnknownFieldSet attached to any message, call\n// Reflection::GetUnknownFields().\n//\n// This class is necessarily tied to the protocol buffer wire format, unlike\n// the Reflection interface which is independent of any serialization scheme.\nclass LIBPROTOBUF_EXPORT UnknownFieldSet {\n public:\n  UnknownFieldSet();\n  ~UnknownFieldSet();\n\n  // Remove all fields.\n  inline void Clear();\n\n  // Remove all fields and deallocate internal data objects\n  void ClearAndFreeMemory();\n\n  // Is this set empty?\n  inline bool empty() const;\n\n  // Merge the contents of some other UnknownFieldSet with this one.\n  void MergeFrom(const UnknownFieldSet& other);\n\n  // Similar to above, but this function will destroy the contents of other.\n  void MergeFromAndDestroy(UnknownFieldSet* other);\n\n  // Merge the contents an UnknownFieldSet with the UnknownFieldSet in\n  // *metadata, if there is one.  If *metadata doesn't have an UnknownFieldSet\n  // then add one to it and make it be a copy of the first arg.\n  static void MergeToInternalMetdata(\n      const UnknownFieldSet& other,\n      internal::InternalMetadataWithArena* metadata);\n\n  // Swaps the contents of some other UnknownFieldSet with this one.\n  inline void Swap(UnknownFieldSet* x);\n\n  // Computes (an estimate of) the total number of bytes currently used for\n  // storing the unknown fields in memory. Does NOT include\n  // sizeof(*this) in the calculation.\n  int SpaceUsedExcludingSelf() const;\n\n  // Version of SpaceUsed() including sizeof(*this).\n  int SpaceUsed() const;\n\n  // Returns the number of fields present in the UnknownFieldSet.\n  inline int field_count() const;\n  // Get a field in the set, where 0 <= index < field_count().  
The fields\n  // appear in the order in which they were added.\n  inline const UnknownField& field(int index) const;\n  // Get a mutable pointer to a field in the set, where\n  // 0 <= index < field_count().  The fields appear in the order in which\n  // they were added.\n  inline UnknownField* mutable_field(int index);\n\n  // Adding fields ---------------------------------------------------\n\n  void AddVarint(int number, uint64 value);\n  void AddFixed32(int number, uint32 value);\n  void AddFixed64(int number, uint64 value);\n  void AddLengthDelimited(int number, const string& value);\n  string* AddLengthDelimited(int number);\n  UnknownFieldSet* AddGroup(int number);\n\n  // Adds an unknown field from another set.\n  void AddField(const UnknownField& field);\n\n  // Delete fields with indices in the range [start .. start+num-1].\n  // Caution: implementation moves all fields with indices [start+num .. ].\n  void DeleteSubrange(int start, int num);\n\n  // Delete all fields with a specific field number. The order of left fields\n  // is preserved.\n  // Caution: implementation moves all fields after the first deleted field.\n  void DeleteByNumber(int number);\n\n  // Parsing helpers -------------------------------------------------\n  // These work exactly like the similarly-named methods of Message.\n\n  bool MergeFromCodedStream(io::CodedInputStream* input);\n  bool ParseFromCodedStream(io::CodedInputStream* input);\n  bool ParseFromZeroCopyStream(io::ZeroCopyInputStream* input);\n  bool ParseFromArray(const void* data, int size);\n  inline bool ParseFromString(const string& data) {\n    return ParseFromArray(data.data(), static_cast<int>(data.size()));\n  }\n\n  static const UnknownFieldSet* default_instance();\n private:\n  // For InternalMergeFrom\n  friend class UnknownField;\n  // Merges from other UnknownFieldSet. 
This method assumes, that this object\n  // is newly created and has fields_ == NULL;\n  void InternalMergeFrom(const UnknownFieldSet& other);\n  void ClearFallback();\n\n  // fields_ is either NULL, or a pointer to a vector that is *non-empty*. We\n  // never hold the empty vector because we want the 'do we have any unknown\n  // fields' check to be fast, and avoid a cache miss: the UFS instance gets\n  // embedded in the message object, so 'fields_ != NULL' tests a member\n  // variable hot in the cache, without the need to go touch a vector somewhere\n  // else in memory.\n  std::vector<UnknownField>* fields_;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(UnknownFieldSet);\n};\n\n// Represents one field in an UnknownFieldSet.\nclass LIBPROTOBUF_EXPORT UnknownField {\n public:\n  enum Type {\n    TYPE_VARINT,\n    TYPE_FIXED32,\n    TYPE_FIXED64,\n    TYPE_LENGTH_DELIMITED,\n    TYPE_GROUP\n  };\n\n  // The field's field number, as seen on the wire.\n  inline int number() const;\n\n  // The field type.\n  inline Type type() const;\n\n  // Accessors -------------------------------------------------------\n  // Each method works only for UnknownFields of the corresponding type.\n\n  inline uint64 varint() const;\n  inline uint32 fixed32() const;\n  inline uint64 fixed64() const;\n  inline const string& length_delimited() const;\n  inline const UnknownFieldSet& group() const;\n\n  inline void set_varint(uint64 value);\n  inline void set_fixed32(uint32 value);\n  inline void set_fixed64(uint64 value);\n  inline void set_length_delimited(const string& value);\n  inline string* mutable_length_delimited();\n  inline UnknownFieldSet* mutable_group();\n\n  // Serialization API.\n  // These methods can take advantage of the underlying implementation and may\n  // archieve a better performance than using getters to retrieve the data and\n  // do the serialization yourself.\n  void SerializeLengthDelimitedNoTag(io::CodedOutputStream* output) const;\n  uint8* 
SerializeLengthDelimitedNoTagToArray(uint8* target) const;\n\n  inline size_t GetLengthDelimitedSize() const;\n\n private:\n  friend class UnknownFieldSet;\n\n  // If this UnknownField contains a pointer, delete it.\n  void Delete();\n\n  // Reset all the underlying pointers to NULL. A special function to be only\n  // used while merging from a temporary UFS.\n  void Reset();\n\n  // Make a deep copy of any pointers in this UnknownField.\n  void DeepCopy(const UnknownField& other);\n\n  // Set the wire type of this UnknownField. Should only be used when this\n  // UnknownField is being created.\n  inline void SetType(Type type);\n\n  union LengthDelimited {\n    string* string_value_;\n  };\n\n  uint32 number_;\n  uint32 type_;\n  union {\n    uint64 varint_;\n    uint32 fixed32_;\n    uint64 fixed64_;\n    mutable union LengthDelimited length_delimited_;\n    UnknownFieldSet* group_;\n  };\n};\n\n// ===================================================================\n// inline implementations\n\ninline UnknownFieldSet::UnknownFieldSet() : fields_(NULL) {}\n\ninline UnknownFieldSet::~UnknownFieldSet() { Clear(); }\n\ninline void UnknownFieldSet::ClearAndFreeMemory() { Clear(); }\n\ninline void UnknownFieldSet::Clear() {\n  if (fields_ != NULL) {\n    ClearFallback();\n  }\n}\n\ninline bool UnknownFieldSet::empty() const {\n  // Invariant: fields_ is never empty if present.\n  return !fields_;\n}\n\ninline void UnknownFieldSet::Swap(UnknownFieldSet* x) {\n  std::swap(fields_, x->fields_);\n}\n\ninline int UnknownFieldSet::field_count() const {\n  return fields_ ? 
static_cast<int>(fields_->size()) : 0;\n}\ninline const UnknownField& UnknownFieldSet::field(int index) const {\n  GOOGLE_DCHECK(fields_ != NULL);\n  return (*fields_)[index];\n}\ninline UnknownField* UnknownFieldSet::mutable_field(int index) {\n  return &(*fields_)[index];\n}\n\ninline void UnknownFieldSet::AddLengthDelimited(\n    int number, const string& value) {\n  AddLengthDelimited(number)->assign(value);\n}\n\n\ninline int UnknownField::number() const { return number_; }\ninline UnknownField::Type UnknownField::type() const {\n  return static_cast<Type>(type_);\n}\n\ninline uint64 UnknownField::varint() const {\n  assert(type() == TYPE_VARINT);\n  return varint_;\n}\ninline uint32 UnknownField::fixed32() const {\n  assert(type() == TYPE_FIXED32);\n  return fixed32_;\n}\ninline uint64 UnknownField::fixed64() const {\n  assert(type() == TYPE_FIXED64);\n  return fixed64_;\n}\ninline const string& UnknownField::length_delimited() const {\n  assert(type() == TYPE_LENGTH_DELIMITED);\n  return *length_delimited_.string_value_;\n}\ninline const UnknownFieldSet& UnknownField::group() const {\n  assert(type() == TYPE_GROUP);\n  return *group_;\n}\n\ninline void UnknownField::set_varint(uint64 value) {\n  assert(type() == TYPE_VARINT);\n  varint_ = value;\n}\ninline void UnknownField::set_fixed32(uint32 value) {\n  assert(type() == TYPE_FIXED32);\n  fixed32_ = value;\n}\ninline void UnknownField::set_fixed64(uint64 value) {\n  assert(type() == TYPE_FIXED64);\n  fixed64_ = value;\n}\ninline void UnknownField::set_length_delimited(const string& value) {\n  assert(type() == TYPE_LENGTH_DELIMITED);\n  length_delimited_.string_value_->assign(value);\n}\ninline string* UnknownField::mutable_length_delimited() {\n  assert(type() == TYPE_LENGTH_DELIMITED);\n  return length_delimited_.string_value_;\n}\ninline UnknownFieldSet* UnknownField::mutable_group() {\n  assert(type() == TYPE_GROUP);\n  return group_;\n}\n\ninline size_t UnknownField::GetLengthDelimitedSize() const {\n  
GOOGLE_DCHECK_EQ(TYPE_LENGTH_DELIMITED, type());\n  return length_delimited_.string_value_->size();\n}\n\ninline void UnknownField::SetType(Type type) {\n  type_ = type;\n}\n\n\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_UNKNOWN_FIELD_SET_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/util/field_comparator.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Defines classes for field comparison.\n\n// Author: ksroka@google.com (Krzysztof Sroka)\n\n#ifndef GOOGLE_PROTOBUF_UTIL_FIELD_COMPARATOR_H__\n#define GOOGLE_PROTOBUF_UTIL_FIELD_COMPARATOR_H__\n\n#include <map>\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\nnamespace google {\nnamespace protobuf {\n\nclass Message;\nclass EnumValueDescriptor;\nclass FieldDescriptor;\n\nnamespace util {\n\nclass FieldContext;\n\n// Base class specifying the interface for comparing protocol buffer fields.\n// Regular users should consider using or subclassing DefaultFieldComparator\n// rather than this interface.\n// Currently, this does not support comparing unknown fields.\nclass LIBPROTOBUF_EXPORT FieldComparator {\n public:\n  FieldComparator();\n  virtual ~FieldComparator();\n\n  enum ComparisonResult {\n    SAME,       // Compared fields are equal. In case of comparing submessages,\n                // user should not recursively compare their contents.\n    DIFFERENT,  // Compared fields are different. In case of comparing\n                // submessages, user should not recursively compare their\n                // contents.\n    RECURSE,    // Compared submessages need to be compared recursively.\n                // FieldComparator does not specify the semantics of recursive\n                // comparison. 
This value should not be returned for simple\n                // values.\n  };\n\n  // Compares the values of a field in two protocol buffer messages.\n  // Returns SAME or DIFFERENT for simple values, and SAME, DIFFERENT or RECURSE\n  // for submessages. Returning RECURSE for fields not being submessages is\n  // illegal.\n  // In case the given FieldDescriptor points to a repeated field, the indices\n  // need to be valid. Otherwise they should be ignored.\n  //\n  // FieldContext contains information about the specific instances of the\n  // fields being compared, versus FieldDescriptor which only contains general\n  // type information about the fields.\n  virtual ComparisonResult Compare(\n      const google::protobuf::Message& message_1,\n      const google::protobuf::Message& message_2,\n      const google::protobuf::FieldDescriptor* field,\n      int index_1, int index_2,\n      const google::protobuf::util::FieldContext* field_context) = 0;\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(FieldComparator);\n};\n\n// Basic implementation of FieldComparator.  
Supports three modes of floating\n// point value comparison: exact, approximate using MathUtil::AlmostEqual\n// method, and arbitrarily precise using MathUtil::WithinFractionOrMargin.\nclass LIBPROTOBUF_EXPORT DefaultFieldComparator : public FieldComparator {\n public:\n  enum FloatComparison {\n     EXACT,               // Floats and doubles are compared exactly.\n     APPROXIMATE,         // Floats and doubles are compared using the\n                          // MathUtil::AlmostEqual method or\n                          // MathUtil::WithinFractionOrMargin method.\n     // TODO(ksroka): Introduce third value to differenciate uses of AlmostEqual\n     //               and WithinFractionOrMargin.\n  };\n\n  // Creates new comparator with float comparison set to EXACT.\n  DefaultFieldComparator();\n\n  virtual ~DefaultFieldComparator();\n\n  virtual ComparisonResult Compare(\n      const google::protobuf::Message& message_1,\n      const google::protobuf::Message& message_2,\n      const google::protobuf::FieldDescriptor* field,\n      int index_1, int index_2,\n      const google::protobuf::util::FieldContext* field_context);\n\n  void set_float_comparison(FloatComparison float_comparison) {\n    float_comparison_ = float_comparison;\n  }\n\n  FloatComparison float_comparison() const {\n    return float_comparison_;\n  }\n\n  // Set whether the FieldComparator shall treat floats or doubles that are both\n  // NaN as equal (treat_nan_as_equal = true) or as different\n  // (treat_nan_as_equal = false). 
Default is treating NaNs always as different.\n  void set_treat_nan_as_equal(bool treat_nan_as_equal) {\n    treat_nan_as_equal_ = treat_nan_as_equal;\n  }\n\n  bool treat_nan_as_equal() const {\n    return treat_nan_as_equal_;\n  }\n\n  // Sets the fraction and margin for the float comparison of a given field.\n  // Uses MathUtil::WithinFractionOrMargin to compare the values.\n  //\n  // REQUIRES: field->cpp_type == FieldDescriptor::CPPTYPE_DOUBLE or\n  //           field->cpp_type == FieldDescriptor::CPPTYPE_FLOAT\n  // REQUIRES: float_comparison_ == APPROXIMATE\n  void SetFractionAndMargin(const FieldDescriptor* field, double fraction,\n                            double margin);\n\n  // Sets the fraction and margin for the float comparison of all float and\n  // double fields, unless a field has been given a specific setting via\n  // SetFractionAndMargin() above.\n  // Uses MathUtil::WithinFractionOrMargin to compare the values.\n  //\n  // REQUIRES: float_comparison_ == APPROXIMATE\n  void SetDefaultFractionAndMargin(double fraction, double margin);\n\n private:\n  // Defines the tolerance for floating point comparison (fraction and margin).\n  struct Tolerance {\n    double fraction;\n    double margin;\n    Tolerance()\n        : fraction(0.0),\n          margin(0.0) {}\n    Tolerance(double f, double m)\n        : fraction(f),\n          margin(m) {}\n  };\n\n  // Defines the map to store the tolerances for floating point comparison.\n  typedef std::map<const FieldDescriptor*, Tolerance> ToleranceMap;\n\n  // The following methods get executed when CompareFields is called for the\n  // basic types (instead of submessages). They return true on success. 
One\n  // can use ResultFromBoolean() to convert that boolean to a ComparisonResult\n  // value.\n  bool CompareBool(const google::protobuf::FieldDescriptor& field,\n                   bool value_1, bool value_2) {\n    return value_1 == value_2;\n  }\n\n  // Uses CompareDoubleOrFloat, a helper function used by both CompareDouble and\n  // CompareFloat.\n  bool CompareDouble(const google::protobuf::FieldDescriptor& field,\n                     double value_1, double value_2);\n\n  bool CompareEnum(const google::protobuf::FieldDescriptor& field,\n                   const EnumValueDescriptor* value_1,\n                   const EnumValueDescriptor* value_2);\n\n  // Uses CompareDoubleOrFloat, a helper function used by both CompareDouble and\n  // CompareFloat.\n  bool CompareFloat(const google::protobuf::FieldDescriptor& field,\n                    float value_1, float value_2);\n\n  bool CompareInt32(const google::protobuf::FieldDescriptor& field,\n                    int32 value_1, int32 value_2) {\n    return value_1 == value_2;\n  }\n\n  bool CompareInt64(const google::protobuf::FieldDescriptor& field,\n                    int64 value_1, int64 value_2) {\n    return value_1 == value_2;\n  }\n\n  bool CompareString(const google::protobuf::FieldDescriptor& field,\n                     const string& value_1, const string& value_2) {\n    return value_1 == value_2;\n  }\n\n  bool CompareUInt32(const google::protobuf::FieldDescriptor& field,\n                     uint32 value_1, uint32 value_2) {\n    return value_1 == value_2;\n  }\n\n  bool CompareUInt64(const google::protobuf::FieldDescriptor& field,\n                     uint64 value_1, uint64 value_2) {\n    return value_1 == value_2;\n  }\n\n  // This function is used by CompareDouble and CompareFloat to avoid code\n  // duplication. 
There are no checks done against types of the values passed,\n  // but it's likely to fail if passed non-numeric arguments.\n  template<typename T>\n  bool CompareDoubleOrFloat(const google::protobuf::FieldDescriptor& field,\n                            T value_1, T value_2);\n\n  // Returns FieldComparator::SAME if boolean_result is true and\n  // FieldComparator::DIFFERENT otherwise.\n  ComparisonResult ResultFromBoolean(bool boolean_result) const;\n\n  FloatComparison float_comparison_;\n\n  // If true, floats and doubles that are both NaN are considered to be\n  // equal. Otherwise, two floats or doubles that are NaN are considered to be\n  // different.\n  bool treat_nan_as_equal_;\n\n  // True iff default_tolerance_ has been explicitly set.\n  //\n  // If false, then the default tolerance for flaots and doubles is that which\n  // is used by MathUtil::AlmostEquals().\n  bool has_default_tolerance_;\n\n  // Default float/double tolerance. Only meaningful if\n  // has_default_tolerance_ == true.\n  Tolerance default_tolerance_;\n\n  // Field-specific float/double tolerances, which override any default for\n  // those particular fields.\n  ToleranceMap map_tolerance_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(DefaultFieldComparator);\n};\n\n}  // namespace util\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_UTIL_FIELD_COMPARATOR_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/util/field_mask_util.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Defines utilities for the FieldMask well known type.\n\n#ifndef GOOGLE_PROTOBUF_UTIL_FIELD_MASK_UTIL_H__\n#define GOOGLE_PROTOBUF_UTIL_FIELD_MASK_UTIL_H__\n\n#include <string>\n\n#include <google/protobuf/descriptor.h>\n#include <google/protobuf/field_mask.pb.h>\n#include <google/protobuf/stubs/stringpiece.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace util {\n\nclass LIBPROTOBUF_EXPORT FieldMaskUtil {\n  typedef google::protobuf::FieldMask FieldMask;\n\n public:\n  // Converts FieldMask to/from string, formatted by separating each path\n  // with a comma (e.g., \"foo_bar,baz.quz\").\n  static string ToString(const FieldMask& mask);\n  static void FromString(StringPiece str, FieldMask* out);\n\n  // Converts FieldMask to/from string, formatted according to proto3 JSON\n  // spec for FieldMask (e.g., \"fooBar,baz.quz\"). 
If the field name is not\n  // style conforming (i.e., not snake_case when converted to string, or not\n  // camelCase when converted from string), the conversion will fail.\n  static bool ToJsonString(const FieldMask& mask, string* out);\n  static bool FromJsonString(StringPiece str, FieldMask* out);\n\n  // Checks whether the given path is valid for type T.\n  template <typename T>\n  static bool IsValidPath(StringPiece path) {\n    return InternalIsValidPath(T::descriptor(), path);\n  }\n\n  // Checks whether the given FieldMask is valid for type T.\n  template <typename T>\n  static bool IsValidFieldMask(const FieldMask& mask) {\n    for (int i = 0; i < mask.paths_size(); ++i) {\n      if (!InternalIsValidPath(T::descriptor(), mask.paths(i))) return false;\n    }\n    return true;\n  }\n\n  // Adds a path to FieldMask after checking whether the given path is valid.\n  // This method check-fails if the path is not a valid path for type T.\n  template <typename T>\n  static void AddPathToFieldMask(StringPiece path, FieldMask* mask) {\n    GOOGLE_CHECK(IsValidPath<T>(path));\n    mask->add_paths(path);\n  }\n\n  // Creates a FieldMask with all fields of type T. This FieldMask only\n  // contains fields of T but not any sub-message fields.\n  template <typename T>\n  static void GetFieldMaskForAllFields(FieldMask* out) {\n    InternalGetFieldMaskForAllFields(T::descriptor(), out);\n  }\n\n  // Converts a FieldMask to the canonical form. It will:\n  //   1. Remove paths that are covered by another path. For example,\n  //      \"foo.bar\" is covered by \"foo\" and will be removed if \"foo\"\n  //      is also in the FieldMask.\n  //   2. 
Sort all paths in alphabetical order.\n  static void ToCanonicalForm(const FieldMask& mask, FieldMask* out);\n\n  // Creates an union of two FieldMasks.\n  static void Union(const FieldMask& mask1, const FieldMask& mask2,\n                    FieldMask* out);\n\n  // Creates an intersection of two FieldMasks.\n  static void Intersect(const FieldMask& mask1, const FieldMask& mask2,\n                        FieldMask* out);\n\n  // Returns true if path is covered by the given FieldMask. Note that path\n  // \"foo.bar\" covers all paths like \"foo.bar.baz\", \"foo.bar.quz.x\", etc.\n  static bool IsPathInFieldMask(StringPiece path, const FieldMask& mask);\n\n  class MergeOptions;\n  // Merges fields specified in a FieldMask into another message. See the\n  // comments in MergeOptions regarding compatibility with\n  // google/protobuf/field_mask.proto\n  static void MergeMessageTo(const Message& source, const FieldMask& mask,\n                             const MergeOptions& options, Message* destination);\n\n  // Removes from 'message' any field that is not represented in the given\n  // FieldMask. If the FieldMask is empty, does nothing.\n  static void TrimMessage(const FieldMask& mask, Message* message);\n\n private:\n  friend class SnakeCaseCamelCaseTest;\n  // Converts a field name from snake_case to camelCase:\n  //   1. Every character after \"_\" will be converted to uppercase.\n  //   2. All \"_\"s are removed.\n  // The conversion will fail if:\n  //   1. The field name contains uppercase letters.\n  //   2. 
Any character after a \"_\" is not a lowercase letter.\n  // If the conversion succeeds, it's guaranteed that the resulted\n  // camelCase name will yield the original snake_case name when\n  // converted using CamelCaseToSnakeCase().\n  //\n  // Note that the input can contain characters not allowed in C identifiers.\n  // For example, \"foo_bar,baz_quz\" will be converted to \"fooBar,bazQuz\"\n  // successfully.\n  static bool SnakeCaseToCamelCase(StringPiece input, string* output);\n  // Converts a field name from camelCase to snake_case:\n  //   1. Every uppercase letter is converted to lowercase with a additional\n  //      preceding \"-\".\n  // The conversion will fail if:\n  //   1. The field name contains \"_\"s.\n  // If the conversion succeeds, it's guaranteed that the resulted\n  // snake_case name will yield the original camelCase name when\n  // converted using SnakeCaseToCamelCase().\n  //\n  // Note that the input can contain characters not allowed in C identifiers.\n  // For example, \"fooBar,bazQuz\" will be converted to \"foo_bar,baz_quz\"\n  // successfully.\n  static bool CamelCaseToSnakeCase(StringPiece input, string* output);\n\n  static bool InternalIsValidPath(const Descriptor* descriptor,\n                                  StringPiece path);\n\n  static void InternalGetFieldMaskForAllFields(const Descriptor* descriptor,\n                                               FieldMask* out);\n};\n\n// Note that for compatibility with the defined behaviour for FieldMask in\n// google/protobuf/field_mask.proto, set replace_message_fields and\n// replace_repeated_fields to 'true'. The default options are not compatible\n// with google/protobuf/field_mask.proto.\nclass LIBPROTOBUF_EXPORT FieldMaskUtil::MergeOptions {\n public:\n  MergeOptions()\n      : replace_message_fields_(false), replace_repeated_fields_(false) {}\n  // When merging message fields, the default behavior is to merge the\n  // content of two message fields together. 
If you instead want to use\n  // the field from the source message to replace the corresponding field\n  // in the destination message, set this flag to true. When this flag is set,\n  // specified submessage fields that are missing in source will be cleared in\n  // destination.\n  void set_replace_message_fields(bool value) {\n    replace_message_fields_ = value;\n  }\n  bool replace_message_fields() const { return replace_message_fields_; }\n  // The default merging behavior will append entries from the source\n  // repeated field to the destination repeated field. If you only want\n  // to keep the entries from the source repeated field, set this flag\n  // to true.\n  void set_replace_repeated_fields(bool value) {\n    replace_repeated_fields_ = value;\n  }\n  bool replace_repeated_fields() const { return replace_repeated_fields_; }\n\n private:\n  bool replace_message_fields_;\n  bool replace_repeated_fields_;\n};\n\n}  // namespace util\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_UTIL_FIELD_MASK_UTIL_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/util/json_util.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Utility functions to convert between protobuf binary format and proto3 JSON\n// format.\n#ifndef GOOGLE_PROTOBUF_UTIL_JSON_UTIL_H__\n#define GOOGLE_PROTOBUF_UTIL_JSON_UTIL_H__\n\n#include <google/protobuf/message.h>\n#include <google/protobuf/util/type_resolver.h>\n#include <google/protobuf/stubs/bytestream.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace io {\nclass ZeroCopyInputStream;\nclass ZeroCopyOutputStream;\n}  // namespace io\nnamespace util {\n\nstruct JsonParseOptions {\n  // Whether to ignore unknown JSON fields during parsing\n  bool ignore_unknown_fields;\n\n  JsonParseOptions() : ignore_unknown_fields(false) {}\n};\n\nstruct JsonPrintOptions {\n  // Whether to add spaces, line breaks and indentation to make the JSON output\n  // easy to read.\n  bool add_whitespace;\n  // Whether to always print primitive fields. By default primitive fields with\n  // default values will be omitted in JSON joutput. For example, an int32 field\n  // set to 0 will be omitted. Set this flag to true will override the default\n  // behavior and print primitive fields regardless of their values.\n  bool always_print_primitive_fields;\n\n  JsonPrintOptions() : add_whitespace(false),\n                       always_print_primitive_fields(false) {\n  }\n};\n\n// DEPRECATED. Use JsonPrintOptions instead.\ntypedef JsonPrintOptions JsonOptions;\n\n// Converts from protobuf message to JSON. 
This is a simple wrapper of\n// BinaryToJsonString(). It will use the DescriptorPool of the passed-in\n// message to resolve Any types.\nLIBPROTOBUF_EXPORT util::Status MessageToJsonString(const Message& message,\n                                   string* output,\n                                   const JsonOptions& options);\n\ninline util::Status MessageToJsonString(const Message& message,\n                                          string* output) {\n  return MessageToJsonString(message, output, JsonOptions());\n}\n\n// Converts from JSON to protobuf message. This is a simple wrapper of\n// JsonStringToBinary(). It will use the DescriptorPool of the passed-in\n// message to resolve Any types.\nLIBPROTOBUF_EXPORT util::Status JsonStringToMessage(const string& input,\n                                   Message* message,\n                                   const JsonParseOptions& options);\n\ninline util::Status JsonStringToMessage(const string& input,\n                                          Message* message) {\n  return JsonStringToMessage(input, message, JsonParseOptions());\n}\n\n// Converts protobuf binary data to JSON.\n// The conversion will fail if:\n//   1. TypeResolver fails to resolve a type.\n//   2. 
input is not valid protobuf wire format, or conflicts with the type\n//      information returned by TypeResolver.\n// Note that unknown fields will be discarded silently.\nLIBPROTOBUF_EXPORT util::Status BinaryToJsonStream(\n    TypeResolver* resolver,\n    const string& type_url,\n    io::ZeroCopyInputStream* binary_input,\n    io::ZeroCopyOutputStream* json_output,\n    const JsonPrintOptions& options);\n\ninline util::Status BinaryToJsonStream(\n    TypeResolver* resolver, const string& type_url,\n    io::ZeroCopyInputStream* binary_input,\n    io::ZeroCopyOutputStream* json_output) {\n  return BinaryToJsonStream(resolver, type_url, binary_input, json_output,\n                            JsonPrintOptions());\n}\n\nLIBPROTOBUF_EXPORT util::Status BinaryToJsonString(\n    TypeResolver* resolver,\n    const string& type_url,\n    const string& binary_input,\n    string* json_output,\n    const JsonPrintOptions& options);\n\ninline util::Status BinaryToJsonString(TypeResolver* resolver,\n                                         const string& type_url,\n                                         const string& binary_input,\n                                         string* json_output) {\n  return BinaryToJsonString(resolver, type_url, binary_input, json_output,\n                            JsonPrintOptions());\n}\n\n// Converts JSON data to protobuf binary format.\n// The conversion will fail if:\n//   1. TypeResolver fails to resolve a type.\n//   2. 
input is not valid JSON format, or conflicts with the type\n//      information returned by TypeResolver.\nLIBPROTOBUF_EXPORT util::Status JsonToBinaryStream(\n    TypeResolver* resolver,\n    const string& type_url,\n    io::ZeroCopyInputStream* json_input,\n    io::ZeroCopyOutputStream* binary_output,\n    const JsonParseOptions& options);\n\ninline util::Status JsonToBinaryStream(\n    TypeResolver* resolver,\n    const string& type_url,\n    io::ZeroCopyInputStream* json_input,\n    io::ZeroCopyOutputStream* binary_output) {\n  return JsonToBinaryStream(resolver, type_url, json_input, binary_output,\n                            JsonParseOptions());\n}\n\nLIBPROTOBUF_EXPORT util::Status JsonToBinaryString(\n    TypeResolver* resolver,\n    const string& type_url,\n    const string& json_input,\n    string* binary_output,\n    const JsonParseOptions& options);\n\ninline util::Status JsonToBinaryString(\n    TypeResolver* resolver,\n    const string& type_url,\n    const string& json_input,\n    string* binary_output) {\n  return JsonToBinaryString(resolver, type_url, json_input, binary_output,\n                            JsonParseOptions());\n}\n\nnamespace internal {\n// Internal helper class. Put in the header so we can write unit-tests for it.\nclass LIBPROTOBUF_EXPORT ZeroCopyStreamByteSink : public strings::ByteSink {\n public:\n  explicit ZeroCopyStreamByteSink(io::ZeroCopyOutputStream* stream)\n      : stream_(stream) {}\n\n  virtual void Append(const char* bytes, size_t len);\n\n private:\n  io::ZeroCopyOutputStream* stream_;\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ZeroCopyStreamByteSink);\n};\n}  // namespace internal\n\n}  // namespace util\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_UTIL_JSON_UTIL_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/util/message_differencer.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: jschorr@google.com (Joseph Schorr)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// This file defines static methods and classes for comparing Protocol\n// Messages.\n//\n// Aug. 
2008: Added Unknown Fields Comparison for messages.\n// Aug. 2009: Added different options to compare repeated fields.\n// Apr. 2010: Moved field comparison to FieldComparator.\n\n#ifndef GOOGLE_PROTOBUF_UTIL_MESSAGE_DIFFERENCER_H__\n#define GOOGLE_PROTOBUF_UTIL_MESSAGE_DIFFERENCER_H__\n\n#include <map>\n#include <set>\n#include <string>\n#include <vector>\n#include <google/protobuf/descriptor.h>  // FieldDescriptor\n#include <google/protobuf/message.h>  // Message\n#include <google/protobuf/unknown_field_set.h>\n#include <google/protobuf/util/field_comparator.h>\n\nnamespace google {\nnamespace protobuf {\n\nclass DynamicMessageFactory;\nclass FieldDescriptor;\n\nnamespace io {\nclass ZeroCopyOutputStream;\nclass Printer;\n}\n\nnamespace util {\n\nclass FieldContext;  // declared below MessageDifferencer\n\n// A basic differencer that can be used to determine\n// the differences between two specified Protocol Messages. If any differences\n// are found, the Compare method will return false, and any differencer reporter\n// specified via ReportDifferencesTo will have its reporting methods called (see\n// below for implementation of the report). Based off of the original\n// ProtocolDifferencer implementation in //net/proto/protocol-differencer.h\n// (Thanks Todd!).\n//\n// MessageDifferencer REQUIRES that compared messages be the same type, defined\n// as messages that share the same descriptor.  If not, the behavior of this\n// class is undefined.\n//\n// People disagree on what MessageDifferencer should do when asked to compare\n// messages with different descriptors.  Some people think it should always\n// return false.  Others expect it to try to look for similar fields and\n// compare them anyway -- especially if the descriptors happen to be identical.\n// If we chose either of these behaviors, some set of people would find it\n// surprising, and could end up writing code expecting the other behavior\n// without realizing their error.  
Therefore, we forbid that usage.\n//\n// This class is implemented based on the proto2 reflection. The performance\n// should be good enough for normal usages. However, for places where the\n// performance is extremely sensitive, there are several alternatives:\n// - Comparing serialized string\n// Downside: false negatives (there are messages that are the same but their\n// serialized strings are different).\n// - Equals code generator by compiler plugin (net/proto2/contrib/equals_plugin)\n// Downside: more generated code; maintenance overhead for the additional rule\n// (must be in sync with the original proto_library).\n//\n// Note on handling of google.protobuf.Any: MessageDifferencer automatically\n// unpacks Any::value into a Message and compares its individual fields.\n// Messages encoded in a repeated Any cannot be compared using TreatAsMap.\n//\n//\n// Note on thread-safety: MessageDifferencer is *not* thread-safe. You need to\n// guard it with a lock to use the same MessageDifferencer instance from\n// multiple threads. Note that it's fine to call static comparison methods\n// (like MessageDifferencer::Equals) concurrently.\nclass LIBPROTOBUF_EXPORT MessageDifferencer {\n public:\n  // Determines whether the supplied messages are equal. Equality is defined as\n  // all fields within the two messages being set to the same value. Primitive\n  // fields and strings are compared by value while embedded messages/groups\n  // are compared as if via a recursive call. Use IgnoreField() and Compare()\n  // if some fields should be ignored in the comparison.\n  //\n  // This method REQUIRES that the two messages have the same\n  // Descriptor (message1.GetDescriptor() == message2.GetDescriptor()).\n  static bool Equals(const Message& message1, const Message& message2);\n\n  // Determines whether the supplied messages are equivalent. Equivalency is\n  // defined as all fields within the two messages having the same value. 
This\n  // differs from the Equals method above in that fields with default values\n  // are considered set to said value automatically. For details on how default\n  // values are defined for each field type, see http://shortn/_x2Gv6XFrWt.\n  // Also, Equivalent() ignores unknown fields. Use IgnoreField() and Compare()\n  // if some fields should be ignored in the comparison.\n  //\n  // This method REQUIRES that the two messages have the same\n  // Descriptor (message1.GetDescriptor() == message2.GetDescriptor()).\n  static bool Equivalent(const Message& message1, const Message& message2);\n\n  // Determines whether the supplied messages are approximately equal.\n  // Approximate equality is defined as all fields within the two messages\n  // being approximately equal.  Primitive (non-float) fields and strings are\n  // compared by value, floats are compared using MathUtil::AlmostEquals() and\n  // embedded messages/groups are compared as if via a recursive call. Use\n  // IgnoreField() and Compare() if some fields should be ignored in the\n  // comparison.\n  //\n  // This method REQUIRES that the two messages have the same\n  // Descriptor (message1.GetDescriptor() == message2.GetDescriptor()).\n  static bool ApproximatelyEquals(const Message& message1,\n                                  const Message& message2);\n\n  // Determines whether the supplied messages are approximately equivalent.\n  // Approximate equivalency is defined as all fields within the two messages\n  // being approximately equivalent. As in\n  // MessageDifferencer::ApproximatelyEquals, primitive (non-float) fields and\n  // strings are compared by value, floats are compared using\n  // MathUtil::AlmostEquals() and embedded messages/groups are compared as if\n  // via a recursive call. However, fields with default values are considered\n  // set to said value, as per MessageDiffencer::Equivalent. 
Use IgnoreField()\n  // and Compare() if some fields should be ignored in the comparison.\n  //\n  // This method REQUIRES that the two messages have the same\n  // Descriptor (message1.GetDescriptor() == message2.GetDescriptor()).\n  static bool ApproximatelyEquivalent(const Message& message1,\n                                      const Message& message2);\n\n  // Identifies an individual field in a message instance.  Used for field_path,\n  // below.\n  struct SpecificField {\n    // For known fields, \"field\" is filled in and \"unknown_field_number\" is -1.\n    // For unknown fields, \"field\" is NULL, \"unknown_field_number\" is the field\n    // number, and \"unknown_field_type\" is its type.\n    const FieldDescriptor* field;\n    int unknown_field_number;\n    UnknownField::Type unknown_field_type;\n\n    // If this a repeated field, \"index\" is the index within it.  For unknown\n    // fields, this is the index of the field among all unknown fields of the\n    // same field number and type.\n    int index;\n\n    // If \"field\" is a repeated field which is being treated as a map or\n    // a set (see TreatAsMap() and TreatAsSet(), below), new_index indicates\n    // the index the position to which the element has moved.  This only\n    // applies to ReportMoved() and (in the case of TreatAsMap())\n    // ReportModified().  In all other cases, \"new_index\" will have the same\n    // value as \"index\".\n    int new_index;\n\n    // For unknown fields, these are the pointers to the UnknownFieldSet\n    // containing the unknown fields. In certain cases (e.g. proto1's\n    // MessageSet, or nested groups of unknown fields), these may differ from\n    // the messages' internal UnknownFieldSets.\n    const UnknownFieldSet* unknown_field_set1;\n    const UnknownFieldSet* unknown_field_set2;\n\n    // For unknown fields, these are the index of the field within the\n    // UnknownFieldSets. 
One or the other will be -1 when\n    // reporting an addition or deletion.\n    int unknown_field_index1;\n    int unknown_field_index2;\n\n    SpecificField()\n        : field(NULL),\n          unknown_field_number(-1),\n          index(-1),\n          new_index(-1),\n          unknown_field_set1(NULL),\n          unknown_field_set2(NULL),\n          unknown_field_index1(-1),\n          unknown_field_index2(-1) {}\n  };\n\n  // Abstract base class from which all MessageDifferencer\n  // reporters derive. The five Report* methods below will be called when\n  // a field has been added, deleted, modified, moved, or matched. The third\n  // argument is a vector of FieldDescriptor pointers which describes the chain\n  // of fields that was taken to find the current field. For example, for a\n  // field found in an embedded message, the vector will contain two\n  // FieldDescriptors. The first will be the field of the embedded message\n  // itself and the second will be the actual field in the embedded message\n  // that was added/deleted/modified.\n  class LIBPROTOBUF_EXPORT Reporter {\n   public:\n    Reporter();\n    virtual ~Reporter();\n\n    // Reports that a field has been added into Message2.\n    virtual void ReportAdded(\n        const Message& message1, const Message& message2,\n        const std::vector<SpecificField>& field_path) = 0;\n\n    // Reports that a field has been deleted from Message1.\n    virtual void ReportDeleted(\n        const Message& message1,\n        const Message& message2,\n        const std::vector<SpecificField>& field_path) = 0;\n\n    // Reports that the value of a field has been modified.\n    virtual void ReportModified(\n        const Message& message1,\n        const Message& message2,\n        const std::vector<SpecificField>& field_path) = 0;\n\n    // Reports that a repeated field has been moved to another location.  This\n    // only applies when using TreatAsSet or TreatAsMap()  -- see below. 
Also\n    // note that for any given field, ReportModified and ReportMoved are\n    // mutually exclusive. If a field has been both moved and modified, then\n    // only ReportModified will be called.\n    virtual void ReportMoved(\n        const Message& message1,\n        const Message& message2,\n        const std::vector<SpecificField>& field_path) { }\n\n    // Reports that two fields match. Useful for doing side-by-side diffs.\n    // This function is mutually exclusive with ReportModified and ReportMoved.\n    // Note that you must call set_report_matches(true) before calling Compare\n    // to make use of this function.\n    virtual void ReportMatched(\n        const Message& message1,\n        const Message& message2,\n        const std::vector<SpecificField>& field_path) { }\n\n    // Reports that two fields would have been compared, but the\n    // comparison has been skipped because the field was marked as\n    // 'ignored' using IgnoreField().  This function is mutually\n    // exclusive with all the other Report() functions.\n    //\n    // The contract of ReportIgnored is slightly different than the\n    // other Report() functions, in that |field_path.back().index| is\n    // always equal to -1, even if the last field is repeated. This is\n    // because while the other Report() functions indicate where in a\n    // repeated field the action (Addition, Deletion, etc...)\n    // happened, when a repeated field is 'ignored', the differencer\n    // simply calls ReportIgnored on the repeated field as a whole and\n    // moves on without looking at its individual elements.\n    //\n    // Furthermore, ReportIgnored() does not indicate whether the\n    // fields were in fact equal or not, as Compare() does not inspect\n    // these fields at all. 
It is up to the Reporter to decide whether\n    // the fields are equal or not (perhaps with a second call to\n    // Compare()), if it cares.\n    virtual void ReportIgnored(\n        const Message& message1,\n        const Message& message2,\n        const std::vector<SpecificField>& field_path) { }\n\n    // Report that an unknown field is ignored. (see comment above).\n    // Note this is a different function since the last SpecificField in field\n    // path has a null field.  This could break existing Reporter.\n    virtual void ReportUnknownFieldIgnored(\n        const Message& message1, const Message& message2,\n        const std::vector<SpecificField>& field_path) {}\n\n   private:\n    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(Reporter);\n  };\n\n  // MapKeyComparator is used to determine if two elements have the same key\n  // when comparing elements of a repeated field as a map.\n  class LIBPROTOBUF_EXPORT MapKeyComparator {\n   public:\n    MapKeyComparator();\n    virtual ~MapKeyComparator();\n\n    virtual bool IsMatch(\n        const Message& message1,\n        const Message& message2,\n        const std::vector<SpecificField>& parent_fields) const {\n      GOOGLE_CHECK(false) << \"IsMatch() is not implemented.\";\n      return false;\n    }\n\n   private:\n    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MapKeyComparator);\n  };\n\n  // Abstract base class from which all IgnoreCriteria derive.\n  // By adding IgnoreCriteria more complex ignore logic can be implemented.\n  // IgnoreCriteria are registed with AddIgnoreCriteria. 
For each compared\n  // field IsIgnored is called on each added IgnoreCriteria until one returns\n  // true or all return false.\n  // IsIgnored is called for fields where at least one side has a value.\n  class LIBPROTOBUF_EXPORT IgnoreCriteria {\n   public:\n    IgnoreCriteria();\n    virtual ~IgnoreCriteria();\n\n    // Returns true if the field should be ignored.\n    virtual bool IsIgnored(\n        const Message& message1,\n        const Message& message2,\n        const FieldDescriptor* field,\n        const std::vector<SpecificField>& parent_fields) = 0;\n\n    // Returns true if the unknown field should be ignored.\n    // Note: This will be called for unknown fields as well in which case\n    //       field.field will be null.\n    virtual bool IsUnknownFieldIgnored(\n        const Message& message1, const Message& message2,\n        const SpecificField& field,\n        const std::vector<SpecificField>& parent_fields) {\n      return false;\n    }\n  };\n\n  // To add a Reporter, construct default here, then use ReportDifferencesTo or\n  // ReportDifferencesToString.\n  explicit MessageDifferencer();\n\n  ~MessageDifferencer();\n\n  enum MessageFieldComparison {\n    EQUAL,       // Fields must be present in both messages\n                 // for the messages to be considered the same.\n    EQUIVALENT,  // Fields with default values are considered set\n                 // for comparison purposes even if not explicitly\n                 // set in the messages themselves.  Unknown fields\n                 // are ignored.\n  };\n\n  enum Scope {\n    FULL,    // All fields of both messages are considered in the comparison.\n    PARTIAL  // Only fields present in the first message are considered; fields\n             // set only in the second message will be skipped during\n             // comparison.\n  };\n\n  // DEPRECATED. 
Use FieldComparator::FloatComparison instead.\n  enum FloatComparison {\n    EXACT,       // Floats and doubles are compared exactly.\n    APPROXIMATE  // Floats and doubles are compared using the\n                 // MathUtil::AlmostEquals method.\n  };\n\n  enum RepeatedFieldComparison {\n    AS_LIST,     // Repeated fields are compared in order.  Differing values at\n                 // the same index are reported using ReportModified().  If the\n                 // repeated fields have different numbers of elements, the\n                 // unpaired elements are reported using ReportAdded() or\n                 // ReportDeleted().\n    AS_SET,      // Treat all the repeated fields as sets by default.\n                 // See TreatAsSet(), as below.\n  };\n\n  // The elements of the given repeated field will be treated as a set for\n  // diffing purposes, so different orderings of the same elements will be\n  // considered equal.  Elements which are present on both sides of the\n  // comparison but which have changed position will be reported with\n  // ReportMoved().  Elements which only exist on one side or the other are\n  // reported with ReportAdded() and ReportDeleted() regardless of their\n  // positions.  ReportModified() is never used for this repeated field.  If\n  // the only differences between the compared messages is that some fields\n  // have been moved, then the comparison returns true.\n  //\n  // If the scope of comparison is set to PARTIAL, then in addition to what's\n  // above, extra values added to repeated fields of the second message will\n  // not cause the comparison to fail.\n  //\n  // Note that set comparison is currently O(k * n^2) (where n is the total\n  // number of elements, and k is the average size of each element). In theory\n  // it could be made O(n * k) with a more complex hashing implementation. 
Feel\n  // free to contribute one if the current implementation is too slow for you.\n  // If partial matching is also enabled, the time complexity will be O(k * n^2\n  // + n^3) in which n^3 is the time complexity of the maximum matching\n  // algorithm.\n  //\n  // REQUIRES:  field->is_repeated() and field not registered with TreatAsList\n  void TreatAsSet(const FieldDescriptor* field);\n\n  // The elements of the given repeated field will be treated as a list for\n  // diffing purposes, so different orderings of the same elements will NOT be\n  // considered equal.\n  //\n  // REQUIRED: field->is_repeated() and field not registered with TreatAsSet\n  void TreatAsList(const FieldDescriptor* field);\n\n  // The elements of the given repeated field will be treated as a map for\n  // diffing purposes, with |key| being the map key.  Thus, elements with the\n  // same key will be compared even if they do not appear at the same index.\n  // Differences are reported similarly to TreatAsSet(), except that\n  // ReportModified() is used to report elements with the same key but\n  // different values.  Note that if an element is both moved and modified,\n  // only ReportModified() will be called.  As with TreatAsSet, if the only\n  // differences between the compared messages is that some fields have been\n  // moved, then the comparison returns true. See TreatAsSet for notes on\n  // performance.\n  //\n  // REQUIRES:  field->is_repeated()\n  // REQUIRES:  field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE\n  // REQUIRES:  key->containing_type() == field->message_type()\n  void TreatAsMap(const FieldDescriptor* field, const FieldDescriptor* key);\n  // Same as TreatAsMap except that this method will use multiple fields as\n  // the key in comparison. All specified fields in 'key_fields' should be\n  // present in the compared elements. Two elements will be treated as having\n  // the same key iff they have the same value for every specified field. 
There\n  // are two steps in the comparison process. The first one is key matching.\n  // Every element from one message will be compared to every element from\n  // the other message. Only fields in 'key_fields' are compared in this step\n  // to decide if two elements have the same key. The second step is value\n  // comparison. Those pairs of elements with the same key (with equal value\n  // for every field in 'key_fields') will be compared in this step.\n  // Time complexity of the first step is O(s * m * n ^ 2) where s is the\n  // average size of the fields specified in 'key_fields', m is the number of\n  // fields in 'key_fields' and n is the number of elements. If partial\n  // matching is enabled, an extra O(n^3) will be incured by the maximum\n  // matching algorithm. The second step is O(k * n) where k is the average\n  // size of each element.\n  void TreatAsMapWithMultipleFieldsAsKey(\n      const FieldDescriptor* field,\n      const std::vector<const FieldDescriptor*>& key_fields);\n  // Same as TreatAsMapWithMultipleFieldsAsKey, except that each of the field\n  // do not necessarily need to be a direct subfield. 
Each element in\n  // key_field_paths indicate a path from the message being compared, listing\n  // successive subfield to reach the key field.\n  //\n  // REQUIRES:\n  //   for key_field_path in key_field_paths:\n  //     key_field_path[0]->containing_type() == field->message_type()\n  //     for i in [0, key_field_path.size() - 1):\n  //       key_field_path[i+1]->containing_type() ==\n  //           key_field_path[i]->message_type()\n  //       key_field_path[i]->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE\n  //       !key_field_path[i]->is_repeated()\n  void TreatAsMapWithMultipleFieldPathsAsKey(\n      const FieldDescriptor* field,\n      const std::vector<std::vector<const FieldDescriptor*> >& key_field_paths);\n\n  // Uses a custom MapKeyComparator to determine if two elements have the same\n  // key when comparing a repeated field as a map.\n  // The caller is responsible to delete the key_comparator.\n  // This method varies from TreatAsMapWithMultipleFieldsAsKey only in the\n  // first key matching step. Rather than comparing some specified fields, it\n  // will invoke the IsMatch method of the given 'key_comparator' to decide if\n  // two elements have the same key.\n  void TreatAsMapUsingKeyComparator(\n      const FieldDescriptor* field,\n      const MapKeyComparator* key_comparator);\n\n  // Add a custom ignore criteria that is evaluated in addition to the\n  // ignored fields added with IgnoreField.\n  // Takes ownership of ignore_criteria.\n  void AddIgnoreCriteria(IgnoreCriteria* ignore_criteria);\n\n  // Indicates that any field with the given descriptor should be\n  // ignored for the purposes of comparing two messages. This applies\n  // to fields nested in the message structure as well as top level\n  // ones. 
When the MessageDifferencer encounters an ignored field,\n  // ReportIgnored is called on the reporter, if one is specified.\n  //\n  // The only place where the field's 'ignored' status is not applied is when\n  // it is being used as a key in a field passed to TreatAsMap or is one of\n  // the fields passed to TreatAsMapWithMultipleFieldsAsKey.\n  // In this case it is compared in key matching but after that it's ignored\n  // in value comparison.\n  void IgnoreField(const FieldDescriptor* field);\n\n  // Sets the field comparator used to determine differences between protocol\n  // buffer fields. By default it's set to a DefaultFieldComparator instance.\n  // MessageDifferencer doesn't take ownership over the passed object.\n  // Note that this method must be called before Compare for the comparator to\n  // be used.\n  void set_field_comparator(FieldComparator* comparator);\n\n  // DEPRECATED. Pass a DefaultFieldComparator instance instead.\n  // Sets the fraction and margin for the float comparison of a given field.\n  // Uses MathUtil::WithinFractionOrMargin to compare the values.\n  // NOTE: this method does nothing if differencer's field comparator has been\n  //       set to a custom object.\n  //\n  // REQUIRES: field->cpp_type == FieldDescriptor::CPPTYPE_DOUBLE or\n  //           field->cpp_type == FieldDescriptor::CPPTYPE_FLOAT\n  // REQUIRES: float_comparison_ == APPROXIMATE\n  void SetFractionAndMargin(const FieldDescriptor* field, double fraction,\n                            double margin);\n\n  // Sets the type of comparison (as defined in the MessageFieldComparison\n  // enumeration above) that is used by this differencer when determining how\n  // to compare fields in messages.\n  void set_message_field_comparison(MessageFieldComparison comparison);\n\n  // Tells the differencer whether or not to report matches. This method must\n  // be called before Compare. 
The default for a new differencer is false.\n  void set_report_matches(bool report_matches) {\n    report_matches_ = report_matches;\n  }\n\n  // Sets the scope of the comparison (as defined in the Scope enumeration\n  // above) that is used by this differencer when determining which fields to\n  // compare between the messages.\n  void set_scope(Scope scope);\n\n  // Returns the current scope used by this differencer.\n  Scope scope();\n\n  // DEPRECATED. Pass a DefaultFieldComparator instance instead.\n  // Sets the type of comparison (as defined in the FloatComparison enumeration\n  // above) that is used by this differencer when comparing float (and double)\n  // fields in messages.\n  // NOTE: this method does nothing if differencer's field comparator has been\n  //       set to a custom object.\n  void set_float_comparison(FloatComparison comparison);\n\n  // Sets the type of comparison for repeated field (as defined in the\n  // RepeatedFieldComparison enumeration above) that is used by this\n  // differencer when compare repeated fields in messages.\n  void set_repeated_field_comparison(RepeatedFieldComparison comparison);\n\n  // Compares the two specified messages, returning true if they are the same,\n  // false otherwise. 
If this method returns false, any changes between the\n  // two messages will be reported if a Reporter was specified via\n  // ReportDifferencesTo (see also ReportDifferencesToString).\n  //\n  // This method REQUIRES that the two messages have the same\n  // Descriptor (message1.GetDescriptor() == message2.GetDescriptor()).\n  bool Compare(const Message& message1, const Message& message2);\n\n  // Same as above, except comparing only the list of fields specified by the\n  // two vectors of FieldDescriptors.\n  bool CompareWithFields(\n      const Message& message1, const Message& message2,\n      const std::vector<const FieldDescriptor*>& message1_fields,\n      const std::vector<const FieldDescriptor*>& message2_fields);\n\n  // Automatically creates a reporter that will output the differences\n  // found (if any) to the specified output string pointer. Note that this\n  // method must be called before Compare.\n  void ReportDifferencesToString(string* output);\n\n  // Tells the MessageDifferencer to report differences via the specified\n  // reporter. Note that this method must be called before Compare for\n  // the reporter to be used. It is the responsibility of the caller to delete\n  // this object.\n  // If the provided pointer equals NULL, the MessageDifferencer stops reporting\n  // differences to any previously set reporters or output strings.\n  void ReportDifferencesTo(Reporter* reporter);\n\n  // An implementation of the MessageDifferencer Reporter that outputs\n  // any differences found in human-readable form to the supplied\n  // ZeroCopyOutputStream or Printer. If a printer is used, the delimiter\n  // *must* be '$'.\n  //\n  // WARNING: this reporter does not necessarily flush its output until it is\n  // destroyed. As a result, it is not safe to assume the output is valid or\n  // complete until after you destroy the reporter. 
For example, if you use a\n  // StreamReporter to write to a StringOutputStream, the target string may\n  // contain uninitialized data until the reporter is destroyed.\n  class LIBPROTOBUF_EXPORT StreamReporter : public Reporter {\n   public:\n    explicit StreamReporter(io::ZeroCopyOutputStream* output);\n    explicit StreamReporter(io::Printer* printer);  // delimiter '$'\n    virtual ~StreamReporter();\n\n    // When set to true, the stream reporter will also output aggregates nodes\n    // (i.e. messages and groups) whose subfields have been modified. When\n    // false, will only report the individual subfields. Defaults to false.\n    void set_report_modified_aggregates(bool report) {\n      report_modified_aggregates_ = report;\n    }\n\n    // The following are implementations of the methods described above.\n    virtual void ReportAdded(const Message& message1, const Message& message2,\n                             const std::vector<SpecificField>& field_path);\n\n    virtual void ReportDeleted(const Message& message1,\n                               const Message& message2,\n                               const std::vector<SpecificField>& field_path);\n\n    virtual void ReportModified(const Message& message1,\n                                const Message& message2,\n                                const std::vector<SpecificField>& field_path);\n\n    virtual void ReportMoved(const Message& message1,\n                             const Message& message2,\n                             const std::vector<SpecificField>& field_path);\n\n    virtual void ReportMatched(const Message& message1,\n                               const Message& message2,\n                               const std::vector<SpecificField>& field_path);\n\n    virtual void ReportIgnored(const Message& message1,\n                               const Message& message2,\n                               const std::vector<SpecificField>& field_path);\n\n    virtual void 
ReportUnknownFieldIgnored(\n        const Message& message1, const Message& message2,\n        const std::vector<SpecificField>& field_path);\n\n   protected:\n    // Prints the specified path of fields to the buffer.\n    virtual void PrintPath(const std::vector<SpecificField>& field_path,\n                           bool left_side);\n\n    // Prints the value of fields to the buffer.  left_side is true if the\n    // given message is from the left side of the comparison, false if it\n    // was the right.  This is relevant only to decide whether to follow\n    // unknown_field_index1 or unknown_field_index2 when an unknown field\n    // is encountered in field_path.\n    virtual void PrintValue(const Message& message,\n                            const std::vector<SpecificField>& field_path,\n                            bool left_side);\n\n    // Prints the specified path of unknown fields to the buffer.\n    virtual void PrintUnknownFieldValue(const UnknownField* unknown_field);\n\n    // Just print a string\n    void Print(const string& str);\n\n   private:\n    io::Printer* printer_;\n    bool delete_printer_;\n    bool report_modified_aggregates_;\n\n    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(StreamReporter);\n  };\n\n private:\n  // A MapKeyComparator to be used in TreatAsMapUsingKeyComparator.\n  // Implementation of this class needs to do field value comparison which\n  // relies on some private methods of MessageDifferencer. That's why this\n  // class is declared as a nested class of MessageDifferencer.\n  class MultipleFieldsMapKeyComparator;\n  // Returns true if field1's number() is less than field2's.\n  static bool FieldBefore(const FieldDescriptor* field1,\n                          const FieldDescriptor* field2);\n\n  // Combine the two lists of fields into the combined_fields output vector.\n  // All fields present in both lists will always be included in the combined\n  // list.  
Fields only present in one of the lists will only appear in the\n  // combined list if the corresponding fields_scope option is set to FULL.\n  void CombineFields(const std::vector<const FieldDescriptor*>& fields1,\n                     Scope fields1_scope,\n                     const std::vector<const FieldDescriptor*>& fields2,\n                     Scope fields2_scope,\n                     std::vector<const FieldDescriptor*>* combined_fields);\n\n  // Internal version of the Compare method which performs the actual\n  // comparison. The parent_fields vector is a vector containing field\n  // descriptors of all fields accessed to get to this comparison operation\n  // (i.e. if the current message is an embedded message, the parent_fields\n  // vector will contain the field that has this embedded message).\n  bool Compare(const Message& message1, const Message& message2,\n               std::vector<SpecificField>* parent_fields);\n\n  // Compares all the unknown fields in two messages.\n  bool CompareUnknownFields(const Message& message1, const Message& message2,\n                            const google::protobuf::UnknownFieldSet&,\n                            const google::protobuf::UnknownFieldSet&,\n                            std::vector<SpecificField>* parent_fields);\n\n  // Compares the specified messages for the requested field lists. 
The field\n  // lists are modified depending on comparison settings, and then passed to\n  // CompareWithFieldsInternal.\n  bool CompareRequestedFieldsUsingSettings(\n      const Message& message1, const Message& message2,\n      const std::vector<const FieldDescriptor*>& message1_fields,\n      const std::vector<const FieldDescriptor*>& message2_fields,\n      std::vector<SpecificField>* parent_fields);\n\n  // Compares the specified messages with the specified field lists.\n  bool CompareWithFieldsInternal(\n      const Message& message1, const Message& message2,\n      const std::vector<const FieldDescriptor*>& message1_fields,\n      const std::vector<const FieldDescriptor*>& message2_fields,\n      std::vector<SpecificField>* parent_fields);\n\n  // Compares the repeated fields, and report the error.\n  bool CompareRepeatedField(const Message& message1, const Message& message2,\n                            const FieldDescriptor* field,\n                            std::vector<SpecificField>* parent_fields);\n\n  // Shorthand for CompareFieldValueUsingParentFields with NULL parent_fields.\n  bool CompareFieldValue(const Message& message1,\n                         const Message& message2,\n                         const FieldDescriptor* field,\n                         int index1,\n                         int index2);\n\n  // Compares the specified field on the two messages, returning\n  // true if they are the same, false otherwise. For repeated fields,\n  // this method only compares the value in the specified index. 
This method\n  // uses Compare functions to recurse into submessages.\n  // The parent_fields vector is used in calls to a Reporter instance.\n  // It can be NULL, in which case the MessageDifferencer will create new\n  // list of parent messages if it needs to recursively compare the given field.\n  // To avoid confusing users you should not set it to NULL unless you modified\n  // Reporter to handle the change of parent_fields correctly.\n  bool CompareFieldValueUsingParentFields(\n      const Message& message1,\n      const Message& message2,\n      const FieldDescriptor* field,\n      int index1,\n      int index2,\n      std::vector<SpecificField>* parent_fields);\n\n  // Compares the specified field on the two messages, returning comparison\n  // result, as returned by appropriate FieldComparator.\n  FieldComparator::ComparisonResult GetFieldComparisonResult(\n      const Message& message1, const Message& message2,\n      const FieldDescriptor* field, int index1, int index2,\n      const FieldContext* field_context);\n\n  // Check if the two elements in the repeated field match each other.\n  // If the key_comparator is NULL, this function returns true when the two\n  // elements are equal.\n  bool IsMatch(const FieldDescriptor* repeated_field,\n               const MapKeyComparator* key_comparator,\n               const Message* message1, const Message* message2,\n               const std::vector<SpecificField>& parent_fields,\n               int index1, int index2);\n\n  // Returns true when this repeated field has been configured to be treated\n  // as a set.\n  bool IsTreatedAsSet(const FieldDescriptor* field);\n\n  // Returns true when this repeated field is to be compared as a subset, ie.\n  // has been configured to be treated as a set or map and scope is set to\n  // PARTIAL.\n  bool IsTreatedAsSubset(const FieldDescriptor* field);\n\n  // Returns true if this field is to be ignored when this\n  // MessageDifferencer compares messages.\n  
bool IsIgnored(\n      const Message& message1,\n      const Message& message2,\n      const FieldDescriptor* field,\n      const std::vector<SpecificField>& parent_fields);\n\n  // Returns true if this unknown field is to be ignored when this\n  // MessageDifferencer compares messages.\n  bool IsUnknownFieldIgnored(const Message& message1, const Message& message2,\n                             const SpecificField& field,\n                             const std::vector<SpecificField>& parent_fields);\n\n  // Returns MapKeyComparator* when this field has been configured to\n  // be treated as a map.  If not, returns NULL.\n  const MapKeyComparator* GetMapKeyComparator(const FieldDescriptor* field);\n\n  // Attempts to match indices of a repeated field, so that the contained values\n  // match. Clears output vectors and sets their values to indices of paired\n  // messages, ie. if message1[0] matches message2[1], then match_list1[0] == 1\n  // and match_list2[1] == 0. The unmatched indices are indicated by -1.\n  // This method returns false if the match failed. 
However, it doesn't mean\n  // that the comparison succeeds when this method returns true (you need to\n  // double-check in this case).\n  bool MatchRepeatedFieldIndices(\n      const Message& message1,\n      const Message& message2,\n      const FieldDescriptor* repeated_field,\n      const std::vector<SpecificField>& parent_fields,\n      std::vector<int>* match_list1,\n      std::vector<int>* match_list2);\n\n  // If \"any\" is of type google.protobuf.Any, extract its payload using\n  // DynamicMessageFactory and store in \"data\".\n  bool UnpackAny(const Message& any, google::protobuf::scoped_ptr<Message>* data);\n\n  // Checks if index is equal to new_index in all the specific fields.\n  static bool CheckPathChanged(const std::vector<SpecificField>& parent_fields);\n\n  // Defines a map between field descriptors and their MapKeyComparators.\n  // Used for repeated fields when they are configured as TreatAsMap.\n  typedef std::map<const FieldDescriptor*,\n              const MapKeyComparator*> FieldKeyComparatorMap;\n\n  // Defines a set to store field descriptors.  Used for repeated fields when\n  // they are configured as TreatAsSet.\n  typedef std::set<const FieldDescriptor*> FieldSet;\n\n  Reporter* reporter_;\n  DefaultFieldComparator default_field_comparator_;\n  FieldComparator* field_comparator_;\n  MessageFieldComparison message_field_comparison_;\n  Scope scope_;\n  RepeatedFieldComparison repeated_field_comparison_;\n\n  FieldSet set_fields_;\n  FieldSet list_fields_;\n  // Keeps track of MapKeyComparators that are created within\n  // MessageDifferencer. These MapKeyComparators should be deleted\n  // before MessageDifferencer is destroyed.\n  // When TreatAsMap or TreatAsMapWithMultipleFieldsAsKey is called, we don't\n  // store the supplied FieldDescriptors directly. 
Instead, a new\n  // MapKeyComparator is created for comparison purpose.\n  std::vector<MapKeyComparator*> owned_key_comparators_;\n  FieldKeyComparatorMap map_field_key_comparator_;\n  std::vector<IgnoreCriteria*> ignore_criteria_;\n\n  FieldSet ignored_fields_;\n\n  bool compare_unknown_fields_;\n  bool report_matches_;\n\n  string* output_string_;\n\n  google::protobuf::scoped_ptr<DynamicMessageFactory> dynamic_message_factory_;\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MessageDifferencer);\n};\n\n// This class provides extra information to the FieldComparator::Compare\n// function.\nclass LIBPROTOBUF_EXPORT FieldContext {\n public:\n  explicit FieldContext(\n      std::vector<MessageDifferencer::SpecificField>* parent_fields)\n      : parent_fields_(parent_fields) {}\n\n  std::vector<MessageDifferencer::SpecificField>* parent_fields() const {\n    return parent_fields_;\n  }\n\n private:\n  std::vector<MessageDifferencer::SpecificField>* parent_fields_;\n};\n\n}\n}\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_UTIL_MESSAGE_DIFFERENCER_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/util/time_util.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Defines utilities for the Timestamp and Duration well known types.\n\n#ifndef GOOGLE_PROTOBUF_UTIL_TIME_UTIL_H__\n#define GOOGLE_PROTOBUF_UTIL_TIME_UTIL_H__\n\n#include <ctime>\n#include <ostream>\n#include <string>\n#ifdef _MSC_VER\n#include <winsock2.h>\n#else\n#include <sys/time.h>\n#endif\n\n#include <google/protobuf/duration.pb.h>\n#include <google/protobuf/timestamp.pb.h>\n\nnamespace google {\nnamespace protobuf {\nnamespace util {\n\n// Utility functions for Timestamp and Duration.\nclass LIBPROTOBUF_EXPORT TimeUtil {\n  typedef google::protobuf::Timestamp Timestamp;\n  typedef google::protobuf::Duration Duration;\n\n public:\n  // The min/max Timestamp/Duration values we support.\n  //\n  // For \"0001-01-01T00:00:00Z\".\n  static const int64 kTimestampMinSeconds = -62135596800LL;\n  // For \"9999-12-31T23:59:59.999999999Z\".\n  static const int64 kTimestampMaxSeconds = 253402300799LL;\n  static const int64 kDurationMinSeconds = -315576000000LL;\n  static const int64 kDurationMaxSeconds = 315576000000LL;\n\n  // Converts Timestamp to/from RFC 3339 date string format.\n  // Generated output will always be Z-normalized and uses 3, 6 or 9\n  // fractional digits as required to represent the exact time. 
When\n  // parsing, any fractional digits (or none) and any offset are\n  // accepted as long as they fit into nano-seconds precision.\n  // Note that Timestamp can only represent time from\n  // 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. Converting\n  // a Timestamp outside of this range is undefined behavior.\n  // See https://www.ietf.org/rfc/rfc3339.txt\n  //\n  // Example of generated format:\n  //   \"1972-01-01T10:00:20.021Z\"\n  //\n  // Example of accepted format:\n  //   \"1972-01-01T10:00:20.021-05:00\"\n  static string ToString(const Timestamp& timestamp);\n  static bool FromString(const string& value, Timestamp* timestamp);\n\n  // Converts Duration to/from string format. The string format will contain\n  // 3, 6, or 9 fractional digits depending on the precision required to\n  // represent the exact Duration value. For example:\n  //   \"1s\", \"1.010s\", \"1.000000100s\", \"-3.100s\"\n  // The range that can be represented by Duration is from -315,576,000,000\n  // to +315,576,000,000 inclusive (in seconds).\n  static string ToString(const Duration& duration);\n  static bool FromString(const string& value, Duration* timestamp);\n\n#ifdef GetCurrentTime\n#undef GetCurrentTime  // Visual Studio has macro GetCurrentTime\n#endif\n  // Gets the current UTC time.\n  static Timestamp GetCurrentTime();\n  // Returns the Time representing \"1970-01-01 00:00:00\".\n  static Timestamp GetEpoch();\n\n  // Converts between Duration and integer types. The behavior is undefined if\n  // the input value is not in the valid range of Duration.\n  static Duration NanosecondsToDuration(int64 nanos);\n  static Duration MicrosecondsToDuration(int64 micros);\n  static Duration MillisecondsToDuration(int64 millis);\n  static Duration SecondsToDuration(int64 seconds);\n  static Duration MinutesToDuration(int64 minutes);\n  static Duration HoursToDuration(int64 hours);\n  // Result will be truncated towards zero. 
For example, \"-1.5s\" will be\n  // truncated to \"-1s\", and \"1.5s\" to \"1s\" when converting to seconds.\n  // It's undefined behavior if the input duration is not valid or the result\n  // exceeds the range of int64. A duration is not valid if it's not in the\n  // valid range of Duration, or have an invalid nanos value (i.e., larger\n  // than 999999999, less than -999999999, or have a different sign from the\n  // seconds part).\n  static int64 DurationToNanoseconds(const Duration& duration);\n  static int64 DurationToMicroseconds(const Duration& duration);\n  static int64 DurationToMilliseconds(const Duration& duration);\n  static int64 DurationToSeconds(const Duration& duration);\n  static int64 DurationToMinutes(const Duration& duration);\n  static int64 DurationToHours(const Duration& duration);\n  // Creates Timestamp from integer types. The integer value indicates the\n  // time elapsed from Epoch time. The behavior is undefined if the input\n  // value is not in the valid range of Timestamp.\n  static Timestamp NanosecondsToTimestamp(int64 nanos);\n  static Timestamp MicrosecondsToTimestamp(int64 micros);\n  static Timestamp MillisecondsToTimestamp(int64 millis);\n  static Timestamp SecondsToTimestamp(int64 seconds);\n  // Result will be truncated down to the nearest integer value. For example,\n  // with \"1969-12-31T23:59:59.9Z\", TimestampToMilliseconds() returns -100\n  // and TimestampToSeconds() returns -1. It's undefined behavior if the input\n  // Timestamp is not valid (i.e., its seconds part or nanos part does not fall\n  // in the valid range) or the return value doesn't fit into int64.\n  static int64 TimestampToNanoseconds(const Timestamp& timestamp);\n  static int64 TimestampToMicroseconds(const Timestamp& timestamp);\n  static int64 TimestampToMilliseconds(const Timestamp& timestamp);\n  static int64 TimestampToSeconds(const Timestamp& timestamp);\n\n  // Conversion to/from other time/date types. 
Note that these types may\n  // have a different precision and time range from Timestamp/Duration.\n  // When converting to a lower precision type, the value will be truncated\n  // to the nearest value that can be represented. If the value is\n  // out of the range of the result type, the return value is undefined.\n  //\n  // Conversion to/from time_t\n  static Timestamp TimeTToTimestamp(time_t value);\n  static time_t TimestampToTimeT(const Timestamp& value);\n\n  // Conversion to/from timeval\n  static Timestamp TimevalToTimestamp(const timeval& value);\n  static timeval TimestampToTimeval(const Timestamp& value);\n  static Duration TimevalToDuration(const timeval& value);\n  static timeval DurationToTimeval(const Duration& value);\n};\n\n}  // namespace util\n}  // namespace protobuf\n\n\nnamespace protobuf {\n// Overloaded operators for Duration.\n//\n// Assignment operators.\nLIBPROTOBUF_EXPORT Duration& operator+=(Duration& d1, const Duration& d2);  // NOLINT\nLIBPROTOBUF_EXPORT Duration& operator-=(Duration& d1, const Duration& d2);  // NOLINT\nLIBPROTOBUF_EXPORT Duration& operator*=(Duration& d, int64 r);  // NOLINT\nLIBPROTOBUF_EXPORT Duration& operator*=(Duration& d, double r);  // NOLINT\nLIBPROTOBUF_EXPORT Duration& operator/=(Duration& d, int64 r);  // NOLINT\nLIBPROTOBUF_EXPORT Duration& operator/=(Duration& d, double r);  // NOLINT\n// Overload for other integer types.\ntemplate <typename T>\nDuration& operator*=(Duration& d, T r) {  // NOLINT\n  int64 x = r;\n  return d *= x;\n}\ntemplate <typename T>\nDuration& operator/=(Duration& d, T r) {  // NOLINT\n  int64 x = r;\n  return d /= x;\n}\nLIBPROTOBUF_EXPORT Duration& operator%=(Duration& d1, const Duration& d2);  // NOLINT\n// Relational operators.\ninline bool operator<(const Duration& d1, const Duration& d2) {\n  if (d1.seconds() == d2.seconds()) {\n    return d1.nanos() < d2.nanos();\n  }\n  return d1.seconds() < d2.seconds();\n}\ninline bool operator>(const Duration& d1, const Duration& d2) 
{\n  return d2 < d1;\n}\ninline bool operator>=(const Duration& d1, const Duration& d2) {\n  return !(d1 < d2);\n}\ninline bool operator<=(const Duration& d1, const Duration& d2) {\n  return !(d2 < d1);\n}\ninline bool operator==(const Duration& d1, const Duration& d2) {\n  return d1.seconds() == d2.seconds() && d1.nanos() == d2.nanos();\n}\ninline bool operator!=(const Duration& d1, const Duration& d2) {\n  return !(d1 == d2);\n}\n// Additive operators\ninline Duration operator-(const Duration& d) {\n  Duration result;\n  result.set_seconds(-d.seconds());\n  result.set_nanos(-d.nanos());\n  return result;\n}\ninline Duration operator+(const Duration& d1, const Duration& d2) {\n  Duration result = d1;\n  return result += d2;\n}\ninline Duration operator-(const Duration& d1, const Duration& d2) {\n  Duration result = d1;\n  return result -= d2;\n}\n// Multiplicative operators\ntemplate<typename T>\ninline Duration operator*(Duration d, T r) {\n  return d *= r;\n}\ntemplate<typename T>\ninline Duration operator*(T r, Duration d) {\n  return d *= r;\n}\ntemplate<typename T>\ninline Duration operator/(Duration d, T r) {\n  return d /= r;\n}\nLIBPROTOBUF_EXPORT int64 operator/(const Duration& d1, const Duration& d2);\n\ninline Duration operator%(const Duration& d1, const Duration& d2) {\n  Duration result = d1;\n  return result %= d2;\n}\n\ninline ostream& operator<<(ostream& out, const Duration& d) {\n  out << google::protobuf::util::TimeUtil::ToString(d);\n  return out;\n}\n\n// Overloaded operators for Timestamp\n//\n// Assignment operators.\nLIBPROTOBUF_EXPORT Timestamp& operator+=(Timestamp& t, const Duration& d);  // NOLINT\nLIBPROTOBUF_EXPORT Timestamp& operator-=(Timestamp& t, const Duration& d);  // NOLINT\n// Relational operators.\ninline bool operator<(const Timestamp& t1, const Timestamp& t2) {\n  if (t1.seconds() == t2.seconds()) {\n    return t1.nanos() < t2.nanos();\n  }\n  return t1.seconds() < t2.seconds();\n}\ninline bool operator>(const Timestamp& 
t1, const Timestamp& t2) {\n  return t2 < t1;\n}\ninline bool operator>=(const Timestamp& t1, const Timestamp& t2) {\n  return !(t1 < t2);\n}\ninline bool operator<=(const Timestamp& t1, const Timestamp& t2) {\n  return !(t2 < t1);\n}\ninline bool operator==(const Timestamp& t1, const Timestamp& t2) {\n  return t1.seconds() == t2.seconds() && t1.nanos() == t2.nanos();\n}\ninline bool operator!=(const Timestamp& t1, const Timestamp& t2) {\n  return !(t1 == t2);\n}\n// Additive operators.\ninline Timestamp operator+(const Timestamp& t, const Duration& d) {\n  Timestamp result = t;\n  return result += d;\n}\ninline Timestamp operator+(const Duration& d, const Timestamp& t) {\n  Timestamp result = t;\n  return result += d;\n}\ninline Timestamp operator-(const Timestamp& t, const Duration& d) {\n  Timestamp result = t;\n  return result -= d;\n}\nLIBPROTOBUF_EXPORT Duration operator-(const Timestamp& t1, const Timestamp& t2);\n\ninline ostream& operator<<(ostream& out, const Timestamp& t) {\n  out << google::protobuf::util::TimeUtil::ToString(t);\n  return out;\n}\n\n}  // namespace protobuf\n\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_UTIL_TIME_UTIL_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/util/type_resolver.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Defines a TypeResolver for the Any message.\n\n#ifndef GOOGLE_PROTOBUF_UTIL_TYPE_RESOLVER_H__\n#define GOOGLE_PROTOBUF_UTIL_TYPE_RESOLVER_H__\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/status.h>\n\n\nnamespace google {\nnamespace protobuf {\nclass Type;\nclass Enum;\n}  // namespace protobuf\n\n\nnamespace protobuf {\nclass DescriptorPool;\nnamespace util {\n\n// Abstract interface for a type resovler.\n//\n// Implementations of this interface must be thread-safe.\nclass LIBPROTOBUF_EXPORT TypeResolver {\n public:\n  TypeResolver() {}\n  virtual ~TypeResolver() {}\n\n  // Resolves a type url for a message type.\n  virtual util::Status ResolveMessageType(\n      const string& type_url, google::protobuf::Type* message_type) = 0;\n\n  // Resolves a type url for an enum type.\n  virtual util::Status ResolveEnumType(const string& type_url,\n                                         google::protobuf::Enum* enum_type) = 0;\n\n private:\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(TypeResolver);\n};\n\n}  // namespace util\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_UTIL_TYPE_RESOLVER_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/util/type_resolver_util.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Defines utilities for the TypeResolver.\n\n#ifndef GOOGLE_PROTOBUF_UTIL_TYPE_RESOLVER_UTIL_H__\n#define GOOGLE_PROTOBUF_UTIL_TYPE_RESOLVER_UTIL_H__\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\nnamespace google {\nnamespace protobuf {\nclass DescriptorPool;\nnamespace util {\nclass TypeResolver;\n\n// Creates a TypeResolver that serves type information in the given descriptor\n// pool. Caller takes ownership of the returned TypeResolver.\nLIBPROTOBUF_EXPORT TypeResolver* NewTypeResolverForDescriptorPool(\n    const string& url_prefix, const DescriptorPool* pool);\n\n}  // namespace util\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_UTIL_TYPE_RESOLVER_UTIL_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/wire_format.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//         atenasio@google.com (Chris Atenasio) (ZigZag transform)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// This header is logically internal, but is made public because it is used\n// from protocol-compiler-generated code, which may reside in other components.\n\n#ifndef GOOGLE_PROTOBUF_WIRE_FORMAT_H__\n#define GOOGLE_PROTOBUF_WIRE_FORMAT_H__\n\n#include <string>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/descriptor.pb.h>\n#include <google/protobuf/descriptor.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/wire_format_lite.h>\n\n// Do UTF-8 validation on string type in Debug build only\n#ifndef NDEBUG\n#define GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED\n#endif\n\nnamespace google {\nnamespace protobuf {\n  namespace io {\n    class CodedInputStream;      // coded_stream.h\n    class CodedOutputStream;     // coded_stream.h\n  }\n  class UnknownFieldSet;         // unknown_field_set.h\n}\n\nnamespace protobuf {\nnamespace internal {\n\n// This class is for internal use by the protocol buffer library and by\n// protocol-complier-generated message classes.  It must not be called\n// directly by clients.\n//\n// This class contains code for implementing the binary protocol buffer\n// wire format via reflection.  
The WireFormatLite class implements the\n// non-reflection based routines.\n//\n// This class is really a namespace that contains only static methods\nclass LIBPROTOBUF_EXPORT WireFormat {\n public:\n\n  // Given a field return its WireType\n  static inline WireFormatLite::WireType WireTypeForField(\n      const FieldDescriptor* field);\n\n  // Given a FieldDescriptor::Type return its WireType\n  static inline WireFormatLite::WireType WireTypeForFieldType(\n      FieldDescriptor::Type type);\n\n  // Compute the byte size of a tag.  For groups, this includes both the start\n  // and end tags.\n  static inline size_t TagSize(int field_number, FieldDescriptor::Type type);\n\n  // These procedures can be used to implement the methods of Message which\n  // handle parsing and serialization of the protocol buffer wire format\n  // using only the Reflection interface.  When you ask the protocol\n  // compiler to optimize for code size rather than speed, it will implement\n  // those methods in terms of these procedures.  Of course, these are much\n  // slower than the specialized implementations which the protocol compiler\n  // generates when told to optimize for speed.\n\n  // Read a message in protocol buffer wire format.\n  //\n  // This procedure reads either to the end of the input stream or through\n  // a WIRETYPE_END_GROUP tag ending the message, whichever comes first.\n  // It returns false if the input is invalid.\n  //\n  // Required fields are NOT checked by this method.  You must call\n  // IsInitialized() on the resulting message yourself.\n  static bool ParseAndMergePartial(io::CodedInputStream* input,\n                                   Message* message);\n\n  // Serialize a message in protocol buffer wire format.\n  //\n  // Any embedded messages within the message must have their correct sizes\n  // cached.  
However, the top-level message need not; its size is passed as\n  // a parameter to this procedure.\n  //\n  // These return false iff the underlying stream returns a write error.\n  static void SerializeWithCachedSizes(\n      const Message& message,\n      int size, io::CodedOutputStream* output);\n\n  // Implements Message::ByteSize() via reflection.  WARNING:  The result\n  // of this method is *not* cached anywhere.  However, all embedded messages\n  // will have their ByteSize() methods called, so their sizes will be cached.\n  // Therefore, calling this method is sufficient to allow you to call\n  // WireFormat::SerializeWithCachedSizes() on the same object.\n  static size_t ByteSize(const Message& message);\n\n  // -----------------------------------------------------------------\n  // Helpers for dealing with unknown fields\n\n  // Skips a field value of the given WireType.  The input should start\n  // positioned immediately after the tag.  If unknown_fields is non-NULL,\n  // the contents of the field will be added to it.\n  static bool SkipField(io::CodedInputStream* input, uint32 tag,\n                        UnknownFieldSet* unknown_fields);\n\n  // Reads and ignores a message from the input.  If unknown_fields is non-NULL,\n  // the contents will be added to it.\n  static bool SkipMessage(io::CodedInputStream* input,\n                          UnknownFieldSet* unknown_fields);\n\n  // Read a packed enum field. 
If the is_valid function is not NULL, values for\n  // which is_valid(value) returns false are appended to unknown_fields_stream.\n  static bool ReadPackedEnumPreserveUnknowns(io::CodedInputStream* input,\n                                             uint32 field_number,\n                                             bool (*is_valid)(int),\n                                             UnknownFieldSet* unknown_fields,\n                                             RepeatedField<int>* values);\n\n  // Write the contents of an UnknownFieldSet to the output.\n  static void SerializeUnknownFields(const UnknownFieldSet& unknown_fields,\n                                     io::CodedOutputStream* output);\n  // Same as above, except writing directly to the provided buffer.\n  // Requires that the buffer have sufficient capacity for\n  // ComputeUnknownFieldsSize(unknown_fields).\n  //\n  // Returns a pointer past the last written byte.\n  static uint8* SerializeUnknownFieldsToArray(\n      const UnknownFieldSet& unknown_fields,\n      uint8* target);\n\n  // Same thing except for messages that have the message_set_wire_format\n  // option.\n  static void SerializeUnknownMessageSetItems(\n      const UnknownFieldSet& unknown_fields,\n      io::CodedOutputStream* output);\n  // Same as above, except writing directly to the provided buffer.\n  // Requires that the buffer have sufficient capacity for\n  // ComputeUnknownMessageSetItemsSize(unknown_fields).\n  //\n  // Returns a pointer past the last written byte.\n  static uint8* SerializeUnknownMessageSetItemsToArray(\n      const UnknownFieldSet& unknown_fields,\n      uint8* target);\n\n  // Compute the size of the UnknownFieldSet on the wire.\n  static size_t ComputeUnknownFieldsSize(const UnknownFieldSet& unknown_fields);\n\n  // Same thing except for messages that have the message_set_wire_format\n  // option.\n  static size_t ComputeUnknownMessageSetItemsSize(\n      const UnknownFieldSet& unknown_fields);\n\n\n  // 
Helper functions for encoding and decoding tags.  (Inlined below and in\n  // _inl.h)\n  //\n  // This is different from MakeTag(field->number(), field->type()) in the case\n  // of packed repeated fields.\n  static uint32 MakeTag(const FieldDescriptor* field);\n\n  // Parse a single field.  The input should start out positioned immediately\n  // after the tag.\n  static bool ParseAndMergeField(\n      uint32 tag,\n      const FieldDescriptor* field,        // May be NULL for unknown\n      Message* message,\n      io::CodedInputStream* input);\n\n  // Serialize a single field.\n  static void SerializeFieldWithCachedSizes(\n      const FieldDescriptor* field,        // Cannot be NULL\n      const Message& message,\n      io::CodedOutputStream* output);\n\n  // Compute size of a single field.  If the field is a message type, this\n  // will call ByteSize() for the embedded message, insuring that it caches\n  // its size.\n  static size_t FieldByteSize(\n      const FieldDescriptor* field,        // Cannot be NULL\n      const Message& message);\n\n  // Parse/serialize a MessageSet::Item group.  Used with messages that use\n  // opion message_set_wire_format = true.\n  static bool ParseAndMergeMessageSetItem(\n      io::CodedInputStream* input,\n      Message* message);\n  static void SerializeMessageSetItemWithCachedSizes(\n      const FieldDescriptor* field,\n      const Message& message,\n      io::CodedOutputStream* output);\n  static size_t MessageSetItemByteSize(\n      const FieldDescriptor* field,\n      const Message& message);\n\n  // Computes the byte size of a field, excluding tags. 
For packed fields, it\n  // only includes the size of the raw data, and not the size of the total\n  // length, but for other length-delimited types, the size of the length is\n  // included.\n  static size_t FieldDataOnlyByteSize(\n      const FieldDescriptor* field,        // Cannot be NULL\n      const Message& message);\n\n  enum Operation {\n    PARSE = 0,\n    SERIALIZE = 1,\n  };\n\n  // Verifies that a string field is valid UTF8, logging an error if not.\n  // This function will not be called by newly generated protobuf code\n  // but remains present to support existing code.\n  static void VerifyUTF8String(const char* data, int size, Operation op);\n  // The NamedField variant takes a field name in order to produce an\n  // informative error message if verification fails.\n  static void VerifyUTF8StringNamedField(const char* data,\n                                         int size,\n                                         Operation op,\n                                         const char* field_name);\n\n private:\n  // Skip a MessageSet field.\n  static bool SkipMessageSetField(io::CodedInputStream* input,\n                                  uint32 field_number,\n                                  UnknownFieldSet* unknown_fields);\n\n  // Parse a MessageSet field.\n  static bool ParseAndMergeMessageSetField(uint32 field_number,\n                                           const FieldDescriptor* field,\n                                           Message* message,\n                                           io::CodedInputStream* input);\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(WireFormat);\n};\n\n// Subclass of FieldSkipper which saves skipped fields to an UnknownFieldSet.\nclass LIBPROTOBUF_EXPORT UnknownFieldSetFieldSkipper : public FieldSkipper {\n public:\n  UnknownFieldSetFieldSkipper(UnknownFieldSet* unknown_fields)\n      : unknown_fields_(unknown_fields) {}\n  virtual ~UnknownFieldSetFieldSkipper() {}\n\n  // implements FieldSkipper 
-----------------------------------------\n  virtual bool SkipField(io::CodedInputStream* input, uint32 tag);\n  virtual bool SkipMessage(io::CodedInputStream* input);\n  virtual void SkipUnknownEnum(int field_number, int value);\n\n protected:\n  UnknownFieldSet* unknown_fields_;\n};\n\n// inline methods ====================================================\n\ninline WireFormatLite::WireType WireFormat::WireTypeForField(\n    const FieldDescriptor* field) {\n  if (field->is_packed()) {\n    return WireFormatLite::WIRETYPE_LENGTH_DELIMITED;\n  } else {\n    return WireTypeForFieldType(field->type());\n  }\n}\n\ninline WireFormatLite::WireType WireFormat::WireTypeForFieldType(\n    FieldDescriptor::Type type) {\n  // Some compilers don't like enum -> enum casts, so we implicit_cast to\n  // int first.\n  return WireFormatLite::WireTypeForFieldType(\n      static_cast<WireFormatLite::FieldType>(\n        implicit_cast<int>(type)));\n}\n\ninline uint32 WireFormat::MakeTag(const FieldDescriptor* field) {\n  return WireFormatLite::MakeTag(field->number(), WireTypeForField(field));\n}\n\ninline size_t WireFormat::TagSize(int field_number,\n                                  FieldDescriptor::Type type) {\n  // Some compilers don't like enum -> enum casts, so we implicit_cast to\n  // int first.\n  return WireFormatLite::TagSize(field_number,\n      static_cast<WireFormatLite::FieldType>(\n        implicit_cast<int>(type)));\n}\n\ninline void WireFormat::VerifyUTF8String(const char* data, int size,\n    WireFormat::Operation op) {\n#ifdef GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED\n  WireFormatLite::VerifyUtf8String(\n      data, size, static_cast<WireFormatLite::Operation>(op), NULL);\n#else\n  // Avoid the compiler warning about unused variables.\n  (void)data; (void)size; (void)op;\n#endif\n}\n\ninline void WireFormat::VerifyUTF8StringNamedField(\n    const char* data, int size, WireFormat::Operation op,\n    const char* field_name) {\n#ifdef 
GOOGLE_PROTOBUF_UTF8_VALIDATION_ENABLED\n  WireFormatLite::VerifyUtf8String(\n      data, size, static_cast<WireFormatLite::Operation>(op), field_name);\n#endif\n}\n\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_WIRE_FORMAT_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/wire_format_lite.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//         atenasio@google.com (Chris Atenasio) (ZigZag transform)\n//         wink@google.com (Wink Saville) (refactored from wire_format.h)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// This header is logically internal, but is made public because it is used\n// from protocol-compiler-generated code, which may reside in other components.\n\n#ifndef GOOGLE_PROTOBUF_WIRE_FORMAT_LITE_H__\n#define GOOGLE_PROTOBUF_WIRE_FORMAT_LITE_H__\n\n#include <string>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/message_lite.h>\n#include <google/protobuf/io/coded_stream.h>  // for CodedOutputStream::Varint32Size\n\nnamespace google {\n\nnamespace protobuf {\n  template <typename T> class RepeatedField;  // repeated_field.h\n}\n\nnamespace protobuf {\nnamespace internal {\n\nclass StringPieceField;\n\n// This class is for internal use by the protocol buffer library and by\n// protocol-complier-generated message classes.  It must not be called\n// directly by clients.\n//\n// This class contains helpers for implementing the binary protocol buffer\n// wire format without the need for reflection. 
Use WireFormat when using\n// reflection.\n//\n// This class is really a namespace that contains only static methods.\nclass LIBPROTOBUF_EXPORT WireFormatLite {\n public:\n\n  // -----------------------------------------------------------------\n  // Helper constants and functions related to the format.  These are\n  // mostly meant for internal and generated code to use.\n\n  // The wire format is composed of a sequence of tag/value pairs, each\n  // of which contains the value of one field (or one element of a repeated\n  // field).  Each tag is encoded as a varint.  The lower bits of the tag\n  // identify its wire type, which specifies the format of the data to follow.\n  // The rest of the bits contain the field number.  Each type of field (as\n  // declared by FieldDescriptor::Type, in descriptor.h) maps to one of\n  // these wire types.  Immediately following each tag is the field's value,\n  // encoded in the format specified by the wire type.  Because the tag\n  // identifies the encoding of this data, it is possible to skip\n  // unrecognized fields for forwards compatibility.\n\n  enum WireType {\n    WIRETYPE_VARINT           = 0,\n    WIRETYPE_FIXED64          = 1,\n    WIRETYPE_LENGTH_DELIMITED = 2,\n    WIRETYPE_START_GROUP      = 3,\n    WIRETYPE_END_GROUP        = 4,\n    WIRETYPE_FIXED32          = 5,\n  };\n\n  // Lite alternative to FieldDescriptor::Type.  
Must be kept in sync.\n  enum FieldType {\n    TYPE_DOUBLE         = 1,\n    TYPE_FLOAT          = 2,\n    TYPE_INT64          = 3,\n    TYPE_UINT64         = 4,\n    TYPE_INT32          = 5,\n    TYPE_FIXED64        = 6,\n    TYPE_FIXED32        = 7,\n    TYPE_BOOL           = 8,\n    TYPE_STRING         = 9,\n    TYPE_GROUP          = 10,\n    TYPE_MESSAGE        = 11,\n    TYPE_BYTES          = 12,\n    TYPE_UINT32         = 13,\n    TYPE_ENUM           = 14,\n    TYPE_SFIXED32       = 15,\n    TYPE_SFIXED64       = 16,\n    TYPE_SINT32         = 17,\n    TYPE_SINT64         = 18,\n    MAX_FIELD_TYPE      = 18,\n  };\n\n  // Lite alternative to FieldDescriptor::CppType.  Must be kept in sync.\n  enum CppType {\n    CPPTYPE_INT32       = 1,\n    CPPTYPE_INT64       = 2,\n    CPPTYPE_UINT32      = 3,\n    CPPTYPE_UINT64      = 4,\n    CPPTYPE_DOUBLE      = 5,\n    CPPTYPE_FLOAT       = 6,\n    CPPTYPE_BOOL        = 7,\n    CPPTYPE_ENUM        = 8,\n    CPPTYPE_STRING      = 9,\n    CPPTYPE_MESSAGE     = 10,\n    MAX_CPPTYPE         = 10,\n  };\n\n  // Helper method to get the CppType for a particular Type.\n  static CppType FieldTypeToCppType(FieldType type);\n\n  // Given a FieldSescriptor::Type return its WireType\n  static inline WireFormatLite::WireType WireTypeForFieldType(\n      WireFormatLite::FieldType type) {\n    return kWireTypeForFieldType[type];\n  }\n\n  // Number of bits in a tag which identify the wire type.\n  static const int kTagTypeBits = 3;\n  // Mask for those bits.\n  static const uint32 kTagTypeMask = (1 << kTagTypeBits) - 1;\n\n  // Helper functions for encoding and decoding tags.  (Inlined below and in\n  // _inl.h)\n  //\n  // This is different from MakeTag(field->number(), field->type()) in the case\n  // of packed repeated fields.\n  static uint32 MakeTag(int field_number, WireType type);\n  static WireType GetTagWireType(uint32 tag);\n  static int GetTagFieldNumber(uint32 tag);\n\n  // Compute the byte size of a tag.  
For groups, this includes both the start\n  // and end tags.\n  static inline size_t TagSize(int field_number,\n                               WireFormatLite::FieldType type);\n\n  // Skips a field value with the given tag.  The input should start\n  // positioned immediately after the tag.  Skipped values are simply discarded,\n  // not recorded anywhere.  See WireFormat::SkipField() for a version that\n  // records to an UnknownFieldSet.\n  static bool SkipField(io::CodedInputStream* input, uint32 tag);\n\n  // Skips a field value with the given tag.  The input should start\n  // positioned immediately after the tag. Skipped values are recorded to a\n  // CodedOutputStream.\n  static bool SkipField(io::CodedInputStream* input, uint32 tag,\n                        io::CodedOutputStream* output);\n\n  // Reads and ignores a message from the input.  Skipped values are simply\n  // discarded, not recorded anywhere.  See WireFormat::SkipMessage() for a\n  // version that records to an UnknownFieldSet.\n  static bool SkipMessage(io::CodedInputStream* input);\n\n  // Reads and ignores a message from the input.  Skipped values are recorded\n  // to a CodedOutputStream.\n  static bool SkipMessage(io::CodedInputStream* input,\n                          io::CodedOutputStream* output);\n\n// This macro does the same thing as WireFormatLite::MakeTag(), but the\n// result is usable as a compile-time constant, which makes it usable\n// as a switch case or a template input.  
WireFormatLite::MakeTag() is more\n// type-safe, though, so prefer it if possible.\n#define GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(FIELD_NUMBER, TYPE)                  \\\n  static_cast<uint32>(                                                   \\\n    ((FIELD_NUMBER) << ::google::protobuf::internal::WireFormatLite::kTagTypeBits) \\\n      | (TYPE))\n\n  // These are the tags for the old MessageSet format, which was defined as:\n  //   message MessageSet {\n  //     repeated group Item = 1 {\n  //       required int32 type_id = 2;\n  //       required string message = 3;\n  //     }\n  //   }\n  static const int kMessageSetItemNumber = 1;\n  static const int kMessageSetTypeIdNumber = 2;\n  static const int kMessageSetMessageNumber = 3;\n  static const int kMessageSetItemStartTag =\n    GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(kMessageSetItemNumber,\n                                WireFormatLite::WIRETYPE_START_GROUP);\n  static const int kMessageSetItemEndTag =\n    GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(kMessageSetItemNumber,\n                                WireFormatLite::WIRETYPE_END_GROUP);\n  static const int kMessageSetTypeIdTag =\n    GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(kMessageSetTypeIdNumber,\n                                WireFormatLite::WIRETYPE_VARINT);\n  static const int kMessageSetMessageTag =\n    GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(kMessageSetMessageNumber,\n                                WireFormatLite::WIRETYPE_LENGTH_DELIMITED);\n\n  // Byte size of all tags of a MessageSet::Item combined.\n  static const size_t kMessageSetItemTagsSize;\n\n  // Helper functions for converting between floats/doubles and IEEE-754\n  // uint32s/uint64s so that they can be written.  
(Assumes your platform\n  // uses IEEE-754 floats.)\n  static uint32 EncodeFloat(float value);\n  static float DecodeFloat(uint32 value);\n  static uint64 EncodeDouble(double value);\n  static double DecodeDouble(uint64 value);\n\n  // Helper functions for mapping signed integers to unsigned integers in\n  // such a way that numbers with small magnitudes will encode to smaller\n  // varints.  If you simply static_cast a negative number to an unsigned\n  // number and varint-encode it, it will always take 10 bytes, defeating\n  // the purpose of varint.  So, for the \"sint32\" and \"sint64\" field types,\n  // we ZigZag-encode the values.\n  static uint32 ZigZagEncode32(int32 n);\n  static int32  ZigZagDecode32(uint32 n);\n  static uint64 ZigZagEncode64(int64 n);\n  static int64  ZigZagDecode64(uint64 n);\n\n  // =================================================================\n  // Methods for reading/writing individual field.  The implementations\n  // of these methods are defined in wire_format_lite_inl.h; you must #include\n  // that file to use these.\n\n// Avoid ugly line wrapping\n#define input  io::CodedInputStream*  input_arg\n#define output io::CodedOutputStream* output_arg\n#define field_number int field_number_arg\n#define INL GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n\n  // Read fields, not including tags.  The assumption is that you already\n  // read the tag to determine what field to read.\n\n  // For primitive fields, we just use a templatized routine parameterized by\n  // the represented type and the FieldType. 
These are specialized with the\n  // appropriate definition for each declared type.\n  template <typename CType, enum FieldType DeclaredType> INL\n  static bool ReadPrimitive(input, CType* value);\n\n  // Reads repeated primitive values, with optimizations for repeats.\n  // tag_size and tag should both be compile-time constants provided by the\n  // protocol compiler.\n  template <typename CType, enum FieldType DeclaredType> INL\n  static bool ReadRepeatedPrimitive(int tag_size,\n                                    uint32 tag,\n                                    input,\n                                    RepeatedField<CType>* value);\n\n  // Identical to ReadRepeatedPrimitive, except will not inline the\n  // implementation.\n  template <typename CType, enum FieldType DeclaredType>\n  static bool ReadRepeatedPrimitiveNoInline(int tag_size,\n                                            uint32 tag,\n                                            input,\n                                            RepeatedField<CType>* value);\n\n  // Reads a primitive value directly from the provided buffer. It returns a\n  // pointer past the segment of data that was read.\n  //\n  // This is only implemented for the types with fixed wire size, e.g.\n  // float, double, and the (s)fixed* types.\n  template <typename CType, enum FieldType DeclaredType> INL\n  static const uint8* ReadPrimitiveFromArray(const uint8* buffer, CType* value);\n\n  // Reads a primitive packed field.\n  //\n  // This is only implemented for packable types.\n  template <typename CType, enum FieldType DeclaredType> INL\n  static bool ReadPackedPrimitive(input, RepeatedField<CType>* value);\n\n  // Identical to ReadPackedPrimitive, except will not inline the\n  // implementation.\n  template <typename CType, enum FieldType DeclaredType>\n  static bool ReadPackedPrimitiveNoInline(input, RepeatedField<CType>* value);\n\n  // Read a packed enum field. 
If the is_valid function is not NULL, values for\n  // which is_valid(value) returns false are silently dropped.\n  static bool ReadPackedEnumNoInline(input,\n                                     bool (*is_valid)(int),\n                                     RepeatedField<int>* values);\n\n  // Read a packed enum field. If the is_valid function is not NULL, values for\n  // which is_valid(value) returns false are appended to unknown_fields_stream.\n  static bool ReadPackedEnumPreserveUnknowns(\n      input,\n      field_number,\n      bool (*is_valid)(int),\n      io::CodedOutputStream* unknown_fields_stream,\n      RepeatedField<int>* values);\n\n  // Read a string.  ReadString(..., string* value) requires an existing string.\n  static inline bool ReadString(input, string* value);\n  // ReadString(..., string** p) is internal-only, and should only be called\n  // from generated code. It starts by setting *p to \"new string\"\n  // if *p == &GetEmptyStringAlreadyInited().  It then invokes\n  // ReadString(input, *p).  
This is useful for reducing code size.\n  static inline bool ReadString(input, string** p);\n  // Analogous to ReadString().\n  static bool ReadBytes(input, string* value);\n  static bool ReadBytes(input, string** p);\n\n\n  enum Operation {\n    PARSE = 0,\n    SERIALIZE = 1,\n  };\n\n  // Returns true if the data is valid UTF-8.\n  static bool VerifyUtf8String(const char* data, int size,\n                               Operation op,\n                               const char* field_name);\n\n  static inline bool ReadGroup  (field_number, input, MessageLite* value);\n  static inline bool ReadMessage(input, MessageLite* value);\n\n  // Like above, but de-virtualize the call to MergePartialFromCodedStream().\n  // The pointer must point at an instance of MessageType, *not* a subclass (or\n  // the subclass must not override MergePartialFromCodedStream()).\n  template<typename MessageType>\n  static inline bool ReadGroupNoVirtual(field_number, input,\n                                        MessageType* value);\n  template<typename MessageType>\n  static inline bool ReadMessageNoVirtual(input, MessageType* value);\n\n  // The same, but do not modify input's recursion depth.  This is useful\n  // when reading a bunch of groups or messages in a loop, because then the\n  // recursion depth can be incremented before the loop and decremented after.\n  template<typename MessageType>\n  static inline bool ReadGroupNoVirtualNoRecursionDepth(field_number, input,\n                                                        MessageType* value);\n\n  template<typename MessageType>\n  static inline bool ReadMessageNoVirtualNoRecursionDepth(input,\n                                                          MessageType* value);\n\n  // Write a tag.  
The Write*() functions typically include the tag, so\n  // normally there's no need to call this unless using the Write*NoTag()\n  // variants.\n  INL static void WriteTag(field_number, WireType type, output);\n\n  // Write fields, without tags.\n  INL static void WriteInt32NoTag   (int32 value, output);\n  INL static void WriteInt64NoTag   (int64 value, output);\n  INL static void WriteUInt32NoTag  (uint32 value, output);\n  INL static void WriteUInt64NoTag  (uint64 value, output);\n  INL static void WriteSInt32NoTag  (int32 value, output);\n  INL static void WriteSInt64NoTag  (int64 value, output);\n  INL static void WriteFixed32NoTag (uint32 value, output);\n  INL static void WriteFixed64NoTag (uint64 value, output);\n  INL static void WriteSFixed32NoTag(int32 value, output);\n  INL static void WriteSFixed64NoTag(int64 value, output);\n  INL static void WriteFloatNoTag   (float value, output);\n  INL static void WriteDoubleNoTag  (double value, output);\n  INL static void WriteBoolNoTag    (bool value, output);\n  INL static void WriteEnumNoTag    (int value, output);\n\n  // Write fields, including tags.\n  static void WriteInt32   (field_number,  int32 value, output);\n  static void WriteInt64   (field_number,  int64 value, output);\n  static void WriteUInt32  (field_number, uint32 value, output);\n  static void WriteUInt64  (field_number, uint64 value, output);\n  static void WriteSInt32  (field_number,  int32 value, output);\n  static void WriteSInt64  (field_number,  int64 value, output);\n  static void WriteFixed32 (field_number, uint32 value, output);\n  static void WriteFixed64 (field_number, uint64 value, output);\n  static void WriteSFixed32(field_number,  int32 value, output);\n  static void WriteSFixed64(field_number,  int64 value, output);\n  static void WriteFloat   (field_number,  float value, output);\n  static void WriteDouble  (field_number, double value, output);\n  static void WriteBool    (field_number,   bool value, output);\n  static void 
WriteEnum    (field_number,    int value, output);\n\n  static void WriteString(field_number, const string& value, output);\n  static void WriteBytes (field_number, const string& value, output);\n  static void WriteStringMaybeAliased(\n      field_number, const string& value, output);\n  static void WriteBytesMaybeAliased(\n      field_number, const string& value, output);\n\n  static void WriteGroup(\n    field_number, const MessageLite& value, output);\n  static void WriteMessage(\n    field_number, const MessageLite& value, output);\n  // Like above, but these will check if the output stream has enough\n  // space to write directly to a flat array.\n  static void WriteGroupMaybeToArray(\n    field_number, const MessageLite& value, output);\n  static void WriteMessageMaybeToArray(\n    field_number, const MessageLite& value, output);\n\n  // Like above, but de-virtualize the call to SerializeWithCachedSizes().  The\n  // pointer must point at an instance of MessageType, *not* a subclass (or\n  // the subclass must not override SerializeWithCachedSizes()).\n  template<typename MessageType>\n  static inline void WriteGroupNoVirtual(\n    field_number, const MessageType& value, output);\n  template<typename MessageType>\n  static inline void WriteMessageNoVirtual(\n    field_number, const MessageType& value, output);\n\n#undef output\n#define output uint8* target\n\n  // Like above, but use only *ToArray methods of CodedOutputStream.\n  INL static uint8* WriteTagToArray(field_number, WireType type, output);\n\n  // Write fields, without tags.\n  INL static uint8* WriteInt32NoTagToArray   (int32 value, output);\n  INL static uint8* WriteInt64NoTagToArray   (int64 value, output);\n  INL static uint8* WriteUInt32NoTagToArray  (uint32 value, output);\n  INL static uint8* WriteUInt64NoTagToArray  (uint64 value, output);\n  INL static uint8* WriteSInt32NoTagToArray  (int32 value, output);\n  INL static uint8* WriteSInt64NoTagToArray  (int64 value, output);\n  INL static 
uint8* WriteFixed32NoTagToArray (uint32 value, output);\n  INL static uint8* WriteFixed64NoTagToArray (uint64 value, output);\n  INL static uint8* WriteSFixed32NoTagToArray(int32 value, output);\n  INL static uint8* WriteSFixed64NoTagToArray(int64 value, output);\n  INL static uint8* WriteFloatNoTagToArray   (float value, output);\n  INL static uint8* WriteDoubleNoTagToArray  (double value, output);\n  INL static uint8* WriteBoolNoTagToArray    (bool value, output);\n  INL static uint8* WriteEnumNoTagToArray    (int value, output);\n\n  // Write fields, including tags.\n  INL static uint8* WriteInt32ToArray(field_number, int32 value, output);\n  INL static uint8* WriteInt64ToArray(field_number, int64 value, output);\n  INL static uint8* WriteUInt32ToArray(field_number, uint32 value, output);\n  INL static uint8* WriteUInt64ToArray(field_number, uint64 value, output);\n  INL static uint8* WriteSInt32ToArray(field_number, int32 value, output);\n  INL static uint8* WriteSInt64ToArray(field_number, int64 value, output);\n  INL static uint8* WriteFixed32ToArray(field_number, uint32 value, output);\n  INL static uint8* WriteFixed64ToArray(field_number, uint64 value, output);\n  INL static uint8* WriteSFixed32ToArray(field_number, int32 value, output);\n  INL static uint8* WriteSFixed64ToArray(field_number, int64 value, output);\n  INL static uint8* WriteFloatToArray(field_number, float value, output);\n  INL static uint8* WriteDoubleToArray(field_number, double value, output);\n  INL static uint8* WriteBoolToArray(field_number, bool value, output);\n  INL static uint8* WriteEnumToArray(field_number, int value, output);\n\n  INL static uint8* WriteStringToArray(\n    field_number, const string& value, output);\n  INL static uint8* WriteBytesToArray(\n    field_number, const string& value, output);\n\n  // Whether to serialize deterministically (e.g., map keys are\n  // sorted) is a property of a CodedOutputStream, and in the process\n  // of serialization, the \"ToArray\" 
variants may be invoked.  But they don't\n  // have a CodedOutputStream available, so they get an additional parameter\n  // telling them whether to serialize deterministically.\n  INL static uint8* InternalWriteGroupToArray(\n      field_number, const MessageLite& value, bool deterministic, output);\n  INL static uint8* InternalWriteMessageToArray(\n      field_number, const MessageLite& value, bool deterministic, output);\n\n  // Like above, but de-virtualize the call to SerializeWithCachedSizes().  The\n  // pointer must point at an instance of MessageType, *not* a subclass (or\n  // the subclass must not override SerializeWithCachedSizes()).\n  template<typename MessageType>\n  INL static uint8* InternalWriteGroupNoVirtualToArray(\n    field_number, const MessageType& value, bool deterministic, output);\n  template<typename MessageType>\n  INL static uint8* InternalWriteMessageNoVirtualToArray(\n    field_number, const MessageType& value, bool deterministic, output);\n\n  // For backward-compatibility, the last four methods also have versions\n  // that are non-deterministic always.\n  INL static uint8* WriteGroupToArray(\n      field_number, const MessageLite& value, output) {\n    return InternalWriteGroupToArray(field_number_arg, value, false, target);\n  }\n  INL static uint8* WriteMessageToArray(\n      field_number, const MessageLite& value, output) {\n    return InternalWriteMessageToArray(field_number_arg, value, false, target);\n  }\n  template<typename MessageType>\n  INL static uint8* WriteGroupNoVirtualToArray(\n      field_number, const MessageType& value, output) {\n    return InternalWriteGroupNoVirtualToArray(field_number_arg, value, false,\n                                              target);\n  }\n  template<typename MessageType>\n  INL static uint8* WriteMessageNoVirtualToArray(\n      field_number, const MessageType& value, output) {\n    return InternalWriteMessageNoVirtualToArray(field_number_arg, value, false,\n                          
                      target);\n  }\n\n#undef output\n#undef input\n#undef INL\n\n#undef field_number\n\n  // Compute the byte size of a field.  The XxSize() functions do NOT include\n  // the tag, so you must also call TagSize().  (This is because, for repeated\n  // fields, you should only call TagSize() once and multiply it by the element\n  // count, but you may have to call XxSize() for each individual element.)\n  static inline size_t Int32Size   ( int32 value);\n  static inline size_t Int64Size   ( int64 value);\n  static inline size_t UInt32Size  (uint32 value);\n  static inline size_t UInt64Size  (uint64 value);\n  static inline size_t SInt32Size  ( int32 value);\n  static inline size_t SInt64Size  ( int64 value);\n  static inline size_t EnumSize    (   int value);\n\n  // These types always have the same size.\n  static const size_t kFixed32Size  = 4;\n  static const size_t kFixed64Size  = 8;\n  static const size_t kSFixed32Size = 4;\n  static const size_t kSFixed64Size = 8;\n  static const size_t kFloatSize    = 4;\n  static const size_t kDoubleSize   = 8;\n  static const size_t kBoolSize     = 1;\n\n  static inline size_t StringSize(const string& value);\n  static inline size_t BytesSize (const string& value);\n\n  static inline size_t GroupSize  (const MessageLite& value);\n  static inline size_t MessageSize(const MessageLite& value);\n\n  // Like above, but de-virtualize the call to ByteSize().  
The\n  // pointer must point at an instance of MessageType, *not* a subclass (or\n  // the subclass must not override ByteSize()).\n  template<typename MessageType>\n  static inline size_t GroupSizeNoVirtual  (const MessageType& value);\n  template<typename MessageType>\n  static inline size_t MessageSizeNoVirtual(const MessageType& value);\n\n  // Given the length of data, calculate the byte size of the data on the\n  // wire if we encode the data as a length delimited field.\n  static inline size_t LengthDelimitedSize(size_t length);\n\n private:\n  // A helper method for the repeated primitive reader. This method has\n  // optimizations for primitive types that have fixed size on the wire, and\n  // can be read using potentially faster paths.\n  template <typename CType, enum FieldType DeclaredType> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  static bool ReadRepeatedFixedSizePrimitive(\n      int tag_size,\n      uint32 tag,\n      google::protobuf::io::CodedInputStream* input,\n      RepeatedField<CType>* value);\n\n  // Like ReadRepeatedFixedSizePrimitive but for packed primitive fields.\n  template <typename CType, enum FieldType DeclaredType> GOOGLE_ATTRIBUTE_ALWAYS_INLINE\n  static bool ReadPackedFixedSizePrimitive(google::protobuf::io::CodedInputStream* input,\n                                           RepeatedField<CType>* value);\n\n  static const CppType kFieldTypeToCppTypeMap[];\n  static const WireFormatLite::WireType kWireTypeForFieldType[];\n\n  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(WireFormatLite);\n};\n\n// A class which deals with unknown values.  The default implementation just\n// discards them.  WireFormat defines a subclass which writes to an\n// UnknownFieldSet.  
This class is used by ExtensionSet::ParseField(), since\n// ExtensionSet is part of the lite library but UnknownFieldSet is not.\nclass LIBPROTOBUF_EXPORT FieldSkipper {\n public:\n  FieldSkipper() {}\n  virtual ~FieldSkipper() {}\n\n  // Skip a field whose tag has already been consumed.\n  virtual bool SkipField(io::CodedInputStream* input, uint32 tag);\n\n  // Skip an entire message or group, up to an end-group tag (which is consumed)\n  // or end-of-stream.\n  virtual bool SkipMessage(io::CodedInputStream* input);\n\n  // Deal with an already-parsed unrecognized enum value.  The default\n  // implementation does nothing, but the UnknownFieldSet-based implementation\n  // saves it as an unknown varint.\n  virtual void SkipUnknownEnum(int field_number, int value);\n};\n\n// Subclass of FieldSkipper which saves skipped fields to a CodedOutputStream.\n\nclass LIBPROTOBUF_EXPORT CodedOutputStreamFieldSkipper : public FieldSkipper {\n public:\n  explicit CodedOutputStreamFieldSkipper(io::CodedOutputStream* unknown_fields)\n      : unknown_fields_(unknown_fields) {}\n  virtual ~CodedOutputStreamFieldSkipper() {}\n\n  // implements FieldSkipper -----------------------------------------\n  virtual bool SkipField(io::CodedInputStream* input, uint32 tag);\n  virtual bool SkipMessage(io::CodedInputStream* input);\n  virtual void SkipUnknownEnum(int field_number, int value);\n\n protected:\n  io::CodedOutputStream* unknown_fields_;\n};\n\n\n// inline methods ====================================================\n\ninline WireFormatLite::CppType\nWireFormatLite::FieldTypeToCppType(FieldType type) {\n  return kFieldTypeToCppTypeMap[type];\n}\n\ninline uint32 WireFormatLite::MakeTag(int field_number, WireType type) {\n  return GOOGLE_PROTOBUF_WIRE_FORMAT_MAKE_TAG(field_number, type);\n}\n\ninline WireFormatLite::WireType WireFormatLite::GetTagWireType(uint32 tag) {\n  return static_cast<WireType>(tag & kTagTypeMask);\n}\n\ninline int WireFormatLite::GetTagFieldNumber(uint32 tag) 
{\n  return static_cast<int>(tag >> kTagTypeBits);\n}\n\ninline size_t WireFormatLite::TagSize(int field_number,\n                                      WireFormatLite::FieldType type) {\n  size_t result = io::CodedOutputStream::VarintSize32(\n    field_number << kTagTypeBits);\n  if (type == TYPE_GROUP) {\n    // Groups have both a start and an end tag.\n    return result * 2;\n  } else {\n    return result;\n  }\n}\n\ninline uint32 WireFormatLite::EncodeFloat(float value) {\n  union {float f; uint32 i;};\n  f = value;\n  return i;\n}\n\ninline float WireFormatLite::DecodeFloat(uint32 value) {\n  union {float f; uint32 i;};\n  i = value;\n  return f;\n}\n\ninline uint64 WireFormatLite::EncodeDouble(double value) {\n  union {double f; uint64 i;};\n  f = value;\n  return i;\n}\n\ninline double WireFormatLite::DecodeDouble(uint64 value) {\n  union {double f; uint64 i;};\n  i = value;\n  return f;\n}\n\n// ZigZag Transform:  Encodes signed integers so that they can be\n// effectively used with varint encoding.\n//\n// varint operates on unsigned integers, encoding smaller numbers into\n// fewer bytes.  If you try to use it on a signed integer, it will treat\n// this number as a very large unsigned integer, which means that even\n// small signed numbers like -1 will take the maximum number of bytes\n// (10) to encode.  ZigZagEncode() maps signed integers to unsigned\n// in such a way that those with a small absolute value will have smaller\n// encoded values, making them appropriate for encoding using varint.\n//\n//       int32 ->     uint32\n// -------------------------\n//           0 ->          0\n//          -1 ->          1\n//           1 ->          2\n//          -2 ->          3\n//         ... 
->        ...\n//  2147483647 -> 4294967294\n// -2147483648 -> 4294967295\n//\n//        >> encode >>\n//        << decode <<\n\ninline uint32 WireFormatLite::ZigZagEncode32(int32 n) {\n  // Note:  the right-shift must be arithmetic\n  return (static_cast<uint32>(n) << 1) ^ (n >> 31);\n}\n\ninline int32 WireFormatLite::ZigZagDecode32(uint32 n) {\n  return (n >> 1) ^ -static_cast<int32>(n & 1);\n}\n\ninline uint64 WireFormatLite::ZigZagEncode64(int64 n) {\n  // Note:  the right-shift must be arithmetic\n  return (static_cast<uint64>(n) << 1) ^ (n >> 63);\n}\n\ninline int64 WireFormatLite::ZigZagDecode64(uint64 n) {\n  return (n >> 1) ^ -static_cast<int64>(n & 1);\n}\n\n// String is for UTF-8 text only, but, even so, ReadString() can simply\n// call ReadBytes().\n\ninline bool WireFormatLite::ReadString(io::CodedInputStream* input,\n                                       string* value) {\n  return ReadBytes(input, value);\n}\n\ninline bool WireFormatLite::ReadString(io::CodedInputStream* input,\n                                       string** p) {\n  return ReadBytes(input, p);\n}\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_WIRE_FORMAT_LITE_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/wire_format_lite_inl.h",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//         wink@google.com (Wink Saville) (refactored from wire_format.h)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n\n#ifndef GOOGLE_PROTOBUF_WIRE_FORMAT_LITE_INL_H__\n#define GOOGLE_PROTOBUF_WIRE_FORMAT_LITE_INL_H__\n\n#ifdef _MSC_VER\n// This is required for min/max on VS2013 only.\n#include <algorithm>\n#endif\n\n#include <string>\n#include <google/protobuf/stubs/common.h>\n#include <google/protobuf/stubs/logging.h>\n#include <google/protobuf/message_lite.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/wire_format_lite.h>\n#include <google/protobuf/io/coded_stream.h>\n#include <google/protobuf/arenastring.h>\n\n\nnamespace google {\nnamespace protobuf {\nnamespace internal {\n\n// Implementation details of ReadPrimitive.\n\ntemplate <>\ninline bool WireFormatLite::ReadPrimitive<int32, WireFormatLite::TYPE_INT32>(\n    io::CodedInputStream* input,\n    int32* value) {\n  uint32 temp;\n  if (!input->ReadVarint32(&temp)) return false;\n  *value = static_cast<int32>(temp);\n  return true;\n}\ntemplate <>\ninline bool WireFormatLite::ReadPrimitive<int64, WireFormatLite::TYPE_INT64>(\n    io::CodedInputStream* input,\n    int64* value) {\n  uint64 temp;\n  if (!input->ReadVarint64(&temp)) return false;\n  *value = static_cast<int64>(temp);\n  return true;\n}\ntemplate <>\ninline bool 
WireFormatLite::ReadPrimitive<uint32, WireFormatLite::TYPE_UINT32>(\n    io::CodedInputStream* input,\n    uint32* value) {\n  return input->ReadVarint32(value);\n}\ntemplate <>\ninline bool WireFormatLite::ReadPrimitive<uint64, WireFormatLite::TYPE_UINT64>(\n    io::CodedInputStream* input,\n    uint64* value) {\n  return input->ReadVarint64(value);\n}\ntemplate <>\ninline bool WireFormatLite::ReadPrimitive<int32, WireFormatLite::TYPE_SINT32>(\n    io::CodedInputStream* input,\n    int32* value) {\n  uint32 temp;\n  if (!input->ReadVarint32(&temp)) return false;\n  *value = ZigZagDecode32(temp);\n  return true;\n}\ntemplate <>\ninline bool WireFormatLite::ReadPrimitive<int64, WireFormatLite::TYPE_SINT64>(\n    io::CodedInputStream* input,\n    int64* value) {\n  uint64 temp;\n  if (!input->ReadVarint64(&temp)) return false;\n  *value = ZigZagDecode64(temp);\n  return true;\n}\ntemplate <>\ninline bool WireFormatLite::ReadPrimitive<uint32, WireFormatLite::TYPE_FIXED32>(\n    io::CodedInputStream* input,\n    uint32* value) {\n  return input->ReadLittleEndian32(value);\n}\ntemplate <>\ninline bool WireFormatLite::ReadPrimitive<uint64, WireFormatLite::TYPE_FIXED64>(\n    io::CodedInputStream* input,\n    uint64* value) {\n  return input->ReadLittleEndian64(value);\n}\ntemplate <>\ninline bool WireFormatLite::ReadPrimitive<int32, WireFormatLite::TYPE_SFIXED32>(\n    io::CodedInputStream* input,\n    int32* value) {\n  uint32 temp;\n  if (!input->ReadLittleEndian32(&temp)) return false;\n  *value = static_cast<int32>(temp);\n  return true;\n}\ntemplate <>\ninline bool WireFormatLite::ReadPrimitive<int64, WireFormatLite::TYPE_SFIXED64>(\n    io::CodedInputStream* input,\n    int64* value) {\n  uint64 temp;\n  if (!input->ReadLittleEndian64(&temp)) return false;\n  *value = static_cast<int64>(temp);\n  return true;\n}\ntemplate <>\ninline bool WireFormatLite::ReadPrimitive<float, WireFormatLite::TYPE_FLOAT>(\n    io::CodedInputStream* input,\n    float* value) {\n  
uint32 temp;\n  if (!input->ReadLittleEndian32(&temp)) return false;\n  *value = DecodeFloat(temp);\n  return true;\n}\ntemplate <>\ninline bool WireFormatLite::ReadPrimitive<double, WireFormatLite::TYPE_DOUBLE>(\n    io::CodedInputStream* input,\n    double* value) {\n  uint64 temp;\n  if (!input->ReadLittleEndian64(&temp)) return false;\n  *value = DecodeDouble(temp);\n  return true;\n}\ntemplate <>\ninline bool WireFormatLite::ReadPrimitive<bool, WireFormatLite::TYPE_BOOL>(\n    io::CodedInputStream* input,\n    bool* value) {\n  uint64 temp;\n  if (!input->ReadVarint64(&temp)) return false;\n  *value = temp != 0;\n  return true;\n}\ntemplate <>\ninline bool WireFormatLite::ReadPrimitive<int, WireFormatLite::TYPE_ENUM>(\n    io::CodedInputStream* input,\n    int* value) {\n  uint32 temp;\n  if (!input->ReadVarint32(&temp)) return false;\n  *value = static_cast<int>(temp);\n  return true;\n}\n\ntemplate <>\ninline const uint8* WireFormatLite::ReadPrimitiveFromArray<\n  uint32, WireFormatLite::TYPE_FIXED32>(\n    const uint8* buffer,\n    uint32* value) {\n  return io::CodedInputStream::ReadLittleEndian32FromArray(buffer, value);\n}\ntemplate <>\ninline const uint8* WireFormatLite::ReadPrimitiveFromArray<\n  uint64, WireFormatLite::TYPE_FIXED64>(\n    const uint8* buffer,\n    uint64* value) {\n  return io::CodedInputStream::ReadLittleEndian64FromArray(buffer, value);\n}\ntemplate <>\ninline const uint8* WireFormatLite::ReadPrimitiveFromArray<\n  int32, WireFormatLite::TYPE_SFIXED32>(\n    const uint8* buffer,\n    int32* value) {\n  uint32 temp;\n  buffer = io::CodedInputStream::ReadLittleEndian32FromArray(buffer, &temp);\n  *value = static_cast<int32>(temp);\n  return buffer;\n}\ntemplate <>\ninline const uint8* WireFormatLite::ReadPrimitiveFromArray<\n  int64, WireFormatLite::TYPE_SFIXED64>(\n    const uint8* buffer,\n    int64* value) {\n  uint64 temp;\n  buffer = io::CodedInputStream::ReadLittleEndian64FromArray(buffer, &temp);\n  *value = 
static_cast<int64>(temp);\n  return buffer;\n}\ntemplate <>\ninline const uint8* WireFormatLite::ReadPrimitiveFromArray<\n  float, WireFormatLite::TYPE_FLOAT>(\n    const uint8* buffer,\n    float* value) {\n  uint32 temp;\n  buffer = io::CodedInputStream::ReadLittleEndian32FromArray(buffer, &temp);\n  *value = DecodeFloat(temp);\n  return buffer;\n}\ntemplate <>\ninline const uint8* WireFormatLite::ReadPrimitiveFromArray<\n  double, WireFormatLite::TYPE_DOUBLE>(\n    const uint8* buffer,\n    double* value) {\n  uint64 temp;\n  buffer = io::CodedInputStream::ReadLittleEndian64FromArray(buffer, &temp);\n  *value = DecodeDouble(temp);\n  return buffer;\n}\n\ntemplate <typename CType, enum WireFormatLite::FieldType DeclaredType>\ninline bool WireFormatLite::ReadRepeatedPrimitive(\n    int,  // tag_size, unused.\n    uint32 tag,\n    io::CodedInputStream* input,\n    RepeatedField<CType>* values) {\n  CType value;\n  if (!ReadPrimitive<CType, DeclaredType>(input, &value)) return false;\n  values->Add(value);\n  int elements_already_reserved = values->Capacity() - values->size();\n  while (elements_already_reserved > 0 && input->ExpectTag(tag)) {\n    if (!ReadPrimitive<CType, DeclaredType>(input, &value)) return false;\n    values->AddAlreadyReserved(value);\n    elements_already_reserved--;\n  }\n  return true;\n}\n\ntemplate <typename CType, enum WireFormatLite::FieldType DeclaredType>\ninline bool WireFormatLite::ReadRepeatedFixedSizePrimitive(\n    int tag_size,\n    uint32 tag,\n    io::CodedInputStream* input,\n    RepeatedField<CType>* values) {\n  GOOGLE_DCHECK_EQ(UInt32Size(tag), static_cast<size_t>(tag_size));\n  CType value;\n  if (!ReadPrimitive<CType, DeclaredType>(input, &value))\n    return false;\n  values->Add(value);\n\n  // For fixed size values, repeated values can be read more quickly by\n  // reading directly from a raw array.\n  //\n  // We can get a tight loop by only reading as many elements as can be\n  // added to the RepeatedField without 
having to do any resizing. Additionally,\n  // we only try to read as many elements as are available from the current\n  // buffer space. Doing so avoids having to perform boundary checks when\n  // reading the value: the maximum number of elements that can be read is\n  // known outside of the loop.\n  const void* void_pointer;\n  int size;\n  input->GetDirectBufferPointerInline(&void_pointer, &size);\n  if (size > 0) {\n    const uint8* buffer = reinterpret_cast<const uint8*>(void_pointer);\n    // The number of bytes each type occupies on the wire.\n    const int per_value_size = tag_size + sizeof(value);\n\n    int elements_available =\n        std::min(values->Capacity() - values->size(), size / per_value_size);\n    int num_read = 0;\n    while (num_read < elements_available &&\n           (buffer = io::CodedInputStream::ExpectTagFromArray(\n               buffer, tag)) != NULL) {\n      buffer = ReadPrimitiveFromArray<CType, DeclaredType>(buffer, &value);\n      values->AddAlreadyReserved(value);\n      ++num_read;\n    }\n    const int read_bytes = num_read * per_value_size;\n    if (read_bytes > 0) {\n      input->Skip(read_bytes);\n    }\n  }\n  return true;\n}\n\n// Specializations of ReadRepeatedPrimitive for the fixed size types, which use\n// the optimized code path.\n#define READ_REPEATED_FIXED_SIZE_PRIMITIVE(CPPTYPE, DECLARED_TYPE)             \\\ntemplate <>                                                                    \\\ninline bool WireFormatLite::ReadRepeatedPrimitive<                             \\\n  CPPTYPE, WireFormatLite::DECLARED_TYPE>(                                     \\\n    int tag_size,                                                              \\\n    uint32 tag,                                                                \\\n    io::CodedInputStream* input,                                               \\\n    RepeatedField<CPPTYPE>* values) {                                          \\\n  return 
ReadRepeatedFixedSizePrimitive<                                       \\\n    CPPTYPE, WireFormatLite::DECLARED_TYPE>(                                   \\\n      tag_size, tag, input, values);                                           \\\n}\n\nREAD_REPEATED_FIXED_SIZE_PRIMITIVE(uint32, TYPE_FIXED32)\nREAD_REPEATED_FIXED_SIZE_PRIMITIVE(uint64, TYPE_FIXED64)\nREAD_REPEATED_FIXED_SIZE_PRIMITIVE(int32, TYPE_SFIXED32)\nREAD_REPEATED_FIXED_SIZE_PRIMITIVE(int64, TYPE_SFIXED64)\nREAD_REPEATED_FIXED_SIZE_PRIMITIVE(float, TYPE_FLOAT)\nREAD_REPEATED_FIXED_SIZE_PRIMITIVE(double, TYPE_DOUBLE)\n\n#undef READ_REPEATED_FIXED_SIZE_PRIMITIVE\n\ntemplate <typename CType, enum WireFormatLite::FieldType DeclaredType>\nbool WireFormatLite::ReadRepeatedPrimitiveNoInline(\n    int tag_size,\n    uint32 tag,\n    io::CodedInputStream* input,\n    RepeatedField<CType>* value) {\n  return ReadRepeatedPrimitive<CType, DeclaredType>(\n      tag_size, tag, input, value);\n}\n\ntemplate <typename CType, enum WireFormatLite::FieldType DeclaredType>\ninline bool WireFormatLite::ReadPackedPrimitive(io::CodedInputStream* input,\n                                                RepeatedField<CType>* values) {\n  int length;\n  if (!input->ReadVarintSizeAsInt(&length)) return false;\n  io::CodedInputStream::Limit limit = input->PushLimit(length);\n  while (input->BytesUntilLimit() > 0) {\n    CType value;\n    if (!ReadPrimitive<CType, DeclaredType>(input, &value)) return false;\n    values->Add(value);\n  }\n  input->PopLimit(limit);\n  return true;\n}\n\ntemplate <typename CType, enum WireFormatLite::FieldType DeclaredType>\ninline bool WireFormatLite::ReadPackedFixedSizePrimitive(\n    io::CodedInputStream* input, RepeatedField<CType>* values) {\n  int length;\n  if (!input->ReadVarintSizeAsInt(&length)) return false;\n  const int old_entries = values->size();\n  const int new_entries = length / sizeof(CType);\n  const int new_bytes = new_entries * sizeof(CType);\n  if (new_bytes != length) return 
false;\n  // We would *like* to pre-allocate the buffer to write into (for\n  // speed), but *must* avoid performing a very large allocation due\n  // to a malicious user-supplied \"length\" above.  So we have a fast\n  // path that pre-allocates when the \"length\" is less than a bound.\n  // We determine the bound by calling BytesUntilTotalBytesLimit() and\n  // BytesUntilLimit().  These return -1 to mean \"no limit set\".\n  // There are four cases:\n  // TotalBytesLimit  Limit\n  // -1               -1     Use slow path.\n  // -1               >= 0   Use fast path if length <= Limit.\n  // >= 0             -1     Use slow path.\n  // >= 0             >= 0   Use fast path if length <= min(both limits).\n  int64 bytes_limit = input->BytesUntilTotalBytesLimit();\n  if (bytes_limit == -1) {\n    bytes_limit = input->BytesUntilLimit();\n  } else {\n    bytes_limit =\n        std::min(bytes_limit, static_cast<int64>(input->BytesUntilLimit()));\n  }\n  if (bytes_limit >= new_bytes) {\n    // Fast-path that pre-allocates *values to the final size.\n#if defined(PROTOBUF_LITTLE_ENDIAN)\n    values->Resize(old_entries + new_entries, 0);\n    // values->mutable_data() may change after Resize(), so do this after:\n    void* dest = reinterpret_cast<void*>(values->mutable_data() + old_entries);\n    if (!input->ReadRaw(dest, new_bytes)) {\n      values->Truncate(old_entries);\n      return false;\n    }\n#else\n    values->Reserve(old_entries + new_entries);\n    CType value;\n    for (int i = 0; i < new_entries; ++i) {\n      if (!ReadPrimitive<CType, DeclaredType>(input, &value)) return false;\n      values->AddAlreadyReserved(value);\n    }\n#endif\n  } else {\n    // This is the slow-path case where \"length\" may be too large to\n    // safely allocate.  
We read as much as we can into *values\n    // without pre-allocating \"length\" bytes.\n    CType value;\n    for (int i = 0; i < new_entries; ++i) {\n      if (!ReadPrimitive<CType, DeclaredType>(input, &value)) return false;\n      values->Add(value);\n    }\n  }\n  return true;\n}\n\n// Specializations of ReadPackedPrimitive for the fixed size types, which use\n// an optimized code path.\n#define READ_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE(CPPTYPE, DECLARED_TYPE)      \\\ntemplate <>                                                                    \\\ninline bool WireFormatLite::ReadPackedPrimitive<                               \\\n  CPPTYPE, WireFormatLite::DECLARED_TYPE>(                                     \\\n    io::CodedInputStream* input,                                               \\\n    RepeatedField<CPPTYPE>* values) {                                          \\\n  return ReadPackedFixedSizePrimitive<                                         \\\n      CPPTYPE, WireFormatLite::DECLARED_TYPE>(input, values);                  \\\n}\n\nREAD_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE(uint32, TYPE_FIXED32)\nREAD_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE(uint64, TYPE_FIXED64)\nREAD_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE(int32, TYPE_SFIXED32)\nREAD_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE(int64, TYPE_SFIXED64)\nREAD_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE(float, TYPE_FLOAT)\nREAD_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE(double, TYPE_DOUBLE)\n\n#undef READ_REPEATED_PACKED_FIXED_SIZE_PRIMITIVE\n\ntemplate <typename CType, enum WireFormatLite::FieldType DeclaredType>\nbool WireFormatLite::ReadPackedPrimitiveNoInline(io::CodedInputStream* input,\n                                                 RepeatedField<CType>* values) {\n  return ReadPackedPrimitive<CType, DeclaredType>(input, values);\n}\n\n\n\ninline bool WireFormatLite::ReadGroup(int field_number,\n                                      io::CodedInputStream* input,\n                                      MessageLite* value) 
{\n  if (!input->IncrementRecursionDepth()) return false;\n  if (!value->MergePartialFromCodedStream(input)) return false;\n  input->DecrementRecursionDepth();\n  // Make sure the last thing read was an end tag for this group.\n  if (!input->LastTagWas(MakeTag(field_number, WIRETYPE_END_GROUP))) {\n    return false;\n  }\n  return true;\n}\ninline bool WireFormatLite::ReadMessage(io::CodedInputStream* input,\n                                        MessageLite* value) {\n  int length;\n  if (!input->ReadVarintSizeAsInt(&length)) return false;\n  std::pair<io::CodedInputStream::Limit, int> p =\n      input->IncrementRecursionDepthAndPushLimit(length);\n  if (p.second < 0 || !value->MergePartialFromCodedStream(input)) return false;\n  // Make sure that parsing stopped when the limit was hit, not at an endgroup\n  // tag.\n  return input->DecrementRecursionDepthAndPopLimit(p.first);\n}\n\n// We name the template parameter something long and extremely unlikely to occur\n// elsewhere because a *qualified* member access expression designed to avoid\n// virtual dispatch, C++03 [basic.lookup.classref] 3.4.5/4 requires that the\n// name of the qualifying class to be looked up both in the context of the full\n// expression (finding the template parameter) and in the context of the object\n// whose member we are accessing. This could potentially find a nested type\n// within that object. The standard goes on to require these names to refer to\n// the same entity, which this collision would violate. 
The lack of a safe way\n// to avoid this collision appears to be a defect in the standard, but until it\n// is corrected, we choose the name to avoid accidental collisions.\ntemplate<typename MessageType_WorkAroundCppLookupDefect>\ninline bool WireFormatLite::ReadGroupNoVirtual(\n    int field_number, io::CodedInputStream* input,\n    MessageType_WorkAroundCppLookupDefect* value) {\n  if (!input->IncrementRecursionDepth()) return false;\n  if (!value->\n      MessageType_WorkAroundCppLookupDefect::MergePartialFromCodedStream(input))\n    return false;\n  input->UnsafeDecrementRecursionDepth();\n  // Make sure the last thing read was an end tag for this group.\n  if (!input->LastTagWas(MakeTag(field_number, WIRETYPE_END_GROUP))) {\n    return false;\n  }\n  return true;\n}\ntemplate<typename MessageType_WorkAroundCppLookupDefect>\ninline bool WireFormatLite::ReadGroupNoVirtualNoRecursionDepth(\n    int field_number, io::CodedInputStream* input,\n    MessageType_WorkAroundCppLookupDefect* value) {\n  return value->MessageType_WorkAroundCppLookupDefect::\n             MergePartialFromCodedStream(input) &&\n         input->LastTagWas(MakeTag(field_number, WIRETYPE_END_GROUP));\n}\ntemplate<typename MessageType_WorkAroundCppLookupDefect>\ninline bool WireFormatLite::ReadMessageNoVirtual(\n    io::CodedInputStream* input, MessageType_WorkAroundCppLookupDefect* value) {\n  int length;\n  if (!input->ReadVarintSizeAsInt(&length)) return false;\n  std::pair<io::CodedInputStream::Limit, int> p =\n      input->IncrementRecursionDepthAndPushLimit(length);\n  if (p.second < 0 || !value->\n      MessageType_WorkAroundCppLookupDefect::MergePartialFromCodedStream(input))\n    return false;\n  // Make sure that parsing stopped when the limit was hit, not at an endgroup\n  // tag.\n  return input->DecrementRecursionDepthAndPopLimit(p.first);\n}\ntemplate<typename MessageType_WorkAroundCppLookupDefect>\ninline bool WireFormatLite::ReadMessageNoVirtualNoRecursionDepth(\n    
io::CodedInputStream* input, MessageType_WorkAroundCppLookupDefect* value) {\n  io::CodedInputStream::Limit old_limit = input->ReadLengthAndPushLimit();\n  if (!value->\n      MessageType_WorkAroundCppLookupDefect::MergePartialFromCodedStream(input))\n    return false;\n  // Make sure that parsing stopped when the limit was hit, not at an endgroup\n  // tag.\n  return input->CheckEntireMessageConsumedAndPopLimit(old_limit);\n}\n\n// ===================================================================\n\ninline void WireFormatLite::WriteTag(int field_number, WireType type,\n                                     io::CodedOutputStream* output) {\n  output->WriteTag(MakeTag(field_number, type));\n}\n\ninline void WireFormatLite::WriteInt32NoTag(int32 value,\n                                            io::CodedOutputStream* output) {\n  output->WriteVarint32SignExtended(value);\n}\ninline void WireFormatLite::WriteInt64NoTag(int64 value,\n                                            io::CodedOutputStream* output) {\n  output->WriteVarint64(static_cast<uint64>(value));\n}\ninline void WireFormatLite::WriteUInt32NoTag(uint32 value,\n                                             io::CodedOutputStream* output) {\n  output->WriteVarint32(value);\n}\ninline void WireFormatLite::WriteUInt64NoTag(uint64 value,\n                                             io::CodedOutputStream* output) {\n  output->WriteVarint64(value);\n}\ninline void WireFormatLite::WriteSInt32NoTag(int32 value,\n                                             io::CodedOutputStream* output) {\n  output->WriteVarint32(ZigZagEncode32(value));\n}\ninline void WireFormatLite::WriteSInt64NoTag(int64 value,\n                                             io::CodedOutputStream* output) {\n  output->WriteVarint64(ZigZagEncode64(value));\n}\ninline void WireFormatLite::WriteFixed32NoTag(uint32 value,\n                                              io::CodedOutputStream* output) {\n  
output->WriteLittleEndian32(value);\n}\ninline void WireFormatLite::WriteFixed64NoTag(uint64 value,\n                                              io::CodedOutputStream* output) {\n  output->WriteLittleEndian64(value);\n}\ninline void WireFormatLite::WriteSFixed32NoTag(int32 value,\n                                               io::CodedOutputStream* output) {\n  output->WriteLittleEndian32(static_cast<uint32>(value));\n}\ninline void WireFormatLite::WriteSFixed64NoTag(int64 value,\n                                               io::CodedOutputStream* output) {\n  output->WriteLittleEndian64(static_cast<uint64>(value));\n}\ninline void WireFormatLite::WriteFloatNoTag(float value,\n                                            io::CodedOutputStream* output) {\n  output->WriteLittleEndian32(EncodeFloat(value));\n}\ninline void WireFormatLite::WriteDoubleNoTag(double value,\n                                             io::CodedOutputStream* output) {\n  output->WriteLittleEndian64(EncodeDouble(value));\n}\ninline void WireFormatLite::WriteBoolNoTag(bool value,\n                                           io::CodedOutputStream* output) {\n  output->WriteVarint32(value ? 
1 : 0);\n}\ninline void WireFormatLite::WriteEnumNoTag(int value,\n                                           io::CodedOutputStream* output) {\n  output->WriteVarint32SignExtended(value);\n}\n\n// See comment on ReadGroupNoVirtual to understand the need for this template\n// parameter name.\ntemplate<typename MessageType_WorkAroundCppLookupDefect>\ninline void WireFormatLite::WriteGroupNoVirtual(\n    int field_number, const MessageType_WorkAroundCppLookupDefect& value,\n    io::CodedOutputStream* output) {\n  WriteTag(field_number, WIRETYPE_START_GROUP, output);\n  value.MessageType_WorkAroundCppLookupDefect::SerializeWithCachedSizes(output);\n  WriteTag(field_number, WIRETYPE_END_GROUP, output);\n}\ntemplate<typename MessageType_WorkAroundCppLookupDefect>\ninline void WireFormatLite::WriteMessageNoVirtual(\n    int field_number, const MessageType_WorkAroundCppLookupDefect& value,\n    io::CodedOutputStream* output) {\n  WriteTag(field_number, WIRETYPE_LENGTH_DELIMITED, output);\n  output->WriteVarint32(\n      value.MessageType_WorkAroundCppLookupDefect::GetCachedSize());\n  value.MessageType_WorkAroundCppLookupDefect::SerializeWithCachedSizes(output);\n}\n\n// ===================================================================\n\ninline uint8* WireFormatLite::WriteTagToArray(int field_number,\n                                              WireType type,\n                                              uint8* target) {\n  return io::CodedOutputStream::WriteTagToArray(MakeTag(field_number, type),\n                                                target);\n}\n\ninline uint8* WireFormatLite::WriteInt32NoTagToArray(int32 value,\n                                                     uint8* target) {\n  return io::CodedOutputStream::WriteVarint32SignExtendedToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteInt64NoTagToArray(int64 value,\n                                                     uint8* target) {\n  return 
io::CodedOutputStream::WriteVarint64ToArray(\n      static_cast<uint64>(value), target);\n}\ninline uint8* WireFormatLite::WriteUInt32NoTagToArray(uint32 value,\n                                                      uint8* target) {\n  return io::CodedOutputStream::WriteVarint32ToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteUInt64NoTagToArray(uint64 value,\n                                                      uint8* target) {\n  return io::CodedOutputStream::WriteVarint64ToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteSInt32NoTagToArray(int32 value,\n                                                      uint8* target) {\n  return io::CodedOutputStream::WriteVarint32ToArray(ZigZagEncode32(value),\n                                                     target);\n}\ninline uint8* WireFormatLite::WriteSInt64NoTagToArray(int64 value,\n                                                      uint8* target) {\n  return io::CodedOutputStream::WriteVarint64ToArray(ZigZagEncode64(value),\n                                                     target);\n}\ninline uint8* WireFormatLite::WriteFixed32NoTagToArray(uint32 value,\n                                                       uint8* target) {\n  return io::CodedOutputStream::WriteLittleEndian32ToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteFixed64NoTagToArray(uint64 value,\n                                                       uint8* target) {\n  return io::CodedOutputStream::WriteLittleEndian64ToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteSFixed32NoTagToArray(int32 value,\n                                                        uint8* target) {\n  return io::CodedOutputStream::WriteLittleEndian32ToArray(\n      static_cast<uint32>(value), target);\n}\ninline uint8* WireFormatLite::WriteSFixed64NoTagToArray(int64 value,\n                                                        uint8* target) {\n  return io::CodedOutputStream::WriteLittleEndian64ToArray(\n      
static_cast<uint64>(value), target);\n}\ninline uint8* WireFormatLite::WriteFloatNoTagToArray(float value,\n                                                     uint8* target) {\n  return io::CodedOutputStream::WriteLittleEndian32ToArray(EncodeFloat(value),\n                                                           target);\n}\ninline uint8* WireFormatLite::WriteDoubleNoTagToArray(double value,\n                                                      uint8* target) {\n  return io::CodedOutputStream::WriteLittleEndian64ToArray(EncodeDouble(value),\n                                                           target);\n}\ninline uint8* WireFormatLite::WriteBoolNoTagToArray(bool value,\n                                                    uint8* target) {\n  return io::CodedOutputStream::WriteVarint32ToArray(value ? 1 : 0, target);\n}\ninline uint8* WireFormatLite::WriteEnumNoTagToArray(int value,\n                                                    uint8* target) {\n  return io::CodedOutputStream::WriteVarint32SignExtendedToArray(value, target);\n}\n\ninline uint8* WireFormatLite::WriteInt32ToArray(int field_number,\n                                                int32 value,\n                                                uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_VARINT, target);\n  return WriteInt32NoTagToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteInt64ToArray(int field_number,\n                                                int64 value,\n                                                uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_VARINT, target);\n  return WriteInt64NoTagToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteUInt32ToArray(int field_number,\n                                                 uint32 value,\n                                                 uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_VARINT, target);\n  return WriteUInt32NoTagToArray(value, 
target);\n}\ninline uint8* WireFormatLite::WriteUInt64ToArray(int field_number,\n                                                 uint64 value,\n                                                 uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_VARINT, target);\n  return WriteUInt64NoTagToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteSInt32ToArray(int field_number,\n                                                 int32 value,\n                                                 uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_VARINT, target);\n  return WriteSInt32NoTagToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteSInt64ToArray(int field_number,\n                                                 int64 value,\n                                                 uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_VARINT, target);\n  return WriteSInt64NoTagToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteFixed32ToArray(int field_number,\n                                                  uint32 value,\n                                                  uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_FIXED32, target);\n  return WriteFixed32NoTagToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteFixed64ToArray(int field_number,\n                                                  uint64 value,\n                                                  uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_FIXED64, target);\n  return WriteFixed64NoTagToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteSFixed32ToArray(int field_number,\n                                                   int32 value,\n                                                   uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_FIXED32, target);\n  return WriteSFixed32NoTagToArray(value, target);\n}\ninline uint8* 
WireFormatLite::WriteSFixed64ToArray(int field_number,\n                                                   int64 value,\n                                                   uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_FIXED64, target);\n  return WriteSFixed64NoTagToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteFloatToArray(int field_number,\n                                                float value,\n                                                uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_FIXED32, target);\n  return WriteFloatNoTagToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteDoubleToArray(int field_number,\n                                                 double value,\n                                                 uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_FIXED64, target);\n  return WriteDoubleNoTagToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteBoolToArray(int field_number,\n                                               bool value,\n                                               uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_VARINT, target);\n  return WriteBoolNoTagToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteEnumToArray(int field_number,\n                                               int value,\n                                               uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_VARINT, target);\n  return WriteEnumNoTagToArray(value, target);\n}\n\ninline uint8* WireFormatLite::WriteStringToArray(int field_number,\n                                                 const string& value,\n                                                 uint8* target) {\n  // String is for UTF-8 text only\n  // WARNING:  In wire_format.cc, both strings and bytes are handled by\n  //   WriteString() to avoid code duplication.  
If the implementations become\n  //   different, you will need to update that usage.\n  target = WriteTagToArray(field_number, WIRETYPE_LENGTH_DELIMITED, target);\n  return io::CodedOutputStream::WriteStringWithSizeToArray(value, target);\n}\ninline uint8* WireFormatLite::WriteBytesToArray(int field_number,\n                                                const string& value,\n                                                uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_LENGTH_DELIMITED, target);\n  return io::CodedOutputStream::WriteStringWithSizeToArray(value, target);\n}\n\n\ninline uint8* WireFormatLite::InternalWriteGroupToArray(\n    int field_number, const MessageLite& value, bool deterministic,\n    uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_START_GROUP, target);\n  target = value.InternalSerializeWithCachedSizesToArray(deterministic, target);\n  return WriteTagToArray(field_number, WIRETYPE_END_GROUP, target);\n}\ninline uint8* WireFormatLite::InternalWriteMessageToArray(\n    int field_number, const MessageLite& value, bool deterministic,\n    uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_LENGTH_DELIMITED, target);\n  target = io::CodedOutputStream::WriteVarint32ToArray(\n    value.GetCachedSize(), target);\n  return value.InternalSerializeWithCachedSizesToArray(deterministic, target);\n}\n\n// See comment on ReadGroupNoVirtual to understand the need for this template\n// parameter name.\ntemplate<typename MessageType_WorkAroundCppLookupDefect>\ninline uint8* WireFormatLite::InternalWriteGroupNoVirtualToArray(\n    int field_number, const MessageType_WorkAroundCppLookupDefect& value,\n    bool deterministic, uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_START_GROUP, target);\n  target = value.InternalSerializeWithCachedSizesToArray(deterministic, target);\n  return WriteTagToArray(field_number, WIRETYPE_END_GROUP, target);\n}\ntemplate<typename 
MessageType_WorkAroundCppLookupDefect>\ninline uint8* WireFormatLite::InternalWriteMessageNoVirtualToArray(\n    int field_number, const MessageType_WorkAroundCppLookupDefect& value,\n    bool deterministic, uint8* target) {\n  target = WriteTagToArray(field_number, WIRETYPE_LENGTH_DELIMITED, target);\n  target = io::CodedOutputStream::WriteVarint32ToArray(\n    value.MessageType_WorkAroundCppLookupDefect::GetCachedSize(), target);\n  return value.InternalSerializeWithCachedSizesToArray(deterministic, target);\n}\n\n// ===================================================================\n\ninline size_t WireFormatLite::Int32Size(int32 value) {\n  return io::CodedOutputStream::VarintSize32SignExtended(value);\n}\ninline size_t WireFormatLite::Int64Size(int64 value) {\n  return io::CodedOutputStream::VarintSize64(static_cast<uint64>(value));\n}\ninline size_t WireFormatLite::UInt32Size(uint32 value) {\n  return io::CodedOutputStream::VarintSize32(value);\n}\ninline size_t WireFormatLite::UInt64Size(uint64 value) {\n  return io::CodedOutputStream::VarintSize64(value);\n}\ninline size_t WireFormatLite::SInt32Size(int32 value) {\n  return io::CodedOutputStream::VarintSize32(ZigZagEncode32(value));\n}\ninline size_t WireFormatLite::SInt64Size(int64 value) {\n  return io::CodedOutputStream::VarintSize64(ZigZagEncode64(value));\n}\ninline size_t WireFormatLite::EnumSize(int value) {\n  return io::CodedOutputStream::VarintSize32SignExtended(value);\n}\n\ninline size_t WireFormatLite::StringSize(const string& value) {\n  return LengthDelimitedSize(value.size());\n}\ninline size_t WireFormatLite::BytesSize(const string& value) {\n  return LengthDelimitedSize(value.size());\n}\n\n\ninline size_t WireFormatLite::GroupSize(const MessageLite& value) {\n  return value.ByteSizeLong();\n}\ninline size_t WireFormatLite::MessageSize(const MessageLite& value) {\n  return LengthDelimitedSize(value.ByteSizeLong());\n}\n\n// See comment on ReadGroupNoVirtual to understand the need for this 
template\n// parameter name.\ntemplate<typename MessageType_WorkAroundCppLookupDefect>\ninline size_t WireFormatLite::GroupSizeNoVirtual(\n    const MessageType_WorkAroundCppLookupDefect& value) {\n  return value.MessageType_WorkAroundCppLookupDefect::ByteSizeLong();\n}\ntemplate<typename MessageType_WorkAroundCppLookupDefect>\ninline size_t WireFormatLite::MessageSizeNoVirtual(\n    const MessageType_WorkAroundCppLookupDefect& value) {\n  return LengthDelimitedSize(\n      value.MessageType_WorkAroundCppLookupDefect::ByteSizeLong());\n}\n\ninline size_t WireFormatLite::LengthDelimitedSize(size_t length) {\n  return io::CodedOutputStream::VarintSize32(length) + length;\n}\n\n}  // namespace internal\n}  // namespace protobuf\n\n}  // namespace google\n#endif  // GOOGLE_PROTOBUF_WIRE_FORMAT_LITE_INL_H__\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/wrappers.pb.h",
    "content": "// Generated by the protocol buffer compiler.  DO NOT EDIT!\n// source: google/protobuf/wrappers.proto\n\n#ifndef PROTOBUF_google_2fprotobuf_2fwrappers_2eproto__INCLUDED\n#define PROTOBUF_google_2fprotobuf_2fwrappers_2eproto__INCLUDED\n\n#include <string>\n\n#include <google/protobuf/stubs/common.h>\n\n#if GOOGLE_PROTOBUF_VERSION < 3001000\n#error This file was generated by a newer version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please update\n#error your headers.\n#endif\n#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION\n#error This file was generated by an older version of protoc which is\n#error incompatible with your Protocol Buffer headers.  Please\n#error regenerate this file with a newer version of protoc.\n#endif\n\n#include <google/protobuf/arena.h>\n#include <google/protobuf/arenastring.h>\n#include <google/protobuf/generated_message_util.h>\n#include <google/protobuf/metadata.h>\n#include <google/protobuf/message.h>\n#include <google/protobuf/repeated_field.h>\n#include <google/protobuf/extension_set.h>\n#include <google/protobuf/unknown_field_set.h>\n// @@protoc_insertion_point(includes)\n\nnamespace google {\nnamespace protobuf {\n\n// Internal implementation detail -- do not call these.\nvoid LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fwrappers_2eproto();\nvoid LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fwrappers_2eproto();\nvoid protobuf_AssignDesc_google_2fprotobuf_2fwrappers_2eproto();\nvoid protobuf_ShutdownFile_google_2fprotobuf_2fwrappers_2eproto();\n\nclass BoolValue;\nclass BytesValue;\nclass DoubleValue;\nclass FloatValue;\nclass Int32Value;\nclass Int64Value;\nclass StringValue;\nclass UInt32Value;\nclass UInt64Value;\n\n// ===================================================================\n\nclass LIBPROTOBUF_EXPORT DoubleValue : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.DoubleValue) */ {\n public:\n  
DoubleValue();\n  virtual ~DoubleValue();\n\n  DoubleValue(const DoubleValue& from);\n\n  inline DoubleValue& operator=(const DoubleValue& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const DoubleValue& default_instance();\n\n  static const DoubleValue* internal_default_instance();\n\n  void UnsafeArenaSwap(DoubleValue* other);\n  void Swap(DoubleValue* other);\n\n  // implements Message ----------------------------------------------\n\n  inline DoubleValue* New() const { return New(NULL); }\n\n  DoubleValue* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const DoubleValue& from);\n  void MergeFrom(const DoubleValue& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(DoubleValue* other);\n  void UnsafeMergeFrom(const DoubleValue& from);\n  protected:\n  explicit DoubleValue(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void 
RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional double value = 1;\n  void clear_value();\n  static const int kValueFieldNumber = 1;\n  double value() const;\n  void set_value(double value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.DoubleValue)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  double value_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fwrappers_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fwrappers_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<DoubleValue> DoubleValue_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT FloatValue : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.FloatValue) */ {\n public:\n  FloatValue();\n  virtual ~FloatValue();\n\n  FloatValue(const FloatValue& from);\n\n  inline FloatValue& operator=(const FloatValue& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  
inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const FloatValue& default_instance();\n\n  static const FloatValue* internal_default_instance();\n\n  void UnsafeArenaSwap(FloatValue* other);\n  void Swap(FloatValue* other);\n\n  // implements Message ----------------------------------------------\n\n  inline FloatValue* New() const { return New(NULL); }\n\n  FloatValue* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const FloatValue& from);\n  void MergeFrom(const FloatValue& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(FloatValue* other);\n  void UnsafeMergeFrom(const FloatValue& from);\n  protected:\n  explicit FloatValue(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  
::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional float value = 1;\n  void clear_value();\n  static const int kValueFieldNumber = 1;\n  float value() const;\n  void set_value(float value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.FloatValue)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  float value_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fwrappers_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fwrappers_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<FloatValue> FloatValue_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT Int64Value : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Int64Value) */ {\n public:\n  Int64Value();\n  virtual ~Int64Value();\n\n  Int64Value(const Int64Value& from);\n\n  inline Int64Value& operator=(const Int64Value& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Int64Value& default_instance();\n\n  static const Int64Value* internal_default_instance();\n\n  void UnsafeArenaSwap(Int64Value* other);\n  
void Swap(Int64Value* other);\n\n  // implements Message ----------------------------------------------\n\n  inline Int64Value* New() const { return New(NULL); }\n\n  Int64Value* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Int64Value& from);\n  void MergeFrom(const Int64Value& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Int64Value* other);\n  void UnsafeMergeFrom(const Int64Value& from);\n  protected:\n  explicit Int64Value(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional int64 value = 1;\n  void clear_value();\n  static const int kValueFieldNumber = 1;\n  
::google::protobuf::int64 value() const;\n  void set_value(::google::protobuf::int64 value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.Int64Value)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  ::google::protobuf::int64 value_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fwrappers_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fwrappers_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Int64Value> Int64Value_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT UInt64Value : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.UInt64Value) */ {\n public:\n  UInt64Value();\n  virtual ~UInt64Value();\n\n  UInt64Value(const UInt64Value& from);\n\n  inline UInt64Value& operator=(const UInt64Value& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const UInt64Value& default_instance();\n\n  static const UInt64Value* internal_default_instance();\n\n  void UnsafeArenaSwap(UInt64Value* other);\n  void Swap(UInt64Value* other);\n\n  // implements Message ----------------------------------------------\n\n  inline UInt64Value* New() const { return New(NULL); }\n\n  UInt64Value* New(::google::protobuf::Arena* arena) const;\n  
void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const UInt64Value& from);\n  void MergeFrom(const UInt64Value& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(UInt64Value* other);\n  void UnsafeMergeFrom(const UInt64Value& from);\n  protected:\n  explicit UInt64Value(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional uint64 value = 1;\n  void clear_value();\n  static const int kValueFieldNumber = 1;\n  ::google::protobuf::uint64 value() const;\n  void set_value(::google::protobuf::uint64 value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.UInt64Value)\n private:\n\n  
::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  ::google::protobuf::uint64 value_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fwrappers_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fwrappers_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<UInt64Value> UInt64Value_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT Int32Value : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.Int32Value) */ {\n public:\n  Int32Value();\n  virtual ~Int32Value();\n\n  Int32Value(const Int32Value& from);\n\n  inline Int32Value& operator=(const Int32Value& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const Int32Value& default_instance();\n\n  static const Int32Value* internal_default_instance();\n\n  void UnsafeArenaSwap(Int32Value* other);\n  void Swap(Int32Value* other);\n\n  // implements Message ----------------------------------------------\n\n  inline Int32Value* New() const { return New(NULL); }\n\n  Int32Value* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const Int32Value& from);\n  void MergeFrom(const Int32Value& 
from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(Int32Value* other);\n  void UnsafeMergeFrom(const Int32Value& from);\n  protected:\n  explicit Int32Value(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional int32 value = 1;\n  void clear_value();\n  static const int kValueFieldNumber = 1;\n  ::google::protobuf::int32 value() const;\n  void set_value(::google::protobuf::int32 value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.Int32Value)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  ::google::protobuf::int32 value_;\n  mutable 
int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fwrappers_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fwrappers_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<Int32Value> Int32Value_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT UInt32Value : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.UInt32Value) */ {\n public:\n  UInt32Value();\n  virtual ~UInt32Value();\n\n  UInt32Value(const UInt32Value& from);\n\n  inline UInt32Value& operator=(const UInt32Value& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const UInt32Value& default_instance();\n\n  static const UInt32Value* internal_default_instance();\n\n  void UnsafeArenaSwap(UInt32Value* other);\n  void Swap(UInt32Value* other);\n\n  // implements Message ----------------------------------------------\n\n  inline UInt32Value* New() const { return New(NULL); }\n\n  UInt32Value* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const UInt32Value& from);\n  void MergeFrom(const UInt32Value& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      
::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(UInt32Value* other);\n  void UnsafeMergeFrom(const UInt32Value& from);\n  protected:\n  explicit UInt32Value(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional uint32 value = 1;\n  void clear_value();\n  static const int kValueFieldNumber = 1;\n  ::google::protobuf::uint32 value() const;\n  void set_value(::google::protobuf::uint32 value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.UInt32Value)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  ::google::protobuf::uint32 value_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT 
protobuf_AddDesc_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fwrappers_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fwrappers_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<UInt32Value> UInt32Value_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT BoolValue : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.BoolValue) */ {\n public:\n  BoolValue();\n  virtual ~BoolValue();\n\n  BoolValue(const BoolValue& from);\n\n  inline BoolValue& operator=(const BoolValue& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const BoolValue& default_instance();\n\n  static const BoolValue* internal_default_instance();\n\n  void UnsafeArenaSwap(BoolValue* other);\n  void Swap(BoolValue* other);\n\n  // implements Message ----------------------------------------------\n\n  inline BoolValue* New() const { return New(NULL); }\n\n  BoolValue* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const BoolValue& from);\n  void MergeFrom(const BoolValue& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n 
 ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(BoolValue* other);\n  void UnsafeMergeFrom(const BoolValue& from);\n  protected:\n  explicit BoolValue(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional bool value = 1;\n  void clear_value();\n  static const int kValueFieldNumber = 1;\n  bool value() const;\n  void set_value(bool value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.BoolValue)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  bool value_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fwrappers_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fwrappers_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<BoolValue> 
BoolValue_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT StringValue : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.StringValue) */ {\n public:\n  StringValue();\n  virtual ~StringValue();\n\n  StringValue(const StringValue& from);\n\n  inline StringValue& operator=(const StringValue& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const StringValue& default_instance();\n\n  static const StringValue* internal_default_instance();\n\n  void UnsafeArenaSwap(StringValue* other);\n  void Swap(StringValue* other);\n\n  // implements Message ----------------------------------------------\n\n  inline StringValue* New() const { return New(NULL); }\n\n  StringValue* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const StringValue& from);\n  void MergeFrom(const StringValue& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void 
SetCachedSize(int size) const;\n  void InternalSwap(StringValue* other);\n  void UnsafeMergeFrom(const StringValue& from);\n  protected:\n  explicit StringValue(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional string value = 1;\n  void clear_value();\n  static const int kValueFieldNumber = 1;\n  const ::std::string& value() const;\n  void set_value(const ::std::string& value);\n  void set_value(const char* value);\n  void set_value(const char* value, size_t size);\n  ::std::string* mutable_value();\n  ::std::string* release_value();\n  void set_allocated_value(::std::string* value);\n  ::std::string* unsafe_arena_release_value();\n  void unsafe_arena_set_allocated_value(\n      ::std::string* value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.StringValue)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  ::google::protobuf::internal::ArenaStringPtr value_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void protobuf_AssignDesc_google_2fprotobuf_2fwrappers_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fwrappers_2eproto();\n\n  
void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<StringValue> StringValue_default_instance_;\n\n// -------------------------------------------------------------------\n\nclass LIBPROTOBUF_EXPORT BytesValue : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:google.protobuf.BytesValue) */ {\n public:\n  BytesValue();\n  virtual ~BytesValue();\n\n  BytesValue(const BytesValue& from);\n\n  inline BytesValue& operator=(const BytesValue& from) {\n    CopyFrom(from);\n    return *this;\n  }\n\n  inline ::google::protobuf::Arena* GetArena() const { return GetArenaNoVirtual(); }\n  inline void* GetMaybeArenaPointer() const {\n    return MaybeArenaPtr();\n  }\n  static const ::google::protobuf::Descriptor* descriptor();\n  static const BytesValue& default_instance();\n\n  static const BytesValue* internal_default_instance();\n\n  void UnsafeArenaSwap(BytesValue* other);\n  void Swap(BytesValue* other);\n\n  // implements Message ----------------------------------------------\n\n  inline BytesValue* New() const { return New(NULL); }\n\n  BytesValue* New(::google::protobuf::Arena* arena) const;\n  void CopyFrom(const ::google::protobuf::Message& from);\n  void MergeFrom(const ::google::protobuf::Message& from);\n  void CopyFrom(const BytesValue& from);\n  void MergeFrom(const BytesValue& from);\n  void Clear();\n  bool IsInitialized() const;\n\n  size_t ByteSizeLong() const;\n  bool MergePartialFromCodedStream(\n      ::google::protobuf::io::CodedInputStream* input);\n  void SerializeWithCachedSizes(\n      ::google::protobuf::io::CodedOutputStream* output) const;\n  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(\n      bool deterministic, ::google::protobuf::uint8* output) const;\n  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {\n    return InternalSerializeWithCachedSizesToArray(false, output);\n  }\n  int GetCachedSize() 
const { return _cached_size_; }\n  private:\n  void SharedCtor();\n  void SharedDtor();\n  void SetCachedSize(int size) const;\n  void InternalSwap(BytesValue* other);\n  void UnsafeMergeFrom(const BytesValue& from);\n  protected:\n  explicit BytesValue(::google::protobuf::Arena* arena);\n  private:\n  static void ArenaDtor(void* object);\n  inline void RegisterArenaDtor(::google::protobuf::Arena* arena);\n  private:\n  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {\n    return _internal_metadata_.arena();\n  }\n  inline void* MaybeArenaPtr() const {\n    return _internal_metadata_.raw_arena_ptr();\n  }\n  public:\n\n  ::google::protobuf::Metadata GetMetadata() const;\n\n  // nested types ----------------------------------------------------\n\n  // accessors -------------------------------------------------------\n\n  // optional bytes value = 1;\n  void clear_value();\n  static const int kValueFieldNumber = 1;\n  const ::std::string& value() const;\n  void set_value(const ::std::string& value);\n  void set_value(const char* value);\n  void set_value(const void* value, size_t size);\n  ::std::string* mutable_value();\n  ::std::string* release_value();\n  void set_allocated_value(::std::string* value);\n  ::std::string* unsafe_arena_release_value();\n  void unsafe_arena_set_allocated_value(\n      ::std::string* value);\n\n  // @@protoc_insertion_point(class_scope:google.protobuf.BytesValue)\n private:\n\n  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;\n  friend class ::google::protobuf::Arena;\n  typedef void InternalArenaConstructable_;\n  typedef void DestructorSkippable_;\n  ::google::protobuf::internal::ArenaStringPtr value_;\n  mutable int _cached_size_;\n  friend void LIBPROTOBUF_EXPORT protobuf_InitDefaults_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void LIBPROTOBUF_EXPORT protobuf_AddDesc_google_2fprotobuf_2fwrappers_2eproto_impl();\n  friend void 
protobuf_AssignDesc_google_2fprotobuf_2fwrappers_2eproto();\n  friend void protobuf_ShutdownFile_google_2fprotobuf_2fwrappers_2eproto();\n\n  void InitAsDefaultInstance();\n};\nextern ::google::protobuf::internal::ExplicitlyConstructed<BytesValue> BytesValue_default_instance_;\n\n// ===================================================================\n\n\n// ===================================================================\n\n#if !PROTOBUF_INLINE_NOT_IN_HEADERS\n// DoubleValue\n\n// optional double value = 1;\ninline void DoubleValue::clear_value() {\n  value_ = 0;\n}\ninline double DoubleValue::value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.DoubleValue.value)\n  return value_;\n}\ninline void DoubleValue::set_value(double value) {\n  \n  value_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.DoubleValue.value)\n}\n\ninline const DoubleValue* DoubleValue::internal_default_instance() {\n  return &DoubleValue_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// FloatValue\n\n// optional float value = 1;\ninline void FloatValue::clear_value() {\n  value_ = 0;\n}\ninline float FloatValue::value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.FloatValue.value)\n  return value_;\n}\ninline void FloatValue::set_value(float value) {\n  \n  value_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.FloatValue.value)\n}\n\ninline const FloatValue* FloatValue::internal_default_instance() {\n  return &FloatValue_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// Int64Value\n\n// optional int64 value = 1;\ninline void Int64Value::clear_value() {\n  value_ = GOOGLE_LONGLONG(0);\n}\ninline ::google::protobuf::int64 Int64Value::value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Int64Value.value)\n  return value_;\n}\ninline void Int64Value::set_value(::google::protobuf::int64 
value) {\n  \n  value_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Int64Value.value)\n}\n\ninline const Int64Value* Int64Value::internal_default_instance() {\n  return &Int64Value_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// UInt64Value\n\n// optional uint64 value = 1;\ninline void UInt64Value::clear_value() {\n  value_ = GOOGLE_ULONGLONG(0);\n}\ninline ::google::protobuf::uint64 UInt64Value::value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.UInt64Value.value)\n  return value_;\n}\ninline void UInt64Value::set_value(::google::protobuf::uint64 value) {\n  \n  value_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.UInt64Value.value)\n}\n\ninline const UInt64Value* UInt64Value::internal_default_instance() {\n  return &UInt64Value_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// Int32Value\n\n// optional int32 value = 1;\ninline void Int32Value::clear_value() {\n  value_ = 0;\n}\ninline ::google::protobuf::int32 Int32Value::value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.Int32Value.value)\n  return value_;\n}\ninline void Int32Value::set_value(::google::protobuf::int32 value) {\n  \n  value_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.Int32Value.value)\n}\n\ninline const Int32Value* Int32Value::internal_default_instance() {\n  return &Int32Value_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// UInt32Value\n\n// optional uint32 value = 1;\ninline void UInt32Value::clear_value() {\n  value_ = 0u;\n}\ninline ::google::protobuf::uint32 UInt32Value::value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.UInt32Value.value)\n  return value_;\n}\ninline void UInt32Value::set_value(::google::protobuf::uint32 value) {\n  \n  value_ = value;\n  // 
@@protoc_insertion_point(field_set:google.protobuf.UInt32Value.value)\n}\n\ninline const UInt32Value* UInt32Value::internal_default_instance() {\n  return &UInt32Value_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// BoolValue\n\n// optional bool value = 1;\ninline void BoolValue::clear_value() {\n  value_ = false;\n}\ninline bool BoolValue::value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.BoolValue.value)\n  return value_;\n}\ninline void BoolValue::set_value(bool value) {\n  \n  value_ = value;\n  // @@protoc_insertion_point(field_set:google.protobuf.BoolValue.value)\n}\n\ninline const BoolValue* BoolValue::internal_default_instance() {\n  return &BoolValue_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// StringValue\n\n// optional string value = 1;\ninline void StringValue::clear_value() {\n  value_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline const ::std::string& StringValue::value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.StringValue.value)\n  return value_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void StringValue::set_value(const ::std::string& value) {\n  \n  value_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set:google.protobuf.StringValue.value)\n}\ninline void StringValue::set_value(const char* value) {\n  \n  value_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),\n              GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_char:google.protobuf.StringValue.value)\n}\ninline void StringValue::set_value(const char* value,\n    size_t size) {\n  \n  value_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(\n      
reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.StringValue.value)\n}\ninline ::std::string* StringValue::mutable_value() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.StringValue.value)\n  return value_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* StringValue::release_value() {\n  // @@protoc_insertion_point(field_release:google.protobuf.StringValue.value)\n  \n  return value_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* StringValue::unsafe_arena_release_value() {\n  // @@protoc_insertion_point(field_unsafe_arena_release:google.protobuf.StringValue.value)\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  \n  return value_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      GetArenaNoVirtual());\n}\ninline void StringValue::set_allocated_value(::std::string* value) {\n  if (value != NULL) {\n    \n  } else {\n    \n  }\n  value_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value,\n      GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.StringValue.value)\n}\ninline void StringValue::unsafe_arena_set_allocated_value(\n    ::std::string* value) {\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  if (value != NULL) {\n    \n  } else {\n    \n  }\n  value_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      value, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.StringValue.value)\n}\n\ninline const StringValue* StringValue::internal_default_instance() {\n  return &StringValue_default_instance_.get();\n}\n// -------------------------------------------------------------------\n\n// BytesValue\n\n// optional bytes value = 
1;\ninline void BytesValue::clear_value() {\n  value_.ClearToEmpty(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline const ::std::string& BytesValue::value() const {\n  // @@protoc_insertion_point(field_get:google.protobuf.BytesValue.value)\n  return value_.Get(&::google::protobuf::internal::GetEmptyStringAlreadyInited());\n}\ninline void BytesValue::set_value(const ::std::string& value) {\n  \n  value_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set:google.protobuf.BytesValue.value)\n}\ninline void BytesValue::set_value(const char* value) {\n  \n  value_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value),\n              GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_char:google.protobuf.BytesValue.value)\n}\ninline void BytesValue::set_value(const void* value,\n    size_t size) {\n  \n  value_.Set(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(\n      reinterpret_cast<const char*>(value), size), GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_pointer:google.protobuf.BytesValue.value)\n}\ninline ::std::string* BytesValue::mutable_value() {\n  \n  // @@protoc_insertion_point(field_mutable:google.protobuf.BytesValue.value)\n  return value_.Mutable(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* BytesValue::release_value() {\n  // @@protoc_insertion_point(field_release:google.protobuf.BytesValue.value)\n  \n  return value_.Release(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), GetArenaNoVirtual());\n}\ninline ::std::string* BytesValue::unsafe_arena_release_value() {\n  // @@protoc_insertion_point(field_unsafe_arena_release:google.protobuf.BytesValue.value)\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  \n  return 
value_.UnsafeArenaRelease(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      GetArenaNoVirtual());\n}\ninline void BytesValue::set_allocated_value(::std::string* value) {\n  if (value != NULL) {\n    \n  } else {\n    \n  }\n  value_.SetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value,\n      GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_set_allocated:google.protobuf.BytesValue.value)\n}\ninline void BytesValue::unsafe_arena_set_allocated_value(\n    ::std::string* value) {\n  GOOGLE_DCHECK(GetArenaNoVirtual() != NULL);\n  if (value != NULL) {\n    \n  } else {\n    \n  }\n  value_.UnsafeArenaSetAllocated(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),\n      value, GetArenaNoVirtual());\n  // @@protoc_insertion_point(field_unsafe_arena_set_allocated:google.protobuf.BytesValue.value)\n}\n\ninline const BytesValue* BytesValue::internal_default_instance() {\n  return &BytesValue_default_instance_.get();\n}\n#endif  // !PROTOBUF_INLINE_NOT_IN_HEADERS\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n// -------------------------------------------------------------------\n\n\n// @@protoc_insertion_point(namespace_scope)\n\n}  // namespace protobuf\n}  // namespace google\n\n// @@protoc_insertion_point(global_scope)\n\n#endif  // PROTOBUF_google_2fprotobuf_2fwrappers_2eproto__INCLUDED\n"
  },
  {
    "path": "app/src/main/cpp/google/protobuf/wrappers.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Wrappers for primitive (non-message) types. 
These types are useful\n// for embedding primitives in the `google.protobuf.Any` type and for places\n// where we need to distinguish between the absence of a primitive\n// typed field and its default value.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption cc_enable_arenas = true;\noption go_package = \"github.com/golang/protobuf/ptypes/wrappers\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"WrappersProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n// Wrapper message for `double`.\n//\n// The JSON representation for `DoubleValue` is JSON number.\nmessage DoubleValue {\n  // The double value.\n  double value = 1;\n}\n\n// Wrapper message for `float`.\n//\n// The JSON representation for `FloatValue` is JSON number.\nmessage FloatValue {\n  // The float value.\n  float value = 1;\n}\n\n// Wrapper message for `int64`.\n//\n// The JSON representation for `Int64Value` is JSON string.\nmessage Int64Value {\n  // The int64 value.\n  int64 value = 1;\n}\n\n// Wrapper message for `uint64`.\n//\n// The JSON representation for `UInt64Value` is JSON string.\nmessage UInt64Value {\n  // The uint64 value.\n  uint64 value = 1;\n}\n\n// Wrapper message for `int32`.\n//\n// The JSON representation for `Int32Value` is JSON number.\nmessage Int32Value {\n  // The int32 value.\n  int32 value = 1;\n}\n\n// Wrapper message for `uint32`.\n//\n// The JSON representation for `UInt32Value` is JSON number.\nmessage UInt32Value {\n  // The uint32 value.\n  uint32 value = 1;\n}\n\n// Wrapper message for `bool`.\n//\n// The JSON representation for `BoolValue` is JSON `true` and `false`.\nmessage BoolValue {\n  // The bool value.\n  bool value = 1;\n}\n\n// Wrapper message for `string`.\n//\n// The JSON representation for `StringValue` is JSON string.\nmessage StringValue {\n  // The string value.\n  string value = 1;\n}\n\n// Wrapper message for 
`bytes`.\n//\n// The JSON representation for `BytesValue` is JSON string.\nmessage BytesValue {\n  // The bytes value.\n  bytes value = 1;\n}\n"
  },
  {
    "path": "app/src/main/cpp/native-lib.cpp",
    "content": "#include <jni.h>\n#include <string>\n#include <algorithm>\n#define PROTOBUF_USE_DLLS 1\n#define CAFFE2_USE_LITE_PROTO 1\n#include <caffe2/core/predictor.h>\n#include <caffe2/core/operator.h>\n#include <caffe2/core/timer.h>\n\n#include \"caffe2/core/init.h\"\n\n#include <android/asset_manager.h>\n#include <android/asset_manager_jni.h>\n#include <android/log.h>\n#include \"classes.h\"\n#define IMG_H 227\n#define IMG_W 227\n#define IMG_C 3\n#define MAX_DATA_SIZE IMG_H * IMG_W * IMG_C\n#define alog(...) __android_log_print(ANDROID_LOG_ERROR, \"F8DEMO\", __VA_ARGS__);\n\nstatic caffe2::NetDef _initNet, _predictNet;\nstatic caffe2::Predictor *_predictor;\nstatic char raw_data[MAX_DATA_SIZE];\nstatic float input_data[MAX_DATA_SIZE];\nstatic caffe2::Workspace ws;\n\n// A function to load the NetDefs from protobufs.\nvoid loadToNetDef(AAssetManager* mgr, caffe2::NetDef* net, const char *filename) {\n    AAsset* asset = AAssetManager_open(mgr, filename, AASSET_MODE_BUFFER);\n    assert(asset != nullptr);\n    const void *data = AAsset_getBuffer(asset);\n    assert(data != nullptr);\n    off_t len = AAsset_getLength(asset);\n    assert(len != 0);\n    if (!net->ParseFromArray(data, len)) {\n        alog(\"Couldn't parse net from data.\\n\");\n    }\n    AAsset_close(asset);\n}\n\nextern \"C\"\nvoid\nJava_facebook_f8demo_ClassifyCamera_initCaffe2(\n        JNIEnv* env,\n        jobject /* this */,\n        jobject assetManager) {\n    AAssetManager *mgr = AAssetManager_fromJava(env, assetManager);\n    alog(\"Attempting to load protobuf netdefs...\");\n    loadToNetDef(mgr, &_initNet,   \"squeeze_init_net.pb\");\n    loadToNetDef(mgr, &_predictNet,\"squeeze_predict_net.pb\");\n    alog(\"done.\");\n    alog(\"Instantiating predictor...\");\n    _predictor = new caffe2::Predictor(_initNet, _predictNet);\n    alog(\"done.\")\n}\n\nfloat avg_fps = 0.0;\nfloat total_fps = 0.0;\nint iters_fps = 10;\n\nextern \"C\"\nJNIEXPORT jstring 
JNICALL\nJava_facebook_f8demo_ClassifyCamera_classificationFromCaffe2(\n        JNIEnv *env,\n        jobject /* this */,\n        jint h, jint w, jbyteArray Y, jbyteArray U, jbyteArray V,\n        jint rowStride, jint pixelStride,\n        jboolean infer_HWC) {\n    if (!_predictor) {\n        return env->NewStringUTF(\"Loading...\");\n    }\n    jsize Y_len = env->GetArrayLength(Y);\n    jbyte * Y_data = env->GetByteArrayElements(Y, 0);\n    assert(Y_len <= MAX_DATA_SIZE);\n    jsize U_len = env->GetArrayLength(U);\n    jbyte * U_data = env->GetByteArrayElements(U, 0);\n    assert(U_len <= MAX_DATA_SIZE);\n    jsize V_len = env->GetArrayLength(V);\n    jbyte * V_data = env->GetByteArrayElements(V, 0);\n    assert(V_len <= MAX_DATA_SIZE);\n\n#define min(a,b) ((a) > (b)) ? (b) : (a)\n#define max(a,b) ((a) > (b)) ? (a) : (b)\n\n    auto h_offset = max(0, (h - IMG_H) / 2);\n    auto w_offset = max(0, (w - IMG_W) / 2);\n\n    auto iter_h = IMG_H;\n    auto iter_w = IMG_W;\n    if (h < IMG_H) {\n        iter_h = h;\n    }\n    if (w < IMG_W) {\n        iter_w = w;\n    }\n\n    for (auto i = 0; i < iter_h; ++i) {\n        jbyte* Y_row = &Y_data[(h_offset + i) * w];\n        jbyte* U_row = &U_data[(h_offset + i) / 2 * rowStride];\n        jbyte* V_row = &V_data[(h_offset + i) / 2 * rowStride];\n        for (auto j = 0; j < iter_w; ++j) {\n            // Tested on Pixel and S7.\n            char y = Y_row[w_offset + j];\n            char u = U_row[pixelStride * ((w_offset+j)/pixelStride)];\n            char v = V_row[pixelStride * ((w_offset+j)/pixelStride)];\n\n            float b_mean = 104.00698793f;\n            float g_mean = 116.66876762f;\n            float r_mean = 122.67891434f;\n\n            auto b_i = 0 * IMG_H * IMG_W + j * IMG_W + i;\n            auto g_i = 1 * IMG_H * IMG_W + j * IMG_W + i;\n            auto r_i = 2 * IMG_H * IMG_W + j * IMG_W + i;\n\n            if (infer_HWC) {\n                b_i = (j * IMG_W + i) * IMG_C;\n                g_i = (j * 
IMG_W + i) * IMG_C + 1;\n                r_i = (j * IMG_W + i) * IMG_C + 2;\n            }\n/*\n  R = Y + 1.402 (V-128)\n  G = Y - 0.34414 (U-128) - 0.71414 (V-128)\n  B = Y + 1.772 (U-V)\n */\n            input_data[r_i] = -r_mean + (float) ((float) min(255., max(0., (float) (y + 1.402 * (v - 128)))));\n            input_data[g_i] = -g_mean + (float) ((float) min(255., max(0., (float) (y - 0.34414 * (u - 128) - 0.71414 * (v - 128)))));\n            input_data[b_i] = -b_mean + (float) ((float) min(255., max(0., (float) (y + 1.772 * (u - v)))));\n\n        }\n    }\n\n    caffe2::TensorCPU input;\n    if (infer_HWC) {\n        input.Resize(std::vector<int>({IMG_H, IMG_W, IMG_C}));\n    } else {\n        input.Resize(std::vector<int>({1, IMG_C, IMG_H, IMG_W}));\n    }\n    memcpy(input.mutable_data<float>(), input_data, IMG_H * IMG_W * IMG_C * sizeof(float));\n    caffe2::Predictor::TensorVector input_vec{&input};\n    caffe2::Predictor::TensorVector output_vec;\n    caffe2::Timer t;\n    t.Start();\n    _predictor->run(input_vec, &output_vec);\n    float fps = 1000/t.MilliSeconds();\n    total_fps += fps;\n    avg_fps = total_fps / iters_fps;\n    total_fps -= avg_fps;\n\n    constexpr int k = 5;\n    float max[k] = {0};\n    int max_index[k] = {0};\n    // Find the top-k results manually.\n    if (output_vec.capacity() > 0) {\n        for (auto output : output_vec) {\n            for (auto i = 0; i < output->size(); ++i) {\n                for (auto j = 0; j < k; ++j) {\n                    if (output->template data<float>()[i] > max[j]) {\n                        for (auto _j = k - 1; _j > j; --_j) {\n                            max[_j - 1] = max[_j];\n                            max_index[_j - 1] = max_index[_j];\n                        }\n                        max[j] = output->template data<float>()[i];\n                        max_index[j] = i;\n                        goto skip;\n                    }\n                }\n                skip:;\n            
}\n        }\n    }\n    std::ostringstream stringStream;\n    stringStream << avg_fps << \" FPS\\n\";\n\n    for (auto j = 0; j < k; ++j) {\n        stringStream << j << \": \" << imagenet_classes[max_index[j]] << \" - \" << max[j] / 10 << \"%\\n\";\n    }\n    return env->NewStringUTF(stringStream.str().c_str());\n}\n"
  },
  {
    "path": "app/src/main/java/facebook/f8demo/ClassifyCamera.java",
    "content": "package facebook.f8demo;\n\nimport android.Manifest;\nimport android.app.ActionBar;\nimport android.content.Context;\nimport android.content.pm.PackageManager;\nimport android.content.res.AssetManager;\nimport android.graphics.ImageFormat;\nimport android.graphics.PixelFormat;\nimport android.graphics.SurfaceTexture;\nimport android.hardware.camera2.CameraAccessException;\nimport android.hardware.camera2.CameraCaptureSession;\nimport android.hardware.camera2.CameraCharacteristics;\nimport android.hardware.camera2.CameraDevice;\nimport android.hardware.camera2.CameraManager;\nimport android.hardware.camera2.CameraMetadata;\nimport android.hardware.camera2.CaptureRequest;\nimport android.hardware.camera2.TotalCaptureResult;\nimport android.hardware.camera2.params.StreamConfigurationMap;\nimport android.media.Image;\nimport android.media.ImageReader;\nimport android.os.AsyncTask;\nimport android.os.Build;\nimport android.os.Handler;\nimport android.os.HandlerThread;\nimport android.support.annotation.NonNull;\nimport android.util.Size;\nimport android.support.v4.app.ActivityCompat;\nimport android.support.v7.app.AppCompatActivity;\nimport android.os.Bundle;\nimport android.util.Log;\nimport android.view.GestureDetector;\nimport android.view.MotionEvent;\nimport android.view.Surface;\nimport android.view.TextureView;\nimport android.view.View;\nimport android.view.Window;\nimport android.view.WindowManager;\nimport android.widget.TextView;\nimport android.widget.Toast;\n\nimport java.io.FileNotFoundException;\nimport java.io.IOException;\nimport java.nio.ByteBuffer;\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.concurrent.TimeUnit;\n\nimport static android.view.View.SYSTEM_UI_FLAG_IMMERSIVE;\n\npublic class ClassifyCamera extends AppCompatActivity {\n    private static final String TAG = \"F8DEMO\";\n    private static final int REQUEST_CAMERA_PERMISSION = 200;\n\n    private TextureView textureView;\n  
  private String cameraId;\n    protected CameraDevice cameraDevice;\n    protected CameraCaptureSession cameraCaptureSessions;\n    protected CaptureRequest.Builder captureRequestBuilder;\n    private Size imageDimension;\n    private Handler mBackgroundHandler;\n    private HandlerThread mBackgroundThread;\n    private TextView tv;\n    private String predictedClass = \"none\";\n    private AssetManager mgr;\n    private boolean processing = false;\n    private Image image = null;\n    private boolean run_HWC = false;\n\n\n    static {\n        System.loadLibrary(\"native-lib\");\n    }\n\n    public native String classificationFromCaffe2(int h, int w, byte[] Y, byte[] U, byte[] V,\n                                                  int rowStride, int pixelStride, boolean r_hwc);\n    public native void initCaffe2(AssetManager mgr);\n    private class SetUpNeuralNetwork extends AsyncTask<Void, Void, Void> {\n        @Override\n        protected Void doInBackground(Void[] v) {\n            try {\n                initCaffe2(mgr);\n                predictedClass = \"Neural net loaded! 
Inferring...\";\n            } catch (Exception e) {\n                Log.d(TAG, \"Couldn't load neural network.\");\n            }\n            return null;\n        }\n    }\n\n    @Override\n    protected void onCreate(Bundle savedInstanceState) {\n        super.onCreate(savedInstanceState);\n        this.requestWindowFeature(Window.FEATURE_NO_TITLE);\n\n        mgr = getResources().getAssets();\n\n        new SetUpNeuralNetwork().execute();\n\n        View decorView = getWindow().getDecorView();\n        int uiOptions = View.SYSTEM_UI_FLAG_FULLSCREEN;\n        decorView.setSystemUiVisibility(uiOptions);\n\n        setContentView(R.layout.activity_classify_camera);\n\n        textureView = (TextureView) findViewById(R.id.textureView);\n        textureView.setSystemUiVisibility(SYSTEM_UI_FLAG_IMMERSIVE);\n        final GestureDetector gestureDetector = new GestureDetector(this.getApplicationContext(),\n                new GestureDetector.SimpleOnGestureListener(){\n            @Override\n            public boolean onDoubleTap(MotionEvent e) {\n                return true;\n            }\n\n            @Override\n            public void onLongPress(MotionEvent e) {\n                super.onLongPress(e);\n\n            }\n\n            @Override\n            public boolean onDoubleTapEvent(MotionEvent e) {\n                return true;\n            }\n\n            @Override\n            public boolean onDown(MotionEvent e) {\n                return true;\n            }\n        });\n\n        textureView.setOnTouchListener(new View.OnTouchListener() {\n            @Override\n            public boolean onTouch(View v, MotionEvent event) {\n                return gestureDetector.onTouchEvent(event);\n            }\n        });\n\n        assert textureView != null;\n        textureView.setSurfaceTextureListener(textureListener);\n        tv = (TextView) findViewById(R.id.sample_text);\n\n    }\n\n    TextureView.SurfaceTextureListener textureListener = new 
TextureView.SurfaceTextureListener() {\n        @Override\n        public void onSurfaceTextureAvailable(SurfaceTexture surface, int width, int height) {\n            //open your camera here\n            openCamera();\n        }\n        @Override\n        public void onSurfaceTextureSizeChanged(SurfaceTexture surface, int width, int height) {\n            // Transform you image captured size according to the surface width and height\n        }\n        @Override\n        public boolean onSurfaceTextureDestroyed(SurfaceTexture surface) {\n            return false;\n        }\n        @Override\n        public void onSurfaceTextureUpdated(SurfaceTexture surface) {\n        }\n    };\n    private final CameraDevice.StateCallback stateCallback = new CameraDevice.StateCallback() {\n        @Override\n        public void onOpened(CameraDevice camera) {\n            cameraDevice = camera;\n            createCameraPreview();\n        }\n        @Override\n        public void onDisconnected(CameraDevice camera) {\n            cameraDevice.close();\n        }\n        @Override\n        public void onError(CameraDevice camera, int error) {\n            cameraDevice.close();\n            cameraDevice = null;\n        }\n    };\n    protected void startBackgroundThread() {\n        mBackgroundThread = new HandlerThread(\"Camera Background\");\n        mBackgroundThread.start();\n        mBackgroundHandler = new Handler(mBackgroundThread.getLooper());\n    }\n    protected void stopBackgroundThread() {\n        mBackgroundThread.quitSafely();\n        try {\n            mBackgroundThread.join();\n            mBackgroundThread = null;\n            mBackgroundHandler = null;\n        } catch (InterruptedException e) {\n            e.printStackTrace();\n        }\n    }\n\n    protected void createCameraPreview() {\n        try {\n            SurfaceTexture texture = textureView.getSurfaceTexture();\n            assert texture != null;\n            
texture.setDefaultBufferSize(imageDimension.getWidth(), imageDimension.getHeight());\n            Surface surface = new Surface(texture);\n            int width = 227;\n            int height = 227;\n            ImageReader reader = ImageReader.newInstance(width, height, ImageFormat.YUV_420_888, 4);\n            ImageReader.OnImageAvailableListener readerListener = new ImageReader.OnImageAvailableListener() {\n                @Override\n                public void onImageAvailable(ImageReader reader) {\n                    try {\n\n                        image = reader.acquireNextImage();\n                        if (processing) {\n                            image.close();\n                            return;\n                        }\n                        processing = true;\n                        int w = image.getWidth();\n                        int h = image.getHeight();\n                        ByteBuffer Ybuffer = image.getPlanes()[0].getBuffer();\n                        ByteBuffer Ubuffer = image.getPlanes()[1].getBuffer();\n                        ByteBuffer Vbuffer = image.getPlanes()[2].getBuffer();\n                        // TODO: use these for proper image processing on different formats.\n                        int rowStride = image.getPlanes()[1].getRowStride();\n                        int pixelStride = image.getPlanes()[1].getPixelStride();\n                        byte[] Y = new byte[Ybuffer.capacity()];\n                        byte[] U = new byte[Ubuffer.capacity()];\n                        byte[] V = new byte[Vbuffer.capacity()];\n                        Ybuffer.get(Y);\n                        Ubuffer.get(U);\n                        Vbuffer.get(V);\n\n                        predictedClass = classificationFromCaffe2(h, w, Y, U, V,\n                                rowStride, pixelStride, run_HWC);\n                        runOnUiThread(new Runnable() {\n                            @Override\n                            public void 
run() {\n                                tv.setText(predictedClass);\n                                processing = false;\n                            }\n                        });\n\n                    } finally {\n                        if (image != null) {\n                            image.close();\n                        }\n                    }\n                }\n            };\n            reader.setOnImageAvailableListener(readerListener, mBackgroundHandler);\n            captureRequestBuilder = cameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);\n            captureRequestBuilder.addTarget(surface);\n            captureRequestBuilder.addTarget(reader.getSurface());\n\n            cameraDevice.createCaptureSession(Arrays.asList(surface, reader.getSurface()), new CameraCaptureSession.StateCallback(){\n                @Override\n                public void onConfigured(@NonNull CameraCaptureSession cameraCaptureSession) {\n                    if (null == cameraDevice) {\n                        return;\n                    }\n                    cameraCaptureSessions = cameraCaptureSession;\n                    updatePreview();\n                }\n                @Override\n                public void onConfigureFailed(@NonNull CameraCaptureSession cameraCaptureSession) {\n                    Toast.makeText(ClassifyCamera.this, \"Configuration change\", Toast.LENGTH_SHORT).show();\n                }\n            }, null);\n        } catch (CameraAccessException e) {\n            e.printStackTrace();\n        }\n    }\n    private void openCamera() {\n        CameraManager manager = (CameraManager) getSystemService(Context.CAMERA_SERVICE);\n        try {\n            cameraId = manager.getCameraIdList()[0];\n            CameraCharacteristics characteristics = manager.getCameraCharacteristics(cameraId);\n            StreamConfigurationMap map = characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);\n            assert map 
!= null;\n            imageDimension = map.getOutputSizes(SurfaceTexture.class)[0];\n            if (ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA) != PackageManager.PERMISSION_GRANTED && ActivityCompat.checkSelfPermission(this, Manifest.permission.WRITE_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {\n                ActivityCompat.requestPermissions(ClassifyCamera.this, new String[]{Manifest.permission.CAMERA, Manifest.permission.WRITE_EXTERNAL_STORAGE}, REQUEST_CAMERA_PERMISSION);\n                return;\n            }\n            manager.openCamera(cameraId, stateCallback, null);\n        } catch (CameraAccessException e) {\n            e.printStackTrace();\n        }\n    }\n\n    protected void updatePreview() {\n        if(null == cameraDevice) {\n            return;\n        }\n        captureRequestBuilder.set(CaptureRequest.CONTROL_MODE, CameraMetadata.CONTROL_MODE_AUTO);\n        try {\n            cameraCaptureSessions.setRepeatingRequest(captureRequestBuilder.build(), null, mBackgroundHandler);\n        } catch (CameraAccessException e) {\n            e.printStackTrace();\n        }\n    }\n\n    private void closeCamera() {\n        if (null != cameraDevice) {\n            cameraDevice.close();\n            cameraDevice = null;\n        }\n    }\n    @Override\n    public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {\n        if (requestCode == REQUEST_CAMERA_PERMISSION) {\n            if (grantResults[0] == PackageManager.PERMISSION_DENIED) {\n                Toast.makeText(ClassifyCamera.this, \"You can't use this app without granting permission\", Toast.LENGTH_LONG).show();\n                finish();\n            }\n        }\n    }\n    @Override\n    protected void onResume() {\n        super.onResume();\n        startBackgroundThread();\n        if (textureView.isAvailable()) {\n            openCamera();\n        } else {\n            
textureView.setSurfaceTextureListener(textureListener);\n        }\n    }\n\n    @Override\n    protected void onPause() {\n        closeCamera();\n        stopBackgroundThread();\n        super.onPause();\n    }\n}\n"
  },
  {
    "path": "app/src/main/res/drawable/ic_logo.xml",
    "content": "<vector android:height=\"24dp\" android:viewportHeight=\"105.43\"\n    android:viewportWidth=\"92.1\" android:width=\"24dp\" xmlns:android=\"http://schemas.android.com/apk/res/android\">\n    <path android:fillColor=\"#FF000000\" android:pathData=\"M77.65,53.44a10.27,10.27 0,0 1,0 20.54h-11.1v-20.54h11.1Z\"/>\n    <path android:fillColor=\"#FF000000\" android:pathData=\"M73.1,58.66a29.83,29.83 0,0 1,-59.67 0v-13.37h59.67L73.1,58.66Z\"/>\n    <path android:fillColor=\"#FF000000\" android:pathData=\"M28.79,25.11L45.41,25.11\"/>\n    <path android:fillColor=\"#FF000000\" android:pathData=\"M37.1,16.8L37.1,33.42\"/>\n    <path android:fillColor=\"#FF000000\" android:pathData=\"M46.64,11.41L63.26,11.41\"/>\n    <path android:fillColor=\"#FF000000\" android:pathData=\"M54.95,3.1L54.95,19.72\"/>\n    <path android:fillColor=\"#FF000000\" android:pathData=\"M4.14,101.29L82.65,101.29\"/>\n</vector>\n"
  },
  {
    "path": "app/src/main/res/drawable/ic_thumb.xml",
    "content": "<vector android:height=\"24dp\" android:viewportHeight=\"329.0\"\n    android:viewportWidth=\"341.0\" android:width=\"24dp\" xmlns:android=\"http://schemas.android.com/apk/res/android\">\n    <path android:fillColor=\"#32363b\" android:pathData=\"M178,0h3.9c12.4,1.2 23.1,9 30.4,18.8 12.7,16.9 18.8,38.7 16.5,59.7 -1.7,11.8 -3.7,23.6 -5.4,35.4 26.2,0.1 52.4,-0 78.6,0.1 15.4,0.3 30,11.2 34.3,26 3.2,10.8 0.8,23 -6.1,31.8 6.4,6.4 10.1,15 10.9,24v3.6c-0.9,11.6 -7.2,22.6 -17.1,28.8 3.9,10.2 3.6,22.2 -2,31.7 -3.7,6.8 -10,12 -17,15.3 3.4,13.9 -2.4,29.5 -14.2,37.6 -6,4.2 -13.4,6.6 -20.7,6.4 -41,-0 -82,0 -122.9,-0 -12.6,-0.4 -25.2,-5 -35,-13.1 -0.2,5.1 0.9,10.6 -1.9,15.2 -2.3,4.5 -7,7 -11.8,7.8h-84.6c-7.1,-0.8 -13,-6.6 -13.8,-13.8V157.5c0.8,-7.1 6.7,-13.5 14,-13.6 27.7,-0.1 55.3,0 83,-0.1 4,-0.2 8,1.5 10.8,4.4 2.4,2.3 3.3,5.5 4.7,8.4 1,-1.4 2,-2.9 2.8,-4.4 13.9,-25.8 27.8,-51.6 41.7,-77.3 3,-5.2 2.3,-11.3 2,-17 -1.3,-15.9 -2.4,-31.7 -3.7,-47.6C161.5,4.6 169.4,0.8 178,0m-8,17.1c1.2,15 2.4,30 3.4,44.9 0.6,6.7 -0.7,13.5 -3.8,19.5 -14,26 -28,51.9 -42,77.9 -2.9,7.1 -10.3,10.6 -15.4,15.9 0,38.1 -0,76.2 0,114.3 6.7,2.5 11.4,8.3 18,11.1 6.2,2.8 13.1,4.4 19.9,4.2 39.6,-0 79.3,-0 118.9,0 13,1.2 24.8,-11.7 22.5,-24.5 -0.8,-5.7 -4.2,-10.4 -6.6,-15.5 5.6,-1.4 11.8,-1.2 16.8,-4.3 7.2,-4 11.5,-12.5 10.6,-20.7 -0.3,-7.6 -6.1,-13.1 -10.3,-18.9 6,-2.2 12.8,-3.1 17.6,-7.6 7.8,-6.7 9.5,-19.2 3.7,-27.7 -3.8,-6.3 -11,-8.8 -17,-12.5 3.3,-2.5 6.6,-4.9 9.8,-7.5 5.9,-5 8.7,-13.4 6.9,-20.9 -1.9,-7.7 -8.4,-13.8 -15.9,-16 -3.5,-1.2 -7.3,-0.9 -10.9,-0.9 -29.8,0 -59.5,0 -89.3,-0 2.5,-17.3 5.4,-34.6 7.9,-51.9 1.5,-14.4 -2.1,-29.1 -9,-41.8 -4.3,-7.6 -10.2,-15 -18.5,-18.5 -5.5,-2.4 -12.1,-1.7 -17.2,1.4M14.5,158.4c0.1,52 -0,104.1 0.1,156.1 27.7,0.1 55.4,0 83,0 0.1,-52.1 0.1,-104.1 0,-156.2 -27.7,-0 -55.4,-0.2 -83.1,0.1z\"/>\n    <path android:fillColor=\"#758fb5\" android:pathData=\"M14.5,158.4c27.7,-0.3 55.4,-0.1 83.1,-0.1 0,52.1 0.1,104.1 -0,156.2 -27.7,0 -55.4,0.1 -83,-0 -0.1,-52 0,-104.1 
-0.1,-156.1z\"/>\n</vector>\n"
  },
  {
    "path": "app/src/main/res/layout/activity_classify_camera.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<android.support.constraint.ConstraintLayout xmlns:android=\"http://schemas.android.com/apk/res/android\"\n    xmlns:app=\"http://schemas.android.com/apk/res-auto\"\n    xmlns:tools=\"http://schemas.android.com/tools\"\n    android:layout_width=\"match_parent\"\n    android:layout_height=\"match_parent\"\n    tools:context=\"facebook.f8demo.ClassifyCamera\"\n    app:layout_constraintTop_toTopOf=\"@+id/sample_text\"\n    tools:layout_editor_absoluteX=\"0dp\"\n    android:layout_marginTop=\"8dp\"\n    android:theme=\"@android:style/Theme.Holo.NoActionBar\"\n    >\n\n    <TextureView\n        android:id=\"@+id/textureView\"\n        android:layout_width=\"395dp\"\n        android:layout_height=\"659dp\"\n        app:layout_constraintTop_toTopOf=\"parent\"\n        tools:layout_editor_absoluteX=\"8dp\"\n        tools:layout_editor_absoluteY=\"8dp\" />\n\n    <TextView\n        android:id=\"@+id/sample_text\"\n        android:layout_width=\"0dp\"\n        android:layout_height=\"wrap_content\"\n        android:background=\"#AA000000\"\n        android:text=\"Loading...\"\n        android:textAllCaps=\"true\"\n        android:textColor=\"@android:color/white\"\n        android:textStyle=\"bold\"\n        app:layout_constraintLeft_toLeftOf=\"parent\"\n        app:layout_constraintRight_toRightOf=\"parent\"\n        app:layout_constraintTop_toTopOf=\"parent\" />\n\n</android.support.constraint.ConstraintLayout>\n"
  },
  {
    "path": "app/src/main/res/values/colors.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<resources>\n    <color name=\"colorPrimary\">#3F51B5</color>\n    <color name=\"colorPrimaryDark\">#303F9F</color>\n    <color name=\"colorAccent\">#FF4081</color>\n</resources>\n"
  },
  {
    "path": "app/src/main/res/values/strings.xml",
    "content": "<resources>\n    <string name=\"app_name\">AICamera</string>\n</resources>\n"
  },
  {
    "path": "app/src/main/res/values/styles.xml",
    "content": "<resources>\n    <style name=\"AppBaseTheme\" parent=\"Theme.AppCompat.Light.NoActionBar\">\n        <!-- Customize your theme here. -->\n\n    </style>\n    <style name=\"AppTheme\" parent=\"AppBaseTheme\">\n        <!-- Customize your theme here. -->\n        <item name=\"android:windowNoTitle\">true</item>\n        <item name=\"android:windowActionBar\">false</item>\n        <item name=\"colorPrimary\">@color/colorPrimary</item>\n        <item name=\"colorPrimaryDark\">@color/colorPrimaryDark</item>\n        <item name=\"android:colorBackground\">@android:color/black</item>\n        <item name=\"colorAccent\">@color/colorAccent</item>\n    </style>\n\n</resources>\n"
  },
  {
    "path": "app/src/test/java/facebook/f8demo/ExampleUnitTest.java",
    "content": "package facebook.f8demo;\n\nimport org.junit.Test;\n\nimport static org.junit.Assert.*;\n\n/**\n * Example local unit test, which will execute on the development machine (host).\n *\n * @see <a href=\"http://d.android.com/tools/testing\">Testing documentation</a>\n */\npublic class ExampleUnitTest {\n    @Test\n    public void addition_isCorrect() throws Exception {\n        assertEquals(4, 2 + 2);\n    }\n}"
  },
  {
    "path": "build.gradle",
    "content": "// Top-level build file where you can add configuration options common to all sub-projects/modules.\n\nbuildscript {\n    repositories {\n        google()\n        jcenter()\n    }\n    dependencies {\n        classpath 'com.android.tools.build:gradle:3.0.1'\n\n        // NOTE: Do not place your application dependencies here; they belong\n        // in the individual module build.gradle files\n    }\n}\n\nallprojects {\n    repositories {\n        google()\n        jcenter()\n    }\n}\n\ntask clean(type: Delete) {\n    delete rootProject.buildDir\n}\n"
  },
  {
    "path": "gradle/wrapper/gradle-wrapper.properties",
    "content": "#Tue Mar 06 19:43:14 PST 2018\ndistributionBase=GRADLE_USER_HOME\ndistributionPath=wrapper/dists\nzipStoreBase=GRADLE_USER_HOME\nzipStorePath=wrapper/dists\ndistributionUrl=https\\://services.gradle.org/distributions/gradle-4.1-all.zip\n"
  },
  {
    "path": "gradle.properties",
    "content": "# Project-wide Gradle settings.\n\n# IDE (e.g. Android Studio) users:\n# Gradle settings configured through the IDE *will override*\n# any settings specified in this file.\n\n# For more details on how to configure your build environment visit\n# http://www.gradle.org/docs/current/userguide/build_environment.html\n\n# Specifies the JVM arguments used for the daemon process.\n# The setting is particularly useful for tweaking memory settings.\norg.gradle.jvmargs=-Xmx1536m\n\n# When configured, Gradle will run in incubating parallel mode.\n# This option should only be used with decoupled projects. More details, visit\n# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects\n# org.gradle.parallel=true\n"
  },
  {
    "path": "gradlew",
    "content": "#!/usr/bin/env bash\n\n##############################################################################\n##\n##  Gradle start up script for UN*X\n##\n##############################################################################\n\n# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.\nDEFAULT_JVM_OPTS=\"\"\n\nAPP_NAME=\"Gradle\"\nAPP_BASE_NAME=`basename \"$0\"`\n\n# Use the maximum available, or set MAX_FD != -1 to use that value.\nMAX_FD=\"maximum\"\n\nwarn ( ) {\n    echo \"$*\"\n}\n\ndie ( ) {\n    echo\n    echo \"$*\"\n    echo\n    exit 1\n}\n\n# OS specific support (must be 'true' or 'false').\ncygwin=false\nmsys=false\ndarwin=false\ncase \"`uname`\" in\n  CYGWIN* )\n    cygwin=true\n    ;;\n  Darwin* )\n    darwin=true\n    ;;\n  MINGW* )\n    msys=true\n    ;;\nesac\n\n# Attempt to set APP_HOME\n# Resolve links: $0 may be a link\nPRG=\"$0\"\n# Need this for relative symlinks.\nwhile [ -h \"$PRG\" ] ; do\n    ls=`ls -ld \"$PRG\"`\n    link=`expr \"$ls\" : '.*-> \\(.*\\)$'`\n    if expr \"$link\" : '/.*' > /dev/null; then\n        PRG=\"$link\"\n    else\n        PRG=`dirname \"$PRG\"`\"/$link\"\n    fi\ndone\nSAVED=\"`pwd`\"\ncd \"`dirname \\\"$PRG\\\"`/\" >/dev/null\nAPP_HOME=\"`pwd -P`\"\ncd \"$SAVED\" >/dev/null\n\nCLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar\n\n# Determine the Java command to use to start the JVM.\nif [ -n \"$JAVA_HOME\" ] ; then\n    if [ -x \"$JAVA_HOME/jre/sh/java\" ] ; then\n        # IBM's JDK on AIX uses strange locations for the executables\n        JAVACMD=\"$JAVA_HOME/jre/sh/java\"\n    else\n        JAVACMD=\"$JAVA_HOME/bin/java\"\n    fi\n    if [ ! 
-x \"$JAVACMD\" ] ; then\n        die \"ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME\n\nPlease set the JAVA_HOME variable in your environment to match the\nlocation of your Java installation.\"\n    fi\nelse\n    JAVACMD=\"java\"\n    which java >/dev/null 2>&1 || die \"ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.\n\nPlease set the JAVA_HOME variable in your environment to match the\nlocation of your Java installation.\"\nfi\n\n# Increase the maximum file descriptors if we can.\nif [ \"$cygwin\" = \"false\" -a \"$darwin\" = \"false\" ] ; then\n    MAX_FD_LIMIT=`ulimit -H -n`\n    if [ $? -eq 0 ] ; then\n        if [ \"$MAX_FD\" = \"maximum\" -o \"$MAX_FD\" = \"max\" ] ; then\n            MAX_FD=\"$MAX_FD_LIMIT\"\n        fi\n        ulimit -n $MAX_FD\n        if [ $? -ne 0 ] ; then\n            warn \"Could not set maximum file descriptor limit: $MAX_FD\"\n        fi\n    else\n        warn \"Could not query maximum file descriptor limit: $MAX_FD_LIMIT\"\n    fi\nfi\n\n# For Darwin, add options to specify how the application appears in the dock\nif $darwin; then\n    GRADLE_OPTS=\"$GRADLE_OPTS \\\"-Xdock:name=$APP_NAME\\\" \\\"-Xdock:icon=$APP_HOME/media/gradle.icns\\\"\"\nfi\n\n# For Cygwin, switch paths to Windows format before running java\nif $cygwin ; then\n    APP_HOME=`cygpath --path --mixed \"$APP_HOME\"`\n    CLASSPATH=`cygpath --path --mixed \"$CLASSPATH\"`\n    JAVACMD=`cygpath --unix \"$JAVACMD\"`\n\n    # We build the pattern for arguments to be converted via cygpath\n    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`\n    SEP=\"\"\n    for dir in $ROOTDIRSRAW ; do\n        ROOTDIRS=\"$ROOTDIRS$SEP$dir\"\n        SEP=\"|\"\n    done\n    OURCYGPATTERN=\"(^($ROOTDIRS))\"\n    # Add a user-defined pattern to the cygpath arguments\n    if [ \"$GRADLE_CYGPATTERN\" != \"\" ] ; then\n        OURCYGPATTERN=\"$OURCYGPATTERN|($GRADLE_CYGPATTERN)\"\n    fi\n    # Now convert the arguments - 
kludge to limit ourselves to /bin/sh\n    i=0\n    for arg in \"$@\" ; do\n        CHECK=`echo \"$arg\"|egrep -c \"$OURCYGPATTERN\" -`\n        CHECK2=`echo \"$arg\"|egrep -c \"^-\"`                                 ### Determine if an option\n\n        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then                    ### Added a condition\n            eval `echo args$i`=`cygpath --path --ignore --mixed \"$arg\"`\n        else\n            eval `echo args$i`=\"\\\"$arg\\\"\"\n        fi\n        i=$((i+1))\n    done\n    case $i in\n        (0) set -- ;;\n        (1) set -- \"$args0\" ;;\n        (2) set -- \"$args0\" \"$args1\" ;;\n        (3) set -- \"$args0\" \"$args1\" \"$args2\" ;;\n        (4) set -- \"$args0\" \"$args1\" \"$args2\" \"$args3\" ;;\n        (5) set -- \"$args0\" \"$args1\" \"$args2\" \"$args3\" \"$args4\" ;;\n        (6) set -- \"$args0\" \"$args1\" \"$args2\" \"$args3\" \"$args4\" \"$args5\" ;;\n        (7) set -- \"$args0\" \"$args1\" \"$args2\" \"$args3\" \"$args4\" \"$args5\" \"$args6\" ;;\n        (8) set -- \"$args0\" \"$args1\" \"$args2\" \"$args3\" \"$args4\" \"$args5\" \"$args6\" \"$args7\" ;;\n        (9) set -- \"$args0\" \"$args1\" \"$args2\" \"$args3\" \"$args4\" \"$args5\" \"$args6\" \"$args7\" \"$args8\" ;;\n    esac\nfi\n\n# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules\nfunction splitJvmOpts() {\n    JVM_OPTS=(\"$@\")\n}\neval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS\nJVM_OPTS[${#JVM_OPTS[*]}]=\"-Dorg.gradle.appname=$APP_BASE_NAME\"\n\nexec \"$JAVACMD\" \"${JVM_OPTS[@]}\" -classpath \"$CLASSPATH\" org.gradle.wrapper.GradleWrapperMain \"$@\"\n"
  },
  {
    "path": "gradlew.bat",
    "content": "@if \"%DEBUG%\" == \"\" @echo off\r\n@rem ##########################################################################\r\n@rem\r\n@rem  Gradle startup script for Windows\r\n@rem\r\n@rem ##########################################################################\r\n\r\n@rem Set local scope for the variables with windows NT shell\r\nif \"%OS%\"==\"Windows_NT\" setlocal\r\n\r\n@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.\r\nset DEFAULT_JVM_OPTS=\r\n\r\nset DIRNAME=%~dp0\r\nif \"%DIRNAME%\" == \"\" set DIRNAME=.\r\nset APP_BASE_NAME=%~n0\r\nset APP_HOME=%DIRNAME%\r\n\r\n@rem Find java.exe\r\nif defined JAVA_HOME goto findJavaFromJavaHome\r\n\r\nset JAVA_EXE=java.exe\r\n%JAVA_EXE% -version >NUL 2>&1\r\nif \"%ERRORLEVEL%\" == \"0\" goto init\r\n\r\necho.\r\necho ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.\r\necho.\r\necho Please set the JAVA_HOME variable in your environment to match the\r\necho location of your Java installation.\r\n\r\ngoto fail\r\n\r\n:findJavaFromJavaHome\r\nset JAVA_HOME=%JAVA_HOME:\"=%\r\nset JAVA_EXE=%JAVA_HOME%/bin/java.exe\r\n\r\nif exist \"%JAVA_EXE%\" goto init\r\n\r\necho.\r\necho ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%\r\necho.\r\necho Please set the JAVA_HOME variable in your environment to match the\r\necho location of your Java installation.\r\n\r\ngoto fail\r\n\r\n:init\r\n@rem Get command-line arguments, handling Windowz variants\r\n\r\nif not \"%OS%\" == \"Windows_NT\" goto win9xME_args\r\nif \"%@eval[2+2]\" == \"4\" goto 4NT_args\r\n\r\n:win9xME_args\r\n@rem Slurp the command line arguments.\r\nset CMD_LINE_ARGS=\r\nset _SKIP=2\r\n\r\n:win9xME_args_slurp\r\nif \"x%~1\" == \"x\" goto execute\r\n\r\nset CMD_LINE_ARGS=%*\r\ngoto execute\r\n\r\n:4NT_args\r\n@rem Get arguments from the 4NT Shell from JP Software\r\nset CMD_LINE_ARGS=%$\r\n\r\n:execute\r\n@rem Setup the command line\r\n\r\nset 
CLASSPATH=%APP_HOME%\\gradle\\wrapper\\gradle-wrapper.jar\r\n\r\n@rem Execute Gradle\r\n\"%JAVA_EXE%\" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% \"-Dorg.gradle.appname=%APP_BASE_NAME%\" -classpath \"%CLASSPATH%\" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%\r\n\r\n:end\r\n@rem End local scope for the variables with windows NT shell\r\nif \"%ERRORLEVEL%\"==\"0\" goto mainEnd\r\n\r\n:fail\r\nrem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of\r\nrem the _cmd.exe /c_ return code!\r\nif  not \"\" == \"%GRADLE_EXIT_CONSOLE%\" exit 1\r\nexit /b 1\r\n\r\n:mainEnd\r\nif \"%OS%\"==\"Windows_NT\" endlocal\r\n\r\n:omega\r\n"
  },
  {
    "path": "settings.gradle",
    "content": "include ':app'\n"
  }
]